Diffstat (limited to 'target/linux/layerscape/patches-4.9')
-rw-r--r--  target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch | 532
-rw-r--r--  target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch | 461
-rw-r--r--  target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch | 10085
-rw-r--r--  target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch | 1042
-rw-r--r--  target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch | 412
-rw-r--r--  target/linux/layerscape/patches-4.9/601-net-support-layerscape.patch | 2549
-rw-r--r--  target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch | 155005
-rw-r--r--  target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch | 2062
-rw-r--r--  target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch | 1776
-rw-r--r--  target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch | 11518
-rw-r--r--  target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch | 23027
-rw-r--r--  target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch | 149
-rw-r--r--  target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch | 312
-rw-r--r--  target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch | 370
-rw-r--r--  target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch | 26853
-rw-r--r--  target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch | 3781
-rw-r--r--  target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch | 331
-rw-r--r--  target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch | 73
-rw-r--r--  target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch | 462
-rw-r--r--  target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch | 140
-rw-r--r--  target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch | 1338
-rw-r--r--  target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch | 182
-rw-r--r--  target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch | 611
-rw-r--r--  target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch | 1378
-rw-r--r--  target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch | 688
-rw-r--r--  target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch | 445
-rw-r--r--  target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch | 163
-rw-r--r--  target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch | 1471
-rw-r--r--  target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch | 1192
29 files changed, 248408 insertions, 0 deletions
diff --git a/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch b/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch
new file mode 100644
index 0000000000..0105f5930b
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch
@@ -0,0 +1,532 @@
+From 11edf9c88acea13d1a02901289060263b4027a77 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 09:52:26 +0800
+Subject: [PATCH] config: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is an integrated patch for layerscape config/makefile support.
+
+Signed-off-by: Yuantian Tang <andy.tang@nxp.com>
+Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/base/Kconfig | 1 +
+ drivers/crypto/Makefile | 2 +-
+ drivers/net/ethernet/freescale/Kconfig | 4 +-
+ drivers/net/ethernet/freescale/Makefile | 2 +
+ drivers/ptp/Kconfig | 29 ++++++
+ drivers/rtc/Kconfig | 8 ++
+ drivers/rtc/Makefile | 1 +
+ drivers/soc/Kconfig | 3 +-
+ drivers/soc/fsl/Kconfig | 22 +++++
+ drivers/soc/fsl/Kconfig.arm | 16 ++++
+ drivers/soc/fsl/Makefile | 4 +
+ drivers/soc/fsl/layerscape/Kconfig | 10 +++
+ drivers/soc/fsl/layerscape/Makefile | 1 +
+ drivers/soc/fsl/rcpm.c | 154 ++++++++++++++++++++++++++++++++
+ drivers/staging/Kconfig | 4 +
+ drivers/staging/Makefile | 2 +
+ drivers/staging/fsl-dpaa2/Kconfig | 41 +++++++++
+ drivers/staging/fsl-dpaa2/Makefile | 9 ++
+ 18 files changed, 309 insertions(+), 4 deletions(-)
+ create mode 100644 drivers/soc/fsl/Kconfig
+ create mode 100644 drivers/soc/fsl/Kconfig.arm
+ create mode 100644 drivers/soc/fsl/layerscape/Kconfig
+ create mode 100644 drivers/soc/fsl/layerscape/Makefile
+ create mode 100644 drivers/soc/fsl/rcpm.c
+ create mode 100644 drivers/staging/fsl-dpaa2/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/Makefile
+
+diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
+index e1c0e2e0..4211a7fd 100644
+--- a/drivers/base/Kconfig
++++ b/drivers/base/Kconfig
+@@ -237,6 +237,7 @@ config GENERIC_CPU_AUTOPROBE
+
+ config SOC_BUS
+ bool
++ select GLOB
+
+ source "drivers/base/regmap/Kconfig"
+
+diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
+index ad7250fa..6d788fd7 100644
+--- a/drivers/crypto/Makefile
++++ b/drivers/crypto/Makefile
+@@ -3,7 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
+ obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+ obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
+ obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
+ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+ obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
+diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
+index d1ca45fb..74a2864e 100644
+--- a/drivers/net/ethernet/freescale/Kconfig
++++ b/drivers/net/ethernet/freescale/Kconfig
+@@ -5,7 +5,7 @@
+ config NET_VENDOR_FREESCALE
+ bool "Freescale devices"
+ default y
+- depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
++ depends on FSL_SOC || (QUICC_ENGINE && PPC32) || CPM1 || CPM2 || PPC_MPC512x || \
+ M523x || M527x || M5272 || M528x || M520x || M532x || \
+ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
+ ARCH_LAYERSCAPE
+@@ -93,4 +93,6 @@ config GIANFAR
+ and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
+ on the 8540.
+
++source "drivers/net/ethernet/freescale/sdk_fman/Kconfig"
++source "drivers/net/ethernet/freescale/sdk_dpaa/Kconfig"
+ endif # NET_VENDOR_FREESCALE
+diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
+index cbe21dc7..a5d4405f 100644
+--- a/drivers/net/ethernet/freescale/Makefile
++++ b/drivers/net/ethernet/freescale/Makefile
+@@ -21,4 +21,6 @@ gianfar_driver-objs := gianfar.o \
+ obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
+
++obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/
++obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/
+ obj-$(CONFIG_FSL_FMAN) += fman/
+diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
+index ee3de342..4c45beda 100644
+--- a/drivers/ptp/Kconfig
++++ b/drivers/ptp/Kconfig
+@@ -39,6 +39,35 @@ config PTP_1588_CLOCK_GIANFAR
+ To compile this driver as a module, choose M here: the module
+ will be called gianfar_ptp.
+
++config PTP_1588_CLOCK_DPAA
++ tristate "Freescale DPAA as PTP clock"
++ depends on FSL_SDK_DPAA_ETH
++ select PTP_1588_CLOCK
++ select FSL_DPAA_TS
++ default n
++ help
++ This driver adds support for using the DPAA 1588 timer module
++ as a PTP clock. This clock is only useful if your PTP programs are
++ getting hardware time stamps on the PTP Ethernet packets
++ using the SO_TIMESTAMPING API.
++
++ To compile this driver as a module, choose M here: the module
++ will be called dpaa_ptp.
++
++config PTP_1588_CLOCK_DPAA2
++ tristate "Freescale DPAA2 as PTP clock"
++ depends on FSL_DPAA2_ETH
++ select PTP_1588_CLOCK
++ default y
++ help
++ This driver adds support for using the DPAA2 1588 timer module
++ as a PTP clock. This clock is only useful if your PTP programs are
++ getting hardware time stamps on the PTP Ethernet packets
++ using the SO_TIMESTAMPING API.
++
++ To compile this driver as a module, choose M here: the module
++ will be called dpaa2-rtc.
++
+ config PTP_1588_CLOCK_IXP46X
+ tristate "Intel IXP46x as PTP clock"
+ depends on IXP4XX_ETH
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index 0723c97e..df610dcd 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -414,6 +414,14 @@ config RTC_DRV_PCF85063
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf85063.
+
++config RTC_DRV_PCF85263
++ tristate "NXP PCF85263"
++ help
++	  If you say yes here you get support for the PCF85263 RTC chip.
++
++ This driver can also be built as a module. If so, the module
++ will be called rtc-pcf85263.
++
+ config RTC_DRV_PCF8563
+ tristate "Philips PCF8563/Epson RTC8564"
+ help
+diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
+index 1ac694a3..7675b8a7 100644
+--- a/drivers/rtc/Makefile
++++ b/drivers/rtc/Makefile
+@@ -111,6 +111,7 @@ obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o
+ obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
+ obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
+ obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
++obj-$(CONFIG_RTC_DRV_PCF85263) += rtc-pcf85263.o
+ obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
+ obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
+ obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o
+diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
+index e6e90e80..f31bceb6 100644
+--- a/drivers/soc/Kconfig
++++ b/drivers/soc/Kconfig
+@@ -1,8 +1,7 @@
+ menu "SOC (System On Chip) specific Drivers"
+
+ source "drivers/soc/bcm/Kconfig"
+-source "drivers/soc/fsl/qbman/Kconfig"
+-source "drivers/soc/fsl/qe/Kconfig"
++source "drivers/soc/fsl/Kconfig"
+ source "drivers/soc/mediatek/Kconfig"
+ source "drivers/soc/qcom/Kconfig"
+ source "drivers/soc/rockchip/Kconfig"
+diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
+new file mode 100644
+index 00000000..d4cd25f1
+--- /dev/null
++++ b/drivers/soc/fsl/Kconfig
+@@ -0,0 +1,22 @@
++#
++# Freescale SOC drivers
++#
++
++source "drivers/soc/fsl/qbman/Kconfig"
++source "drivers/soc/fsl/qe/Kconfig"
++source "drivers/soc/fsl/ls2-console/Kconfig"
++
++config FSL_GUTS
++ bool
++ select SOC_BUS
++ help
++ The global utilities block controls power management, I/O device
++	  enabling, power-on reset (POR) configuration monitoring, alternate
++	  function selection for multiplexed signals, and clock control.
++	  This driver manages and provides access to the global utilities block.
++	  Initially only reading SVR and registering the SoC device are supported.
++ Other guts accesses, such as reading RCW, should eventually be moved
++ into this driver as well.
++if ARM || ARM64
++source "drivers/soc/fsl/Kconfig.arm"
++endif
+diff --git a/drivers/soc/fsl/Kconfig.arm b/drivers/soc/fsl/Kconfig.arm
+new file mode 100644
+index 00000000..106c9b98
+--- /dev/null
++++ b/drivers/soc/fsl/Kconfig.arm
+@@ -0,0 +1,16 @@
++#
++# Freescale ARM SOC Drivers
++#
++
++config LS_SOC_DRIVERS
++	bool "Layerscape SoC Drivers"
++ depends on ARCH_LAYERSCAPE || SOC_LS1021A
++ default n
++ help
++	  Say y here to enable Freescale Layerscape SoC device driver support.
++	  These drivers support specific blocks or features on Layerscape
++	  platforms.
++
++if LS_SOC_DRIVERS
++ source "drivers/soc/fsl/layerscape/Kconfig"
++endif
+diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
+index 75e1f533..b8708569 100644
+--- a/drivers/soc/fsl/Makefile
++++ b/drivers/soc/fsl/Makefile
+@@ -5,3 +5,7 @@
+ obj-$(CONFIG_FSL_DPAA) += qbman/
+ obj-$(CONFIG_QUICC_ENGINE) += qe/
+ obj-$(CONFIG_CPM) += qe/
++obj-$(CONFIG_FSL_GUTS) += guts.o
++obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console/
++obj-$(CONFIG_SUSPEND) += rcpm.o
++obj-$(CONFIG_LS_SOC_DRIVERS) += layerscape/
+diff --git a/drivers/soc/fsl/layerscape/Kconfig b/drivers/soc/fsl/layerscape/Kconfig
+new file mode 100644
+index 00000000..e1373aa1
+--- /dev/null
++++ b/drivers/soc/fsl/layerscape/Kconfig
+@@ -0,0 +1,10 @@
++#
++# Layerscape Soc drivers
++#
++config FTM_ALARM
++ bool "FTM alarm driver"
++ default n
++ help
++	  Say y here to enable FTM alarm support. The FTM alarm provides
++	  alarm functions for waking the system from deep sleep. Only one
++	  FTM instance (FTM 0) can be used as the alarm.
+diff --git a/drivers/soc/fsl/layerscape/Makefile b/drivers/soc/fsl/layerscape/Makefile
+new file mode 100644
+index 00000000..6299aa1d
+--- /dev/null
++++ b/drivers/soc/fsl/layerscape/Makefile
+@@ -0,0 +1 @@
++obj-$(CONFIG_FTM_ALARM) += ftm_alarm.o
+diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
+new file mode 100644
+index 00000000..a6a31c87
+--- /dev/null
++++ b/drivers/soc/fsl/rcpm.c
+@@ -0,0 +1,154 @@
++/*
++ * Run Control and Power Management (RCPM) driver
++ *
++ * Copyright 2016 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++#define pr_fmt(fmt) "RCPM: %s: " fmt, __func__
++
++#include <linux/kernel.h>
++#include <linux/io.h>
++#include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/suspend.h>
++
++/* RCPM register offset */
++#define RCPM_IPPDEXPCR0 0x140
++
++#define RCPM_WAKEUP_CELL_SIZE 2
++
++struct rcpm_config {
++ int ipp_num;
++ int ippdexpcr_offset;
++ u32 ippdexpcr[2];
++ void *rcpm_reg_base;
++};
++
++static struct rcpm_config *rcpm;
++
++static inline void rcpm_reg_write(u32 offset, u32 value)
++{
++ iowrite32be(value, rcpm->rcpm_reg_base + offset);
++}
++
++static inline u32 rcpm_reg_read(u32 offset)
++{
++ return ioread32be(rcpm->rcpm_reg_base + offset);
++}
++
++static void rcpm_wakeup_fixup(struct device *dev, void *data)
++{
++ struct device_node *node = dev ? dev->of_node : NULL;
++ u32 value[RCPM_WAKEUP_CELL_SIZE];
++ int ret, i;
++
++ if (!dev || !node || !device_may_wakeup(dev))
++ return;
++
++ /*
++	 * Get the values in the "fsl,rcpm-wakeup" property.
++	 * The three values are:
++	 * The first is a phandle to the RCPM node.
++ * The second is the value of the ippdexpcr0 register.
++ * The third is the value of the ippdexpcr1 register.
++ */
++ ret = of_property_read_u32_array(node, "fsl,rcpm-wakeup",
++ value, RCPM_WAKEUP_CELL_SIZE);
++ if (ret)
++ return;
++
++ pr_debug("wakeup source: the device %s\n", node->full_name);
++
++ for (i = 0; i < rcpm->ipp_num; i++)
++ rcpm->ippdexpcr[i] |= value[i + 1];
++}
++
++static int rcpm_suspend_prepare(void)
++{
++ int i;
++
++ BUG_ON(!rcpm);
++
++ for (i = 0; i < rcpm->ipp_num; i++)
++ rcpm->ippdexpcr[i] = 0;
++
++ dpm_for_each_dev(NULL, rcpm_wakeup_fixup);
++
++ for (i = 0; i < rcpm->ipp_num; i++) {
++ rcpm_reg_write(rcpm->ippdexpcr_offset + 4 * i,
++ rcpm->ippdexpcr[i]);
++ pr_debug("ippdexpcr%d = 0x%x\n", i, rcpm->ippdexpcr[i]);
++ }
++
++ return 0;
++}
++
++static int rcpm_suspend_notifier_call(struct notifier_block *bl,
++ unsigned long state,
++ void *unused)
++{
++ switch (state) {
++ case PM_SUSPEND_PREPARE:
++ rcpm_suspend_prepare();
++ break;
++ }
++
++ return NOTIFY_DONE;
++}
++
++static struct rcpm_config rcpm_default_config = {
++ .ipp_num = 1,
++ .ippdexpcr_offset = RCPM_IPPDEXPCR0,
++};
++
++static const struct of_device_id rcpm_matches[] = {
++ {
++ .compatible = "fsl,qoriq-rcpm-2.1",
++ .data = &rcpm_default_config,
++ },
++ {}
++};
++
++static struct notifier_block rcpm_suspend_notifier = {
++ .notifier_call = rcpm_suspend_notifier_call,
++};
++
++static int __init layerscape_rcpm_init(void)
++{
++ const struct of_device_id *match;
++ struct device_node *np;
++
++ np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
++ if (!np) {
++ pr_err("Can't find the RCPM node.\n");
++ return -EINVAL;
++ }
++
++ if (match->data)
++ rcpm = (struct rcpm_config *)match->data;
++ else
++ return -EINVAL;
++
++ rcpm->rcpm_reg_base = of_iomap(np, 0);
++ of_node_put(np);
++ if (!rcpm->rcpm_reg_base)
++ return -ENOMEM;
++
++ register_pm_notifier(&rcpm_suspend_notifier);
++
++ pr_info("The RCPM driver initialized.\n");
++
++ return 0;
++}
++
++subsys_initcall(layerscape_rcpm_init);
+diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
+index 58a7b350..f8e54860 100644
+--- a/drivers/staging/Kconfig
++++ b/drivers/staging/Kconfig
+@@ -94,6 +94,8 @@ source "drivers/staging/fbtft/Kconfig"
+
+ source "drivers/staging/fsl-mc/Kconfig"
+
++source "drivers/staging/fsl-dpaa2/Kconfig"
++
+ source "drivers/staging/wilc1000/Kconfig"
+
+ source "drivers/staging/most/Kconfig"
+@@ -106,4 +108,6 @@ source "drivers/staging/greybus/Kconfig"
+
+ source "drivers/staging/vc04_services/Kconfig"
+
++source "drivers/staging/fsl_qbman/Kconfig"
++
+ endif # STAGING
+diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
+index 2fa9745d..cbd7b089 100644
+--- a/drivers/staging/Makefile
++++ b/drivers/staging/Makefile
+@@ -36,9 +36,11 @@ obj-$(CONFIG_UNISYSSPAR) += unisys/
+ obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
+ obj-$(CONFIG_FB_TFT) += fbtft/
+ obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
++obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
+ obj-$(CONFIG_WILC1000) += wilc1000/
+ obj-$(CONFIG_MOST) += most/
+ obj-$(CONFIG_ISDN_I4L) += i4l/
+ obj-$(CONFIG_KS7010) += ks7010/
+ obj-$(CONFIG_GREYBUS) += greybus/
+ obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
++obj-$(CONFIG_FSL_SDK_DPA) += fsl_qbman/
+diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
+new file mode 100644
+index 00000000..8042d9cc
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/Kconfig
+@@ -0,0 +1,41 @@
++#
++# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
++#
++
++config FSL_DPAA2
++ bool "Freescale DPAA2 devices"
++ depends on FSL_MC_BUS
++ ---help---
++ Build drivers for Freescale DataPath Acceleration
++ Architecture (DPAA2) family of SoCs.
++
++config FSL_DPAA2_ETH
++ tristate "Freescale DPAA2 Ethernet"
++ depends on FSL_DPAA2 && FSL_MC_DPIO
++ ---help---
++ Ethernet driver for Freescale DPAA2 SoCs, using the
++	  Freescale MC bus driver.
++
++if FSL_DPAA2_ETH
++config FSL_DPAA2_ETH_USE_ERR_QUEUE
++ bool "Enable Rx error queue"
++ default n
++ ---help---
++ Allow Rx error frames to be enqueued on an error queue
++ and processed by the driver (by default they are dropped
++ in hardware).
++	  This may impact performance; it is recommended for debugging
++	  purposes only.
++
++# QBMAN_DEBUG requires some additional DPIO APIs
++config FSL_DPAA2_ETH_DEBUGFS
++ depends on DEBUG_FS && FSL_QBMAN_DEBUG
++ bool "Enable debugfs support"
++ default n
++ ---help---
++ Enable advanced statistics through debugfs interface.
++endif
++
++source "drivers/staging/fsl-dpaa2/mac/Kconfig"
++source "drivers/staging/fsl-dpaa2/evb/Kconfig"
++source "drivers/staging/fsl-dpaa2/ethsw/Kconfig"
+diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile
+new file mode 100644
+index 00000000..cbaa8c20
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/Makefile
+@@ -0,0 +1,9 @@
++#
++# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
++#
++
++obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
++obj-$(CONFIG_FSL_DPAA2_MAC) += mac/
++obj-$(CONFIG_FSL_DPAA2_EVB) += evb/
++obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
++obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += rtc/
+--
+2.14.1
+
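The rcpm.c driver added above collects wakeup sources by walking the device PM list: rcpm_wakeup_fixup() only acts on devices for which device_may_wakeup() returns true and whose node carries an "fsl,rcpm-wakeup" property. A minimal sketch of how a peripheral driver would opt in follows; the example driver, its probe function, and the "vendor,example-wakeup-dev" compatible string are illustrative assumptions, not part of the patch — only device_init_wakeup() and the RCPM behavior it triggers come from the code above.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int example_probe(struct platform_device *pdev)
{
	/* ... map registers, request interrupts, etc. ... */

	/*
	 * Mark the device wakeup-capable and enabled. During suspend
	 * prepare, rcpm_suspend_prepare() walks all devices via
	 * dpm_for_each_dev() and ORs the IPPDEXPCR bits from this
	 * device's "fsl,rcpm-wakeup" property into the RCPM registers.
	 */
	device_init_wakeup(&pdev->dev, true);

	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-wakeup-dev" },	/* hypothetical */
	{ }
};

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-wakeup",
		.of_match_table = example_of_match,
	},
};
module_platform_driver(example_driver);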
diff --git a/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch b/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch
new file mode 100644
index 0000000000..da3dc120b7
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch
@@ -0,0 +1,461 @@
+From 7edaf7ed8fbd5fb50950a4fc8067a9c14557d010 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 10:03:52 +0800
+Subject: [PATCH] arch: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is an integrated patch for layerscape arch support.
+
+Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
+Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
+Signed-off-by: Zhao Qiang <B45475@freescale.com>
+Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
+Signed-off-by: Haiying Wang <Haiying.wang@freescale.com>
+Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
+Signed-off-by: Po Liu <po.liu@nxp.com>
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
+Signed-off-by: Jianhua Xie <jianhua.xie@nxp.com>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ arch/arm/include/asm/delay.h | 16 +++++++++
+ arch/arm/include/asm/io.h | 31 ++++++++++++++++++
+ arch/arm/include/asm/mach/map.h | 4 +--
+ arch/arm/include/asm/pgtable.h | 7 ++++
+ arch/arm/kernel/bios32.c | 43 ++++++++++++++++++++++++
+ arch/arm/mm/dma-mapping.c | 1 +
+ arch/arm/mm/ioremap.c | 7 ++++
+ arch/arm/mm/mmu.c | 9 +++++
+ arch/arm64/include/asm/cache.h | 2 +-
+ arch/arm64/include/asm/io.h | 2 ++
+ arch/arm64/include/asm/pci.h | 4 +++
+ arch/arm64/include/asm/pgtable-prot.h | 1 +
+ arch/arm64/include/asm/pgtable.h | 5 +++
+ arch/arm64/kernel/pci.c | 62 +++++++++++++++++++++++++++++++++++
+ arch/arm64/mm/dma-mapping.c | 6 ++++
+ 15 files changed, 197 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
+index b1ce037e..1445b0ca 100644
+--- a/arch/arm/include/asm/delay.h
++++ b/arch/arm/include/asm/delay.h
+@@ -57,6 +57,22 @@ extern void __bad_udelay(void);
+ __const_udelay((n) * UDELAY_MULT)) : \
+ __udelay(n))
+
++#define spin_event_timeout(condition, timeout, delay) \
++({ \
++ typeof(condition) __ret; \
++ int i = 0; \
++ while (!(__ret = (condition)) && (i++ < timeout)) { \
++ if (delay) \
++ udelay(delay); \
++ else \
++ cpu_relax(); \
++ udelay(1); \
++ } \
++ if (!__ret) \
++ __ret = (condition); \
++ __ret; \
++})
++
+ /* Loop-based definitions for assembly code. */
+ extern void __loop_delay(unsigned long loops);
+ extern void __loop_udelay(unsigned long usecs);
+diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
+index 021692c6..172a4f2e 100644
+--- a/arch/arm/include/asm/io.h
++++ b/arch/arm/include/asm/io.h
+@@ -129,6 +129,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
+ #define MT_DEVICE_NONSHARED 1
+ #define MT_DEVICE_CACHED 2
+ #define MT_DEVICE_WC 3
++#define MT_MEMORY_RW_NS 4
+ /*
+ * types 4 onwards can be found in asm/mach/map.h and are undefined
+ * for ioremap
+@@ -220,6 +221,34 @@ extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+ #endif
+ #endif
+
++/* access ports */
++#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
++#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
++
++#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
++#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
++
++#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
++#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
++
++/* Clear and set bits in one shot. These macros can be used to clear and
++ * set multiple bits in a register using a single read-modify-write. These
++ * macros can also be used to set a multiple-bit bit pattern using a mask,
++ * by specifying the mask in the 'clear' parameter and the new bit pattern
++ * in the 'set' parameter.
++ */
++
++#define clrsetbits_be32(addr, clear, set) \
++ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
++#define clrsetbits_le32(addr, clear, set) \
++ iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
++#define clrsetbits_be16(addr, clear, set) \
++ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
++#define clrsetbits_le16(addr, clear, set) \
++ iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
++#define clrsetbits_8(addr, clear, set) \
++ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
++
+ /*
+ * IO port access primitives
+ * -------------------------
+@@ -408,6 +437,8 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+ #define ioremap_wc ioremap_wc
+ #define ioremap_wt ioremap_wc
+
++void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
++
+ void iounmap(volatile void __iomem *iomem_cookie);
+ #define iounmap iounmap
+
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index 9b7c328f..27f3df7d 100644
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -21,9 +21,9 @@ struct map_desc {
+ unsigned int type;
+ };
+
+-/* types 0-3 are defined in asm/io.h */
++/* types 0-4 are defined in asm/io.h */
+ enum {
+- MT_UNCACHED = 4,
++ MT_UNCACHED = 5,
+ MT_CACHECLEAN,
+ MT_MINICLEAN,
+ MT_LOW_VECTORS,
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index a8d656d9..4ab57b37 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -118,6 +118,13 @@ extern pgprot_t pgprot_s2_device;
+ #define pgprot_noncached(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+
++#define pgprot_cached(prot) \
++ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED)
++
++#define pgprot_cached_ns(prot) \
++ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \
++ L_PTE_MT_DEV_NONSHARED)
++
+ #define pgprot_writecombine(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+
+diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
+index 2f0e0773..d2f4869a 100644
+--- a/arch/arm/kernel/bios32.c
++++ b/arch/arm/kernel/bios32.c
+@@ -11,6 +11,8 @@
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
++#include <linux/of_irq.h>
++#include <linux/pcieport_if.h>
+
+ #include <asm/mach-types.h>
+ #include <asm/mach/map.h>
+@@ -63,6 +65,47 @@ void pcibios_report_status(u_int status_mask, int warn)
+ pcibios_bus_report_status(bus, status_mask, warn);
+ }
+
++/*
++ * Check the device tree for PCIe service interrupts
++ */
++int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
++{
++ int ret, count = 0;
++ struct device_node *np = NULL;
++
++ if (dev->bus->dev.of_node)
++ np = dev->bus->dev.of_node;
++
++ if (np == NULL)
++ return 0;
++
++ if (!IS_ENABLED(CONFIG_OF_IRQ))
++ return 0;
++
++ /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
++ * request irq for aer
++ */
++ if (mask & PCIE_PORT_SERVICE_AER) {
++ ret = of_irq_get_byname(np, "aer");
++ if (ret > 0) {
++ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
++ count++;
++ }
++ }
++
++ if (mask & PCIE_PORT_SERVICE_PME) {
++ ret = of_irq_get_byname(np, "pme");
++ if (ret > 0) {
++ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
++ count++;
++ }
++ }
++
++	/* TODO: add more service interrupts if present in the device tree */
++
++ return count;
++}
++
+ /*
+ * We don't use this to fix the device, but initialisation of it.
+ * It's not the correct use for this, but it works.
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index ab771000..9b5f4465 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -2392,6 +2392,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+
+ set_dma_ops(dev, dma_ops);
+ }
++EXPORT_SYMBOL(arch_setup_dma_ops);
+
+ void arch_teardown_dma_ops(struct device *dev)
+ {
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index ff0eed23..2f2f4269 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+ }
+ EXPORT_SYMBOL(ioremap_wc);
+
++void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
++{
++ return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
++ __builtin_return_address(0));
++}
++EXPORT_SYMBOL(ioremap_cache_ns);
++
+ /*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space as memory. Needed when the kernel wants to execute
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index f7c74135..4a2fb704 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -313,6 +313,13 @@ static struct mem_type mem_types[] __ro_after_init = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
++ [MT_MEMORY_RW_NS] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
++ L_PTE_XN,
++ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
++ .domain = DOMAIN_KERNEL,
++ },
+ [MT_ROM] = {
+ .prot_sect = PMD_TYPE_SECT,
+ .domain = DOMAIN_KERNEL,
+@@ -644,6 +651,7 @@ static void __init build_mem_type_table(void)
+ }
+ kern_pgprot |= PTE_EXT_AF;
+ vecs_pgprot |= PTE_EXT_AF;
++ mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
+
+ /*
+ * Set PXN for user mappings
+@@ -672,6 +680,7 @@ static void __init build_mem_type_table(void)
+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
++ mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
+ mem_types[MT_ROM].prot_sect |= cp->pmd;
+diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
+index 5082b30b..bde44993 100644
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -18,7 +18,7 @@
+
+ #include <asm/cachetype.h>
+
+-#define L1_CACHE_SHIFT 7
++#define L1_CACHE_SHIFT 6
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+ /*
+diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
+index 0bba427b..36c1fbf3 100644
+--- a/arch/arm64/include/asm/io.h
++++ b/arch/arm64/include/asm/io.h
+@@ -171,6 +171,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
+ #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+ #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
+ #define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
++#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), \
++ __pgprot(PROT_NORMAL_NS))
+ #define iounmap __iounmap
+
+ /*
+diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
+index b9a7ba9c..8a189159 100644
+--- a/arch/arm64/include/asm/pci.h
++++ b/arch/arm64/include/asm/pci.h
+@@ -31,6 +31,10 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+ return -ENODEV;
+ }
+
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++ enum pci_mmap_state mmap_state,
++ int write_combine);
+ static inline int pci_proc_domain(struct pci_bus *bus)
+ {
+ return 1;
+diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
+index 2142c772..cdf8b25d 100644
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -42,6 +42,7 @@
+ #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+ #define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+ #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
++#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+
+ #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 61e21401..b8c876fb 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -356,6 +356,11 @@ static inline int pmd_protnone(pmd_t pmd)
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
+ #define pgprot_writecombine(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
++#define pgprot_cached(prot) \
++ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \
++ PTE_PXN | PTE_UXN)
++#define pgprot_cached_ns(prot) \
++ __pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED)
+ #define pgprot_device(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+ #define __HAVE_PHYS_MEM_ACCESS_PROT
+diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
+index 409abc45..0568ec3a 100644
+--- a/arch/arm64/kernel/pci.c
++++ b/arch/arm64/kernel/pci.c
+@@ -17,6 +17,8 @@
+ #include <linux/mm.h>
+ #include <linux/of_pci.h>
+ #include <linux/of_platform.h>
++#include <linux/of_irq.h>
++#include <linux/pcieport_if.h>
+ #include <linux/pci.h>
+ #include <linux/pci-acpi.h>
+ #include <linux/pci-ecam.h>
+@@ -54,6 +56,66 @@ int pcibios_alloc_irq(struct pci_dev *dev)
+ return 0;
+ }
+
++/*
++ * Check the device tree for PCIe service interrupts
++ */
++int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
++{
++ int ret, count = 0;
++ struct device_node *np = NULL;
++
++ if (dev->bus->dev.of_node)
++ np = dev->bus->dev.of_node;
++
++ if (np == NULL)
++ return 0;
++
++ if (!IS_ENABLED(CONFIG_OF_IRQ))
++ return 0;
++
++ /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
++ * request irq for aer
++ */
++ if (mask & PCIE_PORT_SERVICE_AER) {
++ ret = of_irq_get_byname(np, "aer");
++ if (ret > 0) {
++ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
++ count++;
++ }
++ }
++
++ if (mask & PCIE_PORT_SERVICE_PME) {
++ ret = of_irq_get_byname(np, "pme");
++ if (ret > 0) {
++ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
++ count++;
++ }
++ }
++
++	/* TODO: add more service interrupts if present in the device tree */
++
++ return count;
++}
++
++int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++ enum pci_mmap_state mmap_state, int write_combine)
++{
++ if (mmap_state == pci_mmap_io)
++ return -EINVAL;
++
++ /*
++ * Mark this as IO
++ */
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
+ /*
+ * raw_pci_read/write - Platform-specific PCI config space access.
+ */
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index b5bf46ce..1ef0d6df 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -30,6 +30,7 @@
+ #include <linux/swiotlb.h>
+
+ #include <asm/cacheflush.h>
++#include <../../../drivers/staging/fsl-mc/include/mc-bus.h>
+
+ static int swiotlb __ro_after_init;
+
+@@ -917,6 +918,10 @@ static int __init __iommu_dma_init(void)
+ #ifdef CONFIG_PCI
+ if (!ret)
+ ret = register_iommu_dma_ops_notifier(&pci_bus_type);
++#endif
++#ifdef CONFIG_FSL_MC_BUS
++ if (!ret)
++ ret = register_iommu_dma_ops_notifier(&fsl_mc_bus_type);
+ #endif
+ return ret;
+ }
+@@ -971,3 +976,4 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ dev->archdata.dma_coherent = coherent;
+ __iommu_setup_dma_ops(dev, dma_base, size, iommu);
+ }
++EXPORT_SYMBOL(arch_setup_dma_ops);
+--
+2.14.1
+
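Among the helpers this arch patch introduces are the spin_event_timeout() polling macro (arm asm/delay.h) and the setbits/clrbits/clrsetbits I/O accessors (arm asm/io.h), which the later driver patches rely on. Below is a short usage sketch for the 32-bit ARM side with a big-endian device; the EX_* register offsets and bit masks are illustrative assumptions, not taken from any real device.

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EX_CTRL		0x00		/* hypothetical control register */
#define EX_CTRL_EN	BIT(0)
#define EX_CTRL_MODE	GENMASK(3, 1)
#define EX_STATUS	0x04		/* hypothetical status register */
#define EX_STATUS_RDY	BIT(0)

static int example_enable(void __iomem *base)
{
	/* Clear the MODE field and set EN in a single read-modify-write. */
	clrsetbits_be32(base + EX_CTRL, EX_CTRL_MODE, EX_CTRL_EN);

	/*
	 * Poll RDY for up to 1000 iterations, delaying about 10 us per
	 * iteration; spin_event_timeout() re-checks the condition once
	 * more after the loop and evaluates to its final value.
	 */
	if (!spin_event_timeout(ioread32be(base + EX_STATUS) & EX_STATUS_RDY,
				1000, 10))
		return -ETIMEDOUT;

	return 0;
}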
diff --git a/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch b/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch
new file mode 100644
index 0000000000..c820c9f9c9
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch
@@ -0,0 +1,10085 @@
+From 2b2e3b9a0d2abf276b40843f75d97b623e4ee109 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 10:02:10 +0800
+Subject: [PATCH] dts: support layercape
+
+This is an integrated patch for layerscape dts support.
+
+Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
+Signed-off-by: Alison Wang <b18965@freescale.com>
+Signed-off-by: Li Yang <leoyang.li@nxp.com>
+Signed-off-by: Ashish Kumar <Ashish.Kumar@nxp.com>
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+Signed-off-by: Rajesh Bhagat <rajesh.bhagat@freescale.com>
+Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
+Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
+Signed-off-by: Minghuan Lian <Minghuan.Lian@nxp.com>
+Signed-off-by: Suresh Gupta <suresh.gupta@nxp.com>
+Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
+Signed-off-by: Priyanka Jain <priyanka.jain@nxp.com>
+Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+Signed-off-by: Changming Huang <jerry.huang@nxp.com>
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
+Signed-off-by: Meng Yi <meng.yi@nxp.com>
+Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
+Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ arch/arm/boot/dts/alpine.dtsi | 2 +-
+ arch/arm/boot/dts/axm55xx.dtsi | 2 +-
+ arch/arm/boot/dts/ecx-2000.dts | 2 +-
+ arch/arm/boot/dts/imx6ul.dtsi | 4 +-
+ arch/arm/boot/dts/keystone.dtsi | 4 +-
+ arch/arm/boot/dts/ls1021a-qds.dts | 13 +
+ arch/arm/boot/dts/ls1021a-twr.dts | 13 +
+ arch/arm/boot/dts/ls1021a.dtsi | 155 ++--
+ arch/arm/boot/dts/mt6580.dtsi | 2 +-
+ arch/arm/boot/dts/mt6589.dtsi | 2 +-
+ arch/arm/boot/dts/mt8127.dtsi | 2 +-
+ arch/arm/boot/dts/mt8135.dtsi | 2 +-
+ arch/arm/boot/dts/rk3288.dtsi | 2 +-
+ arch/arm/boot/dts/sun6i-a31.dtsi | 2 +-
+ arch/arm/boot/dts/sun7i-a20.dtsi | 4 +-
+ arch/arm/boot/dts/sun8i-a23-a33.dtsi | 2 +-
+ arch/arm/boot/dts/sun9i-a80.dtsi | 2 +-
+ arch/arm64/boot/dts/freescale/Makefile | 16 +
+ arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts | 134 +++
+ arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts | 155 ++++
+ arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts | 91 +++
+ arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 517 ++++++++++++
+ arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi | 45 +
+ .../boot/dts/freescale/fsl-ls1043a-qds-sdk.dts | 69 ++
+ arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts | 171 +++-
+ .../boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts | 69 ++
+ .../boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts | 117 +++
+ arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts | 113 ++-
+ arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 302 ++++++-
+ arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi | 48 ++
+ .../boot/dts/freescale/fsl-ls1046a-qds-sdk.dts | 109 +++
+ arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts | 363 ++++++++
+ .../boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts | 76 ++
+ .../boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts | 110 +++
+ arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts | 218 +++++
+ arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 793 ++++++++++++++++++
+ arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts | 173 ++++
+ arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts | 236 ++++++
+ arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 816 ++++++++++++++++++
+ arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts | 191 ++---
+ arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts | 169 ++--
+ arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts | 9 +-
+ arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 763 +++--------------
+ arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts | 161 ++++
+ arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts | 162 ++++
+ arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts | 140 ++++
+ arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 195 +++++
+ arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi | 198 +++++
+ arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi | 161 ++++
+ arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 910 +++++++++++++++++++++
+ .../boot/dts/freescale/qoriq-bman1-portals.dtsi | 81 ++
+ arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi | 66 ++
+ .../boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi | 43 +
+ .../boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi | 43 +
+ .../boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi | 42 +
+ .../boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi | 42 +
+ .../boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi | 42 +
+ .../boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi | 42 +
+ .../boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi | 42 +
+ .../boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi | 42 +
+ .../boot/dts/freescale/qoriq-fman3-0-6oh.dtsi | 47 ++
+ arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi | 130 +++
+ .../boot/dts/freescale/qoriq-qman1-portals.dtsi | 104 +++
+ arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi | 10 +
+ arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi | 4 +-
+ arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi | 4 +-
+ 66 files changed, 7778 insertions(+), 1021 deletions(-)
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi
+
+diff --git a/arch/arm/boot/dts/alpine.dtsi b/arch/arm/boot/dts/alpine.dtsi
+index db8752fc..d0eefc3b 100644
+--- a/arch/arm/boot/dts/alpine.dtsi
++++ b/arch/arm/boot/dts/alpine.dtsi
+@@ -93,7 +93,7 @@
+ interrupt-controller;
+ reg = <0x0 0xfb001000 0x0 0x1000>,
+ <0x0 0xfb002000 0x0 0x2000>,
+- <0x0 0xfb004000 0x0 0x1000>,
++ <0x0 0xfb004000 0x0 0x2000>,
+ <0x0 0xfb006000 0x0 0x2000>;
+ interrupts =
+ <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+diff --git a/arch/arm/boot/dts/axm55xx.dtsi b/arch/arm/boot/dts/axm55xx.dtsi
+index a9d6d593..47799f59 100644
+--- a/arch/arm/boot/dts/axm55xx.dtsi
++++ b/arch/arm/boot/dts/axm55xx.dtsi
+@@ -62,7 +62,7 @@
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x20 0x01001000 0 0x1000>,
+- <0x20 0x01002000 0 0x1000>,
++ <0x20 0x01002000 0 0x2000>,
+ <0x20 0x01004000 0 0x2000>,
+ <0x20 0x01006000 0 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
+diff --git a/arch/arm/boot/dts/ecx-2000.dts b/arch/arm/boot/dts/ecx-2000.dts
+index 2ccbb57f..c15e7e0c 100644
+--- a/arch/arm/boot/dts/ecx-2000.dts
++++ b/arch/arm/boot/dts/ecx-2000.dts
+@@ -99,7 +99,7 @@
+ interrupt-controller;
+ interrupts = <1 9 0xf04>;
+ reg = <0xfff11000 0x1000>,
+- <0xfff12000 0x1000>,
++ <0xfff12000 0x2000>,
+ <0xfff14000 0x2000>,
+ <0xfff16000 0x2000>;
+ };
+diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
+index c5c05fdc..c1396873 100644
+--- a/arch/arm/boot/dts/imx6ul.dtsi
++++ b/arch/arm/boot/dts/imx6ul.dtsi
+@@ -89,11 +89,11 @@
+ };
+
+ intc: interrupt-controller@00a01000 {
+- compatible = "arm,cortex-a7-gic";
++ compatible = "arm,gic-400", "arm,cortex-a7-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x00a01000 0x1000>,
+- <0x00a02000 0x1000>,
++ <0x00a02000 0x2000>,
+ <0x00a04000 0x2000>,
+ <0x00a06000 0x2000>;
+ };
+diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
+index 02708ba2..e30c83fc 100644
+--- a/arch/arm/boot/dts/keystone.dtsi
++++ b/arch/arm/boot/dts/keystone.dtsi
+@@ -30,12 +30,12 @@
+ };
+
+ gic: interrupt-controller {
+- compatible = "arm,cortex-a15-gic";
++ compatible = "arm,gic-400", "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0x02561000 0x0 0x1000>,
+ <0x0 0x02562000 0x0 0x2000>,
+- <0x0 0x02564000 0x0 0x1000>,
++ <0x0 0x02564000 0x0 0x2000>,
+ <0x0 0x02566000 0x0 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_HIGH)>;
+diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
+index 94087531..5611a9c9 100644
+--- a/arch/arm/boot/dts/ls1021a-qds.dts
++++ b/arch/arm/boot/dts/ls1021a-qds.dts
+@@ -124,6 +124,19 @@
+ };
+ };
+
++&qspi {
++ num-cs = <2>;
++ status = "okay";
++
++ qflash0: s25fl128s@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
+ &enet0 {
+ tbi-handle = <&tbi0>;
+ phy-handle = <&sgmii_phy1c>;
+diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
+index a8b148ad..907e5392 100644
+--- a/arch/arm/boot/dts/ls1021a-twr.dts
++++ b/arch/arm/boot/dts/ls1021a-twr.dts
+@@ -142,6 +142,19 @@
+ };
+ };
+
++&qspi {
++ num-cs = <2>;
++ status = "okay";
++
++ qflash0: n25q128a13@0 {
++ compatible = "n25q128a13", "jedec,spi-nor";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
+ &enet0 {
+ tbi-handle = <&tbi1>;
+ phy-handle = <&sgmii_phy2>;
+diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
+index 368e2193..def82fef 100644
+--- a/arch/arm/boot/dts/ls1021a.dtsi
++++ b/arch/arm/boot/dts/ls1021a.dtsi
+@@ -74,17 +74,24 @@
+ compatible = "arm,cortex-a7";
+ device_type = "cpu";
+ reg = <0xf00>;
+- clocks = <&cluster1_clk>;
++ clocks = <&clockgen 1 0>;
+ };
+
+ cpu@f01 {
+ compatible = "arm,cortex-a7";
+ device_type = "cpu";
+ reg = <0xf01>;
+- clocks = <&cluster1_clk>;
++ clocks = <&clockgen 1 0>;
+ };
+ };
+
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "sysclk";
++ };
++
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+@@ -108,11 +115,11 @@
+ ranges;
+
+ gic: interrupt-controller@1400000 {
+- compatible = "arm,cortex-a7-gic";
++ compatible = "arm,gic-400", "arm,cortex-a7-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0x1401000 0x0 0x1000>,
+- <0x0 0x1402000 0x0 0x1000>,
++ <0x0 0x1402000 0x0 0x2000>,
+ <0x0 0x1404000 0x0 0x2000>,
+ <0x0 0x1406000 0x0 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+@@ -120,14 +127,14 @@
+ };
+
+ msi1: msi-controller@1570e00 {
+- compatible = "fsl,1s1021a-msi";
++ compatible = "fsl,ls1021a-msi";
+ reg = <0x0 0x1570e00 0x0 0x8>;
+ msi-controller;
+ interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ msi2: msi-controller@1570e08 {
+- compatible = "fsl,1s1021a-msi";
++ compatible = "fsl,ls1021a-msi";
+ reg = <0x0 0x1570e08 0x0 0x8>;
+ msi-controller;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+@@ -137,11 +144,12 @@
+ compatible = "fsl,ifc", "simple-bus";
+ reg = <0x0 0x1530000 0x0 0x10000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++ big-endian;
+ };
+
+ dcfg: dcfg@1ee0000 {
+ compatible = "fsl,ls1021a-dcfg", "syscon";
+- reg = <0x0 0x1ee0000 0x0 0x10000>;
++ reg = <0x0 0x1ee0000 0x0 0x1000>;
+ big-endian;
+ };
+
+@@ -163,7 +171,7 @@
+ <0x0 0x20220520 0x0 0x4>;
+ reg-names = "ahci", "sata-ecc";
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ dma-coherent;
+ status = "disabled";
+ };
+@@ -214,41 +222,10 @@
+ };
+
+ clockgen: clocking@1ee1000 {
+- #address-cells = <1>;
+- #size-cells = <1>;
+- ranges = <0x0 0x0 0x1ee1000 0x10000>;
+-
+- sysclk: sysclk {
+- compatible = "fixed-clock";
+- #clock-cells = <0>;
+- clock-output-names = "sysclk";
+- };
+-
+- cga_pll1: pll@800 {
+- compatible = "fsl,qoriq-core-pll-2.0";
+- #clock-cells = <1>;
+- reg = <0x800 0x10>;
+- clocks = <&sysclk>;
+- clock-output-names = "cga-pll1", "cga-pll1-div2",
+- "cga-pll1-div4";
+- };
+-
+- platform_clk: pll@c00 {
+- compatible = "fsl,qoriq-core-pll-2.0";
+- #clock-cells = <1>;
+- reg = <0xc00 0x10>;
+- clocks = <&sysclk>;
+- clock-output-names = "platform-clk", "platform-clk-div2";
+- };
+-
+- cluster1_clk: clk0c0@0 {
+- compatible = "fsl,qoriq-core-mux-2.0";
+- #clock-cells = <0>;
+- reg = <0x0 0x10>;
+- clock-names = "pll1cga", "pll1cga-div2", "pll1cga-div4";
+- clocks = <&cga_pll1 0>, <&cga_pll1 1>, <&cga_pll1 2>;
+- clock-output-names = "cluster1-clk";
+- };
++ compatible = "fsl,ls1021a-clockgen";
++ reg = <0x0 0x1ee1000 0x0 0x1000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
+ };
+
+ dspi0: dspi@2100000 {
+@@ -258,7 +235,7 @@
+ reg = <0x0 0x2100000 0x0 0x10000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "dspi";
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ spi-num-chipselects = <6>;
+ big-endian;
+ status = "disabled";
+@@ -271,12 +248,27 @@
+ reg = <0x0 0x2110000 0x0 0x10000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "dspi";
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ spi-num-chipselects = <6>;
+ big-endian;
+ status = "disabled";
+ };
+
++ qspi: quadspi@1550000 {
++ compatible = "fsl,ls1021a-qspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x1550000 0x0 0x10000>,
++ <0x0 0x40000000 0x0 0x4000000>;
++ reg-names = "QuadSPI", "QuadSPI-memory";
++ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "qspi_en", "qspi";
++ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
++ big-endian;
++ amba-base = <0x40000000>;
++ status = "disabled";
++ };
++
+ i2c0: i2c@2180000 {
+ compatible = "fsl,vf610-i2c";
+ #address-cells = <1>;
+@@ -284,7 +276,7 @@
+ reg = <0x0 0x2180000 0x0 0x10000>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "i2c";
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ status = "disabled";
+ };
+
+@@ -295,7 +287,7 @@
+ reg = <0x0 0x2190000 0x0 0x10000>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "i2c";
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ status = "disabled";
+ };
+
+@@ -306,7 +298,7 @@
+ reg = <0x0 0x21a0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "i2c";
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ status = "disabled";
+ };
+
+@@ -399,7 +391,7 @@
+ compatible = "fsl,ls1021a-lpuart";
+ reg = <0x0 0x2960000 0x0 0x1000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+@@ -408,7 +400,7 @@
+ compatible = "fsl,ls1021a-lpuart";
+ reg = <0x0 0x2970000 0x0 0x1000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+@@ -417,7 +409,7 @@
+ compatible = "fsl,ls1021a-lpuart";
+ reg = <0x0 0x2980000 0x0 0x1000>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+@@ -426,7 +418,7 @@
+ compatible = "fsl,ls1021a-lpuart";
+ reg = <0x0 0x2990000 0x0 0x1000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+@@ -435,16 +427,26 @@
+ compatible = "fsl,ls1021a-lpuart";
+ reg = <0x0 0x29a0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
++ ftm0: ftm0@29d0000 {
++ compatible = "fsl,ftm-alarm";
++ reg = <0x0 0x29d0000 0x0 0x10000>,
++ <0x0 0x1ee2140 0x0 0x4>;
++ reg-names = "ftm", "FlexTimer1";
++ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
++ big-endian;
++ status = "okay";
++ };
++
+ wdog0: watchdog@2ad0000 {
+ compatible = "fsl,imx21-wdt";
+ reg = <0x0 0x2ad0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 1>;
++ clocks = <&clockgen 4 1>;
+ clock-names = "wdog-en";
+ big-endian;
+ };
+@@ -454,8 +456,8 @@
+ compatible = "fsl,vf610-sai";
+ reg = <0x0 0x2b50000 0x0 0x10000>;
+ interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 1>, <&platform_clk 1>,
+- <&platform_clk 1>, <&platform_clk 1>;
++ clocks = <&clockgen 4 1>, <&clockgen 4 1>,
++ <&clockgen 4 1>, <&clockgen 4 1>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dma-names = "tx", "rx";
+ dmas = <&edma0 1 47>,
+@@ -468,8 +470,8 @@
+ compatible = "fsl,vf610-sai";
+ reg = <0x0 0x2b60000 0x0 0x10000>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 1>, <&platform_clk 1>,
+- <&platform_clk 1>, <&platform_clk 1>;
++ clocks = <&clockgen 4 1>, <&clockgen 4 1>,
++ <&clockgen 4 1>, <&clockgen 4 1>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dma-names = "tx", "rx";
+ dmas = <&edma0 1 45>,
+@@ -489,16 +491,31 @@
+ dma-channels = <32>;
+ big-endian;
+ clock-names = "dmamux0", "dmamux1";
+- clocks = <&platform_clk 1>,
+- <&platform_clk 1>;
++ clocks = <&clockgen 4 1>,
++ <&clockgen 4 1>;
++ };
++
++ qdma: qdma@8390000 {
++ compatible = "fsl,ls1021a-qdma";
++ reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */
++ <0x0 0x8389000 0x0 0x1000>, /* Status regs */
++ <0x0 0x838a000 0x0 0x2000>; /* Block regs */
++ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "qdma-error", "qdma-queue";
++ channels = <8>;
++ queues = <2>;
++ status-sizes = <64>;
++ queue-sizes = <64 64>;
++ big-endian;
+ };
+
+ dcu: dcu@2ce0000 {
+ compatible = "fsl,ls1021a-dcu";
+ reg = <0x0 0x2ce0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&platform_clk 0>,
+- <&platform_clk 0>;
++ clocks = <&clockgen 4 0>,
++ <&clockgen 4 0>;
+ clock-names = "dcu", "pix";
+ big-endian;
+ status = "disabled";
+@@ -626,6 +643,8 @@
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "host";
+ snps,quirk-frame-length-adjustment = <0x20>;
++ configure-gfladj;
++ dma-coherent;
+ snps,dis_rxdet_inp3_quirk;
+ };
+
+@@ -634,7 +653,9 @@
+ reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */
+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+- interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
++ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
++ interrupt-names = "pme", "aer";
+ fsl,pcie-scfg = <&scfg 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -643,7 +664,7 @@
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+- msi-parent = <&msi1>;
++ msi-parent = <&msi1>, <&msi2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
+@@ -657,7 +678,9 @@
+ reg = <0x00 0x03500000 0x0 0x00010000 /* controller registers */
+ 0x48 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+- interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
++ interrupt-names = "pme", "aer";
+ fsl,pcie-scfg = <&scfg 1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -666,7 +689,7 @@
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+- msi-parent = <&msi2>;
++ msi-parent = <&msi1>, <&msi2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/mt6580.dtsi b/arch/arm/boot/dts/mt6580.dtsi
+index 06fdf6c2..a349dba5 100644
+--- a/arch/arm/boot/dts/mt6580.dtsi
++++ b/arch/arm/boot/dts/mt6580.dtsi
+@@ -91,7 +91,7 @@
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0x10211000 0x1000>,
+- <0x10212000 0x1000>,
++ <0x10212000 0x2000>,
+ <0x10214000 0x2000>,
+ <0x10216000 0x2000>;
+ };
+diff --git a/arch/arm/boot/dts/mt6589.dtsi b/arch/arm/boot/dts/mt6589.dtsi
+index 88b3cb12..0d6f60af 100644
+--- a/arch/arm/boot/dts/mt6589.dtsi
++++ b/arch/arm/boot/dts/mt6589.dtsi
+@@ -102,7 +102,7 @@
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0x10211000 0x1000>,
+- <0x10212000 0x1000>,
++ <0x10212000 0x2000>,
+ <0x10214000 0x2000>,
+ <0x10216000 0x2000>;
+ };
+diff --git a/arch/arm/boot/dts/mt8127.dtsi b/arch/arm/boot/dts/mt8127.dtsi
+index 52086c80..916c095d 100644
+--- a/arch/arm/boot/dts/mt8127.dtsi
++++ b/arch/arm/boot/dts/mt8127.dtsi
+@@ -129,7 +129,7 @@
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0 0x10211000 0 0x1000>,
+- <0 0x10212000 0 0x1000>,
++ <0 0x10212000 0 0x2000>,
+ <0 0x10214000 0 0x2000>,
+ <0 0x10216000 0 0x2000>;
+ };
+diff --git a/arch/arm/boot/dts/mt8135.dtsi b/arch/arm/boot/dts/mt8135.dtsi
+index 1d7f92bd..a97b4ee4 100644
+--- a/arch/arm/boot/dts/mt8135.dtsi
++++ b/arch/arm/boot/dts/mt8135.dtsi
+@@ -221,7 +221,7 @@
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0 0x10211000 0 0x1000>,
+- <0 0x10212000 0 0x1000>,
++ <0 0x10212000 0 0x2000>,
+ <0 0x10214000 0 0x2000>,
+ <0 0x10216000 0 0x2000>;
+ };
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 17ec2e2d..559fc549 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -1109,7 +1109,7 @@
+ #address-cells = <0>;
+
+ reg = <0xffc01000 0x1000>,
+- <0xffc02000 0x1000>,
++ <0xffc02000 0x2000>,
+ <0xffc04000 0x2000>,
+ <0xffc06000 0x2000>;
+ interrupts = <GIC_PPI 9 0xf04>;
+diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
+index ce196045..97f28399 100644
+--- a/arch/arm/boot/dts/sun6i-a31.dtsi
++++ b/arch/arm/boot/dts/sun6i-a31.dtsi
+@@ -791,7 +791,7 @@
+ gic: interrupt-controller@01c81000 {
+ compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ reg = <0x01c81000 0x1000>,
+- <0x01c82000 0x1000>,
++ <0x01c82000 0x2000>,
+ <0x01c84000 0x2000>,
+ <0x01c86000 0x2000>;
+ interrupt-controller;
+diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
+index 94cf5a1c..81e5a44c 100644
+--- a/arch/arm/boot/dts/sun7i-a20.dtsi
++++ b/arch/arm/boot/dts/sun7i-a20.dtsi
+@@ -1685,9 +1685,9 @@
+ };
+
+ gic: interrupt-controller@01c81000 {
+- compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
++ compatible = "arm,gic-400", "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ reg = <0x01c81000 0x1000>,
+- <0x01c82000 0x1000>,
++ <0x01c82000 0x2000>,
+ <0x01c84000 0x2000>,
+ <0x01c86000 0x2000>;
+ interrupt-controller;
+diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+index 300a1bd5..cdff5888 100644
+--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
++++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+@@ -488,7 +488,7 @@
+ gic: interrupt-controller@01c81000 {
+ compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ reg = <0x01c81000 0x1000>,
+- <0x01c82000 0x1000>,
++ <0x01c82000 0x2000>,
+ <0x01c84000 0x2000>,
+ <0x01c86000 0x2000>;
+ interrupt-controller;
+diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
+index 3c5214cb..ba7e7c71 100644
+--- a/arch/arm/boot/dts/sun9i-a80.dtsi
++++ b/arch/arm/boot/dts/sun9i-a80.dtsi
+@@ -613,7 +613,7 @@
+ gic: interrupt-controller@01c41000 {
+ compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ reg = <0x01c41000 0x1000>,
+- <0x01c42000 0x1000>,
++ <0x01c42000 0x2000>,
+ <0x01c44000 0x2000>,
+ <0x01c46000 0x2000>;
+ interrupt-controller;
+diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
+index 1b7783db..2d7986a1 100644
+--- a/arch/arm64/boot/dts/freescale/Makefile
++++ b/arch/arm64/boot/dts/freescale/Makefile
+@@ -1,8 +1,24 @@
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds-sdk.dtb
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb-sdk.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb-usdpaa.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds-sdk.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb-sdk.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb-usdpaa.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-qds.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-rdb.dtb
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-qds.dtb
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2081a-rdb.dtb
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
+
+ always := $(dtb-y)
+ subdir-y := $(dts-dirs)
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
+new file mode 100644
+index 00000000..e1274c18
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
+@@ -0,0 +1,134 @@
++/*
++ * Device Tree file for Freescale LS1012A Freedom Board.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++/dts-v1/;
++
++#include "fsl-ls1012a.dtsi"
++
++/ {
++ model = "LS1012A Freedom Board";
++ compatible = "fsl,ls1012a-frdm", "fsl,ls1012a";
++
++ sys_mclk: clock-mclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <25000000>;
++ };
++
++ reg_1p8v: regulator-1p8v {
++ compatible = "regulator-fixed";
++ regulator-name = "1P8V";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
++ };
++
++ sound {
++ compatible = "simple-audio-card";
++ simple-audio-card,format = "i2s";
++ simple-audio-card,widgets =
++ "Microphone", "Microphone Jack",
++ "Headphone", "Headphone Jack",
++ "Speaker", "Speaker Ext",
++ "Line", "Line In Jack";
++ simple-audio-card,routing =
++ "MIC_IN", "Microphone Jack",
++ "Microphone Jack", "Mic Bias",
++ "LINE_IN", "Line In Jack",
++ "Headphone Jack", "HP_OUT",
++ "Speaker Ext", "LINE_OUT";
++
++ simple-audio-card,cpu {
++ sound-dai = <&sai2>;
++ frame-master;
++ bitclock-master;
++ };
++
++ simple-audio-card,codec {
++ sound-dai = <&codec>;
++ frame-master;
++ bitclock-master;
++ system-clock-frequency = <25000000>;
++ };
++ };
++};
++
++&duart0 {
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++
++ codec: sgtl5000@a {
++ #sound-dai-cells = <0>;
++ compatible = "fsl,sgtl5000";
++ reg = <0xa>;
++ VDDA-supply = <&reg_1p8v>;
++ VDDIO-supply = <&reg_1p8v>;
++ clocks = <&sys_mclk>;
++ };
++};
++
++&qspi {
++ num-cs = <1>;
++ bus-num = <0>;
++ status = "okay";
++
++ qflash0: s25fs512s@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ m25p,fast-read;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&sai2 {
++ status = "okay";
++};
++
++&sata {
++ status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+new file mode 100644
+index 00000000..1e1b2802
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+@@ -0,0 +1,155 @@
++/*
++ * Device Tree file for Freescale LS1012A QDS Board.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++/dts-v1/;
++
++#include "fsl-ls1012a.dtsi"
++
++/ {
++ model = "LS1012A QDS Board";
++ compatible = "fsl,ls1012a-qds", "fsl,ls1012a";
++
++ sys_mclk: clock-mclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24576000>;
++ };
++
++ reg_3p3v: regulator-3p3v {
++ compatible = "regulator-fixed";
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ sound {
++ compatible = "simple-audio-card";
++ simple-audio-card,format = "i2s";
++ simple-audio-card,widgets =
++ "Microphone", "Microphone Jack",
++ "Headphone", "Headphone Jack",
++ "Speaker", "Speaker Ext",
++ "Line", "Line In Jack";
++ simple-audio-card,routing =
++ "MIC_IN", "Microphone Jack",
++ "Microphone Jack", "Mic Bias",
++ "LINE_IN", "Line In Jack",
++ "Headphone Jack", "HP_OUT",
++ "Speaker Ext", "LINE_OUT";
++
++ simple-audio-card,cpu {
++ sound-dai = <&sai2>;
++ frame-master;
++ bitclock-master;
++ };
++
++ simple-audio-card,codec {
++ sound-dai = <&codec>;
++ frame-master;
++ bitclock-master;
++ system-clock-frequency = <24576000>;
++ };
++ };
++};
++
++&duart0 {
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++
++ pca9547@77 {
++ compatible = "nxp,pca9547";
++ reg = <0x77>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ i2c@4 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x4>;
++
++ codec: sgtl5000@a {
++ #sound-dai-cells = <0>;
++ compatible = "fsl,sgtl5000";
++ reg = <0xa>;
++ VDDA-supply = <&reg_3p3v>;
++ VDDIO-supply = <&reg_3p3v>;
++ clocks = <&sys_mclk>;
++ };
++ };
++ };
++};
++
++&qspi {
++ num-cs = <2>;
++ bus-num = <0>;
++ status = "okay";
++
++ qflash0: s25fs512s@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ m25p,fast-read;
++ reg = <0>;
++ };
++};
++
++&sai2 {
++ status = "okay";
++};
++
++&sata {
++ status = "okay";
++};
++
++&esdhc0 {
++ status = "okay";
++};
++
++&esdhc1 {
++ status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
+new file mode 100644
+index 00000000..90bd2307
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
+@@ -0,0 +1,91 @@
++/*
++ * Device Tree file for Freescale LS1012A RDB Board.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++/dts-v1/;
++
++#include "fsl-ls1012a.dtsi"
++
++/ {
++ model = "LS1012A RDB Board";
++ compatible = "fsl,ls1012a-rdb", "fsl,ls1012a";
++};
++
++&duart0 {
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++};
++
++&qspi {
++ num-cs = <2>;
++ bus-num = <0>;
++ status = "okay";
++
++ qflash0: s25fs512s@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ m25p,fast-read;
++ reg = <0>;
++ };
++};
++
++&sata {
++ status = "okay";
++};
++
++&esdhc0 {
++ sd-uhs-sdr104;
++ sd-uhs-sdr50;
++ sd-uhs-sdr25;
++ sd-uhs-sdr12;
++ status = "okay";
++};
++
++&esdhc1 {
++ mmc-hs200-1_8v;
++ status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+new file mode 100644
+index 00000000..9ede9d52
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+@@ -0,0 +1,517 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1012A family SoC.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/thermal/thermal.h>
++
++/ {
++ compatible = "fsl,ls1012a";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ aliases {
++ crypto = &crypto;
++ rtic_a = &rtic_a;
++ rtic_b = &rtic_b;
++ rtic_c = &rtic_c;
++ rtic_d = &rtic_d;
++ sec_mon = &sec_mon;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0>;
++ clocks = <&clockgen 1 0>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++ };
++
++ idle-states {
++ /*
++	 * The PSCI node is not added by default; U-Boot will add the
++	 * missing parts if it decides to use PSCI.
++ */
++ entry-method = "arm,psci";
++
++ CPU_PH20: cpu-ph20 {
++ compatible = "arm,idle-state";
++ idle-state-name = "PH20";
++ arm,psci-suspend-param = <0x0>;
++ entry-latency-us = <1000>;
++ exit-latency-us = <1000>;
++ min-residency-us = <3000>;
++ };
++ };
++
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <125000000>;
++ clock-output-names = "sysclk";
++ };
++
++ coreclk: coreclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "coreclk";
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <1 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */
++ <1 14 IRQ_TYPE_LEVEL_LOW>,/* Physical Non-Secure PPI */
++ <1 11 IRQ_TYPE_LEVEL_LOW>,/* Virtual PPI */
++ <1 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ gic: interrupt-controller@1400000 {
++ compatible = "arm,gic-400";
++ #interrupt-cells = <3>;
++ interrupt-controller;
++ reg = <0x0 0x1401000 0 0x1000>, /* GICD */
++ <0x0 0x1402000 0 0x2000>, /* GICC */
++ <0x0 0x1404000 0 0x2000>, /* GICH */
++ <0x0 0x1406000 0 0x2000>; /* GICV */
++ interrupts = <1 9 IRQ_TYPE_LEVEL_LOW>;
++ };
++
++ reboot {
++ compatible = "syscon-reboot";
++ regmap = <&dcfg>;
++ offset = <0xb0>;
++ mask = <0x02>;
++ };
++
++ soc {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ scfg: scfg@1570000 {
++ compatible = "fsl,ls1012a-scfg", "syscon";
++ reg = <0x0 0x1570000 0x0 0x10000>;
++ big-endian;
++ };
++
++ crypto: crypto@1700000 {
++ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
++ "fsl,sec-v4.0";
++ fsl,sec-era = <8>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x00 0x1700000 0x100000>;
++ reg = <0x00 0x1700000 0x0 0x100000>;
++ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++
++ sec_jr0: jr@10000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x10000 0x10000>;
++ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr1: jr@20000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x20000 0x10000>;
++ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr2: jr@30000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x30000 0x10000>;
++ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr3: jr@40000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x40000 0x10000>;
++ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ rtic@60000 {
++ compatible = "fsl,sec-v5.4-rtic",
++ "fsl,sec-v5.0-rtic",
++ "fsl,sec-v4.0-rtic";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0x60000 0x100 0x60e00 0x18>;
++ ranges = <0x0 0x60100 0x500>;
++
++ rtic_a: rtic-a@0 {
++ compatible = "fsl,sec-v5.4-rtic-memory",
++ "fsl,sec-v5.0-rtic-memory",
++ "fsl,sec-v4.0-rtic-memory";
++ reg = <0x00 0x20 0x100 0x100>;
++ };
++
++ rtic_b: rtic-b@20 {
++ compatible = "fsl,sec-v5.4-rtic-memory",
++ "fsl,sec-v5.0-rtic-memory",
++ "fsl,sec-v4.0-rtic-memory";
++ reg = <0x20 0x20 0x200 0x100>;
++ };
++
++ rtic_c: rtic-c@40 {
++ compatible = "fsl,sec-v5.4-rtic-memory",
++ "fsl,sec-v5.0-rtic-memory",
++ "fsl,sec-v4.0-rtic-memory";
++ reg = <0x40 0x20 0x300 0x100>;
++ };
++
++ rtic_d: rtic-d@60 {
++ compatible = "fsl,sec-v5.4-rtic-memory",
++ "fsl,sec-v5.0-rtic-memory",
++ "fsl,sec-v4.0-rtic-memory";
++ reg = <0x60 0x20 0x400 0x100>;
++ };
++ };
++ };
++
++ sec_mon: sec_mon@1e90000 {
++ compatible = "fsl,sec-v5.4-mon", "fsl,sec-v5.0-mon",
++ "fsl,sec-v4.0-mon";
++ reg = <0x0 0x1e90000 0x0 0x10000>;
++ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ dcfg: dcfg@1ee0000 {
++ compatible = "fsl,ls1012a-dcfg",
++ "syscon";
++ reg = <0x0 0x1ee0000 0x0 0x10000>;
++ big-endian;
++ };
++
++ clockgen: clocking@1ee1000 {
++ compatible = "fsl,ls1012a-clockgen";
++ reg = <0x0 0x1ee1000 0x0 0x1000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk &coreclk>;
++ clock-names = "sysclk", "coreclk";
++ };
++
++ tmu: tmu@1f00000 {
++ compatible = "fsl,qoriq-tmu";
++ reg = <0x0 0x1f00000 0x0 0x10000>;
++ interrupts = <0 33 0x4>;
++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
++ fsl,tmu-calibration = <0x00000000 0x00000026
++ 0x00000001 0x0000002d
++ 0x00000002 0x00000032
++ 0x00000003 0x00000039
++ 0x00000004 0x0000003f
++ 0x00000005 0x00000046
++ 0x00000006 0x0000004d
++ 0x00000007 0x00000054
++ 0x00000008 0x0000005a
++ 0x00000009 0x00000061
++ 0x0000000a 0x0000006a
++ 0x0000000b 0x00000071
++
++ 0x00010000 0x00000025
++ 0x00010001 0x0000002c
++ 0x00010002 0x00000035
++ 0x00010003 0x0000003d
++ 0x00010004 0x00000045
++ 0x00010005 0x0000004e
++ 0x00010006 0x00000057
++ 0x00010007 0x00000061
++ 0x00010008 0x0000006b
++ 0x00010009 0x00000076
++
++ 0x00020000 0x00000029
++ 0x00020001 0x00000033
++ 0x00020002 0x0000003d
++ 0x00020003 0x00000049
++ 0x00020004 0x00000056
++ 0x00020005 0x00000061
++ 0x00020006 0x0000006d
++
++ 0x00030000 0x00000021
++ 0x00030001 0x0000002a
++ 0x00030002 0x0000003c
++ 0x00030003 0x0000004e>;
++ big-endian;
++ #thermal-sensor-cells = <1>;
++ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 0>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ cpu_crit: cpu-crit {
++ temperature = <95000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++ };
++
++ esdhc0: esdhc@1560000 {
++ compatible = "fsl,ls1012a-esdhc", "fsl,esdhc";
++ reg = <0x0 0x1560000 0x0 0x10000>;
++ interrupts = <0 62 0x4>;
++ clocks = <&clockgen 4 0>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ big-endian;
++ bus-width = <4>;
++ status = "disabled";
++ };
++
++ esdhc1: esdhc@1580000 {
++ compatible = "fsl,ls1012a-esdhc", "fsl,esdhc";
++ reg = <0x0 0x1580000 0x0 0x10000>;
++ interrupts = <0 65 0x4>;
++ clocks = <&clockgen 4 0>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ big-endian;
++ broken-cd;
++ bus-width = <4>;
++ status = "disabled";
++ };
++
++ ftm0: ftm0@29d0000 {
++ compatible = "fsl,ftm-alarm";
++ reg = <0x0 0x29d0000 0x0 0x10000>,
++ <0x0 0x1ee2140 0x0 0x4>;
++ reg-names = "ftm", "FlexTimer1";
++ interrupts = <0 86 0x4>;
++ big-endian;
++ };
++
++ i2c0: i2c@2180000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2180000 0x0 0x10000>;
++ interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 0>;
++ status = "disabled";
++ };
++
++ i2c1: i2c@2190000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2190000 0x0 0x10000>;
++ interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 0>;
++ status = "disabled";
++ };
++
++ duart0: serial@21c0500 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x00 0x21c0500 0x0 0x100>;
++ interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 0>;
++ status = "disabled";
++ };
++
++ duart1: serial@21c0600 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x00 0x21c0600 0x0 0x100>;
++ interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 0>;
++ status = "disabled";
++ };
++
++ gpio0: gpio@2300000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2300000 0x0 0x10000>;
++ interrupts = <0 66 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio1: gpio@2310000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2310000 0x0 0x10000>;
++ interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ qspi: quadspi@1550000 {
++ compatible = "fsl,ls1012a-qspi", "fsl,ls1021a-qspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x1550000 0x0 0x10000>,
++ <0x0 0x40000000 0x0 0x10000000>;
++ reg-names = "QuadSPI", "QuadSPI-memory";
++ interrupts = <0 99 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "qspi_en", "qspi";
++ clocks = <&clockgen 4 0>, <&clockgen 4 0>;
++ big-endian;
++ fsl,qspi-has-second-chip;
++ status = "disabled";
++ };
++
++ wdog0: wdog@2ad0000 {
++ compatible = "fsl,ls1012a-wdt",
++ "fsl,imx21-wdt";
++ reg = <0x0 0x2ad0000 0x0 0x10000>;
++ interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 0>;
++ big-endian;
++ };
++
++ sai1: sai@2b50000 {
++ #sound-dai-cells = <0>;
++ compatible = "fsl,vf610-sai";
++ reg = <0x0 0x2b50000 0x0 0x10000>;
++ interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>,
++ <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "bus", "mclk1", "mclk2", "mclk3";
++ dma-names = "tx", "rx";
++ dmas = <&edma0 1 47>,
++ <&edma0 1 46>;
++ status = "disabled";
++ };
++
++ sai2: sai@2b60000 {
++ #sound-dai-cells = <0>;
++ compatible = "fsl,vf610-sai";
++ reg = <0x0 0x2b60000 0x0 0x10000>;
++ interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>,
++ <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "bus", "mclk1", "mclk2", "mclk3";
++ dma-names = "tx", "rx";
++ dmas = <&edma0 1 45>,
++ <&edma0 1 44>;
++ status = "disabled";
++ };
++
++ edma0: edma@2c00000 {
++ #dma-cells = <2>;
++ compatible = "fsl,vf610-edma";
++ reg = <0x0 0x2c00000 0x0 0x10000>,
++ <0x0 0x2c10000 0x0 0x10000>,
++ <0x0 0x2c20000 0x0 0x10000>;
++ interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>,
++ <0 103 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "edma-tx", "edma-err";
++ dma-channels = <32>;
++ big-endian;
++ clock-names = "dmamux0", "dmamux1";
++ clocks = <&clockgen 4 3>,
++ <&clockgen 4 3>;
++ };
++
++ usb0: usb3@2f00000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x2f00000 0x0 0x10000>;
++ interrupts = <0 60 0x4>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ usb1: usb2@8600000 {
++ compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr";
++ reg = <0x0 0x8600000 0x0 0x1000>;
++ interrupts = <0 139 0x4>;
++ dr_mode = "host";
++ phy_type = "ulpi";
++ };
++
++ sata: sata@3200000 {
++ compatible = "fsl,ls1012a-ahci", "fsl,ls1043a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>,
++ <0x0 0x20140520 0x0 0x4>;
++ reg-names = "ahci", "sata-ecc";
++ interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 0>;
++ dma-coherent;
++ status = "disabled";
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
+new file mode 100644
+index 00000000..169e1714
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
+@@ -0,0 +1,45 @@
++/*
++ * QorIQ FMan v3 device tree nodes for ls1043
++ *
++ * Copyright 2015-2016 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++&soc {
++
++/* include used FMan blocks */
++#include "qoriq-fman3-0.dtsi"
++#include "qoriq-fman3-0-1g-0.dtsi"
++#include "qoriq-fman3-0-1g-1.dtsi"
++#include "qoriq-fman3-0-1g-2.dtsi"
++#include "qoriq-fman3-0-1g-3.dtsi"
++#include "qoriq-fman3-0-1g-4.dtsi"
++#include "qoriq-fman3-0-1g-5.dtsi"
++#include "qoriq-fman3-0-10g-0.dtsi"
++
++};
++
++&fman0 {
++ /* these aliases provide the FMan ports mapping */
++ enet0: ethernet@e0000 {
++ };
++
++ enet1: ethernet@e2000 {
++ };
++
++ enet2: ethernet@e4000 {
++ };
++
++ enet3: ethernet@e6000 {
++ };
++
++ enet4: ethernet@e8000 {
++ };
++
++ enet5: ethernet@ea000 {
++ };
++
++ enet6: ethernet@f0000 {
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts
+new file mode 100644
+index 00000000..6c13b416
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts
+@@ -0,0 +1,69 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
++ *
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
++ *
++ * Mingkai Hu <Mingkai.hu@freescale.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "fsl-ls1043a-qds.dts"
++
++&bman_fbpr {
++ compatible = "fsl,bman-fbpr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_fqd {
++ compatible = "fsl,qman-fqd";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_pfdr {
++ compatible = "fsl,qman-pfdr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++
++&soc {
++#include "qoriq-dpaa-eth.dtsi"
++#include "qoriq-fman3-0-6oh.dtsi"
++};
++
++&fman0 {
++ compatible = "fsl,fman", "simple-bus";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+index dd9e9194..08abff73 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+@@ -1,7 +1,7 @@
+ /*
+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
+ *
+- * Copyright 2014-2015, Freescale Semiconductor
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * Mingkai Hu <Mingkai.hu@freescale.com>
+ *
+@@ -45,7 +45,7 @@
+ */
+
+ /dts-v1/;
+-/include/ "fsl-ls1043a.dtsi"
++#include "fsl-ls1043a.dtsi"
+
+ / {
+ model = "LS1043A QDS Board";
+@@ -60,6 +60,22 @@
+ serial1 = &duart1;
+ serial2 = &duart2;
+ serial3 = &duart3;
++ sgmii_riser_s1_p1 = &sgmii_phy_s1_p1;
++ sgmii_riser_s2_p1 = &sgmii_phy_s2_p1;
++ sgmii_riser_s3_p1 = &sgmii_phy_s3_p1;
++ sgmii_riser_s4_p1 = &sgmii_phy_s4_p1;
++ qsgmii_s1_p1 = &qsgmii_phy_s1_p1;
++ qsgmii_s1_p2 = &qsgmii_phy_s1_p2;
++ qsgmii_s1_p3 = &qsgmii_phy_s1_p3;
++ qsgmii_s1_p4 = &qsgmii_phy_s1_p4;
++ qsgmii_s2_p1 = &qsgmii_phy_s2_p1;
++ qsgmii_s2_p2 = &qsgmii_phy_s2_p2;
++ qsgmii_s2_p3 = &qsgmii_phy_s2_p3;
++ qsgmii_s2_p4 = &qsgmii_phy_s2_p4;
++ emi1_slot1 = &ls1043mdio_s1;
++ emi1_slot2 = &ls1043mdio_s2;
++ emi1_slot3 = &ls1043mdio_s3;
++ emi1_slot4 = &ls1043mdio_s4;
+ };
+
+ chosen {
+@@ -97,8 +113,11 @@
+ };
+
+ fpga: board-control@2,0 {
+- compatible = "fsl,ls1043aqds-fpga", "fsl,fpga-qixis";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "fsl,ls1043aqds-fpga", "fsl,fpga-qixis", "simple-bus";
+ reg = <0x2 0x0 0x0000100>;
++ ranges = <0 2 0 0x100>;
+ };
+ };
+
+@@ -181,3 +200,149 @@
+ reg = <0>;
+ };
+ };
++
++#include "fsl-ls1043-post.dtsi"
++
++&fman0 {
++ ethernet@e0000 {
++ phy-handle = <&qsgmii_phy_s2_p1>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@e2000 {
++ phy-handle = <&qsgmii_phy_s2_p2>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@e4000 {
++ phy-handle = <&rgmii_phy1>;
++ phy-connection-type = "rgmii";
++ };
++
++ ethernet@e6000 {
++ phy-handle = <&rgmii_phy2>;
++ phy-connection-type = "rgmii";
++ };
++
++ ethernet@e8000 {
++ phy-handle = <&qsgmii_phy_s2_p3>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@ea000 {
++ phy-handle = <&qsgmii_phy_s2_p4>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@f0000 { /* DTSEC9/10GEC1 */
++ fixed-link = <1 1 10000 0 0>;
++ phy-connection-type = "xgmii";
++ };
++};
++
++&fpga {
++ mdio-mux-emi1 {
++ compatible = "mdio-mux-mmioreg", "mdio-mux";
++ mdio-parent-bus = <&mdio0>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x54 1>; /* BRDCFG4 */
++ mux-mask = <0xe0>; /* EMI1 */
++
++ /* On-board RGMII1 PHY */
++ ls1043mdio0: mdio@0 {
++ reg = <0>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ rgmii_phy1: ethernet-phy@1 { /* MAC3 */
++ reg = <0x1>;
++ };
++ };
++
++ /* On-board RGMII2 PHY */
++ ls1043mdio1: mdio@1 {
++ reg = <0x20>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ rgmii_phy2: ethernet-phy@2 { /* MAC4 */
++ reg = <0x2>;
++ };
++ };
++
++ /* Slot 1 */
++ ls1043mdio_s1: mdio@2 {
++ reg = <0x40>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++
++ qsgmii_phy_s1_p1: ethernet-phy@4 {
++ reg = <0x4>;
++ };
++ qsgmii_phy_s1_p2: ethernet-phy@5 {
++ reg = <0x5>;
++ };
++ qsgmii_phy_s1_p3: ethernet-phy@6 {
++ reg = <0x6>;
++ };
++ qsgmii_phy_s1_p4: ethernet-phy@7 {
++ reg = <0x7>;
++ };
++
++ sgmii_phy_s1_p1: ethernet-phy@1c {
++ reg = <0x1c>;
++ };
++ };
++
++ /* Slot 2 */
++ ls1043mdio_s2: mdio@3 {
++ reg = <0x60>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++
++ qsgmii_phy_s2_p1: ethernet-phy@8 {
++ reg = <0x8>;
++ };
++ qsgmii_phy_s2_p2: ethernet-phy@9 {
++ reg = <0x9>;
++ };
++ qsgmii_phy_s2_p3: ethernet-phy@a {
++ reg = <0xa>;
++ };
++ qsgmii_phy_s2_p4: ethernet-phy@b {
++ reg = <0xb>;
++ };
++
++ sgmii_phy_s2_p1: ethernet-phy@1c {
++ reg = <0x1c>;
++ };
++ };
++
++ /* Slot 3 */
++ ls1043mdio_s3: mdio@4 {
++ reg = <0x80>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++
++ sgmii_phy_s3_p1: ethernet-phy@1c {
++ reg = <0x1c>;
++ };
++ };
++
++ /* Slot 4 */
++ ls1043mdio_s4: mdio@5 {
++ reg = <0xa0>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++
++ sgmii_phy_s4_p1: ethernet-phy@1c {
++ reg = <0x1c>;
++ };
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts
+new file mode 100644
+index 00000000..ac4b9a41
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts
+@@ -0,0 +1,69 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
++ *
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
++ *
++ * Mingkai Hu <Mingkai.hu@freescale.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "fsl-ls1043a-rdb.dts"
++
++&bman_fbpr {
++ compatible = "fsl,bman-fbpr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_fqd {
++ compatible = "fsl,qman-fqd";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_pfdr {
++ compatible = "fsl,qman-pfdr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++
++&soc {
++#include "qoriq-dpaa-eth.dtsi"
++#include "qoriq-fman3-0-6oh.dtsi"
++};
++
++&fman0 {
++ compatible = "fsl,fman", "simple-bus";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts
+new file mode 100644
+index 00000000..4e46a0a5
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts
+@@ -0,0 +1,117 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
++ *
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include "fsl-ls1043a-rdb-sdk.dts"
++
++&soc {
++ bp7: buffer-pool@7 {
++ compatible = "fsl,p4080-bpool", "fsl,bpool";
++ fsl,bpid = <7>;
++ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
++ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
++ };
++
++ bp8: buffer-pool@8 {
++ compatible = "fsl,p4080-bpool", "fsl,bpool";
++ fsl,bpid = <8>;
++ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
++ };
++
++ bp9: buffer-pool@9 {
++ compatible = "fsl,p4080-bpool", "fsl,bpool";
++ fsl,bpid = <9>;
++ fsl,bpool-ethernet-cfg = <0 0 0 2048 0 0xfeedabba>;
++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
++ };
++
++ fsl,dpaa {
++ compatible = "fsl,ls1043a", "fsl,dpaa", "simple-bus";
++
++ ethernet@0 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x50 1 0x51 1>;
++ fsl,qman-frame-queues-tx = <0x70 1 0x71 1>;
++ };
++
++ ethernet@1 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x52 1 0x53 1>;
++ fsl,qman-frame-queues-tx = <0x72 1 0x73 1>;
++ };
++
++ ethernet@2 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
++ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
++ };
++
++ ethernet@3 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
++ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
++ };
++
++ ethernet@4 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x58 1 0x59 1>;
++ fsl,qman-frame-queues-tx = <0x78 1 0x79 1>;
++ };
++
++ ethernet@5 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x60 1 0x61 1>;
++ fsl,qman-frame-queues-tx = <0x80 1 0x81 1>;
++ };
++
++ ethernet@8 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
++ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
++
++ };
++ dpa-fman0-oh@2 {
++ compatible = "fsl,dpa-oh";
++		/* Define frame queues for the OH port */
++		/* <OH Rx error, OH Rx default> */
++ fsl,qman-frame-queues-oh = <0x5a 1 0x5b 1>;
++ fsl,fman-oh-port = <&fman0_oh2>;
++ };
++ };
++};
++/ {
++ reserved-memory {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ usdpaa_mem: usdpaa_mem {
++ compatible = "fsl,usdpaa-mem";
++ alloc-ranges = <0 0 0x10000 0>;
++ size = <0 0x10000000>;
++ alignment = <0 0x10000000>;
++ };
++ };
++};
++
++&fman0 {
++ fman0_oh2: port@83000 {
++ cell-index = <1>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x83000 0x1000>;
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+index d2313e05..f92ae325 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+@@ -1,7 +1,7 @@
+ /*
+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
+ *
+- * Copyright 2014-2015, Freescale Semiconductor
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * Mingkai Hu <Mingkai.hu@freescale.com>
+ *
+@@ -45,7 +45,7 @@
+ */
+
+ /dts-v1/;
+-/include/ "fsl-ls1043a.dtsi"
++#include "fsl-ls1043a.dtsi"
+
+ / {
+ model = "LS1043A RDB Board";
+@@ -86,6 +86,10 @@
+ compatible = "pericom,pt7c4338";
+ reg = <0x68>;
+ };
++ rtc@51 {
++ compatible = "nxp,pcf85263";
++ reg = <0x51>;
++ };
+ };
+
+ &ifc {
+@@ -130,6 +134,38 @@
+ reg = <0>;
+ spi-max-frequency = <1000000>; /* input clock */
+ };
++
++ slic@2 {
++ compatible = "maxim,ds26522";
++ reg = <2>;
++ spi-max-frequency = <2000000>;
++ fsl,spi-cs-sck-delay = <100>;
++ fsl,spi-sck-cs-delay = <50>;
++ };
++
++ slic@3 {
++ compatible = "maxim,ds26522";
++ reg = <3>;
++ spi-max-frequency = <2000000>;
++ fsl,spi-cs-sck-delay = <100>;
++ fsl,spi-sck-cs-delay = <50>;
++ };
++};
++
++&uqe {
++ ucc_hdlc: ucc@2000 {
++ compatible = "fsl,ucc-hdlc";
++ rx-clock-name = "clk8";
++ tx-clock-name = "clk9";
++ fsl,rx-sync-clock = "rsync_pin";
++ fsl,tx-sync-clock = "tsync_pin";
++ fsl,tx-timeslot-mask = <0xfffffffe>;
++ fsl,rx-timeslot-mask = <0xfffffffe>;
++ fsl,tdm-framer-type = "e1";
++ fsl,tdm-id = <0>;
++ fsl,siram-entry-id = <0>;
++ fsl,tdm-interface;
++ };
+ };
+
+ &duart0 {
+@@ -139,3 +175,76 @@
+ &duart1 {
+ status = "okay";
+ };
++
++#include "fsl-ls1043-post.dtsi"
++
++&fman0 {
++ ethernet@e0000 {
++ phy-handle = <&qsgmii_phy1>;
++ phy-connection-type = "qsgmii";
++ };
++
++ ethernet@e2000 {
++ phy-handle = <&qsgmii_phy2>;
++ phy-connection-type = "qsgmii";
++ };
++
++ ethernet@e4000 {
++ phy-handle = <&rgmii_phy1>;
++ phy-connection-type = "rgmii-txid";
++ };
++
++ ethernet@e6000 {
++ phy-handle = <&rgmii_phy2>;
++ phy-connection-type = "rgmii-txid";
++ };
++
++ ethernet@e8000 {
++ phy-handle = <&qsgmii_phy3>;
++ phy-connection-type = "qsgmii";
++ };
++
++ ethernet@ea000 {
++ phy-handle = <&qsgmii_phy4>;
++ phy-connection-type = "qsgmii";
++ };
++
++ ethernet@f0000 { /* 10GEC1 */
++ phy-handle = <&aqr105_phy>;
++ phy-connection-type = "xgmii";
++ };
++
++ mdio@fc000 {
++ rgmii_phy1: ethernet-phy@1 {
++ reg = <0x1>;
++ };
++
++ rgmii_phy2: ethernet-phy@2 {
++ reg = <0x2>;
++ };
++
++ qsgmii_phy1: ethernet-phy@4 {
++ reg = <0x4>;
++ };
++
++ qsgmii_phy2: ethernet-phy@5 {
++ reg = <0x5>;
++ };
++
++ qsgmii_phy3: ethernet-phy@6 {
++ reg = <0x6>;
++ };
++
++ qsgmii_phy4: ethernet-phy@7 {
++ reg = <0x7>;
++ };
++ };
++
++ mdio@fd000 {
++ aqr105_phy: ethernet-phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 132 4>;
++ reg = <0x1>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+index 97d331ec..8b27faaf 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+@@ -1,7 +1,7 @@
+ /*
+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
+ *
+- * Copyright 2014-2015, Freescale Semiconductor
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * Mingkai Hu <Mingkai.hu@freescale.com>
+ *
+@@ -44,12 +44,25 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
++#include <dt-bindings/thermal/thermal.h>
++
+ / {
+ compatible = "fsl,ls1043a";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
++ aliases {
++ fman0 = &fman0;
++ ethernet0 = &enet0;
++ ethernet1 = &enet1;
++ ethernet2 = &enet2;
++ ethernet3 = &enet3;
++ ethernet4 = &enet4;
++ ethernet5 = &enet5;
++ ethernet6 = &enet6;
++ };
++
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -66,6 +79,8 @@
+ reg = <0x0>;
+ clocks = <&clockgen 1 0>;
+ next-level-cache = <&l2>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
+ };
+
+ cpu1: cpu@1 {
+@@ -74,6 +89,7 @@
+ reg = <0x1>;
+ clocks = <&clockgen 1 0>;
+ next-level-cache = <&l2>;
++ cpu-idle-states = <&CPU_PH20>;
+ };
+
+ cpu2: cpu@2 {
+@@ -82,6 +98,7 @@
+ reg = <0x2>;
+ clocks = <&clockgen 1 0>;
+ next-level-cache = <&l2>;
++ cpu-idle-states = <&CPU_PH20>;
+ };
+
+ cpu3: cpu@3 {
+@@ -90,6 +107,7 @@
+ reg = <0x3>;
+ clocks = <&clockgen 1 0>;
+ next-level-cache = <&l2>;
++ cpu-idle-states = <&CPU_PH20>;
+ };
+
+ l2: l2-cache {
+@@ -97,12 +115,56 @@
+ };
+ };
+
++ idle-states {
++ /*
++ * PSCI node is not added default, U-boot will add missing
++	 * The PSCI node is not added by default; U-Boot will add the
++	 * missing parts if it decides to use PSCI.
++ entry-method = "arm,psci";
++
++ CPU_PH20: cpu-ph20 {
++ compatible = "arm,idle-state";
++ idle-state-name = "PH20";
++ arm,psci-suspend-param = <0x0>;
++ entry-latency-us = <1000>;
++ exit-latency-us = <1000>;
++ min-residency-us = <3000>;
++ };
++ };
++
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0 0x80000000>;
+ /* DRAM space 1, size: 2GiB DRAM */
+ };
+
++ reserved-memory {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ bman_fbpr: bman-fbpr {
++ compatible = "shared-dma-pool";
++ size = <0 0x1000000>;
++ alignment = <0 0x1000000>;
++ no-map;
++ };
++
++ qman_fqd: qman-fqd {
++ compatible = "shared-dma-pool";
++ size = <0 0x400000>;
++ alignment = <0 0x400000>;
++ no-map;
++ };
++
++ qman_pfdr: qman-pfdr {
++ compatible = "shared-dma-pool";
++ size = <0 0x2000000>;
++ alignment = <0 0x2000000>;
++ no-map;
++ };
++ };
++
+ sysclk: sysclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+@@ -149,7 +211,7 @@
+ interrupts = <1 9 0xf08>;
+ };
+
+- soc {
++ soc: soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+@@ -213,13 +275,14 @@
+
+ dcfg: dcfg@1ee0000 {
+ compatible = "fsl,ls1043a-dcfg", "syscon";
+- reg = <0x0 0x1ee0000 0x0 0x10000>;
++ reg = <0x0 0x1ee0000 0x0 0x1000>;
+ big-endian;
+ };
+
+ ifc: ifc@1530000 {
+ compatible = "fsl,ifc", "simple-bus";
+ reg = <0x0 0x1530000 0x0 0x10000>;
++ big-endian;
+ interrupts = <0 43 0x4>;
+ };
+
+@@ -255,6 +318,103 @@
+ big-endian;
+ };
+
++ tmu: tmu@1f00000 {
++ compatible = "fsl,qoriq-tmu";
++ reg = <0x0 0x1f00000 0x0 0x10000>;
++ interrupts = <0 33 0x4>;
++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
++ fsl,tmu-calibration = <0x00000000 0x00000026
++ 0x00000001 0x0000002d
++ 0x00000002 0x00000032
++ 0x00000003 0x00000039
++ 0x00000004 0x0000003f
++ 0x00000005 0x00000046
++ 0x00000006 0x0000004d
++ 0x00000007 0x00000054
++ 0x00000008 0x0000005a
++ 0x00000009 0x00000061
++ 0x0000000a 0x0000006a
++ 0x0000000b 0x00000071
++
++ 0x00010000 0x00000025
++ 0x00010001 0x0000002c
++ 0x00010002 0x00000035
++ 0x00010003 0x0000003d
++ 0x00010004 0x00000045
++ 0x00010005 0x0000004e
++ 0x00010006 0x00000057
++ 0x00010007 0x00000061
++ 0x00010008 0x0000006b
++ 0x00010009 0x00000076
++
++ 0x00020000 0x00000029
++ 0x00020001 0x00000033
++ 0x00020002 0x0000003d
++ 0x00020003 0x00000049
++ 0x00020004 0x00000056
++ 0x00020005 0x00000061
++ 0x00020006 0x0000006d
++
++ 0x00030000 0x00000021
++ 0x00030001 0x0000002a
++ 0x00030002 0x0000003c
++ 0x00030003 0x0000004e>;
++ #thermal-sensor-cells = <1>;
++ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++
++ thermal-sensors = <&tmu 3>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++ cpu_crit: cpu-crit {
++ temperature = <95000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++ };
++
++ qman: qman@1880000 {
++ compatible = "fsl,qman";
++ reg = <0x00 0x1880000 0x0 0x10000>;
++ interrupts = <0 45 0x4>;
++ memory-region = <&qman_fqd &qman_pfdr>;
++ };
++
++ bman: bman@1890000 {
++ compatible = "fsl,bman";
++ reg = <0x00 0x1890000 0x0 0x10000>;
++ interrupts = <0 45 0x4>;
++ memory-region = <&bman_fbpr>;
++ };
++
++ bportals: bman-portals@508000000 {
++ ranges = <0x0 0x5 0x08000000 0x8000000>;
++ };
++
++ qportals: qman-portals@500000000 {
++ ranges = <0x0 0x5 0x00000000 0x8000000>;
++ };
++
+ dspi0: dspi@2100000 {
+ compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi";
+ #address-cells = <1>;
+@@ -396,6 +556,72 @@
+ #interrupt-cells = <2>;
+ };
+
++ uqe: uqe@2400000 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ device_type = "qe";
++ compatible = "fsl,qe", "simple-bus";
++ ranges = <0x0 0x0 0x2400000 0x40000>;
++ reg = <0x0 0x2400000 0x0 0x480>;
++ brg-frequency = <100000000>;
++ bus-frequency = <200000000>;
++
++ fsl,qe-num-riscs = <1>;
++ fsl,qe-num-snums = <28>;
++
++ qeic: qeic@80 {
++ compatible = "fsl,qe-ic";
++ reg = <0x80 0x80>;
++ #address-cells = <0>;
++ interrupt-controller;
++ #interrupt-cells = <1>;
++ interrupts = <0 77 0x04 0 77 0x04>;
++ };
++
++ si1: si@700 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,ls1043-qe-si",
++ "fsl,t1040-qe-si";
++ reg = <0x700 0x80>;
++ };
++
++ siram1: siram@1000 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "fsl,ls1043-qe-siram",
++ "fsl,t1040-qe-siram";
++ reg = <0x1000 0x800>;
++ };
++
++ ucc@2000 {
++ cell-index = <1>;
++ reg = <0x2000 0x200>;
++ interrupts = <32>;
++ interrupt-parent = <&qeic>;
++ };
++
++ ucc@2200 {
++ cell-index = <3>;
++ reg = <0x2200 0x200>;
++ interrupts = <34>;
++ interrupt-parent = <&qeic>;
++ };
++
++ muram@10000 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "fsl,qe-muram", "fsl,cpm-muram";
++ ranges = <0x0 0x10000 0x6000>;
++
++ data-only@0 {
++ compatible = "fsl,qe-muram-data",
++ "fsl,cpm-muram-data";
++ reg = <0x0 0x6000>;
++ };
++ };
++ };
++
+ lpuart0: serial@2950000 {
+ compatible = "fsl,ls1021a-lpuart";
+ reg = <0x0 0x2950000 0x0 0x1000>;
+@@ -450,6 +676,16 @@
+ status = "disabled";
+ };
+
++ ftm0: ftm0@29d0000 {
++ compatible = "fsl,ftm-alarm";
++ reg = <0x0 0x29d0000 0x0 0x10000>,
++ <0x0 0x1ee2140 0x0 0x4>;
++ reg-names = "ftm", "FlexTimer1";
++ interrupts = <0 86 0x4>;
++ big-endian;
++ status = "okay";
++ };
++
+ wdog0: wdog@2ad0000 {
+ compatible = "fsl,ls1043a-wdt", "fsl,imx21-wdt";
+ reg = <0x0 0x2ad0000 0x0 0x10000>;
+@@ -482,6 +718,8 @@
+ dr_mode = "host";
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,dma-snooping;
+ };
+
+ usb1: usb3@3000000 {
+@@ -491,6 +729,9 @@
+ dr_mode = "host";
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,dma-snooping;
++ configure-gfladj;
+ };
+
+ usb2: usb3@3100000 {
+@@ -500,32 +741,52 @@
+ dr_mode = "host";
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,dma-snooping;
++ configure-gfladj;
+ };
+
+ sata: sata@3200000 {
+ compatible = "fsl,ls1043a-ahci";
+- reg = <0x0 0x3200000 0x0 0x10000>;
++ reg = <0x0 0x3200000 0x0 0x10000>,
++ <0x0 0x20140520 0x0 0x4>;
++ reg-names = "ahci", "sata-ecc";
+ interrupts = <0 69 0x4>;
+ clocks = <&clockgen 4 0>;
+ dma-coherent;
+ };
+
++ qdma: qdma@8380000 {
++ compatible = "fsl,ls1021a-qdma", "fsl,ls1043a-qdma";
++ reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
++ <0x0 0x8390000 0x0 0x10000>, /* Status regs */
++ <0x0 0x83a0000 0x0 0x40000>; /* Block regs */
++ interrupts = <0 152 0x4>,
++ <0 39 0x4>;
++ interrupt-names = "qdma-error", "qdma-queue";
++ channels = <8>;
++ queues = <2>;
++ status-sizes = <64>;
++ queue-sizes = <64 64>;
++ big-endian;
++ };
++
+ msi1: msi-controller1@1571000 {
+- compatible = "fsl,1s1043a-msi";
++ compatible = "fsl,ls1043a-msi";
+ reg = <0x0 0x1571000 0x0 0x8>;
+ msi-controller;
+ interrupts = <0 116 0x4>;
+ };
+
+ msi2: msi-controller2@1572000 {
+- compatible = "fsl,1s1043a-msi";
++ compatible = "fsl,ls1043a-msi";
+ reg = <0x0 0x1572000 0x0 0x8>;
+ msi-controller;
+ interrupts = <0 126 0x4>;
+ };
+
+ msi3: msi-controller3@1573000 {
+- compatible = "fsl,1s1043a-msi";
++ compatible = "fsl,ls1043a-msi";
+ reg = <0x0 0x1573000 0x0 0x8>;
+ msi-controller;
+ interrupts = <0 160 0x4>;
+@@ -536,9 +797,9 @@
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+- interrupts = <0 118 0x4>, /* controller interrupt */
+- <0 117 0x4>; /* PME interrupt */
+- interrupt-names = "intr", "pme";
++ interrupts = <0 117 0x4>, /* PME interrupt */
++ <0 118 0x4>; /* aer interrupt */
++ interrupt-names = "pme", "aer";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+@@ -547,7 +808,7 @@
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+- msi-parent = <&msi1>;
++ msi-parent = <&msi1>, <&msi2>, <&msi3>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 110 0x4>,
+@@ -561,9 +822,9 @@
+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
+ 0x48 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+- interrupts = <0 128 0x4>,
+- <0 127 0x4>;
+- interrupt-names = "intr", "pme";
++ interrupts = <0 127 0x4>,
++ <0 128 0x4>;
++ interrupt-names = "pme", "aer";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+@@ -572,7 +833,7 @@
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+- msi-parent = <&msi2>;
++ msi-parent = <&msi1>, <&msi2>, <&msi3>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 120 0x4>,
+@@ -586,9 +847,9 @@
+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
+ 0x50 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+- interrupts = <0 162 0x4>,
+- <0 161 0x4>;
+- interrupt-names = "intr", "pme";
++ interrupts = <0 161 0x4>,
++ <0 162 0x4>;
++ interrupt-names = "pme", "aer";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+@@ -597,7 +858,7 @@
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+- msi-parent = <&msi3>;
++ msi-parent = <&msi1>, <&msi2>, <&msi3>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 154 0x4>,
+@@ -608,3 +869,6 @@
+ };
+
+ };
++
++#include "qoriq-qman1-portals.dtsi"
++#include "qoriq-bman1-portals.dtsi"
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
+new file mode 100644
+index 00000000..f5017dba
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
+@@ -0,0 +1,48 @@
++/*
++ * QorIQ FMan v3 device tree nodes for ls1046
++ *
++ * Copyright 2015-2016 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++&soc {
++
++/* include used FMan blocks */
++#include "qoriq-fman3-0.dtsi"
++#include "qoriq-fman3-0-1g-0.dtsi"
++#include "qoriq-fman3-0-1g-1.dtsi"
++#include "qoriq-fman3-0-1g-2.dtsi"
++#include "qoriq-fman3-0-1g-3.dtsi"
++#include "qoriq-fman3-0-1g-4.dtsi"
++#include "qoriq-fman3-0-1g-5.dtsi"
++#include "qoriq-fman3-0-10g-0.dtsi"
++#include "qoriq-fman3-0-10g-1.dtsi"
++};
++
++&fman0 {
++ /* these aliases provide the FMan ports mapping */
++ enet0: ethernet@e0000 {
++ };
++
++ enet1: ethernet@e2000 {
++ };
++
++ enet2: ethernet@e4000 {
++ };
++
++ enet3: ethernet@e6000 {
++ };
++
++ enet4: ethernet@e8000 {
++ };
++
++ enet5: ethernet@ea000 {
++ };
++
++ enet6: ethernet@f0000 {
++ };
++
++ enet7: ethernet@f2000 {
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts
+new file mode 100644
+index 00000000..c375af47
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts
+@@ -0,0 +1,109 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
++ *
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
++ *
++ * Mingkai Hu <Mingkai.hu@freescale.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "fsl-ls1046a-qds.dts"
++
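++/* SDK variant of the QDS board: pin down the QMan/BMan memory pools
++ * and pull in the FMan/DPAA ethernet nodes on top of the base dts */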
++&bman_fbpr {
++ compatible = "fsl,bman-fbpr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_fqd {
++ compatible = "fsl,qman-fqd";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_pfdr {
++ compatible = "fsl,qman-pfdr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++
++&soc {
++#include "qoriq-dpaa-eth.dtsi"
++#include "qoriq-fman3-0-6oh.dtsi"
++};
++
++&fsldpaa {
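++	/* extra DPAA ethernet bound to enet7 (ethernet@f2000, the
++	 * second 10G-capable MAC) */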
++ ethernet@9 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet7>;
++ };
++};
++
++&fman0 {
++ compatible = "fsl,fman", "simple-bus";
++};
++
++&dspi {
++ bus-num = <0>;
++ status = "okay";
++
++ flash@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "n25q128a11", "jedec,spi-nor";
++ reg = <0>;
++ spi-max-frequency = <10000000>;
++ };
++
++ flash@1 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "sst25wf040b", "jedec,spi-nor";
++ spi-cpol;
++ spi-cpha;
++ reg = <1>;
++ spi-max-frequency = <10000000>;
++ };
++
++ flash@2 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "en25s64", "jedec,spi-nor";
++ spi-cpol;
++ spi-cpha;
++ reg = <2>;
++ spi-max-frequency = <10000000>;
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+new file mode 100644
+index 00000000..3b8e9b7e
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+@@ -0,0 +1,363 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ * Shaohui Xie <Shaohui.Xie@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls1046a.dtsi"
++
++/ {
++ model = "LS1046A QDS Board";
++ compatible = "fsl,ls1046a-qds", "fsl,ls1046a";
++
++ aliases {
++ gpio0 = &gpio0;
++ gpio1 = &gpio1;
++ gpio2 = &gpio2;
++ gpio3 = &gpio3;
++ serial0 = &duart0;
++ serial1 = &duart1;
++ serial2 = &duart2;
++ serial3 = &duart3;
++
++ emi1_slot1 = &ls1046mdio_s1;
++ emi1_slot2 = &ls1046mdio_s2;
++ emi1_slot4 = &ls1046mdio_s4;
++
++ sgmii_s1_p1 = &sgmii_phy_s1_p1;
++ sgmii_s1_p2 = &sgmii_phy_s1_p2;
++ sgmii_s1_p3 = &sgmii_phy_s1_p3;
++ sgmii_s1_p4 = &sgmii_phy_s1_p4;
++ sgmii_s4_p1 = &sgmii_phy_s4_p1;
++ qsgmii_s2_p1 = &qsgmii_phy_s2_p1;
++ qsgmii_s2_p2 = &qsgmii_phy_s2_p2;
++ qsgmii_s2_p3 = &qsgmii_phy_s2_p3;
++ qsgmii_s2_p4 = &qsgmii_phy_s2_p4;
++ };
++
++ chosen {
++ stdout-path = "serial0:115200n8";
++ };
++};
++
++&dspi {
++ bus-num = <0>;
++ status = "okay";
++
++ flash@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "n25q128a11", "jedec,spi-nor";
++ reg = <0>;
++ spi-max-frequency = <10000000>;
++ };
++
++ flash@1 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "sst25wf040b", "jedec,spi-nor";
++ spi-cpol;
++ spi-cpha;
++ reg = <1>;
++ spi-max-frequency = <10000000>;
++ };
++
++ flash@2 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "en25s64", "jedec,spi-nor";
++ spi-cpol;
++ spi-cpha;
++ reg = <2>;
++ spi-max-frequency = <10000000>;
++ };
++};
++
++&duart0 {
++ status = "okay";
++};
++
++&duart1 {
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++
++ pca9547@77 {
++ compatible = "nxp,pca9547";
++ reg = <0x77>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ i2c@2 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x2>;
++
++ ina220@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <1000>;
++ };
++
++ ina220@41 {
++ compatible = "ti,ina220";
++ reg = <0x41>;
++ shunt-resistor = <1000>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ rtc@51 {
++ compatible = "nxp,pcf2129";
++ reg = <0x51>;
++ /* IRQ10_B */
++ interrupts = <0 150 0x4>;
++ };
++
++ eeprom@56 {
++ compatible = "atmel,24c512";
++ reg = <0x56>;
++ };
++
++ eeprom@57 {
++ compatible = "atmel,24c512";
++ reg = <0x57>;
++ };
++
++ temp-sensor@4c {
++ compatible = "adi,adt7461a";
++ reg = <0x4c>;
++ };
++ };
++ };
++};
++
++&ifc {
++ #address-cells = <2>;
++ #size-cells = <1>;
++	/* NOR, NAND flash and FPGA on board */
++ ranges = <0x0 0x0 0x0 0x60000000 0x08000000
++ 0x1 0x0 0x0 0x7e800000 0x00010000
++ 0x2 0x0 0x0 0x7fb00000 0x00000100>;
++ status = "okay";
++
++ nor@0,0 {
++ compatible = "cfi-flash";
++ reg = <0x0 0x0 0x8000000>;
++ bank-width = <2>;
++ device-width = <1>;
++ };
++
++ nand@1,0 {
++ compatible = "fsl,ifc-nand";
++ reg = <0x1 0x0 0x10000>;
++ };
++
++ fpga: board-control@2,0 {
++ compatible = "fsl,ls1046aqds-fpga", "fsl,fpga-qixis", "simple-bus";
++ reg = <0x2 0x0 0x0000100>;
++ ranges = <0 2 0 0x100>;
++ };
++};
++
++&lpuart0 {
++ status = "okay";
++};
++
++&qspi {
++ num-cs = <2>;
++ bus-num = <0>;
++ status = "okay";
++
++ qflash0: s25fl128s@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++#include "fsl-ls1046-post.dtsi"
++
++&fman0 {
++ ethernet@e0000 {
++ phy-handle = <&qsgmii_phy_s2_p1>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@e2000 {
++ phy-handle = <&sgmii_phy_s4_p1>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@e4000 {
++ phy-handle = <&rgmii_phy1>;
++ phy-connection-type = "rgmii";
++ };
++
++ ethernet@e6000 {
++ phy-handle = <&rgmii_phy2>;
++ phy-connection-type = "rgmii";
++ };
++
++ ethernet@e8000 {
++ phy-handle = <&sgmii_phy_s1_p3>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@ea000 {
++ phy-handle = <&sgmii_phy_s1_p4>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@f0000 { /* DTSEC9/10GEC1 */
++ phy-handle = <&sgmii_phy_s1_p1>;
++ phy-connection-type = "xgmii";
++ };
++
++ ethernet@f2000 { /* DTSEC10/10GEC2 */
++ phy-handle = <&sgmii_phy_s1_p2>;
++ phy-connection-type = "xgmii";
++ };
++};
++
++&fpga {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ mdio-mux-emi1 {
++ compatible = "mdio-mux-mmioreg", "mdio-mux";
++ mdio-parent-bus = <&mdio0>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x54 1>; /* BRDCFG4 */
++ mux-mask = <0xe0>; /* EMI1 */
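++		/* BRDCFG4 bits [7:5] pick the active EMI1 bus; each child
++		 * mdio node's reg below is the masked register value */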
++
++ /* On-board RGMII1 PHY */
++ ls1046mdio0: mdio@0 {
++ reg = <0>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ rgmii_phy1: ethernet-phy@1 { /* MAC3 */
++ reg = <0x1>;
++ };
++ };
++
++ /* On-board RGMII2 PHY */
++ ls1046mdio1: mdio@1 {
++ reg = <0x20>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ rgmii_phy2: ethernet-phy@2 { /* MAC4 */
++ reg = <0x2>;
++ };
++ };
++
++ /* Slot 1 */
++ ls1046mdio_s1: mdio@2 {
++ reg = <0x40>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++
++ sgmii_phy_s1_p1: ethernet-phy@1c {
++ reg = <0x1c>;
++ };
++
++ sgmii_phy_s1_p2: ethernet-phy@1d {
++ reg = <0x1d>;
++ };
++
++ sgmii_phy_s1_p3: ethernet-phy@1e {
++ reg = <0x1e>;
++ };
++
++ sgmii_phy_s1_p4: ethernet-phy@1f {
++ reg = <0x1f>;
++ };
++ };
++
++ /* Slot 2 */
++ ls1046mdio_s2: mdio@3 {
++ reg = <0x60>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++
++ qsgmii_phy_s2_p1: ethernet-phy@8 {
++ reg = <0x8>;
++ };
++ qsgmii_phy_s2_p2: ethernet-phy@9 {
++ reg = <0x9>;
++ };
++ qsgmii_phy_s2_p3: ethernet-phy@a {
++ reg = <0xa>;
++ };
++ qsgmii_phy_s2_p4: ethernet-phy@b {
++ reg = <0xb>;
++ };
++ };
++
++ /* Slot 4 */
++ ls1046mdio_s4: mdio@5 {
++ reg = <0x80>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++
++ sgmii_phy_s4_p1: ethernet-phy@1c {
++ reg = <0x1c>;
++ };
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts
+new file mode 100644
+index 00000000..bfe2f36c
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts
+@@ -0,0 +1,76 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
++ *
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
++ *
++ * Mingkai Hu <Mingkai.hu@freescale.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "fsl-ls1046a-rdb.dts"
++
++&bman_fbpr {
++ compatible = "fsl,bman-fbpr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_fqd {
++ compatible = "fsl,qman-fqd";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_pfdr {
++ compatible = "fsl,qman-pfdr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++
++&soc {
++#include "qoriq-dpaa-eth.dtsi"
++#include "qoriq-fman3-0-6oh.dtsi"
++};
++
++&fsldpaa {
++ ethernet@9 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet7>;
++ };
++};
++
++&fman0 {
++ compatible = "fsl,fman", "simple-bus";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts
+new file mode 100644
+index 00000000..54336aa6
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts
+@@ -0,0 +1,110 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include "fsl-ls1046a-rdb-sdk.dts"
++
++&soc {
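++	/* static BMan pools for USDPAA with 192-, 576- and 2048-byte
++	 * buffers; the ethernet-cfg cells appear to group into 64-bit
++	 * <count size base> triples, base left as a poison value */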
++ bp7: buffer-pool@7 {
++ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
++ fsl,bpid = <7>;
++ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
++ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
++ };
++
++ bp8: buffer-pool@8 {
++ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
++ fsl,bpid = <8>;
++ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
++ };
++
++ bp9: buffer-pool@9 {
++ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
++ fsl,bpid = <9>;
++ fsl,bpool-ethernet-cfg = <0 0 0 2048 0 0xfeedabba>;
++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
++ };
++
++ fsl,dpaa {
++ compatible = "fsl,ls1046a", "fsl,dpaa", "simple-bus";
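++		/* frame-queue cells are <fqid count> pairs; for Rx the two
++		 * pairs are the error and default queues (cf. the OH port
++		 * comment below) */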
++
++ ethernet@2 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
++ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
++ };
++
++ ethernet@3 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
++ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
++ };
++
++ ethernet@4 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x58 1 0x59 1>;
++ fsl,qman-frame-queues-tx = <0x78 1 0x79 1>;
++ };
++
++ ethernet@5 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x5a 1 0x5b 1>;
++ fsl,qman-frame-queues-tx = <0x7a 1 0x7b 1>;
++ };
++
++ ethernet@8 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
++ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
++ };
++
++ ethernet@9 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>;
++ fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>;
++ };
++
++ dpa-fman0-oh@2 {
++ compatible = "fsl,dpa-oh";
++		/* Define frame queues for the OH port */
++ /* <OH Rx error, OH Rx default> */
++ fsl,qman-frame-queues-oh = <0x60 1 0x61 1>;
++ fsl,fman-oh-port = <&fman0_oh2>;
++ };
++ };
++};
++/ {
++ reserved-memory {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
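++		/* 256 MiB, 256 MiB-aligned region handed to user-space DPAA */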
++ usdpaa_mem: usdpaa_mem {
++ compatible = "fsl,usdpaa-mem";
++ alloc-ranges = <0 0 0x10000 0>;
++ size = <0 0x10000000>;
++ alignment = <0 0x10000000>;
++ };
++ };
++};
++
++&fman0 {
++ fman0_oh2: port@83000 {
++ cell-index = <1>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x83000 0x1000>;
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+new file mode 100644
+index 00000000..be9b62ca
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+@@ -0,0 +1,218 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ * Mingkai Hu <mingkai.hu@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls1046a.dtsi"
++
++/ {
++ model = "LS1046A RDB Board";
++ compatible = "fsl,ls1046a-rdb", "fsl,ls1046a";
++
++ aliases {
++ serial0 = &duart0;
++ serial1 = &duart1;
++ serial2 = &duart2;
++ serial3 = &duart3;
++ };
++
++ chosen {
++ stdout-path = "serial0:115200n8";
++ };
++};
++
++&esdhc {
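++	/* speed modes the RDB's eSDHC slot is wired to support */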
++ mmc-hs200-1_8v;
++ sd-uhs-sdr104;
++ sd-uhs-sdr50;
++ sd-uhs-sdr25;
++ sd-uhs-sdr12;
++};
++
++&duart0 {
++ status = "okay";
++};
++
++&duart1 {
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++
++ ina220@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <1000>;
++ };
++
++ temp-sensor@4c {
++ compatible = "adi,adt7461";
++ reg = <0x4c>;
++ };
++
++	eeprom@52 {
++ compatible = "atmel,24c512";
++ reg = <0x52>;
++ };
++
++	eeprom@53 {
++ compatible = "atmel,24c512";
++ reg = <0x53>;
++ };
++};
++
++&i2c3 {
++ status = "okay";
++
++ rtc@51 {
++ compatible = "nxp,pcf2129";
++ reg = <0x51>;
++ };
++};
++
++&ifc {
++ #address-cells = <2>;
++ #size-cells = <1>;
++	/* NAND flash and CPLD on board */
++ ranges = <0x0 0x0 0x0 0x7e800000 0x00010000
++ 0x2 0x0 0x0 0x7fb00000 0x00000100>;
++ status = "okay";
++
++ nand@0,0 {
++ compatible = "fsl,ifc-nand";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0x0 0x0 0x10000>;
++ };
++
++ cpld: board-control@2,0 {
++ compatible = "fsl,ls1046ardb-cpld";
++ reg = <0x2 0x0 0x0000100>;
++ };
++};
++
++&qspi {
++ num-cs = <2>;
++ bus-num = <0>;
++ status = "okay";
++
++ qflash0: s25fs512s@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++
++ qflash1: s25fs512s@1 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ reg = <1>;
++ };
++};
++
++#include "fsl-ls1046-post.dtsi"
++
++&fman0 {
++ ethernet@e4000 {
++ phy-handle = <&rgmii_phy1>;
++ phy-connection-type = "rgmii";
++ };
++
++ ethernet@e6000 {
++ phy-handle = <&rgmii_phy2>;
++ phy-connection-type = "rgmii";
++ };
++
++ ethernet@e8000 {
++ phy-handle = <&sgmii_phy1>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@ea000 {
++ phy-handle = <&sgmii_phy2>;
++ phy-connection-type = "sgmii";
++ };
++
++ ethernet@f0000 { /* 10GEC1 */
++ phy-handle = <&aqr106_phy>;
++ phy-connection-type = "xgmii";
++ };
++
++ ethernet@f2000 { /* 10GEC2 */
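++		/* legacy fixed-link cells:
++		 * <phy-id full-duplex speed pause asym-pause> */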
++ fixed-link = <0 1 1000 0 0>;
++ phy-connection-type = "xgmii";
++ };
++
++ mdio@fc000 {
++ rgmii_phy1: ethernet-phy@1 {
++ reg = <0x1>;
++ };
++
++ rgmii_phy2: ethernet-phy@2 {
++ reg = <0x2>;
++ };
++
++ sgmii_phy1: ethernet-phy@3 {
++ reg = <0x3>;
++ };
++
++ sgmii_phy2: ethernet-phy@4 {
++ reg = <0x4>;
++ };
++ };
++
++ mdio@fd000 {
++ aqr106_phy: ethernet-phy@0 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 131 4>;
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+new file mode 100644
+index 00000000..6b87266f
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+@@ -0,0 +1,793 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ * Mingkai Hu <mingkai.hu@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/thermal/thermal.h>
++
++/ {
++ compatible = "fsl,ls1046a";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ aliases {
++ crypto = &crypto;
++ fman0 = &fman0;
++ ethernet0 = &enet0;
++ ethernet1 = &enet1;
++ ethernet2 = &enet2;
++ ethernet3 = &enet3;
++ ethernet4 = &enet4;
++ ethernet5 = &enet5;
++ ethernet6 = &enet6;
++ ethernet7 = &enet7;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ #cooling-cells = <2>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x1>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu2: cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x2>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu3: cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x3>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ l2: l2-cache {
++ compatible = "cache";
++ };
++ };
++
++ idle-states {
++ /*
++		 * The PSCI node is not added by default; U-Boot will add the
++		 * missing parts if it decides to use PSCI.
++ */
++ entry-method = "arm,psci";
++
++ CPU_PH20: cpu-ph20 {
++ compatible = "arm,idle-state";
++ idle-state-name = "PH20";
++ arm,psci-suspend-param = <0x0>;
++ entry-latency-us = <1000>;
++ exit-latency-us = <1000>;
++ min-residency-us = <3000>;
++ };
++ };
++
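++	/* no reg property: the boot loader fills in the real DRAM size */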
++ memory@80000000 {
++ device_type = "memory";
++ };
++
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "sysclk";
++ };
++
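++	/* syscon-reboot: writing 0x02 to DCFG offset 0xb0 resets the SoC */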
++ reboot {
++		compatible = "syscon-reboot";
++ regmap = <&dcfg>;
++ offset = <0xb0>;
++ mask = <0x02>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xf) |
++ IRQ_TYPE_LEVEL_LOW)>,
++ <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xf) |
++ IRQ_TYPE_LEVEL_LOW)>,
++ <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xf) |
++ IRQ_TYPE_LEVEL_LOW)>,
++ <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xf) |
++ IRQ_TYPE_LEVEL_LOW)>;
++ };
++
++ pmu {
++ compatible = "arm,cortex-a72-pmu";
++ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-affinity = <&cpu0>,
++ <&cpu1>,
++ <&cpu2>,
++ <&cpu3>;
++ };
++
++ gic: interrupt-controller@1400000 {
++ compatible = "arm,gic-400";
++ #interrupt-cells = <3>;
++ interrupt-controller;
++ reg = <0x0 0x1410000 0 0x10000>, /* GICD */
++ <0x0 0x1420000 0 0x20000>, /* GICC */
++ <0x0 0x1440000 0 0x20000>, /* GICH */
++ <0x0 0x1460000 0 0x20000>; /* GICV */
++ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_RAW(0xf) |
++ IRQ_TYPE_LEVEL_LOW)>;
++ };
++
++ soc: soc {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ ddr: memory-controller@1080000 {
++ compatible = "fsl,qoriq-memory-controller";
++ reg = <0x0 0x1080000 0x0 0x1000>;
++ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
++ big-endian;
++ };
++
++ ifc: ifc@1530000 {
++ compatible = "fsl,ifc", "simple-bus";
++ reg = <0x0 0x1530000 0x0 0x10000>;
++ big-endian;
++ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ qspi: quadspi@1550000 {
++ compatible = "fsl,ls1021a-qspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x1550000 0x0 0x10000>,
++ <0x0 0x40000000 0x0 0x10000000>;
++ reg-names = "QuadSPI", "QuadSPI-memory";
++ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "qspi_en", "qspi";
++ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
++ big-endian;
++ fsl,qspi-has-second-chip;
++ status = "disabled";
++ };
++
++ esdhc: esdhc@1560000 {
++ compatible = "fsl,ls1046a-esdhc", "fsl,esdhc";
++ reg = <0x0 0x1560000 0x0 0x10000>;
++ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 2 1>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ big-endian;
++ bus-width = <4>;
++ };
++
++ scfg: scfg@1570000 {
++ compatible = "fsl,ls1046a-scfg", "syscon";
++ reg = <0x0 0x1570000 0x0 0x10000>;
++ big-endian;
++ };
++
++ crypto: crypto@1700000 {
++ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
++ "fsl,sec-v4.0";
++ fsl,sec-era = <8>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x00 0x1700000 0x100000>;
++ reg = <0x00 0x1700000 0x0 0x100000>;
++ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++
++ sec_jr0: jr@10000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x10000 0x10000>;
++ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr1: jr@20000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x20000 0x10000>;
++ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr2: jr@30000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x30000 0x10000>;
++ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr3: jr@40000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x40000 0x10000>;
++ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
++ };
++ };
++
++ qman: qman@1880000 {
++ compatible = "fsl,qman";
++ reg = <0x00 0x1880000 0x0 0x10000>;
++ interrupts = <0 45 0x4>;
++ memory-region = <&qman_fqd &qman_pfdr>;
++ };
++
++ bman: bman@1890000 {
++ compatible = "fsl,bman";
++ reg = <0x00 0x1890000 0x0 0x10000>;
++ interrupts = <0 45 0x4>;
++ memory-region = <&bman_fbpr>;
++ };
++
++ qportals: qman-portals@500000000 {
++ ranges = <0x0 0x5 0x00000000 0x8000000>;
++ };
++
++ bportals: bman-portals@508000000 {
++ ranges = <0x0 0x5 0x08000000 0x8000000>;
++ };
++
++ dcfg: dcfg@1ee0000 {
++ compatible = "fsl,ls1046a-dcfg", "syscon";
++ reg = <0x0 0x1ee0000 0x0 0x1000>;
++ big-endian;
++ };
++
++ clockgen: clocking@1ee1000 {
++ compatible = "fsl,ls1046a-clockgen";
++ reg = <0x0 0x1ee1000 0x0 0x1000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
++ };
++
++ tmu: tmu@1f00000 {
++ compatible = "fsl,qoriq-tmu";
++ reg = <0x0 0x1f00000 0x0 0x10000>;
++ interrupts = <0 33 0x4>;
++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
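++			/* each calibration entry is an <index value> pair: the
++			 * upper half of index selects the range (group), the
++			 * lower half the point within it */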
++ fsl,tmu-calibration =
++ /* Calibration data group 1 */
++ <0x00000000 0x00000026
++ 0x00000001 0x0000002d
++ 0x00000002 0x00000032
++ 0x00000003 0x00000039
++ 0x00000004 0x0000003f
++ 0x00000005 0x00000046
++ 0x00000006 0x0000004d
++ 0x00000007 0x00000054
++ 0x00000008 0x0000005a
++ 0x00000009 0x00000061
++ 0x0000000a 0x0000006a
++ 0x0000000b 0x00000071
++ /* Calibration data group 2 */
++ 0x00010000 0x00000025
++ 0x00010001 0x0000002c
++ 0x00010002 0x00000035
++ 0x00010003 0x0000003d
++ 0x00010004 0x00000045
++ 0x00010005 0x0000004e
++ 0x00010006 0x00000057
++ 0x00010007 0x00000061
++ 0x00010008 0x0000006b
++ 0x00010009 0x00000076
++ /* Calibration data group 3 */
++ 0x00020000 0x00000029
++ 0x00020001 0x00000033
++ 0x00020002 0x0000003d
++ 0x00020003 0x00000049
++ 0x00020004 0x00000056
++ 0x00020005 0x00000061
++ 0x00020006 0x0000006d
++ /* Calibration data group 4 */
++ 0x00030000 0x00000021
++ 0x00030001 0x0000002a
++ 0x00030002 0x0000003c
++ 0x00030003 0x0000004e>;
++ big-endian;
++ #thermal-sensor-cells = <1>;
++ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 3>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ cpu_crit: cpu-crit {
++ temperature = <95000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++ };
++
++ dspi: dspi@2100000 {
++ compatible = "fsl,ls1021a-v1.0-dspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2100000 0x0 0x10000>;
++ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "dspi";
++ clocks = <&clockgen 4 1>;
++ spi-num-chipselects = <5>;
++ big-endian;
++ status = "disabled";
++ };
++
++ i2c0: i2c@2180000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2180000 0x0 0x10000>;
++ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ dmas = <&edma0 1 39>,
++ <&edma0 1 38>;
++ dma-names = "tx", "rx";
++ status = "disabled";
++ };
++
++ i2c1: i2c@2190000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2190000 0x0 0x10000>;
++ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ status = "disabled";
++ };
++
++ i2c2: i2c@21a0000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x21a0000 0x0 0x10000>;
++ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ status = "disabled";
++ };
++
++ i2c3: i2c@21b0000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x21b0000 0x0 0x10000>;
++ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ status = "disabled";
++ };
++
++ duart0: serial@21c0500 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x00 0x21c0500 0x0 0x100>;
++ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ };
++
++ duart1: serial@21c0600 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x00 0x21c0600 0x0 0x100>;
++ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ };
++
++ duart2: serial@21d0500 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21d0500 0x0 0x100>;
++ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ };
++
++ duart3: serial@21d0600 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21d0600 0x0 0x100>;
++ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ };
++
++ gpio0: gpio@2300000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2300000 0x0 0x10000>;
++ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio1: gpio@2310000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2310000 0x0 0x10000>;
++ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio2: gpio@2320000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2320000 0x0 0x10000>;
++ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio3: gpio@2330000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2330000 0x0 0x10000>;
++ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ lpuart0: serial@2950000 {
++ compatible = "fsl,ls1021a-lpuart";
++ reg = <0x0 0x2950000 0x0 0x1000>;
++ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 0>;
++ clock-names = "ipg";
++ status = "disabled";
++ };
++
++ lpuart1: serial@2960000 {
++ compatible = "fsl,ls1021a-lpuart";
++ reg = <0x0 0x2960000 0x0 0x1000>;
++ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ clock-names = "ipg";
++ status = "disabled";
++ };
++
++ lpuart2: serial@2970000 {
++ compatible = "fsl,ls1021a-lpuart";
++ reg = <0x0 0x2970000 0x0 0x1000>;
++ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ clock-names = "ipg";
++ status = "disabled";
++ };
++
++ lpuart3: serial@2980000 {
++ compatible = "fsl,ls1021a-lpuart";
++ reg = <0x0 0x2980000 0x0 0x1000>;
++ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ clock-names = "ipg";
++ status = "disabled";
++ };
++
++ lpuart4: serial@2990000 {
++ compatible = "fsl,ls1021a-lpuart";
++ reg = <0x0 0x2990000 0x0 0x1000>;
++ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ clock-names = "ipg";
++ status = "disabled";
++ };
++
++ lpuart5: serial@29a0000 {
++ compatible = "fsl,ls1021a-lpuart";
++ reg = <0x0 0x29a0000 0x0 0x1000>;
++ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ clock-names = "ipg";
++ status = "disabled";
++ };
++
++ ftm0: ftm0@29d0000 {
++ compatible = "fsl,ftm-alarm";
++ reg = <0x0 0x29d0000 0x0 0x10000>,
++ <0x0 0x1ee2140 0x0 0x4>;
++ reg-names = "ftm", "FlexTimer1";
++ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
++ big-endian;
++ };
++
++ wdog0: watchdog@2ad0000 {
++ compatible = "fsl,imx21-wdt";
++ reg = <0x0 0x2ad0000 0x0 0x10000>;
++ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ big-endian;
++ };
++
++ edma0: edma@2c00000 {
++ #dma-cells = <2>;
++ compatible = "fsl,vf610-edma";
++ reg = <0x0 0x2c00000 0x0 0x10000>,
++ <0x0 0x2c10000 0x0 0x10000>,
++ <0x0 0x2c20000 0x0 0x10000>;
++ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "edma-tx", "edma-err";
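++			/* transfer-complete and error events share SPI 103 */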
++ dma-channels = <32>;
++ big-endian;
++ clock-names = "dmamux0", "dmamux1";
++ clocks = <&clockgen 4 1>,
++ <&clockgen 4 1>;
++ };
++
++ usb0: usb@2f00000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x2f00000 0x0 0x10000>;
++ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ usb1: usb@3000000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3000000 0x0 0x10000>;
++ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ usb2: usb@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ sata: sata@3200000 {
++ compatible = "fsl,ls1046a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>,
++ <0x0 0x20140520 0x0 0x4>;
++ reg-names = "ahci", "sata-ecc";
++ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ dma-coherent;
++ };
++
++ qdma: qdma@8380000 {
++ compatible = "fsl,ls1046a-qdma", "fsl,ls1021a-qdma";
++ reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
++ <0x0 0x8390000 0x0 0x10000>, /* Status regs */
++ <0x0 0x83a0000 0x0 0x40000>; /* Block regs */
++ interrupts = <0 153 0x4>,
++ <0 39 0x4>;
++ interrupt-names = "qdma-error", "qdma-queue";
++ channels = <8>;
++ queues = <2>;
++ status-sizes = <64>;
++ queue-sizes = <64 64>;
++ big-endian;
++ };
++
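++		/* three MSI banks; each PCIe controller lists all of them as
++		 * msi-parent so MSIs can be spread across the banks */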
++ msi1: msi-controller@1580000 {
++ compatible = "fsl,ls1046a-msi";
++ msi-controller;
++ reg = <0x0 0x1580000 0x0 0x10000>;
++ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ msi2: msi-controller@1590000 {
++ compatible = "fsl,ls1046a-msi";
++ msi-controller;
++ reg = <0x0 0x1590000 0x0 0x10000>;
++ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ msi3: msi-controller@15a0000 {
++ compatible = "fsl,ls1046a-msi";
++ msi-controller;
++ reg = <0x0 0x15a0000 0x0 0x10000>;
++ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ pcie@3400000 {
++ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
++ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
++ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
++ interrupt-names = "pme", "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&msi1>, <&msi2>, <&msi3>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
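++			/* mask keeps only the INTx pin, so all legacy interrupts
++			 * behind this root port funnel into one GIC SPI */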
++ interrupt-map = <0000 0 0 1 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ pcie@3500000 {
++ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
++ 0x48 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "pme", "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <2>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&msi1>, <&msi2>, <&msi3>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ pcie@3600000 {
++ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
++ 0x50 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "pme", "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <2>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&msi1>, <&msi2>, <&msi3>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ };
++
++ reserved-memory {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ bman_fbpr: bman-fbpr {
++ compatible = "shared-dma-pool";
++ size = <0 0x1000000>;
++ alignment = <0 0x1000000>;
++ no-map;
++ };
++ qman_fqd: qman-fqd {
++ compatible = "shared-dma-pool";
++ size = <0 0x800000>;
++ alignment = <0 0x800000>;
++ no-map;
++ };
++ qman_pfdr: qman-pfdr {
++ compatible = "shared-dma-pool";
++ size = <0 0x2000000>;
++ alignment = <0 0x2000000>;
++ no-map;
++ };
++ };
++};
++
++#include "qoriq-qman1-portals.dtsi"
++#include "qoriq-bman1-portals.dtsi"
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+new file mode 100644
+index 00000000..f61ec261
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+@@ -0,0 +1,173 @@
++/*
++ * Device Tree file for NXP LS1088A QDS Board.
++ *
++ * Copyright 2017 NXP
++ *
++ * Harninder Rai <harninder.rai@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls1088a.dtsi"
++
++/ {
++ model = "LS1088A QDS Board";
++ compatible = "fsl,ls1088a-qds", "fsl,ls1088a";
++};
++
++&i2c0 {
++ status = "okay";
++
++ i2c-switch@77 {
++ compatible = "nxp,pca9547";
++ reg = <0x77>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ i2c@2 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x2>;
++
++ ina220@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <1000>;
++ };
++
++ ina220@41 {
++ compatible = "ti,ina220";
++ reg = <0x41>;
++ shunt-resistor = <1000>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ temp-sensor@4c {
++ compatible = "adi,adt7461a";
++ reg = <0x4c>;
++ };
++
++ rtc@51 {
++ compatible = "nxp,pcf2129";
++ reg = <0x51>;
++ /* IRQ10_B */
++ interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ eeprom@56 {
++ compatible = "atmel,24c512";
++ reg = <0x56>;
++ };
++
++ eeprom@57 {
++ compatible = "atmel,24c512";
++ reg = <0x57>;
++ };
++ };
++ };
++};
++
++&qspi {
++ status = "okay";
++ qflash0: s25fs512s@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ m25p,fast-read;
++ reg = <0>;
++ };
++
++ qflash1: s25fs512s@1 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ m25p,fast-read;
++ reg = <1>;
++ };
++};
++
++&ifc {
++ status = "okay";
++
++ ranges = <0 0 0x5 0x80000000 0x08000000
++ 2 0 0x5 0x30000000 0x00010000
++ 3 0 0x5 0x20000000 0x00010000>;
++
++ nor@0,0 {
++ compatible = "cfi-flash";
++ reg = <0x0 0x0 0x8000000>;
++ bank-width = <2>;
++ device-width = <1>;
++ };
++
++ nand@2,0 {
++ compatible = "fsl,ifc-nand";
++ reg = <0x2 0x0 0x10000>;
++ };
++
++ fpga: board-control@3,0 {
++ compatible = "fsl,ls1088aqds-fpga", "fsl,fpga-qixis";
++ reg = <0x3 0x0 0x0000100>;
++ };
++};
++
++&duart0 {
++ status = "okay";
++};
++
++&duart1 {
++ status = "okay";
++};
++
++&esdhc {
++ status = "okay";
++};
++
++&sata {
++ status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+new file mode 100644
+index 00000000..a4cbc2d5
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+@@ -0,0 +1,236 @@
++/*
++ * Device Tree file for NXP LS1088A RDB Board.
++ *
++ * Copyright 2017 NXP
++ *
++ * Harninder Rai <harninder.rai@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls1088a.dtsi"
++
++/ {
++	model = "LS1088A RDB Board";
++ compatible = "fsl,ls1088a-rdb", "fsl,ls1088a";
++};
++
++&i2c0 {
++ status = "okay";
++
++ i2c-switch@77 {
++ compatible = "nxp,pca9547";
++ reg = <0x77>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ i2c@2 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x2>;
++
++ ina220@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <1000>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ temp-sensor@4c {
++ compatible = "adi,adt7461a";
++ reg = <0x4c>;
++ };
++
++ rtc@51 {
++ compatible = "nxp,pcf2129";
++ reg = <0x51>;
++ /* IRQ10_B */
++ interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
++ };
++ };
++ };
++};
++
++&qspi {
++ status = "okay";
++ qflash0: s25fs512s@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ m25p,fast-read;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++
++ qflash1: s25fs512s@1 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ m25p,fast-read;
++ spi-max-frequency = <20000000>;
++ reg = <1>;
++ };
++};
++
++&ifc {
++ status = "okay";
++
++ ranges = <0 0 0x5 0x30000000 0x00010000
++ 2 0 0x5 0x20000000 0x00010000>;
++
++ nand@0,0 {
++ compatible = "fsl,ifc-nand";
++ reg = <0x0 0x0 0x10000>;
++ };
++
++ fpga: board-control@2,0 {
++ compatible = "fsl,ls1088ardb-fpga", "fsl,fpga-qixis";
++ reg = <0x2 0x0 0x0000100>;
++ };
++};
++
++&duart0 {
++ status = "okay";
++};
++
++&duart1 {
++ status = "okay";
++};
++
++&usb0 {
++ status = "okay";
++};
++
++&usb1 {
++ status = "okay";
++};
++
++&esdhc {
++ status = "okay";
++};
++
++&sata {
++ status = "okay";
++};
++
++&emdio1 {
++ /* Freescale F104 PHY1 */
++ mdio1_phy1: emdio1_phy@1 {
++ reg = <0x1c>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy2: emdio1_phy@2 {
++ reg = <0x1d>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy3: emdio1_phy@3 {
++ reg = <0x1e>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy4: emdio1_phy@4 {
++ reg = <0x1f>;
++ phy-connection-type = "qsgmii";
++ };
++ /* F104 PHY2 */
++ mdio1_phy5: emdio1_phy@5 {
++ reg = <0x0c>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy6: emdio1_phy@6 {
++ reg = <0x0d>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy7: emdio1_phy@7 {
++ reg = <0x0e>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy8: emdio1_phy@8 {
++ reg = <0x0f>;
++ phy-connection-type = "qsgmii";
++ };
++};
++
++&emdio2 {
++ /* Aquantia AQR105 10G PHY */
++ mdio2_phy1: emdio2_phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 2 0x4>;
++ reg = <0x0>;
++ phy-connection-type = "xfi";
++ };
++};
++
++/* DPMAC connections to external PHYs
++ * based on LS1088A RM RevC - §24.1.2 SerDes Options
++ */
++/* DPMAC1 is 10G SFP+, fixed link */
++&dpmac2 {
++ phy-handle = <&mdio2_phy1>;
++};
++&dpmac3 {
++ phy-handle = <&mdio1_phy5>;
++};
++&dpmac4 {
++ phy-handle = <&mdio1_phy6>;
++};
++&dpmac5 {
++ phy-handle = <&mdio1_phy7>;
++};
++&dpmac6 {
++ phy-handle = <&mdio1_phy8>;
++};
++&dpmac7 {
++ phy-handle = <&mdio1_phy1>;
++};
++&dpmac8 {
++ phy-handle = <&mdio1_phy2>;
++};
++&dpmac9 {
++ phy-handle = <&mdio1_phy3>;
++};
++&dpmac10 {
++ phy-handle = <&mdio1_phy4>;
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+new file mode 100644
+index 00000000..14585ab2
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+@@ -0,0 +1,816 @@
++/*
++ * Device Tree Include file for NXP Layerscape-1088A family SoC.
++ *
++ * Copyright 2017 NXP
++ *
++ * Harninder Rai <harninder.rai@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/thermal/thermal.h>
++
++/ {
++ compatible = "fsl,ls1088a";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ aliases {
++ crypto = &crypto;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ /* We have 2 clusters having 4 Cortex-A53 cores each */
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0>;
++ clocks = <&clockgen 1 0>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x1>;
++ clocks = <&clockgen 1 0>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu2: cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x2>;
++ clocks = <&clockgen 1 0>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu3: cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x3>;
++ clocks = <&clockgen 1 0>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu4: cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x100>;
++ clocks = <&clockgen 1 1>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu5: cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x101>;
++ clocks = <&clockgen 1 1>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu6: cpu@102 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x102>;
++ clocks = <&clockgen 1 1>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu7: cpu@103 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x103>;
++ clocks = <&clockgen 1 1>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++ };
++
++ idle-states {
++ /*
++		 * PSCI node is not added by default; U-Boot will add the
++		 * missing parts if it determines to use PSCI.
++ */
++ entry-method = "arm,psci";
++
++ CPU_PH20: cpu-ph20 {
++ compatible = "arm,idle-state";
++ idle-state-name = "PH20";
++ arm,psci-suspend-param = <0x0>;
++ entry-latency-us = <1000>;
++ exit-latency-us = <1000>;
++ min-residency-us = <3000>;
++ };
++ };
++
++ gic: interrupt-controller@6000000 {
++ compatible = "arm,gic-v3";
++ #interrupt-cells = <3>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ interrupt-controller;
++ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
++		      <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */
++ <0x0 0x0c0c0000 0 0x2000>, /* GICC */
++ <0x0 0x0c0d0000 0 0x1000>, /* GICH */
++ <0x0 0x0c0e0000 0 0x20000>; /* GICV */
++ interrupts = <1 9 IRQ_TYPE_LEVEL_HIGH>;
++
++ its: gic-its@6020000 {
++ compatible = "arm,gic-v3-its";
++ msi-controller;
++ reg = <0x0 0x6020000 0 0x20000>;
++ };
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <1 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */
++ <1 14 IRQ_TYPE_LEVEL_LOW>,/* Physical Non-Secure PPI */
++ <1 11 IRQ_TYPE_LEVEL_LOW>,/* Virtual PPI */
++ <1 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */
++ };
++
++ fsl_mc: fsl-mc@80c000000 {
++ compatible = "fsl,qoriq-mc";
++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
++ #address-cells = <3>;
++ #size-cells = <1>;
++
++ /*
++ * Region type 0x0 - MC portals
++ * Region type 0x1 - QBMAN portals
++ */
++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
++
++ dpmacs {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dpmac1: dpmac@1 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <1>;
++ };
++ dpmac2: dpmac@2 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <2>;
++ };
++ dpmac3: dpmac@3 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <3>;
++ };
++ dpmac4: dpmac@4 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <4>;
++ };
++ dpmac5: dpmac@5 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <5>;
++ };
++ dpmac6: dpmac@6 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <6>;
++ };
++ dpmac7: dpmac@7 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <7>;
++ };
++ dpmac8: dpmac@8 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <8>;
++ };
++ dpmac9: dpmac@9 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <9>;
++ };
++ dpmac10: dpmac@10 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xa>;
++ };
++ };
++
++ };
++
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "sysclk";
++ };
++
++ dcfg: dcfg@1e00000 {
++ compatible = "fsl,ls1088a-dcfg", "syscon";
++ reg = <0x0 0x1e00000 0x0 0x10000>;
++ little-endian;
++ };
++
++ rstcr: syscon@1e60000 {
++ compatible = "fsl,ls1088a-rstcr", "syscon";
++ reg = <0x0 0x1e60000 0x0 0x4>;
++ };
++
++ reboot {
++ compatible = "syscon-reboot";
++ regmap = <&rstcr>;
++ offset = <0x0>;
++ mask = <0x02>;
++ };
++
++
++ soc {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ clockgen: clocking@1300000 {
++ compatible = "fsl,ls1088a-clockgen";
++ reg = <0 0x1300000 0 0xa0000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
++ };
++
++ tmu: tmu@1f80000 {
++ compatible = "fsl,qoriq-tmu";
++ reg = <0x0 0x1f80000 0x0 0x10000>;
++ interrupts = <0 23 0x4>;
++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
++ fsl,tmu-calibration =
++ /* Calibration data group 1 */
++ <0x00000000 0x00000026
++ 0x00000001 0x0000002d
++ 0x00000002 0x00000032
++ 0x00000003 0x00000039
++ 0x00000004 0x0000003f
++ 0x00000005 0x00000046
++ 0x00000006 0x0000004d
++ 0x00000007 0x00000054
++ 0x00000008 0x0000005a
++ 0x00000009 0x00000061
++ 0x0000000a 0x0000006a
++ 0x0000000b 0x00000071
++ /* Calibration data group 2 */
++ 0x00010000 0x00000025
++ 0x00010001 0x0000002c
++ 0x00010002 0x00000035
++ 0x00010003 0x0000003d
++ 0x00010004 0x00000045
++ 0x00010005 0x0000004e
++ 0x00010006 0x00000057
++ 0x00010007 0x00000061
++ 0x00010008 0x0000006b
++ 0x00010009 0x00000076
++ /* Calibration data group 3 */
++ 0x00020000 0x00000029
++ 0x00020001 0x00000033
++ 0x00020002 0x0000003d
++ 0x00020003 0x00000049
++ 0x00020004 0x00000056
++ 0x00020005 0x00000061
++ 0x00020006 0x0000006d
++ /* Calibration data group 4 */
++ 0x00030000 0x00000021
++ 0x00030001 0x0000002a
++ 0x00030002 0x0000003c
++ 0x00030003 0x0000004e>;
++ little-endian;
++ #thermal-sensor-cells = <1>;
++ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 0>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ cpu_crit: cpu-crit {
++ temperature = <95000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map1 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu4 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++ };
++
++ duart0: serial@21c0500 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0500 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ duart1: serial@21c0600 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0600 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ cluster1_core0_watchdog: wdt@c000000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc000000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster1_core1_watchdog: wdt@c010000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc010000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster1_core2_watchdog: wdt@c020000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc020000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster1_core3_watchdog: wdt@c030000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc030000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core0_watchdog: wdt@c100000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc100000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core1_watchdog: wdt@c110000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc110000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core2_watchdog: wdt@c120000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc120000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core3_watchdog: wdt@c130000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc130000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ gpio0: gpio@2300000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2300000 0x0 0x10000>;
++ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio1: gpio@2310000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2310000 0x0 0x10000>;
++ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio2: gpio@2320000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2320000 0x0 0x10000>;
++ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio3: gpio@2330000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2330000 0x0 0x10000>;
++ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ /* TODO: WRIOP (CCSR?) */
++ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000,
++ * E-MDIO1: 0x1_6000
++ */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B96000 0x0 0x1000>;
++ device_type = "mdio";
++			little-endian;	/* force the driver into LE mode */
++
++ /* Not necessary on the QDS, but needed on the RDB */
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000,
++ * E-MDIO2: 0x1_7000
++ */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B97000 0x0 0x1000>;
++ device_type = "mdio";
++			little-endian;	/* force the driver into LE mode */
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ ifc: ifc@2240000 {
++ compatible = "fsl,ifc", "simple-bus";
++ reg = <0x0 0x2240000 0x0 0x20000>;
++ interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
++ little-endian;
++ #address-cells = <2>;
++ #size-cells = <1>;
++
++ };
++
++ ftm0: ftm0@2800000 {
++ compatible = "fsl,ftm-alarm";
++ reg = <0x0 0x2800000 0x0 0x10000>;
++ interrupts = <0 44 4>;
++ };
++
++ i2c0: i2c@2000000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2000000 0x0 0x10000>;
++ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>;
++ status = "disabled";
++ };
++
++ i2c1: i2c@2010000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2010000 0x0 0x10000>;
++ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>;
++ status = "disabled";
++ };
++
++ i2c2: i2c@2020000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2020000 0x0 0x10000>;
++ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>;
++ status = "disabled";
++ };
++
++ i2c3: i2c@2030000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2030000 0x0 0x10000>;
++ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>;
++ status = "disabled";
++ };
++
++ qspi: quadspi@20c0000 {
++ compatible = "fsl,ls2080a-qspi", "fsl,ls1088a-qspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x20c0000 0x0 0x10000>,
++ <0x0 0x20000000 0x0 0x10000000>;
++ reg-names = "QuadSPI", "QuadSPI-memory";
++ interrupts = <0 25 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "qspi_en", "qspi";
++ fsl,qspi-has-second-chip;
++ };
++
++ esdhc: esdhc@2140000 {
++ compatible = "fsl,ls1088a-esdhc", "fsl,esdhc";
++ reg = <0x0 0x2140000 0x0 0x10000>;
++ interrupts = <0 28 0x4>; /* Level high type */
++ clock-frequency = <0>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ little-endian;
++ bus-width = <4>;
++ status = "disabled";
++ };
++
++ usb0: usb3@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <0 80 0x4>; /* Level high type */
++ dr_mode = "host";
++ configure-gfladj;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ usb1: usb3@3110000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3110000 0x0 0x10000>;
++ interrupts = <0 81 0x4>; /* Level high type */
++ dr_mode = "host";
++ configure-gfladj;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ sata: sata@3200000 {
++ compatible = "fsl,ls1088a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>,
++ <0x7 0x100520 0x0 0x4>;
++ reg-names = "ahci", "sata-ecc";
++ interrupts = <0 133 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>;
++ dma-coherent;
++ status = "disabled";
++ };
++
++ pcie@3400000 {
++ compatible = "fsl,ls2088a-pcie", "fsl,ls1088a-pcie",
++ "snps,dw-pcie";
++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
++ 0x20 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 0 110 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 0 111 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 0 112 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ pcie@3500000 {
++ compatible = "fsl,ls2088a-pcie", "fsl,ls1088a-pcie",
++ "snps,dw-pcie";
++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
++ 0x28 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 0 115 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 0 116 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 0 117 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ pcie@3600000 {
++ compatible = "fsl,ls2088a-pcie", "fsl,ls1088a-pcie",
++ "snps,dw-pcie";
++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
++ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 0 120 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 0 121 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 0 122 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ smmu: iommu@5000000 {
++ compatible = "arm,mmu-500";
++ reg = <0 0x5000000 0 0x800000>;
++ #global-interrupts = <12>;
++ #iommu-cells = <1>;
++ stream-match-mask = <0x7C00>;
++ interrupts = <0 13 4>, /* global secure fault */
++ <0 14 4>, /* combined secure interrupt */
++ <0 15 4>, /* global non-secure fault */
++ <0 16 4>, /* combined non-secure interrupt */
++ /* performance counter interrupts 0-7 */
++ <0 211 4>,
++ <0 212 4>,
++ <0 213 4>,
++ <0 214 4>,
++ <0 215 4>,
++ <0 216 4>,
++ <0 217 4>,
++ <0 218 4>,
++ /* per context interrupt, 64 interrupts */
++ <0 146 4>,
++ <0 147 4>,
++ <0 148 4>,
++ <0 149 4>,
++ <0 150 4>,
++ <0 151 4>,
++ <0 152 4>,
++ <0 153 4>,
++ <0 154 4>,
++ <0 155 4>,
++ <0 156 4>,
++ <0 157 4>,
++ <0 158 4>,
++ <0 159 4>,
++ <0 160 4>,
++ <0 161 4>,
++ <0 162 4>,
++ <0 163 4>,
++ <0 164 4>,
++ <0 165 4>,
++ <0 166 4>,
++ <0 167 4>,
++ <0 168 4>,
++ <0 169 4>,
++ <0 170 4>,
++ <0 171 4>,
++ <0 172 4>,
++ <0 173 4>,
++ <0 174 4>,
++ <0 175 4>,
++ <0 176 4>,
++ <0 177 4>,
++ <0 178 4>,
++ <0 179 4>,
++ <0 180 4>,
++ <0 181 4>,
++ <0 182 4>,
++ <0 183 4>,
++ <0 184 4>,
++ <0 185 4>,
++ <0 186 4>,
++ <0 187 4>,
++ <0 188 4>,
++ <0 189 4>,
++ <0 190 4>,
++ <0 191 4>,
++ <0 192 4>,
++ <0 193 4>,
++ <0 194 4>,
++ <0 195 4>,
++ <0 196 4>,
++ <0 197 4>,
++ <0 198 4>,
++ <0 199 4>,
++ <0 200 4>,
++ <0 201 4>,
++ <0 202 4>,
++ <0 203 4>,
++ <0 204 4>,
++ <0 205 4>,
++ <0 206 4>,
++ <0 207 4>,
++ <0 208 4>,
++ <0 209 4>;
++ };
++
++ crypto: crypto@8000000 {
++ compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
++ fsl,sec-era = <8>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x00 0x8000000 0x100000>;
++ reg = <0x00 0x8000000 0x0 0x100000>;
++ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
++ dma-coherent;
++
++ sec_jr0: jr@10000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x10000 0x10000>;
++ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr1: jr@20000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x20000 0x10000>;
++ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr2: jr@30000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x30000 0x10000>;
++ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr3: jr@40000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x40000 0x10000>;
++ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
++ };
++ };
++ };
++
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
+index b0dd0109..ba1a79dd 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
+@@ -1,8 +1,10 @@
+ /*
+ * Device Tree file for Freescale LS2080a QDS Board.
+ *
+- * Copyright (C) 2015, Freescale Semiconductor
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
+ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ * Bhupesh Sharma <bhupesh.sharma@freescale.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+@@ -46,169 +48,76 @@
+
+ /dts-v1/;
+
+-/include/ "fsl-ls2080a.dtsi"
++#include "fsl-ls2080a.dtsi"
++#include "fsl-ls208xa-qds.dtsi"
+
+ / {
+ model = "Freescale Layerscape 2080a QDS Board";
+ compatible = "fsl,ls2080a-qds", "fsl,ls2080a";
+
+- aliases {
+- serial0 = &serial0;
+- serial1 = &serial1;
+- };
+-
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+ };
+
+-&esdhc {
+- status = "okay";
+-};
+-
+ &ifc {
+- status = "okay";
+- #address-cells = <2>;
+- #size-cells = <1>;
+- ranges = <0x0 0x0 0x5 0x80000000 0x08000000
+- 0x2 0x0 0x5 0x30000000 0x00010000
+- 0x3 0x0 0x5 0x20000000 0x00010000>;
+-
+- nor@0,0 {
++ boardctrl: board-control@3,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+- compatible = "cfi-flash";
+- reg = <0x0 0x0 0x8000000>;
+- bank-width = <2>;
+- device-width = <1>;
+- };
++ compatible = "fsl,tetra-fpga", "fsl,fpga-qixis", "simple-bus";
++ reg = <3 0 0x300>; /* TODO check address */
++ ranges = <0 3 0 0x300>;
+
+- nand@2,0 {
+- compatible = "fsl,ifc-nand";
+- reg = <0x2 0x0 0x10000>;
+- };
++ mdio_mux_emi1 {
++ compatible = "mdio-mux-mmioreg", "mdio-mux";
++ mdio-parent-bus = <&emdio1>;
++ reg = <0x54 1>; /* BRDCFG4 */
++ mux-mask = <0xe0>; /* EMI1_MDIO */
+
+- cpld@3,0 {
+- reg = <0x3 0x0 0x10000>;
+- compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis";
+- };
+-};
+-
+-&i2c0 {
+- status = "okay";
+- pca9547@77 {
+- compatible = "nxp,pca9547";
+- reg = <0x77>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- i2c@0 {
+- #address-cells = <1>;
++			#address-cells = <1>;
+ #size-cells = <0>;
+- reg = <0x00>;
+- rtc@68 {
+- compatible = "dallas,ds3232";
+- reg = <0x68>;
+- };
+- };
+
+- i2c@2 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x02>;
+-
+- ina220@40 {
+- compatible = "ti,ina220";
+- reg = <0x40>;
+- shunt-resistor = <500>;
+- };
+-
+- ina220@41 {
+- compatible = "ti,ina220";
+- reg = <0x41>;
+- shunt-resistor = <1000>;
+- };
+- };
+-
+- i2c@3 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x3>;
+-
+- adt7481@4c {
+- compatible = "adi,adt7461";
+- reg = <0x4c>;
++ /* Child MDIO buses, one for each riser card:
++ * reg = 0x0, 0x20, 0x40, 0x60, 0x80, 0xa0.
++ * VSC8234 PHYs on the riser cards.
++ */
++
++ mdio_mux3: mdio@60 {
++ reg = <0x60>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ mdio0_phy12: mdio_phy0@1c {
++ reg = <0x1c>;
++ phy-connection-type = "sgmii";
++ };
++ mdio0_phy13: mdio_phy1@1d {
++ reg = <0x1d>;
++ phy-connection-type = "sgmii";
++ };
++ mdio0_phy14: mdio_phy2@1e {
++ reg = <0x1e>;
++ phy-connection-type = "sgmii";
++ };
++ mdio0_phy15: mdio_phy3@1f {
++ reg = <0x1f>;
++ phy-connection-type = "sgmii";
++ };
+ };
+ };
+ };
+ };
+
+-&i2c1 {
+- status = "disabled";
+-};
+-
+-&i2c2 {
+- status = "disabled";
+-};
+-
+-&i2c3 {
+- status = "disabled";
+-};
+-
+-&dspi {
+- status = "okay";
+- dflash0: n25q128a {
+- #address-cells = <1>;
+- #size-cells = <1>;
+- compatible = "st,m25p80";
+- spi-max-frequency = <3000000>;
+- reg = <0>;
+- };
+- dflash1: sst25wf040b {
+- #address-cells = <1>;
+- #size-cells = <1>;
+- compatible = "st,m25p80";
+- spi-max-frequency = <3000000>;
+- reg = <1>;
+- };
+- dflash2: en25s64 {
+- #address-cells = <1>;
+- #size-cells = <1>;
+- compatible = "st,m25p80";
+- spi-max-frequency = <3000000>;
+- reg = <2>;
+- };
+-};
+-
+-&qspi {
+- status = "okay";
+- flash0: s25fl256s1@0 {
+- #address-cells = <1>;
+- #size-cells = <1>;
+- compatible = "st,m25p80";
+- spi-max-frequency = <20000000>;
+- reg = <0>;
+- };
+- flash2: s25fl256s1@2 {
+- #address-cells = <1>;
+- #size-cells = <1>;
+- compatible = "st,m25p80";
+- spi-max-frequency = <20000000>;
+- reg = <0>;
+- };
+-};
+-
+-&sata0 {
+- status = "okay";
++/* Update DPMAC connections to external PHYs, under SerDes 0x2a_0x49. */
++&dpmac9 {
++ phy-handle = <&mdio0_phy12>;
+ };
+-
+-&sata1 {
+- status = "okay";
++&dpmac10 {
++ phy-handle = <&mdio0_phy13>;
+ };
+-
+-&usb0 {
+- status = "okay";
++&dpmac11 {
++ phy-handle = <&mdio0_phy14>;
+ };
+-
+-&usb1 {
+- status = "okay";
++&dpmac12 {
++ phy-handle = <&mdio0_phy15>;
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
+index ad0ebb8a..025f0f54 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
+@@ -1,8 +1,10 @@
+ /*
+ * Device Tree file for Freescale LS2080a RDB Board.
+ *
+- * Copyright (C) 2015, Freescale Semiconductor
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
+ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ * Bhupesh Sharma <bhupesh.sharma@freescale.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+@@ -46,125 +48,94 @@
+
+ /dts-v1/;
+
+-/include/ "fsl-ls2080a.dtsi"
++#include "fsl-ls2080a.dtsi"
++#include "fsl-ls208xa-rdb.dtsi"
+
+ / {
+ model = "Freescale Layerscape 2080a RDB Board";
+ compatible = "fsl,ls2080a-rdb", "fsl,ls2080a";
+
+- aliases {
+- serial0 = &serial0;
+- serial1 = &serial1;
+- };
+-
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+ };
+
+-&esdhc {
+- status = "okay";
+-};
+-
+-&ifc {
+- status = "okay";
+- #address-cells = <2>;
+- #size-cells = <1>;
+- ranges = <0x0 0x0 0x5 0x80000000 0x08000000
+- 0x2 0x0 0x5 0x30000000 0x00010000
+- 0x3 0x0 0x5 0x20000000 0x00010000>;
+-
+- nor@0,0 {
+- #address-cells = <1>;
+- #size-cells = <1>;
+- compatible = "cfi-flash";
+- reg = <0x0 0x0 0x8000000>;
+- bank-width = <2>;
+- device-width = <1>;
++&emdio1 {
++ status = "disabled";
++ /* CS4340 PHYs */
++ mdio1_phy1: emdio1_phy@1 {
++ reg = <0x10>;
++ phy-connection-type = "xfi";
+ };
+-
+- nand@2,0 {
+- compatible = "fsl,ifc-nand";
+- reg = <0x2 0x0 0x10000>;
++ mdio1_phy2: emdio1_phy@2 {
++ reg = <0x11>;
++ phy-connection-type = "xfi";
+ };
+-
+- cpld@3,0 {
+- reg = <0x3 0x0 0x10000>;
+- compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis";
++ mdio1_phy3: emdio1_phy@3 {
++ reg = <0x12>;
++ phy-connection-type = "xfi";
+ };
+-
+-};
+-
+-&i2c0 {
+- status = "okay";
+- pca9547@75 {
+- compatible = "nxp,pca9547";
+- reg = <0x75>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- status = "disabled";
+- i2c@1 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x01>;
+- rtc@68 {
+- compatible = "dallas,ds3232";
+- reg = <0x68>;
+- };
+- };
+-
+- i2c@3 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x3>;
+-
+- adt7481@4c {
+- compatible = "adi,adt7461";
+- reg = <0x4c>;
+- };
+- };
++ mdio1_phy4: emdio1_phy@4 {
++ reg = <0x13>;
++ phy-connection-type = "xfi";
+ };
+ };
+
+-&i2c1 {
+- status = "disabled";
+-};
+-
+-&i2c2 {
+- status = "disabled";
+-};
+-
+-&i2c3 {
+- status = "disabled";
+-};
+-
+-&dspi {
+- status = "okay";
+- dflash0: n25q512a {
+- #address-cells = <1>;
+- #size-cells = <1>;
+- compatible = "st,m25p80";
+- spi-max-frequency = <3000000>;
+- reg = <0>;
++&emdio2 {
++ /* AQR405 PHYs */
++ mdio2_phy1: emdio2_phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 1 0x4>; /* Level high type */
++ reg = <0x0>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy2: emdio2_phy@2 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 2 0x4>; /* Level high type */
++ reg = <0x1>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy3: emdio2_phy@3 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 4 0x4>; /* Level high type */
++ reg = <0x2>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy4: emdio2_phy@4 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 5 0x4>; /* Level high type */
++ reg = <0x3>;
++ phy-connection-type = "xfi";
+ };
+ };
+
+-&qspi {
+- status = "disabled";
+-};
++/* Update DPMAC connections to external PHYs, under the assumption of
++ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board.
++ */
++/* Leave Cortina nodes commented out until driver is integrated
++ *&dpmac1 {
++ * phy-handle = <&mdio1_phy1>;
++ *};
++ *&dpmac2 {
++ * phy-handle = <&mdio1_phy2>;
++ *};
++ *&dpmac3 {
++ * phy-handle = <&mdio1_phy3>;
++ *};
++ *&dpmac4 {
++ * phy-handle = <&mdio1_phy4>;
++ *};
++ */
+
+-&sata0 {
+- status = "okay";
++&dpmac5 {
++ phy-handle = <&mdio2_phy1>;
+ };
+-
+-&sata1 {
+- status = "okay";
++&dpmac6 {
++ phy-handle = <&mdio2_phy2>;
+ };
+-
+-&usb0 {
+- status = "okay";
++&dpmac7 {
++ phy-handle = <&mdio2_phy3>;
+ };
+-
+-&usb1 {
+- status = "okay";
++&dpmac8 {
++ phy-handle = <&mdio2_phy4>;
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
+index 505d0380..fbbb73e5 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
+@@ -1,7 +1,7 @@
+ /*
+ * Device Tree file for Freescale LS2080a software Simulator model
+ *
+- * Copyright (C) 2014-2015, Freescale Semiconductor
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * Bhupesh Sharma <bhupesh.sharma@freescale.com>
+ *
+@@ -46,17 +46,12 @@
+
+ /dts-v1/;
+
+-/include/ "fsl-ls2080a.dtsi"
++#include "fsl-ls2080a.dtsi"
+
+ / {
+ model = "Freescale Layerscape 2080a software Simulator model";
+ compatible = "fsl,ls2080a-simu", "fsl,ls2080a";
+
+- aliases {
+- serial0 = &serial0;
+- serial1 = &serial1;
+- };
+-
+ ethernet@2210000 {
+ compatible = "smsc,lan91c111";
+ reg = <0x0 0x2210000 0x0 0x100>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+index 7f0dc13b..71f15fab 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+@@ -1,8 +1,9 @@
+ /*
+ * Device Tree Include file for Freescale Layerscape-2080A family SoC.
+ *
+- * Copyright (C) 2014-2015, Freescale Semiconductor
++ * Copyright 2014-2016 Freescale Semiconductor, Inc.
+ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ * Bhupesh Sharma <bhupesh.sharma@freescale.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+@@ -44,696 +45,132 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-/ {
+- compatible = "fsl,ls2080a";
+- interrupt-parent = <&gic>;
+- #address-cells = <2>;
+- #size-cells = <2>;
++#include "fsl-ls208xa.dtsi"
+
+- cpus {
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- /*
+- * We expect the enable-method for cpu's to be "psci", but this
+- * is dependent on the SoC FW, which will fill this in.
+- *
+- * Currently supported enable-method is psci v0.2
+- */
+-
+- /* We have 4 clusters having 2 Cortex-A57 cores each */
+- cpu@0 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a57";
+- reg = <0x0>;
+- clocks = <&clockgen 1 0>;
+- next-level-cache = <&cluster0_l2>;
+- };
+-
+- cpu@1 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a57";
+- reg = <0x1>;
+- clocks = <&clockgen 1 0>;
+- next-level-cache = <&cluster0_l2>;
+- };
+-
+- cpu@100 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a57";
+- reg = <0x100>;
+- clocks = <&clockgen 1 1>;
+- next-level-cache = <&cluster1_l2>;
+- };
+-
+- cpu@101 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a57";
+- reg = <0x101>;
+- clocks = <&clockgen 1 1>;
+- next-level-cache = <&cluster1_l2>;
+- };
+-
+- cpu@200 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a57";
+- reg = <0x200>;
+- clocks = <&clockgen 1 2>;
+- next-level-cache = <&cluster2_l2>;
+- };
+-
+- cpu@201 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a57";
+- reg = <0x201>;
+- clocks = <&clockgen 1 2>;
+- next-level-cache = <&cluster2_l2>;
+- };
+-
+- cpu@300 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a57";
+- reg = <0x300>;
+- clocks = <&clockgen 1 3>;
+- next-level-cache = <&cluster3_l2>;
+- };
+-
+- cpu@301 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a57";
+- reg = <0x301>;
+- clocks = <&clockgen 1 3>;
+- next-level-cache = <&cluster3_l2>;
+- };
+-
+- cluster0_l2: l2-cache0 {
+- compatible = "cache";
+- };
+-
+- cluster1_l2: l2-cache1 {
+- compatible = "cache";
+- };
+-
+- cluster2_l2: l2-cache2 {
+- compatible = "cache";
+- };
+-
+- cluster3_l2: l2-cache3 {
+- compatible = "cache";
+- };
++&cpu {
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x0>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&cluster0_l2>;
++ #cooling-cells = <2>;
+ };
+
+- memory@80000000 {
+- device_type = "memory";
+- reg = <0x00000000 0x80000000 0 0x80000000>;
+- /* DRAM space - 1, size : 2 GB DRAM */
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x1>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&cluster0_l2>;
+ };
+
+- sysclk: sysclk {
+- compatible = "fixed-clock";
+- #clock-cells = <0>;
+- clock-frequency = <100000000>;
+- clock-output-names = "sysclk";
++ cpu2: cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x100>;
++ clocks = <&clockgen 1 1>;
++ next-level-cache = <&cluster1_l2>;
++ #cooling-cells = <2>;
+ };
+
+- gic: interrupt-controller@6000000 {
+- compatible = "arm,gic-v3";
+- reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
+- <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */
+- <0x0 0x0c0c0000 0 0x2000>, /* GICC */
+- <0x0 0x0c0d0000 0 0x1000>, /* GICH */
+- <0x0 0x0c0e0000 0 0x20000>; /* GICV */
+- #interrupt-cells = <3>;
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
+- interrupt-controller;
+- interrupts = <1 9 0x4>;
+-
+- its: gic-its@6020000 {
+- compatible = "arm,gic-v3-its";
+- msi-controller;
+- reg = <0x0 0x6020000 0 0x20000>;
+- };
++ cpu3: cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x101>;
++ clocks = <&clockgen 1 1>;
++ next-level-cache = <&cluster1_l2>;
+ };
+
+- rstcr: syscon@1e60000 {
+- compatible = "fsl,ls2080a-rstcr", "syscon";
+- reg = <0x0 0x1e60000 0x0 0x4>;
++ cpu4: cpu@200 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x200>;
++ clocks = <&clockgen 1 2>;
++ next-level-cache = <&cluster2_l2>;
++ #cooling-cells = <2>;
+ };
+
+- reboot {
+- compatible ="syscon-reboot";
+- regmap = <&rstcr>;
+- offset = <0x0>;
+- mask = <0x2>;
++ cpu5: cpu@201 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x201>;
++ clocks = <&clockgen 1 2>;
++ next-level-cache = <&cluster2_l2>;
+ };
+
+- timer {
+- compatible = "arm,armv8-timer";
+- interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+- <1 14 4>, /* Physical Non-Secure PPI, active-low */
+- <1 11 4>, /* Virtual PPI, active-low */
+- <1 10 4>; /* Hypervisor PPI, active-low */
+- fsl,erratum-a008585;
++ cpu6: cpu@300 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x300>;
++ clocks = <&clockgen 1 3>;
++ next-level-cache = <&cluster3_l2>;
++ #cooling-cells = <2>;
+ };
+
+- pmu {
+- compatible = "arm,armv8-pmuv3";
+- interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
++ cpu7: cpu@301 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x301>;
++ clocks = <&clockgen 1 3>;
++ next-level-cache = <&cluster3_l2>;
+ };
+
+- soc {
+- compatible = "simple-bus";
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
+-
+- clockgen: clocking@1300000 {
+- compatible = "fsl,ls2080a-clockgen";
+- reg = <0 0x1300000 0 0xa0000>;
+- #clock-cells = <2>;
+- clocks = <&sysclk>;
+- };
+-
+- serial0: serial@21c0500 {
+- compatible = "fsl,ns16550", "ns16550a";
+- reg = <0x0 0x21c0500 0x0 0x100>;
+- clocks = <&clockgen 4 3>;
+- interrupts = <0 32 0x4>; /* Level high type */
+- };
+-
+- serial1: serial@21c0600 {
+- compatible = "fsl,ns16550", "ns16550a";
+- reg = <0x0 0x21c0600 0x0 0x100>;
+- clocks = <&clockgen 4 3>;
+- interrupts = <0 32 0x4>; /* Level high type */
+- };
+-
+- cluster1_core0_watchdog: wdt@c000000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
+- reg = <0x0 0xc000000 0x0 0x1000>;
+- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+- clock-names = "apb_pclk", "wdog_clk";
+- };
+-
+- cluster1_core1_watchdog: wdt@c010000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
+- reg = <0x0 0xc010000 0x0 0x1000>;
+- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+- clock-names = "apb_pclk", "wdog_clk";
+- };
+-
+- cluster2_core0_watchdog: wdt@c100000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
+- reg = <0x0 0xc100000 0x0 0x1000>;
+- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+- clock-names = "apb_pclk", "wdog_clk";
+- };
+-
+- cluster2_core1_watchdog: wdt@c110000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
+- reg = <0x0 0xc110000 0x0 0x1000>;
+- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+- clock-names = "apb_pclk", "wdog_clk";
+- };
+-
+- cluster3_core0_watchdog: wdt@c200000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
+- reg = <0x0 0xc200000 0x0 0x1000>;
+- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+- clock-names = "apb_pclk", "wdog_clk";
+- };
+-
+- cluster3_core1_watchdog: wdt@c210000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
+- reg = <0x0 0xc210000 0x0 0x1000>;
+- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+- clock-names = "apb_pclk", "wdog_clk";
+- };
+-
+- cluster4_core0_watchdog: wdt@c300000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
+- reg = <0x0 0xc300000 0x0 0x1000>;
+- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+- clock-names = "apb_pclk", "wdog_clk";
+- };
+-
+- cluster4_core1_watchdog: wdt@c310000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
+- reg = <0x0 0xc310000 0x0 0x1000>;
+- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+- clock-names = "apb_pclk", "wdog_clk";
+- };
+-
+- fsl_mc: fsl-mc@80c000000 {
+- compatible = "fsl,qoriq-mc";
+- reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
+- <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
+- msi-parent = <&its>;
+- #address-cells = <3>;
+- #size-cells = <1>;
+-
+- /*
+- * Region type 0x0 - MC portals
+- * Region type 0x1 - QBMAN portals
+- */
+- ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
+- 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
+-
+- /*
+- * Define the maximum number of MACs present on the SoC.
+- */
+- dpmacs {
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- dpmac1: dpmac@1 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x1>;
+- };
+-
+- dpmac2: dpmac@2 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x2>;
+- };
+-
+- dpmac3: dpmac@3 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x3>;
+- };
+-
+- dpmac4: dpmac@4 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x4>;
+- };
+-
+- dpmac5: dpmac@5 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x5>;
+- };
+-
+- dpmac6: dpmac@6 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x6>;
+- };
+-
+- dpmac7: dpmac@7 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x7>;
+- };
+-
+- dpmac8: dpmac@8 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x8>;
+- };
+-
+- dpmac9: dpmac@9 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x9>;
+- };
+-
+- dpmac10: dpmac@a {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0xa>;
+- };
+-
+- dpmac11: dpmac@b {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0xb>;
+- };
+-
+- dpmac12: dpmac@c {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0xc>;
+- };
+-
+- dpmac13: dpmac@d {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0xd>;
+- };
+-
+- dpmac14: dpmac@e {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0xe>;
+- };
+-
+- dpmac15: dpmac@f {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0xf>;
+- };
+-
+- dpmac16: dpmac@10 {
+- compatible = "fsl,qoriq-mc-dpmac";
+- reg = <0x10>;
+- };
+- };
+- };
+-
+- smmu: iommu@5000000 {
+- compatible = "arm,mmu-500";
+- reg = <0 0x5000000 0 0x800000>;
+- #global-interrupts = <12>;
+- interrupts = <0 13 4>, /* global secure fault */
+- <0 14 4>, /* combined secure interrupt */
+- <0 15 4>, /* global non-secure fault */
+- <0 16 4>, /* combined non-secure interrupt */
+- /* performance counter interrupts 0-7 */
+- <0 211 4>, <0 212 4>,
+- <0 213 4>, <0 214 4>,
+- <0 215 4>, <0 216 4>,
+- <0 217 4>, <0 218 4>,
+- /* per context interrupt, 64 interrupts */
+- <0 146 4>, <0 147 4>,
+- <0 148 4>, <0 149 4>,
+- <0 150 4>, <0 151 4>,
+- <0 152 4>, <0 153 4>,
+- <0 154 4>, <0 155 4>,
+- <0 156 4>, <0 157 4>,
+- <0 158 4>, <0 159 4>,
+- <0 160 4>, <0 161 4>,
+- <0 162 4>, <0 163 4>,
+- <0 164 4>, <0 165 4>,
+- <0 166 4>, <0 167 4>,
+- <0 168 4>, <0 169 4>,
+- <0 170 4>, <0 171 4>,
+- <0 172 4>, <0 173 4>,
+- <0 174 4>, <0 175 4>,
+- <0 176 4>, <0 177 4>,
+- <0 178 4>, <0 179 4>,
+- <0 180 4>, <0 181 4>,
+- <0 182 4>, <0 183 4>,
+- <0 184 4>, <0 185 4>,
+- <0 186 4>, <0 187 4>,
+- <0 188 4>, <0 189 4>,
+- <0 190 4>, <0 191 4>,
+- <0 192 4>, <0 193 4>,
+- <0 194 4>, <0 195 4>,
+- <0 196 4>, <0 197 4>,
+- <0 198 4>, <0 199 4>,
+- <0 200 4>, <0 201 4>,
+- <0 202 4>, <0 203 4>,
+- <0 204 4>, <0 205 4>,
+- <0 206 4>, <0 207 4>,
+- <0 208 4>, <0 209 4>;
+- mmu-masters = <&fsl_mc 0x300 0>;
+- };
+-
+- dspi: dspi@2100000 {
+- status = "disabled";
+- compatible = "fsl,ls2080a-dspi", "fsl,ls2085a-dspi";
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x0 0x2100000 0x0 0x10000>;
+- interrupts = <0 26 0x4>; /* Level high type */
+- clocks = <&clockgen 4 3>;
+- clock-names = "dspi";
+- spi-num-chipselects = <5>;
+- bus-num = <0>;
+- };
+-
+- esdhc: esdhc@2140000 {
+- status = "disabled";
+- compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
+- reg = <0x0 0x2140000 0x0 0x10000>;
+- interrupts = <0 28 0x4>; /* Level high type */
+- clock-frequency = <0>; /* Updated by bootloader */
+- voltage-ranges = <1800 1800 3300 3300>;
+- sdhci,auto-cmd12;
+- little-endian;
+- bus-width = <4>;
+- };
+-
+- gpio0: gpio@2300000 {
+- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+- reg = <0x0 0x2300000 0x0 0x10000>;
+- interrupts = <0 36 0x4>; /* Level high type */
+- gpio-controller;
+- little-endian;
+- #gpio-cells = <2>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+-
+- gpio1: gpio@2310000 {
+- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+- reg = <0x0 0x2310000 0x0 0x10000>;
+- interrupts = <0 36 0x4>; /* Level high type */
+- gpio-controller;
+- little-endian;
+- #gpio-cells = <2>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+-
+- gpio2: gpio@2320000 {
+- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+- reg = <0x0 0x2320000 0x0 0x10000>;
+- interrupts = <0 37 0x4>; /* Level high type */
+- gpio-controller;
+- little-endian;
+- #gpio-cells = <2>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+-
+- gpio3: gpio@2330000 {
+- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+- reg = <0x0 0x2330000 0x0 0x10000>;
+- interrupts = <0 37 0x4>; /* Level high type */
+- gpio-controller;
+- little-endian;
+- #gpio-cells = <2>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+-
+- i2c0: i2c@2000000 {
+- status = "disabled";
+- compatible = "fsl,vf610-i2c";
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x0 0x2000000 0x0 0x10000>;
+- interrupts = <0 34 0x4>; /* Level high type */
+- clock-names = "i2c";
+- clocks = <&clockgen 4 3>;
+- };
+-
+- i2c1: i2c@2010000 {
+- status = "disabled";
+- compatible = "fsl,vf610-i2c";
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x0 0x2010000 0x0 0x10000>;
+- interrupts = <0 34 0x4>; /* Level high type */
+- clock-names = "i2c";
+- clocks = <&clockgen 4 3>;
+- };
+-
+- i2c2: i2c@2020000 {
+- status = "disabled";
+- compatible = "fsl,vf610-i2c";
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x0 0x2020000 0x0 0x10000>;
+- interrupts = <0 35 0x4>; /* Level high type */
+- clock-names = "i2c";
+- clocks = <&clockgen 4 3>;
+- };
+-
+- i2c3: i2c@2030000 {
+- status = "disabled";
+- compatible = "fsl,vf610-i2c";
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x0 0x2030000 0x0 0x10000>;
+- interrupts = <0 35 0x4>; /* Level high type */
+- clock-names = "i2c";
+- clocks = <&clockgen 4 3>;
+- };
+-
+- ifc: ifc@2240000 {
+- compatible = "fsl,ifc", "simple-bus";
+- reg = <0x0 0x2240000 0x0 0x20000>;
+- interrupts = <0 21 0x4>; /* Level high type */
+- little-endian;
+- #address-cells = <2>;
+- #size-cells = <1>;
++ cluster0_l2: l2-cache0 {
++ compatible = "cache";
++ };
+
+- ranges = <0 0 0x5 0x80000000 0x08000000
+- 2 0 0x5 0x30000000 0x00010000
+- 3 0 0x5 0x20000000 0x00010000>;
+- };
++ cluster1_l2: l2-cache1 {
++ compatible = "cache";
++ };
+
+- qspi: quadspi@20c0000 {
+- status = "disabled";
+- compatible = "fsl,ls2080a-qspi", "fsl,ls1021a-qspi";
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <0x0 0x20c0000 0x0 0x10000>,
+- <0x0 0x20000000 0x0 0x10000000>;
+- reg-names = "QuadSPI", "QuadSPI-memory";
+- interrupts = <0 25 0x4>; /* Level high type */
+- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+- clock-names = "qspi_en", "qspi";
+- };
++ cluster2_l2: l2-cache2 {
++ compatible = "cache";
++ };
+
+- pcie@3400000 {
+- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
+- "snps,dw-pcie";
+- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+- 0x10 0x00000000 0x0 0x00002000>; /* configuration space */
+- reg-names = "regs", "config";
+- interrupts = <0 108 0x4>; /* Level high type */
+- interrupt-names = "intr";
+- #address-cells = <3>;
+- #size-cells = <2>;
+- device_type = "pci";
+- dma-coherent;
+- num-lanes = <4>;
+- bus-range = <0x0 0xff>;
+- ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */
+- 0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+- msi-parent = <&its>;
+- #interrupt-cells = <1>;
+- interrupt-map-mask = <0 0 0 7>;
+- interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
+- <0000 0 0 2 &gic 0 0 0 110 4>,
+- <0000 0 0 3 &gic 0 0 0 111 4>,
+- <0000 0 0 4 &gic 0 0 0 112 4>;
+- };
++ cluster3_l2: l2-cache3 {
++ compatible = "cache";
++ };
++};
+
+- pcie@3500000 {
+- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
+- "snps,dw-pcie";
+- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
+- 0x12 0x00000000 0x0 0x00002000>; /* configuration space */
+- reg-names = "regs", "config";
+- interrupts = <0 113 0x4>; /* Level high type */
+- interrupt-names = "intr";
+- #address-cells = <3>;
+- #size-cells = <2>;
+- device_type = "pci";
+- dma-coherent;
+- num-lanes = <4>;
+- bus-range = <0x0 0xff>;
+- ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */
+- 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+- msi-parent = <&its>;
+- #interrupt-cells = <1>;
+- interrupt-map-mask = <0 0 0 7>;
+- interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
+- <0000 0 0 2 &gic 0 0 0 115 4>,
+- <0000 0 0 3 &gic 0 0 0 116 4>,
+- <0000 0 0 4 &gic 0 0 0 117 4>;
+- };
++&usb0 {
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,dma-snooping;
++};
+
+- pcie@3600000 {
+- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
+- "snps,dw-pcie";
+- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
+- 0x14 0x00000000 0x0 0x00002000>; /* configuration space */
+- reg-names = "regs", "config";
+- interrupts = <0 118 0x4>; /* Level high type */
+- interrupt-names = "intr";
+- #address-cells = <3>;
+- #size-cells = <2>;
+- device_type = "pci";
+- dma-coherent;
+- num-lanes = <8>;
+- bus-range = <0x0 0xff>;
+- ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */
+- 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+- msi-parent = <&its>;
+- #interrupt-cells = <1>;
+- interrupt-map-mask = <0 0 0 7>;
+- interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
+- <0000 0 0 2 &gic 0 0 0 120 4>,
+- <0000 0 0 3 &gic 0 0 0 121 4>,
+- <0000 0 0 4 &gic 0 0 0 122 4>;
+- };
++&usb1 {
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,dma-snooping;
++};
+
+- pcie@3700000 {
+- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
+- "snps,dw-pcie";
+- reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
+- 0x16 0x00000000 0x0 0x00002000>; /* configuration space */
+- reg-names = "regs", "config";
+- interrupts = <0 123 0x4>; /* Level high type */
+- interrupt-names = "intr";
+- #address-cells = <3>;
+- #size-cells = <2>;
+- device_type = "pci";
+- dma-coherent;
+- num-lanes = <4>;
+- bus-range = <0x0 0xff>;
+- ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */
+- 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+- msi-parent = <&its>;
+- #interrupt-cells = <1>;
+- interrupt-map-mask = <0 0 0 7>;
+- interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>,
+- <0000 0 0 2 &gic 0 0 0 125 4>,
+- <0000 0 0 3 &gic 0 0 0 126 4>,
+- <0000 0 0 4 &gic 0 0 0 127 4>;
+- };
++&pcie1 {
++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
++ 0x10 0x00000000 0x0 0x00002000>; /* configuration space */
+
+- sata0: sata@3200000 {
+- status = "disabled";
+- compatible = "fsl,ls2080a-ahci";
+- reg = <0x0 0x3200000 0x0 0x10000>;
+- interrupts = <0 133 0x4>; /* Level high type */
+- clocks = <&clockgen 4 3>;
+- dma-coherent;
+- };
++ ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++};
+
+- sata1: sata@3210000 {
+- status = "disabled";
+- compatible = "fsl,ls2080a-ahci";
+- reg = <0x0 0x3210000 0x0 0x10000>;
+- interrupts = <0 136 0x4>; /* Level high type */
+- clocks = <&clockgen 4 3>;
+- dma-coherent;
+- };
++&pcie2 {
++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
++ 0x12 0x00000000 0x0 0x00002000>; /* configuration space */
+
+- usb0: usb3@3100000 {
+- status = "disabled";
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3100000 0x0 0x10000>;
+- interrupts = <0 80 0x4>; /* Level high type */
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- };
++ ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++};
+
+- usb1: usb3@3110000 {
+- status = "disabled";
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3110000 0x0 0x10000>;
+- interrupts = <0 81 0x4>; /* Level high type */
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- };
++&pcie3 {
++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
++ 0x14 0x00000000 0x0 0x00002000>; /* configuration space */
+
+- ccn@4000000 {
+- compatible = "arm,ccn-504";
+- reg = <0x0 0x04000000 0x0 0x01000000>;
+- interrupts = <0 12 4>;
+- };
+- };
++ ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++};
+
+- ddr1: memory-controller@1080000 {
+- compatible = "fsl,qoriq-memory-controller";
+- reg = <0x0 0x1080000 0x0 0x1000>;
+- interrupts = <0 17 0x4>;
+- little-endian;
+- };
++&pcie4 {
++ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
++ 0x16 0x00000000 0x0 0x00002000>; /* configuration space */
+
+- ddr2: memory-controller@1090000 {
+- compatible = "fsl,qoriq-memory-controller";
+- reg = <0x0 0x1090000 0x0 0x1000>;
+- interrupts = <0 18 0x4>;
+- little-endian;
+- };
++ ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts
+new file mode 100644
+index 00000000..c3375bf7
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts
+@@ -0,0 +1,161 @@
++/*
++ * Device Tree file for NXP LS2081A RDB Board.
++ *
++ * Copyright 2017 NXP
++ *
++ * Priyanka Jain <priyanka.jain@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls2088a.dtsi"
++
++/ {
++ model = "NXP Layerscape 2081A RDB Board";
++ compatible = "fsl,ls2081a-rdb", "fsl,ls2081a";
++
++ aliases {
++ serial0 = &serial0;
++ serial1 = &serial1;
++ };
++
++ chosen {
++ stdout-path = "serial1:115200n8";
++ };
++};
++
++&esdhc {
++ status = "okay";
++};
++
++&ifc {
++ status = "disabled";
++};
++
++&i2c0 {
++ status = "okay";
++ pca9547@75 {
++ compatible = "nxp,pca9547";
++ reg = <0x75>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ i2c@1 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x01>;
++ rtc@51 {
++ compatible = "nxp,pcf2129";
++ reg = <0x51>;
++ };
++ };
++
++ i2c@2 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x02>;
++
++ ina220@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <500>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ adt7481@4c {
++ compatible = "adi,adt7461";
++ reg = <0x4c>;
++ };
++ };
++ };
++};
++
++&dspi {
++ status = "okay";
++ dflash0: n25q512a {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p80";
++ spi-max-frequency = <3000000>;
++ reg = <0>;
++ };
++};
++
++&qspi {
++ status = "okay";
++ fsl,qspi-has-second-chip;
++ flash0: s25fs512s@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "spansion,m25p80";
++ m25p,fast-read;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++ flash1: s25fs512s@1 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "spansion,m25p80";
++ m25p,fast-read;
++ spi-max-frequency = <20000000>;
++ reg = <1>;
++ };
++};
++
++&sata0 {
++ status = "okay";
++};
++
++&sata1 {
++ status = "okay";
++};
++
++&usb0 {
++ status = "okay";
++};
++
++&usb1 {
++ status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
+new file mode 100644
+index 00000000..1dbc7aa8
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
+@@ -0,0 +1,162 @@
++/*
++ * Device Tree file for Freescale LS2088A QDS Board.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls2088a.dtsi"
++#include "fsl-ls208xa-qds.dtsi"
++
++/ {
++ model = "Freescale Layerscape 2088A QDS Board";
++ compatible = "fsl,ls2088a-qds", "fsl,ls2088a";
++
++ chosen {
++ stdout-path = "serial0:115200n8";
++ };
++};
++
++&ifc {
++ boardctrl: board-control@3,0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "fsl,tetra-fpga", "fsl,fpga-qixis", "simple-bus";
++ reg = <3 0 0x300>; /* TODO check address */
++ ranges = <0 3 0 0x300>;
++
++ mdio_mux_emi1 {
++ compatible = "mdio-mux-mmioreg", "mdio-mux";
++ mdio-parent-bus = <&emdio1>;
++ reg = <0x54 1>; /* BRDCFG4 */
++ mux-mask = <0xe0>; /* EMI1_MDIO */
++
++			#address-cells = <1>;
++ #size-cells = <0>;
++
++ /* Child MDIO buses, one for each riser card:
++ * reg = 0x0, 0x20, 0x40, 0x60, 0x80, 0xa0.
++ * VSC8234 PHYs on the riser cards.
++ */
++
++ mdio_mux3: mdio@60 {
++ reg = <0x60>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ mdio0_phy12: mdio_phy0@1c {
++ reg = <0x1c>;
++ phy-connection-type = "sgmii";
++ };
++ mdio0_phy13: mdio_phy1@1d {
++ reg = <0x1d>;
++ phy-connection-type = "sgmii";
++ };
++ mdio0_phy14: mdio_phy2@1e {
++ reg = <0x1e>;
++ phy-connection-type = "sgmii";
++ };
++ mdio0_phy15: mdio_phy3@1f {
++ reg = <0x1f>;
++ phy-connection-type = "sgmii";
++ };
++ };
++ };
++ };
++};
++
++&pcs_mdio1 {
++ pcs_phy1: ethernet-phy@0 {
++ backplane-mode = "10gbase-kr";
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x0>;
++ fsl,lane-handle = <&serdes1>;
++		fsl,lane-reg = <0x9C0 0x40>; /* lane H */
++ };
++};
++
++&pcs_mdio2 {
++ pcs_phy2: ethernet-phy@0 {
++ backplane-mode = "10gbase-kr";
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x0>;
++ fsl,lane-handle = <&serdes1>;
++		fsl,lane-reg = <0x980 0x40>; /* lane G */
++ };
++};
++
++&pcs_mdio3 {
++ pcs_phy3: ethernet-phy@0 {
++ backplane-mode = "10gbase-kr";
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x0>;
++ fsl,lane-handle = <&serdes1>;
++		fsl,lane-reg = <0x940 0x40>; /* lane F */
++ };
++};
++
++&pcs_mdio4 {
++ pcs_phy4: ethernet-phy@0 {
++ backplane-mode = "10gbase-kr";
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x0>;
++ fsl,lane-handle = <&serdes1>;
++		fsl,lane-reg = <0x900 0x40>; /* lane E */
++ };
++};
++
++/* Update DPMAC connections to external PHYs, under SerDes 0x2a_0x49. */
++&dpmac9 {
++ phy-handle = <&mdio0_phy12>;
++};
++&dpmac10 {
++ phy-handle = <&mdio0_phy13>;
++};
++&dpmac11 {
++ phy-handle = <&mdio0_phy14>;
++};
++&dpmac12 {
++ phy-handle = <&mdio0_phy15>;
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
+new file mode 100644
+index 00000000..9300119b
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
+@@ -0,0 +1,140 @@
++/*
++ * Device Tree file for Freescale LS2088A RDB Board.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls2088a.dtsi"
++#include "fsl-ls208xa-rdb.dtsi"
++
++/ {
++ model = "Freescale Layerscape 2088A RDB Board";
++ compatible = "fsl,ls2088a-rdb", "fsl,ls2088a";
++
++ chosen {
++ stdout-path = "serial1:115200n8";
++ };
++};
++
++&emdio1 {
++ status = "disabled";
++ /* CS4340 PHYs */
++ mdio1_phy1: emdio1_phy@1 {
++ reg = <0x10>;
++ phy-connection-type = "xfi";
++ };
++ mdio1_phy2: emdio1_phy@2 {
++ reg = <0x11>;
++ phy-connection-type = "xfi";
++ };
++ mdio1_phy3: emdio1_phy@3 {
++ reg = <0x12>;
++ phy-connection-type = "xfi";
++ };
++ mdio1_phy4: emdio1_phy@4 {
++ reg = <0x13>;
++ phy-connection-type = "xfi";
++ };
++};
++
++&emdio2 {
++ /* AQR405 PHYs */
++ mdio2_phy1: emdio2_phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 1 0x4>; /* Level high type */
++ reg = <0x0>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy2: emdio2_phy@2 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 2 0x4>; /* Level high type */
++ reg = <0x1>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy3: emdio2_phy@3 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 4 0x4>; /* Level high type */
++ reg = <0x2>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy4: emdio2_phy@4 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 5 0x4>; /* Level high type */
++ reg = <0x3>;
++ phy-connection-type = "xfi";
++ };
++};
++
++/* Update DPMAC connections to external PHYs, under the assumption of
++ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board.
++ */
++/* Leave the Cortina PHYs commented out until a proper driver is integrated
++ *&dpmac1 {
++ * phy-handle = <&mdio1_phy1>;
++ *};
++ *&dpmac2 {
++ * phy-handle = <&mdio1_phy2>;
++ *};
++ *&dpmac3 {
++ * phy-handle = <&mdio1_phy3>;
++ *};
++ *&dpmac4 {
++ * phy-handle = <&mdio1_phy4>;
++ *};
++ */
++
++&dpmac5 {
++ phy-handle = <&mdio2_phy1>;
++};
++&dpmac6 {
++ phy-handle = <&mdio2_phy2>;
++};
++&dpmac7 {
++ phy-handle = <&mdio2_phy3>;
++};
++&dpmac8 {
++ phy-handle = <&mdio2_phy4>;
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
+new file mode 100644
+index 00000000..833699ea
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
+@@ -0,0 +1,195 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-2088A family SoC.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "fsl-ls208xa.dtsi"
++
++&cpu {
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&cluster0_l2>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x1>;
++ clocks = <&clockgen 1 0>;
++ next-level-cache = <&cluster0_l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu2: cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x100>;
++ clocks = <&clockgen 1 1>;
++ next-level-cache = <&cluster1_l2>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu3: cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x101>;
++ clocks = <&clockgen 1 1>;
++ next-level-cache = <&cluster1_l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu4: cpu@200 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x200>;
++ clocks = <&clockgen 1 2>;
++ next-level-cache = <&cluster2_l2>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu5: cpu@201 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x201>;
++ clocks = <&clockgen 1 2>;
++ next-level-cache = <&cluster2_l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu6: cpu@300 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x300>;
++ clocks = <&clockgen 1 3>;
++ next-level-cache = <&cluster3_l2>;
++ #cooling-cells = <2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ cpu7: cpu@301 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x301>;
++ clocks = <&clockgen 1 3>;
++ next-level-cache = <&cluster3_l2>;
++ cpu-idle-states = <&CPU_PH20>;
++ };
++
++ idle-states {
++		/*
++		 * The PSCI node is not added by default; U-Boot will add the
++		 * missing parts if it decides to use PSCI.
++		 */
++ entry-method = "arm,psci";
++
++ CPU_PH20: cpu-ph20 {
++ compatible = "arm,idle-state";
++ idle-state-name = "PH20";
++ arm,psci-suspend-param = <0x0>;
++ entry-latency-us = <1000>;
++ exit-latency-us = <1000>;
++ min-residency-us = <3000>;
++ };
++ };
++
++ cluster0_l2: l2-cache0 {
++ compatible = "cache";
++ };
++
++ cluster1_l2: l2-cache1 {
++ compatible = "cache";
++ };
++
++ cluster2_l2: l2-cache2 {
++ compatible = "cache";
++ };
++
++ cluster3_l2: l2-cache3 {
++ compatible = "cache";
++ };
++};
++
++&pcie1 {
++ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
++ 0x20 0x00000000 0x0 0x00002000>; /* configuration space */
++
++ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000
++ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>;
++};
++
++&pcie2 {
++ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
++ 0x28 0x00000000 0x0 0x00002000>; /* configuration space */
++
++ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000
++ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>;
++};
++
++&pcie3 {
++ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
++ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
++
++ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000
++ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>;
++};
++
++&pcie4 {
++ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
++ 0x38 0x00000000 0x0 0x00002000>; /* configuration space */
++
++ ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000
++ 0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>;
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+new file mode 100644
+index 00000000..b2374469
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+@@ -0,0 +1,198 @@
++/*
++ * Device Tree file for Freescale LS2080A QDS Board.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++&esdhc {
++ mmc-hs200-1_8v;
++ status = "okay";
++};
++
++&ifc {
++ status = "okay";
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0x0 0x0 0x5 0x80000000 0x08000000
++ 0x2 0x0 0x5 0x30000000 0x00010000
++ 0x3 0x0 0x5 0x20000000 0x00010000>;
++
++ nor@0,0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "cfi-flash";
++ reg = <0x0 0x0 0x8000000>;
++ bank-width = <2>;
++ device-width = <1>;
++ };
++
++ nand@2,0 {
++ compatible = "fsl,ifc-nand";
++ reg = <0x2 0x0 0x10000>;
++ };
++
++ cpld@3,0 {
++ reg = <0x3 0x0 0x10000>;
++ compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis";
++ };
++};
++
++&i2c0 {
++ status = "okay";
++ pca9547@77 {
++ compatible = "nxp,pca9547";
++ reg = <0x77>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ i2c@0 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x00>;
++ rtc@68 {
++ compatible = "dallas,ds3232";
++ reg = <0x68>;
++ };
++ };
++
++ i2c@2 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x02>;
++
++ ina220@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <500>;
++ };
++
++ ina220@41 {
++ compatible = "ti,ina220";
++ reg = <0x41>;
++ shunt-resistor = <1000>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ adt7481@4c {
++ compatible = "adi,adt7461";
++ reg = <0x4c>;
++ };
++ };
++ };
++};
++
++&i2c1 {
++ status = "disabled";
++};
++
++&i2c2 {
++ status = "disabled";
++};
++
++&i2c3 {
++ status = "disabled";
++};
++
++&dspi {
++ status = "okay";
++ dflash0: n25q128a {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p80";
++ spi-max-frequency = <3000000>;
++ reg = <0>;
++ };
++ dflash1: sst25wf040b {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p80";
++ spi-max-frequency = <3000000>;
++ reg = <1>;
++ };
++ dflash2: en25s64 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p80";
++ spi-max-frequency = <3000000>;
++ reg = <2>;
++ };
++};
++
++&qspi {
++ status = "okay";
++ flash0: s25fl256s1@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p80";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++ flash2: s25fl256s1@2 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p80";
++ spi-max-frequency = <20000000>;
++		reg = <2>;
++ };
++};
++
++&sata0 {
++ status = "okay";
++};
++
++&sata1 {
++ status = "okay";
++};
++
++&usb0 {
++ status = "okay";
++};
++
++&usb1 {
++ status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+new file mode 100644
+index 00000000..8e919dc8
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+@@ -0,0 +1,161 @@
++/*
++ * Device Tree file for Freescale LS2080A RDB Board.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++&esdhc {
++ status = "okay";
++};
++
++&ifc {
++ status = "okay";
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0x0 0x0 0x5 0x80000000 0x08000000
++ 0x2 0x0 0x5 0x30000000 0x00010000
++ 0x3 0x0 0x5 0x20000000 0x00010000>;
++
++ nor@0,0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "cfi-flash";
++ reg = <0x0 0x0 0x8000000>;
++ bank-width = <2>;
++ device-width = <1>;
++ };
++
++ nand@2,0 {
++ compatible = "fsl,ifc-nand";
++ reg = <0x2 0x0 0x10000>;
++ };
++
++ cpld@3,0 {
++ reg = <0x3 0x0 0x10000>;
++ compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis";
++ };
++
++};
++
++&i2c0 {
++ status = "okay";
++ pca9547@75 {
++ compatible = "nxp,pca9547";
++ reg = <0x75>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ i2c-mux-never-disable;
++ i2c@1 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x01>;
++ rtc@68 {
++ compatible = "dallas,ds3232";
++ reg = <0x68>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ adt7481@4c {
++ compatible = "adi,adt7461";
++ reg = <0x4c>;
++ };
++ };
++ };
++};
++
++&i2c1 {
++ status = "disabled";
++};
++
++&i2c2 {
++ status = "disabled";
++};
++
++&i2c3 {
++ status = "disabled";
++};
++
++&dspi {
++ status = "okay";
++ dflash0: n25q512a {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p80";
++ spi-max-frequency = <3000000>;
++ reg = <0>;
++ };
++};
++
++&qspi {
++ status = "okay";
++ flash0: s25fs512s@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "spansion,m25p80";
++ m25p,fast-read;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&sata0 {
++ status = "okay";
++};
++
++&sata1 {
++ status = "okay";
++};
++
++&usb0 {
++ status = "okay";
++};
++
++&usb1 {
++ status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+new file mode 100644
+index 00000000..f694cac0
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+@@ -0,0 +1,910 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-2080A family SoC.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <dt-bindings/thermal/thermal.h>
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++
++/ {
++ compatible = "fsl,ls2080a";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ aliases {
++ crypto = &crypto;
++ serial0 = &serial0;
++ serial1 = &serial1;
++ };
++
++ cpu: cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0 0x80000000>;
++ /* DRAM space - 1, size : 2 GB DRAM */
++ };
++
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "sysclk";
++ };
++
++ gic: interrupt-controller@6000000 {
++ compatible = "arm,gic-v3";
++ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
++ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */
++ <0x0 0x0c0c0000 0 0x2000>, /* GICC */
++ <0x0 0x0c0d0000 0 0x1000>, /* GICH */
++ <0x0 0x0c0e0000 0 0x20000>; /* GICV */
++ #interrupt-cells = <3>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ interrupt-controller;
++ interrupts = <1 9 0x4>;
++
++ its: gic-its@6020000 {
++ compatible = "arm,gic-v3-its";
++ msi-controller;
++ reg = <0x0 0x6020000 0 0x20000>;
++ };
++ };
++
++ rstcr: syscon@1e60000 {
++ compatible = "fsl,ls2080a-rstcr", "syscon";
++ reg = <0x0 0x1e60000 0x0 0x4>;
++ };
++
++ reboot {
++		compatible = "syscon-reboot";
++ regmap = <&rstcr>;
++ offset = <0x0>;
++ mask = <0x2>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
++ <1 14 4>, /* Physical Non-Secure PPI, active-low */
++ <1 11 4>, /* Virtual PPI, active-low */
++ <1 10 4>; /* Hypervisor PPI, active-low */
++ fsl,erratum-a008585;
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
++ };
++
++ soc {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ clockgen: clocking@1300000 {
++ compatible = "fsl,ls2080a-clockgen";
++ reg = <0 0x1300000 0 0xa0000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
++ };
++
++ dcfg: dcfg@1e00000 {
++ compatible = "fsl,ls2080a-dcfg", "syscon";
++ reg = <0x0 0x1e00000 0x0 0x10000>;
++ little-endian;
++ };
++
++ tmu: tmu@1f80000 {
++ compatible = "fsl,qoriq-tmu";
++ reg = <0x0 0x1f80000 0x0 0x10000>;
++ interrupts = <0 23 0x4>;
++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
++ fsl,tmu-calibration = <0x00000000 0x00000026
++ 0x00000001 0x0000002d
++ 0x00000002 0x00000032
++ 0x00000003 0x00000039
++ 0x00000004 0x0000003f
++ 0x00000005 0x00000046
++ 0x00000006 0x0000004d
++ 0x00000007 0x00000054
++ 0x00000008 0x0000005a
++ 0x00000009 0x00000061
++ 0x0000000a 0x0000006a
++ 0x0000000b 0x00000071
++
++ 0x00010000 0x00000025
++ 0x00010001 0x0000002c
++ 0x00010002 0x00000035
++ 0x00010003 0x0000003d
++ 0x00010004 0x00000045
++ 0x00010005 0x0000004e
++ 0x00010006 0x00000057
++ 0x00010007 0x00000061
++ 0x00010008 0x0000006b
++ 0x00010009 0x00000076
++
++ 0x00020000 0x00000029
++ 0x00020001 0x00000033
++ 0x00020002 0x0000003d
++ 0x00020003 0x00000049
++ 0x00020004 0x00000056
++ 0x00020005 0x00000061
++ 0x00020006 0x0000006d
++
++ 0x00030000 0x00000021
++ 0x00030001 0x0000002a
++ 0x00030002 0x0000003c
++ 0x00030003 0x0000004e>;
++ little-endian;
++ #thermal-sensor-cells = <1>;
++ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++
++ thermal-sensors = <&tmu 4>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++ cpu_crit: cpu-crit {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map1 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map2 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu4 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map3 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu6 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++ };
++
++ serial0: serial@21c0500 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0500 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++
++ serial1: serial@21c0600 {
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0600 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++
++ cluster1_core0_watchdog: wdt@c000000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc000000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster1_core1_watchdog: wdt@c010000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc010000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core0_watchdog: wdt@c100000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc100000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core1_watchdog: wdt@c110000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc110000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster3_core0_watchdog: wdt@c200000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc200000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster3_core1_watchdog: wdt@c210000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc210000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster4_core0_watchdog: wdt@c300000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc300000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster4_core1_watchdog: wdt@c310000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc310000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ crypto: crypto@8000000 {
++ compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
++ fsl,sec-era = <8>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x00 0x8000000 0x100000>;
++ reg = <0x00 0x8000000 0x0 0x100000>;
++ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
++ dma-coherent;
++
++ sec_jr0: jr@10000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x10000 0x10000>;
++ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr1: jr@20000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x20000 0x10000>;
++ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr2: jr@30000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x30000 0x10000>;
++ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr3: jr@40000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x40000 0x10000>;
++ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
++ };
++ };
++
++ fsl_mc: fsl-mc@80c000000 {
++ compatible = "fsl,qoriq-mc";
++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
++ #address-cells = <3>;
++ #size-cells = <1>;
++
++ /*
++ * Region type 0x0 - MC portals
++ * Region type 0x1 - QBMAN portals
++ */
++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
++
++ /*
++ * Define the maximum number of MACs present on the SoC.
++ */
++ dpmacs {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dpmac1: dpmac@1 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x1>;
++ };
++
++ dpmac2: dpmac@2 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x2>;
++ };
++
++ dpmac3: dpmac@3 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x3>;
++ };
++
++ dpmac4: dpmac@4 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x4>;
++ };
++
++ dpmac5: dpmac@5 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x5>;
++ };
++
++ dpmac6: dpmac@6 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x6>;
++ };
++
++ dpmac7: dpmac@7 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x7>;
++ };
++
++ dpmac8: dpmac@8 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x8>;
++ };
++
++ dpmac9: dpmac@9 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x9>;
++ };
++
++ dpmac10: dpmac@a {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xa>;
++ };
++
++ dpmac11: dpmac@b {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xb>;
++ };
++
++ dpmac12: dpmac@c {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xc>;
++ };
++
++ dpmac13: dpmac@d {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xd>;
++ };
++
++ dpmac14: dpmac@e {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xe>;
++ };
++
++ dpmac15: dpmac@f {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xf>;
++ };
++
++ dpmac16: dpmac@10 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x10>;
++ };
++ };
++ };
++
++ smmu: iommu@5000000 {
++ compatible = "arm,mmu-500";
++ reg = <0 0x5000000 0 0x800000>;
++ #global-interrupts = <12>;
++ #iommu-cells = <1>;
++ stream-match-mask = <0x7C00>;
++ interrupts = <0 13 4>, /* global secure fault */
++ <0 14 4>, /* combined secure interrupt */
++ <0 15 4>, /* global non-secure fault */
++ <0 16 4>, /* combined non-secure interrupt */
++ /* performance counter interrupts 0-7 */
++ <0 211 4>, <0 212 4>,
++ <0 213 4>, <0 214 4>,
++ <0 215 4>, <0 216 4>,
++ <0 217 4>, <0 218 4>,
++ /* per context interrupt, 64 interrupts */
++ <0 146 4>, <0 147 4>,
++ <0 148 4>, <0 149 4>,
++ <0 150 4>, <0 151 4>,
++ <0 152 4>, <0 153 4>,
++ <0 154 4>, <0 155 4>,
++ <0 156 4>, <0 157 4>,
++ <0 158 4>, <0 159 4>,
++ <0 160 4>, <0 161 4>,
++ <0 162 4>, <0 163 4>,
++ <0 164 4>, <0 165 4>,
++ <0 166 4>, <0 167 4>,
++ <0 168 4>, <0 169 4>,
++ <0 170 4>, <0 171 4>,
++ <0 172 4>, <0 173 4>,
++ <0 174 4>, <0 175 4>,
++ <0 176 4>, <0 177 4>,
++ <0 178 4>, <0 179 4>,
++ <0 180 4>, <0 181 4>,
++ <0 182 4>, <0 183 4>,
++ <0 184 4>, <0 185 4>,
++ <0 186 4>, <0 187 4>,
++ <0 188 4>, <0 189 4>,
++ <0 190 4>, <0 191 4>,
++ <0 192 4>, <0 193 4>,
++ <0 194 4>, <0 195 4>,
++ <0 196 4>, <0 197 4>,
++ <0 198 4>, <0 199 4>,
++ <0 200 4>, <0 201 4>,
++ <0 202 4>, <0 203 4>,
++ <0 204 4>, <0 205 4>,
++ <0 206 4>, <0 207 4>,
++ <0 208 4>, <0 209 4>;
++ };
++
++ dspi: dspi@2100000 {
++ status = "disabled";
++ compatible = "fsl,ls2080a-dspi", "fsl,ls2085a-dspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2100000 0x0 0x10000>;
++ interrupts = <0 26 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ clock-names = "dspi";
++ spi-num-chipselects = <5>;
++ bus-num = <0>;
++ };
++
++ esdhc: esdhc@2140000 {
++ status = "disabled";
++ compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
++ reg = <0x0 0x2140000 0x0 0x10000>;
++ interrupts = <0 28 0x4>; /* Level high type */
++ clocks = <&clockgen 4 1>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ little-endian;
++ bus-width = <4>;
++ };
++
++ gpio0: gpio@2300000 {
++ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
++ reg = <0x0 0x2300000 0x0 0x10000>;
++ interrupts = <0 36 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio1: gpio@2310000 {
++ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
++ reg = <0x0 0x2310000 0x0 0x10000>;
++ interrupts = <0 36 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio2: gpio@2320000 {
++ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
++ reg = <0x0 0x2320000 0x0 0x10000>;
++ interrupts = <0 37 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio3: gpio@2330000 {
++ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
++ reg = <0x0 0x2330000 0x0 0x10000>;
++ interrupts = <0 37 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ /* TODO: WRIOP (CCSR?) */
++ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000,
++ * E-MDIO1: 0x1_6000
++ */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B96000 0x0 0x1000>;
++ device_type = "mdio"; /* TODO: is this necessary? */
++ little-endian; /* force the driver in LE mode */
++
++ /* Not necessary on the QDS, but needed on the RDB */
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000,
++ * E-MDIO2: 0x1_7000
++ */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B97000 0x0 0x1000>;
++ device_type = "mdio"; /* TODO: is this necessary? */
++ little-endian; /* force the driver in LE mode */
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio1: mdio@0x8c07000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c07000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio2: mdio@0x8c0b000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c0b000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio3: mdio@0x8c0f000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c0f000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio4: mdio@0x8c13000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c13000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio5: mdio@0x8c17000 {
++ status = "disabled";
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c17000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio6: mdio@0x8c1b000 {
++ status = "disabled";
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c1b000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio7: mdio@0x8c1f000 {
++ status = "disabled";
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c1f000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio8: mdio@0x8c23000 {
++ status = "disabled";
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c23000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ i2c0: i2c@2000000 {
++ status = "disabled";
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2000000 0x0 0x10000>;
++ interrupts = <0 34 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c1: i2c@2010000 {
++ status = "disabled";
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2010000 0x0 0x10000>;
++ interrupts = <0 34 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c2: i2c@2020000 {
++ status = "disabled";
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2020000 0x0 0x10000>;
++ interrupts = <0 35 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c3: i2c@2030000 {
++ status = "disabled";
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2030000 0x0 0x10000>;
++ interrupts = <0 35 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ ifc: ifc@2240000 {
++ compatible = "fsl,ifc", "simple-bus";
++ reg = <0x0 0x2240000 0x0 0x20000>;
++ interrupts = <0 21 0x4>; /* Level high type */
++ little-endian;
++ #address-cells = <2>;
++ #size-cells = <1>;
++
++ ranges = <0 0 0x5 0x80000000 0x08000000
++ 2 0 0x5 0x30000000 0x00010000
++ 3 0 0x5 0x20000000 0x00010000>;
++ };
++
++ qspi: quadspi@20c0000 {
++ status = "disabled";
++ compatible = "fsl,ls2080a-qspi", "fsl,ls1021a-qspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x20c0000 0x0 0x10000>,
++ <0x0 0x20000000 0x0 0x10000000>;
++ reg-names = "QuadSPI", "QuadSPI-memory";
++ interrupts = <0 25 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "qspi_en", "qspi";
++ };
++
++ pcie1: pcie@3400000 {
++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
++ "snps,dw-pcie";
++ reg-names = "regs", "config";
++ interrupts = <0 108 0x4>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
++ <0000 0 0 2 &gic 0 0 0 110 4>,
++ <0000 0 0 3 &gic 0 0 0 111 4>,
++ <0000 0 0 4 &gic 0 0 0 112 4>;
++ };
++
++ pcie2: pcie@3500000 {
++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
++ "snps,dw-pcie";
++ reg-names = "regs", "config";
++ interrupts = <0 113 0x4>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
++ <0000 0 0 2 &gic 0 0 0 115 4>,
++ <0000 0 0 3 &gic 0 0 0 116 4>,
++ <0000 0 0 4 &gic 0 0 0 117 4>;
++ };
++
++ pcie3: pcie@3600000 {
++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
++ "snps,dw-pcie";
++ reg-names = "regs", "config";
++ interrupts = <0 118 0x4>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <8>;
++ bus-range = <0x0 0xff>;
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
++ <0000 0 0 2 &gic 0 0 0 120 4>,
++ <0000 0 0 3 &gic 0 0 0 121 4>,
++ <0000 0 0 4 &gic 0 0 0 122 4>;
++ };
++
++ pcie4: pcie@3700000 {
++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
++ "snps,dw-pcie";
++ reg-names = "regs", "config";
++ interrupts = <0 123 0x4>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>,
++ <0000 0 0 2 &gic 0 0 0 125 4>,
++ <0000 0 0 3 &gic 0 0 0 126 4>,
++ <0000 0 0 4 &gic 0 0 0 127 4>;
++ };
++
++ sata0: sata@3200000 {
++ status = "disabled";
++ compatible = "fsl,ls2080a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>;
++ interrupts = <0 133 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ dma-coherent;
++ };
++
++ sata1: sata@3210000 {
++ status = "disabled";
++ compatible = "fsl,ls2080a-ahci";
++ reg = <0x0 0x3210000 0x0 0x10000>;
++ interrupts = <0 136 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ dma-coherent;
++ };
++
++ usb0: usb3@3100000 {
++ status = "disabled";
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <0 80 0x4>; /* Level high type */
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ usb1: usb3@3110000 {
++ status = "disabled";
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3110000 0x0 0x10000>;
++ interrupts = <0 81 0x4>; /* Level high type */
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ serdes1: serdes@1ea0000 {
++ reg = <0x0 0x1ea0000 0 0x00002000>;
++ };
++
++ ccn@4000000 {
++ compatible = "arm,ccn-504";
++ reg = <0x0 0x04000000 0x0 0x01000000>;
++ interrupts = <0 12 4>;
++ };
++
++ ftm0: ftm0@2800000 {
++ compatible = "fsl,ftm-alarm";
++ reg = <0x0 0x2800000 0x0 0x10000>;
++ interrupts = <0 44 4>;
++ };
++ };
++
++ ddr1: memory-controller@1080000 {
++ compatible = "fsl,qoriq-memory-controller";
++ reg = <0x0 0x1080000 0x0 0x1000>;
++ interrupts = <0 17 0x4>;
++ little-endian;
++ };
++
++ ddr2: memory-controller@1090000 {
++ compatible = "fsl,qoriq-memory-controller";
++ reg = <0x0 0x1090000 0x0 0x1000>;
++ interrupts = <0 18 0x4>;
++ little-endian;
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi b/arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi
+new file mode 100644
+index 00000000..14680adb
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi
+@@ -0,0 +1,81 @@
++/*
++ * QorIQ BMan Portals device tree
++ *
++ * Copyright 2011-2016 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++&bportals {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "simple-bus";
++
++ bman-portal@0 {
++ cell-index = <0>;
++ compatible = "fsl,bman-portal";
++ reg = <0x0 0x4000 0x4000000 0x4000>;
++ interrupts = <0 173 0x4>;
++ };
++
++ bman-portal@10000 {
++ cell-index = <1>;
++ compatible = "fsl,bman-portal";
++ reg = <0x10000 0x4000 0x4010000 0x4000>;
++ interrupts = <0 175 0x4>;
++ };
++
++ bman-portal@20000 {
++ cell-index = <2>;
++ compatible = "fsl,bman-portal";
++ reg = <0x20000 0x4000 0x4020000 0x4000>;
++ interrupts = <0 177 0x4>;
++ };
++
++ bman-portal@30000 {
++ cell-index = <3>;
++ compatible = "fsl,bman-portal";
++ reg = <0x30000 0x4000 0x4030000 0x4000>;
++ interrupts = <0 179 0x4>;
++ };
++
++ bman-portal@40000 {
++ cell-index = <4>;
++ compatible = "fsl,bman-portal";
++ reg = <0x40000 0x4000 0x4040000 0x4000>;
++ interrupts = <0 181 0x4>;
++ };
++
++ bman-portal@50000 {
++ cell-index = <5>;
++ compatible = "fsl,bman-portal";
++ reg = <0x50000 0x4000 0x4050000 0x4000>;
++ interrupts = <0 183 0x4>;
++ };
++
++ bman-portal@60000 {
++ cell-index = <6>;
++ compatible = "fsl,bman-portal";
++ reg = <0x60000 0x4000 0x4060000 0x4000>;
++ interrupts = <0 185 0x4>;
++ };
++
++ bman-portal@70000 {
++ cell-index = <7>;
++ compatible = "fsl,bman-portal";
++ reg = <0x70000 0x4000 0x4070000 0x4000>;
++ interrupts = <0 187 0x4>;
++ };
++
++ bman-portal@80000 {
++ cell-index = <8>;
++ compatible = "fsl,bman-portal";
++ reg = <0x80000 0x4000 0x4080000 0x4000>;
++ interrupts = <0 189 0x4>;
++ };
++
++ bman-bpids@0 {
++ compatible = "fsl,bpid-range";
++ fsl,bpid-range = <32 32>;
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi b/arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi
+new file mode 100644
+index 00000000..eb5af912
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi
+@@ -0,0 +1,66 @@
++/*
++ * QorIQ DPAA Ethernet device tree stub
++ *
++ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++fsldpaa: fsl,dpaa {
++ compatible = "fsl,ls1043a-dpaa", "simple-bus", "fsl,dpaa";
++ ethernet@0 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet0>;
++ };
++ ethernet@1 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet1>;
++ };
++ ethernet@2 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet2>;
++ };
++ ethernet@3 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet3>;
++ };
++ ethernet@4 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet4>;
++ };
++ ethernet@5 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet5>;
++ };
++ ethernet@8 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet6>;
++ };
++};
++
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
+new file mode 100644
+index 00000000..474bff5e
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
+@@ -0,0 +1,43 @@
++/*
++ * QorIQ FMan v3 10g port #0 device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++ fman0_rx_0x10: port@90000 {
++ cell-index = <0x10>;
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx";
++ reg = <0x90000 0x1000>;
++ fsl,fman-10g-port;
++ };
++
++ fman0_tx_0x30: port@b0000 {
++ cell-index = <0x30>;
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
++ reg = <0xb0000 0x1000>;
++ fsl,fman-10g-port;
++ fsl,qman-channel-id = <0x800>;
++ };
++
++ ethernet@f0000 {
++ cell-index = <0x8>;
++ compatible = "fsl,fman-memac";
++ reg = <0xf0000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x10 &fman0_tx_0x30>;
++ pcsphy-handle = <&pcsphy6>;
++ };
++
++ mdio@f1000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xf1000 0x1000>;
++
++ pcsphy6: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
+new file mode 100644
+index 00000000..d4326f85
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
+@@ -0,0 +1,43 @@
++/*
++ * QorIQ FMan v3 10g port #1 device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++ fman0_rx_0x11: port@91000 {
++ cell-index = <0x11>;
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx";
++ reg = <0x91000 0x1000>;
++ fsl,fman-10g-port;
++ };
++
++ fman0_tx_0x31: port@b1000 {
++ cell-index = <0x31>;
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
++ reg = <0xb1000 0x1000>;
++ fsl,fman-10g-port;
++ fsl,qman-channel-id = <0x801>;
++ };
++
++ ethernet@f2000 {
++ cell-index = <0x9>;
++ compatible = "fsl,fman-memac";
++ reg = <0xf2000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x11 &fman0_tx_0x31>;
++ pcsphy-handle = <&pcsphy7>;
++ };
++
++ mdio@f3000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xf3000 0x1000>;
++
++ pcsphy7: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
+new file mode 100644
+index 00000000..7170cab9
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
+@@ -0,0 +1,42 @@
++/*
++ * QorIQ FMan v3 1g port #0 device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++ fman0_rx_0x08: port@88000 {
++ cell-index = <0x8>;
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
++ reg = <0x88000 0x1000>;
++ };
++
++ fman0_tx_0x28: port@a8000 {
++ cell-index = <0x28>;
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
++ reg = <0xa8000 0x1000>;
++ fsl,qman-channel-id = <0x802>;
++ };
++
++ ethernet@e0000 {
++ cell-index = <0>;
++ compatible = "fsl,fman-memac";
++ reg = <0xe0000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
++ ptp-timer = <&ptp_timer0>;
++ pcsphy-handle = <&pcsphy0>;
++ };
++
++ mdio@e1000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xe1000 0x1000>;
++
++ pcsphy0: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
+new file mode 100644
+index 00000000..c7eb8b6e
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
+@@ -0,0 +1,42 @@
++/*
++ * QorIQ FMan v3 1g port #1 device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++ fman0_rx_0x09: port@89000 {
++ cell-index = <0x9>;
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
++ reg = <0x89000 0x1000>;
++ };
++
++ fman0_tx_0x29: port@a9000 {
++ cell-index = <0x29>;
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
++ reg = <0xa9000 0x1000>;
++ fsl,qman-channel-id = <0x803>;
++ };
++
++ ethernet@e2000 {
++ cell-index = <1>;
++ compatible = "fsl,fman-memac";
++ reg = <0xe2000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
++ ptp-timer = <&ptp_timer0>;
++ pcsphy-handle = <&pcsphy1>;
++ };
++
++ mdio@e3000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xe3000 0x1000>;
++
++ pcsphy1: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
+new file mode 100644
+index 00000000..56f9f0dd
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
+@@ -0,0 +1,42 @@
++/*
++ * QorIQ FMan v3 1g port #2 device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++ fman0_rx_0x0a: port@8a000 {
++ cell-index = <0xa>;
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
++ reg = <0x8a000 0x1000>;
++ };
++
++ fman0_tx_0x2a: port@aa000 {
++ cell-index = <0x2a>;
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
++ reg = <0xaa000 0x1000>;
++ fsl,qman-channel-id = <0x804>;
++ };
++
++ ethernet@e4000 {
++ cell-index = <2>;
++ compatible = "fsl,fman-memac";
++ reg = <0xe4000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x0a &fman0_tx_0x2a>;
++ ptp-timer = <&ptp_timer0>;
++ pcsphy-handle = <&pcsphy2>;
++ };
++
++ mdio@e5000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xe5000 0x1000>;
++
++ pcsphy2: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
+new file mode 100644
+index 00000000..bbe7dbaf
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
+@@ -0,0 +1,42 @@
++/*
++ * QorIQ FMan v3 1g port #3 device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++ fman0_rx_0x0b: port@8b000 {
++ cell-index = <0xb>;
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
++ reg = <0x8b000 0x1000>;
++ };
++
++ fman0_tx_0x2b: port@ab000 {
++ cell-index = <0x2b>;
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
++ reg = <0xab000 0x1000>;
++ fsl,qman-channel-id = <0x805>;
++ };
++
++ ethernet@e6000 {
++ cell-index = <3>;
++ compatible = "fsl,fman-memac";
++ reg = <0xe6000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x0b &fman0_tx_0x2b>;
++ ptp-timer = <&ptp_timer0>;
++ pcsphy-handle = <&pcsphy3>;
++ };
++
++ mdio@e7000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xe7000 0x1000>;
++
++ pcsphy3: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
+new file mode 100644
+index 00000000..ead4f062
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
+@@ -0,0 +1,42 @@
++/*
++ * QorIQ FMan v3 1g port #4 device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++ fman0_rx_0x0c: port@8c000 {
++ cell-index = <0xc>;
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
++ reg = <0x8c000 0x1000>;
++ };
++
++ fman0_tx_0x2c: port@ac000 {
++ cell-index = <0x2c>;
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
++ reg = <0xac000 0x1000>;
++ fsl,qman-channel-id = <0x806>;
++ };
++
++ ethernet@e8000 {
++ cell-index = <4>;
++ compatible = "fsl,fman-memac";
++ reg = <0xe8000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x0c &fman0_tx_0x2c>;
++ ptp-timer = <&ptp_timer0>;
++ pcsphy-handle = <&pcsphy4>;
++ };
++
++ mdio@e9000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xe9000 0x1000>;
++
++ pcsphy4: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
+new file mode 100644
+index 00000000..389eadaf
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
+@@ -0,0 +1,42 @@
++/*
++ * QorIQ FMan v3 1g port #5 device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++ fman0_rx_0x0d: port@8d000 {
++ cell-index = <0xd>;
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
++ reg = <0x8d000 0x1000>;
++ };
++
++ fman0_tx_0x2d: port@ad000 {
++ cell-index = <0x2d>;
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
++ reg = <0xad000 0x1000>;
++ fsl,qman-channel-id = <0x807>;
++ };
++
++ ethernet@ea000 {
++ cell-index = <5>;
++ compatible = "fsl,fman-memac";
++ reg = <0xea000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x0d &fman0_tx_0x2d>;
++ ptp-timer = <&ptp_timer0>;
++ pcsphy-handle = <&pcsphy5>;
++ };
++
++ mdio@eb000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xeb000 0x1000>;
++
++ pcsphy5: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi
+new file mode 100644
+index 00000000..2d0df20d
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi
+@@ -0,0 +1,47 @@
++/*
++ * QorIQ FMan v3 OH ports device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++
++ fman0_oh1: port@82000 {
++ cell-index = <0>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x82000 0x1000>;
++ };
++
++ fman0_oh2: port@83000 {
++ cell-index = <1>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x83000 0x1000>;
++ };
++
++ fman0_oh3: port@84000 {
++ cell-index = <2>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x84000 0x1000>;
++ };
++
++ fman0_oh4: port@85000 {
++ cell-index = <3>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x85000 0x1000>;
++ };
++
++ fman0_oh5: port@86000 {
++ cell-index = <4>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x86000 0x1000>;
++ };
++
++ fman0_oh6: port@87000 {
++ cell-index = <5>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x87000 0x1000>;
++ };
++
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+new file mode 100644
+index 00000000..8e089f0c
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+@@ -0,0 +1,130 @@
++/*
++ * QorIQ FMan v3 device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman0: fman@1a00000 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ cell-index = <0>;
++ compatible = "fsl,fman";
++ ranges = <0x0 0x00 0x1a00000 0x100000>;
++ reg = <0x0 0x1a00000 0x0 0x100000>;
++ interrupts = <0 44 0x4>, <0 45 0x4>;
++ clocks = <&clockgen 3 0>;
++ clock-names = "fmanclk";
++ fsl,qman-channel-range = <0x800 0x10>;
++
++ cc {
++ compatible = "fsl,fman-cc";
++ };
++
++ muram@0 {
++ compatible = "fsl,fman-muram";
++ reg = <0x0 0x60000>;
++ };
++
++ bmi@80000 {
++ compatible = "fsl,fman-bmi";
++ reg = <0x80000 0x400>;
++ };
++
++ qmi@80400 {
++ compatible = "fsl,fman-qmi";
++ reg = <0x80400 0x400>;
++ };
++
++ fman0_oh_0x2: port@82000 {
++ cell-index = <0x2>;
++ compatible = "fsl,fman-v3-port-oh";
++ reg = <0x82000 0x1000>;
++ fsl,qman-channel-id = <0x809>;
++ };
++
++ fman0_oh_0x3: port@83000 {
++ cell-index = <0x3>;
++ compatible = "fsl,fman-v3-port-oh";
++ reg = <0x83000 0x1000>;
++ fsl,qman-channel-id = <0x80a>;
++ };
++
++ fman0_oh_0x4: port@84000 {
++ cell-index = <0x4>;
++ compatible = "fsl,fman-v3-port-oh";
++ reg = <0x84000 0x1000>;
++ fsl,qman-channel-id = <0x80b>;
++ };
++
++ fman0_oh_0x5: port@85000 {
++ cell-index = <0x5>;
++ compatible = "fsl,fman-v3-port-oh";
++ reg = <0x85000 0x1000>;
++ fsl,qman-channel-id = <0x80c>;
++ };
++
++ fman0_oh_0x6: port@86000 {
++ cell-index = <0x6>;
++ compatible = "fsl,fman-v3-port-oh";
++ reg = <0x86000 0x1000>;
++ fsl,qman-channel-id = <0x80d>;
++ };
++
++ fman0_oh_0x7: port@87000 {
++ cell-index = <0x7>;
++ compatible = "fsl,fman-v3-port-oh";
++ reg = <0x87000 0x1000>;
++ fsl,qman-channel-id = <0x80e>;
++ };
++
++ policer@c0000 {
++ compatible = "fsl,fman-policer";
++ reg = <0xc0000 0x1000>;
++ };
++
++ keygen@c1000 {
++ compatible = "fsl,fman-keygen";
++ reg = <0xc1000 0x1000>;
++ };
++
++ dma@c2000 {
++ compatible = "fsl,fman-dma";
++ reg = <0xc2000 0x1000>;
++ };
++
++ fpm@c3000 {
++ compatible = "fsl,fman-fpm";
++ reg = <0xc3000 0x1000>;
++ };
++
++ parser@c7000 {
++ compatible = "fsl,fman-parser";
++ reg = <0xc7000 0x1000>;
++ };
++
++ vsps@dc000 {
++ compatible = "fsl,fman-vsps";
++ reg = <0xdc000 0x1000>;
++ };
++
++ mdio0: mdio@fc000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xfc000 0x1000>;
++ };
++
++ xmdio0: mdio@fd000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xfd000 0x1000>;
++ };
++
++ ptp_timer0: ptp-timer@fe000 {
++ compatible = "fsl,fman-ptp-timer", "fsl,fman-rtc";
++ reg = <0xfe000 0x1000>;
++ };
++};
+diff --git a/arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi b/arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi
+new file mode 100644
+index 00000000..4f7edf48
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi
+@@ -0,0 +1,104 @@
++/*
++ * QorIQ QMan Portals device tree
++ *
++ * Copyright 2011-2016 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++&qportals {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "simple-bus";
++
++ qportal0: qman-portal@0 {
++ compatible = "fsl,qman-portal";
++ reg = <0x0 0x4000 0x4000000 0x4000>;
++ interrupts = <0 172 0x4>;
++ cell-index = <0>;
++ };
++
++ qportal1: qman-portal@10000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x10000 0x4000 0x4010000 0x4000>;
++ interrupts = <0 174 0x4>;
++ cell-index = <1>;
++ };
++
++ qportal2: qman-portal@20000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x20000 0x4000 0x4020000 0x4000>;
++ interrupts = <0 176 0x4>;
++ cell-index = <2>;
++ };
++
++ qportal3: qman-portal@30000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x30000 0x4000 0x4030000 0x4000>;
++ interrupts = <0 178 0x4>;
++ cell-index = <3>;
++ };
++
++ qportal4: qman-portal@40000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x40000 0x4000 0x4040000 0x4000>;
++ interrupts = <0 180 0x4>;
++ cell-index = <4>;
++ };
++
++ qportal5: qman-portal@50000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x50000 0x4000 0x4050000 0x4000>;
++ interrupts = <0 182 0x4>;
++ cell-index = <5>;
++ };
++
++ qportal6: qman-portal@60000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x60000 0x4000 0x4060000 0x4000>;
++ interrupts = <0 184 0x4>;
++ cell-index = <6>;
++ };
++
++ qportal7: qman-portal@70000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x70000 0x4000 0x4070000 0x4000>;
++ interrupts = <0 186 0x4>;
++ cell-index = <7>;
++ };
++
++ qportal8: qman-portal@80000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x80000 0x4000 0x4080000 0x4000>;
++ interrupts = <0 188 0x4>;
++ cell-index = <8>;
++ };
++
++ qman-fqids@0 {
++ compatible = "fsl,fqid-range";
++ fsl,fqid-range = <256 256>;
++ };
++
++ qman-fqids@1 {
++ compatible = "fsl,fqid-range";
++ fsl,fqid-range = <32768 32768>;
++ };
++
++ qman-pools@0 {
++ compatible = "fsl,pool-channel-range";
++ fsl,pool-channel-range = <0x401 0xf>;
++ };
++
++ qman-cgrids@0 {
++ compatible = "fsl,cgrid-range";
++ fsl,cgrid-range = <0 256>;
++ };
++
++ qman-ceetm@0 {
++ compatible = "fsl,qman-ceetm";
++ fsl,ceetm-lfqid-range = <0xf00000 0x1000>;
++ fsl,ceetm-sp-range = <0 12>;
++ fsl,ceetm-lni-range = <0 8>;
++ fsl,ceetm-channel-range = <0 32>;
++ };
++};
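
The qman-fqids/pools/cgrids nodes above publish ID ranges as <base count> pairs. A minimal sketch of how a kernel consumer reads such a pair, assuming the usual of_property_read_u32_array() API (the real QMan driver feeds these into its ID allocators and is more involved):

#include <linux/of.h>

/*
 * Read a <base count> range pair such as "fsl,fqid-range".
 * Illustrative helper only.
 */
static int read_id_range(struct device_node *np, const char *prop,
			 u32 *base, u32 *count)
{
	u32 range[2];
	int ret;

	ret = of_property_read_u32_array(np, prop, range, 2);
	if (ret)
		return ret;

	*base = range[0];	/* first ID in the range */
	*count = range[1];	/* number of consecutive IDs */
	return 0;
}
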
+diff --git a/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi
+index 5022432e..65701ada 100644
+--- a/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi
++++ b/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi
+@@ -38,51 +38,61 @@
+ compatible = "simple-bus";
+
+ bman-portal@0 {
++ cell-index = <0>;
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x100000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
++ cell-index = <1>;
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x101000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
++ cell-index = <2>;
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x102000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
++ cell-index = <3>;
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x103000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
++ cell-index = <4>;
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x104000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
++ cell-index = <5>;
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x105000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
++ cell-index = <6>;
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x106000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
++ cell-index = <7>;
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x107000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
++ cell-index = <8>;
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x108000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
++ cell-index = <9>;
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x109000 0x1000>;
+ interrupts = <123 2 0 0>;
+diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
+index c288f3c6..dd200e28 100644
+--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
+@@ -35,14 +35,14 @@
+ fman@400000 {
+ fman0_rx_0x10: port@90000 {
+ cell-index = <0x10>;
+- compatible = "fsl,fman-v3-port-rx";
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx";
+ reg = <0x90000 0x1000>;
+ fsl,fman-10g-port;
+ };
+
+ fman0_tx_0x30: port@b0000 {
+ cell-index = <0x30>;
+- compatible = "fsl,fman-v3-port-tx";
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
+ reg = <0xb0000 0x1000>;
+ fsl,fman-10g-port;
+ };
+diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
+index 94a76982..365770c9 100644
+--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
+@@ -35,14 +35,14 @@
+ fman@400000 {
+ fman0_rx_0x11: port@91000 {
+ cell-index = <0x11>;
+- compatible = "fsl,fman-v3-port-rx";
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx";
+ reg = <0x91000 0x1000>;
+ fsl,fman-10g-port;
+ };
+
+ fman0_tx_0x31: port@b1000 {
+ cell-index = <0x31>;
+- compatible = "fsl,fman-v3-port-tx";
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
+ reg = <0xb1000 0x1000>;
+ fsl,fman-10g-port;
+ };
+--
+2.14.1
+
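
Each portal node in the device tree stubs above carries two reg regions, a cache-enabled and a cache-inhibited one. A rough sketch of how a portal driver could map both regions; this is a hypothetical probe fragment, not the actual bman_portal code:

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

/*
 * Map the cache-enabled (index 0) and cache-inhibited (index 1)
 * portal regions, for illustration.
 */
static int portal_map_regions(struct platform_device *pdev,
			      void __iomem **ce, void __iomem **ci)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*ce = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*ce))
		return PTR_ERR(*ce);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	*ci = devm_ioremap_resource(&pdev->dev, res);
	return PTR_ERR_OR_ZERO(*ci);
}
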
diff --git a/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch b/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch
new file mode 100644
index 0000000000..f61ae06ca2
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch
@@ -0,0 +1,1042 @@
+From 120fa458ffe2250ea58578ccfc85e674005463dc Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 10:53:50 +0800
+Subject: [PATCH] mtd: spi-nor: support layerscape
+
+This is an integrated patch for layerscape qspi support.
+
+Signed-off-by: Suresh Gupta <suresh.gupta@nxp.com>
+Signed-off-by: Yunhui Cui <B56489@freescale.com>
+Signed-off-by: mar.krzeminski <mar.krzeminski@gmail.com>
+Signed-off-by: Alison Wang <b18965@freescale.com>
+Signed-off-by: Nobuhiro Iwamatsu <nobuhiro.iwamatsu.kw@hitachi.com>
+Signed-off-by: LABBE Corentin <clabbe.montjoie@gmail.com>
+Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
+Signed-off-by: Alexander Kurz <akurz@blala.de>
+Signed-off-by: L. D. Pinney <ldpinney@gmail.com>
+Signed-off-by: Ash Benz <ash.benz@bk.ru>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/mtd/mtdchar.c | 2 +-
+ drivers/mtd/spi-nor/fsl-quadspi.c | 356 +++++++++++++++++++++++++++++++-------
+ drivers/mtd/spi-nor/spi-nor.c | 136 +++++++++++++--
+ include/linux/mtd/spi-nor.h | 14 +-
+ 4 files changed, 432 insertions(+), 76 deletions(-)
+
+diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
+index 2a47a3f0..4f21401d 100644
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -451,7 +451,7 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
+ * data. For our userspace tools it is important to dump areas
+ * with ECC errors!
+ * For kernel internal usage it also might return -EUCLEAN
+- * to signal the caller that a bitflip has occured and has
++ * to signal the caller that a bitflip has occurred and has
+ * been corrected by the ECC algorithm.
+ *
+ * Note: currently the standard NAND function, nand_read_oob_std,
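
The comment patched above describes the mtd_read() convention that -EUCLEAN reports corrected bitflips rather than a hard failure. A minimal sketch of a caller honoring that convention (hypothetical helper, for illustration):

#include <linux/mtd/mtd.h>

/*
 * Treat -EUCLEAN (bitflips corrected by ECC) as a successful read;
 * only harder errors abort.
 */
static int read_block_tolerant(struct mtd_info *mtd, loff_t from,
			       size_t len, u_char *buf)
{
	size_t retlen;
	int ret = mtd_read(mtd, from, len, &retlen, buf);

	if (ret == -EUCLEAN)
		return 0;
	return ret;
}
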
+diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
+index 5c82e4ef..8fb75532 100644
+--- a/drivers/mtd/spi-nor/fsl-quadspi.c
++++ b/drivers/mtd/spi-nor/fsl-quadspi.c
+@@ -41,6 +41,8 @@
+ #define QUADSPI_QUIRK_TKT253890 (1 << 2)
+ /* Controller cannot wake up from wait mode, TKT245618 */
+ #define QUADSPI_QUIRK_TKT245618 (1 << 3)
++/* QSPI_AMBA_BASE is internally added by SOC design */
++#define QUADSPI_AMBA_BASE_INTERNAL (0x10000)
+
+ /* The registers */
+ #define QUADSPI_MCR 0x00
+@@ -193,7 +195,7 @@
+ #define QUADSPI_LUT_NUM 64
+
+ /* SEQID -- we can have 16 seqids at most. */
+-#define SEQID_QUAD_READ 0
++#define SEQID_READ 0
+ #define SEQID_WREN 1
+ #define SEQID_WRDI 2
+ #define SEQID_RDSR 3
+@@ -205,15 +207,22 @@
+ #define SEQID_RDCR 9
+ #define SEQID_EN4B 10
+ #define SEQID_BRWR 11
++#define SEQID_RDAR_OR_RD_EVCR 12
++#define SEQID_WRAR 13
++#define SEQID_WD_EVCR 14
+
+ #define QUADSPI_MIN_IOMAP SZ_4M
+
++#define FLASH_VENDOR_SPANSION_FS "s25fs"
++#define SPANSION_S25FS_FAMILY (1 << 1)
++
+ enum fsl_qspi_devtype {
+ FSL_QUADSPI_VYBRID,
+ FSL_QUADSPI_IMX6SX,
+ FSL_QUADSPI_IMX7D,
+ FSL_QUADSPI_IMX6UL,
+ FSL_QUADSPI_LS1021A,
++ FSL_QUADSPI_LS2080A,
+ };
+
+ struct fsl_qspi_devtype_data {
+@@ -224,7 +233,7 @@ struct fsl_qspi_devtype_data {
+ int driver_data;
+ };
+
+-static struct fsl_qspi_devtype_data vybrid_data = {
++static const struct fsl_qspi_devtype_data vybrid_data = {
+ .devtype = FSL_QUADSPI_VYBRID,
+ .rxfifo = 128,
+ .txfifo = 64,
+@@ -232,7 +241,7 @@ static struct fsl_qspi_devtype_data vybrid_data = {
+ .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
+ };
+
+-static struct fsl_qspi_devtype_data imx6sx_data = {
++static const struct fsl_qspi_devtype_data imx6sx_data = {
+ .devtype = FSL_QUADSPI_IMX6SX,
+ .rxfifo = 128,
+ .txfifo = 512,
+@@ -241,7 +250,7 @@ static struct fsl_qspi_devtype_data imx6sx_data = {
+ | QUADSPI_QUIRK_TKT245618,
+ };
+
+-static struct fsl_qspi_devtype_data imx7d_data = {
++static const struct fsl_qspi_devtype_data imx7d_data = {
+ .devtype = FSL_QUADSPI_IMX7D,
+ .rxfifo = 512,
+ .txfifo = 512,
+@@ -250,7 +259,7 @@ static struct fsl_qspi_devtype_data imx7d_data = {
+ | QUADSPI_QUIRK_4X_INT_CLK,
+ };
+
+-static struct fsl_qspi_devtype_data imx6ul_data = {
++static const struct fsl_qspi_devtype_data imx6ul_data = {
+ .devtype = FSL_QUADSPI_IMX6UL,
+ .rxfifo = 128,
+ .txfifo = 512,
+@@ -267,6 +276,14 @@ static struct fsl_qspi_devtype_data ls1021a_data = {
+ .driver_data = 0,
+ };
+
++static struct fsl_qspi_devtype_data ls2080a_data = {
++ .devtype = FSL_QUADSPI_LS2080A,
++ .rxfifo = 128,
++ .txfifo = 64,
++ .ahb_buf_size = 1024,
++ .driver_data = QUADSPI_AMBA_BASE_INTERNAL | QUADSPI_QUIRK_TKT253890,
++};
++
+ #define FSL_QSPI_MAX_CHIP 4
+ struct fsl_qspi {
+ struct spi_nor nor[FSL_QSPI_MAX_CHIP];
+@@ -282,6 +299,7 @@ struct fsl_qspi {
+ u32 nor_size;
+ u32 nor_num;
+ u32 clk_rate;
++ u32 ddr_smp;
+ unsigned int chip_base_addr; /* We may support two chips. */
+ bool has_second_chip;
+ bool big_endian;
+@@ -309,6 +327,23 @@ static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618;
+ }
+
++static inline int has_added_amba_base_internal(struct fsl_qspi *q)
++{
++ return q->devtype_data->driver_data & QUADSPI_AMBA_BASE_INTERNAL;
++}
++
++static u32 fsl_get_nor_vendor(struct spi_nor *nor)
++{
++	u32 vendor_id = 0;
++
++ if (nor->vendor) {
++		if (!memcmp(nor->vendor, FLASH_VENDOR_SPANSION_FS,
++ sizeof(FLASH_VENDOR_SPANSION_FS) - 1))
++ vendor_id = SPANSION_S25FS_FAMILY;
++ }
++ return vendor_id;
++}
++
+ /*
+ * R/W functions for big- or little-endian registers:
+ * The qSPI controller's endian is independent of the CPU core's endian.
+@@ -331,6 +366,31 @@ static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
+ return ioread32(addr);
+ }
+
++static inline u32 *u8tou32(u32 *dest, const u8 *src, size_t n)
++{
++ size_t i;
++ *dest = 0;
++
++ n = n > 4 ? 4 : n;
++ for (i = 0; i < n; i++)
++ *dest |= *src++ << i * 8;
++
++ return dest;
++
++}
++
++static inline u8 *u32tou8(u8 *dest, const u32 *src, size_t n)
++{
++ size_t i;
++ u8 *xdest = dest;
++
++ n = n > 4 ? 4 : n;
++ for (i = 0; i < n; i++)
++ *xdest++ = *src >> i * 8;
++
++ return dest;
++}
++
+ /*
+ * An IC bug makes us to re-arrange the 32-bit data.
+ * The following chips, such as IMX6SLX, have fixed this bug.
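
The u8tou32()/u32tou8() helpers introduced above pack at most four bytes into a word (and back) with byte 0 in the least-significant position, so short buffer tails can pass through the 32-bit TX/RX FIFO registers independent of host endianness. A standalone restatement, runnable in userspace:

#include <stdint.h>
#include <stdio.h>

/*
 * Pack at most four bytes into a 32-bit word, byte 0 in the
 * least-significant position, mirroring u8tou32() above.
 */
static uint32_t pack_u8_to_u32(const uint8_t *src, size_t n)
{
	uint32_t v = 0;
	size_t i;

	if (n > 4)
		n = 4;
	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (i * 8);
	return v;
}

int main(void)
{
	const uint8_t tail[3] = { 0xaa, 0xbb, 0xcc };

	/* A 3-byte tail packs to 0x00ccbbaa on any host endianness. */
	printf("0x%08x\n", (unsigned int)pack_u8_to_u32(tail, 3));
	return 0;
}
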
+@@ -373,8 +433,15 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
+ void __iomem *base = q->iobase;
+ int rxfifo = q->devtype_data->rxfifo;
+ u32 lut_base;
+- u8 cmd, addrlen, dummy;
+ int i;
++ u32 vendor;
++
++ struct spi_nor *nor = &q->nor[0];
++ u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
++ u8 read_op = nor->read_opcode;
++ u8 read_dm = nor->read_dummy;
++
++ vendor = fsl_get_nor_vendor(nor);
+
+ fsl_qspi_unlock_lut(q);
+
+@@ -382,25 +449,51 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
+ for (i = 0; i < QUADSPI_LUT_NUM; i++)
+ qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
+
+- /* Quad Read */
+- lut_base = SEQID_QUAD_READ * 4;
+-
+- if (q->nor_size <= SZ_16M) {
+- cmd = SPINOR_OP_READ_1_1_4;
+- addrlen = ADDR24BIT;
+- dummy = 8;
+- } else {
+- /* use the 4-byte address */
+- cmd = SPINOR_OP_READ_1_1_4;
+- addrlen = ADDR32BIT;
+- dummy = 8;
+- }
+-
+- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
++ /* Read */
++ lut_base = SEQID_READ * 4;
++
++ if (nor->flash_read == SPI_NOR_FAST) {
++ qspi_writel(q, LUT0(CMD, PAD1, read_op) |
++ LUT1(ADDR, PAD1, addrlen),
++ base + QUADSPI_LUT(lut_base));
++ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
++ LUT1(FSL_READ, PAD1, rxfifo),
++ base + QUADSPI_LUT(lut_base + 1));
++ } else if (nor->flash_read == SPI_NOR_QUAD) {
++ if (q->nor_size == 0x4000000) {
++ read_op = 0xEC;
++ qspi_writel(q,
++ LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD4, addrlen),
++ base + QUADSPI_LUT(lut_base));
++ qspi_writel(q,
++ LUT0(MODE, PAD4, 0xff) | LUT1(DUMMY, PAD4, read_dm),
++ base + QUADSPI_LUT(lut_base + 1));
++ qspi_writel(q,
++ LUT0(FSL_READ, PAD4, rxfifo),
++ base + QUADSPI_LUT(lut_base + 2));
++ } else {
++ qspi_writel(q, LUT0(CMD, PAD1, read_op) |
++ LUT1(ADDR, PAD1, addrlen),
++ base + QUADSPI_LUT(lut_base));
++ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
++ LUT1(FSL_READ, PAD4, rxfifo),
++ base + QUADSPI_LUT(lut_base + 1));
++ }
++ } else if (nor->flash_read == SPI_NOR_DDR_QUAD) {
++		/* DDR read mode 1-4-4, such as Spansion s25fl128s. */
++ qspi_writel(q, LUT0(CMD, PAD1, read_op)
++ | LUT1(ADDR_DDR, PAD4, addrlen),
+ base + QUADSPI_LUT(lut_base));
+- qspi_writel(q, LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
++
++ qspi_writel(q, LUT0(MODE_DDR, PAD4, 0xff)
++ | LUT1(DUMMY, PAD1, read_dm),
+ base + QUADSPI_LUT(lut_base + 1));
+
++ qspi_writel(q, LUT0(FSL_READ_DDR, PAD4, rxfifo)
++ | LUT1(JMP_ON_CS, PAD1, 0),
++ base + QUADSPI_LUT(lut_base + 2));
++ }
++
+ /* Write enable */
+ lut_base = SEQID_WREN * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN),
+@@ -409,16 +502,8 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
+ /* Page Program */
+ lut_base = SEQID_PP * 4;
+
+- if (q->nor_size <= SZ_16M) {
+- cmd = SPINOR_OP_PP;
+- addrlen = ADDR24BIT;
+- } else {
+- /* use the 4-byte address */
+- cmd = SPINOR_OP_PP;
+- addrlen = ADDR32BIT;
+- }
+-
+- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
++ qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) |
++ LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+ qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
+ base + QUADSPI_LUT(lut_base + 1));
+@@ -432,10 +517,8 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
+ /* Erase a sector */
+ lut_base = SEQID_SE * 4;
+
+- cmd = q->nor[0].erase_opcode;
+- addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
+-
+- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
++ qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) |
++ LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Erase the whole chip */
+@@ -476,6 +559,44 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR),
+ base + QUADSPI_LUT(lut_base));
+
++
++ /*
++	 * Micron and Spansion flash commands conflict:
++	 * both use the same value 0x65, but with different meanings.
++ */
++ lut_base = SEQID_RDAR_OR_RD_EVCR * 4;
++
++ if (vendor == SPANSION_S25FS_FAMILY) {
++ /*
++ * Read any device register.
++ * Used for Spansion S25FS-S family flash only.
++ */
++ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_SPANSION_RDAR) |
++ LUT1(ADDR, PAD1, ADDR24BIT),
++ base + QUADSPI_LUT(lut_base));
++ qspi_writel(q, LUT0(DUMMY, PAD1, 8) | LUT1(FSL_READ, PAD1, 1),
++ base + QUADSPI_LUT(lut_base + 1));
++ } else {
++ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RD_EVCR),
++ base + QUADSPI_LUT(lut_base));
++ }
++
++ /*
++ * Write any device register.
++ * Used for Spansion S25FS-S family flash only.
++ */
++ lut_base = SEQID_WRAR * 4;
++ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_SPANSION_WRAR) |
++ LUT1(ADDR, PAD1, ADDR24BIT),
++ base + QUADSPI_LUT(lut_base));
++ qspi_writel(q, LUT0(FSL_WRITE, PAD1, 1),
++ base + QUADSPI_LUT(lut_base + 1));
++
++ /* Write EVCR register */
++ lut_base = SEQID_WD_EVCR * 4;
++ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WD_EVCR),
++ base + QUADSPI_LUT(lut_base));
++
+ fsl_qspi_lock_lut(q);
+ }
+
+@@ -483,8 +604,24 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
+ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
+ {
+ switch (cmd) {
++ case SPINOR_OP_READ_1_4_4_D:
++ case SPINOR_OP_READ4_1_4_4_D:
++ case SPINOR_OP_READ4_1_1_4:
+ case SPINOR_OP_READ_1_1_4:
+- return SEQID_QUAD_READ;
++ case SPINOR_OP_READ_FAST:
++ case SPINOR_OP_READ4_FAST:
++ return SEQID_READ;
++ /*
++ * Spansion & Micron use the same command value 0x65
++ * Spansion: SPINOR_OP_SPANSION_RDAR, read any register.
++ * Micron: SPINOR_OP_RD_EVCR,
++ * read enhanced volatile configuration register.
++ * case SPINOR_OP_RD_EVCR:
++ */
++ case SPINOR_OP_SPANSION_RDAR:
++ return SEQID_RDAR_OR_RD_EVCR;
++ case SPINOR_OP_SPANSION_WRAR:
++ return SEQID_WRAR;
+ case SPINOR_OP_WREN:
+ return SEQID_WREN;
+ case SPINOR_OP_WRDI:
+@@ -496,6 +633,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
+ case SPINOR_OP_CHIP_ERASE:
+ return SEQID_CHIP_ERASE;
+ case SPINOR_OP_PP:
++ case SPINOR_OP_PP_4B:
+ return SEQID_PP;
+ case SPINOR_OP_RDID:
+ return SEQID_RDID;
+@@ -507,6 +645,8 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
+ return SEQID_EN4B;
+ case SPINOR_OP_BRWR:
+ return SEQID_BRWR;
++ case SPINOR_OP_WD_EVCR:
++ return SEQID_WD_EVCR;
+ default:
+ if (cmd == q->nor[0].erase_opcode)
+ return SEQID_SE;
+@@ -531,8 +671,11 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
+ /* save the reg */
+ reg = qspi_readl(q, base + QUADSPI_MCR);
+
+- qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr,
+- base + QUADSPI_SFAR);
++ if (has_added_amba_base_internal(q))
++ qspi_writel(q, q->chip_base_addr + addr, base + QUADSPI_SFAR);
++ else
++ qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr,
++ base + QUADSPI_SFAR);
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
+ base + QUADSPI_RBCT);
+ qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
+@@ -582,10 +725,10 @@ static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
+ q->chip_base_addr, tmp);
+
+ if (len >= 4) {
+- *((u32 *)rxbuf) = tmp;
++ u32tou8(rxbuf, &tmp, 4);
+ rxbuf += 4;
+ } else {
+- memcpy(rxbuf, &tmp, len);
++ u32tou8(rxbuf, &tmp, len);
+ break;
+ }
+
+@@ -619,11 +762,12 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q)
+ }
+
+ static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
+- u8 opcode, unsigned int to, u32 *txbuf,
++ u8 opcode, unsigned int to, u8 *txbuf,
+ unsigned count)
+ {
+ int ret, i, j;
+ u32 tmp;
++ u8 byts;
+
+ dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
+ q->chip_base_addr, to, count);
+@@ -633,10 +777,13 @@ static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
+ qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
+
+ /* fill the TX data to the FIFO */
++ byts = count;
+ for (j = 0, i = ((count + 3) / 4); j < i; j++) {
+- tmp = fsl_qspi_endian_xchg(q, *txbuf);
++ u8tou32(&tmp, txbuf, byts);
++ tmp = fsl_qspi_endian_xchg(q, tmp);
+ qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
+- txbuf++;
++ txbuf += 4;
++ byts -= 4;
+ }
+
+ /* fill the TXFIFO upto 16 bytes for i.MX7d */
+@@ -657,11 +804,43 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
+ {
+ int nor_size = q->nor_size;
+ void __iomem *base = q->iobase;
++ u32 mem_base;
++
++ if (has_added_amba_base_internal(q))
++ mem_base = 0x0;
++ else
++ mem_base = q->memmap_phy;
++
++ qspi_writel(q, nor_size + mem_base, base + QUADSPI_SFA1AD);
++ qspi_writel(q, nor_size * 2 + mem_base, base + QUADSPI_SFA2AD);
++ qspi_writel(q, nor_size * 3 + mem_base, base + QUADSPI_SFB1AD);
++ qspi_writel(q, nor_size * 4 + mem_base, base + QUADSPI_SFB2AD);
++}
++
++/*
++ * Enable the controller's DDR quad mode so that DDR quad reads
++ * work with flashes from different vendors.
++ */
++static void set_ddr_quad_mode(struct fsl_qspi *q)
++{
++ u32 reg, reg2;
++
++ reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
++
++ /* Firstly, disable the module */
++ qspi_writel(q, reg | QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
++
++ /* Set the Sampling Register for DDR */
++ reg2 = qspi_readl(q, q->iobase + QUADSPI_SMPR);
++ reg2 &= ~QUADSPI_SMPR_DDRSMP_MASK;
++ reg2 |= (((q->ddr_smp) << QUADSPI_SMPR_DDRSMP_SHIFT) &
++ QUADSPI_SMPR_DDRSMP_MASK);
++ qspi_writel(q, reg2, q->iobase + QUADSPI_SMPR);
++
++ /* Enable the module again (enable the DDR too) */
++ reg |= QUADSPI_MCR_DDR_EN_MASK;
++ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+
+- qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
+- qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
+- qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
+- qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
+ }
+
+ /*
+@@ -681,19 +860,36 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
+ {
+ void __iomem *base = q->iobase;
+ int seqid;
++ const struct fsl_qspi_devtype_data *devtype_data = q->devtype_data;
+
+ /* AHB configuration for access buffer 0/1/2 .*/
+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
++
+ /*
+- * Set ADATSZ with the maximum AHB buffer size to improve the
+- * read performance.
++	 * Erratum A-009282: QuadSPI data prefetch may result in incorrect data
++ * Workaround: Keep the read data size to 64 bits (8 bytes).
++ * This disables the prefetch on the AHB buffer and
++ * prevents this issue from occurring.
+ */
+- qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
+- ((q->devtype_data->ahb_buf_size / 8)
+- << QUADSPI_BUF3CR_ADATSZ_SHIFT),
+- base + QUADSPI_BUF3CR);
++ if (devtype_data->devtype == FSL_QUADSPI_LS2080A ||
++ devtype_data->devtype == FSL_QUADSPI_LS1021A) {
++
++ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
++ (1 << QUADSPI_BUF3CR_ADATSZ_SHIFT),
++ base + QUADSPI_BUF3CR);
++
++ } else {
++ /*
++ * Set ADATSZ with the maximum AHB buffer size to improve the
++ * read performance.
++ */
++ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
++ ((q->devtype_data->ahb_buf_size / 8)
++ << QUADSPI_BUF3CR_ADATSZ_SHIFT),
++ base + QUADSPI_BUF3CR);
++ }
+
+ /* We only use the buffer3 */
+ qspi_writel(q, 0, base + QUADSPI_BUF0IND);
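
The hunk above encodes the A-009282 workaround: BUF3CR's ADATSZ field counts 8-byte units, and pinning it to one unit disables AHB prefetch on the affected SoCs. A sketch of the register math; the mask and shift values mirror the driver's QUADSPI_BUF3CR_* defines and are restated here as assumptions:

#include <stdint.h>

#define BUF3CR_ALLMST_MASK	(1U << 31)	/* allow any AHB master */
#define BUF3CR_ADATSZ_SHIFT	8		/* size field, 8-byte units */

/* ADATSZ is pinned to one 8-byte unit when the erratum applies. */
static uint32_t buf3cr_value(unsigned int ahb_buf_size, int erratum_a009282)
{
	unsigned int adatsz = erratum_a009282 ? 1 : ahb_buf_size / 8;

	return BUF3CR_ALLMST_MASK | (adatsz << BUF3CR_ADATSZ_SHIFT);
}
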
+@@ -704,6 +900,11 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
+ seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
+ qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
+ q->iobase + QUADSPI_BFGENCR);
++
++ /* enable the DDR quad read */
++ if (q->nor->flash_read == SPI_NOR_DDR_QUAD)
++ set_ddr_quad_mode(q);
++
+ }
+
+ /* This function was used to prepare and enable QSPI clock */
+@@ -822,6 +1023,7 @@ static const struct of_device_id fsl_qspi_dt_ids[] = {
+ { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
+ { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
++ { .compatible = "fsl,ls2080a-qspi", .data = (void *)&ls2080a_data, },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
+@@ -835,8 +1037,12 @@ static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+ {
+ int ret;
+ struct fsl_qspi *q = nor->priv;
++ u32 to = 0;
++
++ if (opcode == SPINOR_OP_SPANSION_RDAR)
++ u8tou32(&to, nor->cmd_buf, 4);
+
+- ret = fsl_qspi_runcmd(q, opcode, 0, len);
++ ret = fsl_qspi_runcmd(q, opcode, to, len);
+ if (ret)
+ return ret;
+
+@@ -848,9 +1054,13 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+ {
+ struct fsl_qspi *q = nor->priv;
+ int ret;
++ u32 to = 0;
++
++ if (opcode == SPINOR_OP_SPANSION_WRAR)
++ u8tou32(&to, nor->cmd_buf, 4);
+
+ if (!buf) {
+- ret = fsl_qspi_runcmd(q, opcode, 0, 1);
++ ret = fsl_qspi_runcmd(q, opcode, to, 1);
+ if (ret)
+ return ret;
+
+@@ -859,7 +1069,7 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+
+ } else if (len > 0) {
+ ret = fsl_qspi_nor_write(q, nor, opcode, 0,
+- (u32 *)buf, len);
++ buf, len);
+ if (ret > 0)
+ return 0;
+ } else {
+@@ -875,7 +1085,7 @@ static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to,
+ {
+ struct fsl_qspi *q = nor->priv;
+ ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
+- (u32 *)buf, len);
++ (u8 *)buf, len);
+
+ /* invalid the data in the AHB buffer. */
+ fsl_qspi_invalid(q);
+@@ -922,7 +1132,7 @@ static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from,
+ len);
+
+ /* Read out the data directly from the AHB buffer.*/
+- memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
++	memcpy_fromio(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+ len);
+
+ return len;
+@@ -980,6 +1190,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ int ret, i = 0;
++ int find_node;
++ enum read_mode mode = SPI_NOR_QUAD;
+
+ q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+@@ -1027,6 +1239,12 @@ static int fsl_qspi_probe(struct platform_device *pdev)
+ goto clk_failed;
+ }
+
++ /* find ddrsmp value */
++ ret = of_property_read_u32(dev->of_node, "fsl,ddr-sampling-point",
++ &q->ddr_smp);
++ if (ret)
++ q->ddr_smp = 0;
++
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+@@ -1050,6 +1268,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
+
+ mutex_init(&q->lock);
+
++ find_node = 0;
+ /* iterate the subnodes. */
+ for_each_available_child_of_node(dev->of_node, np) {
+ /* skip the holes */
+@@ -1076,18 +1295,25 @@ static int fsl_qspi_probe(struct platform_device *pdev)
+ ret = of_property_read_u32(np, "spi-max-frequency",
+ &q->clk_rate);
+ if (ret < 0)
+- goto mutex_failed;
++ continue;
+
+ /* set the chip address for READID */
+ fsl_qspi_set_base_addr(q, nor);
+
+- ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
++ ret = of_property_read_bool(np, "m25p,fast-read");
++ mode = (ret) ? SPI_NOR_FAST : SPI_NOR_QUAD;
++ /* Can we enable the DDR Quad Read? */
++ ret = of_property_read_bool(np, "ddr-quad-read");
+ if (ret)
+- goto mutex_failed;
++ mode = SPI_NOR_DDR_QUAD;
++
++ ret = spi_nor_scan(nor, NULL, mode);
++ if (ret)
++ continue;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+- goto mutex_failed;
++ continue;
+
+ /* Set the correct NOR size now. */
+ if (q->nor_size == 0) {
+@@ -1110,8 +1336,12 @@ static int fsl_qspi_probe(struct platform_device *pdev)
+ nor->page_size = q->devtype_data->txfifo;
+
+ i++;
++ find_node++;
+ }
+
++ if (find_node == 0)
++ goto mutex_failed;
++
+ /* finish the rest init. */
+ ret = fsl_qspi_nor_setup_last(q);
+ if (ret)
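
The probe changes above treat fsl,ddr-sampling-point as an optional u32 defaulting to 0 and select the read mode from boolean properties. A condensed sketch of that property handling, using the standard of_property_read_*() helpers (property names follow the patch):

#include <linux/of.h>

/* Optional DT properties read at probe time. */
static void parse_qspi_props(struct device_node *np,
			     u32 *ddr_smp, bool *ddr_quad)
{
	/* Absent property falls back to sampling point 0. */
	if (of_property_read_u32(np, "fsl,ddr-sampling-point", ddr_smp))
		*ddr_smp = 0;

	/* Boolean property selects the DDR quad read mode. */
	*ddr_quad = of_property_read_bool(np, "ddr-quad-read");
}
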
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index 793d321d..190e0e45 100644
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -40,6 +40,13 @@
+ #define SPI_NOR_MAX_ID_LEN 6
+ #define SPI_NOR_MAX_ADDR_WIDTH 4
+
++#define SPI_NOR_MICRON_WRITE_ENABLE 0x7f
++/* Added for S25FS-S family flash */
++#define SPINOR_CONFIG_REG3_OFFSET 0x800004
++#define CR3V_4KB_ERASE_UNABLE 0x8
++#define SPINOR_S25FS_FAMILY_ID 0x81
++
++
+ struct flash_info {
+ char *name;
+
+@@ -68,7 +75,8 @@ struct flash_info {
+ #define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
+ #define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
+ #define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
+-#define USE_FSR BIT(7) /* use flag status register */
++#define USE_FSR BIT(13) /* use flag status register */
++#define SPI_NOR_DDR_QUAD_READ BIT(7) /* Flash supports DDR Quad Read */
+ #define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
+ #define SPI_NOR_HAS_TB BIT(9) /*
+ * Flash SR has Top/Bottom (TB) protect
+@@ -85,9 +93,11 @@ struct flash_info {
+ * Use dedicated 4byte address op codes
+ * to support memory size above 128Mib.
+ */
++#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
+ };
+
+ #define JEDEC_MFR(info) ((info)->id[0])
++#define EXT_ID(info) ((info)->id[5])
+
+ static const struct flash_info *spi_nor_match_id(const char *name);
+
+@@ -132,7 +142,7 @@ static int read_fsr(struct spi_nor *nor)
+ /*
+ * Read configuration register, returning its value in the
+ * location. Return the configuration register value.
+- * Returns negative if error occured.
++ * Returns negative if error occurred.
+ */
+ static int read_cr(struct spi_nor *nor)
+ {
+@@ -160,6 +170,8 @@ static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
+ case SPI_NOR_DUAL:
+ case SPI_NOR_QUAD:
+ return 8;
++ case SPI_NOR_DDR_QUAD:
++ return 6;
+ case SPI_NOR_NORMAL:
+ return 0;
+ }
+@@ -961,6 +973,8 @@ static const struct flash_info spi_nor_ids[] = {
+
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
++ { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
++ { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
+
+ /* Everspin */
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+@@ -1014,12 +1028,15 @@ static const struct flash_info spi_nor_ids[] = {
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
++ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
++ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
++ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, 0) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+- { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
++ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
+@@ -1033,10 +1050,11 @@ static const struct flash_info spi_nor_ids[] = {
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
++ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
++ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
++ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+@@ -1054,8 +1072,11 @@ static const struct flash_info spi_nor_ids[] = {
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+- { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++ { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512, 0)},
++ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ
++ | SPI_NOR_DDR_QUAD_READ) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)},
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+@@ -1130,6 +1151,9 @@ static const struct flash_info spi_nor_ids[] = {
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
++ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
++ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
++ { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ {
+ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
+@@ -1192,6 +1216,53 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
+ id[0], id[1], id[2]);
+ return ERR_PTR(-ENODEV);
+ }
++/*
++ * The S25FS-S family physical sectors may be configured as a
++ * hybrid combination of eight 4-kB parameter sectors
++ * at the top or bottom of the address space with all
++ * but one of the remaining sectors being uniform size.
++ * The Parameter Sector Erase commands (20h or 21h) must
++ * be used to erase the 4-kB parameter sectors individually.
++ * The Sector (uniform sector) Erase commands (D8h or DCh)
++ * must be used to erase any of the remaining
++ * sectors, including the portion of highest or lowest address
++ * sector that is not overlaid by the parameter sectors.
++ * The uniform sector erase command has no effect on parameter sectors.
++ */
++static int spansion_s25fs_disable_4kb_erase(struct spi_nor *nor)
++{
++ struct fsl_qspi *q;
++ u32 cr3v_addr = SPINOR_CONFIG_REG3_OFFSET;
++ u8 cr3v = 0x0;
++ int ret = 0x0;
++
++ q = nor->priv;
++
++ nor->cmd_buf[2] = cr3v_addr >> 16;
++ nor->cmd_buf[1] = cr3v_addr >> 8;
++ nor->cmd_buf[0] = cr3v_addr >> 0;
++
++ ret = nor->read_reg(nor, SPINOR_OP_SPANSION_RDAR, &cr3v, 1);
++ if (ret)
++ return ret;
++ if (cr3v & CR3V_4KB_ERASE_UNABLE)
++ return 0;
++ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
++ if (ret)
++ return ret;
++ cr3v = CR3V_4KB_ERASE_UNABLE;
++ nor->program_opcode = SPINOR_OP_SPANSION_WRAR;
++ nor->write(nor, cr3v_addr, 1, &cr3v);
++
++ ret = nor->read_reg(nor, SPINOR_OP_SPANSION_RDAR, &cr3v, 1);
++ if (ret)
++ return ret;
++ if (!(cr3v & CR3V_4KB_ERASE_UNABLE))
++ return -EPERM;
++
++ return 0;
++}
++
+
+ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+@@ -1411,7 +1482,7 @@ static int macronix_quad_enable(struct spi_nor *nor)
+ * Write status Register and configuration register with 2 bytes
+ * The first byte will be written to the status register, while the
+ * second byte will be written to the configuration register.
+- * Return negative if error occured.
++ * Return negative if error occurred.
+ */
+ static int write_sr_cr(struct spi_nor *nor, u16 val)
+ {
+@@ -1459,6 +1530,24 @@ static int spansion_quad_enable(struct spi_nor *nor)
+ return 0;
+ }
+
++static int set_ddr_quad_mode(struct spi_nor *nor, const struct flash_info *info)
++{
++ int status;
++
++ switch (JEDEC_MFR(info)) {
++ case SNOR_MFR_SPANSION:
++ status = spansion_quad_enable(nor);
++ if (status) {
++ dev_err(nor->dev, "Spansion DDR quad-read not enabled\n");
++ return status;
++ }
++ return status;
++ default:
++ return -EINVAL;
++ }
++}
++
++
+ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
+ {
+ int status;
+@@ -1604,9 +1693,25 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
+ write_sr(nor, 0);
+ spi_nor_wait_till_ready(nor);
+ }
++ if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
++ ret = read_sr(nor);
++ ret &= SPI_NOR_MICRON_WRITE_ENABLE;
++
++ write_enable(nor);
++ write_sr(nor, ret);
++ }
++
++ if (EXT_ID(info) == SPINOR_S25FS_FAMILY_ID) {
++ ret = spansion_s25fs_disable_4kb_erase(nor);
++ if (ret)
++ return ret;
++ }
++
+
+ if (!mtd->name)
+ mtd->name = dev_name(dev);
++ if (info->name)
++ nor->vendor = info->name;
+ mtd->priv = nor;
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+@@ -1639,6 +1744,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
+ nor->flags |= SNOR_F_USE_FSR;
+ if (info->flags & SPI_NOR_HAS_TB)
+ nor->flags |= SNOR_F_HAS_SR_TB;
++ if (info->flags & NO_CHIP_ERASE)
++ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+ #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+@@ -1676,9 +1783,15 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
+ /* Some devices cannot do fast-read, no matter what DT tells us */
+ if (info->flags & SPI_NOR_NO_FR)
+ nor->flash_read = SPI_NOR_NORMAL;
+-
+- /* Quad/Dual-read mode takes precedence over fast/normal */
+- if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
++ /* DDR Quad/Quad/Dual-read mode takes precedence over fast/normal */
++ if (mode == SPI_NOR_DDR_QUAD && info->flags & SPI_NOR_DDR_QUAD_READ) {
++ ret = set_ddr_quad_mode(nor, info);
++ if (ret) {
++ dev_err(dev, "DDR quad mode not supported\n");
++ return ret;
++ }
++ nor->flash_read = SPI_NOR_DDR_QUAD;
++ } else if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
+ ret = set_quad_mode(nor, info);
+ if (ret) {
+ dev_err(dev, "quad mode not supported\n");
+@@ -1691,6 +1804,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
+
+ /* Default commands */
+ switch (nor->flash_read) {
++ case SPI_NOR_DDR_QUAD:
++ nor->read_opcode = SPINOR_OP_READ4_1_4_4_D;
++ break;
+ case SPI_NOR_QUAD:
+ nor->read_opcode = SPINOR_OP_READ_1_1_4;
+ break;
+diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
+index f2a71803..5003ff64 100644
+--- a/include/linux/mtd/spi-nor.h
++++ b/include/linux/mtd/spi-nor.h
+@@ -31,10 +31,10 @@
+
+ /*
+ * Note on opcode nomenclature: some opcodes have a format like
+- * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number
++ * SPINOR_OP_FUNCTION{4,}_x_y_z{_D}. The numbers x, y, and z stand for the number
+ * of I/O lines used for the opcode, address, and data (respectively). The
+ * FUNCTION has an optional suffix of '4', to represent an opcode which
+- * requires a 4-byte (32-bit) address.
++ * requires a 4-byte (32-bit) address. The suffix of 'D' stands for DDR mode.
+ */
+
+ /* Flash opcodes. */
+@@ -46,7 +46,9 @@
+ #define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */
+ #define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */
+ #define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */
++#define SPINOR_OP_READ_1_4_4_D 0xed /* Read data bytes (DDR Quad SPI) */
+ #define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */
++#define SPINOR_OP_READ4_1_4_4_D 0xee /* Read data bytes (DDR Quad SPI) */
+ #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */
+ #define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */
+ #define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */
+@@ -62,9 +64,11 @@
+ /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
+ #define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
+ #define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */
++#define SPINOR_OP_READ4_FAST 0x0c /* Read data bytes (high frequency) */
+ #define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */
+ #define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */
+ #define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */
++#define SPINOR_OP_READ4_1_1_4 0x6c /* Read data bytes (Quad SPI) */
+ #define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */
+ #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */
+ #define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */
+@@ -94,6 +98,10 @@
+ /* Used for Spansion flashes only. */
+ #define SPINOR_OP_BRWR 0x17 /* Bank register write */
+
++/* Used for Spansion S25FS-S family flash only. */
++#define SPINOR_OP_SPANSION_RDAR 0x65 /* Read any device register */
++#define SPINOR_OP_SPANSION_WRAR 0x71 /* Write any device register */
++
+ /* Used for Micron flashes only. */
+ #define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
+ #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
+@@ -124,6 +132,7 @@ enum read_mode {
+ SPI_NOR_FAST,
+ SPI_NOR_DUAL,
+ SPI_NOR_QUAD,
++ SPI_NOR_DDR_QUAD,
+ };
+
+ #define SPI_NOR_MAX_CMD_SIZE 8
+@@ -189,6 +198,7 @@ struct spi_nor {
+ bool sst_write_second;
+ u32 flags;
+ u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
++ char *vendor;
+
+ int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+ void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+--
+2.14.1
+
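
The S25FS-S handling in the patch above follows a read/modify/verify cycle on the volatile CR3V register via the RDAR/WRAR opcodes. An abstract restatement with hypothetical I/O callbacks; note the sketch ORs the bit in to preserve the other CR3V fields, whereas the patch writes the bit value outright:

#include <stdint.h>

/* Hypothetical register I/O callbacks standing in for RDAR/WRAR. */
struct reg_ops {
	int (*read)(uint32_t addr, uint8_t *val);
	int (*write)(uint32_t addr, uint8_t val);
};

#define CR3V_ADDR		0x800004	/* volatile config register 3 */
#define CR3V_4KB_ERASE_DISABLE	0x08

/* Returns 0 once 4-KB parameter-sector erase is disabled. */
static int disable_4kb_erase(const struct reg_ops *ops)
{
	uint8_t cr3v;
	int ret = ops->read(CR3V_ADDR, &cr3v);

	if (ret)
		return ret;
	if (cr3v & CR3V_4KB_ERASE_DISABLE)
		return 0;		/* already disabled */

	ret = ops->write(CR3V_ADDR, cr3v | CR3V_4KB_ERASE_DISABLE);
	if (ret)
		return ret;

	ret = ops->read(CR3V_ADDR, &cr3v);	/* verify the write stuck */
	if (ret)
		return ret;
	return (cr3v & CR3V_4KB_ERASE_DISABLE) ? 0 : -1;
}
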
diff --git a/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch b/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch
new file mode 100644
index 0000000000..e45d2635fd
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch
@@ -0,0 +1,412 @@
+From c0e4767d3b26f21e5043fe2d15a24a1958de766e Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 10:17:28 +0800
+Subject: [PATCH] mtd: support layerscape
+
+This is an integrated patch for layerscape ifc-nor-nand support.
+
+Signed-off-by: Alison Wang <b18965@freescale.com>
+Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/memory/Kconfig | 2 +-
+ drivers/memory/fsl_ifc.c | 263 ++++++++++++++++++++++++++++++++++++++++
+ drivers/mtd/maps/physmap_of.c | 4 +
+ drivers/mtd/nand/Kconfig | 2 +-
+ drivers/mtd/nand/fsl_ifc_nand.c | 5 +-
+ include/linux/fsl_ifc.h | 7 ++
+ 6 files changed, 280 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
+index 4b4c0c3c..820f5590 100644
+--- a/drivers/memory/Kconfig
++++ b/drivers/memory/Kconfig
+@@ -115,7 +115,7 @@ config FSL_CORENET_CF
+
+ config FSL_IFC
+ bool
+- depends on FSL_SOC || ARCH_LAYERSCAPE
++ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A
+
+ config JZ4780_NEMC
+ bool "Ingenic JZ4780 SoC NEMC driver"
+diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
+index 1b182b11..10d2a5f8 100644
+--- a/drivers/memory/fsl_ifc.c
++++ b/drivers/memory/fsl_ifc.c
+@@ -24,6 +24,7 @@
+ #include <linux/compiler.h>
+ #include <linux/sched.h>
+ #include <linux/spinlock.h>
++#include <linux/delay.h>
+ #include <linux/types.h>
+ #include <linux/slab.h>
+ #include <linux/io.h>
+@@ -37,6 +38,8 @@
+
+ struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+ EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
++#define FSL_IFC_V1_3_0 0x01030000
++#define IFC_TIMEOUT_MSECS 1000 /* 1000ms */
+
+ /*
+ * convert_ifc_address - convert the base address
+@@ -311,6 +314,261 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
+ return ret;
+ }
+
++#ifdef CONFIG_PM_SLEEP
++/* save ifc registers */
++static int fsl_ifc_suspend(struct device *dev)
++{
++ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev);
++ struct fsl_ifc_global __iomem *fcm = ctrl->gregs;
++ struct fsl_ifc_runtime __iomem *runtime = ctrl->rregs;
++ __be32 nand_evter_intr_en, cm_evter_intr_en, nor_evter_intr_en,
++ gpcm_evter_intr_en;
++ uint32_t ifc_bank, i;
++
++ ctrl->saved_gregs = kzalloc(sizeof(struct fsl_ifc_global), GFP_KERNEL);
++ if (!ctrl->saved_gregs)
++ return -ENOMEM;
++ ctrl->saved_rregs = kzalloc(sizeof(struct fsl_ifc_runtime), GFP_KERNEL);
++ if (!ctrl->saved_rregs)
++ return -ENOMEM;
++
++ cm_evter_intr_en = ifc_in32(&fcm->cm_evter_intr_en);
++ nand_evter_intr_en = ifc_in32(&runtime->ifc_nand.nand_evter_intr_en);
++ nor_evter_intr_en = ifc_in32(&runtime->ifc_nor.nor_evter_intr_en);
++ gpcm_evter_intr_en = ifc_in32(&runtime->ifc_gpcm.gpcm_evter_intr_en);
++
++	/* IFC interrupts disabled */
++
++ ifc_out32(0x0, &fcm->cm_evter_intr_en);
++ ifc_out32(0x0, &runtime->ifc_nand.nand_evter_intr_en);
++ ifc_out32(0x0, &runtime->ifc_nor.nor_evter_intr_en);
++ ifc_out32(0x0, &runtime->ifc_gpcm.gpcm_evter_intr_en);
++
++ if (ctrl->saved_gregs) {
++ for (ifc_bank = 0; ifc_bank < FSL_IFC_BANK_COUNT; ifc_bank++) {
++ ctrl->saved_gregs->cspr_cs[ifc_bank].cspr_ext =
++ ifc_in32(&fcm->cspr_cs[ifc_bank].cspr_ext);
++ ctrl->saved_gregs->cspr_cs[ifc_bank].cspr =
++ ifc_in32(&fcm->cspr_cs[ifc_bank].cspr);
++ ctrl->saved_gregs->amask_cs[ifc_bank].amask =
++ ifc_in32(&fcm->amask_cs[ifc_bank].amask);
++ ctrl->saved_gregs->csor_cs[ifc_bank].csor_ext =
++ ifc_in32(&fcm->csor_cs[ifc_bank].csor_ext);
++ ctrl->saved_gregs->csor_cs[ifc_bank].csor =
++ ifc_in32(&fcm->csor_cs[ifc_bank].csor);
++ for (i = 0; i < 4; i++) {
++ ctrl->saved_gregs->ftim_cs[ifc_bank].ftim[i] =
++ ifc_in32(
++ &fcm->ftim_cs[ifc_bank].ftim[i]);
++ }
++ }
++
++ ctrl->saved_gregs->rb_map = ifc_in32(&fcm->rb_map);
++ ctrl->saved_gregs->wb_map = ifc_in32(&fcm->wb_map);
++ ctrl->saved_gregs->ifc_gcr = ifc_in32(&fcm->ifc_gcr);
++ ctrl->saved_gregs->ddr_ccr_low = ifc_in32(&fcm->ddr_ccr_low);
++ ctrl->saved_gregs->cm_evter_en = ifc_in32(&fcm->cm_evter_en);
++ }
++
++ if (ctrl->saved_rregs) {
++ /* IFC controller NAND machine registers */
++ ctrl->saved_rregs->ifc_nand.ncfgr =
++ ifc_in32(&runtime->ifc_nand.ncfgr);
++ ctrl->saved_rregs->ifc_nand.nand_fcr0 =
++ ifc_in32(&runtime->ifc_nand.nand_fcr0);
++ ctrl->saved_rregs->ifc_nand.nand_fcr1 =
++ ifc_in32(&runtime->ifc_nand.nand_fcr1);
++ ctrl->saved_rregs->ifc_nand.row0 =
++ ifc_in32(&runtime->ifc_nand.row0);
++ ctrl->saved_rregs->ifc_nand.row1 =
++ ifc_in32(&runtime->ifc_nand.row1);
++ ctrl->saved_rregs->ifc_nand.col0 =
++ ifc_in32(&runtime->ifc_nand.col0);
++ ctrl->saved_rregs->ifc_nand.col1 =
++ ifc_in32(&runtime->ifc_nand.col1);
++ ctrl->saved_rregs->ifc_nand.row2 =
++ ifc_in32(&runtime->ifc_nand.row2);
++ ctrl->saved_rregs->ifc_nand.col2 =
++ ifc_in32(&runtime->ifc_nand.col2);
++ ctrl->saved_rregs->ifc_nand.row3 =
++ ifc_in32(&runtime->ifc_nand.row3);
++ ctrl->saved_rregs->ifc_nand.col3 =
++ ifc_in32(&runtime->ifc_nand.col3);
++
++ ctrl->saved_rregs->ifc_nand.nand_fbcr =
++ ifc_in32(&runtime->ifc_nand.nand_fbcr);
++ ctrl->saved_rregs->ifc_nand.nand_fir0 =
++ ifc_in32(&runtime->ifc_nand.nand_fir0);
++ ctrl->saved_rregs->ifc_nand.nand_fir1 =
++ ifc_in32(&runtime->ifc_nand.nand_fir1);
++ ctrl->saved_rregs->ifc_nand.nand_fir2 =
++ ifc_in32(&runtime->ifc_nand.nand_fir2);
++ ctrl->saved_rregs->ifc_nand.nand_csel =
++ ifc_in32(&runtime->ifc_nand.nand_csel);
++ ctrl->saved_rregs->ifc_nand.nandseq_strt =
++ ifc_in32(
++ &runtime->ifc_nand.nandseq_strt);
++ ctrl->saved_rregs->ifc_nand.nand_evter_en =
++ ifc_in32(
++ &runtime->ifc_nand.nand_evter_en);
++ ctrl->saved_rregs->ifc_nand.nanndcr =
++ ifc_in32(&runtime->ifc_nand.nanndcr);
++ ctrl->saved_rregs->ifc_nand.nand_dll_lowcfg0 =
++ ifc_in32(
++ &runtime->ifc_nand.nand_dll_lowcfg0);
++ ctrl->saved_rregs->ifc_nand.nand_dll_lowcfg1 =
++ ifc_in32(
++ &runtime->ifc_nand.nand_dll_lowcfg1);
++
++ /* IFC controller NOR machine registers */
++ ctrl->saved_rregs->ifc_nor.nor_evter_en =
++ ifc_in32(
++ &runtime->ifc_nor.nor_evter_en);
++ ctrl->saved_rregs->ifc_nor.norcr =
++ ifc_in32(&runtime->ifc_nor.norcr);
++
++ /* IFC controller GPCM Machine registers */
++ ctrl->saved_rregs->ifc_gpcm.gpcm_evter_en =
++ ifc_in32(
++ &runtime->ifc_gpcm.gpcm_evter_en);
++ }
++
++	/* save the interrupt values */
++ ctrl->saved_gregs->cm_evter_intr_en = cm_evter_intr_en;
++ ctrl->saved_rregs->ifc_nand.nand_evter_intr_en = nand_evter_intr_en;
++ ctrl->saved_rregs->ifc_nor.nor_evter_intr_en = nor_evter_intr_en;
++ ctrl->saved_rregs->ifc_gpcm.gpcm_evter_intr_en = gpcm_evter_intr_en;
++
++ return 0;
++}
++
++/* restore ifc registers */
++static int fsl_ifc_resume(struct device *dev)
++{
++ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev);
++ struct fsl_ifc_global __iomem *fcm = ctrl->gregs;
++ struct fsl_ifc_runtime __iomem *runtime = ctrl->rregs;
++ struct fsl_ifc_global *savd_gregs = ctrl->saved_gregs;
++ struct fsl_ifc_runtime *savd_rregs = ctrl->saved_rregs;
++ uint32_t ver = 0, ncfgr, timeout, ifc_bank, i;
++
++	/*
++	 * IFC interrupts disabled
++	 */
++ ifc_out32(0x0, &fcm->cm_evter_intr_en);
++ ifc_out32(0x0, &runtime->ifc_nand.nand_evter_intr_en);
++ ifc_out32(0x0, &runtime->ifc_nor.nor_evter_intr_en);
++ ifc_out32(0x0, &runtime->ifc_gpcm.gpcm_evter_intr_en);
++
++
++ if (ctrl->saved_gregs) {
++ for (ifc_bank = 0; ifc_bank < FSL_IFC_BANK_COUNT; ifc_bank++) {
++ ifc_out32(savd_gregs->cspr_cs[ifc_bank].cspr_ext,
++ &fcm->cspr_cs[ifc_bank].cspr_ext);
++ ifc_out32(savd_gregs->cspr_cs[ifc_bank].cspr,
++ &fcm->cspr_cs[ifc_bank].cspr);
++ ifc_out32(savd_gregs->amask_cs[ifc_bank].amask,
++ &fcm->amask_cs[ifc_bank].amask);
++ ifc_out32(savd_gregs->csor_cs[ifc_bank].csor_ext,
++ &fcm->csor_cs[ifc_bank].csor_ext);
++ ifc_out32(savd_gregs->csor_cs[ifc_bank].csor,
++ &fcm->csor_cs[ifc_bank].csor);
++ for (i = 0; i < 4; i++) {
++ ifc_out32(savd_gregs->ftim_cs[ifc_bank].ftim[i],
++ &fcm->ftim_cs[ifc_bank].ftim[i]);
++ }
++ }
++ ifc_out32(savd_gregs->rb_map, &fcm->rb_map);
++ ifc_out32(savd_gregs->wb_map, &fcm->wb_map);
++ ifc_out32(savd_gregs->ifc_gcr, &fcm->ifc_gcr);
++ ifc_out32(savd_gregs->ddr_ccr_low, &fcm->ddr_ccr_low);
++ ifc_out32(savd_gregs->cm_evter_en, &fcm->cm_evter_en);
++ }
++
++ if (ctrl->saved_rregs) {
++ /* IFC controller NAND machine registers */
++ ifc_out32(savd_rregs->ifc_nand.ncfgr,
++ &runtime->ifc_nand.ncfgr);
++ ifc_out32(savd_rregs->ifc_nand.nand_fcr0,
++ &runtime->ifc_nand.nand_fcr0);
++ ifc_out32(savd_rregs->ifc_nand.nand_fcr1,
++ &runtime->ifc_nand.nand_fcr1);
++ ifc_out32(savd_rregs->ifc_nand.row0, &runtime->ifc_nand.row0);
++ ifc_out32(savd_rregs->ifc_nand.row1, &runtime->ifc_nand.row1);
++ ifc_out32(savd_rregs->ifc_nand.col0, &runtime->ifc_nand.col0);
++ ifc_out32(savd_rregs->ifc_nand.col1, &runtime->ifc_nand.col1);
++ ifc_out32(savd_rregs->ifc_nand.row2, &runtime->ifc_nand.row2);
++ ifc_out32(savd_rregs->ifc_nand.col2, &runtime->ifc_nand.col2);
++ ifc_out32(savd_rregs->ifc_nand.row3, &runtime->ifc_nand.row3);
++ ifc_out32(savd_rregs->ifc_nand.col3, &runtime->ifc_nand.col3);
++ ifc_out32(savd_rregs->ifc_nand.nand_fbcr,
++ &runtime->ifc_nand.nand_fbcr);
++ ifc_out32(savd_rregs->ifc_nand.nand_fir0,
++ &runtime->ifc_nand.nand_fir0);
++ ifc_out32(savd_rregs->ifc_nand.nand_fir1,
++ &runtime->ifc_nand.nand_fir1);
++ ifc_out32(savd_rregs->ifc_nand.nand_fir2,
++ &runtime->ifc_nand.nand_fir2);
++ ifc_out32(savd_rregs->ifc_nand.nand_csel,
++ &runtime->ifc_nand.nand_csel);
++ ifc_out32(savd_rregs->ifc_nand.nandseq_strt,
++ &runtime->ifc_nand.nandseq_strt);
++ ifc_out32(savd_rregs->ifc_nand.nand_evter_en,
++ &runtime->ifc_nand.nand_evter_en);
++ ifc_out32(savd_rregs->ifc_nand.nanndcr,
++ &runtime->ifc_nand.nanndcr);
++ ifc_out32(savd_rregs->ifc_nand.nand_dll_lowcfg0,
++ &runtime->ifc_nand.nand_dll_lowcfg0);
++ ifc_out32(savd_rregs->ifc_nand.nand_dll_lowcfg1,
++ &runtime->ifc_nand.nand_dll_lowcfg1);
++
++ /* IFC controller NOR machine registers */
++ ifc_out32(savd_rregs->ifc_nor.nor_evter_en,
++ &runtime->ifc_nor.nor_evter_en);
++ ifc_out32(savd_rregs->ifc_nor.norcr, &runtime->ifc_nor.norcr);
++
++ /* IFC controller GPCM Machine registers */
++ ifc_out32(savd_rregs->ifc_gpcm.gpcm_evter_en,
++ &runtime->ifc_gpcm.gpcm_evter_en);
++
++ /* IFC interrupts enabled */
++ ifc_out32(ctrl->saved_gregs->cm_evter_intr_en,
++ &fcm->cm_evter_intr_en);
++ ifc_out32(ctrl->saved_rregs->ifc_nand.nand_evter_intr_en,
++ &runtime->ifc_nand.nand_evter_intr_en);
++ ifc_out32(ctrl->saved_rregs->ifc_nor.nor_evter_intr_en,
++ &runtime->ifc_nor.nor_evter_intr_en);
++ ifc_out32(ctrl->saved_rregs->ifc_gpcm.gpcm_evter_intr_en,
++ &runtime->ifc_gpcm.gpcm_evter_intr_en);
++
++ kfree(ctrl->saved_gregs);
++ kfree(ctrl->saved_rregs);
++ ctrl->saved_gregs = NULL;
++ ctrl->saved_rregs = NULL;
++ }
++
++ ver = ifc_in32(&fcm->ifc_rev);
++ ncfgr = ifc_in32(&runtime->ifc_nand.ncfgr);
++ if (ver >= FSL_IFC_V1_3_0) {
++
++ ifc_out32(ncfgr | IFC_NAND_SRAM_INIT_EN,
++ &runtime->ifc_nand.ncfgr);
++ /* wait for SRAM_INIT bit to be clear or timeout */
++ timeout = 10;
++ while ((ifc_in32(&runtime->ifc_nand.ncfgr) &
++ IFC_NAND_SRAM_INIT_EN) && timeout) {
++ mdelay(IFC_TIMEOUT_MSECS);
++ timeout--;
++ }
++
++ if (!timeout)
++			dev_err(ctrl->dev, "Timeout waiting for IFC SRAM INIT\n");
++ }
++
++ return 0;
++}
++#endif /* CONFIG_PM_SLEEP */
++
+ static const struct of_device_id fsl_ifc_match[] = {
+ {
+ .compatible = "fsl,ifc",
+@@ -318,10 +576,15 @@ static const struct of_device_id fsl_ifc_match[] = {
+ {},
+ };
+
++static const struct dev_pm_ops ifc_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(fsl_ifc_suspend, fsl_ifc_resume)
++};
++
+ static struct platform_driver fsl_ifc_ctrl_driver = {
+ .driver = {
+ .name = "fsl-ifc",
+ .of_match_table = fsl_ifc_match,
++ .pm = &ifc_pm_ops,
+ },
+ .probe = fsl_ifc_ctrl_probe,
+ .remove = fsl_ifc_ctrl_remove,
+diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
+index 11d63046..38b90301 100644
+--- a/drivers/mtd/maps/physmap_of.c
++++ b/drivers/mtd/maps/physmap_of.c
+@@ -20,6 +20,7 @@
+ #include <linux/mtd/map.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/mtd/concat.h>
++#include <linux/mtd/cfi_endian.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_platform.h>
+@@ -209,6 +210,9 @@ static int of_flash_probe(struct platform_device *dev)
+ return err;
+ }
+
++ if (of_property_read_bool(dp->parent, "big-endian"))
++ info->list[i].map.swap = CFI_BIG_ENDIAN;
++
+ err = -ENOMEM;
+ info->list[i].map.virt = ioremap(info->list[i].map.phys,
+ info->list[i].map.size);
+diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
+index b254090b..961f1aa1 100644
+--- a/drivers/mtd/nand/Kconfig
++++ b/drivers/mtd/nand/Kconfig
+@@ -438,7 +438,7 @@ config MTD_NAND_FSL_ELBC
+
+ config MTD_NAND_FSL_IFC
+ tristate "NAND support for Freescale IFC controller"
+- depends on FSL_SOC || ARCH_LAYERSCAPE
++ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A
+ select FSL_IFC
+ select MEMORY
+ help
+diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
+index d1570f51..785e9ee0 100644
+--- a/drivers/mtd/nand/fsl_ifc_nand.c
++++ b/drivers/mtd/nand/fsl_ifc_nand.c
+@@ -904,9 +904,12 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ }
+
+- if (ctrl->version == FSL_IFC_VERSION_1_1_0)
++ if (ctrl->version >= FSL_IFC_VERSION_1_1_0)
+ fsl_ifc_sram_init(priv);
+
++ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
++ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
++
+ return 0;
+ }
+
+diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
+index c332f0a4..a41d21b6 100644
+--- a/include/linux/fsl_ifc.h
++++ b/include/linux/fsl_ifc.h
+@@ -274,6 +274,8 @@
+ */
+ /* Auto Boot Mode */
+ #define IFC_NAND_NCFGR_BOOT 0x80000000
++/* SRAM INIT EN */
++#define IFC_NAND_SRAM_INIT_EN 0x20000000
+ /* Addressing Mode-ROW0+n/COL0 */
+ #define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
+ /* Addressing Mode-ROW0+n/COL0+n */
+@@ -861,6 +863,11 @@ struct fsl_ifc_ctrl {
+ u32 nand_stat;
+ wait_queue_head_t nand_wait;
+ bool little_endian;
++#ifdef CONFIG_PM_SLEEP
++	/* save regs when system goes to deep sleep */
++ struct fsl_ifc_global *saved_gregs;
++ struct fsl_ifc_runtime *saved_rregs;
++#endif
+ };
+
+ extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+--
+2.14.1
+
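
Besides saving and restoring the controller registers, the resume path in the
patch above re-runs SRAM initialization on IFC v1.3.0 and later, polling
IFC_NAND_SRAM_INIT_EN until it self-clears and giving up after ten 1000 ms
delays. The stand-alone sketch below models only that bounded-poll shape; the
fake register that clears after three reads is an assumption for illustration.

/*
 * Stand-alone model of the bounded polling loop fsl_ifc_resume() uses to
 * wait for IFC_NAND_SRAM_INIT_EN to self-clear.  The fake register and
 * its clear-after-three-polls behaviour are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define SRAM_INIT_EN	0x20000000u

static uint32_t fake_ncfgr = SRAM_INIT_EN;
static unsigned int polls;

static uint32_t read_ncfgr(void)
{
	if (++polls >= 3)		/* pretend init finishes */
		fake_ncfgr &= ~SRAM_INIT_EN;
	return fake_ncfgr;
}

int main(void)
{
	unsigned int timeout = 10;

	while ((read_ncfgr() & SRAM_INIT_EN) && timeout)
		timeout--;		/* the driver mdelay()s here */

	if (!timeout)
		fprintf(stderr, "timeout waiting for SRAM init\n");
	else
		printf("SRAM init done after %u polls\n", polls);
	return 0;
}
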
diff --git a/target/linux/layerscape/patches-4.9/601-net-support-layerscape.patch b/target/linux/layerscape/patches-4.9/601-net-support-layerscape.patch
new file mode 100644
index 0000000000..82e0a584b3
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/601-net-support-layerscape.patch
@@ -0,0 +1,2549 @@
+From 2ed7bff3d1f2fa6c5f6eff0b2bd98deaa3dc18b0 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 10:57:14 +0800
+Subject: [PATCH] net: support layerscape
+
+This is an integrated patch for layerscape net support.
+
+Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
+Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
+Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
+Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
+Signed-off-by: Jarod Wilson <jarod@redhat.com>
+Signed-off-by: Nikhil Badola <nikhil.badola@freescale.com>
+Signed-off-by: stephen hemminger <stephen@networkplumber.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/base/devres.c | 66 +++++++++++++++
+ drivers/base/soc.c | 66 +++++++++++++++
+ drivers/net/bonding/bond_main.c | 10 +--
+ drivers/net/dummy.c | 5 +-
+ drivers/net/ethernet/amazon/ena/ena_netdev.c | 10 +--
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6 +-
+ drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 4 +-
+ drivers/net/ethernet/atheros/alx/main.c | 6 +-
+ drivers/net/ethernet/broadcom/b44.c | 5 +-
+ drivers/net/ethernet/broadcom/bnx2.c | 5 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +-
+ drivers/net/ethernet/broadcom/tg3.c | 8 +-
+ drivers/net/ethernet/brocade/bna/bnad.c | 6 +-
+ drivers/net/ethernet/calxeda/xgmac.c | 5 +-
+ drivers/net/ethernet/cavium/thunder/nicvf_main.c | 5 +-
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 7 +-
+ drivers/net/ethernet/cisco/enic/enic_main.c | 8 +-
+ drivers/net/ethernet/ec_bhf.c | 4 +-
+ drivers/net/ethernet/emulex/benet/be_main.c | 5 +-
+ drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6 +-
+ drivers/net/ethernet/ibm/ehea/ehea_main.c | 5 +-
+ drivers/net/ethernet/intel/e1000e/e1000.h | 4 +-
+ drivers/net/ethernet/intel/e1000e/netdev.c | 5 +-
+ drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 6 +-
+ drivers/net/ethernet/intel/i40e/i40e.h | 5 +-
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 18 ++--
+ drivers/net/ethernet/intel/igb/igb_main.c | 10 +--
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 +-
+ drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 +-
+ drivers/net/ethernet/marvell/mvneta.c | 4 +-
+ drivers/net/ethernet/marvell/mvpp2.c | 4 +-
+ drivers/net/ethernet/marvell/sky2.c | 6 +-
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 6 +-
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4 +-
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +-
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 +-
+ drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 3 +-
+ drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 9 +-
+ drivers/net/ethernet/neterion/vxge/vxge-main.c | 4 +-
+ .../net/ethernet/netronome/nfp/nfp_net_common.c | 6 +-
+ drivers/net/ethernet/nvidia/forcedeth.c | 4 +-
+ .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 10 +--
+ drivers/net/ethernet/qlogic/qede/qede_main.c | 7 +-
+ drivers/net/ethernet/qualcomm/emac/emac.c | 6 +-
+ drivers/net/ethernet/realtek/8139too.c | 9 +-
+ drivers/net/ethernet/realtek/r8169.c | 4 +-
+ drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 8 +-
+ drivers/net/ethernet/sfc/efx.c | 6 +-
+ drivers/net/ethernet/sun/niu.c | 6 +-
+ drivers/net/ethernet/synopsys/dwc_eth_qos.c | 4 +-
+ drivers/net/ethernet/tile/tilepro.c | 4 +-
+ drivers/net/ethernet/via/via-rhine.c | 8 +-
+ drivers/net/fjes/fjes_main.c | 7 +-
+ drivers/net/hyperv/netvsc_drv.c | 6 +-
+ drivers/net/ifb.c | 6 +-
+ drivers/net/ipvlan/ipvlan_main.c | 5 +-
+ drivers/net/loopback.c | 5 +-
+ drivers/net/macsec.c | 8 +-
+ drivers/net/macvlan.c | 5 +-
+ drivers/net/nlmon.c | 4 +-
+ drivers/net/ppp/ppp_generic.c | 4 +-
+ drivers/net/slip/slip.c | 3 +-
+ drivers/net/team/team.c | 3 +-
+ drivers/net/tun.c | 3 +-
+ drivers/net/veth.c | 6 +-
+ drivers/net/virtio_net.c | 6 +-
+ drivers/net/vmxnet3/vmxnet3_ethtool.c | 4 +-
+ drivers/net/vmxnet3/vmxnet3_int.h | 4 +-
+ drivers/net/vrf.c | 5 +-
+ drivers/net/xen-netfront.c | 6 +-
+ drivers/staging/netlogic/xlr_net.c | 10 +--
+ include/linux/device.h | 19 +++++
+ include/linux/fsl/svr.h | 97 ++++++++++++++++++++++
+ include/linux/fsl_devices.h | 3 +
+ include/linux/netdev_features.h | 2 +
+ include/linux/netdevice.h | 12 ++-
+ include/linux/skbuff.h | 2 +
+ include/linux/sys_soc.h | 3 +
+ include/net/ip_tunnels.h | 4 +-
+ include/uapi/linux/if_ether.h | 1 +
+ net/8021q/vlan_dev.c | 5 +-
+ net/bridge/br_device.c | 6 +-
+ net/core/dev.c | 13 ++-
+ net/core/skbuff.c | 29 ++++++-
+ net/ipv4/ip_tunnel_core.c | 6 +-
+ net/l2tp/l2tp_eth.c | 6 +-
+ net/mac80211/iface.c | 4 +-
+ net/openvswitch/vport-internal_dev.c | 4 +-
+ net/sched/sch_generic.c | 7 ++
+ net/sched/sch_teql.c | 5 +-
+ 90 files changed, 468 insertions(+), 298 deletions(-)
+ create mode 100644 include/linux/fsl/svr.h
+
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index 8fc654f0..71d57702 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -10,6 +10,7 @@
+ #include <linux/device.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/percpu.h>
+
+ #include "base.h"
+
+@@ -985,3 +986,68 @@ void devm_free_pages(struct device *dev, unsigned long addr)
+ &devres));
+ }
+ EXPORT_SYMBOL_GPL(devm_free_pages);
++
++static void devm_percpu_release(struct device *dev, void *pdata)
++{
++ void __percpu *p;
++
++ p = *(void __percpu **)pdata;
++ free_percpu(p);
++}
++
++static int devm_percpu_match(struct device *dev, void *data, void *p)
++{
++ struct devres *devr = container_of(data, struct devres, data);
++
++ return *(void **)devr->data == p;
++}
++
++/**
++ * __devm_alloc_percpu - Resource-managed alloc_percpu
++ * @dev: Device to allocate per-cpu memory for
++ * @size: Size of per-cpu memory to allocate
++ * @align: Alignment of per-cpu memory to allocate
++ *
++ * Managed alloc_percpu. Per-cpu memory allocated with this function is
++ * automatically freed on driver detach.
++ *
++ * RETURNS:
++ * Pointer to allocated memory on success, NULL on failure.
++ */
++void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
++ size_t align)
++{
++ void *p;
++ void __percpu *pcpu;
++
++ pcpu = __alloc_percpu(size, align);
++ if (!pcpu)
++ return NULL;
++
++ p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
++ if (!p) {
++ free_percpu(pcpu);
++ return NULL;
++ }
++
++ *(void __percpu **)p = pcpu;
++
++ devres_add(dev, p);
++
++ return pcpu;
++}
++EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
++
++/**
++ * devm_free_percpu - Resource-managed free_percpu
++ * @dev: Device this memory belongs to
++ * @pdata: Per-cpu memory to free
++ *
++ * Free memory allocated with devm_alloc_percpu().
++ */
++void devm_free_percpu(struct device *dev, void __percpu *pdata)
++{
++ WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
++ (void *)pdata));
++}
++EXPORT_SYMBOL_GPL(devm_free_percpu);
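
The devres half of this patch wraps __alloc_percpu() so the per-cpu area is
freed automatically on driver detach, by recording a release callback next to
the allocation. The following plain-C sketch models that devres pattern with
made-up struct names and no kernel APIs; the kernel version differs mainly in
using devres_alloc()/devres_add() instead of a hand-rolled list.

/*
 * Userspace model of the devres idea behind __devm_alloc_percpu(): tie an
 * allocation to a "device" so teardown frees it automatically.  Entirely
 * illustrative; the payload here is heap memory, not per-cpu memory.
 */
#include <stdio.h>
#include <stdlib.h>

struct devres {
	void (*release)(void *);
	void *data;
	struct devres *next;
};

struct device {
	struct devres *resources;
};

static void *devm_alloc(struct device *dev, size_t size,
			void (*release)(void *))
{
	struct devres *dr = malloc(sizeof(*dr));

	if (!dr)
		return NULL;
	dr->data = calloc(1, size);
	if (!dr->data) {
		free(dr);
		return NULL;
	}
	dr->release = release;
	dr->next = dev->resources;
	dev->resources = dr;
	return dr->data;
}

static void device_detach(struct device *dev)
{
	struct devres *dr = dev->resources;

	while (dr) {
		struct devres *next = dr->next;

		dr->release(dr->data);	/* mirrors devm_percpu_release() */
		free(dr);
		dr = next;
	}
	dev->resources = NULL;
}

int main(void)
{
	struct device dev = { 0 };
	long *stats = devm_alloc(&dev, sizeof(*stats), free);

	if (stats) {
		*stats = 42;
		printf("stats = %ld\n", *stats);
	}
	device_detach(&dev);	/* frees stats without an explicit free */
	return 0;
}
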
+diff --git a/drivers/base/soc.c b/drivers/base/soc.c
+index b63f23e6..0c5cf872 100644
+--- a/drivers/base/soc.c
++++ b/drivers/base/soc.c
+@@ -13,6 +13,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/sys_soc.h>
+ #include <linux/err.h>
++#include <linux/glob.h>
+
+ static DEFINE_IDA(soc_ida);
+
+@@ -159,3 +160,68 @@ static int __init soc_bus_register(void)
+ return bus_register(&soc_bus_type);
+ }
+ core_initcall(soc_bus_register);
++
++static int soc_device_match_one(struct device *dev, void *arg)
++{
++ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
++ const struct soc_device_attribute *match = arg;
++
++ if (match->machine &&
++ !glob_match(match->machine, soc_dev->attr->machine))
++ return 0;
++
++ if (match->family &&
++ !glob_match(match->family, soc_dev->attr->family))
++ return 0;
++
++ if (match->revision &&
++ !glob_match(match->revision, soc_dev->attr->revision))
++ return 0;
++
++ if (match->soc_id &&
++ !glob_match(match->soc_id, soc_dev->attr->soc_id))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * soc_device_match - identify the SoC in the machine
++ * @matches: zero-terminated array of possible matches
++ *
++ * returns the first matching entry of the argument array, or NULL
++ * if none of them match.
++ *
++ * This function is meant as a helper in place of of_match_node()
++ * in cases where either no device tree is available or the information
++ * in a device node is insufficient to identify a particular variant
++ * by its compatible strings or other properties. For new devices,
++ * the DT binding should always provide unique compatible strings
++ * that allow the use of of_match_node() instead.
++ *
++ * The calling function can use the .data entry of the
++ * soc_device_attribute to pass a structure or function pointer for
++ * each entry.
++ */
++const struct soc_device_attribute *soc_device_match(
++ const struct soc_device_attribute *matches)
++{
++ int ret = 0;
++
++ if (!matches)
++ return NULL;
++
++ while (!ret) {
++ if (!(matches->machine || matches->family ||
++ matches->revision || matches->soc_id))
++ break;
++ ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
++ soc_device_match_one);
++ if (!ret)
++ matches++;
++ else
++ return matches;
++ }
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(soc_device_match);
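
As the comment above explains, soc_device_match() walks a zero-terminated
attribute table and returns the first entry whose populated fields all
glob-match the registered SoC, so callers usually hang quirk data off each
entry. A stand-alone model of that lookup follows, with fnmatch() standing in
for the kernel's glob_match() and invented family/revision strings.

/*
 * Userspace model of soc_device_match(): walk a zero-terminated table and
 * return the first entry whose set fields all match the running SoC.
 * fnmatch() stands in for glob_match(); the attribute values are made up.
 */
#include <fnmatch.h>
#include <stdio.h>

struct soc_attr {
	const char *family;
	const char *revision;
	const void *data;
};

static const struct soc_attr running_soc = {
	.family = "QorIQ LS1021A", .revision = "2.0",
};

static int field_matches(const char *pattern, const char *value)
{
	return !pattern || !fnmatch(pattern, value, 0);
}

static const struct soc_attr *soc_match(const struct soc_attr *m)
{
	for (; m->family || m->revision; m++)
		if (field_matches(m->family, running_soc.family) &&
		    field_matches(m->revision, running_soc.revision))
			return m;
	return NULL;
}

int main(void)
{
	static const struct soc_attr quirks[] = {
		{ .family = "QorIQ LS1021A", .revision = "1.*",
		  .data = "rev1 erratum workaround" },
		{ .family = "QorIQ LS1021A",
		  .data = "generic LS1021A handling" },
		{ /* sentinel */ },
	};
	const struct soc_attr *hit = soc_match(quirks);

	if (hit)
		printf("matched: %s\n", (const char *)hit->data);
	return 0;
}
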
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 5fa36ebc..75ff184a 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -211,8 +211,8 @@ static int lacp_fast;
+
+ static int bond_init(struct net_device *bond_dev);
+ static void bond_uninit(struct net_device *bond_dev);
+-static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+- struct rtnl_link_stats64 *stats);
++static void bond_get_stats(struct net_device *bond_dev,
++ struct rtnl_link_stats64 *stats);
+ static void bond_slave_arr_handler(struct work_struct *work);
+ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+ int mod);
+@@ -3336,8 +3336,8 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
+ }
+ }
+
+-static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+- struct rtnl_link_stats64 *stats)
++static void bond_get_stats(struct net_device *bond_dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct rtnl_link_stats64 temp;
+@@ -3361,8 +3361,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+
+ memcpy(&bond->bond_stats, stats, sizeof(*stats));
+ spin_unlock(&bond->stats_lock);
+-
+- return stats;
+ }
+
+ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
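
From this point the patch mechanically converts dozens of ndo_get_stats64
implementations from returning struct rtnl_link_stats64 * to returning void:
each driver now fills the caller-provided structure and returns early instead
of handing back NULL or the pointer. A stand-alone model of the new callback
shape, with invented types:

/*
 * Userspace model of the tree-wide ndo_get_stats64 change: the callback
 * fills the caller-provided struct and returns void, so there is no
 * half-filled result to return on early exit.  Types are illustrative.
 */
#include <stdio.h>
#include <string.h>

struct link_stats {
	unsigned long long rx_packets, tx_packets;
};

struct netdev {
	int up;
	struct link_stats hw;	/* stands in for driver counters */
};

/* new style: void return, *stats always left in a defined state */
static void get_stats64(struct netdev *dev, struct link_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	if (!dev->up)
		return;		/* early exit, stats stay zeroed */
	*stats = dev->hw;
}

int main(void)
{
	struct netdev dev = { .up = 1, .hw = { 100, 50 } };
	struct link_stats s;

	get_stats64(&dev, &s);
	printf("rx %llu tx %llu\n", s.rx_packets, s.tx_packets);
	return 0;
}
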
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index 69fc8409..da5b10e0 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -54,8 +54,8 @@ struct pcpu_dstats {
+ struct u64_stats_sync syncp;
+ };
+
+-static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void dummy_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ int i;
+
+@@ -73,7 +73,6 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpackets;
+ }
+- return stats;
+ }
+
+ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index bfeaec5b..ddc281c4 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -2172,19 +2172,19 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
+ ena_com_delete_debug_area(adapter->ena_dev);
+ }
+
+-static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++static void ena_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_admin_basic_stats ena_stats;
+ int rc;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+- return NULL;
++ return;
+
+ rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats);
+ if (rc)
+- return NULL;
++ return;
+
+ stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) |
+ ena_stats.tx_bytes_low;
+@@ -2211,8 +2211,6 @@ static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
+
+ stats->rx_errors = 0;
+ stats->tx_errors = 0;
+-
+- return stats;
+ }
+
+ static const struct net_device_ops ena_netdev_ops = {
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 1e4e8b24..5b413ee7 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1542,8 +1542,8 @@ static void xgbe_tx_timeout(struct net_device *netdev)
+ schedule_work(&pdata->restart_work);
+ }
+
+-static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *s)
++static void xgbe_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *s)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+@@ -1569,8 +1569,6 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+ s->tx_dropped = netdev->stats.tx_dropped;
+
+ DBGPR("<--%s\n", __func__);
+-
+- return s;
+ }
+
+ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+index 8158d469..e62ca66b 100644
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+@@ -1199,7 +1199,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
+ return ret;
+ }
+
+-static struct rtnl_link_stats64 *xgene_enet_get_stats64(
++static void xgene_enet_get_stats64(
+ struct net_device *ndev,
+ struct rtnl_link_stats64 *storage)
+ {
+@@ -1230,8 +1230,6 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
+ }
+ }
+ memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
+-
+- return storage;
+ }
+
+ static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index c0f84b73..3c5c8159 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -1424,8 +1424,8 @@ static void alx_poll_controller(struct net_device *netdev)
+ }
+ #endif
+
+-static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *net_stats)
++static void alx_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *net_stats)
+ {
+ struct alx_priv *alx = netdev_priv(dev);
+ struct alx_hw_stats *hw_stats = &alx->hw.stats;
+@@ -1469,8 +1469,6 @@ static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+
+ spin_unlock(&alx->stats_lock);
+-
+- return net_stats;
+ }
+
+ static const struct net_device_ops alx_netdev_ops = {
+diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
+index 17aa33c5..55f124d7 100644
+--- a/drivers/net/ethernet/broadcom/b44.c
++++ b/drivers/net/ethernet/broadcom/b44.c
+@@ -1677,8 +1677,8 @@ static int b44_close(struct net_device *dev)
+ return 0;
+ }
+
+-static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *nstat)
++static void b44_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *nstat)
+ {
+ struct b44 *bp = netdev_priv(dev);
+ struct b44_hw_stats *hwstat = &bp->hw_stats;
+@@ -1721,7 +1721,6 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
+ #endif
+ } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
+
+- return nstat;
+ }
+
+ static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index 1f7034d7..c23dfcb9 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -6828,13 +6828,13 @@ bnx2_save_stats(struct bnx2 *bp)
+ (unsigned long) (bp->stats_blk->ctr + \
+ bp->temp_stats_blk->ctr)
+
+-static struct rtnl_link_stats64 *
++static void
+ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
+ {
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (bp->stats_blk == NULL)
+- return net_stats;
++ return;
+
+ net_stats->rx_packets =
+ GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
+@@ -6898,7 +6898,6 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
+ GET_32BIT_NET_STATS(stat_FwRxDrop);
+
+- return net_stats;
+ }
+
+ /* All ethtool functions called with rtnl_lock */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 20e569bd..d6c10783 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5664,7 +5664,7 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ return -EOPNOTSUPP;
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ u32 i;
+@@ -5673,7 +5673,7 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+ if (!bp->bnapi)
+- return stats;
++ return;
+
+ /* TODO check if we need to synchronize with bnxt_close path */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+@@ -5720,8 +5720,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
+ stats->tx_errors = le64_to_cpu(tx->tx_err);
+ }
+-
+- return stats;
+ }
+
+ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index edae2dcc..4d45077e 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14145,8 +14145,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
+ .set_link_ksettings = tg3_set_link_ksettings,
+ };
+
+-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void tg3_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct tg3 *tp = netdev_priv(dev);
+
+@@ -14154,13 +14154,11 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+ if (!tp->hw_stats) {
+ *stats = tp->net_stats_prev;
+ spin_unlock_bh(&tp->lock);
+- return stats;
++ return;
+ }
+
+ tg3_get_nstats(tp, stats);
+ spin_unlock_bh(&tp->lock);
+-
+- return stats;
+ }
+
+ static void tg3_set_rx_mode(struct net_device *dev)
+diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
+index f42f672b..aa141609 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad.c
++++ b/drivers/net/ethernet/brocade/bna/bnad.c
+@@ -3111,7 +3111,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ * Used spin_lock to synchronize reading of stats structures, which
+ * is written by BNA under the same lock.
+ */
+-static struct rtnl_link_stats64 *
++static void
+ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+ {
+ struct bnad *bnad = netdev_priv(netdev);
+@@ -3123,8 +3123,6 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+ bnad_netdev_hwstats_fill(bnad, stats);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+-
+- return stats;
+ }
+
+ static void
+@@ -3430,7 +3428,7 @@ static const struct net_device_ops bnad_netdev_ops = {
+ .ndo_open = bnad_open,
+ .ndo_stop = bnad_stop,
+ .ndo_start_xmit = bnad_start_xmit,
+- .ndo_get_stats64 = bnad_get_stats64,
++ .ndo_get_stats64 = bnad_get_stats64,
+ .ndo_set_rx_mode = bnad_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnad_set_mac_address,
+diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
+index 63efa0dc..ccb8fbd6 100644
+--- a/drivers/net/ethernet/calxeda/xgmac.c
++++ b/drivers/net/ethernet/calxeda/xgmac.c
+@@ -1460,9 +1460,9 @@ static void xgmac_poll_controller(struct net_device *dev)
+ }
+ #endif
+
+-static struct rtnl_link_stats64 *
++static void
+ xgmac_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *storage)
++ struct rtnl_link_stats64 *storage)
+ {
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *base = priv->base;
+@@ -1490,7 +1490,6 @@ xgmac_get_stats64(struct net_device *dev,
+
+ writel(0, base + XGMAC_MMC_CTRL);
+ spin_unlock_bh(&priv->stats_lock);
+- return storage;
+ }
+
+ static int xgmac_set_mac_address(struct net_device *dev, void *p)
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index 8a37012c..87709a9d 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -1423,8 +1423,8 @@ void nicvf_update_stats(struct nicvf *nic)
+ nicvf_update_sq_stats(nic, qidx);
+ }
+
+-static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++static void nicvf_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
+@@ -1440,7 +1440,6 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
+ stats->tx_packets = hw_stats->tx_frames;
+ stats->tx_dropped = hw_stats->tx_drops;
+
+- return stats;
+ }
+
+ static void nicvf_tx_timeout(struct net_device *dev)
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 0c2a32a3..c4244913 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -2383,8 +2383,8 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ }
+ EXPORT_SYMBOL(cxgb4_remove_server_filter);
+
+-static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
+- struct rtnl_link_stats64 *ns)
++static void cxgb_get_stats(struct net_device *dev,
++ struct rtnl_link_stats64 *ns)
+ {
+ struct port_stats stats;
+ struct port_info *p = netdev_priv(dev);
+@@ -2397,7 +2397,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
+ spin_lock(&adapter->stats_lock);
+ if (!netif_device_present(dev)) {
+ spin_unlock(&adapter->stats_lock);
+- return ns;
++ return;
+ }
+ t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
+ &p->stats_base);
+@@ -2431,7 +2431,6 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
+ ns->tx_errors = stats.tx_error_frames;
+ ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
+ ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
+- return ns;
+ }
+
+ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 48f82ab6..662cc676 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -680,8 +680,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
+ }
+
+ /* dev_base_lock rwlock held, nominally process context */
+-static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
+- struct rtnl_link_stats64 *net_stats)
++static void enic_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *net_stats)
+ {
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_stats *stats;
+@@ -693,7 +693,7 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
+ * recorded stats.
+ */
+ if (err == -ENOMEM)
+- return net_stats;
++ return;
+
+ net_stats->tx_packets = stats->tx.tx_frames_ok;
+ net_stats->tx_bytes = stats->tx.tx_bytes_ok;
+@@ -707,8 +707,6 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
+ net_stats->rx_over_errors = enic->rq_truncated_pkts;
+ net_stats->rx_crc_errors = enic->rq_bad_fcs;
+ net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
+-
+- return net_stats;
+ }
+
+ static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
+diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
+index f7b42483..2e7554c7 100644
+--- a/drivers/net/ethernet/ec_bhf.c
++++ b/drivers/net/ethernet/ec_bhf.c
+@@ -458,7 +458,7 @@ static int ec_bhf_stop(struct net_device *net_dev)
+ return 0;
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ ec_bhf_get_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+ {
+@@ -473,8 +473,6 @@ ec_bhf_get_stats(struct net_device *net_dev,
+
+ stats->tx_bytes = priv->stat_tx_bytes;
+ stats->rx_bytes = priv->stat_rx_bytes;
+-
+- return stats;
+ }
+
+ static const struct net_device_ops ec_bhf_netdev_ops = {
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 5626908f..59852304 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -646,8 +646,8 @@ void be_parse_stats(struct be_adapter *adapter)
+ }
+ }
+
+-static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++static void be_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+@@ -711,7 +711,6 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
+ stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
+ drvs->rx_input_fifo_overflow_drop +
+ drvs->rx_drops_no_pbuf;
+- return stats;
+ }
+
+ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index c06845b7..249bc6ad 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -1536,8 +1536,8 @@ void hns_nic_set_rx_mode(struct net_device *ndev)
+ hns_set_multicast_list(ndev);
+ }
+
+-struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
+- struct rtnl_link_stats64 *stats)
++static void hns_nic_get_stats64(struct net_device *ndev,
++ struct rtnl_link_stats64 *stats)
+ {
+ int idx = 0;
+ u64 tx_bytes = 0;
+@@ -1579,8 +1579,6 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
+ stats->tx_window_errors = ndev->stats.tx_window_errors;
+ stats->rx_compressed = ndev->stats.rx_compressed;
+ stats->tx_compressed = ndev->stats.tx_compressed;
+-
+- return stats;
+ }
+
+ static u16
+diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+index bd719e25..a2ff2783 100644
+--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -328,8 +328,8 @@ static void ehea_update_bcmc_registrations(void)
+ spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
+ }
+
+-static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void ehea_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct ehea_port *port = netdev_priv(dev);
+ u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
+@@ -352,7 +352,6 @@ static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
+
+ stats->multicast = port->stats.multicast;
+ stats->rx_errors = port->stats.rx_errors;
+- return stats;
+ }
+
+ static void ehea_update_stats(struct work_struct *work)
+diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
+index 879cca47..a29b12e8 100644
+--- a/drivers/net/ethernet/intel/e1000e/e1000.h
++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
+@@ -493,8 +493,8 @@ int e1000e_setup_rx_resources(struct e1000_ring *ring);
+ int e1000e_setup_tx_resources(struct e1000_ring *ring);
+ void e1000e_free_rx_resources(struct e1000_ring *ring);
+ void e1000e_free_tx_resources(struct e1000_ring *ring);
+-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats);
++void e1000e_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats);
+ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+ void e1000e_get_hw_control(struct e1000_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 7017281b..49f3d868 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5920,8 +5920,8 @@ static void e1000_reset_task(struct work_struct *work)
+ *
+ * Returns the address of the device statistics structure.
+ **/
+-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++void e1000e_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+@@ -5958,7 +5958,6 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ spin_unlock(&adapter->stats64_lock);
+- return stats;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+index 05629381..e97b6c49 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+@@ -1128,8 +1128,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
+ * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
+ * function replaces fm10k_get_stats for kernels which support it.
+ */
+-static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++static void fm10k_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_ring *ring;
+@@ -1174,8 +1174,6 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
+
+ /* following stats updated by fm10k_service_task() */
+ stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+-
+- return stats;
+ }
+
+ int fm10k_setup_tc(struct net_device *dev, u8 tc)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 6d61e443..3cc0bf29 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -797,9 +797,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
+ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
+ #ifdef I40E_FCOE
+-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+- struct net_device *netdev,
+- struct rtnl_link_stats64 *storage);
++void i40e_get_netdev_stats_struct(struct net_device *netdev,
++ struct rtnl_link_stats64 *storage);
+ int i40e_set_mac(struct net_device *netdev, void *p);
+ void i40e_set_rx_mode(struct net_device *netdev);
+ #endif
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 31c97e39..2f1554b3 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -408,15 +408,11 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+-#ifdef I40E_FCOE
+-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+- struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
+-#else
+-static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+- struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++#ifndef I40E_FCOE
++static
+ #endif
++void i40e_get_netdev_stats_struct(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *tx_ring, *rx_ring;
+@@ -425,10 +421,10 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+ int i;
+
+ if (test_bit(__I40E_DOWN, &vsi->state))
+- return stats;
++ return;
+
+ if (!vsi->tx_rings)
+- return stats;
++ return;
+
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+@@ -468,8 +464,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+ stats->rx_dropped = vsi_stats->rx_dropped;
+ stats->rx_crc_errors = vsi_stats->rx_crc_errors;
+ stats->rx_length_errors = vsi_stats->rx_length_errors;
+-
+- return stats;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 9affd7c1..076ccfa4 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -137,8 +137,8 @@ static void igb_update_phy_info(unsigned long);
+ static void igb_watchdog(unsigned long);
+ static void igb_watchdog_task(struct work_struct *);
+ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
+-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats);
++static void igb_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats);
+ static int igb_change_mtu(struct net_device *, int);
+ static int igb_set_mac(struct net_device *, void *);
+ static void igb_set_uta(struct igb_adapter *adapter, bool set);
+@@ -5386,8 +5386,8 @@ static void igb_reset_task(struct work_struct *work)
+ * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
+ **/
+-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++static void igb_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+@@ -5395,8 +5395,6 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+ igb_update_stats(adapter, &adapter->stats64);
+ memcpy(stats, &adapter->stats64, sizeof(*stats));
+ spin_unlock(&adapter->stats64_lock);
+-
+- return stats;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index fee1f291..ee4f1cc2 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -8085,8 +8085,9 @@ static void ixgbe_netpoll(struct net_device *netdev)
+ }
+
+ #endif
+-static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++
++static void ixgbe_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+@@ -8124,13 +8125,13 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
+ }
+ }
+ rcu_read_unlock();
++
+ /* following stats updated by ixgbe_watchdog_task() */
+ stats->multicast = netdev->stats.multicast;
+ stats->rx_errors = netdev->stats.rx_errors;
+ stats->rx_length_errors = netdev->stats.rx_length_errors;
+ stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+ stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+- return stats;
+ }
+
+ #ifdef CONFIG_IXGBE_DCB
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index cbf70fe4..3519769e 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -3880,8 +3880,8 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
+ ixgbevf_suspend(pdev, PMSG_SUSPEND);
+ }
+
+-static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++static void ixgbevf_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ unsigned int start;
+@@ -3914,8 +3914,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
+ stats->tx_bytes += bytes;
+ stats->tx_packets += packets;
+ }
+-
+- return stats;
+ }
+
+ #define IXGBEVF_MAX_MAC_HDR_LEN 127
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 707bc468..100f3af8 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -636,7 +636,7 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
+ }
+
+ /* Get System Network Statistics */
+-static struct rtnl_link_stats64 *
++static void
+ mvneta_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+ {
+@@ -670,8 +670,6 @@ mvneta_get_stats64(struct net_device *dev,
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ stats->tx_dropped = dev->stats.tx_dropped;
+-
+- return stats;
+ }
+
+ /* Rx descriptors helper methods */
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 0a4e81a2..17032925 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -5762,7 +5762,7 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+ return err;
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ struct mvpp2_port *port = netdev_priv(dev);
+@@ -5794,8 +5794,6 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+-
+- return stats;
+ }
+
+ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 941c8e2c..cff8b633 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -3898,8 +3898,8 @@ static void sky2_set_multicast(struct net_device *dev)
+ gma_write16(hw, port, GM_RX_CTRL, reg);
+ }
+
+-static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void sky2_get_stats(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+@@ -3939,8 +3939,6 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
+-
+- return stats;
+ }
+
+ /* Can have one global because blinking is controlled by
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 4832223f..6d260da6 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -462,8 +462,8 @@ static void mtk_stats_update(struct mtk_eth *eth)
+ }
+ }
+
+-static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *storage)
++static void mtk_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *storage)
+ {
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+@@ -494,8 +494,6 @@ static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
+ storage->tx_errors = dev->stats.tx_errors;
+ storage->rx_dropped = dev->stats.rx_dropped;
+ storage->tx_dropped = dev->stats.tx_dropped;
+-
+- return storage;
+ }
+
+ static inline int mtk_max_frag_size(int mtu)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index d223e7cb..ebf5c7e5 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1316,7 +1316,7 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
+ }
+
+
+-static struct rtnl_link_stats64 *
++static void
+ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -1324,8 +1324,6 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ spin_lock_bh(&priv->stats_lock);
+ netdev_stats_to_stats64(stats, &dev->stats);
+ spin_unlock_bh(&priv->stats_lock);
+-
+- return stats;
+ }
+
+ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 9d372293..582ba530 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2647,7 +2647,7 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
+ return mlx5e_setup_tc(dev, tc->tc);
+ }
+
+-struct rtnl_link_stats64 *
++static void
+ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+@@ -2681,7 +2681,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->multicast =
+ VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
+
+- return stats;
+ }
+
+ static void mlx5e_set_rx_mode(struct net_device *dev)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 1806b1fc..cf21a83a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -948,15 +948,13 @@ static void update_stats_cache(struct work_struct *work)
+ /* Return the stats from a cache that is updated periodically,
+ * as this function might get called in an atomic context.
+ */
+-static struct rtnl_link_stats64 *
++static void
+ mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+ {
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
+-
+- return stats;
+ }
+
+ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+index d548f0a5..6e9e7fa4 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+@@ -351,7 +351,7 @@ static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+ return 0;
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ mlxsw_sx_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+ {
+@@ -380,7 +380,6 @@ mlxsw_sx_port_get_stats64(struct net_device *dev,
+ tx_dropped += p->tx_dropped;
+ }
+ stats->tx_dropped = tx_dropped;
+- return stats;
+ }
+
+ static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index 6d1a956e..d26745cc 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -378,8 +378,8 @@ static inline void put_be32(__be32 val, __be32 __iomem * p)
+ __raw_writel((__force __u32) val, (__force void __iomem *)p);
+ }
+
+-static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
+- struct rtnl_link_stats64 *stats);
++static void myri10ge_get_stats(struct net_device *dev,
++ struct rtnl_link_stats64 *stats);
+
+ static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
+ {
+@@ -3119,8 +3119,8 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+ }
+
+-static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void myri10ge_get_stats(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ const struct myri10ge_priv *mgp = netdev_priv(dev);
+ const struct myri10ge_slice_netstats *slice_stats;
+@@ -3135,7 +3135,6 @@ static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
+ stats->rx_dropped += slice_stats->rx_dropped;
+ stats->tx_dropped += slice_stats->tx_dropped;
+ }
+- return stats;
+ }
+
+ static void myri10ge_set_multicast_list(struct net_device *dev)
+diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
+index e0993eba..9c27728f 100644
+--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
++++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
+@@ -3116,7 +3116,7 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
+ * @stats: pointer to struct rtnl_link_stats64
+ *
+ */
+-static struct rtnl_link_stats64 *
++static void
+ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
+ {
+ struct vxgedev *vdev = netdev_priv(dev);
+@@ -3155,8 +3155,6 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
+ net_stats->tx_bytes += bytes;
+ net_stats->tx_errors += txstats->tx_errors;
+ }
+-
+- return net_stats;
+ }
+
+ static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index 4ca82bd8..55915cbd 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -2400,8 +2400,8 @@ int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
+ return err;
+ }
+
+-static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++static void nfp_net_stat64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct nfp_net *nn = netdev_priv(netdev);
+ int r;
+@@ -2431,8 +2431,6 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
+ stats->tx_bytes += data[1];
+ stats->tx_errors += data[2];
+ }
+-
+- return stats;
+ }
+
+ static bool nfp_net_ebpf_capable(struct nfp_net *nn)
+diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
+index 9b0d7f46..d7a07928 100644
+--- a/drivers/net/ethernet/nvidia/forcedeth.c
++++ b/drivers/net/ethernet/nvidia/forcedeth.c
+@@ -1733,7 +1733,7 @@ static void nv_update_stats(struct net_device *dev)
+ * Called with read_lock(&dev_base_lock) held for read -
+ * only synchronized against unregister_netdevice.
+ */
+-static struct rtnl_link_stats64*
++static void
+ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
+@@ -1793,8 +1793,6 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
+
+ spin_unlock_bh(&np->hwstats_lock);
+ }
+-
+- return storage;
+ }
+
+ /*
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index 7a0281a3..8bb16d64 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -90,8 +90,8 @@ static irqreturn_t netxen_msix_intr(int irq, void *data);
+
+ static void netxen_free_ip_list(struct netxen_adapter *, bool);
+ static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
+-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
+- struct rtnl_link_stats64 *stats);
++static void netxen_nic_get_stats(struct net_device *dev,
++ struct rtnl_link_stats64 *stats);
+ static int netxen_nic_set_mac(struct net_device *netdev, void *p);
+
+ /* PCI Device ID Table */
+@@ -2295,8 +2295,8 @@ static void netxen_tx_timeout_task(struct work_struct *work)
+ clear_bit(__NX_RESETTING, &adapter->state);
+ }
+
+-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++static void netxen_nic_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+@@ -2306,8 +2306,6 @@ static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+-
+- return stats;
+ }
+
+ static irqreturn_t netxen_intr(int irq, void *data)
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 85f46dbe..645507a9 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -1803,9 +1803,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
+ edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+ }
+
+-static
+-struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void qede_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct qede_dev *edev = netdev_priv(dev);
+
+@@ -1835,8 +1834,6 @@ struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
+ stats->collisions = edev->stats.tx_total_collisions;
+ stats->rx_crc_errors = edev->stats.rx_crc_errors;
+ stats->rx_frame_errors = edev->stats.rx_align_errors;
+-
+- return stats;
+ }
+
+ #ifdef CONFIG_QED_SRIOV
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
+index 57b35aea..babc1025 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac.c
+@@ -319,8 +319,8 @@ static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+ }
+
+ /* Provide network statistics info for the interface */
+-static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *net_stats)
++static void emac_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *net_stats)
+ {
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ unsigned int addr = REG_MAC_RX_STATUS_BIN;
+@@ -384,8 +384,6 @@ static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
+ net_stats->tx_window_errors = stats->tx_late_col;
+
+ spin_unlock(&stats->lock);
+-
+- return net_stats;
+ }
+
+ static const struct net_device_ops emac_netdev_ops = {
+diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
+index da4c2d8a..5722e187 100644
+--- a/drivers/net/ethernet/realtek/8139too.c
++++ b/drivers/net/ethernet/realtek/8139too.c
+@@ -653,9 +653,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget);
+ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance);
+ static int rtl8139_close (struct net_device *dev);
+ static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
+-static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64
+- *stats);
++static void rtl8139_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats);
+ static void rtl8139_set_rx_mode (struct net_device *dev);
+ static void __set_rx_mode (struct net_device *dev);
+ static void rtl8139_hw_start (struct net_device *dev);
+@@ -2521,7 +2520,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ }
+
+
+-static struct rtnl_link_stats64 *
++static void
+ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ struct rtl8139_private *tp = netdev_priv(dev);
+@@ -2549,8 +2548,6 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->tx_packets = tp->tx_stats.packets;
+ stats->tx_bytes = tp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
+-
+- return stats;
+ }
+
+ /* Set or clear the multicast filter for this adaptor.
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 2c4350a1..ac7eab06 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -7751,7 +7751,7 @@ static int rtl_open(struct net_device *dev)
+ goto out;
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+@@ -7805,8 +7805,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ le16_to_cpu(tp->tc_offset.tx_aborted);
+
+ pm_runtime_put_noidle(&pdev->dev);
+-
+- return stats;
+ }
+
+ static void rtl8169_net_suspend(struct net_device *dev)
+diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+index ea44a245..49196846 100644
+--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
++++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+@@ -1721,11 +1721,9 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
+ * This function is a driver entry point whenever ifconfig command gets
+ * executed to see device statistics. Statistics are number of
+ * bytes sent or received, errors occurred etc.
+- * Return value:
+- * This function returns various statistical information of device.
+ */
+-static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void sxgbe_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->ioaddr;
+@@ -1776,8 +1774,6 @@ static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
+ SXGBE_MMC_TXUFLWHI_GBCNT_REG);
+ writel(0, ioaddr + SXGBE_MMC_CTL_REG);
+ spin_unlock(&priv->stats_lock);
+-
+- return stats;
+ }
+
+ /* sxgbe_set_features - entry point to set offload features of the device.
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index 6b89e4a7..c6209dd4 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -2232,16 +2232,14 @@ int efx_net_stop(struct net_device *net_dev)
+ }
+
+ /* Context: process, dev_base_lock or RTNL held, non-blocking. */
+-static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
+- struct rtnl_link_stats64 *stats)
++static void efx_net_stats(struct net_device *net_dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, stats);
+ spin_unlock_bh(&efx->stats_lock);
+-
+- return stats;
+ }
+
+ /* Context: netif_tx_lock held, BHs disabled. */
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index a2371aa1..680fc756 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -6294,8 +6294,8 @@ static void niu_get_tx_stats(struct niu *np,
+ stats->tx_errors = errors;
+ }
+
+-static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void niu_get_stats(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct niu *np = netdev_priv(dev);
+
+@@ -6303,8 +6303,6 @@ static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
+ niu_get_rx_stats(np, stats);
+ niu_get_tx_stats(np, stats);
+ }
+-
+- return stats;
+ }
+
+ static void niu_load_hash_xmac(struct niu *np, u16 *hash)
+diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+index 97d64bfe..bcacd08c 100644
+--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
++++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+@@ -2490,7 +2490,7 @@ static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+ dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
+ }
+
+-static struct rtnl_link_stats64*
++static void
+ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+ {
+ unsigned long flags;
+@@ -2522,8 +2522,6 @@ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+ else
+ s->tx_errors = hwstats->txunderflowerror +
+ hwstats->txcarriererror;
+-
+- return s;
+ }
+
+ static void
+diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
+index 4ef605a9..9cd7cd3c 100644
+--- a/drivers/net/ethernet/tile/tilepro.c
++++ b/drivers/net/ethernet/tile/tilepro.c
+@@ -2047,8 +2047,8 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ *
+ * Returns the address of the device statistics structure.
+ */
+-static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void tile_net_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct tile_net_priv *priv = netdev_priv(dev);
+ u64 rx_packets = 0, tx_packets = 0;
+diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
+index 9d14731c..ae891364 100644
+--- a/drivers/net/ethernet/via/via-rhine.c
++++ b/drivers/net/ethernet/via/via-rhine.c
+@@ -513,8 +513,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
+ static void rhine_tx(struct net_device *dev);
+ static int rhine_rx(struct net_device *dev, int limit);
+ static void rhine_set_rx_mode(struct net_device *dev);
+-static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats);
++static void rhine_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats);
+ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+ static const struct ethtool_ops netdev_ethtool_ops;
+ static int rhine_close(struct net_device *dev);
+@@ -2222,7 +2222,7 @@ static void rhine_slow_event_task(struct work_struct *work)
+ mutex_unlock(&rp->task_lock);
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ struct rhine_private *rp = netdev_priv(dev);
+@@ -2245,8 +2245,6 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->tx_packets = rp->tx_stats.packets;
+ stats->tx_bytes = rp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
+-
+- return stats;
+ }
+
+ static void rhine_set_rx_mode(struct net_device *dev)
+diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
+index e46b1ebb..6b944caf 100644
+--- a/drivers/net/fjes/fjes_main.c
++++ b/drivers/net/fjes/fjes_main.c
+@@ -56,8 +56,7 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *);
+ static void fjes_tx_stall_task(struct work_struct *);
+ static void fjes_force_close_task(struct work_struct *);
+ static irqreturn_t fjes_intr(int, void*);
+-static struct rtnl_link_stats64 *
+-fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
++static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
+ static int fjes_change_mtu(struct net_device *, int);
+ static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
+ static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
+@@ -762,14 +761,12 @@ static void fjes_tx_retry(struct net_device *netdev)
+ netif_tx_wake_queue(queue);
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+ {
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+
+ memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
+-
+- return stats;
+ }
+
+ static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 36a04e18..48f20945 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -918,8 +918,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
+ return ret;
+ }
+
+-static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
+- struct rtnl_link_stats64 *t)
++static void netvsc_get_stats64(struct net_device *net,
++ struct rtnl_link_stats64 *t)
+ {
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ int cpu;
+@@ -957,8 +957,6 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
+
+ t->rx_dropped = net->stats.rx_dropped;
+ t->rx_errors = net->stats.rx_errors;
+-
+- return t;
+ }
+
+ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
+diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
+index 66c0eeaf..082534e1 100644
+--- a/drivers/net/ifb.c
++++ b/drivers/net/ifb.c
+@@ -129,8 +129,8 @@ static void ifb_ri_tasklet(unsigned long _txp)
+
+ }
+
+-static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void ifb_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct ifb_dev_private *dp = netdev_priv(dev);
+ struct ifb_q_private *txp = dp->tx_private;
+@@ -157,8 +157,6 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
+ }
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+-
+- return stats;
+ }
+
+ static int ifb_dev_init(struct net_device *dev)
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index dfbc4ef6..6629680d 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -296,8 +296,8 @@ static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
+ dev_mc_sync(ipvlan->phy_dev, dev);
+ }
+
+-static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *s)
++static void ipvlan_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *s)
+ {
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+@@ -334,7 +334,6 @@ static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
+ s->rx_dropped = rx_errs;
+ s->tx_dropped = tx_drps;
+ }
+- return s;
+ }
+
+ static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 1b65f0f9..0254b2a9 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -97,8 +97,8 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+ }
+
+-static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void loopback_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ u64 bytes = 0;
+ u64 packets = 0;
+@@ -122,7 +122,6 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
+ stats->tx_packets = packets;
+ stats->rx_bytes = bytes;
+ stats->tx_bytes = bytes;
+- return stats;
+ }
+
+ static u32 always_on(struct net_device *dev)
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 2caac0c3..880ee985 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2899,13 +2899,13 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu)
+ return 0;
+ }
+
+-static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *s)
++static void macsec_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *s)
+ {
+ int cpu;
+
+ if (!dev->tstats)
+- return s;
++ return;
+
+ for_each_possible_cpu(cpu) {
+ struct pcpu_sw_netstats *stats;
+@@ -2929,8 +2929,6 @@ static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
+
+ s->rx_dropped = dev->stats.rx_dropped;
+ s->tx_dropped = dev->stats.tx_dropped;
+-
+- return s;
+ }
+
+ static int macsec_get_iflink(const struct net_device *dev)
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index dc8ccac0..94576142 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -857,8 +857,8 @@ static void macvlan_uninit(struct net_device *dev)
+ macvlan_port_destroy(port->dev);
+ }
+
+-static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void macvlan_dev_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+@@ -895,7 +895,6 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
+ stats->rx_dropped = rx_errors;
+ stats->tx_dropped = tx_dropped;
+ }
+- return stats;
+ }
+
+ static int macvlan_vlan_rx_add_vid(struct net_device *dev,
+diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
+index 7b7c70e2..a0daed8a 100644
+--- a/drivers/net/nlmon.c
++++ b/drivers/net/nlmon.c
+@@ -76,7 +76,7 @@ static int nlmon_close(struct net_device *dev)
+ return netlink_remove_tap(&nlmon->nt);
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ int i;
+@@ -104,8 +104,6 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+
+ stats->rx_bytes = bytes;
+ stats->tx_bytes = 0;
+-
+- return stats;
+ }
+
+ static u32 always_on(struct net_device *dev)
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 96fa0e61..d3b17318 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1312,7 +1312,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ return err;
+ }
+
+-static struct rtnl_link_stats64*
++static void
+ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
+ {
+ struct ppp *ppp = netdev_priv(dev);
+@@ -1332,8 +1332,6 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
+ stats64->rx_dropped = dev->stats.rx_dropped;
+ stats64->tx_dropped = dev->stats.tx_dropped;
+ stats64->rx_length_errors = dev->stats.rx_length_errors;
+-
+- return stats64;
+ }
+
+ static int ppp_dev_init(struct net_device *dev)
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index 9ed6d1c1..c8c0c231 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -571,7 +571,7 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu)
+
+ /* Netdevice get statistics request */
+
+-static struct rtnl_link_stats64 *
++static void
+ sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ struct net_device_stats *devstats = &dev->stats;
+@@ -602,7 +602,6 @@ sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->collisions += comp->sls_o_misses;
+ }
+ #endif
+- return stats;
+ }
+
+ /* Netdevice register callback */
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index a380649b..95499313 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1798,7 +1798,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
+ return err;
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ struct team *team = netdev_priv(dev);
+@@ -1835,7 +1835,6 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
+ stats->rx_nohandler = rx_nohandler;
+- return stats;
+ }
+
+ static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index a931b733..19fb25b1 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -983,7 +983,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
+ tun->align = new_hr;
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
+@@ -1017,7 +1017,6 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->rx_dropped = rx_dropped;
+ stats->rx_frame_errors = rx_frame_errors;
+ stats->tx_dropped = tx_dropped;
+- return stats;
+ }
+
+ static const struct net_device_ops tun_netdev_ops = {
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index fbc853e6..e25aa62d 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -161,8 +161,8 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
+ return atomic64_read(&priv->dropped);
+ }
+
+-static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *tot)
++static void veth_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *tot)
+ {
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+@@ -180,8 +180,6 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
+ tot->rx_packets = one.packets;
+ }
+ rcu_read_unlock();
+-
+- return tot;
+ }
+
+ /* fake multicast ability */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 1568aedd..15e87add 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1017,8 +1017,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
+ return ret;
+ }
+
+-static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
+- struct rtnl_link_stats64 *tot)
++static void virtnet_stats(struct net_device *dev,
++ struct rtnl_link_stats64 *tot)
+ {
+ struct virtnet_info *vi = netdev_priv(dev);
+ int cpu;
+@@ -1051,8 +1051,6 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
+ tot->rx_dropped = dev->stats.rx_dropped;
+ tot->rx_length_errors = dev->stats.rx_length_errors;
+ tot->rx_frame_errors = dev->stats.rx_frame_errors;
+-
+- return tot;
+ }
+
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
+index aabc6ef3..f88ffafe 100644
+--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
++++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
+@@ -113,7 +113,7 @@ vmxnet3_global_stats[] = {
+ };
+
+
+-struct rtnl_link_stats64 *
++void
+ vmxnet3_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+ {
+@@ -160,8 +160,6 @@ vmxnet3_get_stats64(struct net_device *netdev,
+ stats->rx_dropped += drvRxStats->drop_total;
+ stats->multicast += devRxStats->mcastPktsRxOK;
+ }
+-
+- return stats;
+ }
+
+ static int
+diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
+index 7dc37a09..ec19df56 100644
+--- a/drivers/net/vmxnet3/vmxnet3_int.h
++++ b/drivers/net/vmxnet3/vmxnet3_int.h
+@@ -466,8 +466,8 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
+
+ void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+
+-struct rtnl_link_stats64 *
+-vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
++void vmxnet3_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats);
+
+ extern char vmxnet3_driver_name[];
+ #endif
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 578bd500..499af07a 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -79,8 +79,8 @@ static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
+ kfree_skb(skb);
+ }
+
+-static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void vrf_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ int i;
+
+@@ -104,7 +104,6 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
+ stats->rx_bytes += rbytes;
+ stats->rx_packets += rpkts;
+ }
+- return stats;
+ }
+
+ /* Local traffic destined to local address. Reinsert the packet to rx
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index cd442e46..f87cae7e 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1081,8 +1081,8 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
+ return 0;
+ }
+
+-static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *tot)
++static void xennet_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *tot)
+ {
+ struct netfront_info *np = netdev_priv(dev);
+ int cpu;
+@@ -1113,8 +1113,6 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
+
+ tot->rx_errors = dev->stats.rx_errors;
+ tot->tx_dropped = dev->stats.tx_dropped;
+-
+- return tot;
+ }
+
+ static void xennet_release_tx_bufs(struct netfront_queue *queue)
+diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
+index 552a7dcb..d7dea9d4 100644
+--- a/drivers/staging/netlogic/xlr_net.c
++++ b/drivers/staging/netlogic/xlr_net.c
+@@ -395,14 +395,6 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
+ TX_DROP_FRAME_COUNTER);
+ }
+
+-static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev,
+- struct rtnl_link_stats64 *stats
+- )
+-{
+- xlr_stats(ndev, stats);
+- return stats;
+-}
+-
+ static const struct net_device_ops xlr_netdev_ops = {
+ .ndo_open = xlr_net_open,
+ .ndo_stop = xlr_net_stop,
+@@ -410,7 +402,7 @@ static const struct net_device_ops xlr_netdev_ops = {
+ .ndo_select_queue = xlr_net_select_queue,
+ .ndo_set_mac_address = xlr_net_set_mac_addr,
+ .ndo_set_rx_mode = xlr_set_rx_mode,
+- .ndo_get_stats64 = xlr_get_stats64,
++ .ndo_get_stats64 = xlr_stats,
+ };
+
+ /*
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 8d732965..6d206930 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -688,6 +688,25 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
+ int devm_add_action(struct device *dev, void (*action)(void *), void *data);
+ void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+
++/**
++ * devm_alloc_percpu - Resource-managed alloc_percpu
++ * @dev: Device to allocate per-cpu memory for
++ * @type: Type to allocate per-cpu memory for
++ *
++ * Managed alloc_percpu. Per-cpu memory allocated with this function is
++ * automatically freed on driver detach.
++ *
++ * RETURNS:
++ * Pointer to allocated memory on success, NULL on failure.
++ */
++#define devm_alloc_percpu(dev, type) \
++ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
++ __alignof__(type)))
++
++void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
++ size_t align);
++void devm_free_percpu(struct device *dev, void __percpu *pdata);
++
+ static inline int devm_add_action_or_reset(struct device *dev,
+ void (*action)(void *), void *data)
+ {
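The devm_alloc_percpu() helper added above mirrors devm_kzalloc(): the per-cpu
memory is tied to the device and released on driver detach. A minimal usage
sketch follows; the foo_* names and the stats layout are illustrative only,
not part of the patch:

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>

struct foo_stats {
	u64 rx_packets;
	u64 tx_packets;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_stats __percpu *stats;

	/* One instance per CPU; freed automatically on driver detach,
	 * so the remove path needs no matching free_percpu().
	 */
	stats = devm_alloc_percpu(&pdev->dev, struct foo_stats);
	if (!stats)
		return -ENOMEM;

	this_cpu_inc(stats->rx_packets);	/* example per-cpu update */
	return 0;
}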
+diff --git a/include/linux/fsl/svr.h b/include/linux/fsl/svr.h
+new file mode 100644
+index 00000000..e95c8f43
+--- /dev/null
++++ b/include/linux/fsl/svr.h
+@@ -0,0 +1,97 @@
++/*
++ * MPC85xx cpu type detection
++ *
++ * Copyright 2011-2012 Freescale Semiconductor, Inc.
++ *
++ * This is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef FSL_SVR_H
++#define FSL_SVR_H
++
++#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design revision */
++#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field */
++#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field */
++
++/* Some parts define SVR[0:23] as the SOC version */
++#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
++
++#define SVR_8533 0x803400
++#define SVR_8535 0x803701
++#define SVR_8536 0x803700
++#define SVR_8540 0x803000
++#define SVR_8541 0x807200
++#define SVR_8543 0x803200
++#define SVR_8544 0x803401
++#define SVR_8545 0x803102
++#define SVR_8547 0x803101
++#define SVR_8548 0x803100
++#define SVR_8555 0x807100
++#define SVR_8560 0x807000
++#define SVR_8567 0x807501
++#define SVR_8568 0x807500
++#define SVR_8569 0x808000
++#define SVR_8572 0x80E000
++#define SVR_P1010 0x80F100
++#define SVR_P1011 0x80E500
++#define SVR_P1012 0x80E501
++#define SVR_P1013 0x80E700
++#define SVR_P1014 0x80F101
++#define SVR_P1017 0x80F700
++#define SVR_P1020 0x80E400
++#define SVR_P1021 0x80E401
++#define SVR_P1022 0x80E600
++#define SVR_P1023 0x80F600
++#define SVR_P1024 0x80E402
++#define SVR_P1025 0x80E403
++#define SVR_P2010 0x80E300
++#define SVR_P2020 0x80E200
++#define SVR_P2040 0x821000
++#define SVR_P2041 0x821001
++#define SVR_P3041 0x821103
++#define SVR_P4040 0x820100
++#define SVR_P4080 0x820000
++#define SVR_P5010 0x822100
++#define SVR_P5020 0x822000
++#define SVR_P5021 0x820500
++#define SVR_P5040 0x820400
++#define SVR_T4240 0x824000
++#define SVR_T4120 0x824001
++#define SVR_T4160 0x824100
++#define SVR_T4080 0x824102
++#define SVR_C291 0x850000
++#define SVR_C292 0x850020
++#define SVR_C293 0x850030
++#define SVR_B4860 0x868000
++#define SVR_G4860 0x868001
++#define SVR_G4060 0x868003
++#define SVR_B4440 0x868100
++#define SVR_G4440 0x868101
++#define SVR_B4420 0x868102
++#define SVR_B4220 0x868103
++#define SVR_T1040 0x852000
++#define SVR_T1041 0x852001
++#define SVR_T1042 0x852002
++#define SVR_T1020 0x852100
++#define SVR_T1021 0x852101
++#define SVR_T1022 0x852102
++#define SVR_T1023 0x854100
++#define SVR_T1024 0x854000
++#define SVR_T2080 0x853000
++#define SVR_T2081 0x853100
++
++#define SVR_8610 0x80A000
++#define SVR_8641 0x809000
++#define SVR_8641D 0x809001
++
++#define SVR_9130 0x860001
++#define SVR_9131 0x860000
++#define SVR_9132 0x861000
++#define SVR_9232 0x861400
++
++#define SVR_Unknown 0xFFFFFF
++
++#endif
+diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
+index f2912914..22308465 100644
+--- a/include/linux/fsl_devices.h
++++ b/include/linux/fsl_devices.h
+@@ -99,7 +99,10 @@ struct fsl_usb2_platform_data {
+ unsigned suspended:1;
+ unsigned already_suspended:1;
+ unsigned has_fsl_erratum_a007792:1;
++ unsigned has_fsl_erratum_14:1;
+ unsigned has_fsl_erratum_a005275:1;
++ unsigned has_fsl_erratum_a006918:1;
++ unsigned has_fsl_erratum_a005697:1;
+ unsigned check_phy_clk_valid:1;
+
+ /* register save area for suspend/resume */
+diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
+index 9c6c8ef2..90b4107e 100644
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -74,6 +74,7 @@ enum {
+ NETIF_F_BUSY_POLL_BIT, /* Busy poll */
+
+ NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
++ NETIF_F_HW_ACCEL_MQ_BIT, /* Hardware-accelerated multiqueue */
+
+ /*
+ * Add your fresh new feature above and remember to update
+@@ -136,6 +137,7 @@ enum {
+ #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
+ #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
+ #define NETIF_F_HW_TC __NETIF_F(HW_TC)
++#define NETIF_F_HW_ACCEL_MQ __NETIF_F(HW_ACCEL_MQ)
+
+ #define for_each_netdev_feature(mask_addr, bit) \
+ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index c3a1537c..67ef59d0 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -916,8 +916,8 @@ struct netdev_xdp {
+ * Callback used when the transmitter has not made any progress
+ * for dev->watchdog ticks.
+ *
+- * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
+- * struct rtnl_link_stats64 *storage);
++ * void (*ndo_get_stats64)(struct net_device *dev,
++ * struct rtnl_link_stats64 *storage);
+ * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
+ * Called when a user wants to get the network device usage
+ * statistics. Drivers must do one of the following:
+@@ -1165,8 +1165,8 @@ struct net_device_ops {
+ struct neigh_parms *);
+ void (*ndo_tx_timeout) (struct net_device *dev);
+
+- struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
+- struct rtnl_link_stats64 *storage);
++ void (*ndo_get_stats64)(struct net_device *dev,
++ struct rtnl_link_stats64 *storage);
+ bool (*ndo_has_offload_stats)(int attr_id);
+ int (*ndo_get_offload_stats)(int attr_id,
+ const struct net_device *dev,
+@@ -1509,6 +1509,8 @@ enum netdev_priv_flags {
+ * @if_port: Selectable AUI, TP, ...
+ * @dma: DMA channel
+ * @mtu: Interface MTU value
++ * @min_mtu: Interface Minimum MTU value
++ * @max_mtu: Interface Maximum MTU value
+ * @type: Interface hardware type
+ * @hard_header_len: Maximum hardware header length.
+ * @min_header_len: Minimum hardware header length
+@@ -1735,6 +1737,8 @@ struct net_device {
+ unsigned char dma;
+
+ unsigned int mtu;
++ unsigned int min_mtu;
++ unsigned int max_mtu;
+ unsigned short type;
+ unsigned short hard_header_len;
+ unsigned short min_header_len;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 9a0c945e..06f33c98 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -903,6 +903,7 @@ void kfree_skb(struct sk_buff *skb);
+ void kfree_skb_list(struct sk_buff *segs);
+ void skb_tx_error(struct sk_buff *skb);
+ void consume_skb(struct sk_buff *skb);
++void skb_recycle(struct sk_buff *skb);
+ void __kfree_skb(struct sk_buff *skb);
+ extern struct kmem_cache *skbuff_head_cache;
+
+@@ -3057,6 +3058,7 @@ static inline void skb_free_datagram_locked(struct sock *sk,
+ }
+ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
++void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
+ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+ int len, __wsum csum);
+diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
+index 2739ccb6..9f5eb06f 100644
+--- a/include/linux/sys_soc.h
++++ b/include/linux/sys_soc.h
+@@ -13,6 +13,7 @@ struct soc_device_attribute {
+ const char *family;
+ const char *revision;
+ const char *soc_id;
++ const void *data;
+ };
+
+ /**
+@@ -34,4 +35,6 @@ void soc_device_unregister(struct soc_device *soc_dev);
+ */
+ struct device *soc_device_to_device(struct soc_device *soc);
+
++const struct soc_device_attribute *soc_device_match(
++ const struct soc_device_attribute *matches);
+ #endif /* __SOC_BUS_H */
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index 59557c07..876de4f9 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -261,8 +261,8 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
+ int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
+
+-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *tot);
++void ip_tunnel_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *tot);
+ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ int link, __be16 flags,
+ __be32 remote, __be32 local,
+diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
+index 51f38442..5c01afbf 100644
+--- a/include/uapi/linux/if_ether.h
++++ b/include/uapi/linux/if_ether.h
+@@ -35,6 +35,7 @@
+ #define ETH_DATA_LEN 1500 /* Max. octets in payload */
+ #define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
+ #define ETH_FCS_LEN 4 /* Octets in the FCS */
++#define ETH_MIN_MTU 68 /* Min IPv4 MTU per RFC791 */
+
+ /*
+ * These are the defined Ethernet Protocol ID's.
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index fbfacd51..ca3cf29b 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -671,7 +671,8 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev,
+ return 0;
+ }
+
+-static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
++static void vlan_dev_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct vlan_pcpu_stats *p;
+ u32 rx_errors = 0, tx_dropped = 0;
+@@ -702,8 +703,6 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
+ }
+ stats->rx_errors = rx_errors;
+ stats->tx_dropped = tx_dropped;
+-
+- return stats;
+ }
+
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 5f5e28f2..73d66ae3 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -156,8 +156,8 @@ static int br_dev_stop(struct net_device *dev)
+ return 0;
+ }
+
+-static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void br_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct net_bridge *br = netdev_priv(dev);
+ struct pcpu_sw_netstats tmp, sum = { 0 };
+@@ -181,8 +181,6 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
+ stats->tx_packets = sum.tx_packets;
+ stats->rx_bytes = sum.rx_bytes;
+ stats->rx_packets = sum.rx_packets;
+-
+- return stats;
+ }
+
+ static int br_change_mtu(struct net_device *dev, int new_mtu)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 912f40ac..17e16cf7 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6600,9 +6600,18 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
+ if (new_mtu == dev->mtu)
+ return 0;
+
+- /* MTU must be positive. */
+- if (new_mtu < 0)
++ /* MTU must be positive, and in range */
++ if (new_mtu < 0 || new_mtu < dev->min_mtu) {
++ net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
++ dev->name, new_mtu, dev->min_mtu);
+ return -EINVAL;
++ }
++
++ if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
++ net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
++ dev->name, new_mtu, dev->max_mtu);
++ return -EINVAL;
++ }
+
+ if (!netif_device_present(dev))
+ return -ENODEV;
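With dev_set_mtu() enforcing the range as above, a driver only has to publish
its limits once at setup time instead of open-coding checks in
ndo_change_mtu(). A sketch, where the foo_* names and the 9000-byte jumbo
limit are assumptions for illustration:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* dev_set_mtu() now rejects anything outside [min_mtu, max_mtu];
	 * max_mtu == 0 means the core enforces no upper bound.
	 */
	dev->min_mtu = ETH_MIN_MTU;	/* 68, per the if_ether.h hunk above */
	dev->max_mtu = 9000;		/* assumed hardware jumbo limit */
}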
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 7e7b7ce0..0f9c014a 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -842,6 +842,32 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
+ }
+ EXPORT_SYMBOL(napi_consume_skb);
+
++/**
++ * skb_recycle - clean up an skb for reuse
++ * @skb: buffer
++ *
++ * Recycles the skb to be reused as a receive buffer. This
++ * function does any necessary reference count dropping, and
++ * cleans up the skbuff as if it just came from __alloc_skb().
++ */
++void skb_recycle(struct sk_buff *skb)
++{
++ struct skb_shared_info *shinfo;
++ u8 head_frag = skb->head_frag;
++
++ skb_release_head_state(skb);
++
++ shinfo = skb_shinfo(skb);
++ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
++ atomic_set(&shinfo->dataref, 1);
++
++ memset(skb, 0, offsetof(struct sk_buff, tail));
++ skb->data = skb->head + NET_SKB_PAD;
++ skb->head_frag = head_frag;
++ skb_reset_tail_pointer(skb);
++}
++EXPORT_SYMBOL(skb_recycle);
++
+ /* Make sure a field is enclosed inside headers_start/headers_end section */
+ #define CHECK_SKB_FIELD(field) \
+ BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
+@@ -1073,7 +1099,7 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
+ skb->inner_mac_header += off;
+ }
+
+-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
++void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+ {
+ __copy_skb_header(new, old);
+
+@@ -1081,6 +1107,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+ skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
+ skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
+ }
++EXPORT_SYMBOL(copy_skb_header);
+
+ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
+ {
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 0fd1976a..9d6c1009 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -188,8 +188,8 @@ int iptunnel_handle_offloads(struct sk_buff *skb,
+ EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
+
+ /* Often modified stats are per cpu, other are shared (netdev->stats) */
+-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *tot)
++void ip_tunnel_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *tot)
+ {
+ int i;
+
+@@ -214,8 +214,6 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
+ }
+-
+- return tot;
+ }
+ EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index eecc64e1..ce73136a 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -106,8 +106,8 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ return NETDEV_TX_OK;
+ }
+
+-static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void l2tp_eth_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct l2tp_eth *priv = netdev_priv(dev);
+
+@@ -117,10 +117,8 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
+ stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
+ stats->rx_packets = atomic_long_read(&priv->rx_packets);
+ stats->rx_errors = atomic_long_read(&priv->rx_errors);
+- return stats;
+ }
+
+-
+ static const struct net_device_ops l2tp_eth_netdev_ops = {
+ .ndo_init = l2tp_eth_dev_init,
+ .ndo_uninit = l2tp_eth_dev_uninit,
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 37bec0f8..aee93423 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1133,7 +1133,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
+ return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ int i;
+@@ -1158,8 +1158,6 @@ ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->rx_bytes += rx_bytes;
+ stats->tx_bytes += tx_bytes;
+ }
+-
+- return stats;
+ }
+
+ static const struct net_device_ops ieee80211_dataif_ops = {
+diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
+index e7da2902..95fd5744 100644
+--- a/net/openvswitch/vport-internal_dev.c
++++ b/net/openvswitch/vport-internal_dev.c
+@@ -106,7 +106,7 @@ static void internal_dev_destructor(struct net_device *dev)
+ free_netdev(dev);
+ }
+
+-static struct rtnl_link_stats64 *
++static void
+ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ {
+ int i;
+@@ -134,8 +134,6 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ stats->tx_bytes += local_stats.tx_bytes;
+ stats->tx_packets += local_stats.tx_packets;
+ }
+-
+- return stats;
+ }
+
+ static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index a62de9e4..5b287e1d 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -309,6 +309,13 @@ static void dev_watchdog(unsigned long arg)
+ txq->trans_timeout++;
+ break;
+ }
++
++ /* Devices with HW_ACCEL_MQ have multiple txqs
++ * but update only the first one's transmission
++ * timestamp, so avoid checking the rest.
++ */
++ if (dev->features & NETIF_F_HW_ACCEL_MQ)
++ break;
+ }
+
+ if (some_queue_timedout) {
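NETIF_F_HW_ACCEL_MQ is intended for hardware like DPAA, where all txqs feed
one accelerated channel and only queue 0's trans_start is kept fresh. A
driver opting in might look like the sketch below (foo_* names illustrative;
on 4.9, netif_trans_update() touches txq 0 only):

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* hardware enqueue elided */
	netif_trans_update(dev);	/* refreshes txq 0's trans_start */
	return NETDEV_TX_OK;
}

static void foo_init(struct net_device *dev)
{
	dev->features |= NETIF_F_HW_ACCEL_MQ;	/* watchdog checks txq 0 only */
}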
+diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
+index 2cd9b447..5e25213e 100644
+--- a/net/sched/sch_teql.c
++++ b/net/sched/sch_teql.c
+@@ -401,8 +401,8 @@ static int teql_master_close(struct net_device *dev)
+ return 0;
+ }
+
+-static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats)
++static void teql_master_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct teql_master *m = netdev_priv(dev);
+
+@@ -410,7 +410,6 @@ static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+ stats->tx_bytes = m->tx_bytes;
+ stats->tx_errors = m->tx_errors;
+ stats->tx_dropped = m->tx_dropped;
+- return stats;
+ }
+
+ static int teql_master_mtu(struct net_device *dev, int new_mtu)
+--
+2.14.1
+
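Every ndo_get_stats64 conversion in the patch above follows one pattern: the
callback now fills the caller-provided structure and returns void, since
dev_get_stats() zeroes the buffer before the call and ignored the returned
pointer anyway. A minimal driver-side sketch under the new signature, with
foo_* names chosen for illustration:

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* stats arrives zeroed from dev_get_stats(); fill what we track. */
	stats->rx_packets = priv->rx_packets;
	stats->tx_packets = priv->tx_packets;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_stats64	= foo_get_stats64,
};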
diff --git a/target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch b/target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch
new file mode 100644
index 0000000000..67e09c5270
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch
@@ -0,0 +1,155005 @@
+From 6fe4518adbbbab0404958db4aa95673d60174881 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 11:58:03 +0800
+Subject: [PATCH] sdk_dpaa: support layerscape
+
+This is an integrated patch for layerscape dpaa1-sdk support.
+
+Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
+Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 173 +
+ drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 46 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
+ drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1213 ++++
+ drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 698 ++
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 205 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 49 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1992 +++++
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 237 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1811 +++++
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 225 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1179 +++
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 ++
+ drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 291 +
+ drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 907 +++
+ drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 489 ++
+ drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 135 +
+ .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 +++
+ .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
+ drivers/net/ethernet/freescale/sdk_fman/Kconfig | 153 +
+ drivers/net/ethernet/freescale/sdk_fman/Makefile | 11 +
+ .../freescale/sdk_fman/Peripherals/FM/HC/Makefile | 15 +
+ .../freescale/sdk_fman/Peripherals/FM/HC/hc.c | 1232 ++++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/Makefile | 28 +
+ .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c | 1464 ++++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h | 228 +
+ .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c | 97 +
+ .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h | 42 +
+ .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c | 658 ++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h | 225 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_crc32.c | 119 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_crc32.h | 43 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_dtsec.c | 845 +++
+ .../Peripherals/FM/MAC/fman_dtsec_mii_acc.c | 163 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_memac.c | 511 ++
+ .../Peripherals/FM/MAC/fman_memac_mii_acc.c | 213 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_tgec.c | 367 +
+ .../freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 1096 +++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/memac.h | 110 +
+ .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c | 78 +
+ .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h | 73 +
+ .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.c | 975 +++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.h | 151 +
+ .../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c | 139 +
+ .../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h | 80 +
+ .../sdk_fman/Peripherals/FM/MACSEC/Makefile | 15 +
+ .../sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c | 237 +
+ .../sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h | 203 +
+ .../Peripherals/FM/MACSEC/fm_macsec_guest.c | 59 +
+ .../Peripherals/FM/MACSEC/fm_macsec_master.c | 1031 +++
+ .../Peripherals/FM/MACSEC/fm_macsec_master.h | 479 ++
+ .../Peripherals/FM/MACSEC/fm_macsec_secy.c | 883 +++
+ .../Peripherals/FM/MACSEC/fm_macsec_secy.h | 144 +
+ .../freescale/sdk_fman/Peripherals/FM/Makefile | 23 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/Makefile | 26 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h | 360 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c | 7582 ++++++++++++++++++++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h | 399 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c | 3242 +++++++++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h | 206 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_manip.c | 5571 ++++++++++++++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_manip.h | 555 ++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c | 2095 ++++++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h | 543 ++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h | 280 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.c | 1847 +++++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.h | 165 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c | 423 ++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h | 316 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_replic.c | 984 +++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_replic.h | 101 +
+ .../sdk_fman/Peripherals/FM/Pcd/fman_kg.c | 888 +++
+ .../sdk_fman/Peripherals/FM/Pcd/fman_prs.c | 129 +
+ .../sdk_fman/Peripherals/FM/Port/Makefile | 15 +
+ .../sdk_fman/Peripherals/FM/Port/fm_port.c | 6436 +++++++++++++++++
+ .../sdk_fman/Peripherals/FM/Port/fm_port.h | 999 +++
+ .../sdk_fman/Peripherals/FM/Port/fm_port_dsar.h | 494 ++
+ .../sdk_fman/Peripherals/FM/Port/fm_port_im.c | 753 ++
+ .../sdk_fman/Peripherals/FM/Port/fman_port.c | 1568 ++++
+ .../freescale/sdk_fman/Peripherals/FM/Rtc/Makefile | 15 +
+ .../freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c | 692 ++
+ .../freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h | 96 +
+ .../sdk_fman/Peripherals/FM/Rtc/fman_rtc.c | 334 +
+ .../freescale/sdk_fman/Peripherals/FM/SP/Makefile | 15 +
+ .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c | 757 ++
+ .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h | 85 +
+ .../freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c | 197 +
+ .../freescale/sdk_fman/Peripherals/FM/fm.c | 5216 ++++++++++++++
+ .../freescale/sdk_fman/Peripherals/FM/fm.h | 648 ++
+ .../freescale/sdk_fman/Peripherals/FM/fm_ipc.h | 465 ++
+ .../freescale/sdk_fman/Peripherals/FM/fm_muram.c | 174 +
+ .../freescale/sdk_fman/Peripherals/FM/fman.c | 1398 ++++
+ .../sdk_fman/Peripherals/FM/inc/fm_common.h | 1214 ++++
+ .../freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h | 93 +
+ .../sdk_fman/Peripherals/FM/inc/fm_sp_common.h | 117 +
+ .../net/ethernet/freescale/sdk_fman/etc/Makefile | 12 +
+ .../net/ethernet/freescale/sdk_fman/etc/error.c | 95 +
+ drivers/net/ethernet/freescale/sdk_fman/etc/list.c | 71 +
+ .../net/ethernet/freescale/sdk_fman/etc/memcpy.c | 620 ++
+ drivers/net/ethernet/freescale/sdk_fman/etc/mm.c | 1155 +++
+ drivers/net/ethernet/freescale/sdk_fman/etc/mm.h | 105 +
+ .../net/ethernet/freescale/sdk_fman/etc/sprint.c | 81 +
+ .../ethernet/freescale/sdk_fman/fmanv3h_dflags.h | 57 +
+ .../ethernet/freescale/sdk_fman/fmanv3l_dflags.h | 56 +
+ .../sdk_fman/inc/Peripherals/crc_mac_addr_ext.h | 364 +
+ .../freescale/sdk_fman/inc/Peripherals/dpaa_ext.h | 210 +
+ .../freescale/sdk_fman/inc/Peripherals/fm_ext.h | 1731 +++++
+ .../sdk_fman/inc/Peripherals/fm_mac_ext.h | 859 +++
+ .../sdk_fman/inc/Peripherals/fm_macsec_ext.h | 1271 ++++
+ .../sdk_fman/inc/Peripherals/fm_muram_ext.h | 170 +
+ .../sdk_fman/inc/Peripherals/fm_pcd_ext.h | 3974 ++++++++++
+ .../sdk_fman/inc/Peripherals/fm_port_ext.h | 2608 +++++++
+ .../sdk_fman/inc/Peripherals/fm_rtc_ext.h | 619 ++
+ .../sdk_fman/inc/Peripherals/fm_vsp_ext.h | 411 ++
+ .../sdk_fman/inc/Peripherals/mii_acc_ext.h | 76 +
+ .../net/ethernet/freescale/sdk_fman/inc/core_ext.h | 90 +
+ .../freescale/sdk_fman/inc/cores/arm_ext.h | 55 +
+ .../freescale/sdk_fman/inc/cores/e500v2_ext.h | 476 ++
+ .../freescale/sdk_fman/inc/cores/ppc_ext.h | 141 +
+ .../ethernet/freescale/sdk_fman/inc/ddr_std_ext.h | 77 +
+ .../ethernet/freescale/sdk_fman/inc/debug_ext.h | 233 +
+ .../ethernet/freescale/sdk_fman/inc/endian_ext.h | 447 ++
+ .../net/ethernet/freescale/sdk_fman/inc/enet_ext.h | 205 +
+ .../ethernet/freescale/sdk_fman/inc/error_ext.h | 529 ++
+ .../ethernet/freescale/sdk_fman/inc/etc/list_ext.h | 358 +
+ .../ethernet/freescale/sdk_fman/inc/etc/mem_ext.h | 318 +
+ .../freescale/sdk_fman/inc/etc/memcpy_ext.h | 208 +
+ .../ethernet/freescale/sdk_fman/inc/etc/mm_ext.h | 310 +
+ .../freescale/sdk_fman/inc/etc/sprint_ext.h | 118 +
+ .../sdk_fman/inc/flib/common/arch/ppc_access.h | 37 +
+ .../freescale/sdk_fman/inc/flib/common/general.h | 52 +
+ .../freescale/sdk_fman/inc/flib/fman_common.h | 78 +
+ .../freescale/sdk_fman/inc/flib/fsl_enet.h | 273 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman.h | 825 +++
+ .../freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h | 1096 +++
+ .../sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h | 107 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_kg.h | 514 ++
+ .../freescale/sdk_fman/inc/flib/fsl_fman_memac.h | 427 ++
+ .../sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h | 78 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_port.h | 593 ++
+ .../freescale/sdk_fman/inc/flib/fsl_fman_prs.h | 102 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_rtc.h | 449 ++
+ .../freescale/sdk_fman/inc/flib/fsl_fman_sp.h | 138 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_tgec.h | 479 ++
+ .../integrations/FMANV3H/dpaa_integration_ext.h | 291 +
+ .../sdk_fman/inc/integrations/FMANV3H/part_ext.h | 71 +
+ .../integrations/FMANV3H/part_integration_ext.h | 304 +
+ .../integrations/FMANV3L/dpaa_integration_ext.h | 293 +
+ .../sdk_fman/inc/integrations/FMANV3L/part_ext.h | 59 +
+ .../integrations/FMANV3L/part_integration_ext.h | 304 +
+ .../inc/integrations/LS1043/dpaa_integration_ext.h | 291 +
+ .../sdk_fman/inc/integrations/LS1043/part_ext.h | 64 +
+ .../inc/integrations/LS1043/part_integration_ext.h | 185 +
+ .../inc/integrations/P1023/dpaa_integration_ext.h | 213 +
+ .../sdk_fman/inc/integrations/P1023/part_ext.h | 82 +
+ .../inc/integrations/P1023/part_integration_ext.h | 635 ++
+ .../P3040_P4080_P5020/dpaa_integration_ext.h | 276 +
+ .../inc/integrations/P3040_P4080_P5020/part_ext.h | 83 +
+ .../P3040_P4080_P5020/part_integration_ext.h | 336 +
+ .../net/ethernet/freescale/sdk_fman/inc/math_ext.h | 100 +
+ .../net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h | 435 ++
+ .../net/ethernet/freescale/sdk_fman/inc/net_ext.h | 430 ++
+ .../net/ethernet/freescale/sdk_fman/inc/std_ext.h | 48 +
+ .../ethernet/freescale/sdk_fman/inc/stdarg_ext.h | 49 +
+ .../ethernet/freescale/sdk_fman/inc/stdlib_ext.h | 162 +
+ .../ethernet/freescale/sdk_fman/inc/string_ext.h | 56 +
+ .../ethernet/freescale/sdk_fman/inc/types_ext.h | 62 +
+ .../ethernet/freescale/sdk_fman/inc/xx_common.h | 56 +
+ .../net/ethernet/freescale/sdk_fman/inc/xx_ext.h | 791 ++
+ .../ethernet/freescale/sdk_fman/ls1043_dflags.h | 56 +
+ .../net/ethernet/freescale/sdk_fman/ncsw_config.mk | 53 +
+ .../net/ethernet/freescale/sdk_fman/p1023_dflags.h | 65 +
+ .../freescale/sdk_fman/p3040_4080_5020_dflags.h | 62 +
+ .../net/ethernet/freescale/sdk_fman/src/Makefile | 11 +
+ .../freescale/sdk_fman/src/inc/system/sys_ext.h | 118 +
+ .../freescale/sdk_fman/src/inc/system/sys_io_ext.h | 46 +
+ .../freescale/sdk_fman/src/inc/types_linux.h | 208 +
+ .../sdk_fman/src/inc/wrapper/fsl_fman_test.h | 84 +
+ .../sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h | 128 +
+ .../sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h | 163 +
+ .../sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h | 921 +++
+ .../ethernet/freescale/sdk_fman/src/inc/xx/xx.h | 50 +
+ .../freescale/sdk_fman/src/system/Makefile | 10 +
+ .../freescale/sdk_fman/src/system/sys_io.c | 171 +
+ .../freescale/sdk_fman/src/wrapper/Makefile | 19 +
+ .../freescale/sdk_fman/src/wrapper/fman_test.c | 1665 +++++
+ .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.c | 2908 ++++++++
+ .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.h | 294 +
+ .../sdk_fman/src/wrapper/lnxwrp_fm_port.c | 1480 ++++
+ .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c | 4813 +++++++++++++
+ .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c | 1297 ++++
+ .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h | 755 ++
+ .../sdk_fman/src/wrapper/lnxwrp_resources.h | 121 +
+ .../sdk_fman/src/wrapper/lnxwrp_resources_ut.c | 191 +
+ .../sdk_fman/src/wrapper/lnxwrp_resources_ut.h | 144 +
+ .../sdk_fman/src/wrapper/lnxwrp_resources_ut.make | 28 +
+ .../freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c | 60 +
+ .../freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h | 60 +
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c | 1855 +++++
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h | 136 +
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c | 1268 ++++
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h | 56 +
+ .../ethernet/freescale/sdk_fman/src/xx/Makefile | 18 +
+ .../freescale/sdk_fman/src/xx/module_strings.c | 46 +
+ .../freescale/sdk_fman/src/xx/xx_arm_linux.c | 905 +++
+ .../ethernet/freescale/sdk_fman/src/xx/xx_linux.c | 918 +++
+ drivers/staging/fsl_qbman/Kconfig | 228 +
+ drivers/staging/fsl_qbman/Makefile | 28 +
+ drivers/staging/fsl_qbman/bman_config.c | 720 ++
+ drivers/staging/fsl_qbman/bman_debugfs.c | 119 +
+ drivers/staging/fsl_qbman/bman_driver.c | 575 ++
+ drivers/staging/fsl_qbman/bman_high.c | 1145 +++
+ drivers/staging/fsl_qbman/bman_low.h | 565 ++
+ drivers/staging/fsl_qbman/bman_private.h | 166 +
+ drivers/staging/fsl_qbman/bman_test.c | 56 +
+ drivers/staging/fsl_qbman/bman_test.h | 44 +
+ drivers/staging/fsl_qbman/bman_test_high.c | 183 +
+ drivers/staging/fsl_qbman/bman_test_thresh.c | 196 +
+ drivers/staging/fsl_qbman/dpa_alloc.c | 706 ++
+ drivers/staging/fsl_qbman/dpa_sys.h | 259 +
+ drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 +
+ drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 +
+ drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 +
+ drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 +
+ drivers/staging/fsl_qbman/fsl_usdpaa.c | 1983 +++++
+ drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 +
+ drivers/staging/fsl_qbman/qbman_driver.c | 88 +
+ drivers/staging/fsl_qbman/qman_config.c | 1224 ++++
+ drivers/staging/fsl_qbman/qman_debugfs.c | 1594 ++++
+ drivers/staging/fsl_qbman/qman_driver.c | 977 +++
+ drivers/staging/fsl_qbman/qman_high.c | 5669 +++++++++++++++
+ drivers/staging/fsl_qbman/qman_low.h | 1427 ++++
+ drivers/staging/fsl_qbman/qman_private.h | 398 +
+ drivers/staging/fsl_qbman/qman_test.c | 57 +
+ drivers/staging/fsl_qbman/qman_test.h | 45 +
+ drivers/staging/fsl_qbman/qman_test_high.c | 216 +
+ drivers/staging/fsl_qbman/qman_test_hotpotato.c | 502 ++
+ drivers/staging/fsl_qbman/qman_utility.c | 129 +
+ include/linux/fsl_bman.h | 532 ++
+ include/linux/fsl_qman.h | 3888 ++++++++++
+ include/linux/fsl_usdpaa.h | 372 +
+ include/uapi/linux/fmd/Kbuild | 5 +
+ include/uapi/linux/fmd/Peripherals/Kbuild | 4 +
+ include/uapi/linux/fmd/Peripherals/fm_ioctls.h | 628 ++
+ include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h | 3084 ++++++++
+ .../uapi/linux/fmd/Peripherals/fm_port_ioctls.h | 948 +++
+ .../uapi/linux/fmd/Peripherals/fm_test_ioctls.h | 208 +
+ include/uapi/linux/fmd/integrations/Kbuild | 1 +
+ .../linux/fmd/integrations/integration_ioctls.h | 56 +
+ include/uapi/linux/fmd/ioctls.h | 96 +
+ include/uapi/linux/fmd/net_ioctls.h | 430 ++
+ 257 files changed, 152931 insertions(+)
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Kconfig
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/hc.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec_mii_acc.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac.c
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac_mii_acc.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_tgec.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_guest.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_kg.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_prs.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.h
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_dsar.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_im.c
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fman_port.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fman_rtc.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_ipc.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_muram.c
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fman.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_common.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_sp_common.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/error.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/list.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/memcpy.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/mm.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/mm.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/sprint.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/fmanv3h_dflags.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/fmanv3l_dflags.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/crc_mac_addr_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/dpaa_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_mac_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_macsec_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_muram_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_pcd_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_port_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_rtc_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_vsp_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/mii_acc_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/core_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/cores/arm_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/cores/e500v2_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/cores/ppc_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/ddr_std_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/debug_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/endian_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/enet_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/error_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/list_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/mem_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/memcpy_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/mm_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/sprint_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/arch/ppc_access.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/general.h
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fman_common.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_enet.h
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_kg.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac.h
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_port.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_prs.h
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_rtc.h
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_sp.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_tgec.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/dpaa_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/dpaa_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/dpaa_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/dpaa_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/dpaa_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_integration_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/math_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/net_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/std_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/stdarg_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/stdlib_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/string_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/types_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/xx_common.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/xx_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/ls1043_dflags.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/p1023_dflags.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/p3040_4080_5020_dflags.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_io_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/types_linux.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/fsl_fman_test.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/xx/xx.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/system/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/system/sys_io.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/fman_test.c
+ create mode 100755 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm_port.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.make
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/Makefile
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/module_strings.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_arm_linux.c
+ create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_linux.c
+ create mode 100644 drivers/staging/fsl_qbman/Kconfig
+ create mode 100644 drivers/staging/fsl_qbman/Makefile
+ create mode 100644 drivers/staging/fsl_qbman/bman_config.c
+ create mode 100644 drivers/staging/fsl_qbman/bman_debugfs.c
+ create mode 100644 drivers/staging/fsl_qbman/bman_driver.c
+ create mode 100644 drivers/staging/fsl_qbman/bman_high.c
+ create mode 100644 drivers/staging/fsl_qbman/bman_low.h
+ create mode 100644 drivers/staging/fsl_qbman/bman_private.h
+ create mode 100644 drivers/staging/fsl_qbman/bman_test.c
+ create mode 100644 drivers/staging/fsl_qbman/bman_test.h
+ create mode 100644 drivers/staging/fsl_qbman/bman_test_high.c
+ create mode 100644 drivers/staging/fsl_qbman/bman_test_thresh.c
+ create mode 100644 drivers/staging/fsl_qbman/dpa_alloc.c
+ create mode 100644 drivers/staging/fsl_qbman/dpa_sys.h
+ create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm.h
+ create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm64.h
+ create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc32.h
+ create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc64.h
+ create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa.c
+ create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
+ create mode 100644 drivers/staging/fsl_qbman/qbman_driver.c
+ create mode 100644 drivers/staging/fsl_qbman/qman_config.c
+ create mode 100644 drivers/staging/fsl_qbman/qman_debugfs.c
+ create mode 100644 drivers/staging/fsl_qbman/qman_driver.c
+ create mode 100644 drivers/staging/fsl_qbman/qman_high.c
+ create mode 100644 drivers/staging/fsl_qbman/qman_low.h
+ create mode 100644 drivers/staging/fsl_qbman/qman_private.h
+ create mode 100644 drivers/staging/fsl_qbman/qman_test.c
+ create mode 100644 drivers/staging/fsl_qbman/qman_test.h
+ create mode 100644 drivers/staging/fsl_qbman/qman_test_high.c
+ create mode 100644 drivers/staging/fsl_qbman/qman_test_hotpotato.c
+ create mode 100644 drivers/staging/fsl_qbman/qman_utility.c
+ create mode 100644 include/linux/fsl_bman.h
+ create mode 100644 include/linux/fsl_qman.h
+ create mode 100644 include/linux/fsl_usdpaa.h
+ create mode 100644 include/uapi/linux/fmd/Kbuild
+ create mode 100644 include/uapi/linux/fmd/Peripherals/Kbuild
+ create mode 100644 include/uapi/linux/fmd/Peripherals/fm_ioctls.h
+ create mode 100644 include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h
+ create mode 100644 include/uapi/linux/fmd/Peripherals/fm_port_ioctls.h
+ create mode 100644 include/uapi/linux/fmd/Peripherals/fm_test_ioctls.h
+ create mode 100644 include/uapi/linux/fmd/integrations/Kbuild
+ create mode 100644 include/uapi/linux/fmd/integrations/integration_ioctls.h
+ create mode 100644 include/uapi/linux/fmd/ioctls.h
+ create mode 100644 include/uapi/linux/fmd/net_ioctls.h
+
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
+new file mode 100644
+index 00000000..92118b76
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
+@@ -0,0 +1,173 @@
++menuconfig FSL_SDK_DPAA_ETH
++ tristate "DPAA Ethernet"
++ depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && FSL_SDK_FMAN && !FSL_DPAA_ETH
++ select PHYLIB
++ help
++ Data Path Acceleration Architecture Ethernet driver,
++ supporting the Freescale QorIQ chips.
++	  Depends on the Freescale Buffer Manager, Queue Manager
++	  and Frame Manager drivers.
++
++if FSL_SDK_DPAA_ETH
++
++config FSL_DPAA_HOOKS
++ bool "DPAA Ethernet driver hooks"
++
++config FSL_DPAA_CEETM
++ bool "DPAA CEETM QoS"
++ depends on NET_SCHED
++ default n
++ help
++ Enable QoS offloading support through the CEETM hardware block.
++
++config FSL_DPAA_OFFLINE_PORTS
++ bool "Offline Ports support"
++ depends on FSL_SDK_DPAA_ETH
++ default y
++ help
++	  The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
++ most of the functionality of the regular, online ports, except they receive their
++ frames from a core or an accelerator on the SoC, via QMan frame queues,
++ rather than directly from the network.
++ Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
++ any online FMan port. They deliver the processed frames to frame queues, according
++ to the applied PCD configurations.
++
++	  Choosing this feature will not impact the functionality or performance of the system,
++	  so it is safe to enable it.
++
++config FSL_DPAA_ADVANCED_DRIVERS
++ bool "Advanced DPAA Ethernet drivers"
++ depends on FSL_SDK_DPAA_ETH
++ default y
++ help
++	  Besides the standard DPAA Ethernet driver, the DPAA Proxy initialization driver
++ is needed to support advanced scenarios. Select this to also build the advanced
++ drivers.
++
++config FSL_DPAA_ETH_JUMBO_FRAME
++ bool "Optimize for jumbo frames"
++ default n
++ help
++	  Optimize the DPAA Ethernet driver throughput for large-frame
++	  termination traffic (e.g. 4K and above).
++	  NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
++	  is set to 9600 bytes.
++	  Using this option in combination with small frames significantly
++	  increases the driver's memory footprint and may even deplete
++	  the system memory. Also, the skb truesize is altered and messages
++ from the stack that warn against this are bypassed.
++ This option is not available on LS1043.
++
++config FSL_DPAA_TS
++ bool "Linux compliant timestamping"
++ depends on FSL_SDK_DPAA_ETH
++ default n
++ help
++ Enable Linux API compliant timestamping support.
++
++config FSL_DPAA_1588
++ bool "IEEE 1588-compliant timestamping"
++ depends on FSL_SDK_DPAA_ETH
++ select FSL_DPAA_TS
++ default n
++ help
++ Enable IEEE1588 support code.
++
++config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++ bool "Use driver's Tx queue selection mechanism"
++ default y
++ depends on FSL_SDK_DPAA_ETH
++ help
++	  The DPAA-Ethernet driver defines an ndo_select_queue() callback for optimal selection
++ of the egress FQ. That will override the XPS support for this netdevice.
++ If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
++ or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
++ and use the standard XPS support instead.
++
++config FSL_DPAA_ETH_MAX_BUF_COUNT
++	int "Maximum number of buffers in private bpool"
++ depends on FSL_SDK_DPAA_ETH
++ range 64 2048
++ default "128"
++ help
++	  The maximum number of buffers allocated by default in the DPAA-Ethernet private port's
++	  buffer pool. One needn't normally modify this, as it has already been tuned for
++	  performance. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
++
++config FSL_DPAA_ETH_REFILL_THRESHOLD
++ int "Private bpool refill threshold"
++ depends on FSL_SDK_DPAA_ETH
++ range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
++ default "80"
++ help
++	  The DPAA-Ethernet driver will start replenishing buffer pools whose count
++	  falls below this threshold. It must not exceed FSL_DPAA_ETH_MAX_BUF_COUNT. One needn't
++	  normally modify this value unless one has very specific performance reasons.
++
++config FSL_DPAA_CS_THRESHOLD_1G
++ hex "Egress congestion threshold on 1G ports"
++ depends on FSL_SDK_DPAA_ETH
++ range 0x1000 0x10000000
++ default "0x06000000"
++ help
++ The size in bytes of the egress Congestion State notification threshold on 1G ports.
++ The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
++ (e.g. by sending UDP datagrams at "while(1) speed"),
++ and the larger the frame size, the more acute the problem.
++ So we have to find a balance between these factors:
++	  - avoiding the device staying congested for a prolonged time (risking
++	    that the netdev watchdog fires - see also the tx_timeout module param);
++ - affecting performance of protocols such as TCP, which otherwise
++ behave well under the congestion notification mechanism;
++ - preventing the Tx cores from tightly-looping (as if the congestion
++ threshold was too low to be effective);
++ - running out of memory if the CS threshold is set too high.
++
++config FSL_DPAA_CS_THRESHOLD_10G
++ hex "Egress congestion threshold on 10G ports"
++ depends on FSL_SDK_DPAA_ETH
++ range 0x1000 0x20000000
++ default "0x10000000"
++ help
++ The size in bytes of the egress Congestion State notification threshold on 10G ports.
++
++config FSL_DPAA_INGRESS_CS_THRESHOLD
++ hex "Ingress congestion threshold on FMan ports"
++ depends on FSL_SDK_DPAA_ETH
++ default "0x10000000"
++ help
++ The size in bytes of the ingress tail-drop threshold on FMan ports.
++ Traffic piling up above this value will be rejected by QMan and discarded by FMan.
++
++config FSL_DPAA_ETH_DEBUGFS
++ bool "DPAA Ethernet debugfs interface"
++ depends on DEBUG_FS && FSL_SDK_DPAA_ETH
++ default y
++ help
++ This option compiles debugfs code for the DPAA Ethernet driver.
++
++config FSL_DPAA_ETH_DEBUG
++ bool "DPAA Ethernet Debug Support"
++ depends on FSL_SDK_DPAA_ETH
++ default n
++ help
++ This option compiles debug code for the DPAA Ethernet driver.
++
++config FSL_DPAA_DBG_LOOP
++ bool "DPAA Ethernet Debug loopback"
++ depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++ default n
++ help
++	  This option allows diverting all traffic received on a certain interface A towards a
++	  selected interface B. It is used to benchmark the HW + Ethernet driver in
++	  isolation from the Linux networking stack. The loops are controlled by debugfs entries,
++	  one for each interface. By default all loops are disabled (target value is -1). E.g. to
++	  change the loop setting for interface 4 and divert all received traffic to interface 5,
++	  write the Tx interface number into the receive interface's debugfs file:
++ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
++ 4->-1
++ # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
++ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
++ 4->5
++endif # FSL_SDK_DPAA_ETH
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/Makefile b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
+new file mode 100644
+index 00000000..a0f4b190
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
+@@ -0,0 +1,46 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++# Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++ccflags-y += -I$(NET_DPA)
++
++obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
++obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
++
++fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
++ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
++fsl_dpa-objs += dpaa_debugfs.o
++endif
++ifeq ($(CONFIG_FSL_DPAA_1588),y)
++fsl_dpa-objs += dpaa_1588.o
++endif
++ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
++ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
++fsl_dpa-objs += dpaa_eth_ceetm.o
++endif
++
++fsl_mac-objs += mac.o mac-api.o
++
++# Advanced drivers
++ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
++obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
++obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
++
++fsl_advanced-objs += dpaa_eth_base.o
++# support for multiple drivers per kernel module comes in kernel 3.14
++# so we are forced to generate several modules for the advanced drivers
++fsl_proxy-objs += dpaa_eth_proxy.o
++
++ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
++obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
++
++fsl_oh-objs += offline_port.o
++endif
++endif
++
++# Needed by the tracing framework
++CFLAGS_dpaa_eth.o := -I$(src)
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
+new file mode 100644
+index 00000000..3bf8cbca
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
+@@ -0,0 +1,580 @@
++/* Copyright (C) 2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2009 IXXAT Automation, GmbH
++ *
++ * DPAA Ethernet Driver -- IEEE 1588 interface functionality
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++#include <linux/io.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/vmalloc.h>
++#include <linux/spinlock.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <linux/udp.h>
++#include <asm/div64.h>
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++#include "dpaa_1588.h"
++#include "mac.h"
++
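++/* The timestamp rings below rely on (size - 1) masking for head/tail
++ * wrap-around, so the ring size must be a power of two (the defaults
++ * are 256 entries).
++ */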
++static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
++{
++ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
++
++ circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
++ if (!circ_buf->buf)
++ return 1;
++
++ circ_buf->head = 0;
++ circ_buf->tail = 0;
++ ptp_buf->size = size;
++ spin_lock_init(&ptp_buf->ptp_lock);
++
++ return 0;
++}
++
++static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
++{
++ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
++
++ circ_buf->head = 0;
++ circ_buf->tail = 0;
++ ptp_buf->size = size;
++}
++
++static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
++ struct dpa_ptp_data *data)
++{
++ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
++ int size = ptp_buf->size;
++ struct dpa_ptp_data *tmp;
++ unsigned long flags;
++ int head, tail;
++
++ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
++
++ head = circ_buf->head;
++ tail = circ_buf->tail;
++
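++	/* If the ring is full, drop the oldest entry by advancing the tail
++	 * so there is always room for the new timestamp.
++	 */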
++ if (CIRC_SPACE(head, tail, size) <= 0)
++ circ_buf->tail = (tail + 1) & (size - 1);
++
++ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
++ memcpy(tmp, data, sizeof(struct dpa_ptp_data));
++
++ circ_buf->head = (head + 1) & (size - 1);
++
++ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
++
++ return 0;
++}
++
++static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
++ struct dpa_ptp_ident *src)
++{
++ int ret;
++
++ if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
++ return 0;
++
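++	/* DPA_PTP_PROT_DONTCARE acts as a wildcard for the transport protocol */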
++ if ((dst->netw_prot == src->netw_prot)
++ || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
++ if (dst->seq_id != src->seq_id)
++ return 0;
++
++ ret = memcmp(dst->snd_port_id, src->snd_port_id,
++ DPA_PTP_SOURCE_PORT_LENGTH);
++ if (ret)
++ return 0;
++ else
++ return 1;
++ }
++
++ return 0;
++}
++
++static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
++ struct dpa_ptp_ident *ident,
++ struct dpa_ptp_time *ts)
++{
++ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
++ int size = ptp_buf->size;
++ int head, tail, idx;
++ unsigned long flags;
++ struct dpa_ptp_data *tmp, *tmp2;
++ struct dpa_ptp_ident *tmp_ident;
++
++ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
++
++ head = circ_buf->head;
++ tail = idx = circ_buf->tail;
++
++ if (CIRC_CNT(head, tail, size) == 0) {
++ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
++ return 1;
++ }
++
++ while (idx != head) {
++ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
++ tmp_ident = &tmp->ident;
++ if (dpa_ptp_is_ident_match(tmp_ident, ident))
++ break;
++ idx = (idx + 1) & (size - 1);
++ }
++
++ if (idx == head) {
++ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
++ return 1;
++ }
++
++ ts->sec = tmp->ts.sec;
++ ts->nsec = tmp->ts.nsec;
++
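++	/* Consume the matched entry: trim stale entries that have accumulated
++	 * beyond TS_ACCUMULATION_THRESHOLD, shift the remaining older entries
++	 * up by one slot, then advance the tail past the freed slot.
++	 */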
++ if (idx != tail) {
++ if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
++ tail = circ_buf->tail =
++ (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
++ }
++
++ while (CIRC_CNT(idx, tail, size) > 0) {
++ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
++ idx = (idx - 1) & (size - 1);
++ tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
++ *tmp = *tmp2;
++ }
++ }
++ circ_buf->tail = (tail + 1) & (size - 1);
++
++ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
++
++ return 0;
++}
++
++/* Parse the PTP packets
++ *
++ * The PTP header can be found in an IPv4 packet, an IPv6 packet or in
++ * an IEEE802.3 Ethernet frame. This function returns the position of
++ * the PTP header, or NULL if none is found
++ */
++static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
++{
++ u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
++ u8 *ptp_loc = NULL;
++ u8 msg_type;
++ u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
++ struct iphdr *iph;
++ struct udphdr *udph;
++ struct ipv6hdr *ipv6h;
++
++	/* When we can receive S/G frames, we need to check that the data we
++	 * want to access is in the linear part of the skb buffer
++ */
++ if (!pskb_may_pull(skb, access_len))
++ return NULL;
++
++	*eth_type = ntohs(*((u16 *)pos));
++
++ /* Check if inner tag is here */
++ if (*eth_type == ETH_P_8021Q) {
++ access_len += DPA_VLAN_TAG_LEN;
++
++ if (!pskb_may_pull(skb, access_len))
++ return NULL;
++
++ pos += DPA_VLAN_TAG_LEN;
++		*eth_type = ntohs(*((u16 *)pos));
++ }
++
++ pos += DPA_ETYPE_LEN;
++
++ switch (*eth_type) {
++ /* Transport of PTP over Ethernet */
++ case ETH_P_1588:
++ ptp_loc = pos;
++
++ if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
++ return NULL;
++
++ msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
++ if ((msg_type == PTP_MSGTYPE_SYNC)
++ || (msg_type == PTP_MSGTYPE_DELREQ)
++ || (msg_type == PTP_MSGTYPE_PDELREQ)
++ || (msg_type == PTP_MSGTYPE_PDELRESP))
++ return ptp_loc;
++ break;
++ /* Transport of PTP over IPv4 */
++ case ETH_P_IP:
++ iph = (struct iphdr *)pos;
++ access_len += sizeof(struct iphdr);
++
++ if (!pskb_may_pull(skb, access_len))
++ return NULL;
++
++		if (iph->protocol != IPPROTO_UDP)
++ return NULL;
++
++ access_len += iph->ihl * 4 - sizeof(struct iphdr) +
++ sizeof(struct udphdr);
++
++ if (!pskb_may_pull(skb, access_len))
++ return NULL;
++
++ pos += iph->ihl * 4;
++ udph = (struct udphdr *)pos;
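++		/* PTP event messages are addressed to UDP destination port 319 */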
++ if (ntohs(udph->dest) != 319)
++ return NULL;
++ ptp_loc = pos + sizeof(struct udphdr);
++ break;
++ /* Transport of PTP over IPv6 */
++ case ETH_P_IPV6:
++ ipv6h = (struct ipv6hdr *)pos;
++
++		access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
++
++		if (!pskb_may_pull(skb, access_len))
++			return NULL;
++
++		if (ipv6h->nexthdr != IPPROTO_UDP)
++ return NULL;
++
++ pos += sizeof(struct ipv6hdr);
++ udph = (struct udphdr *)pos;
++ if (ntohs(udph->dest) != 319)
++ return NULL;
++ ptp_loc = pos + sizeof(struct udphdr);
++ break;
++ default:
++ break;
++ }
++
++ return ptp_loc;
++}
++
++static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
++ struct sk_buff *skb, void *data, enum port_type rx_tx,
++ struct dpa_ptp_data *ptp_data)
++{
++ u64 nsec;
++ u32 mod;
++ u8 *ptp_loc;
++ u16 eth_type;
++
++ ptp_loc = dpa_ptp_parse_packet(skb, &eth_type);
++ if (!ptp_loc)
++ return -EINVAL;
++
++ switch (eth_type) {
++ case ETH_P_IP:
++ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
++ break;
++ case ETH_P_IPV6:
++ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
++ break;
++ case ETH_P_1588:
++ ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
++ return -EINVAL;
++
++ ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
++ ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
++ ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
++ memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
++ DPA_PTP_SOURCE_PORT_LENGTH);
++
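++	/* do_div() divides nsec in place, leaving the whole seconds in nsec,
++	 * and returns the remainder, i.e. the sub-second part in nanoseconds.
++	 */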
++ nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
++ mod = do_div(nsec, NANOSEC_PER_SECOND);
++ ptp_data->ts.sec = nsec;
++ ptp_data->ts.nsec = mod;
++
++ return 0;
++}
++
++void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
++ struct sk_buff *skb, void *data)
++{
++ struct dpa_ptp_tsu *tsu = priv->tsu;
++ struct dpa_ptp_data ptp_tx_data;
++
++ if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data))
++ return;
++
++ dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
++}
++
++void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
++ struct sk_buff *skb, void *data)
++{
++ struct dpa_ptp_tsu *tsu = priv->tsu;
++ struct dpa_ptp_data ptp_rx_data;
++
++ if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data))
++ return;
++
++ dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
++}
++
++static int dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
++ struct dpa_ptp_ident *ident,
++ struct dpa_ptp_time *ts)
++{
++ struct dpa_ptp_tsu *tsu = ptp_tsu;
++ struct dpa_ptp_time tmp;
++ int flag;
++
++ flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
++ if (!flag) {
++ ts->sec = tmp.sec;
++ ts->nsec = tmp.nsec;
++ return 0;
++ }
++
++ return -1;
++}
++
++static int dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
++ struct dpa_ptp_ident *ident,
++ struct dpa_ptp_time *ts)
++{
++ struct dpa_ptp_tsu *tsu = ptp_tsu;
++ struct dpa_ptp_time tmp;
++ int flag;
++
++ flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
++ if (!flag) {
++ ts->sec = tmp.sec;
++ ts->nsec = tmp.nsec;
++ return 0;
++ }
++
++ return -1;
++}
++
++static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
++ struct dpa_ptp_time *cnt_time)
++{
++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
++ u64 tmp, fiper;
++
++ if (mac_dev->fm_rtc_disable)
++ mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
++
++ /* TMR_FIPER1 will pulse every second after ALARM1 expired */
++ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
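++	/* The programmed FIPER period is one nominal timer clock period short
++	 * of a full second, matching how the 1588 timer block counts pulses.
++	 */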
++ fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
++ if (mac_dev->fm_rtc_set_alarm)
++ mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
++ 0, tmp);
++ if (mac_dev->fm_rtc_set_fiper)
++ mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
++ 0, fiper);
++
++ if (mac_dev->fm_rtc_enable)
++ mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
++}
++
++static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
++ struct dpa_ptp_time *curr_time)
++{
++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
++	u64 tmp = 0;
++ u32 mod;
++
++ if (mac_dev->fm_rtc_get_cnt)
++ mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
++ &tmp);
++
++ mod = do_div(tmp, NANOSEC_PER_SECOND);
++ curr_time->sec = (u32)tmp;
++ curr_time->nsec = mod;
++}
++
++static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
++ struct dpa_ptp_time *cnt_time)
++{
++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
++ u64 tmp;
++
++ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
++
++ if (mac_dev->fm_rtc_set_cnt)
++ mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
++ tmp);
++
++ /* Restart fiper two seconds later */
++ cnt_time->sec += 2;
++ cnt_time->nsec = 0;
++ dpa_set_fiper_alarm(tsu, cnt_time);
++}
++
++static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
++{
++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
++	u32 drift = 0;
++
++ if (mac_dev->fm_rtc_get_drift)
++ mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
++ &drift);
++
++ *addend = drift;
++}
++
++static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
++{
++ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
++
++ if (mac_dev->fm_rtc_set_drift)
++ mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
++ addend);
++}
++
++static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
++{
++ dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
++ dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
++}
++
++int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
++{
++ struct dpa_priv_s *priv = netdev_priv(dev);
++ struct dpa_ptp_tsu *tsu = priv->tsu;
++ struct mac_device *mac_dev = priv->mac_dev;
++ struct dpa_ptp_data ptp_data;
++ struct dpa_ptp_data *ptp_data_user;
++ struct dpa_ptp_time act_time;
++ u32 addend;
++ int retval = 0;
++
++ if (!tsu || !tsu->valid)
++ return -ENODEV;
++
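++	/* Handle the driver-private SIOCDEVPRIVATE ioctls defined in dpaa_1588.h */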
++ switch (cmd) {
++ case PTP_ENBL_TXTS_IOCTL:
++ tsu->hwts_tx_en_ioctl = 1;
++ if (mac_dev->fm_rtc_enable)
++ mac_dev->fm_rtc_enable(get_fm_handle(dev));
++ if (mac_dev->ptp_enable)
++ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
++ break;
++ case PTP_DSBL_TXTS_IOCTL:
++ tsu->hwts_tx_en_ioctl = 0;
++ if (mac_dev->fm_rtc_disable)
++ mac_dev->fm_rtc_disable(get_fm_handle(dev));
++ if (mac_dev->ptp_disable)
++ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
++ break;
++ case PTP_ENBL_RXTS_IOCTL:
++ tsu->hwts_rx_en_ioctl = 1;
++ break;
++ case PTP_DSBL_RXTS_IOCTL:
++ tsu->hwts_rx_en_ioctl = 0;
++ break;
++ case PTP_GET_RX_TIMESTAMP:
++ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
++ if (copy_from_user(&ptp_data.ident,
++ &ptp_data_user->ident, sizeof(ptp_data.ident)))
++			return -EFAULT;
++
++ if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
++ return -EAGAIN;
++
++ if (copy_to_user((void __user *)&ptp_data_user->ts,
++ &ptp_data.ts, sizeof(ptp_data.ts)))
++ return -EFAULT;
++ break;
++ case PTP_GET_TX_TIMESTAMP:
++ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
++ if (copy_from_user(&ptp_data.ident,
++ &ptp_data_user->ident, sizeof(ptp_data.ident)))
++			return -EFAULT;
++
++ if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
++ return -EAGAIN;
++
++ if (copy_to_user((void __user *)&ptp_data_user->ts,
++ &ptp_data.ts, sizeof(ptp_data.ts)))
++ return -EFAULT;
++ break;
++ case PTP_GET_TIME:
++ dpa_get_curr_cnt(tsu, &act_time);
++ if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
++ return -EFAULT;
++ break;
++ case PTP_SET_TIME:
++ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
++			return -EFAULT;
++ dpa_set_1588cnt(tsu, &act_time);
++ break;
++ case PTP_GET_ADJ:
++ dpa_get_drift(tsu, &addend);
++ if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
++ return -EFAULT;
++ break;
++ case PTP_SET_ADJ:
++ if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
++			return -EFAULT;
++ dpa_set_drift(tsu, addend);
++ break;
++ case PTP_SET_FIPER_ALARM:
++ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
++			return -EFAULT;
++ dpa_set_fiper_alarm(tsu, &act_time);
++ break;
++ case PTP_CLEANUP_TS:
++ dpa_flush_timestamp(tsu);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return retval;
++}
++
++int dpa_ptp_init(struct dpa_priv_s *priv)
++{
++ struct dpa_ptp_tsu *tsu;
++
++ /* Allocate memory for PTP structure */
++ tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
++ if (!tsu)
++ return -ENOMEM;
++
++ tsu->valid = TRUE;
++ tsu->dpa_priv = priv;
++
++	if (dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ) ||
++	    dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ)) {
++		vfree(tsu->rx_timestamps.circ_buf.buf);
++		vfree(tsu->tx_timestamps.circ_buf.buf);
++		kfree(tsu);
++		return -ENOMEM;
++	}
++
++ priv->tsu = tsu;
++
++ return 0;
++}
++EXPORT_SYMBOL(dpa_ptp_init);
++
++void dpa_ptp_cleanup(struct dpa_priv_s *priv)
++{
++ struct dpa_ptp_tsu *tsu = priv->tsu;
++
++ tsu->valid = FALSE;
++ vfree(tsu->rx_timestamps.circ_buf.buf);
++ vfree(tsu->tx_timestamps.circ_buf.buf);
++
++ kfree(tsu);
++}
++EXPORT_SYMBOL(dpa_ptp_cleanup);
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
+new file mode 100644
+index 00000000..73390168
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
+@@ -0,0 +1,138 @@
++/* Copyright (C) 2011 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++#ifndef __DPAA_1588_H__
++#define __DPAA_1588_H__
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/circ_buf.h>
++#include <linux/fsl_qman.h>
++
++#define DEFAULT_PTP_RX_BUF_SZ 256
++#define DEFAULT_PTP_TX_BUF_SZ 256
++
++/* 1588 private ioctl calls */
++#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
++#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
++#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
++#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
++#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
++#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
++#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
++#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
++#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
++#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
++#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
++#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
++
++/* PTP V2 message type */
++enum {
++ PTP_MSGTYPE_SYNC = 0x0,
++ PTP_MSGTYPE_DELREQ = 0x1,
++ PTP_MSGTYPE_PDELREQ = 0x2,
++ PTP_MSGTYPE_PDELRESP = 0x3,
++ PTP_MSGTYPE_FLWUP = 0x8,
++ PTP_MSGTYPE_DELRESP = 0x9,
++ PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
++ PTP_MSGTYPE_ANNOUNCE = 0xB,
++ PTP_MSGTYPE_SGNLNG = 0xC,
++ PTP_MSGTYPE_MNGMNT = 0xD,
++};
++
++/* Byte offset of data in the PTP V2 headers */
++#define PTP_OFFS_MSG_TYPE 0
++#define PTP_OFFS_VER_PTP 1
++#define PTP_OFFS_MSG_LEN 2
++#define PTP_OFFS_DOM_NMB 4
++#define PTP_OFFS_FLAGS 6
++#define PTP_OFFS_CORFIELD 8
++#define PTP_OFFS_SRCPRTID 20
++#define PTP_OFFS_SEQ_ID 30
++#define PTP_OFFS_CTRL 32
++#define PTP_OFFS_LOGMEAN 33
++
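++/* Offsets of the IP, UDP and PTP headers in an untagged IPv4/UDP PTP
++ * frame: 14-byte Ethernet header, then 20-byte IPv4 header, then
++ * 8-byte UDP header.
++ */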
++#define PTP_IP_OFFS 14
++#define PTP_UDP_OFFS 34
++#define PTP_HEADER_OFFS 42
++#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
++#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
++#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
++#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
++
++/* 1588-2008 network protocol enumeration values */
++#define DPA_PTP_PROT_IPV4 1
++#define DPA_PTP_PROT_IPV6 2
++#define DPA_PTP_PROT_802_3 3
++#define DPA_PTP_PROT_DONTCARE 0xFFFF
++
++#define DPA_PTP_SOURCE_PORT_LENGTH 10
++#define DPA_PTP_HEADER_SZE 34
++#define DPA_ETYPE_LEN 2
++#define DPA_VLAN_TAG_LEN 4
++#define NANOSEC_PER_SECOND 1000000000
++
++/* Threshold between the most recently found timestamp and the oldest one */
++#define TS_ACCUMULATION_THRESHOLD 50
++
++/* Struct needed to identify a timestamp */
++struct dpa_ptp_ident {
++ u8 version;
++ u8 msg_type;
++ u16 netw_prot;
++ u16 seq_id;
++ u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
++};
++
++/* Timestamp format in 1588-2008 */
++struct dpa_ptp_time {
++	u64 sec; /* only the lower 48 bits are used */
++ u32 nsec;
++};
++
++/* Carries timestamp data to/from user space over the private ioctls */
++struct dpa_ptp_data {
++ struct dpa_ptp_ident ident;
++ struct dpa_ptp_time ts;
++};
++
++struct dpa_ptp_circ_buf {
++ struct circ_buf circ_buf;
++ u32 size;
++ spinlock_t ptp_lock;
++};
++
++/* PTP TSU control structure */
++struct dpa_ptp_tsu {
++ struct dpa_priv_s *dpa_priv;
++ bool valid;
++ struct dpa_ptp_circ_buf rx_timestamps;
++ struct dpa_ptp_circ_buf tx_timestamps;
++
++ /* HW timestamping over ioctl enabled flag */
++ int hwts_tx_en_ioctl;
++ int hwts_rx_en_ioctl;
++};
++
++extern int dpa_ptp_init(struct dpa_priv_s *priv);
++extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
++extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
++ struct sk_buff *skb, void *data);
++extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
++ struct sk_buff *skb, void *data);
++extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
++#endif
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
+new file mode 100644
+index 00000000..25d9f5f1
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
+@@ -0,0 +1,183 @@
++/* Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */
++#include <linux/debugfs.h>
++#include "dpaa_debugfs.h"
++#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
++
++#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
++#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
++
++static struct dentry *dpa_debugfs_root;
++
++static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
++static ssize_t dpa_loop_write(struct file *f,
++ const char __user *buf, size_t count, loff_t *off);
++
++static const struct file_operations dpa_debugfs_lp_fops = {
++ .open = dpa_debugfs_loop_open,
++ .write = dpa_loop_write,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
++{
++ struct dpa_priv_s *priv;
++
++ BUG_ON(offset == NULL);
++
++ priv = netdev_priv((struct net_device *)file->private);
++ seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
++
++ return 0;
++}
++
++static int user_input_convert(const char __user *user_buf, size_t count,
++ long *val)
++{
++ char buf[12];
++
++ if (count > sizeof(buf) - 1)
++ return -EINVAL;
++ if (copy_from_user(buf, user_buf, count))
++ return -EFAULT;
++ buf[count] = '\0';
++ if (kstrtol(buf, 0, val))
++ return -EINVAL;
++ return 0;
++}
++
++static ssize_t dpa_loop_write(struct file *f,
++ const char __user *buf, size_t count, loff_t *off)
++{
++ struct dpa_priv_s *priv;
++ struct net_device *netdev;
++ struct seq_file *sf;
++ int ret;
++ long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++
++ sf = (struct seq_file *)f->private_data;
++ netdev = (struct net_device *)sf->private;
++ priv = netdev_priv(netdev);
++
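++	/* Loop targets index dpa_loop_netdevs[] (0..19); any
++	 * out-of-range value disables looping.
++	 */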
++	priv->loop_to = ((val < 0) || (val >= 20)) ? -1 : val;
++
++ return count;
++}
++
++static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file)
++{
++ int _errno;
++ const struct net_device *net_dev;
++
++ _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private);
++ if (unlikely(_errno < 0)) {
++ net_dev = (struct net_device *)inode->i_private;
++
++ if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
++ netdev_err(net_dev, "single_open() = %d\n",
++ _errno);
++ }
++
++ return _errno;
++}
++
++
++int dpa_netdev_debugfs_create(struct net_device *net_dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ static int cnt;
++ char loop_file_name[100];
++
++ if (unlikely(dpa_debugfs_root == NULL)) {
++ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n",
++ KBUILD_BASENAME".c", __LINE__, __func__,
++ "root debugfs missing, possible module ordering issue");
++ return -ENOMEM;
++ }
++
++ sprintf(loop_file_name, "eth%d_loop", ++cnt);
++ priv->debugfs_loop_file = debugfs_create_file(loop_file_name,
++						      S_IRUGO | S_IWUSR,
++ dpa_debugfs_root,
++ net_dev,
++ &dpa_debugfs_lp_fops);
++ if (unlikely(priv->debugfs_loop_file == NULL)) {
++		netdev_err(net_dev, "debugfs_create_file(%s/%s) failed\n",
++			   dpa_debugfs_root->d_iname,
++			   loop_file_name);
++
++ return -ENOMEM;
++ }
++ return 0;
++}
++
++void dpa_netdev_debugfs_remove(struct net_device *net_dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++
++ debugfs_remove(priv->debugfs_loop_file);
++}
++
++int __init dpa_debugfs_module_init(void)
++{
++ int _errno = 0;
++
++ pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n");
++
++ dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL);
++
++ if (unlikely(dpa_debugfs_root == NULL)) {
++ _errno = -ENOMEM;
++ pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n",
++ KBUILD_BASENAME".c", __LINE__, __func__);
++ pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
++ DPA_ETH_DEBUGFS_ROOT, _errno);
++ }
++
++ return _errno;
++}
++
++void __exit dpa_debugfs_module_exit(void)
++{
++ debugfs_remove(dpa_debugfs_root);
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
+new file mode 100644
+index 00000000..63d35427
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
+@@ -0,0 +1,43 @@
++/* Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef DPAA_DEBUGFS_H_
++#define DPAA_DEBUGFS_H_
++
++#include <linux/netdevice.h>
++#include <linux/dcache.h> /* struct dentry needed in dpaa_eth.h */
++
++int dpa_netdev_debugfs_create(struct net_device *net_dev);
++void dpa_netdev_debugfs_remove(struct net_device *net_dev);
++int __init dpa_debugfs_module_init(void);
++void __exit dpa_debugfs_module_exit(void);
++
++#endif /* DPAA_DEBUGFS_H_ */
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
+new file mode 100644
+index 00000000..7026f916
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
+@@ -0,0 +1,1225 @@
++/* Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++ KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": " fmt
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_mdio.h>
++#include <linux/of_net.h>
++#include <linux/kthread.h>
++#include <linux/io.h>
++#include <linux/if_arp.h> /* arp_hdr_len() */
++#include <linux/if_vlan.h> /* VLAN_HLEN */
++#include <linux/icmp.h> /* struct icmphdr */
++#include <linux/ip.h> /* struct iphdr */
++#include <linux/ipv6.h> /* struct ipv6hdr */
++#include <linux/udp.h> /* struct udphdr */
++#include <linux/tcp.h> /* struct tcphdr */
++#include <linux/net.h> /* net_ratelimit() */
++#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
++#include <linux/highmem.h>
++#include <linux/percpu.h>
++#include <linux/dma-mapping.h>
++#include <linux/fsl_bman.h>
++#ifdef CONFIG_SOC_BUS
++#include <linux/sys_soc.h> /* soc_device_match */
++#endif
++
++#include "fsl_fman.h"
++#include "fm_ext.h"
++#include "fm_port_ext.h"
++
++#include "mac.h"
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++#include "dpaa_debugfs.h"
++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
++
++/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
++ * using trace events only need to #include "dpaa_eth_trace.h"
++ */
++#define CREATE_TRACE_POINTS
++#include "dpaa_eth_trace.h"
++
++#define DPA_NAPI_WEIGHT 64
++
++/* Valid checksum indication */
++#define DPA_CSUM_VALID 0xFFFF
++
++#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
++
++MODULE_DESCRIPTION(DPA_DESCRIPTION);
++
++static uint8_t debug = -1;
++module_param(debug, byte, S_IRUGO);
++MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
++
++/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
++static uint16_t tx_timeout = 1000;
++module_param(tx_timeout, ushort, S_IRUGO);
++MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
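++/* Both are load-time parameters, e.g. (illustrative module name and
++ * values): insmod fsl_dpa.ko debug=16 tx_timeout=2000
++ */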
++
++static const char rtx[][3] = {
++ [RX] = "RX",
++ [TX] = "TX"
++};
++
++#ifndef CONFIG_PPC
++bool dpaa_errata_a010022;
++EXPORT_SYMBOL(dpaa_errata_a010022);
++#endif
++
++/* BM */
++
++#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
++
++static uint8_t dpa_priv_common_bpid;
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++struct net_device *dpa_loop_netdevs[20];
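++/* Indexed by each interface's loop_id (assigned in dpaa_eth_priv_probe()) */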
++#endif
++
++#ifdef CONFIG_PM
++
++static int dpaa_suspend(struct device *dev)
++{
++ struct net_device *net_dev;
++ struct dpa_priv_s *priv;
++ struct mac_device *mac_dev;
++ int err = 0;
++
++ net_dev = dev_get_drvdata(dev);
++
++ if (net_dev->flags & IFF_UP) {
++ priv = netdev_priv(net_dev);
++ mac_dev = priv->mac_dev;
++
++ if (priv->wol & DPAA_WOL_MAGIC) {
++ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
++ priv->mac_dev->get_mac_handle(mac_dev), true);
++ if (err) {
++ netdev_err(net_dev, "set_wol() = %d\n", err);
++ goto set_wol_failed;
++ }
++ }
++
++ err = fm_port_suspend(mac_dev->port_dev[RX]);
++ if (err) {
++ netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
++ goto rx_port_suspend_failed;
++ }
++
++ err = fm_port_suspend(mac_dev->port_dev[TX]);
++ if (err) {
++ netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
++ goto tx_port_suspend_failed;
++ }
++ }
++
++ return 0;
++
++tx_port_suspend_failed:
++ fm_port_resume(mac_dev->port_dev[RX]);
++rx_port_suspend_failed:
++ if (priv->wol & DPAA_WOL_MAGIC) {
++ priv->mac_dev->set_wol(mac_dev->port_dev[RX],
++ priv->mac_dev->get_mac_handle(mac_dev), false);
++ }
++set_wol_failed:
++ return err;
++}
++
++static int dpaa_resume(struct device *dev)
++{
++ struct net_device *net_dev;
++ struct dpa_priv_s *priv;
++ struct mac_device *mac_dev;
++ int err = 0;
++
++ net_dev = dev_get_drvdata(dev);
++
++ if (net_dev->flags & IFF_UP) {
++ priv = netdev_priv(net_dev);
++ mac_dev = priv->mac_dev;
++
++ err = fm_mac_resume(mac_dev->get_mac_handle(mac_dev));
++ if (err) {
++ netdev_err(net_dev, "fm_mac_resume = %d\n", err);
++ goto resume_failed;
++ }
++
++ err = fm_port_resume(mac_dev->port_dev[TX]);
++ if (err) {
++ netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
++ goto resume_failed;
++ }
++
++ err = fm_port_resume(mac_dev->port_dev[RX]);
++ if (err) {
++ netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err);
++ goto resume_failed;
++ }
++
++ if (priv->wol & DPAA_WOL_MAGIC) {
++ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
++ priv->mac_dev->get_mac_handle(mac_dev), false);
++ if (err) {
++ netdev_err(net_dev, "set_wol() = %d\n", err);
++ goto resume_failed;
++ }
++ }
++ }
++
++ return 0;
++
++resume_failed:
++ return err;
++}
++
++static const struct dev_pm_ops dpaa_pm_ops = {
++ .suspend = dpaa_suspend,
++ .resume = dpaa_resume,
++};
++
++#define DPAA_PM_OPS (&dpaa_pm_ops)
++
++#else /* CONFIG_PM */
++
++#define DPAA_PM_OPS NULL
++
++#endif /* CONFIG_PM */
++
++/* Checks whether the checksum field in Parse Results array is valid
++ * (equals 0xFFFF) and increments the .cse counter otherwise
++ */
++static inline void
++dpa_csum_validation(const struct dpa_priv_s *priv,
++ struct dpa_percpu_priv_s *percpu_priv,
++ const struct qm_fd *fd)
++{
++ dma_addr_t addr = qm_fd_addr(fd);
++ struct dpa_bp *dpa_bp = priv->dpa_bp;
++ void *frm = phys_to_virt(addr);
++ fm_prs_result_t *parse_result;
++
++ if (unlikely(!frm))
++ return;
++
++ dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE +
++ DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL);
++
++ parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE);
++
++ if (parse_result->cksum != DPA_CSUM_VALID)
++ percpu_priv->rx_errors.cse++;
++}
++
++static void _dpa_rx_error(struct net_device *net_dev,
++ const struct dpa_priv_s *priv,
++ struct dpa_percpu_priv_s *percpu_priv,
++ const struct qm_fd *fd,
++ u32 fqid)
++{
++ /* limit common, possibly innocuous Rx FIFO Overflow errors'
++ * interference with zero-loss convergence benchmark results.
++ */
++ if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
++ pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
++ else
++ if (netif_msg_hw(priv) && net_ratelimit())
++ netdev_dbg(net_dev, "Err FD status = 0x%08x\n",
++ fd->status & FM_FD_STAT_RX_ERRORS);
++#ifdef CONFIG_FSL_DPAA_HOOKS
++ if (dpaa_eth_hooks.rx_error &&
++ dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
++ /* it's up to the hook to perform resource cleanup */
++ return;
++#endif
++ percpu_priv->stats.rx_errors++;
++
++ if (fd->status & FM_PORT_FRM_ERR_DMA)
++ percpu_priv->rx_errors.dme++;
++ if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
++ percpu_priv->rx_errors.fpe++;
++ if (fd->status & FM_PORT_FRM_ERR_SIZE)
++ percpu_priv->rx_errors.fse++;
++ if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
++ percpu_priv->rx_errors.phe++;
++ if (fd->status & FM_FD_STAT_L4CV)
++ dpa_csum_validation(priv, percpu_priv, fd);
++
++ dpa_fd_release(net_dev, fd);
++}
++
++static void _dpa_tx_error(struct net_device *net_dev,
++ const struct dpa_priv_s *priv,
++ struct dpa_percpu_priv_s *percpu_priv,
++ const struct qm_fd *fd,
++ u32 fqid)
++{
++ struct sk_buff *skb;
++
++ if (netif_msg_hw(priv) && net_ratelimit())
++ netdev_warn(net_dev, "FD status = 0x%08x\n",
++ fd->status & FM_FD_STAT_TX_ERRORS);
++#ifdef CONFIG_FSL_DPAA_HOOKS
++ if (dpaa_eth_hooks.tx_error &&
++ dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
++ /* now the hook must ensure proper cleanup */
++ return;
++#endif
++ percpu_priv->stats.tx_errors++;
++
++	/* If we intended the buffers from this frame to go into the bpools
++	 * when the FMan transmit was done, we need to put them back manually.
++	 */
++ if (fd->bpid != 0xff) {
++ dpa_fd_release(net_dev, fd);
++ return;
++ }
++
++ skb = _dpa_cleanup_tx_fd(priv, fd);
++ dev_kfree_skb(skb);
++}
++
++/* Helper function to factor out frame validation logic on all Rx paths. Its
++ * purpose is to extract from the Parse Results structure information about
++ * the integrity of the frame, its checksum, the length of the parsed headers
++ * and whether the frame is suitable for GRO.
++ *
++ * Assumes no parser errors, since any error frame is dropped before this
++ * function is called.
++ *
++ * @skb will have its ip_summed field overwritten;
++ * @use_gro will only be written with 0, if the frame is definitely not
++ * GRO-able; otherwise, it will be left unchanged;
++ * @hdr_size will be written with a safe value, at least the size of the
++ * headers' length.
++ */
++void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
++ const struct qm_fd *fd,
++ struct sk_buff *skb, int *use_gro)
++{
++ if (fd->status & FM_FD_STAT_L4CV) {
++ /* The parser has run and performed L4 checksum validation.
++ * We know there were no parser errors (and implicitly no
++ * L4 csum error), otherwise we wouldn't be here.
++ */
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ /* Don't go through GRO for certain types of traffic that
++ * we know are not GRO-able, such as dgram-based protocols.
++ * In the worst-case scenarios, such as small-pkt terminating
++ * UDP, the extra GRO processing would be overkill.
++ *
++ * The only protocol the Parser supports that is also GRO-able
++ * is currently TCP.
++ */
++ if (!fm_l4_frame_is_tcp(parse_results))
++ *use_gro = 0;
++
++ return;
++ }
++
++ /* We're here because either the parser didn't run or the L4 checksum
++ * was not verified. This may include the case of a UDP frame with
++ * checksum zero or an L4 proto other than TCP/UDP
++ */
++ skb->ip_summed = CHECKSUM_NONE;
++
++ /* Bypass GRO for unknown traffic or if no PCDs are applied */
++ *use_gro = 0;
++}
++
++int dpaa_eth_poll(struct napi_struct *napi, int budget)
++{
++ struct dpa_napi_portal *np =
++ container_of(napi, struct dpa_napi_portal, napi);
++
++ int cleaned = qman_p_poll_dqrr(np->p, budget);
++
++ if (cleaned < budget) {
++ int tmp;
++ napi_complete(napi);
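++		/* Polling done; re-arm the DQRR interrupt on this portal */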
++ tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
++ DPA_BUG_ON(tmp);
++ }
++
++ return cleaned;
++}
++EXPORT_SYMBOL(dpaa_eth_poll);
++
++static void __hot _dpa_tx_conf(struct net_device *net_dev,
++ const struct dpa_priv_s *priv,
++ struct dpa_percpu_priv_s *percpu_priv,
++ const struct qm_fd *fd,
++ u32 fqid)
++{
++ struct sk_buff *skb;
++
++ /* do we need the timestamp for the error frames? */
++
++	if (unlikely((fd->status & FM_FD_STAT_TX_ERRORS) != 0)) {
++ if (netif_msg_hw(priv) && net_ratelimit())
++ netdev_warn(net_dev, "FD status = 0x%08x\n",
++ fd->status & FM_FD_STAT_TX_ERRORS);
++
++ percpu_priv->stats.tx_errors++;
++ }
++
++ /* hopefully we need not get the timestamp before the hook */
++#ifdef CONFIG_FSL_DPAA_HOOKS
++ if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev,
++ fd, fqid) == DPAA_ETH_STOLEN)
++ /* it's the hook that must now perform cleanup */
++ return;
++#endif
++	/* This might not perfectly reflect reality, if the core dequeuing
++	 * the Tx confirmation is different from the one that did the enqueue,
++	 * but at least it'll show up in the total count.
++	 */
++ percpu_priv->tx_confirm++;
++
++ skb = _dpa_cleanup_tx_fd(priv, fd);
++
++ dev_kfree_skb(skb);
++}
++
++enum qman_cb_dqrr_result
++priv_rx_error_dqrr(struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dq)
++{
++ struct net_device *net_dev;
++ struct dpa_priv_s *priv;
++ struct dpa_percpu_priv_s *percpu_priv;
++ int *count_ptr;
++
++ net_dev = ((struct dpa_fq *)fq)->net_dev;
++ priv = netdev_priv(net_dev);
++
++ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
++ count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
++
++ if (dpaa_eth_napi_schedule(percpu_priv, portal))
++ return qman_cb_dqrr_stop;
++
++ if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
++ /* Unable to refill the buffer pool due to insufficient
++ * system memory. Just release the frame back into the pool,
++ * otherwise we'll soon end up with an empty buffer pool.
++ */
++ dpa_fd_release(net_dev, &dq->fd);
++ else
++ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
++
++ return qman_cb_dqrr_consume;
++}
++
++
++enum qman_cb_dqrr_result __hot
++priv_rx_default_dqrr(struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dq)
++{
++ struct net_device *net_dev;
++ struct dpa_priv_s *priv;
++ struct dpa_percpu_priv_s *percpu_priv;
++ int *count_ptr;
++ struct dpa_bp *dpa_bp;
++
++ net_dev = ((struct dpa_fq *)fq)->net_dev;
++ priv = netdev_priv(net_dev);
++ dpa_bp = priv->dpa_bp;
++
++ /* Trace the Rx fd */
++ trace_dpa_rx_fd(net_dev, fq, &dq->fd);
++
++ /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
++ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
++ count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
++
++ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
++ return qman_cb_dqrr_stop;
++
++ /* Vale of plenty: make sure we didn't run out of buffers */
++
++ if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
++ /* Unable to refill the buffer pool due to insufficient
++ * system memory. Just release the frame back into the pool,
++ * otherwise we'll soon end up with an empty buffer pool.
++ */
++ dpa_fd_release(net_dev, &dq->fd);
++ else
++ _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
++ count_ptr);
++
++ return qman_cb_dqrr_consume;
++}
++
++enum qman_cb_dqrr_result
++priv_tx_conf_error_dqrr(struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dq)
++{
++ struct net_device *net_dev;
++ struct dpa_priv_s *priv;
++ struct dpa_percpu_priv_s *percpu_priv;
++
++ net_dev = ((struct dpa_fq *)fq)->net_dev;
++ priv = netdev_priv(net_dev);
++
++ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
++
++ if (dpaa_eth_napi_schedule(percpu_priv, portal))
++ return qman_cb_dqrr_stop;
++
++ _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
++
++ return qman_cb_dqrr_consume;
++}
++
++enum qman_cb_dqrr_result __hot
++priv_tx_conf_default_dqrr(struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dq)
++{
++ struct net_device *net_dev;
++ struct dpa_priv_s *priv;
++ struct dpa_percpu_priv_s *percpu_priv;
++
++ net_dev = ((struct dpa_fq *)fq)->net_dev;
++ priv = netdev_priv(net_dev);
++
++ /* Trace the fd */
++ trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
++
++ /* Non-migratable context, safe to use raw_cpu_ptr */
++ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
++
++ if (dpaa_eth_napi_schedule(percpu_priv, portal))
++ return qman_cb_dqrr_stop;
++
++ _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
++
++ return qman_cb_dqrr_consume;
++}
++
++void priv_ern(struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_mr_entry *msg)
++{
++ struct net_device *net_dev;
++ const struct dpa_priv_s *priv;
++ struct sk_buff *skb;
++ struct dpa_percpu_priv_s *percpu_priv;
++ struct qm_fd fd = msg->ern.fd;
++
++ net_dev = ((struct dpa_fq *)fq)->net_dev;
++ priv = netdev_priv(net_dev);
++ /* Non-migratable context, safe to use raw_cpu_ptr */
++ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
++
++ percpu_priv->stats.tx_dropped++;
++ percpu_priv->stats.tx_fifo_errors++;
++ count_ern(percpu_priv, msg);
++
++	/* If we intended this buffer to go into the pool
++	 * when the FMan was done with it, we need to put it
++	 * back manually.
++	 */
++ if (msg->ern.fd.bpid != 0xff) {
++ dpa_fd_release(net_dev, &fd);
++ return;
++ }
++
++ skb = _dpa_cleanup_tx_fd(priv, &fd);
++ dev_kfree_skb_any(skb);
++}
++
++const struct dpa_fq_cbs_t private_fq_cbs = {
++ .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
++ .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
++ .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
++ .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
++ .egress_ern = { .cb = { .ern = priv_ern } }
++};
++EXPORT_SYMBOL(private_fq_cbs);
++
++static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
++{
++ struct dpa_percpu_priv_s *percpu_priv;
++ int i, j;
++
++ for_each_possible_cpu(i) {
++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
++
++ for (j = 0; j < qman_portal_max; j++)
++ napi_enable(&percpu_priv->np[j].napi);
++ }
++}
++
++static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
++{
++ struct dpa_percpu_priv_s *percpu_priv;
++ int i, j;
++
++ for_each_possible_cpu(i) {
++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
++
++ for (j = 0; j < qman_portal_max; j++)
++ napi_disable(&percpu_priv->np[j].napi);
++ }
++}
++
++static int __cold dpa_eth_priv_start(struct net_device *net_dev)
++{
++ int err;
++ struct dpa_priv_s *priv;
++
++ priv = netdev_priv(net_dev);
++
++ dpaa_eth_napi_enable(priv);
++
++ err = dpa_start(net_dev);
++ if (err < 0)
++ dpaa_eth_napi_disable(priv);
++
++ return err;
++}
++
++
++
++static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
++{
++ int _errno;
++ struct dpa_priv_s *priv;
++
++ _errno = dpa_stop(net_dev);
++ /* Allow NAPI to consume any frame still in the Rx/TxConfirm
++ * ingress queues. This is to avoid a race between the current
++ * context and ksoftirqd which could leave NAPI disabled while
++ * in fact there's still Rx traffic to be processed.
++ */
++ usleep_range(5000, 10000);
++
++ priv = netdev_priv(net_dev);
++ dpaa_eth_napi_disable(priv);
++
++ return _errno;
++}
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++static void dpaa_eth_poll_controller(struct net_device *net_dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ struct dpa_percpu_priv_s *percpu_priv =
++ raw_cpu_ptr(priv->percpu_priv);
++ struct qman_portal *p;
++ const struct qman_portal_config *pc;
++ struct dpa_napi_portal *np;
++
++ p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id());
++ pc = qman_p_get_portal_config(p);
++ np = &percpu_priv->np[pc->index];
++
++ qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI);
++ qman_p_poll_dqrr(np->p, np->napi.weight);
++ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
++}
++#endif
++
++static const struct net_device_ops dpa_private_ops = {
++ .ndo_open = dpa_eth_priv_start,
++ .ndo_start_xmit = dpa_tx,
++ .ndo_stop = dpa_eth_priv_stop,
++ .ndo_tx_timeout = dpa_timeout,
++ .ndo_get_stats64 = dpa_get_stats64,
++ .ndo_set_mac_address = dpa_set_mac_address,
++ .ndo_validate_addr = eth_validate_addr,
++#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++ .ndo_select_queue = dpa_select_queue,
++#endif
++ .ndo_change_mtu = dpa_change_mtu,
++ .ndo_set_rx_mode = dpa_set_rx_mode,
++ .ndo_init = dpa_ndo_init,
++ .ndo_set_features = dpa_set_features,
++ .ndo_fix_features = dpa_fix_features,
++ .ndo_do_ioctl = dpa_ioctl,
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ .ndo_poll_controller = dpaa_eth_poll_controller,
++#endif
++};
++
++static int dpa_private_napi_add(struct net_device *net_dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ struct dpa_percpu_priv_s *percpu_priv;
++ int i, cpu;
++
++ for_each_possible_cpu(cpu) {
++ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
++
++ percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
++ qman_portal_max * sizeof(struct dpa_napi_portal),
++ GFP_KERNEL);
++
++ if (unlikely(percpu_priv->np == NULL)) {
++ dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < qman_portal_max; i++)
++ netif_napi_add(net_dev, &percpu_priv->np[i].napi,
++ dpaa_eth_poll, DPA_NAPI_WEIGHT);
++ }
++
++ return 0;
++}
++
++void dpa_private_napi_del(struct net_device *net_dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ struct dpa_percpu_priv_s *percpu_priv;
++ int i, cpu;
++
++ for_each_possible_cpu(cpu) {
++ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
++
++ if (percpu_priv->np) {
++ for (i = 0; i < qman_portal_max; i++)
++ netif_napi_del(&percpu_priv->np[i].napi);
++
++ devm_kfree(net_dev->dev.parent, percpu_priv->np);
++ }
++ }
++}
++EXPORT_SYMBOL(dpa_private_napi_del);
++
++static int dpa_private_netdev_init(struct net_device *net_dev)
++{
++ int i;
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ struct dpa_percpu_priv_s *percpu_priv;
++ const uint8_t *mac_addr;
++
++	/* Although we access another CPU's private data here,
++	 * we do it at initialization so it is safe.
++	 */
++ for_each_possible_cpu(i) {
++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
++ percpu_priv->net_dev = net_dev;
++ }
++
++ net_dev->netdev_ops = &dpa_private_ops;
++ mac_addr = priv->mac_dev->addr;
++
++ net_dev->mem_start = priv->mac_dev->res->start;
++ net_dev->mem_end = priv->mac_dev->res->end;
++
++ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
++ NETIF_F_LLTX);
++
++ /* Advertise S/G and HIGHDMA support for private interfaces */
++ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
++ /* Recent kernels enable GSO automatically, if
++ * we declare NETIF_F_SG. For conformity, we'll
++ * still declare GSO explicitly.
++ */
++ net_dev->features |= NETIF_F_GSO;
++
++ /* Advertise GRO support */
++ net_dev->features |= NETIF_F_GRO;
++
++ /* Advertise NETIF_F_HW_ACCEL_MQ to avoid Tx timeout warnings */
++ net_dev->features |= NETIF_F_HW_ACCEL_MQ;
++
++ return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
++}
++
++static struct dpa_bp * __cold
++dpa_priv_bp_probe(struct device *dev)
++{
++ struct dpa_bp *dpa_bp;
++
++ dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
++ if (unlikely(dpa_bp == NULL)) {
++ dev_err(dev, "devm_kzalloc() failed\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++	dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
++	if (!dpa_bp->percpu_count)
++		return ERR_PTR(-ENOMEM);
++ dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
++
++ dpa_bp->seed_cb = dpa_bp_priv_seed;
++ dpa_bp->free_buf_cb = _dpa_bp_free_pf;
++
++ return dpa_bp;
++}
++
++/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR.
++ * We won't be sending congestion notifications to FMan; for now, we just use
++ * this CGR to generate enqueue rejections to FMan in order to drop the frames
++ * before they reach our ingress queues and eat up memory.
++ */
++static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
++{
++ struct qm_mcc_initcgr initcgr;
++ u32 cs_th;
++ int err;
++
++ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
++ if (err < 0) {
++ pr_err("Error %d allocating CGR ID\n", err);
++ goto out_error;
++ }
++
++ /* Enable CS TD, but disable Congestion State Change Notifications. */
++ initcgr.we_mask = QM_CGR_WE_CS_THRES;
++ initcgr.cgr.cscn_en = QM_CGR_EN;
++ cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;
++ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
++
++ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
++ initcgr.cgr.cstd_en = QM_CGR_EN;
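++	/* Note: cscn_en set above is not latched by QMan because
++	 * QM_CGR_WE_CSCN_EN is never added to we_mask, so state change
++	 * notifications indeed stay disabled.
++	 */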
++
++ /* This is actually a hack, because this CGR will be associated with
++ * our affine SWP. However, we'll place our ingress FQs in it.
++ */
++ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
++ &initcgr);
++ if (err < 0) {
++ pr_err("Error %d creating ingress CGR with ID %d\n", err,
++ priv->ingress_cgr.cgrid);
++ qman_release_cgrid(priv->ingress_cgr.cgrid);
++ goto out_error;
++ }
++ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
++ priv->ingress_cgr.cgrid, priv->mac_dev->addr);
++
++ /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
++ * range), but we have no common initialization path between the
++ * different variants of the DPAA Eth driver, so we do it here rather
++ * than modifying every other variant than "private Eth".
++ */
++ priv->use_ingress_cgr = true;
++
++out_error:
++ return err;
++}
++
++static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
++ size_t count)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ int i;
++
++ if (netif_msg_probe(priv))
++ dev_dbg(net_dev->dev.parent,
++ "Using private BM buffer pools\n");
++
++ priv->bp_count = count;
++
++ for (i = 0; i < count; i++) {
++ int err;
++ err = dpa_bp_alloc(&dpa_bp[i]);
++ if (err < 0) {
++ dpa_bp_free(priv);
++ priv->dpa_bp = NULL;
++ return err;
++ }
++
++ priv->dpa_bp = &dpa_bp[i];
++ }
++
++ dpa_priv_common_bpid = priv->dpa_bp->bpid;
++ return 0;
++}
++
++static const struct of_device_id dpa_match[];
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++static int dpa_new_loop_id(void)
++{
++ static int if_id;
++
++ return if_id++;
++}
++#endif
++
++static int
++dpaa_eth_priv_probe(struct platform_device *_of_dev)
++{
++ int err = 0, i, channel;
++ struct device *dev;
++ struct device_node *dpa_node;
++ struct dpa_bp *dpa_bp;
++ size_t count = 1;
++ struct net_device *net_dev = NULL;
++ struct dpa_priv_s *priv = NULL;
++ struct dpa_percpu_priv_s *percpu_priv;
++ struct fm_port_fqs port_fqs;
++ struct dpa_buffer_layout_s *buf_layout = NULL;
++ struct mac_device *mac_dev;
++
++ dev = &_of_dev->dev;
++
++ dpa_node = dev->of_node;
++
++ if (!of_device_is_available(dpa_node))
++ return -ENODEV;
++
++ /* Get the buffer pools assigned to this interface;
++ * run only once the default pool probing code
++ */
++ dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
++ dpa_priv_bp_probe(dev);
++ if (IS_ERR(dpa_bp))
++ return PTR_ERR(dpa_bp);
++
++ /* Allocate this early, so we can store relevant information in
++ * the private area (needed by 1588 code in dpa_mac_probe)
++ */
++ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
++	if (!net_dev) {
++		dev_err(dev, "alloc_etherdev_mq() failed\n");
++		err = -ENOMEM;
++		goto alloc_etherdev_mq_failed;
++	}
++
++ /* Do this here, so we can be verbose early */
++ SET_NETDEV_DEV(net_dev, dev);
++ dev_set_drvdata(dev, net_dev);
++
++ priv = netdev_priv(net_dev);
++ priv->net_dev = net_dev;
++ strcpy(priv->if_type, "private");
++
++ priv->msg_enable = netif_msg_init(debug, -1);
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++ priv->loop_id = dpa_new_loop_id();
++ priv->loop_to = -1; /* disabled by default */
++ dpa_loop_netdevs[priv->loop_id] = net_dev;
++#endif
++
++ mac_dev = dpa_mac_probe(_of_dev);
++	if (IS_ERR_OR_NULL(mac_dev)) {
++		err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
++		goto mac_probe_failed;
++	}
++
++ /* We have physical ports, so we need to establish
++ * the buffer layout.
++ */
++ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
++ GFP_KERNEL);
++	if (!buf_layout) {
++		dev_err(dev, "devm_kzalloc() failed\n");
++		err = -ENOMEM;
++		goto alloc_failed;
++	}
++ dpa_set_buffers_layout(mac_dev, buf_layout);
++
++	/* For private ports, we need to compute the size of the default
++	 * buffer pool based on the FMan port buffer layout; also update
++	 * the maximum buffer size for private ports if necessary.
++	 */
++ dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
++
++#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
++ /* We only want to use jumbo frame optimization if we actually have
++ * L2 MAX FRM set for jumbo frames as well.
++ */
++#ifndef CONFIG_PPC
++ if (likely(!dpaa_errata_a010022))
++#endif
++	if (fm_get_max_frm() < 9600)
++ dev_warn(dev,
++ "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
++#endif
++
++ INIT_LIST_HEAD(&priv->dpa_fq_list);
++
++ memset(&port_fqs, 0, sizeof(port_fqs));
++
++ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
++ if (!err)
++ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
++ &port_fqs, true, TX);
++
++ if (err < 0)
++ goto fq_probe_failed;
++
++ /* bp init */
++
++ err = dpa_priv_bp_create(net_dev, dpa_bp, count);
++
++ if (err < 0)
++ goto bp_create_failed;
++
++ priv->mac_dev = mac_dev;
++
++ channel = dpa_get_channel();
++
++ if (channel < 0) {
++ err = channel;
++ goto get_channel_failed;
++ }
++
++ priv->channel = (uint16_t)channel;
++ dpaa_eth_add_channel(priv->channel);
++
++ dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
++
++ /* Create a congestion group for this netdev, with
++ * dynamically-allocated CGR ID.
++ * Must be executed after probing the MAC, but before
++ * assigning the egress FQs to the CGRs.
++ */
++ err = dpaa_eth_cgr_init(priv);
++ if (err < 0) {
++ dev_err(dev, "Error initializing CGR\n");
++ goto tx_cgr_init_failed;
++ }
++ err = dpaa_eth_priv_ingress_cgr_init(priv);
++ if (err < 0) {
++ dev_err(dev, "Error initializing ingress CGR\n");
++ goto rx_cgr_init_failed;
++ }
++
++ /* Add the FQs to the interface, and make them active */
++ err = dpa_fqs_init(dev, &priv->dpa_fq_list, false);
++ if (err < 0)
++ goto fq_alloc_failed;
++
++ priv->buf_layout = buf_layout;
++ priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
++ priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
++
++ /* All real interfaces need their ports initialized */
++ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
++ buf_layout, dev);
++
++#ifdef CONFIG_FMAN_PFC
++ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
++ err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
++ mac_dev->port_dev[TX], i, i);
++ if (unlikely(err != 0)) {
++			dev_err(dev, "Error mapping PFC %u to WQ %u\n", i, i);
++ goto pfc_mapping_failed;
++ }
++ }
++#endif
++
++ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
++
++ if (priv->percpu_priv == NULL) {
++ dev_err(dev, "devm_alloc_percpu() failed\n");
++ err = -ENOMEM;
++ goto alloc_percpu_failed;
++ }
++ for_each_possible_cpu(i) {
++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
++ memset(percpu_priv, 0, sizeof(*percpu_priv));
++ }
++
++ /* Initialize NAPI */
++ err = dpa_private_napi_add(net_dev);
++
++ if (err < 0)
++ goto napi_add_failed;
++
++ err = dpa_private_netdev_init(net_dev);
++
++ if (err < 0)
++ goto netdev_init_failed;
++
++ dpaa_eth_sysfs_init(&net_dev->dev);
++
++#ifdef CONFIG_PM
++ device_set_wakeup_capable(dev, true);
++#endif
++
++ pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
++
++ return 0;
++
++netdev_init_failed:
++napi_add_failed:
++ dpa_private_napi_del(net_dev);
++alloc_percpu_failed:
++#ifdef CONFIG_FMAN_PFC
++pfc_mapping_failed:
++#endif
++ dpa_fq_free(dev, &priv->dpa_fq_list);
++fq_alloc_failed:
++ qman_delete_cgr_safe(&priv->ingress_cgr);
++ qman_release_cgrid(priv->ingress_cgr.cgrid);
++rx_cgr_init_failed:
++ qman_delete_cgr_safe(&priv->cgr_data.cgr);
++ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
++tx_cgr_init_failed:
++get_channel_failed:
++ dpa_bp_free(priv);
++bp_create_failed:
++fq_probe_failed:
++alloc_failed:
++mac_probe_failed:
++ dev_set_drvdata(dev, NULL);
++ free_netdev(net_dev);
++alloc_etherdev_mq_failed:
++ if (atomic_read(&dpa_bp->refs) == 0)
++ devm_kfree(dev, dpa_bp);
++
++ return err;
++}
++
++static const struct of_device_id dpa_match[] = {
++ {
++ .compatible = "fsl,dpa-ethernet"
++ },
++ {}
++};
++MODULE_DEVICE_TABLE(of, dpa_match);
++
++static struct platform_driver dpa_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = dpa_match,
++ .owner = THIS_MODULE,
++ .pm = DPAA_PM_OPS,
++ },
++ .probe = dpaa_eth_priv_probe,
++ .remove = dpa_remove
++};
++
++#ifndef CONFIG_PPC
++static bool __init __cold soc_has_errata_a010022(void)
++{
++#ifdef CONFIG_SOC_BUS
++ const struct soc_device_attribute soc_msi_matches[] = {
++ { .family = "QorIQ LS1043A",
++ .data = NULL },
++ { },
++ };
++
++ if (soc_device_match(soc_msi_matches))
++ return true;
++
++ return false;
++#else
++ return true; /* cannot identify SoC */
++#endif
++}
++#endif
++
++static int __init __cold dpa_load(void)
++{
++ int _errno;
++
++ pr_info(DPA_DESCRIPTION "\n");
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++ dpa_debugfs_module_init();
++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
++
++ /* initialise dpaa_eth mirror values */
++ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
++ dpa_max_frm = fm_get_max_frm();
++ dpa_num_cpus = num_possible_cpus();
++
++#ifndef CONFIG_PPC
++ /* Detect if the current SoC requires the 4K alignment workaround */
++ dpaa_errata_a010022 = soc_has_errata_a010022();
++#endif
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++ memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
++#endif
++
++ _errno = platform_driver_register(&dpa_driver);
++ if (unlikely(_errno < 0)) {
++ pr_err(KBUILD_MODNAME
++ ": %s:%hu:%s(): platform_driver_register() = %d\n",
++ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
++ }
++
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME".c", __func__);
++
++ return _errno;
++}
++module_init(dpa_load);
++
++static void __exit __cold dpa_unload(void)
++{
++ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
++ KBUILD_BASENAME".c", __func__);
++
++ platform_driver_unregister(&dpa_driver);
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++ dpa_debugfs_module_exit();
++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
++
++	/* Only one channel is used and needs to be released after all
++	 * interfaces are removed
++	 */
++ dpa_release_channel();
++
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME".c", __func__);
++}
++module_exit(dpa_unload);
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
+new file mode 100644
+index 00000000..b1703bc1
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
+@@ -0,0 +1,703 @@
++/* Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPA_H
++#define __DPA_H
++
++#include <linux/netdevice.h>
++#include <linux/fsl_qman.h> /* struct qman_fq */
++
++#include "fm_ext.h"
++#include "dpaa_eth_trace.h"
++
++extern int dpa_rx_extra_headroom;
++extern int dpa_max_frm;
++extern int dpa_num_cpus;
++
++#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
++#define dpa_get_max_frm() dpa_max_frm
++
++#define dpa_get_max_mtu() \
++ (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
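++/* e.g. an L2 max frame length of 1522 bytes yields the standard 1500-byte MTU */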
++
++#define __hot
++
++/* Simple enum of FQ types - used for array indexing */
++enum port_type {RX, TX};
++
++/* TODO: This structure should be renamed & moved to the FMD wrapper */
++struct dpa_buffer_layout_s {
++ uint16_t priv_data_size;
++ bool parse_results;
++ bool time_stamp;
++ bool hash_results;
++ uint8_t manip_extra_space;
++ uint16_t data_align;
++};
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define DPA_BUG_ON(cond) BUG_ON(cond)
++#else
++#define DPA_BUG_ON(cond)
++#endif
++
++#define DPA_TX_PRIV_DATA_SIZE 16
++#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
++#define DPA_TIME_STAMP_SIZE 8
++#define DPA_HASH_RESULTS_SIZE 8
++#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
++ dpa_get_rx_extra_headroom())
++
++#define FM_FD_STAT_RX_ERRORS \
++ (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
++ FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
++ FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
++ FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
++ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
++
++#define FM_FD_STAT_TX_ERRORS \
++ (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
++ FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
++
++#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
++/* The raw buffer size must be cacheline aligned.
++ * Normally we use 2K buffers.
++ */
++#define DPA_BP_RAW_SIZE 2048
++#else
++/* For jumbo frame optimizations, use buffers large enough to accommodate
++ * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
++ * space to account for further alignments.
++ */
++#define DPA_MAX_FRM_SIZE 9600
++#ifdef CONFIG_PPC
++#define DPA_BP_RAW_SIZE \
++ ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
++ sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
++#else /* CONFIG_PPC */
++#define DPA_BP_RAW_SIZE ((unlikely(dpaa_errata_a010022)) ? 2048 : \
++ ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
++ sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1)))
++#endif /* CONFIG_PPC */
++#endif /* CONFIG_FSL_DPAA_ETH_JUMBO_FRAME */
++
++/* This is the most FMan is ever allowed to use.
++ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
++ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
++ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
++ * half-page-aligned buffers (can we?), so we reserve some more space
++ * for start-of-buffer alignment.
++ */
++#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
++ SMP_CACHE_BYTES)
++/* We must ensure that skb_shinfo is always cacheline-aligned. */
++#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
++
++/* Maximum size of a buffer for which recycling is allowed.
++ * We need an upper limit such that forwarded skbs that get reallocated on Tx
++ * aren't allowed to grow unboundedly. On the other hand, we need to make sure
++ * that skbs allocated by us will not fail to be recycled due to their size.
++ *
++ * For a requested size, the kernel allocator provides the next power of two
++ * sized block, which the stack will use as is, regardless of the actual size
++ * it required; since we must accommodate at most 9.6K buffers (L2 maximum
++ * supported frame size), set the recycling upper limit to 16K.
++ */
++#define DPA_RECYCLE_MAX_SIZE 16384
++
++#if defined(CONFIG_FSL_SDK_FMAN_TEST)
++/*TODO: temporary for fman pcd testing */
++#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
++#endif
++
++#define DPAA_ETH_FQ_DELTA 0x10000
++
++#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
++ (((device_addr) & 0x1fffff) >> 6)
++
++#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \
++ (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr))
++
++/* Largest value that the FQD's OAL field can hold.
++ * This is DPAA-1.x specific.
++ * TODO: This rather belongs in fsl_qman.h
++ */
++#define FSL_QMAN_MAX_OAL 127
++
++/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
++#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
++
++/* Default alignment for start of data in an Rx FD */
++#define DPA_FD_DATA_ALIGNMENT 16
++
++/* Values for the L3R field of the FM Parse Results
++ */
++/* L3 Type field: First IP Present IPv4 */
++#define FM_L3_PARSE_RESULT_IPV4 0x8000
++/* L3 Type field: First IP Present IPv6 */
++#define FM_L3_PARSE_RESULT_IPV6 0x4000
++
++/* Values for the L4R field of the FM Parse Results
++ * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
++ */
++/* L4 Type field: UDP */
++#define FM_L4_PARSE_RESULT_UDP 0x40
++/* L4 Type field: TCP */
++#define FM_L4_PARSE_RESULT_TCP 0x20
++/* FD status field indicating whether the FM Parser has attempted to validate
++ * the L4 csum of the frame.
++ * Note that having this bit set doesn't necessarily imply that the checksum
++ * is valid. One would have to check the parse results to find that out.
++ */
++#define FM_FD_STAT_L4CV 0x00000004
++
++
++#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
++
++/* Check if the parsed frame was found to be a TCP segment.
++ *
++ * @parse_result_ptr must be of type (fm_prs_result_t *).
++ */
++#define fm_l4_frame_is_tcp(parse_result_ptr) \
++ ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
++
++/* number of Tx queues to FMan */
++#ifdef CONFIG_FMAN_PFC
++#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT)
++#else
++#define DPAA_ETH_TX_QUEUES NR_CPUS
++#endif
++
++#define DPAA_ETH_RX_QUEUES 128
++
++/* Convenience macros for storing/retrieving the skb back-pointers. They must
++ * accommodate both recycling and confirmation paths - i.e. cases when the buf
++ * was allocated by ourselves, respectively by the stack. In the former case,
++ * we could store the skb at negative offset; in the latter case, we can't,
++ * so we'll use 0 as offset.
++ *
++ * NB: @off is an offset from a (struct sk_buff **) pointer!
++ */
++#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
++{ \
++ skbh = (struct sk_buff **)addr; \
++ *(skbh + (off)) = skb; \
++}
++#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
++{ \
++ skbh = (struct sk_buff **)addr; \
++ skb = *(skbh + (off)); \
++}
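++/* For instance, the Tx path can store the skb at the start of the
++ * buffer with DPA_WRITE_SKB_PTR(skb, skbh, vaddr, 0) and the Tx
++ * confirmation path can read it back with the matching
++ * DPA_READ_SKB_PTR(skb, skbh, vaddr, 0).
++ */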
++
++#ifdef CONFIG_PM
++/* Magic Packet wakeup */
++#define DPAA_WOL_MAGIC 0x00000001
++#endif
++
++#if defined(CONFIG_FSL_SDK_FMAN_TEST)
++struct pcd_range {
++ uint32_t base;
++ uint32_t count;
++};
++#endif
++
++/* More detailed FQ types - used for fine-grained WQ assignments */
++enum dpa_fq_type {
++ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
++ FQ_TYPE_RX_ERROR, /* Rx Error FQs */
++ FQ_TYPE_RX_PCD, /* User-defined PCDs */
++ FQ_TYPE_TX, /* "Real" Tx FQs */
++ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
++ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
++ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
++ FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */
++};
++
++struct dpa_fq {
++ struct qman_fq fq_base;
++ struct list_head list;
++ struct net_device *net_dev;
++ bool init;
++ uint32_t fqid;
++ uint32_t flags;
++ uint16_t channel;
++ uint8_t wq;
++ enum dpa_fq_type fq_type;
++};
++
++struct dpa_fq_cbs_t {
++ struct qman_fq rx_defq;
++ struct qman_fq tx_defq;
++ struct qman_fq rx_errq;
++ struct qman_fq tx_errq;
++ struct qman_fq egress_ern;
++};
++
++struct fqid_cell {
++ uint32_t start;
++ uint32_t count;
++};
++
++struct dpa_bp {
++ struct bman_pool *pool;
++ uint8_t bpid;
++ struct device *dev;
++ union {
++ /* The buffer pools used for the private ports are initialized
++ * with target_count buffers for each CPU; at runtime the
++ * number of buffers per CPU is constantly brought back to this
++ * level
++ */
++ int target_count;
++ /* The configured value for the number of buffers in the pool,
++ * used for shared port buffer pools
++ */
++ int config_count;
++ };
++ size_t size;
++ bool seed_pool;
++ /* physical address of the contiguous memory used by the pool to store
++ * the buffers
++ */
++ dma_addr_t paddr;
++ /* virtual address of the contiguous memory used by the pool to store
++ * the buffers
++ */
++ void __iomem *vaddr;
++	/* current number of buffers in the bpool allotted to this CPU */
++ int __percpu *percpu_count;
++ atomic_t refs;
++ /* some bpools need to be seeded before use by this cb */
++ int (*seed_cb)(struct dpa_bp *);
++ /* some bpools need to be emptied before freeing; this cb is used
++ * for freeing of individual buffers taken from the pool
++ */
++ void (*free_buf_cb)(void *addr);
++};
++
++struct dpa_rx_errors {
++ u64 dme; /* DMA Error */
++ u64 fpe; /* Frame Physical Error */
++ u64 fse; /* Frame Size Error */
++ u64 phe; /* Header Error */
++ u64 cse; /* Checksum Validation Error */
++};
++
++/* Counters for QMan ERN frames - one counter per rejection code */
++struct dpa_ern_cnt {
++ u64 cg_tdrop; /* Congestion group taildrop */
++ u64 wred; /* WRED congestion */
++ u64 err_cond; /* Error condition */
++ u64 early_window; /* Order restoration, frame too early */
++ u64 late_window; /* Order restoration, frame too late */
++ u64 fq_tdrop; /* FQ taildrop */
++ u64 fq_retired; /* FQ is retired */
++ u64 orp_zero; /* ORP disabled */
++};
++
++struct dpa_napi_portal {
++ struct napi_struct napi;
++ struct qman_portal *p;
++};
++
++struct dpa_percpu_priv_s {
++ struct net_device *net_dev;
++ struct dpa_napi_portal *np;
++ u64 in_interrupt;
++ u64 tx_returned;
++ u64 tx_confirm;
++ /* fragmented (non-linear) skbuffs received from the stack */
++ u64 tx_frag_skbuffs;
++ /* number of S/G frames received */
++ u64 rx_sg;
++
++ struct rtnl_link_stats64 stats;
++ struct dpa_rx_errors rx_errors;
++ struct dpa_ern_cnt ern_cnt;
++};
++
++struct dpa_priv_s {
++ struct dpa_percpu_priv_s __percpu *percpu_priv;
++ struct dpa_bp *dpa_bp;
++ /* Store here the needed Tx headroom for convenience and speed
++ * (even though it can be computed based on the fields of buf_layout)
++ */
++ uint16_t tx_headroom;
++ struct net_device *net_dev;
++ struct mac_device *mac_dev;
++ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
++ struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
++
++ size_t bp_count;
++
++ uint16_t channel; /* "fsl,qman-channel-id" */
++ struct list_head dpa_fq_list;
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++ struct dentry *debugfs_loop_file;
++#endif
++
++ uint32_t msg_enable; /* net_device message level */
++#ifdef CONFIG_FSL_DPAA_1588
++ struct dpa_ptp_tsu *tsu;
++#endif
++
++#if defined(CONFIG_FSL_SDK_FMAN_TEST)
++	/* TODO: this is temporary until pcd support is implemented in dpaa */
++ int priv_pcd_num_ranges;
++ struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES];
++#endif
++
++ struct {
++ /**
++ * All egress queues to a given net device belong to one
++ * (and the same) congestion group.
++ */
++ struct qman_cgr cgr;
++ /* If congested, when it began. Used for performance stats. */
++ u32 congestion_start_jiffies;
++ /* Number of jiffies the Tx port was congested. */
++ u32 congested_jiffies;
++ /**
++ * Counter for the number of times the CGR
++ * entered congestion state
++ */
++ u32 cgr_congested_count;
++ } cgr_data;
++ /* Use a per-port CGR for ingress traffic. */
++ bool use_ingress_cgr;
++ struct qman_cgr ingress_cgr;
++
++#ifdef CONFIG_FSL_DPAA_TS
++ bool ts_tx_en; /* Tx timestamping enabled */
++ bool ts_rx_en; /* Rx timestamping enabled */
++#endif /* CONFIG_FSL_DPAA_TS */
++
++ struct dpa_buffer_layout_s *buf_layout;
++ uint16_t rx_headroom;
++ char if_type[30];
++
++ void *peer;
++#ifdef CONFIG_PM
++ u32 wol;
++#endif
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++ int loop_id;
++ int loop_to;
++#endif
++#ifdef CONFIG_FSL_DPAA_CEETM
++ bool ceetm_en; /* CEETM QoS enabled */
++#endif
++};
++
++struct fm_port_fqs {
++ struct dpa_fq *tx_defq;
++ struct dpa_fq *tx_errq;
++ struct dpa_fq *rx_defq;
++ struct dpa_fq *rx_errq;
++};
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++extern struct net_device *dpa_loop_netdevs[20];
++#endif
++
++/* functions with different implementation for SG and non-SG: */
++int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
++int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
++void __hot _dpa_rx(struct net_device *net_dev,
++ struct qman_portal *portal,
++ const struct dpa_priv_s *priv,
++ struct dpa_percpu_priv_s *percpu_priv,
++ const struct qm_fd *fd,
++ u32 fqid,
++ int *count_ptr);
++int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
++int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
++ struct qman_fq *egress_fq, struct qman_fq *conf_fq);
++struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
++ const struct qm_fd *fd);
++void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
++ const struct qm_fd *fd,
++ struct sk_buff *skb,
++ int *use_gro);
++#ifndef CONFIG_FSL_DPAA_TS
++bool dpa_skb_is_recyclable(struct sk_buff *skb);
++bool dpa_buf_is_recyclable(struct sk_buff *skb,
++ uint32_t min_size,
++ uint16_t min_offset,
++ unsigned char **new_buf_start);
++#endif
++int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
++ struct sk_buff *skb, struct qm_fd *fd,
++ int *count_ptr, int *offset);
++int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
++ struct sk_buff *skb, struct qm_fd *fd);
++int __cold __attribute__((nonnull))
++ _dpa_fq_free(struct device *dev, struct qman_fq *fq);
++
++/* Turn on HW checksum computation for this outgoing frame.
++ * If the current protocol is not something we support in this regard
++ * (or if the stack has already computed the SW checksum), we do nothing.
++ *
++ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
++ * otherwise.
++ *
++ * Note that this function may modify the fd->cmd field and the skb data buffer
++ * (the Parse Results area).
++ */
++int dpa_enable_tx_csum(struct dpa_priv_s *priv,
++ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
++
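++/* Usage note (informative): dpaa_eth_napi_schedule() below is intended to
++ * be called from the QMan DQRR callbacks. A return value of 1 means the
++ * portal IRQ source was masked and frame processing was deferred to NAPI;
++ * 0 means the frame should be processed in the current context.
++ */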
++static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
++ struct qman_portal *portal)
++{
++	/* On RT-enabled kernels, where ISRs may run as threads,
++	 * in_irq() does not return an appropriate value, so use
++	 * in_serving_softirq() to distinguish softirq from irq context.
++	 */
++ if (unlikely(in_irq() || !in_serving_softirq())) {
++ /* Disable QMan IRQ and invoke NAPI */
++ int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
++ if (likely(!ret)) {
++ const struct qman_portal_config *pc =
++ qman_p_get_portal_config(portal);
++ struct dpa_napi_portal *np =
++ &percpu_priv->np[pc->index];
++
++ np->p = portal;
++ napi_schedule(&np->napi);
++ percpu_priv->in_interrupt++;
++ return 1;
++ }
++ }
++ return 0;
++}
++
++static inline ssize_t __const __must_check __attribute__((nonnull))
++dpa_fd_length(const struct qm_fd *fd)
++{
++ return fd->length20;
++}
++
++static inline ssize_t __const __must_check __attribute__((nonnull))
++dpa_fd_offset(const struct qm_fd *fd)
++{
++ return fd->offset;
++}
++
++/* Verify that the skb length does not exceed the interface MTU;
++ * VLAN-tagged frames are allowed 4 extra bytes for the tag.
++ */
++static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
++{
++ if (unlikely(skb->len > mtu))
++		if ((skb->protocol != htons(ETH_P_8021Q)) ||
++		    (skb->len > mtu + 4))
++ return -1;
++
++ return 0;
++}
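++
++/* Example (hypothetical numbers): with an MTU of 1500, a 1504-byte
++ * VLAN-tagged (ETH_P_8021Q) frame is accepted, while an untagged
++ * 1504-byte frame is rejected.
++ */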
++
++static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl)
++{
++ uint16_t headroom;
++ /* The frame headroom must accommodate:
++ * - the driver private data area
++ * - parse results, hash results, timestamp if selected
++ * - manip extra space
++	 * If either the hash results or the timestamp is selected, both will
++	 * be copied to/from the frame headroom, as the TS is located between
++	 * the PR and HR in the IC, and the IC copy size has a granularity of
++	 * 16 bytes (see the description of the FMBM_RICP and FMBM_TICP
++	 * registers in DPAARM)
++ *
++ * Also make sure the headroom is a multiple of data_align bytes
++ */
++ headroom = (uint16_t)(bl->priv_data_size +
++ (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
++ (bl->hash_results || bl->time_stamp ?
++ DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) +
++ bl->manip_extra_space);
++
++ return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
++}
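++
++/* Worked example (hypothetical sizes): with priv_data_size = 16, parse
++ * results selected, hash results and timestamp off, no manip extra space,
++ * and assuming DPA_PARSE_RESULTS_SIZE is 32, the raw headroom is
++ * 16 + 32 = 48 bytes; with data_align = 64 it is rounded up to 64.
++ */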
++
++int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n);
++int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n);
++int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n);
++
++void dpaa_eth_sysfs_remove(struct device *dev);
++void dpaa_eth_sysfs_init(struct device *dev);
++int dpaa_eth_poll(struct napi_struct *napi, int budget);
++
++void dpa_private_napi_del(struct net_device *net_dev);
++
++/* Equivalent to a memset(0), but works faster */
++static inline void clear_fd(struct qm_fd *fd)
++{
++ fd->opaque_addr = 0;
++ fd->opaque = 0;
++ fd->cmd = 0;
++}
++
++static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
++ struct qman_fq *tx_fq)
++{
++ int i;
++
++ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
++ if (priv->egress_fqs[i] == tx_fq)
++ return i;
++
++ return -EINVAL;
++}
++
++static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
++ struct rtnl_link_stats64 *percpu_stats,
++ struct qm_fd *fd, struct qman_fq *egress_fq,
++ struct qman_fq *conf_fq)
++{
++ int err, i;
++
++ if (fd->bpid == 0xff)
++ fd->cmd |= qman_fq_fqid(conf_fq);
++
++ /* Trace this Tx fd */
++ trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
++
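++	/* The EQCR may be transiently full; retry the enqueue on -EBUSY
++	 * instead of dropping the frame outright
++	 */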
++ for (i = 0; i < 100000; i++) {
++ err = qman_enqueue(egress_fq, fd, 0);
++ if (err != -EBUSY)
++ break;
++ }
++
++ if (unlikely(err < 0)) {
++ /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
++ percpu_stats->tx_errors++;
++ percpu_stats->tx_fifo_errors++;
++ return err;
++ }
++
++ percpu_stats->tx_packets++;
++ percpu_stats->tx_bytes += dpa_fd_length(fd);
++
++ return 0;
++}
++
++/* Use multiple WQs for FQ assignment:
++ * - Tx Confirmation queues go to WQ1.
++ * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between
++ * Rx and Tx traffic, or between Rx Default and Rx PCD frames).
++ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
++ * to be scheduled, in case there are many more FQs in WQ3).
++ * This ensures that Tx-confirmed buffers are released in a timely manner. In
++ * particular, it avoids congestion on the Tx Confirm FQs, which can pile up
++ * PFDRs when they are greatly outnumbered by other FQs in the system (usually
++ * PCDs), since dequeue scheduling is round-robin.
++ */
++static inline void _dpa_assign_wq(struct dpa_fq *fq)
++{
++ switch (fq->fq_type) {
++ case FQ_TYPE_TX_CONFIRM:
++ case FQ_TYPE_TX_CONF_MQ:
++ fq->wq = 1;
++ break;
++ case FQ_TYPE_RX_DEFAULT:
++ case FQ_TYPE_TX:
++ fq->wq = 3;
++ break;
++ case FQ_TYPE_RX_ERROR:
++ case FQ_TYPE_TX_ERROR:
++ case FQ_TYPE_RX_PCD_HI_PRIO:
++ fq->wq = 2;
++ break;
++ case FQ_TYPE_RX_PCD:
++ fq->wq = 5;
++ break;
++ default:
++ WARN(1, "Invalid FQ type %d for FQID %d!\n",
++ fq->fq_type, fq->fqid);
++ }
++}
++
++#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++/* Use in lieu of skb_get_queue_mapping() */
++#ifdef CONFIG_FMAN_PFC
++#define dpa_get_queue_mapping(skb) \
++ (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
++ ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
++ ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
++	dpa_num_cpus + smp_processor_id()))
++
++#else
++#define dpa_get_queue_mapping(skb) \
++ raw_smp_processor_id()
++#endif
++#else
++/* Use the queue selected by XPS */
++#define dpa_get_queue_mapping(skb) \
++ skb_get_queue_mapping(skb)
++#endif
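++
++/* Example (hypothetical values) for the PFC case: with dpa_num_cpus = 4,
++ * an skb of priority 2 sent from CPU 1 maps to queue 2 * 4 + 1 = 9;
++ * priorities at or above CONFIG_FMAN_PFC_COS_COUNT are clamped to the
++ * last class of service.
++ */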
++
++#ifdef CONFIG_PTP_1588_CLOCK_DPAA
++struct ptp_priv_s {
++ struct device_node *node;
++ struct platform_device *of_dev;
++ struct ptp_clock *clock;
++ struct mac_device *mac_dev;
++};
++extern struct ptp_priv_s ptp_priv;
++#endif
++
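++/* Release a page-backed buffer by dropping the reference on its head page */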
++static inline void _dpa_bp_free_pf(void *addr)
++{
++ put_page(virt_to_head_page(addr));
++}
++
++/* The LS1043A SoC has a HW issue with FMan DMA transactions. The issue
++ * manifests itself at high traffic rates when frames cross 4K memory
++ * boundaries or are not aligned to 16 bytes. For the moment, we use a SW
++ * workaround that avoids frames larger than 4K, keeps them from crossing
++ * 4K boundaries and realigns them to 16 bytes.
++ */
++
++#ifndef CONFIG_PPC
++extern bool dpaa_errata_a010022; /* SoC affected by A010022 errata */
++#define NONREC_MARK 0x01
++#define HAS_DMA_ISSUE(start, size) \
++ (((uintptr_t)(start) + (size)) > \
++ (((uintptr_t)(start) + 0x1000) & ~0xFFF))
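++/* Example: a 0x100-byte frame starting at 0xFF80 ends at 0x10080, past
++ * the next 4K boundary (0x10000), so HAS_DMA_ISSUE(0xFF80, 0x100) is true.
++ */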
++#endif /* !CONFIG_PPC */
++
++#endif /* __DPA_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
+new file mode 100644
+index 00000000..507e77c3
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
+@@ -0,0 +1,205 @@
++/* Copyright 2008-2013 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++ KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": " fmt
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/io.h>
++#include <linux/of_platform.h>
++#include <linux/of_net.h>
++#include <linux/etherdevice.h>
++#include <linux/kthread.h>
++#include <linux/percpu.h>
++#include <linux/highmem.h>
++#include <linux/sort.h>
++#include <linux/fsl_qman.h>
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++#include "dpaa_eth_base.h"
++
++#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:"
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++uint8_t advanced_debug = -1;
++module_param(advanced_debug, byte, S_IRUGO);
++MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level");
++EXPORT_SYMBOL(advanced_debug);
++
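++/* Comparator for sort(): orders the buffer pools by ascending buffer size */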
++static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
++{
++ return ((struct dpa_bp *)dpa_bp0)->size -
++ ((struct dpa_bp *)dpa_bp1)->size;
++}
++
++struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
++dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
++{
++ int i, lenp, na, ns, err;
++ struct device *dev;
++ struct device_node *dev_node;
++ const __be32 *bpool_cfg;
++ struct dpa_bp *dpa_bp;
++ u32 bpid;
++
++ dev = &_of_dev->dev;
++
++ *count = of_count_phandle_with_args(dev->of_node,
++ "fsl,bman-buffer-pools", NULL);
++ if (*count < 1) {
++ dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n");
++ return ERR_PTR(-EINVAL);
++ }
++
++ dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
++ if (dpa_bp == NULL) {
++ dev_err(dev, "devm_kzalloc() failed\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ dev_node = of_find_node_by_path("/");
++ if (unlikely(dev_node == NULL)) {
++ dev_err(dev, "of_find_node_by_path(/) failed\n");
++ return ERR_PTR(-EINVAL);
++ }
++
++ na = of_n_addr_cells(dev_node);
++ ns = of_n_size_cells(dev_node);
++
++ for (i = 0; i < *count; i++) {
++ of_node_put(dev_node);
++
++ dev_node = of_parse_phandle(dev->of_node,
++ "fsl,bman-buffer-pools", i);
++ if (dev_node == NULL) {
++ dev_err(dev, "of_find_node_by_phandle() failed\n");
++ return ERR_PTR(-EFAULT);
++ }
++
++ if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
++ dev_err(dev,
++ "!of_device_is_compatible(%s, fsl,bpool)\n",
++ dev_node->full_name);
++ dpa_bp = ERR_PTR(-EINVAL);
++ goto _return_of_node_put;
++ }
++
++ err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
++ if (err) {
++ dev_err(dev, "Cannot find buffer pool ID in the device tree\n");
++ dpa_bp = ERR_PTR(-EINVAL);
++ goto _return_of_node_put;
++ }
++ dpa_bp[i].bpid = (uint8_t)bpid;
++
++ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
++ &lenp);
++ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
++ const uint32_t *seed_pool;
++
++ dpa_bp[i].config_count =
++ (int)of_read_number(bpool_cfg, ns);
++ dpa_bp[i].size =
++ (size_t)of_read_number(bpool_cfg + ns, ns);
++ dpa_bp[i].paddr =
++ of_read_number(bpool_cfg + 2 * ns, na);
++
++ seed_pool = of_get_property(dev_node,
++ "fsl,bpool-ethernet-seeds", &lenp);
++ dpa_bp[i].seed_pool = !!seed_pool;
++
++ } else {
++ dev_err(dev,
++ "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
++ dev_node->full_name);
++ dpa_bp = ERR_PTR(-EINVAL);
++ goto _return_of_node_put;
++ }
++ }
++
++ sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
++
++ return dpa_bp;
++
++_return_of_node_put:
++ if (dev_node)
++ of_node_put(dev_node);
++
++ return dpa_bp;
++}
++EXPORT_SYMBOL(dpa_bp_probe);
++
++int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
++ size_t count)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ int i;
++
++ priv->dpa_bp = dpa_bp;
++ priv->bp_count = count;
++
++ for (i = 0; i < count; i++) {
++ int err;
++ err = dpa_bp_alloc(&dpa_bp[i]);
++ if (err < 0) {
++ dpa_bp_free(priv);
++ priv->dpa_bp = NULL;
++ return err;
++ }
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(dpa_bp_create);
++
++static int __init __cold dpa_advanced_load(void)
++{
++ pr_info(DPA_DESCRIPTION "\n");
++
++ return 0;
++}
++module_init(dpa_advanced_load);
++
++static void __exit __cold dpa_advanced_unload(void)
++{
++ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
++ KBUILD_BASENAME".c", __func__);
++
++}
++module_exit(dpa_advanced_unload);
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
+new file mode 100644
+index 00000000..6ec68c3c
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
+@@ -0,0 +1,49 @@
++/* Copyright 2008-2013 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPAA_ETH_BASE_H
++#define __DPAA_ETH_BASE_H
++
++#include <linux/etherdevice.h> /* struct net_device */
++#include <linux/fsl_bman.h> /* struct bm_buffer */
++#include <linux/of_platform.h> /* struct platform_device */
++#include <linux/net_tstamp.h> /* struct hwtstamp_config */
++
++extern uint8_t advanced_debug;
++extern const struct dpa_fq_cbs_t shared_fq_cbs;
++extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
++
++struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
++dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
++int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
++ size_t count);
++
++#endif /* __DPAA_ETH_BASE_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
+new file mode 100644
+index 00000000..cac613b7
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
+@@ -0,0 +1,1992 @@
++/* Copyright 2008-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/init.h>
++#include "dpaa_eth_ceetm.h"
++
++#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc"
++
++const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = {
++ [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) },
++ [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) },
++};
++
++struct Qdisc_ops ceetm_qdisc_ops;
++
++/* Obtain the DCP and the SP ids from the FMan port */
++static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
++ unsigned int *sp_id)
++{
++ uint32_t channel;
++ t_LnxWrpFmPortDev *port_dev;
++ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
++ struct mac_device *mac_dev = dpa_priv->mac_dev;
++
++ port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX];
++ channel = port_dev->txCh;
++
++ *sp_id = channel & CHANNEL_SP_MASK;
++ pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id);
++
++ if (channel < DCP0_MAX_CHANNEL) {
++ *dcp_id = qm_dc_portal_fman0;
++ pr_debug(KBUILD_BASENAME " : DCP ID 0\n");
++ } else {
++ *dcp_id = qm_dc_portal_fman1;
++ pr_debug(KBUILD_BASENAME " : DCP ID 1\n");
++ }
++}
++
++/* Enqueue Rejection Notification callback */
++static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
++ const struct qm_mr_entry *msg)
++{
++ struct net_device *net_dev;
++ struct ceetm_class *cls;
++ struct ceetm_class_stats *cstats = NULL;
++ const struct dpa_priv_s *dpa_priv;
++ struct dpa_percpu_priv_s *dpa_percpu_priv;
++ struct sk_buff *skb;
++ struct qm_fd fd = msg->ern.fd;
++
++ net_dev = ((struct ceetm_fq *)fq)->net_dev;
++ dpa_priv = netdev_priv(net_dev);
++ dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
++
++ /* Increment DPA counters */
++ dpa_percpu_priv->stats.tx_dropped++;
++ dpa_percpu_priv->stats.tx_fifo_errors++;
++
++ /* Increment CEETM counters */
++ cls = ((struct ceetm_fq *)fq)->ceetm_cls;
++ switch (cls->type) {
++ case CEETM_PRIO:
++ cstats = this_cpu_ptr(cls->prio.cstats);
++ break;
++ case CEETM_WBFS:
++ cstats = this_cpu_ptr(cls->wbfs.cstats);
++ break;
++ }
++
++ if (cstats)
++ cstats->ern_drop_count++;
++
++ if (fd.bpid != 0xff) {
++ dpa_fd_release(net_dev, &fd);
++ return;
++ }
++
++ skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
++ dev_kfree_skb_any(skb);
++}
++
++/* Congestion State Change Notification callback */
++static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
++{
++ struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx;
++ struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev);
++ struct ceetm_class *cls = ceetm_fq->ceetm_cls;
++ struct ceetm_class_stats *cstats = NULL;
++
++ switch (cls->type) {
++ case CEETM_PRIO:
++ cstats = this_cpu_ptr(cls->prio.cstats);
++ break;
++ case CEETM_WBFS:
++ cstats = this_cpu_ptr(cls->wbfs.cstats);
++ break;
++ }
++
++ if (congested) {
++ dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
++ netif_tx_stop_all_queues(dpa_priv->net_dev);
++ dpa_priv->cgr_data.cgr_congested_count++;
++ if (cstats)
++ cstats->congested_count++;
++ } else {
++ dpa_priv->cgr_data.congested_jiffies +=
++ (jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
++ netif_tx_wake_all_queues(dpa_priv->net_dev);
++ }
++}
++
++/* Allocate a ceetm fq */
++static int ceetm_alloc_fq(struct ceetm_fq **fq, struct net_device *dev,
++ struct ceetm_class *cls)
++{
++ *fq = kzalloc(sizeof(**fq), GFP_KERNEL);
++ if (!*fq)
++ return -ENOMEM;
++
++ (*fq)->net_dev = dev;
++ (*fq)->ceetm_cls = cls;
++ return 0;
++}
++
++/* Configure a ceetm Class Congestion Group */
++static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
++ struct qm_ceetm_channel *channel, unsigned int id,
++ struct ceetm_fq *fq, struct dpa_priv_s *dpa_priv)
++{
++ int err;
++ u32 cs_th;
++ u16 ccg_mask;
++ struct qm_ceetm_ccg_params ccg_params;
++
++ err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq);
++ if (err)
++ return err;
++
++ /* Configure the count mode (frames/bytes), enable congestion state
++ * notifications, configure the congestion entry and exit thresholds,
++ * enable tail-drop, configure the tail-drop mode, and set the
++ * overhead accounting limit
++ */
++ ccg_mask = QM_CCGR_WE_MODE |
++ QM_CCGR_WE_CSCN_EN |
++ QM_CCGR_WE_CS_THRES_IN | QM_CCGR_WE_CS_THRES_OUT |
++ QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE |
++ QM_CCGR_WE_OAL;
++
++ ccg_params.mode = 0; /* count bytes */
++ ccg_params.cscn_en = 1; /* generate notifications */
++ ccg_params.td_en = 1; /* enable tail-drop */
++ ccg_params.td_mode = 0; /* tail-drop on congestion state */
++ ccg_params.oal = (signed char)(min(sizeof(struct sk_buff) +
++ dpa_priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
++
++ /* Set the congestion state thresholds according to the link speed */
++ if (dpa_priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
++ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
++ else
++ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
++
++ qm_cgr_cs_thres_set64(&ccg_params.cs_thres_in, cs_th, 1);
++ qm_cgr_cs_thres_set64(&ccg_params.cs_thres_out,
++ cs_th * CEETM_CCGR_RATIO, 1);
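++	/* The exit threshold above is scaled from the entry threshold by
++	 * CEETM_CCGR_RATIO, presumably to add hysteresis between entering
++	 * and leaving the congestion state
++	 */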
++
++ err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params);
++ if (err)
++ return err;
++
++ return 0;
++}
++
++/* Configure a ceetm Logical Frame Queue */
++static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq,
++ struct qm_ceetm_lfq **lfq)
++{
++ int err;
++ u64 context_a;
++ u32 context_b;
++
++ err = qman_ceetm_lfq_claim(lfq, cq);
++ if (err)
++ return err;
++
++ /* Get the former contexts in order to preserve context B */
++ err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b);
++ if (err)
++ return err;
++
++ context_a = CEETM_CONTEXT_A;
++ err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b);
++ if (err)
++ return err;
++
++ (*lfq)->ern = ceetm_ern;
++
++ err = qman_ceetm_create_fq(*lfq, &fq->fq);
++ if (err)
++ return err;
++
++ return 0;
++}
++
++/* Configure a prio ceetm class */
++static int ceetm_config_prio_cls(struct ceetm_class *cls,
++ struct net_device *dev,
++ struct qm_ceetm_channel *channel,
++ unsigned int id)
++{
++ int err;
++ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
++
++ err = ceetm_alloc_fq(&cls->prio.fq, dev, cls);
++ if (err)
++ return err;
++
++ /* Claim and configure the CCG */
++ err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq,
++ dpa_priv);
++ if (err)
++ return err;
++
++ /* Claim and configure the CQ */
++ err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg);
++ if (err)
++ return err;
++
++ if (cls->shaped) {
++ err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1);
++ if (err)
++ return err;
++
++ err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1);
++ if (err)
++ return err;
++ }
++
++ /* Claim and configure a LFQ */
++ err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq);
++ if (err)
++ return err;
++
++ return 0;
++}
++
++/* Configure a wbfs ceetm class */
++static int ceetm_config_wbfs_cls(struct ceetm_class *cls,
++ struct net_device *dev,
++ struct qm_ceetm_channel *channel,
++ unsigned int id, int type)
++{
++ int err;
++ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
++
++ err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls);
++ if (err)
++ return err;
++
++ /* Claim and configure the CCG */
++ err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq,
++ dpa_priv);
++ if (err)
++ return err;
++
++ /* Claim and configure the CQ */
++ if (type == WBFS_GRP_B)
++ err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id,
++ cls->wbfs.ccg);
++ else
++ err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id,
++ cls->wbfs.ccg);
++ if (err)
++ return err;
++
++	/* Configure the CQ weight: the real-number weight is multiplied by
++	 * 100 to drop the fractional part
++	 */
++ err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq,
++ cls->wbfs.weight * 100);
++ if (err)
++ return err;
++
++ /* Claim and configure a LFQ */
++ err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq);
++ if (err)
++ return err;
++
++ return 0;
++}
++
++/* Find class in qdisc hash table using given handle */
++static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch)
++{
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct Qdisc_class_common *clc;
++
++ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
++ __func__, handle, sch->handle);
++
++ clc = qdisc_class_find(&priv->clhash, handle);
++ return clc ? container_of(clc, struct ceetm_class, common) : NULL;
++}
++
++/* Insert a class in the qdisc's class hash */
++static void ceetm_link_class(struct Qdisc *sch,
++ struct Qdisc_class_hash *clhash,
++ struct Qdisc_class_common *common)
++{
++ sch_tree_lock(sch);
++ qdisc_class_hash_insert(clhash, common);
++ sch_tree_unlock(sch);
++ qdisc_class_hash_grow(sch, clhash);
++}
++
++/* Destroy a ceetm class */
++static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
++{
++ if (!cl)
++ return;
++
++ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ if (cl->root.child) {
++ qdisc_destroy(cl->root.child);
++ cl->root.child = NULL;
++ }
++
++ if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch))
++ pr_err(KBUILD_BASENAME
++ " : %s : error releasing the channel %d\n",
++ __func__, cl->root.ch->idx);
++
++ break;
++
++ case CEETM_PRIO:
++ if (cl->prio.child) {
++ qdisc_destroy(cl->prio.child);
++ cl->prio.child = NULL;
++ }
++
++ if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
++ pr_err(KBUILD_BASENAME
++ " : %s : error releasing the LFQ %d\n",
++ __func__, cl->prio.lfq->idx);
++
++ if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq))
++ pr_err(KBUILD_BASENAME
++ " : %s : error releasing the CQ %d\n",
++ __func__, cl->prio.cq->idx);
++
++ if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg))
++ pr_err(KBUILD_BASENAME
++ " : %s : error releasing the CCG %d\n",
++ __func__, cl->prio.ccg->idx);
++
++ kfree(cl->prio.fq);
++
++ if (cl->prio.cstats)
++ free_percpu(cl->prio.cstats);
++
++ break;
++
++ case CEETM_WBFS:
++ if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
++ pr_err(KBUILD_BASENAME
++ " : %s : error releasing the LFQ %d\n",
++ __func__, cl->wbfs.lfq->idx);
++
++ if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq))
++ pr_err(KBUILD_BASENAME
++ " : %s : error releasing the CQ %d\n",
++ __func__, cl->wbfs.cq->idx);
++
++ if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg))
++ pr_err(KBUILD_BASENAME
++ " : %s : error releasing the CCG %d\n",
++ __func__, cl->wbfs.ccg->idx);
++
++ kfree(cl->wbfs.fq);
++
++ if (cl->wbfs.cstats)
++ free_percpu(cl->wbfs.cstats);
++ }
++
++ tcf_destroy_chain(&cl->filter_list);
++ kfree(cl);
++}
++
++/* Destroy a ceetm qdisc */
++static void ceetm_destroy(struct Qdisc *sch)
++{
++ unsigned int ntx, i;
++ struct hlist_node *next;
++ struct ceetm_class *cl;
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
++ __func__, sch->handle);
++
++ /* All filters need to be removed before destroying the classes */
++ tcf_destroy_chain(&priv->filter_list);
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
++ tcf_destroy_chain(&cl->filter_list);
++ }
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
++ common.hnode)
++ ceetm_cls_destroy(sch, cl);
++ }
++
++ qdisc_class_hash_destroy(&priv->clhash);
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ dpa_disable_ceetm(dev);
++
++ if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni))
++ pr_err(KBUILD_BASENAME
++ " : %s : error releasing the LNI %d\n",
++ __func__, priv->root.lni->idx);
++
++ if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp))
++ pr_err(KBUILD_BASENAME
++ " : %s : error releasing the SP %d\n",
++ __func__, priv->root.sp->idx);
++
++ if (priv->root.qstats)
++ free_percpu(priv->root.qstats);
++
++ if (!priv->root.qdiscs)
++ break;
++
++ /* Remove the pfifo qdiscs */
++ for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
++ if (priv->root.qdiscs[ntx])
++ qdisc_destroy(priv->root.qdiscs[ntx]);
++
++ kfree(priv->root.qdiscs);
++ break;
++
++ case CEETM_PRIO:
++ if (priv->prio.parent)
++ priv->prio.parent->root.child = NULL;
++ break;
++
++ case CEETM_WBFS:
++ if (priv->wbfs.parent)
++ priv->wbfs.parent->prio.child = NULL;
++ break;
++ }
++}
++
++static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
++{
++ struct Qdisc *qdisc;
++ unsigned int ntx, i;
++ struct nlattr *nest;
++ struct tc_ceetm_qopt qopt;
++ struct ceetm_qdisc_stats *qstats;
++ struct net_device *dev = qdisc_dev(sch);
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ sch_tree_lock(sch);
++ memset(&qopt, 0, sizeof(qopt));
++ qopt.type = priv->type;
++ qopt.shaped = priv->shaped;
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ /* Gather statistics from the underlying pfifo qdiscs */
++ sch->q.qlen = 0;
++ memset(&sch->bstats, 0, sizeof(sch->bstats));
++ memset(&sch->qstats, 0, sizeof(sch->qstats));
++
++ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
++ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
++ sch->q.qlen += qdisc->q.qlen;
++ sch->bstats.bytes += qdisc->bstats.bytes;
++ sch->bstats.packets += qdisc->bstats.packets;
++ sch->qstats.qlen += qdisc->qstats.qlen;
++ sch->qstats.backlog += qdisc->qstats.backlog;
++ sch->qstats.drops += qdisc->qstats.drops;
++ sch->qstats.requeues += qdisc->qstats.requeues;
++ sch->qstats.overlimits += qdisc->qstats.overlimits;
++ }
++
++ for_each_online_cpu(i) {
++ qstats = per_cpu_ptr(priv->root.qstats, i);
++ sch->qstats.drops += qstats->drops;
++ }
++
++ qopt.rate = priv->root.rate;
++ qopt.ceil = priv->root.ceil;
++ qopt.overhead = priv->root.overhead;
++ break;
++
++ case CEETM_PRIO:
++ qopt.qcount = priv->prio.qcount;
++ break;
++
++ case CEETM_WBFS:
++ qopt.qcount = priv->wbfs.qcount;
++ qopt.cr = priv->wbfs.cr;
++ qopt.er = priv->wbfs.er;
++ break;
++
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ sch_tree_unlock(sch);
++ return -EINVAL;
++ }
++
++ nest = nla_nest_start(skb, TCA_OPTIONS);
++ if (!nest)
++ goto nla_put_failure;
++ if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt))
++ goto nla_put_failure;
++ nla_nest_end(skb, nest);
++
++ sch_tree_unlock(sch);
++ return skb->len;
++
++nla_put_failure:
++ sch_tree_unlock(sch);
++ nla_nest_cancel(skb, nest);
++ return -EMSGSIZE;
++}
++
++/* Configure a root ceetm qdisc */
++static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
++ struct tc_ceetm_qopt *qopt)
++{
++ struct netdev_queue *dev_queue;
++ struct Qdisc *qdisc;
++ enum qm_dc_portal dcp_id;
++ unsigned int i, sp_id, parent_id;
++ int err;
++ u64 bps;
++ struct qm_ceetm_sp *sp;
++ struct qm_ceetm_lni *lni;
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
++ struct mac_device *mac_dev = dpa_priv->mac_dev;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ /* Validate inputs */
++ if (sch->parent != TC_H_ROOT) {
++ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
++ tcf_destroy_chain(&priv->filter_list);
++ qdisc_class_hash_destroy(&priv->clhash);
++ return -EINVAL;
++ }
++
++ if (!mac_dev) {
++ pr_err("CEETM: the interface is lacking a mac\n");
++ err = -EINVAL;
++ goto err_init_root;
++ }
++
++ /* pre-allocate underlying pfifo qdiscs */
++ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
++ sizeof(priv->root.qdiscs[0]),
++ GFP_KERNEL);
++ if (!priv->root.qdiscs) {
++ err = -ENOMEM;
++ goto err_init_root;
++ }
++
++ for (i = 0; i < dev->num_tx_queues; i++) {
++ dev_queue = netdev_get_tx_queue(dev, i);
++ parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
++ TC_H_MIN(i + PFIFO_MIN_OFFSET));
++
++ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
++ parent_id);
++ if (!qdisc) {
++ err = -ENOMEM;
++ goto err_init_root;
++ }
++
++ priv->root.qdiscs[i] = qdisc;
++ qdisc->flags |= TCQ_F_ONETXQUEUE;
++ }
++
++ sch->flags |= TCQ_F_MQROOT;
++
++ priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats);
++ if (!priv->root.qstats) {
++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++ __func__);
++ err = -ENOMEM;
++ goto err_init_root;
++ }
++
++ priv->shaped = qopt->shaped;
++ priv->root.rate = qopt->rate;
++ priv->root.ceil = qopt->ceil;
++ priv->root.overhead = qopt->overhead;
++
++ /* Claim the SP */
++ get_dcp_and_sp(dev, &dcp_id, &sp_id);
++ err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
++ __func__);
++ goto err_init_root;
++ }
++
++ priv->root.sp = sp;
++
++ /* Claim the LNI - will use the same id as the SP id since SPs 0-7
++ * are connected to the TX FMan ports
++ */
++ err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
++ __func__);
++ goto err_init_root;
++ }
++
++ priv->root.lni = lni;
++
++ err = qman_ceetm_sp_set_lni(sp, lni);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to link the SP and LNI\n",
++ __func__);
++ goto err_init_root;
++ }
++
++ lni->sp = sp;
++
++ /* Configure the LNI shaper */
++ if (priv->shaped) {
++ err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
++ __func__);
++ goto err_init_root;
++ }
++
++ bps = priv->root.rate << 3; /* Bps -> bps */
++ err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
++ __func__);
++ goto err_init_root;
++ }
++
++ bps = priv->root.ceil << 3; /* Bps -> bps */
++ err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
++ __func__);
++ goto err_init_root;
++ }
++ }
++
++ /* TODO default configuration */
++
++ dpa_enable_ceetm(dev);
++ return 0;
++
++err_init_root:
++ ceetm_destroy(sch);
++ return err;
++}
++
++/* Configure a prio ceetm qdisc */
++static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
++ struct tc_ceetm_qopt *qopt)
++{
++ int err;
++ unsigned int i;
++ struct ceetm_class *parent_cl, *child_cl;
++ struct Qdisc *parent_qdisc;
++ struct net_device *dev = qdisc_dev(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ if (sch->parent == TC_H_ROOT) {
++ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
++
++ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
++ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
++
++ /* Obtain the parent root ceetm_class */
++ parent_cl = ceetm_find(sch->parent, parent_qdisc);
++
++ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
++		pr_err("CEETM: a prio ceetm qdisc can be added only under a root ceetm class\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
++
++ priv->prio.parent = parent_cl;
++ parent_cl->root.child = sch;
++
++ priv->shaped = parent_cl->shaped;
++ priv->prio.qcount = qopt->qcount;
++
++ /* Create and configure qcount child classes */
++ for (i = 0; i < priv->prio.qcount; i++) {
++ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
++ if (!child_cl) {
++ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
++ __func__);
++ err = -ENOMEM;
++ goto err_init_prio;
++ }
++
++ child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
++ if (!child_cl->prio.cstats) {
++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++ __func__);
++ err = -ENOMEM;
++ goto err_init_prio_cls;
++ }
++
++ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
++ child_cl->refcnt = 1;
++ child_cl->parent = sch;
++ child_cl->type = CEETM_PRIO;
++ child_cl->shaped = priv->shaped;
++ child_cl->prio.child = NULL;
++
++ /* All shaped CQs have CR and ER enabled by default */
++ child_cl->prio.cr = child_cl->shaped;
++ child_cl->prio.er = child_cl->shaped;
++ child_cl->prio.fq = NULL;
++ child_cl->prio.cq = NULL;
++
++ /* Configure the corresponding hardware CQ */
++ err = ceetm_config_prio_cls(child_cl, dev,
++ parent_cl->root.ch, i);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
++ __func__, child_cl->common.classid);
++ goto err_init_prio_cls;
++ }
++
++ /* Add class handle in Qdisc */
++ ceetm_link_class(sch, &priv->clhash, &child_cl->common);
++ pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X associated with CQ %d and CCG %d\n",
++ __func__, child_cl->common.classid,
++ child_cl->prio.cq->idx, child_cl->prio.ccg->idx);
++ }
++
++ return 0;
++
++err_init_prio_cls:
++ ceetm_cls_destroy(sch, child_cl);
++err_init_prio:
++ ceetm_destroy(sch);
++ return err;
++}
++
++/* Configure a wbfs ceetm qdisc */
++static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
++ struct tc_ceetm_qopt *qopt)
++{
++ int err, group_b, small_group;
++ unsigned int i, id, prio_a, prio_b;
++ struct ceetm_class *parent_cl, *child_cl, *root_cl;
++ struct Qdisc *parent_qdisc;
++ struct ceetm_qdisc *parent_priv;
++ struct qm_ceetm_channel *channel;
++ struct net_device *dev = qdisc_dev(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ /* Validate inputs */
++ if (sch->parent == TC_H_ROOT) {
++		pr_err("CEETM: a wbfs ceetm qdisc can not be root\n");
++ err = -EINVAL;
++ goto err_init_wbfs;
++ }
++
++ /* Obtain the parent prio ceetm qdisc */
++ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
++ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
++ err = -EINVAL;
++ goto err_init_wbfs;
++ }
++
++ /* Obtain the parent prio ceetm class */
++ parent_cl = ceetm_find(sch->parent, parent_qdisc);
++ parent_priv = qdisc_priv(parent_qdisc);
++
++ if (!parent_cl || parent_cl->type != CEETM_PRIO) {
++		pr_err("CEETM: a wbfs ceetm qdisc can be added only under a prio ceetm class\n");
++ err = -EINVAL;
++ goto err_init_wbfs;
++ }
++
++ if (!qopt->qcount || !qopt->qweight[0]) {
++ pr_err("CEETM: qcount and qweight are mandatory for a wbfs ceetm qdisc\n");
++ err = -EINVAL;
++ goto err_init_wbfs;
++ }
++
++ priv->shaped = parent_cl->shaped;
++
++ if (!priv->shaped && (qopt->cr || qopt->er)) {
++ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
++ err = -EINVAL;
++ goto err_init_wbfs;
++ }
++
++ if (priv->shaped && !(qopt->cr || qopt->er)) {
++ pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
++ err = -EINVAL;
++ goto err_init_wbfs;
++ }
++
++ /* Obtain the parent root ceetm class */
++ root_cl = parent_priv->prio.parent;
++ if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b) ||
++ root_cl->root.wbfs_grp_large) {
++ pr_err("CEETM: no more wbfs classes are available\n");
++ err = -EINVAL;
++ goto err_init_wbfs;
++ }
++
++ if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b) &&
++ qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
++ pr_err("CEETM: only %d wbfs classes are available\n",
++ CEETM_MIN_WBFS_QCOUNT);
++ err = -EINVAL;
++ goto err_init_wbfs;
++ }
++
++ priv->wbfs.parent = parent_cl;
++ parent_cl->prio.child = sch;
++
++ priv->wbfs.qcount = qopt->qcount;
++ priv->wbfs.cr = qopt->cr;
++ priv->wbfs.er = qopt->er;
++
++ channel = root_cl->root.ch;
++
++ /* Configure the hardware wbfs channel groups */
++ if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
++ /* Configure the large group A */
++ priv->wbfs.group_type = WBFS_GRP_LARGE;
++ small_group = false;
++ group_b = false;
++ prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
++ prio_b = prio_a;
++
++ } else if (root_cl->root.wbfs_grp_a) {
++ /* Configure the group B */
++ priv->wbfs.group_type = WBFS_GRP_B;
++
++ err = qman_ceetm_channel_get_group(channel, &small_group,
++ &prio_a, &prio_b);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
++ __func__);
++ goto err_init_wbfs;
++ }
++
++ small_group = true;
++ group_b = true;
++ prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
++		/* If group A isn't configured yet, mirror group B's priority */
++ prio_a = prio_a ? : prio_b;
++
++ } else {
++ /* Configure the small group A */
++ priv->wbfs.group_type = WBFS_GRP_A;
++
++ err = qman_ceetm_channel_get_group(channel, &small_group,
++ &prio_a, &prio_b);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
++ __func__);
++ goto err_init_wbfs;
++ }
++
++ small_group = true;
++ group_b = false;
++ prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
++		/* If group B isn't configured yet, mirror group A's priority */
++ prio_b = prio_b ? : prio_a;
++ }
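++	/* To recap (informative): a channel holds either one large class
++	 * group of CEETM_MAX_WBFS_QCOUNT queues, or two small groups (A and
++	 * B) of up to CEETM_MIN_WBFS_QCOUNT queues each; each group's
++	 * priority is derived from the minor id of its parent prio class.
++	 */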
++
++ err = qman_ceetm_channel_set_group(channel, small_group, prio_a,
++ prio_b);
++ if (err)
++ goto err_init_wbfs;
++
++ if (priv->shaped) {
++ err = qman_ceetm_channel_set_group_cr_eligibility(channel,
++ group_b,
++ priv->wbfs.cr);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to set group CR eligibility\n",
++ __func__);
++ goto err_init_wbfs;
++ }
++
++ err = qman_ceetm_channel_set_group_er_eligibility(channel,
++ group_b,
++ priv->wbfs.er);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to set group ER eligibility\n",
++ __func__);
++ goto err_init_wbfs;
++ }
++ }
++
++ /* Create qcount child classes */
++ for (i = 0; i < priv->wbfs.qcount; i++) {
++ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
++ if (!child_cl) {
++ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
++ __func__);
++ err = -ENOMEM;
++ goto err_init_wbfs;
++ }
++
++ child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
++ if (!child_cl->wbfs.cstats) {
++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++ __func__);
++ err = -ENOMEM;
++ goto err_init_wbfs_cls;
++ }
++
++ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
++ child_cl->refcnt = 1;
++ child_cl->parent = sch;
++ child_cl->type = CEETM_WBFS;
++ child_cl->shaped = priv->shaped;
++ child_cl->wbfs.fq = NULL;
++ child_cl->wbfs.cq = NULL;
++ child_cl->wbfs.weight = qopt->qweight[i];
++
++ if (priv->wbfs.group_type == WBFS_GRP_B)
++ id = WBFS_GRP_B_OFFSET + i;
++ else
++ id = WBFS_GRP_A_OFFSET + i;
++
++ err = ceetm_config_wbfs_cls(child_cl, dev, channel, id,
++ priv->wbfs.group_type);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
++ __func__, child_cl->common.classid);
++ goto err_init_wbfs_cls;
++ }
++
++ /* Add class handle in Qdisc */
++ ceetm_link_class(sch, &priv->clhash, &child_cl->common);
++ pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X associated with CQ %d and CCG %d\n",
++ __func__, child_cl->common.classid,
++ child_cl->wbfs.cq->idx, child_cl->wbfs.ccg->idx);
++ }
++
++ /* Signal the root class that a group has been configured */
++ switch (priv->wbfs.group_type) {
++ case WBFS_GRP_LARGE:
++ root_cl->root.wbfs_grp_large = true;
++ break;
++ case WBFS_GRP_A:
++ root_cl->root.wbfs_grp_a = true;
++ break;
++ case WBFS_GRP_B:
++ root_cl->root.wbfs_grp_b = true;
++ break;
++ }
++
++ return 0;
++
++err_init_wbfs_cls:
++ ceetm_cls_destroy(sch, child_cl);
++err_init_wbfs:
++ ceetm_destroy(sch);
++ return err;
++}
++
++/* Configure a generic ceetm qdisc */
++static int ceetm_init(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct tc_ceetm_qopt *qopt;
++ struct nlattr *tb[TCA_CEETM_QOPS + 1];
++ int ret;
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ if (!netif_is_multiqueue(dev))
++ return -EOPNOTSUPP;
++
++ if (!opt) {
++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
++ return -EINVAL;
++ }
++
++ ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
++ if (ret < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
++ return ret;
++ }
++
++ if (!tb[TCA_CEETM_QOPS]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
++ return -EINVAL;
++ }
++
++ if (TC_H_MIN(sch->handle)) {
++ pr_err("CEETM: a qdisc should not have a minor\n");
++ return -EINVAL;
++ }
++
++ qopt = nla_data(tb[TCA_CEETM_QOPS]);
++
++ /* Initialize the class hash list. Each qdisc has its own class hash */
++ ret = qdisc_class_hash_init(&priv->clhash);
++ if (ret < 0) {
++ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
++ __func__);
++ return ret;
++ }
++
++ priv->type = qopt->type;
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ ret = ceetm_init_root(sch, priv, qopt);
++ break;
++ case CEETM_PRIO:
++ ret = ceetm_init_prio(sch, priv, qopt);
++ break;
++ case CEETM_WBFS:
++ ret = ceetm_init_wbfs(sch, priv, qopt);
++ break;
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ ceetm_destroy(sch);
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/* Edit a root ceetm qdisc */
++static int ceetm_change_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
++ struct net_device *dev,
++ struct tc_ceetm_qopt *qopt)
++{
++ int err = 0;
++ u64 bps;
++
++ if (priv->shaped != (bool)qopt->shaped) {
++ pr_err("CEETM: qdisc %X is %s\n", sch->handle,
++ priv->shaped ? "shaped" : "unshaped");
++ return -EINVAL;
++ }
++
++ /* Nothing to modify for unshaped qdiscs */
++ if (!priv->shaped)
++ return 0;
++
++ /* Configure the LNI shaper */
++ if (priv->root.overhead != qopt->overhead) {
++ err = qman_ceetm_lni_enable_shaper(priv->root.lni, 1,
++ qopt->overhead);
++ if (err)
++ goto change_err;
++ priv->root.overhead = qopt->overhead;
++ }
++
++ if (priv->root.rate != qopt->rate) {
++ bps = qopt->rate << 3; /* Bps -> bps */
++ err = qman_ceetm_lni_set_commit_rate_bps(priv->root.lni, bps,
++ dev->mtu);
++ if (err)
++ goto change_err;
++ priv->root.rate = qopt->rate;
++ }
++
++ if (priv->root.ceil != qopt->ceil) {
++ bps = qopt->ceil << 3; /* Bps -> bps */
++ err = qman_ceetm_lni_set_excess_rate_bps(priv->root.lni, bps,
++ dev->mtu);
++ if (err)
++ goto change_err;
++ priv->root.ceil = qopt->ceil;
++ }
++
++ return 0;
++
++change_err:
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the root ceetm qdisc %X\n",
++ __func__, sch->handle);
++ return err;
++}
++
++/* Edit a wbfs ceetm qdisc */
++static int ceetm_change_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
++ struct tc_ceetm_qopt *qopt)
++{
++ int err;
++ bool group_b;
++ struct qm_ceetm_channel *channel;
++ struct ceetm_class *prio_class, *root_class;
++ struct ceetm_qdisc *prio_qdisc;
++
++ if (qopt->qcount) {
++ pr_err("CEETM: the qcount can not be modified\n");
++ return -EINVAL;
++ }
++
++ if (qopt->qweight[0]) {
++ pr_err("CEETM: the qweight can be modified through the wbfs classes\n");
++ return -EINVAL;
++ }
++
++ if (!priv->shaped && (qopt->cr || qopt->er)) {
++ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
++ return -EINVAL;
++ }
++
++ if (priv->shaped && !(qopt->cr || qopt->er)) {
++ pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
++ return -EINVAL;
++ }
++
++ /* Nothing to modify for unshaped qdiscs */
++ if (!priv->shaped)
++ return 0;
++
++ prio_class = priv->wbfs.parent;
++ prio_qdisc = qdisc_priv(prio_class->parent);
++ root_class = prio_qdisc->prio.parent;
++ channel = root_class->root.ch;
++ group_b = priv->wbfs.group_type == WBFS_GRP_B;
++
++ if (qopt->cr != priv->wbfs.cr) {
++ err = qman_ceetm_channel_set_group_cr_eligibility(channel,
++ group_b,
++ qopt->cr);
++ if (err)
++ goto change_err;
++ priv->wbfs.cr = qopt->cr;
++ }
++
++ if (qopt->er != priv->wbfs.er) {
++ err = qman_ceetm_channel_set_group_er_eligibility(channel,
++ group_b,
++ qopt->er);
++ if (err)
++ goto change_err;
++ priv->wbfs.er = qopt->er;
++ }
++
++ return 0;
++
++change_err:
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the wbfs ceetm qdisc %X\n",
++ __func__, sch->handle);
++ return err;
++}
++
++/* Edit a ceetm qdisc */
++static int ceetm_change(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct tc_ceetm_qopt *qopt;
++ struct nlattr *tb[TCA_CEETM_QOPS + 1];
++ int ret;
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
++ if (ret < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
++ return ret;
++ }
++
++ if (!tb[TCA_CEETM_QOPS]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
++ return -EINVAL;
++ }
++
++ if (TC_H_MIN(sch->handle)) {
++ pr_err("CEETM: a qdisc should not have a minor\n");
++ return -EINVAL;
++ }
++
++ qopt = nla_data(tb[TCA_CEETM_QOPS]);
++
++ if (priv->type != qopt->type) {
++ pr_err("CEETM: qdisc %X is not of the provided type\n",
++ sch->handle);
++ return -EINVAL;
++ }
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ ret = ceetm_change_root(sch, priv, dev, qopt);
++ break;
++ case CEETM_PRIO:
++ pr_err("CEETM: prio qdiscs can not be modified\n");
++ ret = -EINVAL;
++ break;
++ case CEETM_WBFS:
++ ret = ceetm_change_wbfs(sch, priv, qopt);
++ break;
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/* Attach the underlying pfifo qdiscs */
++static void ceetm_attach(struct Qdisc *sch)
++{
++ struct net_device *dev = qdisc_dev(sch);
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct Qdisc *qdisc, *old_qdisc;
++ unsigned int i;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ for (i = 0; i < dev->num_tx_queues; i++) {
++ qdisc = priv->root.qdiscs[i];
++ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
++ if (old_qdisc)
++ qdisc_destroy(old_qdisc);
++ }
++}
++
++static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid)
++{
++ struct ceetm_class *cl;
++
++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
++ __func__, classid, sch->handle);
++ cl = ceetm_find(classid, sch);
++
++ if (cl)
++ cl->refcnt++; /* Will decrement in put() */
++ return (unsigned long)cl;
++}
++
++static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
++{
++ struct ceetm_class *cl = (struct ceetm_class *)arg;
++
++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++ cl->refcnt--;
++
++ if (cl->refcnt == 0)
++ ceetm_cls_destroy(sch, cl);
++}
++
++static int ceetm_cls_change_root(struct ceetm_class *cl,
++ struct tc_ceetm_copt *copt,
++ struct net_device *dev)
++{
++ int err;
++ u64 bps;
++
++ if ((bool)copt->shaped != cl->shaped) {
++ pr_err("CEETM: class %X is %s\n", cl->common.classid,
++ cl->shaped ? "shaped" : "unshaped");
++ return -EINVAL;
++ }
++
++ if (cl->shaped && cl->root.rate != copt->rate) {
++ bps = copt->rate << 3; /* Bps -> bps */
++ err = qman_ceetm_channel_set_commit_rate_bps(cl->root.ch, bps,
++ dev->mtu);
++ if (err)
++ goto change_cls_err;
++ cl->root.rate = copt->rate;
++ }
++
++ if (cl->shaped && cl->root.ceil != copt->ceil) {
++ bps = copt->ceil << 3; /* Bps -> bps */
++ err = qman_ceetm_channel_set_excess_rate_bps(cl->root.ch, bps,
++ dev->mtu);
++ if (err)
++ goto change_cls_err;
++ cl->root.ceil = copt->ceil;
++ }
++
++ if (!cl->shaped && cl->root.tbl != copt->tbl) {
++ err = qman_ceetm_channel_set_weight(cl->root.ch, copt->tbl);
++ if (err)
++ goto change_cls_err;
++ cl->root.tbl = copt->tbl;
++ }
++
++ return 0;
++
++change_cls_err:
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm root class %X\n",
++ __func__, cl->common.classid);
++ return err;
++}
++
++static int ceetm_cls_change_prio(struct ceetm_class *cl,
++ struct tc_ceetm_copt *copt)
++{
++ int err;
++
++ if (!cl->shaped && (copt->cr || copt->er)) {
++ pr_err("CEETM: only shaped classes can have CR and ER enabled\n");
++ return -EINVAL;
++ }
++
++ if (cl->prio.cr != (bool)copt->cr) {
++ err = qman_ceetm_channel_set_cq_cr_eligibility(
++ cl->prio.cq->parent,
++ cl->prio.cq->idx,
++ copt->cr);
++ if (err)
++ goto change_cls_err;
++ cl->prio.cr = copt->cr;
++ }
++
++ if (cl->prio.er != (bool)copt->er) {
++ err = qman_ceetm_channel_set_cq_er_eligibility(
++ cl->prio.cq->parent,
++ cl->prio.cq->idx,
++ copt->er);
++ if (err)
++ goto change_cls_err;
++ cl->prio.er = copt->er;
++ }
++
++ return 0;
++
++change_cls_err:
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
++ __func__, cl->common.classid);
++ return err;
++}
++
++static int ceetm_cls_change_wbfs(struct ceetm_class *cl,
++ struct tc_ceetm_copt *copt)
++{
++ int err;
++
++ if (copt->weight != cl->wbfs.weight) {
++ /* Configure the CQ weight: real number multiplied by 100 to
++ * get rid of the fraction
++ */
++ err = qman_ceetm_set_queue_weight_in_ratio(cl->wbfs.cq,
++ copt->weight * 100);
++
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
++ __func__, cl->common.classid);
++ return err;
++ }
++
++ cl->wbfs.weight = copt->weight;
++ }
++
++ return 0;
++}
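++
++/* Illustrative example (not from the original source): two wbfs classes
++ * configured with weights 1 and 2 end up with scheduler ratios of 100
++ * and 200 after the scaling above, i.e. a 1:2 bandwidth split.
++ */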
++
++/* Add a ceetm root class or configure a ceetm root/prio/wbfs class */
++static int ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
++ struct nlattr **tca, unsigned long *arg)
++{
++ int err;
++ u64 bps;
++ struct ceetm_qdisc *priv;
++ struct ceetm_class *cl = (struct ceetm_class *)*arg;
++ struct nlattr *opt = tca[TCA_OPTIONS];
++ struct nlattr *tb[__TCA_CEETM_MAX];
++ struct tc_ceetm_copt *copt;
++ struct qm_ceetm_channel *channel;
++ struct net_device *dev = qdisc_dev(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
++ __func__, classid, sch->handle);
++
++ if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) {
++		pr_err("CEETM: a ceetm class cannot be attached to other qdisc/class types\n");
++ return -EINVAL;
++ }
++
++ priv = qdisc_priv(sch);
++
++ if (!opt) {
++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
++ return -EINVAL;
++ }
++
++ if (!cl && sch->handle != parentid) {
++ pr_err("CEETM: classes can be attached to the root ceetm qdisc only\n");
++ return -EINVAL;
++ }
++
++ if (!cl && priv->type != CEETM_ROOT) {
++ pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
++ return -EINVAL;
++ }
++
++ err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
++ return -EINVAL;
++ }
++
++ if (!tb[TCA_CEETM_COPT]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
++ return -EINVAL;
++ }
++
++ if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) {
++ pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm root classes\n");
++ return -EINVAL;
++ }
++
++ copt = nla_data(tb[TCA_CEETM_COPT]);
++
++ /* Configure an existing ceetm class */
++ if (cl) {
++ if (copt->type != cl->type) {
++ pr_err("CEETM: class %X is not of the provided type\n",
++ cl->common.classid);
++ return -EINVAL;
++ }
++
++ switch (copt->type) {
++ case CEETM_ROOT:
++ return ceetm_cls_change_root(cl, copt, dev);
++
++ case CEETM_PRIO:
++ return ceetm_cls_change_prio(cl, copt);
++
++ case CEETM_WBFS:
++ return ceetm_cls_change_wbfs(cl, copt);
++
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid class\n",
++ __func__);
++ return -EINVAL;
++ }
++ }
++
++ /* Add a new root ceetm class */
++ if (copt->type != CEETM_ROOT) {
++ pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
++ return -EINVAL;
++ }
++
++ if (copt->shaped && !priv->shaped) {
++		pr_err("CEETM: cannot add a shaped ceetm root class under an unshaped ceetm root qdisc\n");
++ return -EINVAL;
++ }
++
++ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
++ if (!cl)
++ return -ENOMEM;
++
++ cl->type = copt->type;
++ cl->shaped = copt->shaped;
++ cl->root.rate = copt->rate;
++ cl->root.ceil = copt->ceil;
++ cl->root.tbl = copt->tbl;
++
++ cl->common.classid = classid;
++ cl->refcnt = 1;
++ cl->parent = sch;
++ cl->root.child = NULL;
++ cl->root.wbfs_grp_a = false;
++ cl->root.wbfs_grp_b = false;
++ cl->root.wbfs_grp_large = false;
++
++ /* Claim a CEETM channel */
++ err = qman_ceetm_channel_claim(&channel, priv->root.lni);
++ if (err) {
++ pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n",
++ __func__);
++ goto claim_err;
++ }
++
++ cl->root.ch = channel;
++
++ if (cl->shaped) {
++ /* Configure the channel shaper */
++ err = qman_ceetm_channel_enable_shaper(channel, 1);
++ if (err)
++ goto channel_err;
++
++ bps = cl->root.rate << 3; /* Bps -> bps */
++ err = qman_ceetm_channel_set_commit_rate_bps(channel, bps,
++ dev->mtu);
++ if (err)
++ goto channel_err;
++
++ bps = cl->root.ceil << 3; /* Bps -> bps */
++ err = qman_ceetm_channel_set_excess_rate_bps(channel, bps,
++ dev->mtu);
++ if (err)
++ goto channel_err;
++
++ } else {
++ /* Configure the uFQ algorithm */
++ err = qman_ceetm_channel_set_weight(channel, cl->root.tbl);
++ if (err)
++ goto channel_err;
++ }
++
++ /* Add class handle in Qdisc */
++ ceetm_link_class(sch, &priv->clhash, &cl->common);
++
++ pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with channel %d\n",
++ __func__, classid, channel->idx);
++ *arg = (unsigned long)cl;
++ return 0;
++
++channel_err:
++ pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n",
++ __func__, channel->idx);
++ if (qman_ceetm_channel_release(channel))
++ pr_err(KBUILD_BASENAME " : %s : failed to release the channel %d\n",
++ __func__, channel->idx);
++claim_err:
++ kfree(cl);
++ return err;
++}
++
++static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
++{
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct ceetm_class *cl;
++ unsigned int i;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ if (arg->stop)
++ return;
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
++ if (arg->count < arg->skip) {
++ arg->count++;
++ continue;
++ }
++ if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
++ arg->stop = 1;
++ return;
++ }
++ arg->count++;
++ }
++ }
++}
++
++static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
++ struct sk_buff *skb, struct tcmsg *tcm)
++{
++ struct ceetm_class *cl = (struct ceetm_class *)arg;
++ struct nlattr *nest;
++ struct tc_ceetm_copt copt;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ sch_tree_lock(sch);
++
++ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
++ tcm->tcm_handle = cl->common.classid;
++
++ memset(&copt, 0, sizeof(copt));
++
++ copt.shaped = cl->shaped;
++ copt.type = cl->type;
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ if (cl->root.child)
++ tcm->tcm_info = cl->root.child->handle;
++
++ copt.rate = cl->root.rate;
++ copt.ceil = cl->root.ceil;
++ copt.tbl = cl->root.tbl;
++ break;
++
++ case CEETM_PRIO:
++ if (cl->prio.child)
++ tcm->tcm_info = cl->prio.child->handle;
++
++ copt.cr = cl->prio.cr;
++ copt.er = cl->prio.er;
++ break;
++
++ case CEETM_WBFS:
++ copt.weight = cl->wbfs.weight;
++ break;
++ }
++
++ nest = nla_nest_start(skb, TCA_OPTIONS);
++ if (!nest)
++ goto nla_put_failure;
++ if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt))
++ goto nla_put_failure;
++ nla_nest_end(skb, nest);
++ sch_tree_unlock(sch);
++ return skb->len;
++
++nla_put_failure:
++ sch_tree_unlock(sch);
++ nla_nest_cancel(skb, nest);
++ return -EMSGSIZE;
++}
++
++static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
++{
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct ceetm_class *cl = (struct ceetm_class *)arg;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ sch_tree_lock(sch);
++ qdisc_class_hash_remove(&priv->clhash, &cl->common);
++ cl->refcnt--;
++
++ /* The refcnt should be at least 1 since we have incremented it in
++ * get(). Will decrement again in put() where we will call destroy()
++ * to actually free the memory if it reaches 0.
++ */
++ WARN_ON(cl->refcnt == 0);
++
++ sch_tree_unlock(sch);
++ return 0;
++}
++
++/* Get the class' child qdisc, if any */
++static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
++{
++ struct ceetm_class *cl = (struct ceetm_class *)arg;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ return cl->root.child;
++
++ case CEETM_PRIO:
++ return cl->prio.child;
++ }
++
++ return NULL;
++}
++
++static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
++ struct Qdisc *new, struct Qdisc **old)
++{
++ if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
++ struct gnet_dump *d)
++{
++ unsigned int i;
++ struct ceetm_class *cl = (struct ceetm_class *)arg;
++ struct gnet_stats_basic_packed tmp_bstats;
++ struct ceetm_class_stats *cstats = NULL;
++ struct qm_ceetm_cq *cq = NULL;
++ struct tc_ceetm_xstats xstats;
++
++ memset(&xstats, 0, sizeof(xstats));
++ memset(&tmp_bstats, 0, sizeof(tmp_bstats));
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ return 0;
++ case CEETM_PRIO:
++ cq = cl->prio.cq;
++ break;
++ case CEETM_WBFS:
++ cq = cl->wbfs.cq;
++ break;
++ }
++
++ for_each_online_cpu(i) {
++ switch (cl->type) {
++ case CEETM_PRIO:
++ cstats = per_cpu_ptr(cl->prio.cstats, i);
++ break;
++ case CEETM_WBFS:
++ cstats = per_cpu_ptr(cl->wbfs.cstats, i);
++ break;
++ }
++
++ if (cstats) {
++ xstats.ern_drop_count += cstats->ern_drop_count;
++ xstats.congested_count += cstats->congested_count;
++ tmp_bstats.bytes += cstats->bstats.bytes;
++ tmp_bstats.packets += cstats->bstats.packets;
++ }
++ }
++
++ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
++ d, NULL, &tmp_bstats) < 0)
++ return -1;
++
++ if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0,
++ &xstats.frame_count,
++ &xstats.byte_count))
++ return -1;
++
++ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
++}
++
++static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg)
++{
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct ceetm_class *cl = (struct ceetm_class *)arg;
++ struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++ return fl;
++}
++
++static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent,
++ u32 classid)
++{
++ struct ceetm_class *cl = ceetm_find(classid, sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++ return (unsigned long)cl;
++}
++
++static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
++{
++ struct ceetm_class *cl = (struct ceetm_class *)arg;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++}
++
++const struct Qdisc_class_ops ceetm_cls_ops = {
++ .graft = ceetm_cls_graft,
++ .leaf = ceetm_cls_leaf,
++ .get = ceetm_cls_get,
++ .put = ceetm_cls_put,
++ .change = ceetm_cls_change,
++ .delete = ceetm_cls_delete,
++ .walk = ceetm_cls_walk,
++ .tcf_chain = ceetm_tcf_chain,
++ .bind_tcf = ceetm_tcf_bind,
++ .unbind_tcf = ceetm_tcf_unbind,
++ .dump = ceetm_cls_dump,
++ .dump_stats = ceetm_cls_dump_stats,
++};
++
++struct Qdisc_ops ceetm_qdisc_ops __read_mostly = {
++ .id = "ceetm",
++ .priv_size = sizeof(struct ceetm_qdisc),
++ .cl_ops = &ceetm_cls_ops,
++ .init = ceetm_init,
++ .destroy = ceetm_destroy,
++ .change = ceetm_change,
++ .dump = ceetm_dump,
++ .attach = ceetm_attach,
++ .owner = THIS_MODULE,
++};
++
++/* Run the filters and classifiers attached to the qdisc on the provided skb */
++static struct ceetm_class *ceetm_classify(struct sk_buff *skb,
++ struct Qdisc *sch, int *qerr,
++ bool *act_drop)
++{
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct ceetm_class *cl = NULL, *wbfs_cl;
++ struct tcf_result res;
++ struct tcf_proto *tcf;
++ int result;
++
++ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
++ tcf = priv->filter_list;
++ while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
++#ifdef CONFIG_NET_CLS_ACT
++ switch (result) {
++ case TC_ACT_QUEUED:
++ case TC_ACT_STOLEN:
++			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
++			/* fall through */
++		case TC_ACT_SHOT:
++ /* No valid class found due to action */
++ *act_drop = true;
++ return NULL;
++ }
++#endif
++ cl = (void *)res.class;
++ if (!cl) {
++ if (res.classid == sch->handle) {
++ /* The filter leads to the qdisc */
++ /* TODO default qdisc */
++ return NULL;
++ }
++
++ cl = ceetm_find(res.classid, sch);
++ if (!cl)
++ /* The filter leads to an invalid class */
++ break;
++ }
++
++ /* The class might have its own filters attached */
++ tcf = cl->filter_list;
++ }
++
++ if (!cl) {
++ /* No valid class found */
++ /* TODO default qdisc */
++ return NULL;
++ }
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ if (cl->root.child) {
++ /* Run the prio qdisc classifiers */
++ return ceetm_classify(skb, cl->root.child, qerr,
++ act_drop);
++ } else {
++ /* The root class does not have a child prio qdisc */
++ /* TODO default qdisc */
++ return NULL;
++ }
++ case CEETM_PRIO:
++ if (cl->prio.child) {
++ /* If filters lead to a wbfs class, return it.
++ * Otherwise, return the prio class
++ */
++ wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr,
++ act_drop);
++ /* A NULL result might indicate either an erroneous
++ * filter, or no filters at all. We will assume the
++ * latter
++ */
++ return wbfs_cl ? : cl;
++ }
++ }
++
++ /* For wbfs and childless prio classes, return the class directly */
++ return cl;
++}
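++
++/* Classification walks the hierarchy top-down: filters on the root qdisc
++ * select a root (channel) class, the channel's prio qdisc selects a prio
++ * class, and the prio class' wbfs child qdisc (if any) may refine the
++ * choice further. The deepest class found is returned to ceetm_tx().
++ */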
++
++int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
++{
++ int ret;
++ bool act_drop = false;
++ struct Qdisc *sch = net_dev->qdisc;
++ struct ceetm_class *cl;
++ struct dpa_priv_s *priv_dpa;
++ struct qman_fq *egress_fq, *conf_fq;
++ struct ceetm_qdisc *priv = qdisc_priv(sch);
++ struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
++ struct ceetm_class_stats *cstats;
++ const int queue_mapping = dpa_get_queue_mapping(skb);
++ spinlock_t *root_lock = qdisc_lock(sch);
++
++ spin_lock(root_lock);
++ cl = ceetm_classify(skb, sch, &ret, &act_drop);
++ spin_unlock(root_lock);
++
++#ifdef CONFIG_NET_CLS_ACT
++ if (act_drop) {
++ if (ret & __NET_XMIT_BYPASS)
++ qstats->drops++;
++ goto drop;
++ }
++#endif
++ /* TODO default class */
++ if (unlikely(!cl)) {
++ qstats->drops++;
++ goto drop;
++ }
++
++ priv_dpa = netdev_priv(net_dev);
++ conf_fq = priv_dpa->conf_fqs[queue_mapping];
++
++ /* Choose the proper tx fq and update the basic stats (bytes and
++ * packets sent by the class)
++ */
++ switch (cl->type) {
++ case CEETM_PRIO:
++ egress_fq = &cl->prio.fq->fq;
++ cstats = this_cpu_ptr(cl->prio.cstats);
++ break;
++ case CEETM_WBFS:
++ egress_fq = &cl->wbfs.fq->fq;
++ cstats = this_cpu_ptr(cl->wbfs.cstats);
++ break;
++ default:
++ qstats->drops++;
++ goto drop;
++ }
++
++ bstats_update(&cstats->bstats, skb);
++ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
++
++drop:
++ dev_kfree_skb_any(skb);
++ return NET_XMIT_SUCCESS;
++}
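++
++/* Note on the fast path above: the qdisc root lock is held only while
++ * classifying; the frame is then enqueued directly to the selected
++ * class' CEETM egress FQ via dpa_tx_extended(), bypassing the generic
++ * qdisc dequeue machinery.
++ */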
++
++static int __init ceetm_register(void)
++{
++ int _errno = 0;
++
++ pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n");
++
++ _errno = register_qdisc(&ceetm_qdisc_ops);
++ if (unlikely(_errno))
++ pr_err(KBUILD_MODNAME
++ ": %s:%hu:%s(): register_qdisc() = %d\n",
++ KBUILD_BASENAME ".c", __LINE__, __func__, _errno);
++
++ return _errno;
++}
++
++static void __exit ceetm_unregister(void)
++{
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME ".c", __func__);
++
++ unregister_qdisc(&ceetm_qdisc_ops);
++}
++
++module_init(ceetm_register);
++module_exit(ceetm_unregister);
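++
++/* Illustrative configuration from userspace (hypothetical command lines;
++ * the exact "ceetm" option syntax depends on the matching iproute2 patch
++ * and is not defined in this file):
++ *
++ *	tc qdisc add dev fm1-mac1 root handle 1: ceetm type root
++ *	tc class add dev fm1-mac1 parent 1: classid 1:1 ceetm type root \
++ *		rate 1000mbit ceil 1000mbit
++ *	tc qdisc add dev fm1-mac1 parent 1:1 handle 2: ceetm type prio qcount 2
++ */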
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
+new file mode 100644
+index 00000000..63cc3475
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
+@@ -0,0 +1,237 @@
++/* Copyright 2008-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPAA_ETH_CEETM_H
++#define __DPAA_ETH_CEETM_H
++
++#include <net/pkt_sched.h>
++#include <net/pkt_cls.h>
++#include <net/netlink.h>
++#include <lnxwrp_fm.h>
++
++#include "mac.h"
++#include "dpaa_eth_common.h"
++
++/* Mask to determine the sub-portal id from a channel number */
++#define CHANNEL_SP_MASK 0x1f
++/* The number of the last channel that services DCP0, connected to FMan 0.
++ * Value validated for B4 and T series platforms.
++ */
++#define DCP0_MAX_CHANNEL 0x80f
++/* A2V=1 - field A2 is valid
++ * A0V=1 - field A0 is valid - enables frame confirmation
++ * OVOM=1 - override operation mode bits with values from A2
++ * EBD=1 - external buffers are deallocated at the end of the FMan flow
++ * NL=0 - the BMI releases all the internal buffers
++ */
++#define CEETM_CONTEXT_A 0x1a00000080000000
++/* The ratio between the superior and inferior congestion state thresholds. The
++ * lower threshold is set to 7/8 of the superior one (as the default for WQ
++ * scheduling).
++ */
++#define CEETM_CCGR_RATIO 0.875
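++/* Example: a superior threshold of 64000 bytes yields an inferior
++ * threshold of 64000 * 0.875 = 56000 bytes.
++ */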
++/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
++ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
++ * are reserved for the maximum 32 CEETM channels (majors and minors are in
++ * hex).
++ */
++#define PFIFO_MIN_OFFSET 0x21
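++/* Illustrative layout for a device with 8 Tx queues: root qdisc 1:0,
++ * channel classes 1:1 .. 1:20 (hex minors), pfifo qdiscs 1:21 .. 1:28.
++ */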
++
++/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */
++#define CEETM_MAX_PRIO_QCOUNT 8
++#define CEETM_MAX_WBFS_QCOUNT 8
++#define CEETM_MIN_WBFS_QCOUNT 4
++
++/* The id offsets of the CQs belonging to WBFS groups (ids 8-11/15 for group A
++ * and/or 12-15 for group B).
++ */
++#define WBFS_GRP_A_OFFSET 8
++#define WBFS_GRP_B_OFFSET 12
++
++#define WBFS_GRP_A 1
++#define WBFS_GRP_B 2
++#define WBFS_GRP_LARGE 3
++
++enum {
++ TCA_CEETM_UNSPEC,
++ TCA_CEETM_COPT,
++ TCA_CEETM_QOPS,
++ __TCA_CEETM_MAX,
++};
++
++/* CEETM configuration types */
++enum {
++ CEETM_ROOT = 1,
++ CEETM_PRIO,
++ CEETM_WBFS
++};
++
++#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1)
++extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1];
++
++struct ceetm_class;
++struct ceetm_qdisc_stats;
++struct ceetm_class_stats;
++
++struct ceetm_fq {
++ struct qman_fq fq;
++ struct net_device *net_dev;
++ struct ceetm_class *ceetm_cls;
++};
++
++struct root_q {
++ struct Qdisc **qdiscs;
++ __u16 overhead;
++ __u32 rate;
++ __u32 ceil;
++ struct qm_ceetm_sp *sp;
++ struct qm_ceetm_lni *lni;
++ struct ceetm_qdisc_stats __percpu *qstats;
++};
++
++struct prio_q {
++ __u16 qcount;
++ struct ceetm_class *parent;
++};
++
++struct wbfs_q {
++ __u16 qcount;
++ int group_type;
++ struct ceetm_class *parent;
++ __u16 cr;
++ __u16 er;
++};
++
++struct ceetm_qdisc {
++ int type; /* LNI/CHNL/WBFS */
++ bool shaped;
++ union {
++ struct root_q root;
++ struct prio_q prio;
++ struct wbfs_q wbfs;
++ };
++ struct Qdisc_class_hash clhash;
++ struct tcf_proto *filter_list; /* qdisc attached filters */
++};
++
++/* CEETM Qdisc configuration parameters */
++struct tc_ceetm_qopt {
++ __u32 type;
++ __u16 shaped;
++ __u16 qcount;
++ __u16 overhead;
++ __u32 rate;
++ __u32 ceil;
++ __u16 cr;
++ __u16 er;
++ __u8 qweight[CEETM_MAX_WBFS_QCOUNT];
++};
++
++struct root_c {
++ unsigned int rate;
++ unsigned int ceil;
++ unsigned int tbl;
++ bool wbfs_grp_a;
++ bool wbfs_grp_b;
++ bool wbfs_grp_large;
++ struct Qdisc *child;
++ struct qm_ceetm_channel *ch;
++};
++
++struct prio_c {
++ bool cr;
++ bool er;
++ struct ceetm_fq *fq; /* Hardware FQ instance Handle */
++ struct qm_ceetm_lfq *lfq;
++ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
++ struct qm_ceetm_ccg *ccg;
++ /* only one wbfs can be linked to one priority CQ */
++ struct Qdisc *child;
++ struct ceetm_class_stats __percpu *cstats;
++};
++
++struct wbfs_c {
++ __u8 weight; /* The weight of the class between 1 and 248 */
++ struct ceetm_fq *fq; /* Hardware FQ instance Handle */
++ struct qm_ceetm_lfq *lfq;
++ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
++ struct qm_ceetm_ccg *ccg;
++ struct ceetm_class_stats __percpu *cstats;
++};
++
++struct ceetm_class {
++ struct Qdisc_class_common common;
++ int refcnt; /* usage count of this class */
++ struct tcf_proto *filter_list; /* class attached filters */
++ struct Qdisc *parent;
++ bool shaped;
++ int type; /* ROOT/PRIO/WBFS */
++ union {
++ struct root_c root;
++ struct prio_c prio;
++ struct wbfs_c wbfs;
++ };
++};
++
++/* CEETM Class configuration parameters */
++struct tc_ceetm_copt {
++ __u32 type;
++ __u16 shaped;
++ __u32 rate;
++ __u32 ceil;
++ __u16 tbl;
++ __u16 cr;
++ __u16 er;
++ __u8 weight;
++};
++
++/* CEETM stats */
++struct ceetm_qdisc_stats {
++ __u32 drops;
++};
++
++struct ceetm_class_stats {
++ /* Software counters */
++ struct gnet_stats_basic_packed bstats;
++ __u32 ern_drop_count;
++ __u32 congested_count;
++};
++
++struct tc_ceetm_xstats {
++ __u32 ern_drop_count;
++ __u32 congested_count;
++ /* Hardware counters */
++ __u64 frame_count;
++ __u64 byte_count;
++};
++
++int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev);
++#endif
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
+new file mode 100644
+index 00000000..fbe61da2
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
+@@ -0,0 +1,1811 @@
++/* Copyright 2008-2013 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <linux/of_net.h>
++#include <linux/etherdevice.h>
++#include <linux/kthread.h>
++#include <linux/percpu.h>
++#include <linux/highmem.h>
++#include <linux/sort.h>
++#include <linux/fsl_qman.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <linux/if_vlan.h> /* vlan_eth_hdr */
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++#ifdef CONFIG_FSL_DPAA_1588
++#include "dpaa_1588.h"
++#endif
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++#include "dpaa_debugfs.h"
++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
++#include "mac.h"
++
++/* Size in bytes of the FQ taildrop threshold */
++#define DPA_FQ_TD 0x200000
++
++#ifdef CONFIG_PTP_1588_CLOCK_DPAA
++struct ptp_priv_s ptp_priv;
++#endif
++
++static struct dpa_bp *dpa_bp_array[64];
++
++int dpa_max_frm;
++EXPORT_SYMBOL(dpa_max_frm);
++
++int dpa_rx_extra_headroom;
++EXPORT_SYMBOL(dpa_rx_extra_headroom);
++
++int dpa_num_cpus = NR_CPUS;
++
++static const struct fqid_cell tx_confirm_fqids[] = {
++ {0, DPAA_ETH_TX_QUEUES}
++};
++
++static struct fqid_cell default_fqids[][3] = {
++ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
++ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
++};
++
++static const char fsl_qman_frame_queues[][25] = {
++ [RX] = "fsl,qman-frame-queues-rx",
++ [TX] = "fsl,qman-frame-queues-tx"
++};
++#ifdef CONFIG_FSL_DPAA_HOOKS
++/* A set of callbacks for hooking into the fastpath at different points. */
++struct dpaa_eth_hooks_s dpaa_eth_hooks;
++EXPORT_SYMBOL(dpaa_eth_hooks);
++/* This function should only be called on the probe paths, since it makes no
++ * effort to guarantee consistency of the destination hooks structure.
++ */
++void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
++{
++ if (hooks)
++ dpaa_eth_hooks = *hooks;
++ else
++ pr_err("NULL pointer to hooks!\n");
++}
++EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
++#endif
++
++int dpa_netdev_init(struct net_device *net_dev,
++ const uint8_t *mac_addr,
++ uint16_t tx_timeout)
++{
++ int err;
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++
++ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
++
++ net_dev->features |= net_dev->hw_features;
++ net_dev->vlan_features = net_dev->features;
++
++ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
++ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
++
++ net_dev->ethtool_ops = &dpa_ethtool_ops;
++
++ net_dev->needed_headroom = priv->tx_headroom;
++ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
++
++ err = register_netdev(net_dev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev() = %d\n", err);
++ return err;
++ }
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++ /* create debugfs entry for this net_device */
++ err = dpa_netdev_debugfs_create(net_dev);
++ if (err) {
++ unregister_netdev(net_dev);
++ return err;
++ }
++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
++
++ return 0;
++}
++EXPORT_SYMBOL(dpa_netdev_init);
++
++int __cold dpa_start(struct net_device *net_dev)
++{
++ int err, i;
++ struct dpa_priv_s *priv;
++ struct mac_device *mac_dev;
++
++ priv = netdev_priv(net_dev);
++ mac_dev = priv->mac_dev;
++
++ err = mac_dev->init_phy(net_dev, priv->mac_dev);
++ if (err < 0) {
++ if (netif_msg_ifup(priv))
++ netdev_err(net_dev, "init_phy() = %d\n", err);
++ return err;
++ }
++
++ for_each_port_device(i, mac_dev->port_dev) {
++ err = fm_port_enable(mac_dev->port_dev[i]);
++ if (err)
++ goto mac_start_failed;
++ }
++
++ err = priv->mac_dev->start(mac_dev);
++ if (err < 0) {
++ if (netif_msg_ifup(priv))
++ netdev_err(net_dev, "mac_dev->start() = %d\n", err);
++ goto mac_start_failed;
++ }
++
++ netif_tx_start_all_queues(net_dev);
++
++ return 0;
++
++mac_start_failed:
++ for_each_port_device(i, mac_dev->port_dev)
++ fm_port_disable(mac_dev->port_dev[i]);
++
++ return err;
++}
++EXPORT_SYMBOL(dpa_start);
++
++int __cold dpa_stop(struct net_device *net_dev)
++{
++ int _errno, i, err;
++ struct dpa_priv_s *priv;
++ struct mac_device *mac_dev;
++
++ priv = netdev_priv(net_dev);
++ mac_dev = priv->mac_dev;
++
++ netif_tx_stop_all_queues(net_dev);
++ /* Allow the Fman (Tx) port to process in-flight frames before we
++ * try switching it off.
++ */
++ usleep_range(5000, 10000);
++
++ _errno = mac_dev->stop(mac_dev);
++ if (unlikely(_errno < 0))
++ if (netif_msg_ifdown(priv))
++ netdev_err(net_dev, "mac_dev->stop() = %d\n",
++ _errno);
++
++ for_each_port_device(i, mac_dev->port_dev) {
++ err = fm_port_disable(mac_dev->port_dev[i]);
++ _errno = err ? err : _errno;
++ }
++
++ if (mac_dev->phy_dev)
++ phy_disconnect(mac_dev->phy_dev);
++ mac_dev->phy_dev = NULL;
++
++ return _errno;
++}
++EXPORT_SYMBOL(dpa_stop);
++
++void __cold dpa_timeout(struct net_device *net_dev)
++{
++ const struct dpa_priv_s *priv;
++ struct dpa_percpu_priv_s *percpu_priv;
++
++ priv = netdev_priv(net_dev);
++ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
++
++ if (netif_msg_timer(priv))
++ netdev_crit(net_dev, "Transmit timeout!\n");
++
++ percpu_priv->stats.tx_errors++;
++}
++EXPORT_SYMBOL(dpa_timeout);
++
++/* net_device */
++
++/**
++ * @param net_dev the device for which statistics are calculated
++ * @param stats the function fills this structure with the device's statistics
++ * @return the address of the structure containing the statistics
++ *
++ * Calculates the statistics for the given device by adding the statistics
++ * collected by each CPU. Relies on struct rtnl_link_stats64 being laid
++ * out as a plain array of u64 counters, which the summing loop exploits.
++ */
++void __cold
++dpa_get_stats64(struct net_device *net_dev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ u64 *cpustats;
++ u64 *netstats = (u64 *)stats;
++ int i, j;
++ struct dpa_percpu_priv_s *percpu_priv;
++ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
++
++ for_each_possible_cpu(i) {
++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
++
++ cpustats = (u64 *)&percpu_priv->stats;
++
++ for (j = 0; j < numstats; j++)
++ netstats[j] += cpustats[j];
++ }
++}
++EXPORT_SYMBOL(dpa_get_stats64);
++
++int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
++{
++ const int max_mtu = dpa_get_max_mtu();
++
++ /* Make sure we don't exceed the Ethernet controller's MAXFRM */
++ if (new_mtu < 68 || new_mtu > max_mtu) {
++ netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
++ new_mtu, 68, max_mtu);
++ return -EINVAL;
++ }
++ net_dev->mtu = new_mtu;
++
++ return 0;
++}
++EXPORT_SYMBOL(dpa_change_mtu);
++
++/* .ndo_init callback */
++int dpa_ndo_init(struct net_device *net_dev)
++{
++	/* If fsl_fm_max_frm is set to a higher value than the common 1500,
++ * we choose conservatively and let the user explicitly set a higher
++ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
++ * in the same LAN.
++ * If on the other hand fsl_fm_max_frm has been chosen below 1500,
++ * start with the maximum allowed.
++ */
++ int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
++
++ pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
++ net_dev->mtu = init_mtu;
++
++ return 0;
++}
++EXPORT_SYMBOL(dpa_ndo_init);
++
++int dpa_set_features(struct net_device *dev, netdev_features_t features)
++{
++ /* Not much to do here for now */
++ dev->features = features;
++ return 0;
++}
++EXPORT_SYMBOL(dpa_set_features);
++
++netdev_features_t dpa_fix_features(struct net_device *dev,
++ netdev_features_t features)
++{
++ netdev_features_t unsupported_features = 0;
++
++ /* In theory we should never be requested to enable features that
++ * we didn't set in netdev->features and netdev->hw_features at probe
++ * time, but double check just to be on the safe side.
++ * We don't support enabling Rx csum through ethtool yet
++ */
++ unsupported_features |= NETIF_F_RXCSUM;
++
++ features &= ~unsupported_features;
++
++ return features;
++}
++EXPORT_SYMBOL(dpa_fix_features);
++
++#ifdef CONFIG_FSL_DPAA_TS
++u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
++ const void *data)
++{
++ u64 *ts, ns;
++
++ ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
++ data);
++
++ if (!ts || *ts == 0)
++ return 0;
++
++ be64_to_cpus(ts);
++
++	/* Scale the raw timestamp to ns. The nominal frequency period is a
++	 * power of two, so a shift by DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT is
++	 * enough; a multiply by DPA_PTP_NOMINAL_FREQ_PERIOD_NS would be
++	 * needed otherwise.
++	 */
++ ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
++
++ return ns;
++}
++
++int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
++ struct skb_shared_hwtstamps *shhwtstamps, const void *data)
++{
++ u64 ns;
++
++ ns = dpa_get_timestamp_ns(priv, rx_tx, data);
++
++ if (ns == 0)
++ return -EINVAL;
++
++ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
++ shhwtstamps->hwtstamp = ns_to_ktime(ns);
++
++ return 0;
++}
++
++static void dpa_ts_tx_enable(struct net_device *dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(dev);
++ struct mac_device *mac_dev = priv->mac_dev;
++
++ if (mac_dev->fm_rtc_enable)
++ mac_dev->fm_rtc_enable(get_fm_handle(dev));
++ if (mac_dev->ptp_enable)
++ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
++
++ priv->ts_tx_en = true;
++}
++
++static void dpa_ts_tx_disable(struct net_device *dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(dev);
++
++#if 0
++/* the RTC might be needed by the Rx Ts, cannot disable here
++ * no separate ptp_disable API for Rx/Tx, cannot disable here
++ */
++ struct mac_device *mac_dev = priv->mac_dev;
++
++ if (mac_dev->fm_rtc_disable)
++ mac_dev->fm_rtc_disable(get_fm_handle(dev));
++
++ if (mac_dev->ptp_disable)
++ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
++#endif
++
++ priv->ts_tx_en = false;
++}
++
++static void dpa_ts_rx_enable(struct net_device *dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(dev);
++ struct mac_device *mac_dev = priv->mac_dev;
++
++ if (mac_dev->fm_rtc_enable)
++ mac_dev->fm_rtc_enable(get_fm_handle(dev));
++ if (mac_dev->ptp_enable)
++ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
++
++ priv->ts_rx_en = true;
++}
++
++static void dpa_ts_rx_disable(struct net_device *dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(dev);
++
++#if 0
++/* the RTC might be needed by the Tx Ts, cannot disable here
++ * no separate ptp_disable API for Rx/Tx, cannot disable here
++ */
++ struct mac_device *mac_dev = priv->mac_dev;
++
++ if (mac_dev->fm_rtc_disable)
++ mac_dev->fm_rtc_disable(get_fm_handle(dev));
++
++ if (mac_dev->ptp_disable)
++ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
++#endif
++
++ priv->ts_rx_en = false;
++}
++
++static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct hwtstamp_config config;
++
++ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
++ return -EFAULT;
++
++ switch (config.tx_type) {
++ case HWTSTAMP_TX_OFF:
++ dpa_ts_tx_disable(dev);
++ break;
++ case HWTSTAMP_TX_ON:
++ dpa_ts_tx_enable(dev);
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ if (config.rx_filter == HWTSTAMP_FILTER_NONE)
++ dpa_ts_rx_disable(dev);
++ else {
++ dpa_ts_rx_enable(dev);
++ /* TS is set for all frame types, not only those requested */
++ config.rx_filter = HWTSTAMP_FILTER_ALL;
++ }
++
++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
++ -EFAULT : 0;
++}
++#endif /* CONFIG_FSL_DPAA_TS */
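++
++/* Illustrative userspace counterpart of dpa_ts_ioctl() (hypothetical
++ * snippet, not part of the driver):
++ *
++ *	struct hwtstamp_config cfg = {
++ *		.tx_type = HWTSTAMP_TX_ON,
++ *		.rx_filter = HWTSTAMP_FILTER_ALL,
++ *	};
++ *	struct ifreq ifr;
++ *
++ *	strncpy(ifr.ifr_name, "fm1-mac1", IFNAMSIZ);
++ *	ifr.ifr_data = (void *)&cfg;
++ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
++ */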
++
++int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++#ifdef CONFIG_FSL_DPAA_1588
++ struct dpa_priv_s *priv = netdev_priv(dev);
++#endif
++ int ret = 0;
++
++ /* at least one timestamping feature must be enabled */
++#ifdef CONFIG_FSL_DPAA_TS
++ if (!netif_running(dev))
++#endif
++ return -EINVAL;
++
++#ifdef CONFIG_FSL_DPAA_TS
++ if (cmd == SIOCSHWTSTAMP)
++ return dpa_ts_ioctl(dev, rq, cmd);
++#endif /* CONFIG_FSL_DPAA_TS */
++
++#ifdef CONFIG_FSL_DPAA_1588
++ if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
++ if (priv->tsu && priv->tsu->valid)
++ ret = dpa_ioctl_1588(dev, rq, cmd);
++ else
++ ret = -ENODEV;
++ }
++#endif
++
++ return ret;
++}
++EXPORT_SYMBOL(dpa_ioctl);
++
++int __cold dpa_remove(struct platform_device *of_dev)
++{
++ int err;
++ struct device *dev;
++ struct net_device *net_dev;
++ struct dpa_priv_s *priv;
++
++ dev = &of_dev->dev;
++ net_dev = dev_get_drvdata(dev);
++
++ priv = netdev_priv(net_dev);
++
++ dpaa_eth_sysfs_remove(dev);
++
++ dev_set_drvdata(dev, NULL);
++ unregister_netdev(net_dev);
++
++ err = dpa_fq_free(dev, &priv->dpa_fq_list);
++
++ qman_delete_cgr_safe(&priv->ingress_cgr);
++ qman_release_cgrid(priv->ingress_cgr.cgrid);
++ qman_delete_cgr_safe(&priv->cgr_data.cgr);
++ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
++
++ dpa_private_napi_del(net_dev);
++
++ dpa_bp_free(priv);
++
++ if (priv->buf_layout)
++ devm_kfree(dev, priv->buf_layout);
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++ /* remove debugfs entry for this net_device */
++ dpa_netdev_debugfs_remove(net_dev);
++#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
++
++#ifdef CONFIG_FSL_DPAA_1588
++ if (priv->tsu && priv->tsu->valid)
++ dpa_ptp_cleanup(priv);
++#endif
++
++ free_netdev(net_dev);
++
++ return err;
++}
++EXPORT_SYMBOL(dpa_remove);
++
++struct mac_device * __cold __must_check
++__attribute__((nonnull))
++dpa_mac_probe(struct platform_device *_of_dev)
++{
++ struct device *dpa_dev, *dev;
++ struct device_node *mac_node;
++ struct platform_device *of_dev;
++ struct mac_device *mac_dev;
++#ifdef CONFIG_FSL_DPAA_1588
++ int lenp;
++ const phandle *phandle_prop;
++ struct net_device *net_dev = NULL;
++ struct dpa_priv_s *priv = NULL;
++ struct device_node *timer_node;
++#endif
++ dpa_dev = &_of_dev->dev;
++
++ mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0);
++ if (unlikely(mac_node == NULL)) {
++		dev_err(dpa_dev, "Cannot find the MAC device's device tree node\n");
++ return ERR_PTR(-EFAULT);
++ }
++
++ of_dev = of_find_device_by_node(mac_node);
++ if (unlikely(of_dev == NULL)) {
++ dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
++ mac_node->full_name);
++ of_node_put(mac_node);
++ return ERR_PTR(-EINVAL);
++ }
++ of_node_put(mac_node);
++
++ dev = &of_dev->dev;
++
++ mac_dev = dev_get_drvdata(dev);
++ if (unlikely(mac_dev == NULL)) {
++ dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
++ dev_name(dev));
++ return ERR_PTR(-EINVAL);
++ }
++
++#ifdef CONFIG_FSL_DPAA_1588
++ phandle_prop = of_get_property(mac_node, "ptp-timer", &lenp);
++ if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
++ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
++ (mac_dev->speed == SPEED_1000)))) {
++ timer_node = of_find_node_by_phandle(*phandle_prop);
++ if (timer_node)
++ net_dev = dev_get_drvdata(dpa_dev);
++ if (timer_node && net_dev) {
++ priv = netdev_priv(net_dev);
++ if (!dpa_ptp_init(priv))
++ dev_info(dev, "%s: ptp 1588 is initialized.\n",
++ mac_node->full_name);
++ }
++ }
++#endif
++
++#ifdef CONFIG_PTP_1588_CLOCK_DPAA
++ if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
++ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
++ (mac_dev->speed == SPEED_1000))) {
++ ptp_priv.node = of_parse_phandle(mac_node, "ptp-timer", 0);
++ if (ptp_priv.node) {
++ ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node);
++ if (unlikely(ptp_priv.of_dev == NULL)) {
++ dev_err(dpa_dev,
++ "Cannot find device represented by timer_node\n");
++ of_node_put(ptp_priv.node);
++ return ERR_PTR(-EINVAL);
++ }
++ ptp_priv.mac_dev = mac_dev;
++ }
++ }
++#endif
++ return mac_dev;
++}
++EXPORT_SYMBOL(dpa_mac_probe);
++
++int dpa_set_mac_address(struct net_device *net_dev, void *addr)
++{
++ const struct dpa_priv_s *priv;
++ int _errno;
++ struct mac_device *mac_dev;
++
++ priv = netdev_priv(net_dev);
++
++ _errno = eth_mac_addr(net_dev, addr);
++ if (_errno < 0) {
++ if (netif_msg_drv(priv))
++ netdev_err(net_dev,
++ "eth_mac_addr() = %d\n",
++ _errno);
++ return _errno;
++ }
++
++ mac_dev = priv->mac_dev;
++
++ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
++ net_dev->dev_addr);
++ if (_errno < 0) {
++ if (netif_msg_drv(priv))
++ netdev_err(net_dev,
++ "mac_dev->change_addr() = %d\n",
++ _errno);
++ return _errno;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(dpa_set_mac_address);
++
++void dpa_set_rx_mode(struct net_device *net_dev)
++{
++ int _errno;
++ const struct dpa_priv_s *priv;
++
++ priv = netdev_priv(net_dev);
++
++ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
++ priv->mac_dev->promisc = !priv->mac_dev->promisc;
++ _errno = priv->mac_dev->set_promisc(
++ priv->mac_dev->get_mac_handle(priv->mac_dev),
++ priv->mac_dev->promisc);
++ if (unlikely(_errno < 0) && netif_msg_drv(priv))
++ netdev_err(net_dev,
++ "mac_dev->set_promisc() = %d\n",
++ _errno);
++ }
++
++ _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
++ if (unlikely(_errno < 0) && netif_msg_drv(priv))
++ netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
++}
++EXPORT_SYMBOL(dpa_set_rx_mode);
++
++void dpa_set_buffers_layout(struct mac_device *mac_dev,
++ struct dpa_buffer_layout_s *layout)
++{
++ struct fm_port_params params;
++
++ /* Rx */
++ layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE;
++ layout[RX].parse_results = true;
++ layout[RX].hash_results = true;
++#ifdef CONFIG_FSL_DPAA_TS
++ layout[RX].time_stamp = true;
++#endif
++ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], &params);
++ layout[RX].manip_extra_space = params.manip_extra_space;
++ /* a value of zero for data alignment means "don't care", so align to
++ * a non-zero value to prevent FMD from using its own default
++ */
++ layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
++
++ /* Tx */
++ layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
++ layout[TX].parse_results = true;
++ layout[TX].hash_results = true;
++#ifdef CONFIG_FSL_DPAA_TS
++ layout[TX].time_stamp = true;
++#endif
++ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], &params);
++ layout[TX].manip_extra_space = params.manip_extra_space;
++ layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
++}
++EXPORT_SYMBOL(dpa_set_buffers_layout);
++
++int __attribute__((nonnull))
++dpa_bp_alloc(struct dpa_bp *dpa_bp)
++{
++ int err;
++ struct bman_pool_params bp_params;
++ struct platform_device *pdev;
++
++ if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
++		pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers\n");
++ return -EINVAL;
++ }
++
++ memset(&bp_params, 0, sizeof(struct bman_pool_params));
++#ifdef CONFIG_FMAN_PFC
++ bp_params.flags = BMAN_POOL_FLAG_THRESH;
++ bp_params.thresholds[0] = bp_params.thresholds[2] =
++ CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD;
++ bp_params.thresholds[1] = bp_params.thresholds[3] =
++ CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
++#endif
++
++	/* Only one pool is created per bpid; if one already exists, just take a reference */
++ if (dpa_bpid2pool_use(dpa_bp->bpid))
++ return 0;
++
++ if (dpa_bp->bpid == 0)
++ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
++ else
++ bp_params.bpid = dpa_bp->bpid;
++
++ dpa_bp->pool = bman_new_pool(&bp_params);
++ if (unlikely(dpa_bp->pool == NULL)) {
++ pr_err("bman_new_pool() failed\n");
++ return -ENODEV;
++ }
++
++ dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid;
++
++ pdev = platform_device_register_simple("dpaa_eth_bpool",
++ dpa_bp->bpid, NULL, 0);
++ if (IS_ERR(pdev)) {
++ pr_err("platform_device_register_simple() failed\n");
++ err = PTR_ERR(pdev);
++ goto pdev_register_failed;
++ }
++	{
++		/* Clear the device's dma_supported op so the 40-bit DMA
++		 * mask coercion below is not rejected by the default
++		 * implementation.
++		 */
++		struct dma_map_ops *ops = get_dma_ops(&pdev->dev);
++
++		ops->dma_supported = NULL;
++	}
++ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
++ if (err) {
++ pr_err("dma_coerce_mask_and_coherent() failed\n");
++ goto pdev_mask_failed;
++ }
++#ifdef CONFIG_FMAN_ARM
++ /* force coherency */
++ pdev->dev.archdata.dma_coherent = true;
++ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
++#endif
++
++ dpa_bp->dev = &pdev->dev;
++
++ if (dpa_bp->seed_cb) {
++ err = dpa_bp->seed_cb(dpa_bp);
++ if (err)
++ goto pool_seed_failed;
++ }
++
++ dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
++
++ return 0;
++
++pool_seed_failed:
++pdev_mask_failed:
++ platform_device_unregister(pdev);
++pdev_register_failed:
++ bman_free_pool(dpa_bp->pool);
++
++ return err;
++}
++EXPORT_SYMBOL(dpa_bp_alloc);
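++
++/* Note: pools are shared per bpid - the first dpa_bp_alloc() call for a
++ * given bpid creates the BMan pool and registers its platform device;
++ * later callers only take a reference through dpa_bpid2pool_use(), which
++ * _dpa_bp_free() drops again.
++ */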
++
++void dpa_bp_drain(struct dpa_bp *bp)
++{
++ int ret, num = 8;
++
++ do {
++ struct bm_buffer bmb[8];
++ int i;
++
++ ret = bman_acquire(bp->pool, bmb, num, 0);
++ if (ret < 0) {
++ if (num == 8) {
++ /* we have less than 8 buffers left;
++ * drain them one by one
++ */
++ num = 1;
++ ret = 1;
++ continue;
++ } else {
++ /* Pool is fully drained */
++ break;
++ }
++ }
++
++ for (i = 0; i < num; i++) {
++ dma_addr_t addr = bm_buf_addr(&bmb[i]);
++
++ dma_unmap_single(bp->dev, addr, bp->size,
++ DMA_BIDIRECTIONAL);
++
++ bp->free_buf_cb(phys_to_virt(addr));
++ }
++ } while (ret > 0);
++}
++EXPORT_SYMBOL(dpa_bp_drain);
++
++static void __cold __attribute__((nonnull))
++_dpa_bp_free(struct dpa_bp *dpa_bp)
++{
++ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
++
++	/* The mapping between bpid and dpa_bp is established very late in
++	 * the allocation procedure; if something failed before the mapping,
++	 * the bp was never configured and there is nothing to clean up.
++	 */
++ if (!bp)
++ return;
++
++ if (!atomic_dec_and_test(&bp->refs))
++ return;
++
++ if (bp->free_buf_cb)
++ dpa_bp_drain(bp);
++
++ dpa_bp_array[bp->bpid] = NULL;
++ bman_free_pool(bp->pool);
++
++ if (bp->dev)
++ platform_device_unregister(to_platform_device(bp->dev));
++}
++
++void __cold __attribute__((nonnull))
++dpa_bp_free(struct dpa_priv_s *priv)
++{
++ int i;
++
++ if (priv->dpa_bp)
++ for (i = 0; i < priv->bp_count; i++)
++ _dpa_bp_free(&priv->dpa_bp[i]);
++}
++EXPORT_SYMBOL(dpa_bp_free);
++
++struct dpa_bp *dpa_bpid2pool(int bpid)
++{
++ return dpa_bp_array[bpid];
++}
++EXPORT_SYMBOL(dpa_bpid2pool);
++
++void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
++{
++ dpa_bp_array[bpid] = dpa_bp;
++ atomic_set(&dpa_bp->refs, 1);
++}
++
++bool dpa_bpid2pool_use(int bpid)
++{
++ if (dpa_bpid2pool(bpid)) {
++ atomic_inc(&dpa_bp_array[bpid]->refs);
++ return true;
++ }
++
++ return false;
++}
++
++#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
++ void *accel_priv, select_queue_fallback_t fallback)
++{
++ return dpa_get_queue_mapping(skb);
++}
++EXPORT_SYMBOL(dpa_select_queue);
++#endif
++
++struct dpa_fq *dpa_fq_alloc(struct device *dev,
++ u32 fq_start,
++ u32 fq_count,
++ struct list_head *list,
++ enum dpa_fq_type fq_type)
++{
++ int i;
++ struct dpa_fq *dpa_fq;
++
++ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL);
++ if (dpa_fq == NULL)
++ return NULL;
++
++ for (i = 0; i < fq_count; i++) {
++ dpa_fq[i].fq_type = fq_type;
++ if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO)
++ dpa_fq[i].fqid = fq_start ?
++ DPAA_ETH_FQ_DELTA + fq_start + i : 0;
++ else
++ dpa_fq[i].fqid = fq_start ? fq_start + i : 0;
++
++ list_add_tail(&dpa_fq[i].list, list);
++ }
++
++#ifdef CONFIG_FMAN_PFC
++ if (fq_type == FQ_TYPE_TX)
++ for (i = 0; i < fq_count; i++)
++ dpa_fq[i].wq = i / dpa_num_cpus;
++ else
++#endif
++ for (i = 0; i < fq_count; i++)
++ _dpa_assign_wq(dpa_fq + i);
++
++ return dpa_fq;
++}
++EXPORT_SYMBOL(dpa_fq_alloc);
++
++/* Probing of FQs for MACful ports */
++int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
++ struct fm_port_fqs *port_fqs,
++ bool alloc_tx_conf_fqs,
++ enum port_type ptype)
++{
++ struct fqid_cell *fqids = NULL;
++ const void *fqids_off = NULL;
++ struct dpa_fq *dpa_fq = NULL;
++ struct device_node *np = dev->of_node;
++ int num_ranges;
++ int i, lenp;
++
++ if (ptype == TX && alloc_tx_conf_fqs) {
++ if (!dpa_fq_alloc(dev, tx_confirm_fqids->start,
++ tx_confirm_fqids->count, list,
++ FQ_TYPE_TX_CONF_MQ))
++ goto fq_alloc_failed;
++ }
++
++ fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
++ if (fqids_off == NULL) {
++ /* No dts definition, so use the defaults. */
++ fqids = default_fqids[ptype];
++ num_ranges = 3;
++ } else {
++ num_ranges = lenp / sizeof(*fqids);
++
++ fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges,
++ GFP_KERNEL);
++ if (fqids == NULL)
++ goto fqids_alloc_failed;
++
++		/* convert to CPU endianness */
++ for (i = 0; i < num_ranges; i++) {
++ fqids[i].start = be32_to_cpup(fqids_off +
++ i * sizeof(*fqids));
++ fqids[i].count = be32_to_cpup(fqids_off +
++ i * sizeof(*fqids) + sizeof(__be32));
++ }
++ }
++
++ for (i = 0; i < num_ranges; i++) {
++ switch (i) {
++ case 0:
++ /* The first queue is the error queue */
++ if (fqids[i].count != 1)
++ goto invalid_error_queue;
++
++ dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
++ fqids[i].count, list,
++ ptype == RX ?
++ FQ_TYPE_RX_ERROR :
++ FQ_TYPE_TX_ERROR);
++ if (dpa_fq == NULL)
++ goto fq_alloc_failed;
++
++ if (ptype == RX)
++ port_fqs->rx_errq = &dpa_fq[0];
++ else
++ port_fqs->tx_errq = &dpa_fq[0];
++ break;
++ case 1:
++ /* the second queue is the default queue */
++ if (fqids[i].count != 1)
++ goto invalid_default_queue;
++
++ dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
++ fqids[i].count, list,
++ ptype == RX ?
++ FQ_TYPE_RX_DEFAULT :
++ FQ_TYPE_TX_CONFIRM);
++ if (dpa_fq == NULL)
++ goto fq_alloc_failed;
++
++ if (ptype == RX)
++ port_fqs->rx_defq = &dpa_fq[0];
++ else
++ port_fqs->tx_defq = &dpa_fq[0];
++ break;
++ default:
++		/* all subsequent ranges are either Rx PCD (normal or high priority) or Tx */
++ if (ptype == RX) {
++ if (!dpa_fq_alloc(dev, fqids[i].start,
++ fqids[i].count, list,
++ FQ_TYPE_RX_PCD) ||
++ !dpa_fq_alloc(dev, fqids[i].start,
++ fqids[i].count, list,
++ FQ_TYPE_RX_PCD_HI_PRIO))
++ goto fq_alloc_failed;
++ } else {
++ if (!dpa_fq_alloc(dev, fqids[i].start,
++ fqids[i].count, list,
++ FQ_TYPE_TX))
++ goto fq_alloc_failed;
++ }
++ break;
++ }
++ }
++
++ return 0;
++
++fq_alloc_failed:
++fqids_alloc_failed:
++ dev_err(dev, "Cannot allocate memory for frame queues\n");
++ return -ENOMEM;
++
++invalid_default_queue:
++invalid_error_queue:
++ dev_err(dev, "Too many default or error queues\n");
++ return -EINVAL;
++}
++EXPORT_SYMBOL(dpa_fq_probe_mac);
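++
++/* Illustrative device tree property parsed above (hypothetical values):
++ *
++ *	fsl,qman-frame-queues-rx = <0x100 1 0x101 1 0x180 128>;
++ *
++ * i.e. three <start count> ranges: one error FQ, one default FQ and 128
++ * PCD FQs starting at FQID 0x180.
++ */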
++
++static u32 rx_pool_channel;
++static DEFINE_SPINLOCK(rx_pool_channel_init);
++
++int dpa_get_channel(void)
++{
++ spin_lock(&rx_pool_channel_init);
++ if (!rx_pool_channel) {
++ u32 pool;
++ int ret = qman_alloc_pool(&pool);
++ if (!ret)
++ rx_pool_channel = pool;
++ }
++ spin_unlock(&rx_pool_channel_init);
++ if (!rx_pool_channel)
++ return -ENOMEM;
++ return rx_pool_channel;
++}
++EXPORT_SYMBOL(dpa_get_channel);
++
++void dpa_release_channel(void)
++{
++ qman_release_pool(rx_pool_channel);
++}
++EXPORT_SYMBOL(dpa_release_channel);
++
++void dpaa_eth_add_channel(u16 channel)
++{
++ const cpumask_t *cpus = qman_affine_cpus();
++ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
++ int cpu;
++ struct qman_portal *portal;
++
++ for_each_cpu(cpu, cpus) {
++ portal = (struct qman_portal *)qman_get_affine_portal(cpu);
++ qman_p_static_dequeue_add(portal, pool);
++ }
++}
++EXPORT_SYMBOL(dpaa_eth_add_channel);
++
++/**
++ * Congestion group state change notification callback.
++ * Stops the device's egress queues while they are congested and
++ * wakes them upon exiting congested state.
++ * Also updates some CGR-related stats.
++ */
++static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
++			   int congested)
++{
++ struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
++ struct dpa_priv_s, cgr_data.cgr);
++
++ if (congested) {
++ priv->cgr_data.congestion_start_jiffies = jiffies;
++ netif_tx_stop_all_queues(priv->net_dev);
++ priv->cgr_data.cgr_congested_count++;
++ } else {
++ priv->cgr_data.congested_jiffies +=
++ (jiffies - priv->cgr_data.congestion_start_jiffies);
++ netif_tx_wake_all_queues(priv->net_dev);
++ }
++}
++
++int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
++{
++ struct qm_mcc_initcgr initcgr;
++ u32 cs_th;
++ int err;
++
++ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
++ if (err < 0) {
++ pr_err("Error %d allocating CGR ID\n", err);
++ goto out_error;
++ }
++ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
++
++ /* Enable Congestion State Change Notifications and CS taildrop */
++ initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
++ initcgr.cgr.cscn_en = QM_CGR_EN;
++
++ /* Set different thresholds based on the MAC speed.
++ * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
++ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
++ * In such cases, we ought to reconfigure the threshold, too.
++ */
++ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
++ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
++ else
++ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
++ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
++
++ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
++ initcgr.cgr.cstd_en = QM_CGR_EN;
++
++ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
++ &initcgr);
++ if (err < 0) {
++ pr_err("Error %d creating CGR with ID %d\n", err,
++ priv->cgr_data.cgr.cgrid);
++ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
++ goto out_error;
++ }
++ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
++ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
++ priv->cgr_data.cgr.chan);
++
++out_error:
++ return err;
++}
++EXPORT_SYMBOL(dpaa_eth_cgr_init);
++
++static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
++ struct dpa_fq *fq,
++ const struct qman_fq *template)
++{
++ fq->fq_base = *template;
++ fq->net_dev = priv->net_dev;
++
++ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
++ fq->channel = priv->channel;
++}
++
++static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
++ struct dpa_fq *fq,
++ struct fm_port *port,
++ const struct qman_fq *template)
++{
++ fq->fq_base = *template;
++ fq->net_dev = priv->net_dev;
++
++ if (port) {
++ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
++ fq->channel = (uint16_t)fm_get_tx_port_channel(port);
++ } else {
++ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
++ }
++}
++
++void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
++ struct fm_port *tx_port)
++{
++ struct dpa_fq *fq;
++ uint16_t portals[NR_CPUS];
++ int cpu, portal_cnt = 0, num_portals = 0;
++ uint32_t pcd_fqid, pcd_fqid_hi_prio;
++ const cpumask_t *affine_cpus = qman_affine_cpus();
++ int egress_cnt = 0, conf_cnt = 0;
++
++ /* Prepare for PCD FQs init */
++ for_each_cpu(cpu, affine_cpus)
++ portals[num_portals++] = qman_affine_channel(cpu);
++ if (num_portals == 0)
++ dev_err(priv->net_dev->dev.parent,
++ "No Qman software (affine) channels found");
++
++ pcd_fqid = (priv->mac_dev) ?
++ DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
++ pcd_fqid_hi_prio = (priv->mac_dev) ?
++ DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0;
++
++ /* Initialize each FQ in the list */
++ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
++ switch (fq->fq_type) {
++ case FQ_TYPE_RX_DEFAULT:
++ BUG_ON(!priv->mac_dev);
++ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
++ break;
++ case FQ_TYPE_RX_ERROR:
++ BUG_ON(!priv->mac_dev);
++ dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
++ break;
++ case FQ_TYPE_RX_PCD:
++ /* For MACless we can't have dynamic Rx queues */
++ BUG_ON(!priv->mac_dev && !fq->fqid);
++ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
++ if (!fq->fqid)
++ fq->fqid = pcd_fqid++;
++ fq->channel = portals[portal_cnt];
++ portal_cnt = (portal_cnt + 1) % num_portals;
++ break;
++ case FQ_TYPE_RX_PCD_HI_PRIO:
++ /* For MACless we can't have dynamic Hi Pri Rx queues */
++ BUG_ON(!priv->mac_dev && !fq->fqid);
++ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
++ if (!fq->fqid)
++ fq->fqid = pcd_fqid_hi_prio++;
++ fq->channel = portals[portal_cnt];
++ portal_cnt = (portal_cnt + 1) % num_portals;
++ break;
++ case FQ_TYPE_TX:
++ dpa_setup_egress(priv, fq, tx_port,
++ &fq_cbs->egress_ern);
++ /* If we have more Tx queues than the number of cores,
++ * just ignore the extra ones.
++ */
++ if (egress_cnt < DPAA_ETH_TX_QUEUES)
++ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
++ break;
++ case FQ_TYPE_TX_CONFIRM:
++ BUG_ON(!priv->mac_dev);
++ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
++ break;
++ case FQ_TYPE_TX_CONF_MQ:
++ BUG_ON(!priv->mac_dev);
++ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
++ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
++ break;
++ case FQ_TYPE_TX_ERROR:
++ BUG_ON(!priv->mac_dev);
++ dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
++ break;
++ default:
++ dev_warn(priv->net_dev->dev.parent,
++ "Unknown FQ type detected!\n");
++ break;
++ }
++ }
++
++ /* The number of Tx queues may be smaller than the number of cores, if
++ * the Tx queue range is specified in the device tree instead of being
++ * dynamically allocated.
++ * Make sure all CPUs receive a corresponding Tx queue.
++ */
++ while (egress_cnt < DPAA_ETH_TX_QUEUES) {
++ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
++ if (fq->fq_type != FQ_TYPE_TX)
++ continue;
++ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
++ if (egress_cnt == DPAA_ETH_TX_QUEUES)
++ break;
++ }
++ }
++}
++EXPORT_SYMBOL(dpa_fq_setup);
++
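++/* Create a single frame queue in QMan and, unless it is flagged NO_MODIFY,
++ * initialize and schedule it, setting up stashing, taildrop and congestion
++ * group membership as dictated by the queue type.
++ */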
++int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
++{
++ int _errno;
++ const struct dpa_priv_s *priv;
++ struct device *dev;
++ struct qman_fq *fq;
++ struct qm_mcc_initfq initfq;
++ struct qman_fq *confq;
++ int queue_id;
++
++ priv = netdev_priv(dpa_fq->net_dev);
++ dev = dpa_fq->net_dev->dev.parent;
++
++ if (dpa_fq->fqid == 0)
++ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
++
++ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
++
++ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
++ if (_errno) {
++ dev_err(dev, "qman_create_fq() failed\n");
++ return _errno;
++ }
++ fq = &dpa_fq->fq_base;
++
++ if (dpa_fq->init) {
++ memset(&initfq, 0, sizeof(initfq));
++
++ initfq.we_mask = QM_INITFQ_WE_FQCTRL;
++ /* FIXME: why would we want to keep an empty FQ in cache? */
++ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
++
++ /* Try to reduce the number of portal interrupts for
++ * Tx Confirmation FQs.
++ */
++ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
++ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
++
++ /* FQ placement */
++ initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
++
++ initfq.fqd.dest.channel = dpa_fq->channel;
++ initfq.fqd.dest.wq = dpa_fq->wq;
++
++ /* Put all egress queues in a congestion group of their own.
++ * Sensu stricto, the Tx confirmation queues are Rx FQs,
++ * rather than Tx - but they nonetheless account for the
++ * memory footprint on behalf of egress traffic. We therefore
++ * place them in the netdev's CGR, along with the Tx FQs.
++ */
++ if (dpa_fq->fq_type == FQ_TYPE_TX ||
++ dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
++ dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
++ initfq.we_mask |= QM_INITFQ_WE_CGID;
++ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
++ initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid;
++ /* Set a fixed overhead accounting, in an attempt to
++ * reduce the impact of fixed-size skb shells and the
++ * driver's needed headroom on system memory. This is
++ * especially the case when the egress traffic is
++ * composed of small datagrams.
++ * Unfortunately, QMan's OAL value is capped to an
++ * insufficient value, but even that is better than
++ * no overhead accounting at all.
++ */
++ initfq.we_mask |= QM_INITFQ_WE_OAC;
++ initfq.fqd.oac_init.oac = QM_OAC_CG;
++ initfq.fqd.oac_init.oal =
++ (signed char)(min(sizeof(struct sk_buff) +
++ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
++ }
++
++ if (td_enable) {
++ initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
++ qm_fqd_taildrop_set(&initfq.fqd.td,
++ DPA_FQ_TD, 1);
++ initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
++ }
++
++ /* Configure the Tx confirmation queue, now that we know
++ * which Tx queue it pairs with.
++ */
++ if (dpa_fq->fq_type == FQ_TYPE_TX) {
++ queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
++ if (queue_id >= 0) {
++ confq = priv->conf_fqs[queue_id];
++ if (confq) {
++ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
++ /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
++ * A2V=1 (contextA A2 field is valid)
++ * A0V=1 (contextA A0 field is valid)
++ * B0V=1 (contextB field is valid)
++ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
++ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
++ */
++ initfq.fqd.context_a.hi = 0x1e000000;
++ initfq.fqd.context_a.lo = 0x80000000;
++ }
++ }
++ }
++
++ /* Put all *private* ingress queues in our "ingress CGR". */
++ if (priv->use_ingress_cgr &&
++ (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
++ dpa_fq->fq_type == FQ_TYPE_RX_ERROR ||
++ dpa_fq->fq_type == FQ_TYPE_RX_PCD ||
++ dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) {
++ initfq.we_mask |= QM_INITFQ_WE_CGID;
++ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
++ initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid;
++ /* Set a fixed overhead accounting, just like for the
++ * egress CGR.
++ */
++ initfq.we_mask |= QM_INITFQ_WE_OAC;
++ initfq.fqd.oac_init.oac = QM_OAC_CG;
++ initfq.fqd.oac_init.oal =
++ (signed char)(min(sizeof(struct sk_buff) +
++ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
++ }
++
++ /* Initialization common to all ingress queues */
++ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
++ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
++ initfq.fqd.fq_ctrl |=
++ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
++ initfq.fqd.context_a.stashing.exclusive =
++ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
++ QM_STASHING_EXCL_ANNOTATION;
++ initfq.fqd.context_a.stashing.data_cl = 2;
++ initfq.fqd.context_a.stashing.annotation_cl = 1;
++ initfq.fqd.context_a.stashing.context_cl =
++ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
++ }
++
++ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
++ if (_errno < 0) {
++ if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno)) {
++ dpa_fq->init = 0;
++ } else {
++ dev_err(dev, "qman_init_fq(%u) = %d\n",
++ qman_fq_fqid(fq), _errno);
++ qman_destroy_fq(fq, 0);
++ }
++ return _errno;
++ }
++ }
++
++ dpa_fq->fqid = qman_fq_fqid(fq);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpa_fq_init);
++
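++/* Retire and take a frame queue out of service, then destroy it and unlink
++ * it from the interface's FQ list. Returns the first error encountered.
++ */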
++int __cold __attribute__((nonnull))
++_dpa_fq_free(struct device *dev, struct qman_fq *fq)
++{
++ int _errno, __errno;
++ struct dpa_fq *dpa_fq;
++ const struct dpa_priv_s *priv;
++
++ _errno = 0;
++
++ dpa_fq = container_of(fq, struct dpa_fq, fq_base);
++ priv = netdev_priv(dpa_fq->net_dev);
++
++ if (dpa_fq->init) {
++ _errno = qman_retire_fq(fq, NULL);
++ if (unlikely(_errno < 0) && netif_msg_drv(priv))
++ dev_err(dev, "qman_retire_fq(%u) = %d\n",
++ qman_fq_fqid(fq), _errno);
++
++ __errno = qman_oos_fq(fq);
++ if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
++ dev_err(dev, "qman_oos_fq(%u) = %d\n",
++ qman_fq_fqid(fq), __errno);
++ if (_errno >= 0)
++ _errno = __errno;
++ }
++ }
++
++ qman_destroy_fq(fq, 0);
++ list_del(&dpa_fq->list);
++
++ return _errno;
++}
++EXPORT_SYMBOL(_dpa_fq_free);
++
++int __cold __attribute__((nonnull))
++dpa_fq_free(struct device *dev, struct list_head *list)
++{
++ int _errno, __errno;
++ struct dpa_fq *dpa_fq, *tmp;
++
++ _errno = 0;
++ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
++ __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
++ if (unlikely(__errno < 0) && _errno >= 0)
++ _errno = __errno;
++ }
++
++ return _errno;
++}
++EXPORT_SYMBOL(dpa_fq_free);
++
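++/* Initialize every frame queue on the list. High priority Rx PCD FQs that
++ * fail with -EIO are skipped (and freed) rather than treated as fatal.
++ */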
++int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable)
++{
++ int _errno, __errno;
++ struct dpa_fq *dpa_fq, *tmp;
++ static bool print_msg __read_mostly;
++
++ _errno = 0;
++ print_msg = true;
++ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
++ __errno = dpa_fq_init(dpa_fq, td_enable);
++ if (unlikely(__errno < 0) && _errno >= 0) {
++ if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, __errno)) {
++ if (print_msg) {
++ dev_warn(dev,
++ "Skip RX PCD High Priority FQs initialization\n");
++ print_msg = false;
++ }
++ if (_dpa_fq_free(dev, (struct qman_fq *)dpa_fq))
++ dev_warn(dev,
++ "Error freeing frame queues\n");
++ } else {
++ _errno = __errno;
++ break;
++ }
++ }
++ }
++
++ return _errno;
++}
++EXPORT_SYMBOL(dpa_fqs_init);
++
++static void
++dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
++ struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
++{
++ struct fm_port_params tx_port_param;
++ bool frag_enabled = false;
++
++ memset(&tx_port_param, 0, sizeof(tx_port_param));
++ dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
++ buf_layout, frag_enabled);
++}
++
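++/* Configure the Rx port's error/default FQs, buffer layout and the buffer
++ * pools it may draw from.
++ */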
++static void
++dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
++ struct dpa_fq *errq, struct dpa_fq *defq,
++ struct dpa_buffer_layout_s *buf_layout)
++{
++ struct fm_port_params rx_port_param;
++ int i;
++ bool frag_enabled = false;
++
++ memset(&rx_port_param, 0, sizeof(rx_port_param));
++ count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
++ rx_port_param.num_pools = (uint8_t)count;
++ for (i = 0; i < count; i++) {
++ rx_port_param.pool_param[i].id = bp[i].bpid;
++ rx_port_param.pool_param[i].size = (uint16_t)bp[i].size;
++ }
++
++ dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
++ buf_layout, frag_enabled);
++}
++
++#if defined(CONFIG_FSL_SDK_FMAN_TEST)
++/* Defined as weak, to be implemented by fman pcd tester. */
++int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
++__attribute__((weak));
++
++int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
++#else
++int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *);
++
++int dpa_free_pcd_fqids(struct device *, uint32_t);
++
++#endif /* CONFIG_FSL_SDK_FMAN_TEST */
++
++int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
++ uint8_t alignment, uint32_t *base_fqid)
++{
++ dev_crit(dev, "callback not implemented!\n");
++
++ return 0;
++}
++
++int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
++{
++ dev_crit(dev, "callback not implemented!\n");
++
++ return 0;
++}
++
++void dpaa_eth_init_ports(struct mac_device *mac_dev,
++ struct dpa_bp *bp, size_t count,
++ struct fm_port_fqs *port_fqs,
++ struct dpa_buffer_layout_s *buf_layout,
++ struct device *dev)
++{
++ struct fm_port_pcd_param rx_port_pcd_param;
++ struct fm_port *rxport = mac_dev->port_dev[RX];
++ struct fm_port *txport = mac_dev->port_dev[TX];
++
++ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
++ port_fqs->tx_defq, &buf_layout[TX]);
++ dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
++ port_fqs->rx_defq, &buf_layout[RX]);
++
++ rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
++ rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
++ rx_port_pcd_param.dev = dev;
++ fm_port_pcd_bind(rxport, &rx_port_pcd_param);
++}
++EXPORT_SYMBOL(dpaa_eth_init_ports);
++
++void dpa_release_sgt(struct qm_sg_entry *sgt)
++{
++ struct dpa_bp *dpa_bp;
++ struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
++ uint8_t i = 0, j;
++
++ memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer));
++
++ do {
++ dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]));
++ DPA_BUG_ON(!dpa_bp);
++
++ j = 0;
++ do {
++ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
++ bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i]));
++
++ j++; i++;
++ } while (j < ARRAY_SIZE(bmb) &&
++ !qm_sg_entry_get_final(&sgt[i-1]) &&
++ qm_sg_entry_get_bpid(&sgt[i-1]) ==
++ qm_sg_entry_get_bpid(&sgt[i]));
++
++ while (bman_release(dpa_bp->pool, bmb, j, 0))
++ cpu_relax();
++ } while (!qm_sg_entry_get_final(&sgt[i-1]));
++}
++EXPORT_SYMBOL(dpa_release_sgt);
++
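++/* Return a frame descriptor's buffer(s) to their BMan pool. For S/G frames
++ * the SGT buffer is unmapped so its entries can be walked and released,
++ * then mapped again before being returned to the pool itself.
++ */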
++void __attribute__((nonnull))
++dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
++{
++ struct qm_sg_entry *sgt;
++ struct dpa_bp *dpa_bp;
++ struct bm_buffer bmb;
++ dma_addr_t addr;
++ void *vaddr;
++
++ bmb.opaque = 0;
++ bm_buffer_set64(&bmb, qm_fd_addr(fd));
++
++ dpa_bp = dpa_bpid2pool(fd->bpid);
++ DPA_BUG_ON(!dpa_bp);
++
++ if (fd->format == qm_fd_sg) {
++ vaddr = phys_to_virt(qm_fd_addr(fd));
++ sgt = vaddr + dpa_fd_offset(fd);
++
++ dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
++ DMA_BIDIRECTIONAL);
++
++ dpa_release_sgt(sgt);
++ addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
++ dev_err(dpa_bp->dev, "DMA mapping failed");
++ return;
++ }
++ bm_buffer_set64(&bmb, addr);
++ }
++
++ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
++ cpu_relax();
++}
++EXPORT_SYMBOL(dpa_fd_release);
++
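++/* Bump the per-CPU counter matching the reject code carried by an enqueue
++ * rejection (ERN) message.
++ */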
++void count_ern(struct dpa_percpu_priv_s *percpu_priv,
++ const struct qm_mr_entry *msg)
++{
++ switch (msg->ern.rc & QM_MR_RC_MASK) {
++ case QM_MR_RC_CGR_TAILDROP:
++ percpu_priv->ern_cnt.cg_tdrop++;
++ break;
++ case QM_MR_RC_WRED:
++ percpu_priv->ern_cnt.wred++;
++ break;
++ case QM_MR_RC_ERROR:
++ percpu_priv->ern_cnt.err_cond++;
++ break;
++ case QM_MR_RC_ORPWINDOW_EARLY:
++ percpu_priv->ern_cnt.early_window++;
++ break;
++ case QM_MR_RC_ORPWINDOW_LATE:
++ percpu_priv->ern_cnt.late_window++;
++ break;
++ case QM_MR_RC_FQ_TAILDROP:
++ percpu_priv->ern_cnt.fq_tdrop++;
++ break;
++ case QM_MR_RC_ORPWINDOW_RETIRED:
++ percpu_priv->ern_cnt.fq_retired++;
++ break;
++ case QM_MR_RC_ORP_ZERO:
++ percpu_priv->ern_cnt.orp_zero++;
++ break;
++ }
++}
++EXPORT_SYMBOL(count_ern);
++
++/**
++ * Turn on HW checksum computation for this outgoing frame.
++ * If the current protocol is not something we support in this regard
++ * (or if the stack has already computed the SW checksum), we do nothing.
++ *
++ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
++ * otherwise.
++ *
++ * Note that this function may modify the fd->cmd field and the skb data buffer
++ * (the Parse Results area).
++ */
++int dpa_enable_tx_csum(struct dpa_priv_s *priv,
++ struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
++{
++ fm_prs_result_t *parse_result;
++ struct iphdr *iph;
++ struct ipv6hdr *ipv6h = NULL;
++ u8 l4_proto;
++ u16 ethertype = ntohs(skb->protocol);
++ int retval = 0;
++
++ if (skb->ip_summed != CHECKSUM_PARTIAL)
++ return 0;
++
++ /* Note: L3 csum seems to be already computed in sw, but we can't choose
++ * L4 alone from the FM configuration anyway.
++ */
++
++ /* Fill in some fields of the Parse Results array, so the FMan
++ * can find them as if they came from the FMan Parser.
++ */
++ parse_result = (fm_prs_result_t *)parse_results;
++
++ /* If we're dealing with VLAN, get the real Ethernet type */
++ if (ethertype == ETH_P_8021Q) {
++ /* We can't always assume the MAC header is set correctly
++ * by the stack, so reset to beginning of skb->data
++ */
++ skb_reset_mac_header(skb);
++ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
++ }
++
++ /* Fill in the relevant L3 parse result fields
++ * and read the L4 protocol type
++ */
++ switch (ethertype) {
++ case ETH_P_IP:
++ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
++ iph = ip_hdr(skb);
++ DPA_BUG_ON(iph == NULL);
++ l4_proto = iph->protocol;
++ break;
++ case ETH_P_IPV6:
++ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
++ ipv6h = ipv6_hdr(skb);
++ DPA_BUG_ON(ipv6h == NULL);
++ l4_proto = ipv6h->nexthdr;
++ break;
++ default:
++ /* We shouldn't even be here */
++ if (netif_msg_tx_err(priv) && net_ratelimit())
++ netdev_alert(priv->net_dev,
++ "Can't compute HW csum for L3 proto 0x%x\n",
++ ntohs(skb->protocol));
++ retval = -EIO;
++ goto return_error;
++ }
++
++ /* Fill in the relevant L4 parse result fields */
++ switch (l4_proto) {
++ case IPPROTO_UDP:
++ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
++ break;
++ case IPPROTO_TCP:
++ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
++ break;
++ default:
++ /* This could just as well be a BUG() */
++ if (netif_msg_tx_err(priv) && net_ratelimit())
++ netdev_alert(priv->net_dev,
++ "Can't compute HW csum for L4 proto 0x%x\n",
++ l4_proto);
++ retval = -EIO;
++ goto return_error;
++ }
++
++ /* At index 0 is IPOffset_1 as defined in the Parse Results */
++ parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
++ parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
++
++ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
++ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
++
++ /* On P1023 and similar platforms fd->cmd interpretation could
++ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
++ * is not set so we do not need to check; in the future, if/when
++ * using context_a we need to check this bit
++ */
++
++return_error:
++ return retval;
++}
++EXPORT_SYMBOL(dpa_enable_tx_csum);
++
++#ifdef CONFIG_FSL_DPAA_CEETM
++void dpa_enable_ceetm(struct net_device *dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(dev);
++
++ priv->ceetm_en = true;
++}
++EXPORT_SYMBOL(dpa_enable_ceetm);
++
++void dpa_disable_ceetm(struct net_device *dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(dev);
++
++ priv->ceetm_en = false;
++}
++EXPORT_SYMBOL(dpa_disable_ceetm);
++#endif
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
+new file mode 100644
+index 00000000..41db4302
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
+@@ -0,0 +1,225 @@
++/* Copyright 2008-2013 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPAA_ETH_COMMON_H
++#define __DPAA_ETH_COMMON_H
++
++#include <linux/etherdevice.h> /* struct net_device */
++#include <linux/fsl_bman.h> /* struct bm_buffer */
++#include <linux/of_platform.h> /* struct platform_device */
++#include <linux/net_tstamp.h> /* struct hwtstamp_config */
++
++#include "dpaa_eth.h"
++#include "lnxwrp_fsl_fman.h"
++
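++/* Expands into a call to fm_set_rx_port_params() or fm_set_tx_port_params(),
++ * copying the FQ ids and buffer layout fields into the port parameter
++ * structure.
++ */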
++#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
++ frag_enabled) \
++{ \
++ param.errq = errq_id; \
++ param.defq = defq_id; \
++ param.priv_data_size = buf_layout->priv_data_size; \
++ param.parse_results = buf_layout->parse_results; \
++ param.hash_results = buf_layout->hash_results; \
++ param.frag_enable = frag_enabled; \
++ param.time_stamp = buf_layout->time_stamp; \
++ param.manip_extra_space = buf_layout->manip_extra_space; \
++ param.data_align = buf_layout->data_align; \
++ fm_set_##type##_port_params(port, &param); \
++}
++
++#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
++
++#define DPA_SGT_ENTRIES_THRESHOLD DPA_SGT_MAX_ENTRIES
++
++#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
++
++#define DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno) \
++ (((dpa_fq)->fq_type == FQ_TYPE_RX_PCD_HI_PRIO) && \
++ (_errno == -EIO))
++
++/* return codes for the dpaa-eth hooks */
++enum dpaa_eth_hook_result {
++ /* fd/skb was retained by the hook.
++ *
++ * On the Rx path, this means the Ethernet driver will _not_
++ * deliver the skb to the stack. Instead, the hook implementation
++ * is expected to properly dispose of the skb.
++ *
++ * On the Tx path, the Ethernet driver's dpa_tx() function will
++ * immediately return NETDEV_TX_OK. The hook implementation is expected
++ * to free the skb. *DO*NOT* release it to BMan, or enqueue it to FMan,
++ * unless you know exactly what you're doing!
++ *
++ * On the confirmation/error paths, the Ethernet driver will _not_
++ * perform any fd cleanup, nor update the interface statistics.
++ */
++ DPAA_ETH_STOLEN,
++ /* fd/skb was returned to the Ethernet driver for regular processing.
++ * The hook is not allowed to, for instance, reallocate the skb (as if
++ * by linearizing, copying, cloning or reallocating the headroom).
++ */
++ DPAA_ETH_CONTINUE
++};
++
++typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
++ struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
++typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
++ struct sk_buff *skb, struct net_device *net_dev);
++typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
++ struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
++
++/* used in napi related functions */
++extern u16 qman_portal_max;
++
++/* from dpa_ethtool.c */
++extern const struct ethtool_ops dpa_ethtool_ops;
++
++#ifdef CONFIG_FSL_DPAA_HOOKS
++/* Various hooks used for unit-testing and/or fastpath optimizations.
++ * Currently only one set of such hooks is supported.
++ */
++struct dpaa_eth_hooks_s {
++ /* Invoked on the Tx private path, immediately after receiving the skb
++ * from the stack.
++ */
++ dpaa_eth_egress_hook_t tx;
++
++ /* Invoked on the Rx private path, right before passing the skb
++ * up the stack. At that point, the packet's protocol id has already
++ * been set. The skb's data pointer is now at the L3 header, and
++ * skb->mac_header points to the L2 header. skb->len has been adjusted
++ * to be the length of L3+payload (i.e., the length of the
++ * original frame minus the L2 header len).
++ * For more details on what the skb looks like, see eth_type_trans().
++ */
++ dpaa_eth_ingress_hook_t rx_default;
++
++ /* Driver hook for the Rx error private path. */
++ dpaa_eth_confirm_hook_t rx_error;
++ /* Driver hook for the Tx confirmation private path. */
++ dpaa_eth_confirm_hook_t tx_confirm;
++ /* Driver hook for the Tx error private path. */
++ dpaa_eth_confirm_hook_t tx_error;
++};
++
++void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
++
++extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
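++
++/* Example (illustrative sketch only, assuming CONFIG_FSL_DPAA_HOOKS is set):
++ * a pass-through Rx hook that lets every frame continue on the regular
++ * path could be registered as
++ *
++ *	static enum dpaa_eth_hook_result my_rx(struct sk_buff *skb,
++ *					       struct net_device *net_dev,
++ *					       u32 fqid)
++ *	{
++ *		return DPAA_ETH_CONTINUE;
++ *	}
++ *
++ *	static struct dpaa_eth_hooks_s hooks = { .rx_default = my_rx, };
++ *	fsl_dpaa_eth_set_hooks(&hooks);
++ *
++ * my_rx and hooks are hypothetical names used only for illustration.
++ */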
++#endif
++
++int dpa_netdev_init(struct net_device *net_dev,
++ const uint8_t *mac_addr,
++ uint16_t tx_timeout);
++int __cold dpa_start(struct net_device *net_dev);
++int __cold dpa_stop(struct net_device *net_dev);
++void __cold dpa_timeout(struct net_device *net_dev);
++void __cold
++dpa_get_stats64(struct net_device *net_dev,
++ struct rtnl_link_stats64 *stats);
++int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
++int dpa_ndo_init(struct net_device *net_dev);
++int dpa_set_features(struct net_device *dev, netdev_features_t features);
++netdev_features_t dpa_fix_features(struct net_device *dev,
++ netdev_features_t features);
++#ifdef CONFIG_FSL_DPAA_TS
++u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
++ enum port_type rx_tx, const void *data);
++/* Updates the skb shared hw timestamp from the hardware timestamp */
++int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
++ struct skb_shared_hwtstamps *shhwtstamps, const void *data);
++#endif /* CONFIG_FSL_DPAA_TS */
++int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++int __cold dpa_remove(struct platform_device *of_dev);
++struct mac_device * __cold __must_check
++__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
++int dpa_set_mac_address(struct net_device *net_dev, void *addr);
++void dpa_set_rx_mode(struct net_device *net_dev);
++void dpa_set_buffers_layout(struct mac_device *mac_dev,
++ struct dpa_buffer_layout_s *layout);
++int __attribute__((nonnull))
++dpa_bp_alloc(struct dpa_bp *dpa_bp);
++void __cold __attribute__((nonnull))
++dpa_bp_free(struct dpa_priv_s *priv);
++struct dpa_bp *dpa_bpid2pool(int bpid);
++void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
++bool dpa_bpid2pool_use(int bpid);
++void dpa_bp_drain(struct dpa_bp *bp);
++#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
++ void *accel_priv, select_queue_fallback_t fallback);
++#endif
++struct dpa_fq *dpa_fq_alloc(struct device *dev,
++ u32 fq_start,
++ u32 fq_count,
++ struct list_head *list,
++ enum dpa_fq_type fq_type);
++int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
++ struct fm_port_fqs *port_fqs,
++ bool tx_conf_fqs_per_core,
++ enum port_type ptype);
++int dpa_get_channel(void);
++void dpa_release_channel(void);
++void dpaa_eth_add_channel(u16 channel);
++int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
++void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
++ struct fm_port *tx_port);
++int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
++int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable);
++int __cold __attribute__((nonnull))
++dpa_fq_free(struct device *dev, struct list_head *list);
++void dpaa_eth_init_ports(struct mac_device *mac_dev,
++ struct dpa_bp *bp, size_t count,
++ struct fm_port_fqs *port_fqs,
++ struct dpa_buffer_layout_s *buf_layout,
++ struct device *dev);
++void dpa_release_sgt(struct qm_sg_entry *sgt);
++void __attribute__((nonnull))
++dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
++void count_ern(struct dpa_percpu_priv_s *percpu_priv,
++ const struct qm_mr_entry *msg);
++int dpa_enable_tx_csum(struct dpa_priv_s *priv,
++ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
++#ifdef CONFIG_FSL_DPAA_CEETM
++void dpa_enable_ceetm(struct net_device *dev);
++void dpa_disable_ceetm(struct net_device *dev);
++#endif
++struct proxy_device {
++ struct mac_device *mac_dev;
++};
++
++/* MAC device control functions exposed by the proxy interface */
++int dpa_proxy_start(struct net_device *net_dev);
++int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev);
++int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
++ struct net_device *net_dev);
++int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
++ struct net_device *net_dev);
++
++#endif /* __DPAA_ETH_COMMON_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
+new file mode 100644
+index 00000000..994d38cd
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
+@@ -0,0 +1,381 @@
++/* Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++ KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": " fmt
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++#include "dpaa_eth_base.h"
++#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
++#include "mac.h"
++
++#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++MODULE_DESCRIPTION(DPA_DESCRIPTION);
++
++static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
++#ifdef CONFIG_PM
++
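++/* Power management: suspend/resume just quiesce the FMan ports, undoing
++ * the first port's state change if the second one fails.
++ */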
++static int proxy_suspend(struct device *dev)
++{
++ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
++ struct mac_device *mac_dev = proxy_dev->mac_dev;
++ int err = 0;
++
++ err = fm_port_suspend(mac_dev->port_dev[RX]);
++ if (err)
++ goto port_suspend_failed;
++
++ err = fm_port_suspend(mac_dev->port_dev[TX]);
++ if (err)
++ err = fm_port_resume(mac_dev->port_dev[RX]);
++
++port_suspend_failed:
++ return err;
++}
++
++static int proxy_resume(struct device *dev)
++{
++ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
++ struct mac_device *mac_dev = proxy_dev->mac_dev;
++ int err = 0;
++
++ err = fm_port_resume(mac_dev->port_dev[TX]);
++ if (err)
++ goto port_resume_failed;
++
++ err = fm_port_resume(mac_dev->port_dev[RX]);
++ if (err)
++ err = fm_port_suspend(mac_dev->port_dev[TX]);
++
++port_resume_failed:
++ return err;
++}
++
++static const struct dev_pm_ops proxy_pm_ops = {
++ .suspend = proxy_suspend,
++ .resume = proxy_resume,
++};
++
++#define PROXY_PM_OPS (&proxy_pm_ops)
++
++#else /* CONFIG_PM */
++
++#define PROXY_PM_OPS NULL
++
++#endif /* CONFIG_PM */
++
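++/* Probe a "fsl,dpa-ethernet-init" node: configure the MAC, ports and
++ * default/error FQs on behalf of another partition, then free the local
++ * bookkeeping, since no netdev is created here.
++ */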
++static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
++{
++ int err = 0, i;
++ struct device *dev;
++ struct device_node *dpa_node;
++ struct dpa_bp *dpa_bp;
++ struct list_head proxy_fq_list;
++ size_t count;
++ struct fm_port_fqs port_fqs;
++ struct dpa_buffer_layout_s *buf_layout = NULL;
++ struct mac_device *mac_dev;
++ struct proxy_device *proxy_dev;
++
++ dev = &_of_dev->dev;
++
++ dpa_node = dev->of_node;
++
++ if (!of_device_is_available(dpa_node))
++ return -ENODEV;
++
++ /* Get the buffer pools assigned to this interface */
++ dpa_bp = dpa_bp_probe(_of_dev, &count);
++ if (IS_ERR(dpa_bp))
++ return PTR_ERR(dpa_bp);
++
++ mac_dev = dpa_mac_probe(_of_dev);
++ if (IS_ERR(mac_dev))
++ return PTR_ERR(mac_dev);
++
++ proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
++ if (!proxy_dev) {
++ dev_err(dev, "devm_kzalloc() failed\n");
++ return -ENOMEM;
++ }
++
++ proxy_dev->mac_dev = mac_dev;
++ dev_set_drvdata(dev, proxy_dev);
++
++ /* We have physical ports, so we need to establish
++ * the buffer layout.
++ */
++ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
++ GFP_KERNEL);
++ if (!buf_layout) {
++ dev_err(dev, "devm_kzalloc() failed\n");
++ return -ENOMEM;
++ }
++ dpa_set_buffers_layout(mac_dev, buf_layout);
++
++ INIT_LIST_HEAD(&proxy_fq_list);
++
++ memset(&port_fqs, 0, sizeof(port_fqs));
++
++ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
++ if (!err)
++ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
++ TX);
++ if (err < 0) {
++ devm_kfree(dev, buf_layout);
++ return err;
++ }
++
++ /* Proxy initializer - Just configures the MAC on behalf of
++ * another partition.
++ */
++ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
++ buf_layout, dev);
++
++ /* Proxy interfaces need to be started, and the allocated
++ * memory freed
++ */
++ devm_kfree(dev, buf_layout);
++ devm_kfree(dev, dpa_bp);
++
++ /* Free FQ structures */
++ devm_kfree(dev, port_fqs.rx_defq);
++ devm_kfree(dev, port_fqs.rx_errq);
++ devm_kfree(dev, port_fqs.tx_defq);
++ devm_kfree(dev, port_fqs.tx_errq);
++
++ for_each_port_device(i, mac_dev->port_dev) {
++ err = fm_port_enable(mac_dev->port_dev[i]);
++ if (err)
++ goto port_enable_fail;
++ }
++
++ dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
++ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
++ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
++
++ return 0; /* Proxy interface initialization ended */
++
++port_enable_fail:
++ for_each_port_device(i, mac_dev->port_dev)
++ fm_port_disable(mac_dev->port_dev[i]);
++ dpa_eth_proxy_remove(_of_dev);
++
++ return err;
++}
++
++int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
++ struct net_device *net_dev)
++{
++ struct mac_device *mac_dev;
++ int _errno;
++
++ mac_dev = proxy_dev->mac_dev;
++
++ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
++ net_dev->dev_addr);
++ if (_errno < 0)
++ return _errno;
++
++ return 0;
++}
++EXPORT_SYMBOL(dpa_proxy_set_mac_address);
++
++int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
++ struct net_device *net_dev)
++{
++ struct mac_device *mac_dev = proxy_dev->mac_dev;
++ int _errno;
++
++ if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) {
++ mac_dev->promisc = !mac_dev->promisc;
++ _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev),
++ mac_dev->promisc);
++ if (unlikely(_errno < 0))
++ netdev_err(net_dev, "mac_dev->set_promisc() = %d\n",
++ _errno);
++ }
++
++ _errno = mac_dev->set_multi(net_dev, mac_dev);
++ if (unlikely(_errno < 0))
++ return _errno;
++
++ return 0;
++}
++EXPORT_SYMBOL(dpa_proxy_set_rx_mode);
++
++int dpa_proxy_start(struct net_device *net_dev)
++{
++ struct mac_device *mac_dev;
++ const struct dpa_priv_s *priv;
++ struct proxy_device *proxy_dev;
++ int _errno;
++ int i;
++
++ priv = netdev_priv(net_dev);
++ proxy_dev = (struct proxy_device *)priv->peer;
++ mac_dev = proxy_dev->mac_dev;
++
++ _errno = mac_dev->init_phy(net_dev, mac_dev);
++ if (_errno < 0) {
++ if (netif_msg_drv(priv))
++ netdev_err(net_dev, "init_phy() = %d\n",
++ _errno);
++ return _errno;
++ }
++
++ for_each_port_device(i, mac_dev->port_dev) {
++ _errno = fm_port_enable(mac_dev->port_dev[i]);
++ if (_errno)
++ goto port_enable_fail;
++ }
++
++ _errno = mac_dev->start(mac_dev);
++ if (_errno < 0) {
++ if (netif_msg_drv(priv))
++ netdev_err(net_dev, "mac_dev->start() = %d\n",
++ _errno);
++ goto port_enable_fail;
++ }
++
++ return _errno;
++
++port_enable_fail:
++ for_each_port_device(i, mac_dev->port_dev)
++ fm_port_disable(mac_dev->port_dev[i]);
++
++ return _errno;
++}
++EXPORT_SYMBOL(dpa_proxy_start);
++
++int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
++{
++ struct mac_device *mac_dev = proxy_dev->mac_dev;
++ const struct dpa_priv_s *priv = netdev_priv(net_dev);
++ int _errno, i, err;
++
++ _errno = mac_dev->stop(mac_dev);
++ if (_errno < 0) {
++ if (netif_msg_drv(priv))
++ netdev_err(net_dev, "mac_dev->stop() = %d\n",
++ _errno);
++ return _errno;
++ }
++
++ for_each_port_device(i, mac_dev->port_dev) {
++ err = fm_port_disable(mac_dev->port_dev[i]);
++ _errno = err ? err : _errno;
++ }
++
++ if (mac_dev->phy_dev)
++ phy_disconnect(mac_dev->phy_dev);
++ mac_dev->phy_dev = NULL;
++
++ return _errno;
++}
++EXPORT_SYMBOL(dpa_proxy_stop);
++
++static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
++{
++ struct device *dev = &of_dev->dev;
++ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
++
++ kfree(proxy_dev);
++
++ dev_set_drvdata(dev, NULL);
++
++ return 0;
++}
++
++static const struct of_device_id dpa_proxy_match[] = {
++ {
++ .compatible = "fsl,dpa-ethernet-init"
++ },
++ {}
++};
++MODULE_DEVICE_TABLE(of, dpa_proxy_match);
++
++static struct platform_driver dpa_proxy_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME "-proxy",
++ .of_match_table = dpa_proxy_match,
++ .owner = THIS_MODULE,
++ .pm = PROXY_PM_OPS,
++ },
++ .probe = dpaa_eth_proxy_probe,
++ .remove = dpa_eth_proxy_remove
++};
++
++static int __init __cold dpa_proxy_load(void)
++{
++ int _errno;
++
++ pr_info(DPA_DESCRIPTION "\n");
++
++ /* Initialize dpaa_eth mirror values */
++ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
++ dpa_max_frm = fm_get_max_frm();
++
++ _errno = platform_driver_register(&dpa_proxy_driver);
++ if (unlikely(_errno < 0)) {
++ pr_err(KBUILD_MODNAME
++ ": %s:%hu:%s(): platform_driver_register() = %d\n",
++ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
++ }
++
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME".c", __func__);
++
++ return _errno;
++}
++module_init(dpa_proxy_load);
++
++static void __exit __cold dpa_proxy_unload(void)
++{
++ platform_driver_unregister(&dpa_proxy_driver);
++
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME".c", __func__);
++}
++module_exit(dpa_proxy_unload);
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
+new file mode 100644
+index 00000000..11b47e8c
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
+@@ -0,0 +1,1179 @@
++/* Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++ KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": " fmt
++#endif
++
++#include <linux/init.h>
++#include <linux/skbuff.h>
++#include <linux/highmem.h>
++#include <linux/fsl_bman.h>
++#include <net/sock.h>
++
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++#ifdef CONFIG_FSL_DPAA_1588
++#include "dpaa_1588.h"
++#endif
++#ifdef CONFIG_FSL_DPAA_CEETM
++#include "dpaa_eth_ceetm.h"
++#endif
++
++/* DMA-map a page frag and release it back into the bpool.
++ * The @vaddr fragment must have been allocated with netdev_alloc_frag(),
++ * sized specifically to fit @dpa_bp.
++ */
++static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
++ int *count_ptr)
++{
++ struct bm_buffer bmb;
++ dma_addr_t addr;
++
++ bmb.opaque = 0;
++
++ addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
++ dev_err(dpa_bp->dev, "DMA mapping failed");
++ return;
++ }
++
++ bm_buffer_set64(&bmb, addr);
++
++ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
++ cpu_relax();
++
++ (*count_ptr)++;
++}
++
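++/* Allocate up to 8 buffers, build an skb shell around each, DMA-map them
++ * and seed them into the buffer pool. Returns the number of buffers that
++ * were actually released to BMan.
++ */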
++static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
++{
++ struct bm_buffer bmb[8];
++ void *new_buf;
++ dma_addr_t addr;
++ uint8_t i;
++ struct device *dev = dpa_bp->dev;
++ struct sk_buff *skb, **skbh;
++
++ memset(bmb, 0, sizeof(struct bm_buffer) * 8);
++
++ for (i = 0; i < 8; i++) {
++ /* We'll prepend the skb back-pointer; can't use the DPA
++ * priv space, because FMan will overwrite it (from offset 0)
++ * if it ends up being the second, third, etc. fragment
++ * in a S/G frame.
++ *
++ * We only need enough space to store a pointer, but allocate
++ * an entire cacheline for performance reasons.
++ */
++#ifndef CONFIG_PPC
++ if (unlikely(dpaa_errata_a010022)) {
++ struct page *new_page = alloc_page(GFP_ATOMIC);
++
++ if (unlikely(!new_page))
++ goto netdev_alloc_failed;
++ new_buf = page_address(new_page);
++ } else
++#endif
++ new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
++
++ if (unlikely(!new_buf))
++ goto netdev_alloc_failed;
++ new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
++
++ skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
++ if (unlikely(!skb)) {
++ put_page(virt_to_head_page(new_buf));
++ goto build_skb_failed;
++ }
++ DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
++
++ addr = dma_map_single(dev, new_buf,
++ dpa_bp->size, DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dev, addr)))
++ goto dma_map_failed;
++
++ bm_buffer_set64(&bmb[i], addr);
++ }
++
++release_bufs:
++ /* Release the buffers. In case bman is busy, keep trying
++ * until successful. bman_release() is guaranteed to succeed
++ * in a reasonable amount of time
++ */
++ while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
++ cpu_relax();
++ return i;
++
++dma_map_failed:
++ kfree_skb(skb);
++
++build_skb_failed:
++netdev_alloc_failed:
++ net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
++ WARN_ONCE(1, "Memory allocation failure on Rx\n");
++
++ bm_buffer_set64(&bmb[i], 0);
++ /* Avoid releasing a completely null buffer; bman_release() requires
++ * at least one buffer.
++ */
++ if (likely(i))
++ goto release_bufs;
++
++ return 0;
++}
++
++/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
++static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
++{
++ int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
++ *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
++}
++
++int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
++{
++ int i;
++
++ /* Give each CPU an allotment of "config_count" buffers */
++ for_each_possible_cpu(i) {
++ int j;
++
++ /* Although we access another CPU's counters here
++ * we do it at boot time so it is safe
++ */
++ for (j = 0; j < dpa_bp->config_count; j += 8)
++ dpa_bp_add_8_bufs(dpa_bp, i);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(dpa_bp_priv_seed);
++
++/* Add buffers (pages) for Rx processing whenever the bpool count falls
++ * below REFILL_THRESHOLD.
++ */
++int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
++{
++ int count = *countptr;
++ int new_bufs;
++
++ if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
++ do {
++ new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
++ if (unlikely(!new_bufs)) {
++ /* Avoid looping forever if we've temporarily
++ * run out of memory. We'll try again at the
++ * next NAPI cycle.
++ */
++ break;
++ }
++ count += new_bufs;
++ } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT);
++
++ *countptr = count;
++ if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(dpaa_eth_refill_bpools);
++
++/* Cleanup function for outgoing frame descriptors that were built on Tx path,
++ * either contiguous frames or scatter/gather ones.
++ * Skb freeing is not handled here.
++ *
++ * This function may be called on error paths in the Tx function, so guard
++ * against cases when not all fd relevant fields were filled in.
++ *
++ * Return the skb backpointer, since for S/G frames the buffer containing it
++ * gets freed here.
++ */
++struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
++ const struct qm_fd *fd)
++{
++ const struct qm_sg_entry *sgt;
++ int i;
++ struct dpa_bp *dpa_bp = priv->dpa_bp;
++ dma_addr_t addr = qm_fd_addr(fd);
++ dma_addr_t sg_addr;
++ struct sk_buff **skbh;
++ struct sk_buff *skb = NULL;
++ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
++ int nr_frags;
++ int sg_len;
++
++ /* retrieve skb back pointer */
++ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
++
++ if (unlikely(fd->format == qm_fd_sg)) {
++ nr_frags = skb_shinfo(skb)->nr_frags;
++ dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
++ sizeof(struct qm_sg_entry) * (1 + nr_frags),
++ dma_dir);
++
++ /* The sgt buffer has been allocated with netdev_alloc_frag(),
++ * it's from lowmem.
++ */
++ sgt = phys_to_virt(addr + dpa_fd_offset(fd));
++#ifdef CONFIG_FSL_DPAA_1588
++ if (priv->tsu && priv->tsu->valid &&
++ priv->tsu->hwts_tx_en_ioctl)
++ dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
++#endif
++#ifdef CONFIG_FSL_DPAA_TS
++ if (unlikely(priv->ts_tx_en &&
++ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
++ struct skb_shared_hwtstamps shhwtstamps;
++
++ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
++ skb_tstamp_tx(skb, &shhwtstamps);
++ }
++#endif /* CONFIG_FSL_DPAA_TS */
++
++ /* sgt[0] is from lowmem, was dma_map_single()-ed */
++ sg_addr = qm_sg_addr(&sgt[0]);
++ sg_len = qm_sg_entry_get_len(&sgt[0]);
++ dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
++
++ /* remaining pages were mapped with dma_map_page() */
++ for (i = 1; i <= nr_frags; i++) {
++ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
++ sg_addr = qm_sg_addr(&sgt[i]);
++ sg_len = qm_sg_entry_get_len(&sgt[i]);
++ dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
++ }
++
++ /* Free the page frag that we allocated on Tx */
++ put_page(virt_to_head_page(sgt));
++ } else {
++ dma_unmap_single(dpa_bp->dev, addr,
++ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
++#ifdef CONFIG_FSL_DPAA_TS
++ /* get the timestamp for non-SG frames */
++#ifdef CONFIG_FSL_DPAA_1588
++ if (priv->tsu && priv->tsu->valid &&
++ priv->tsu->hwts_tx_en_ioctl)
++ dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
++#endif
++ if (unlikely(priv->ts_tx_en &&
++ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
++ struct skb_shared_hwtstamps shhwtstamps;
++
++ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
++ skb_tstamp_tx(skb, &shhwtstamps);
++ }
++#endif
++ }
++
++ return skb;
++}
++EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
++
++#ifndef CONFIG_FSL_DPAA_TS
++bool dpa_skb_is_recyclable(struct sk_buff *skb)
++{
++#ifndef CONFIG_PPC
++ /* Do not recycle skbs realigned by the errata workaround */
++ if (unlikely(dpaa_errata_a010022) && skb->mark == NONREC_MARK)
++ return false;
++#endif
++
++ /* No recycling possible if skb buffer is kmalloc'ed */
++ if (skb->head_frag == 0)
++ return false;
++
++ /* or if it's a userspace buffer */
++ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
++ return false;
++
++ /* or if it's cloned or shared */
++ if (skb_shared(skb) || skb_cloned(skb) ||
++ skb->fclone != SKB_FCLONE_UNAVAILABLE)
++ return false;
++
++ return true;
++}
++EXPORT_SYMBOL(dpa_skb_is_recyclable);
++
++bool dpa_buf_is_recyclable(struct sk_buff *skb,
++ uint32_t min_size,
++ uint16_t min_offset,
++ unsigned char **new_buf_start)
++{
++ unsigned char *new;
++
++ /* In order to recycle a buffer, the following conditions must be met:
++ * - buffer size no less than the buffer pool size
++ * - buffer size no higher than an upper limit (to avoid moving too much
++ * system memory to the buffer pools)
++ * - buffer address aligned to cacheline bytes
++ * - offset of data from start of buffer no lower than a minimum value
++ * - offset of data from start of buffer no higher than a maximum value
++ */
++ new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
++
++ /* left align to the nearest cacheline */
++ new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
++
++ if (likely(new >= skb->head &&
++ new >= (skb->data - DPA_MAX_FD_OFFSET) &&
++ skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
++ *new_buf_start = new;
++ return true;
++ }
++
++ return false;
++}
++EXPORT_SYMBOL(dpa_buf_is_recyclable);
++#endif
++
++/* Build a linear skb around the received buffer.
++ * We are guaranteed there is enough room at the end of the data buffer to
++ * accommodate the shared info area of the skb.
++ */
++static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
++ const struct qm_fd *fd, int *use_gro)
++{
++ dma_addr_t addr = qm_fd_addr(fd);
++ ssize_t fd_off = dpa_fd_offset(fd);
++ void *vaddr;
++ const fm_prs_result_t *parse_results;
++ struct sk_buff *skb = NULL, **skbh;
++
++ vaddr = phys_to_virt(addr);
++ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
++
++ /* Retrieve the skb and adjust data and tail pointers, to make sure
++ * forwarded skbs will have enough space on Tx if extra headers
++ * are added.
++ */
++ DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
++
++#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
++ /* When using jumbo Rx buffers, we risk having frames dropped due to
++ * the socket backlog reaching its maximum allowed size.
++ * Use the frame length for the skb truesize instead of the buffer
++ * size, as this is the size of the data that actually gets copied to
++ * userspace.
++ * The stack may increase the payload. In this case, it will want to
++ * warn us that the frame length is larger than the truesize. We
++ * bypass the warning.
++ */
++#ifndef CONFIG_PPC
++ /* We do not support Jumbo frames on LS1043 and thus we edit
++ * the skb truesize only when the 4k errata is not present.
++ */
++ if (likely(!dpaa_errata_a010022))
++#endif
++ skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd));
++#endif
++
++ DPA_BUG_ON(fd_off != priv->rx_headroom);
++ skb_reserve(skb, fd_off);
++ skb_put(skb, dpa_fd_length(fd));
++
++ /* Peek at the parse results for csum validation */
++ parse_results = (const fm_prs_result_t *)(vaddr +
++ DPA_RX_PRIV_DATA_SIZE);
++ _dpa_process_parse_results(parse_results, fd, skb, use_gro);
++
++#ifdef CONFIG_FSL_DPAA_1588
++ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
++ dpa_ptp_store_rxstamp(priv, skb, vaddr);
++#endif
++#ifdef CONFIG_FSL_DPAA_TS
++ if (priv->ts_rx_en)
++ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
++#endif /* CONFIG_FSL_DPAA_TS */
++
++ return skb;
++}
++
++
++/* Build an skb with the data of the first S/G entry in the linear portion and
++ * the rest of the frame as skb fragments.
++ *
++ * The page fragment holding the S/G Table is recycled here.
++ */
++static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
++ const struct qm_fd *fd, int *use_gro,
++ int *count_ptr)
++{
++ const struct qm_sg_entry *sgt;
++ dma_addr_t addr = qm_fd_addr(fd);
++ ssize_t fd_off = dpa_fd_offset(fd);
++ dma_addr_t sg_addr;
++ void *vaddr, *sg_vaddr;
++ struct dpa_bp *dpa_bp;
++ struct page *page, *head_page;
++ int frag_offset, frag_len;
++ int page_offset;
++ int i;
++ const fm_prs_result_t *parse_results;
++ struct sk_buff *skb = NULL, *skb_tmp, **skbh;
++
++ vaddr = phys_to_virt(addr);
++ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
++
++ dpa_bp = priv->dpa_bp;
++ /* Iterate through the SGT entries and add data buffers to the skb */
++ sgt = vaddr + fd_off;
++ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
++ /* Extension bit is not supported */
++ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
++
++ /* We use a single global Rx pool */
++ DPA_BUG_ON(dpa_bp !=
++ dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
++
++ sg_addr = qm_sg_addr(&sgt[i]);
++ sg_vaddr = phys_to_virt(sg_addr);
++ DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
++ SMP_CACHE_BYTES));
++
++ dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
++ DMA_BIDIRECTIONAL);
++ if (i == 0) {
++ DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
++ DPA_BUG_ON(skb->head != sg_vaddr);
++#ifdef CONFIG_FSL_DPAA_1588
++ if (priv->tsu && priv->tsu->valid &&
++ priv->tsu->hwts_rx_en_ioctl)
++ dpa_ptp_store_rxstamp(priv, skb, vaddr);
++#endif
++#ifdef CONFIG_FSL_DPAA_TS
++ if (priv->ts_rx_en)
++ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
++#endif /* CONFIG_FSL_DPAA_TS */
++
++ /* In the case of a SG frame, FMan stores the Internal
++ * Context in the buffer containing the sgt.
++ * Inspect the parse results before anything else.
++ */
++ parse_results = (const fm_prs_result_t *)(vaddr +
++ DPA_RX_PRIV_DATA_SIZE);
++ _dpa_process_parse_results(parse_results, fd, skb,
++ use_gro);
++
++ /* Make sure forwarded skbs will have enough space
++ * on Tx, if extra headers are added.
++ */
++ DPA_BUG_ON(fd_off != priv->rx_headroom);
++ skb_reserve(skb, fd_off);
++ skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
++ } else {
++ /* Not the first S/G entry; all data from buffer will
++ * be added in an skb fragment; fragment index is offset
++ * by one since first S/G entry was incorporated in the
++ * linear part of the skb.
++ *
++ * Caution: 'page' may be a tail page.
++ */
++ DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
++ page = virt_to_page(sg_vaddr);
++ head_page = virt_to_head_page(sg_vaddr);
++
++ /* Free (only) the skbuff shell because its data buffer
++ * is already a frag in the main skb.
++ */
++ get_page(head_page);
++ dev_kfree_skb(skb_tmp);
++
++ /* Compute offset in (possibly tail) page */
++ page_offset = ((unsigned long)sg_vaddr &
++ (PAGE_SIZE - 1)) +
++ (page_address(page) - page_address(head_page));
++ /* page_offset only refers to the beginning of sgt[i];
++ * but the buffer itself may have an internal offset.
++ */
++ frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
++ page_offset;
++ frag_len = qm_sg_entry_get_len(&sgt[i]);
++ /* skb_add_rx_frag() does no checking on the page; if
++ * we pass it a tail page, we'll end up with
++ * bad page accounting and eventually with segfaults.
++ */
++ skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
++ frag_len, dpa_bp->size);
++ }
++ /* Update the pool count for the current {cpu x bpool} */
++ (*count_ptr)--;
++
++ if (qm_sg_entry_get_final(&sgt[i]))
++ break;
++ }
++ WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
++
++ /* recycle the SGT fragment */
++ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
++ dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
++ return skb;
++}
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++static inline int dpa_skb_loop(const struct dpa_priv_s *priv,
++ struct sk_buff *skb)
++{
++ if (unlikely(priv->loop_to < 0))
++ return 0; /* loop disabled by default */
++
++ skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
++ dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
++
++ return 1; /* Frame Tx on the selected interface */
++}
++#endif
++
++void __hot _dpa_rx(struct net_device *net_dev,
++ struct qman_portal *portal,
++ const struct dpa_priv_s *priv,
++ struct dpa_percpu_priv_s *percpu_priv,
++ const struct qm_fd *fd,
++ u32 fqid,
++ int *count_ptr)
++{
++ struct dpa_bp *dpa_bp;
++ struct sk_buff *skb;
++ dma_addr_t addr = qm_fd_addr(fd);
++ u32 fd_status = fd->status;
++ unsigned int skb_len;
++ struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
++ int use_gro = net_dev->features & NETIF_F_GRO;
++
++ if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
++ if (netif_msg_hw(priv) && net_ratelimit())
++ netdev_warn(net_dev, "FD status = 0x%08x\n",
++ fd_status & FM_FD_STAT_RX_ERRORS);
++
++ percpu_stats->rx_errors++;
++ goto _release_frame;
++ }
++
++ dpa_bp = priv->dpa_bp;
++ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
++
++ /* prefetch the first 64 bytes of the frame or the SGT start */
++ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
++ prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
++
++ /* The only FD types that we may receive are contig and S/G */
++ DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
++
++ if (likely(fd->format == qm_fd_contig)) {
++#ifdef CONFIG_FSL_DPAA_HOOKS
++ /* Execute the Rx processing hook, if it exists. */
++ if (dpaa_eth_hooks.rx_default &&
++ dpaa_eth_hooks.rx_default((void *)fd, net_dev,
++ fqid) == DPAA_ETH_STOLEN) {
++ /* won't count the rx bytes in */
++ return;
++ }
++#endif
++ skb = contig_fd_to_skb(priv, fd, &use_gro);
++ } else {
++ skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr);
++ percpu_priv->rx_sg++;
++ }
++
++ /* Account for either the contig buffer or the SGT buffer (depending on
++ * which case we were in) having been removed from the pool.
++ */
++ (*count_ptr)--;
++ skb->protocol = eth_type_trans(skb, net_dev);
++
++ /* IP Reassembled frames are allowed to be larger than MTU */
++ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
++ !(fd_status & FM_FD_IPR))) {
++ percpu_stats->rx_dropped++;
++ goto drop_bad_frame;
++ }
++
++ skb_len = skb->len;
++
++#ifdef CONFIG_FSL_DPAA_DBG_LOOP
++ if (dpa_skb_loop(priv, skb)) {
++ percpu_stats->rx_packets++;
++ percpu_stats->rx_bytes += skb_len;
++ return;
++ }
++#endif
++
++ if (use_gro) {
++ gro_result_t gro_result;
++ const struct qman_portal_config *pc =
++ qman_p_get_portal_config(portal);
++ struct dpa_napi_portal *np = &percpu_priv->np[pc->index];
++
++ np->p = portal;
++ gro_result = napi_gro_receive(&np->napi, skb);
++ /* If frame is dropped by the stack, rx_dropped counter is
++ * incremented automatically, so no need for us to update it
++ */
++ if (unlikely(gro_result == GRO_DROP))
++ goto packet_dropped;
++ } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
++ goto packet_dropped;
++
++ percpu_stats->rx_packets++;
++ percpu_stats->rx_bytes += skb_len;
++
++packet_dropped:
++ return;
++
++drop_bad_frame:
++ dev_kfree_skb(skb);
++ return;
++
++_release_frame:
++ dpa_fd_release(net_dev, fd);
++}
++
++int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
++ struct sk_buff *skb, struct qm_fd *fd,
++ int *count_ptr, int *offset)
++{
++ struct sk_buff **skbh;
++ dma_addr_t addr;
++ struct dpa_bp *dpa_bp = priv->dpa_bp;
++ struct net_device *net_dev = priv->net_dev;
++ int err;
++ enum dma_data_direction dma_dir;
++ unsigned char *buffer_start;
++ int dma_map_size;
++
++#ifndef CONFIG_FSL_DPAA_TS
++ /* Check recycling conditions; only if timestamp support is not
++ * enabled, otherwise we need the fd back on tx confirmation
++ */
++
++ /* We can recycle the buffer if:
++ * - the pool is not full
++ * - the buffer meets the skb recycling conditions
++ * - the buffer meets our own (size, offset, align) conditions
++ */
++ if (likely((*count_ptr < dpa_bp->target_count) &&
++ dpa_skb_is_recyclable(skb) &&
++ dpa_buf_is_recyclable(skb, dpa_bp->size,
++ priv->tx_headroom, &buffer_start))) {
++ /* Buffer is recyclable; use the new start address
++ * and set fd parameters and DMA mapping direction
++ */
++ fd->bpid = dpa_bp->bpid;
++ DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET);
++ fd->offset = (uint16_t)(skb->data - buffer_start);
++ dma_dir = DMA_BIDIRECTIONAL;
++ dma_map_size = dpa_bp->size;
++
++ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
++ *offset = skb_headroom(skb) - fd->offset;
++ } else
++#endif
++ {
++ /* Not recyclable.
++ * We are guaranteed to have at least tx_headroom bytes
++ * available, so just use that for offset.
++ */
++ fd->bpid = 0xff;
++ buffer_start = skb->data - priv->tx_headroom;
++ fd->offset = priv->tx_headroom;
++ dma_dir = DMA_TO_DEVICE;
++ dma_map_size = skb_tail_pointer(skb) - buffer_start;
++
++ /* The buffer will be Tx-confirmed, but the TxConf cb must
++ * necessarily look at our Tx private data to retrieve the
++ * skbuff. (In short: can't use DPA_WRITE_SKB_PTR() here.)
++ */
++ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
++ }
++
++ /* Enable L3/L4 hardware checksum computation.
++ *
++ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
++ * need to write into the skb.
++ */
++ err = dpa_enable_tx_csum(priv, skb, fd,
++ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
++ if (unlikely(err < 0)) {
++ if (netif_msg_tx_err(priv) && net_ratelimit())
++ netdev_err(net_dev, "HW csum error: %d\n", err);
++ return err;
++ }
++
++ /* Fill in the rest of the FD fields */
++ fd->format = qm_fd_contig;
++ fd->length20 = skb->len;
++ fd->cmd |= FM_FD_CMD_FCO;
++
++ /* Map the entire buffer size that may be seen by FMan, but no more */
++ addr = dma_map_single(dpa_bp->dev, skbh, dma_map_size, dma_dir);
++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
++ if (netif_msg_tx_err(priv) && net_ratelimit())
++ netdev_err(net_dev, "dma_map_single() failed\n");
++ return -EINVAL;
++ }
++ qm_fd_addr_set64(fd, addr);
++
++ return 0;
++}
++EXPORT_SYMBOL(skb_to_contig_fd);
++
++#ifndef CONFIG_PPC
++/* Verify the conditions that trigger the A010022 errata: data that is not
++ * aligned to 16 bytes or a buffer that crosses a 4K memory address boundary.
++ */
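++/* HAS_DMA_ISSUE() is defined elsewhere in the driver; a minimal sketch of
++ * the 4K-crossing test it is assumed to perform is
++ *
++ * #define HAS_DMA_ISSUE(start, size) \
++ * ((((uintptr_t)(start)) ^ ((uintptr_t)(start) + (size) - 1)) & ~0xFFFUL)
++ *
++ * which is nonzero exactly when [start, start + size) spans a 4K boundary.
++ */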
++static bool a010022_check_skb(struct sk_buff *skb, struct dpa_priv_s *priv)
++{
++ int nr_frags, i = 0;
++ skb_frag_t *frag;
++
++ /* Check if the headroom is aligned */
++ if (((uintptr_t)skb->data - priv->tx_headroom) %
++ priv->buf_layout[TX].data_align != 0)
++ return true;
++
++ /* Check if the headroom crosses a boundary */
++ if (HAS_DMA_ISSUE(skb->head, skb_headroom(skb)))
++ return true;
++
++ /* Check if the non-paged data crosses a boundary */
++ if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb)))
++ return true;
++
++ /* Check if the entire linear skb crosses a boundary */
++ if (HAS_DMA_ISSUE(skb->head, skb_end_offset(skb)))
++ return true;
++
++ nr_frags = skb_shinfo(skb)->nr_frags;
++
++ while (i < nr_frags) {
++ frag = &skb_shinfo(skb)->frags[i];
++
++ /* Check if a paged fragment crosses a boundary from its
++ * offset to its end.
++ */
++ if (HAS_DMA_ISSUE(frag->page_offset, frag->size))
++ return true;
++
++ i++;
++ }
++
++ return false;
++}
++
++/* Realign the skb by copying its contents to the start of a newly allocated
++ * page. Build a new skb around the new buffer and release the old one.
++ * A performance drop should be expected.
++ */
++static struct sk_buff *a010022_realign_skb(struct sk_buff *skb,
++ struct dpa_priv_s *priv)
++{
++ int trans_offset = skb_transport_offset(skb);
++ int net_offset = skb_network_offset(skb);
++ struct sk_buff *nskb = NULL;
++ int nsize, headroom;
++ struct page *npage;
++ void *npage_addr;
++
++ /* Guarantee the minimum required headroom */
++ if (skb_headroom(skb) >= priv->tx_headroom)
++ headroom = skb_headroom(skb);
++ else
++ headroom = priv->tx_headroom;
++
++ npage = alloc_page(GFP_ATOMIC);
++ if (unlikely(!npage)) {
++ WARN_ONCE(1, "Memory allocation failure\n");
++ return NULL;
++ }
++ npage_addr = page_address(npage);
++
++ /* For the new skb we only need the old one's data (both non-paged and
++ * paged) and a headroom large enough to fit our private info. We can
++ * skip the old tailroom.
++ *
++ * Make sure the new linearized buffer will not exceed a page's size.
++ */
++ nsize = headroom + skb->len +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++ if (unlikely(nsize > 4096))
++ goto err;
++
++ nskb = build_skb(npage_addr, nsize);
++ if (unlikely(!nskb))
++ goto err;
++
++ /* Reserve only the needed headroom in order to guarantee the data's
++ * alignment.
++ * Code borrowed and adapted from skb_copy().
++ */
++ skb_reserve(nskb, priv->tx_headroom);
++ skb_put(nskb, skb->len);
++ if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
++ WARN_ONCE(1, "skb parsing failure\n");
++ goto err;
++ }
++ copy_skb_header(nskb, skb);
++
++#ifdef CONFIG_FSL_DPAA_TS
++ /* Copy relevant timestamp info from the old skb to the new */
++ if (priv->ts_tx_en) {
++ skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags;
++ skb_shinfo(nskb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
++ skb_shinfo(nskb)->tskey = skb_shinfo(skb)->tskey;
++ if (skb->sk)
++ skb_set_owner_w(nskb, skb->sk);
++ }
++#endif
++ /* We move the headroom when we align it so we have to reset the
++ * network and transport header offsets relative to the new data
++ * pointer. The checksum offload relies on these offsets.
++ */
++ skb_set_network_header(nskb, net_offset);
++ skb_set_transport_header(nskb, trans_offset);
++
++ /* We don't want the buffer to be recycled so we mark it accordingly */
++ nskb->mark = NONREC_MARK;
++
++ dev_kfree_skb(skb);
++ return nskb;
++
++err:
++ if (nskb)
++ dev_kfree_skb(nskb);
++ put_page(npage);
++ return NULL;
++}
++#endif
++
++int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
++ struct sk_buff *skb, struct qm_fd *fd)
++{
++ struct dpa_bp *dpa_bp = priv->dpa_bp;
++ dma_addr_t addr;
++ dma_addr_t sg_addr;
++ struct sk_buff **skbh;
++ struct net_device *net_dev = priv->net_dev;
++ int sg_len, sgt_size;
++ int err;
++
++ struct qm_sg_entry *sgt;
++ void *sgt_buf;
++ skb_frag_t *frag;
++ int i = 0, j = 0;
++ int nr_frags;
++ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
++
++ nr_frags = skb_shinfo(skb)->nr_frags;
++ fd->format = qm_fd_sg;
++
++ sgt_size = sizeof(struct qm_sg_entry) * (1 + nr_frags);
++
++ /* Get a page frag to store the SGTable, or a full page if the errata
++ * is in place and we need to avoid crossing a 4k boundary.
++ */
++#ifndef CONFIG_PPC
++ if (unlikely(dpaa_errata_a010022))
++ sgt_buf = page_address(alloc_page(GFP_ATOMIC));
++ else
++#endif
++ sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size);
++ if (unlikely(!sgt_buf)) {
++ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
++ return -ENOMEM;
++ }
++
++ /* the allocator does not zero the memory it hands out, so clear the SGT */
++ memset(sgt_buf, 0, priv->tx_headroom + sgt_size);
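++
++ /* Resulting sgt_buf layout (sketch):
++ * [ tx_headroom: skb back-pointer + HW parse/private area | sgt[0..nr_frags] ]
++ * sgt[0] maps the linear part; sgt[1..nr_frags] map the page frags.
++ */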
++
++ /* Enable L3/L4 hardware checksum computation.
++ *
++ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
++ * need to write into the skb.
++ */
++ err = dpa_enable_tx_csum(priv, skb, fd,
++ sgt_buf + DPA_TX_PRIV_DATA_SIZE);
++ if (unlikely(err < 0)) {
++ if (netif_msg_tx_err(priv) && net_ratelimit())
++ netdev_err(net_dev, "HW csum error: %d\n", err);
++ goto csum_failed;
++ }
++
++ /* Assign the data from skb->data to the first SG list entry */
++ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
++ sg_len = skb_headlen(skb);
++ qm_sg_entry_set_bpid(&sgt[0], 0xff);
++ qm_sg_entry_set_offset(&sgt[0], 0);
++ qm_sg_entry_set_len(&sgt[0], sg_len);
++ qm_sg_entry_set_ext(&sgt[0], 0);
++ qm_sg_entry_set_final(&sgt[0], 0);
++
++ addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir);
++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
++ dev_err(dpa_bp->dev, "DMA mapping failed");
++ err = -EINVAL;
++ goto sg0_map_failed;
++ }
++
++ qm_sg_entry_set64(&sgt[0], addr);
++
++ /* populate the rest of SGT entries */
++ for (i = 1; i <= nr_frags; i++) {
++ frag = &skb_shinfo(skb)->frags[i - 1];
++ qm_sg_entry_set_bpid(&sgt[i], 0xff);
++ qm_sg_entry_set_offset(&sgt[i], 0);
++ qm_sg_entry_set_len(&sgt[i], frag->size);
++ qm_sg_entry_set_ext(&sgt[i], 0);
++
++ if (i == nr_frags)
++ qm_sg_entry_set_final(&sgt[i], 1);
++ else
++ qm_sg_entry_set_final(&sgt[i], 0);
++
++ DPA_BUG_ON(!skb_frag_page(frag));
++ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
++ dma_dir);
++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
++ dev_err(dpa_bp->dev, "DMA mapping failed");
++ err = -EINVAL;
++ goto sg_map_failed;
++ }
++
++ /* keep the offset in the address */
++ qm_sg_entry_set64(&sgt[i], addr);
++ }
++
++ fd->length20 = skb->len;
++ fd->offset = priv->tx_headroom;
++
++ /* DMA map the SGT page */
++ DPA_WRITE_SKB_PTR(skb, skbh, sgt_buf, 0);
++ addr = dma_map_single(dpa_bp->dev, sgt_buf,
++ priv->tx_headroom + sgt_size,
++ dma_dir);
++
++ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
++ dev_err(dpa_bp->dev, "DMA mapping failed");
++ err = -EINVAL;
++ goto sgt_map_failed;
++ }
++
++ qm_fd_addr_set64(fd, addr);
++ fd->bpid = 0xff;
++ fd->cmd |= FM_FD_CMD_FCO;
++
++ return 0;
++
++sgt_map_failed:
++sg_map_failed:
++ for (j = 0; j < i; j++) {
++ sg_addr = qm_sg_addr(&sgt[j]);
++ dma_unmap_page(dpa_bp->dev, sg_addr,
++ qm_sg_entry_get_len(&sgt[j]), dma_dir);
++ }
++sg0_map_failed:
++csum_failed:
++ put_page(virt_to_head_page(sgt_buf));
++
++ return err;
++}
++EXPORT_SYMBOL(skb_to_sg_fd);
++
++int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
++{
++ struct dpa_priv_s *priv;
++ const int queue_mapping = dpa_get_queue_mapping(skb);
++ struct qman_fq *egress_fq, *conf_fq;
++
++#ifdef CONFIG_FSL_DPAA_HOOKS
++ /* If there is a Tx hook, run it. */
++ if (dpaa_eth_hooks.tx &&
++ dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
++ /* won't update any Tx stats */
++ return NETDEV_TX_OK;
++#endif
++
++ priv = netdev_priv(net_dev);
++
++#ifdef CONFIG_FSL_DPAA_CEETM
++ if (priv->ceetm_en)
++ return ceetm_tx(skb, net_dev);
++#endif
++
++ egress_fq = priv->egress_fqs[queue_mapping];
++ conf_fq = priv->conf_fqs[queue_mapping];
++
++ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
++}
++
++int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
++ struct qman_fq *egress_fq, struct qman_fq *conf_fq)
++{
++ struct dpa_priv_s *priv;
++ struct qm_fd fd;
++ struct dpa_percpu_priv_s *percpu_priv;
++ struct rtnl_link_stats64 *percpu_stats;
++ int err = 0;
++ bool nonlinear;
++ int *countptr, offset = 0;
++
++ priv = netdev_priv(net_dev);
++ /* Non-migratable context, safe to use raw_cpu_ptr */
++ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
++ percpu_stats = &percpu_priv->stats;
++ countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
++
++ clear_fd(&fd);
++
++#ifndef CONFIG_PPC
++ if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb, priv)) {
++ skb = a010022_realign_skb(skb, priv);
++ if (!skb)
++ goto skb_to_fd_failed;
++ }
++#endif
++
++ nonlinear = skb_is_nonlinear(skb);
++
++#ifdef CONFIG_FSL_DPAA_1588
++ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
++ fd.cmd |= FM_FD_CMD_UPD;
++#endif
++#ifdef CONFIG_FSL_DPAA_TS
++ if (unlikely(priv->ts_tx_en &&
++ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
++ fd.cmd |= FM_FD_CMD_UPD;
++ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
++#endif /* CONFIG_FSL_DPAA_TS */
++
++ /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
++ * we don't feed FMan with more fragments than it supports.
++ * Note that we use the first SGT entry to store the linear part of
++ * the skb, so we're one extra frag short.
++ */
++ if (nonlinear &&
++ likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
++ /* Just create a S/G fd based on the skb */
++ err = skb_to_sg_fd(priv, skb, &fd);
++ percpu_priv->tx_frag_skbuffs++;
++ } else {
++ /* Make sure we have enough headroom to accommodate private
++ * data, parse results, etc. Normally this shouldn't happen if
++ * we're here via the standard kernel stack.
++ */
++ if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
++ struct sk_buff *skb_new;
++
++ skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
++ if (unlikely(!skb_new)) {
++ dev_kfree_skb(skb);
++ percpu_stats->tx_errors++;
++ return NETDEV_TX_OK;
++ }
++ dev_kfree_skb(skb);
++ skb = skb_new;
++ }
++
++ /* We're going to store the skb backpointer at the beginning
++ * of the data buffer, so we need a privately owned skb
++ */
++
++ /* Code borrowed from skb_unshare(). */
++ if (skb_cloned(skb)) {
++ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
++ kfree_skb(skb);
++ skb = nskb;
++#ifndef CONFIG_PPC
++ if (unlikely(dpaa_errata_a010022) &&
++ a010022_check_skb(skb, priv)) {
++ skb = a010022_realign_skb(skb, priv);
++ if (!skb)
++ goto skb_to_fd_failed;
++ }
++#endif
++ /* skb_copy() has now linearized the skbuff. */
++ } else if (unlikely(nonlinear)) {
++ /* We are here because the egress skb contains
++ * more fragments than we support. In this case,
++ * we have no choice but to linearize it ourselves.
++ */
++ err = __skb_linearize(skb);
++ }
++ if (unlikely(!skb || err < 0))
++ /* Common out-of-memory error path */
++ goto enomem;
++
++ err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
++ }
++ if (unlikely(err < 0))
++ goto skb_to_fd_failed;
++
++ if (fd.bpid != 0xff) {
++ skb_recycle(skb);
++ /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
++ * but we need the skb to look as if returned by build_skb().
++ * We need to manually adjust the tailptr as well.
++ */
++ skb->data = skb->head + offset;
++ skb_reset_tail_pointer(skb);
++
++ (*countptr)++;
++ percpu_priv->tx_returned++;
++ }
++
++ if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0))
++ goto xmit_failed;
++
++ netif_trans_update(net_dev);
++ return NETDEV_TX_OK;
++
++xmit_failed:
++ if (fd.bpid != 0xff) {
++ (*countptr)--;
++ percpu_priv->tx_returned--;
++ dpa_fd_release(net_dev, &fd);
++ percpu_stats->tx_errors++;
++ return NETDEV_TX_OK;
++ }
++ _dpa_cleanup_tx_fd(priv, &fd);
++skb_to_fd_failed:
++enomem:
++ percpu_stats->tx_errors++;
++ dev_kfree_skb(skb);
++ return NETDEV_TX_OK;
++}
++EXPORT_SYMBOL(dpa_tx_extended);
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
+new file mode 100644
+index 00000000..3542d0b2
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
+@@ -0,0 +1,278 @@
++/* Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include <linux/io.h>
++#include <linux/of_net.h>
++#include "dpaa_eth.h"
++#include "mac.h" /* struct mac_device */
++#ifdef CONFIG_FSL_DPAA_1588
++#include "dpaa_1588.h"
++#endif
++
++static ssize_t dpaa_eth_show_addr(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++ struct mac_device *mac_dev = priv->mac_dev;
++
++ if (mac_dev)
++ return sprintf(buf, "%llx",
++ (unsigned long long)mac_dev->res->start);
++ else
++ return sprintf(buf, "none");
++}
++
++static ssize_t dpaa_eth_show_type(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++ ssize_t res = 0;
++
++ if (priv)
++ res = sprintf(buf, "%s", priv->if_type);
++
++ return res;
++}
++
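++/* Consecutive FQIDs of the same type are coalesced into ranges; sample
++ * output (FQ numbers illustrative):
++ *
++ * Rx error: 259
++ * Rx default: 260
++ * Tx confirmation (mq): 128 - 135
++ * Tx error: 142
++ * Tx default confirmation: 143
++ * Tx: 896 - 903
++ */
++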
++static ssize_t dpaa_eth_show_fqids(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++ ssize_t bytes = 0;
++ int i = 0;
++ char *str;
++ struct dpa_fq *fq;
++ struct dpa_fq *tmp;
++ struct dpa_fq *prev = NULL;
++ u32 first_fqid = 0;
++ u32 last_fqid = 0;
++ char *prevstr = NULL;
++
++ list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
++ switch (fq->fq_type) {
++ case FQ_TYPE_RX_DEFAULT:
++ str = "Rx default";
++ break;
++ case FQ_TYPE_RX_ERROR:
++ str = "Rx error";
++ break;
++ case FQ_TYPE_RX_PCD:
++ str = "Rx PCD";
++ break;
++ case FQ_TYPE_TX_CONFIRM:
++ str = "Tx default confirmation";
++ break;
++ case FQ_TYPE_TX_CONF_MQ:
++ str = "Tx confirmation (mq)";
++ break;
++ case FQ_TYPE_TX_ERROR:
++ str = "Tx error";
++ break;
++ case FQ_TYPE_TX:
++ str = "Tx";
++ break;
++ case FQ_TYPE_RX_PCD_HI_PRIO:
++ str ="Rx PCD High Priority";
++ break;
++ default:
++ str = "Unknown";
++ }
++
++ if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
++ str != prevstr)) {
++ if (last_fqid == first_fqid)
++ bytes += sprintf(buf + bytes,
++ "%s: %d\n", prevstr, prev->fqid);
++ else
++ bytes += sprintf(buf + bytes,
++ "%s: %d - %d\n", prevstr,
++ first_fqid, last_fqid);
++ }
++
++ if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
++ last_fqid = fq->fqid;
++ else
++ first_fqid = last_fqid = fq->fqid;
++
++ prev = fq;
++ prevstr = str;
++ i++;
++ }
++
++ if (prev) {
++ if (last_fqid == first_fqid)
++ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
++ prev->fqid);
++ else
++ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
++ first_fqid, last_fqid);
++ }
++
++ return bytes;
++}
++
++static ssize_t dpaa_eth_show_bpids(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ ssize_t bytes = 0;
++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++ struct dpa_bp *dpa_bp = priv->dpa_bp;
++ int i = 0;
++
++ for (i = 0; i < priv->bp_count; i++)
++ bytes += snprintf(buf + bytes, PAGE_SIZE, "%u\n",
++ dpa_bp[i].bpid);
++
++ return bytes;
++}
++
++static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++ struct mac_device *mac_dev = priv->mac_dev;
++ int n = 0;
++
++ if (mac_dev)
++ n = fm_mac_dump_regs(mac_dev, buf, n);
++ else
++ return sprintf(buf, "no mac registers\n");
++
++ return n;
++}
++
++static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++ struct mac_device *mac_dev = priv->mac_dev;
++ int n = 0;
++
++ if (mac_dev)
++ n = fm_mac_dump_rx_stats(mac_dev, buf, n);
++ else
++ return sprintf(buf, "no mac rx stats\n");
++
++ return n;
++}
++
++static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++ struct mac_device *mac_dev = priv->mac_dev;
++ int n = 0;
++
++ if (mac_dev)
++ n = fm_mac_dump_tx_stats(mac_dev, buf, n);
++ else
++ return sprintf(buf, "no mac tx stats\n");
++
++ return n;
++}
++
++#ifdef CONFIG_FSL_DPAA_1588
++static ssize_t dpaa_eth_show_ptp_1588(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++
++ if (priv->tsu && priv->tsu->valid)
++ return sprintf(buf, "1\n");
++ else
++ return sprintf(buf, "0\n");
++}
++
++static ssize_t dpaa_eth_set_ptp_1588(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
++ unsigned int num;
++ unsigned long flags;
++
++ if (kstrtouint(buf, 0, &num) < 0)
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ if (num) {
++ if (priv->tsu)
++ priv->tsu->valid = TRUE;
++ } else {
++ if (priv->tsu)
++ priv->tsu->valid = FALSE;
++ }
++
++ local_irq_restore(flags);
++
++ return count;
++}
++#endif
++
++static struct device_attribute dpaa_eth_attrs[] = {
++ __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
++ __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL),
++ __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
++ __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
++ __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL),
++ __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL),
++ __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL),
++#ifdef CONFIG_FSL_DPAA_1588
++ __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588,
++ dpaa_eth_set_ptp_1588),
++#endif
++};
++
++void dpaa_eth_sysfs_init(struct device *dev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
++ if (device_create_file(dev, &dpaa_eth_attrs[i])) {
++ dev_err(dev, "Error creating sysfs file\n");
++ while (i > 0)
++ device_remove_file(dev, &dpaa_eth_attrs[--i]);
++ return;
++ }
++}
++EXPORT_SYMBOL(dpaa_eth_sysfs_init);
++
++void dpaa_eth_sysfs_remove(struct device *dev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
++ device_remove_file(dev, &dpaa_eth_attrs[i]);
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
+new file mode 100644
+index 00000000..30069ef9
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
+@@ -0,0 +1,144 @@
++/* Copyright 2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM dpaa_eth
++
++#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _DPAA_ETH_TRACE_H
++
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include "dpaa_eth.h"
++#include <linux/tracepoint.h>
++
++#define fd_format_name(format) { qm_fd_##format, #format }
++#define fd_format_list \
++ fd_format_name(contig), \
++ fd_format_name(sg)
++#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \
++ " status=0x%08x"
++
++/* This is used to declare a class of events.
++ * Individual events of this type will be defined below.
++ */
++
++/* Store details about a frame descriptor and the FQ on which it was
++ * transmitted/received.
++ */
++DECLARE_EVENT_CLASS(dpaa_eth_fd,
++ /* Trace function prototype */
++ TP_PROTO(struct net_device *netdev,
++ struct qman_fq *fq,
++ const struct qm_fd *fd),
++
++ /* Repeat argument list here */
++ TP_ARGS(netdev, fq, fd),
++
++ /* A structure containing the relevant information we want to record.
++ * Declare name and type for each normal element, name, type and size
++ * for arrays. Use __string for variable length strings.
++ */
++ TP_STRUCT__entry(
++ __field(u32, fqid)
++ __field(u64, fd_addr)
++ __field(u8, fd_format)
++ __field(u16, fd_offset)
++ __field(u32, fd_length)
++ __field(u32, fd_status)
++ __string(name, netdev->name)
++ ),
++
++ /* The function that assigns values to the above declared fields */
++ TP_fast_assign(
++ __entry->fqid = fq->fqid;
++ __entry->fd_addr = qm_fd_addr_get64(fd);
++ __entry->fd_format = fd->format;
++ __entry->fd_offset = dpa_fd_offset(fd);
++ __entry->fd_length = dpa_fd_length(fd);
++ __entry->fd_status = fd->status;
++ __assign_str(name, netdev->name);
++ ),
++
++ /* This is what gets printed when the trace event is triggered */
++ /* TODO: print the status using __print_flags() */
++ TP_printk(TR_FMT,
++ __get_str(name), __entry->fqid, __entry->fd_addr,
++ __print_symbolic(__entry->fd_format, fd_format_list),
++ __entry->fd_offset, __entry->fd_length, __entry->fd_status)
++);
++
++/* Now declare events of the above type. Format is:
++ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
++ */
++
++/* Tx (egress) fd */
++DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
++
++ TP_PROTO(struct net_device *netdev,
++ struct qman_fq *fq,
++ const struct qm_fd *fd),
++
++ TP_ARGS(netdev, fq, fd)
++);
++
++/* Rx fd */
++DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
++
++ TP_PROTO(struct net_device *netdev,
++ struct qman_fq *fq,
++ const struct qm_fd *fd),
++
++ TP_ARGS(netdev, fq, fd)
++);
++
++/* Tx confirmation fd */
++DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
++
++ TP_PROTO(struct net_device *netdev,
++ struct qman_fq *fq,
++ const struct qm_fd *fd),
++
++ TP_ARGS(netdev, fq, fd)
++);
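++
++/* The datapath fires these with the standard tracepoint calling convention,
++ * e.g. (sketch): trace_dpa_rx_fd(net_dev, fq, fd) on ingress and
++ * trace_dpa_tx_fd(net_dev, egress_fq, &fd) on egress.
++ */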
++
++/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
++ * The syntax is the same as for DECLARE_EVENT_CLASS().
++ */
++
++#endif /* _DPAA_ETH_TRACE_H */
++
++/* This must be outside ifdef _DPAA_ETH_TRACE_H */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE dpaa_eth_trace
++#include <trace/define_trace.h>
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
+new file mode 100644
+index 00000000..4b784662
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
+@@ -0,0 +1,544 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++ KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": " fmt
++#endif
++
++#include <linux/string.h>
++
++#include "dpaa_eth.h"
++#include "mac.h" /* struct mac_device */
++#include "dpaa_eth_common.h"
++
++static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
++ "interrupts",
++ "rx packets",
++ "tx packets",
++ "tx recycled",
++ "tx confirm",
++ "tx S/G",
++ "rx S/G",
++ "tx error",
++ "rx error",
++ "bp count"
++};
++
++static char dpa_stats_global[][ETH_GSTRING_LEN] = {
++ /* dpa rx errors */
++ "rx dma error",
++ "rx frame physical error",
++ "rx frame size error",
++ "rx header error",
++ "rx csum error",
++
++ /* demultiplexing errors */
++ "qman cg_tdrop",
++ "qman wred",
++ "qman error cond",
++ "qman early window",
++ "qman late window",
++ "qman fq tdrop",
++ "qman fq retired",
++ "qman orp disabled",
++
++ /* congestion related stats */
++ "congestion time (ms)",
++ "entered congestion",
++ "congested (0/1)"
++};
++
++#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
++#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
++
++static int __cold dpa_get_settings(struct net_device *net_dev,
++ struct ethtool_cmd *et_cmd)
++{
++ int _errno;
++ struct dpa_priv_s *priv;
++
++ priv = netdev_priv(net_dev);
++
++ if (priv->mac_dev == NULL) {
++ netdev_info(net_dev, "This is a MAC-less interface\n");
++ return -ENODEV;
++ }
++ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
++ netdev_dbg(net_dev, "phy device not initialized\n");
++ return 0;
++ }
++
++ _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
++ if (unlikely(_errno < 0))
++ netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno);
++
++ return _errno;
++}
++
++static int __cold dpa_set_settings(struct net_device *net_dev,
++ struct ethtool_cmd *et_cmd)
++{
++ int _errno;
++ struct dpa_priv_s *priv;
++
++ priv = netdev_priv(net_dev);
++
++ if (priv->mac_dev == NULL) {
++ netdev_info(net_dev, "This is a MAC-less interface\n");
++ return -ENODEV;
++ }
++ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
++ netdev_err(net_dev, "phy device not initialized\n");
++ return -ENODEV;
++ }
++
++ _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
++ if (unlikely(_errno < 0))
++ netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno);
++
++ return _errno;
++}
++
++static void __cold dpa_get_drvinfo(struct net_device *net_dev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ int _errno;
++
++ strncpy(drvinfo->driver, KBUILD_MODNAME,
++ sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
++ _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%X", 0);
++
++ if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
++ /* Truncated output */
++ netdev_notice(net_dev, "snprintf() = %d\n", _errno);
++ } else if (unlikely(_errno < 0)) {
++ netdev_warn(net_dev, "snprintf() = %d\n", _errno);
++ memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
++ }
++ strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
++ sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
++}
++
++static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
++{
++ return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
++}
++
++static void __cold dpa_set_msglevel(struct net_device *net_dev,
++ uint32_t msg_enable)
++{
++ ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
++}
++
++static int __cold dpa_nway_reset(struct net_device *net_dev)
++{
++ int _errno;
++ struct dpa_priv_s *priv;
++
++ priv = netdev_priv(net_dev);
++
++ if (priv->mac_dev == NULL) {
++ netdev_info(net_dev, "This is a MAC-less interface\n");
++ return -ENODEV;
++ }
++ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
++ netdev_err(net_dev, "phy device not initialized\n");
++ return -ENODEV;
++ }
++
++ _errno = 0;
++ if (priv->mac_dev->phy_dev->autoneg) {
++ _errno = phy_start_aneg(priv->mac_dev->phy_dev);
++ if (unlikely(_errno < 0))
++ netdev_err(net_dev, "phy_start_aneg() = %d\n",
++ _errno);
++ }
++
++ return _errno;
++}
++
++static void __cold dpa_get_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *epause)
++{
++ struct dpa_priv_s *priv;
++ struct mac_device *mac_dev;
++ struct phy_device *phy_dev;
++
++ priv = netdev_priv(net_dev);
++ mac_dev = priv->mac_dev;
++
++ if (mac_dev == NULL) {
++ netdev_info(net_dev, "This is a MAC-less interface\n");
++ return;
++ }
++
++ phy_dev = mac_dev->phy_dev;
++ if (unlikely(phy_dev == NULL)) {
++ netdev_err(net_dev, "phy device not initialized\n");
++ return;
++ }
++
++ epause->autoneg = mac_dev->autoneg_pause;
++ epause->rx_pause = mac_dev->rx_pause_active;
++ epause->tx_pause = mac_dev->tx_pause_active;
++}
++
++static int __cold dpa_set_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *epause)
++{
++ struct dpa_priv_s *priv;
++ struct mac_device *mac_dev;
++ struct phy_device *phy_dev;
++ int _errno;
++ u32 newadv, oldadv;
++ bool rx_pause, tx_pause;
++
++ priv = netdev_priv(net_dev);
++ mac_dev = priv->mac_dev;
++
++ if (mac_dev == NULL) {
++ netdev_info(net_dev, "This is a MAC-less interface\n");
++ return -ENODEV;
++ }
++
++ phy_dev = mac_dev->phy_dev;
++ if (unlikely(phy_dev == NULL)) {
++ netdev_err(net_dev, "phy device not initialized\n");
++ return -ENODEV;
++ }
++
++ if (!(phy_dev->supported & SUPPORTED_Pause) ||
++ (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
++ (epause->rx_pause != epause->tx_pause)))
++ return -EINVAL;
++
++ /* The MAC should know how to handle PAUSE frame autonegotiation before
++ * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
++ * settings.
++ */
++ mac_dev->autoneg_pause = !!epause->autoneg;
++ mac_dev->rx_pause_req = !!epause->rx_pause;
++ mac_dev->tx_pause_req = !!epause->tx_pause;
++
++ /* Determine the sym/asym advertised PAUSE capabilities from the desired
++ * rx/tx pause settings.
++ */
++ newadv = 0;
++ if (epause->rx_pause)
++ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
++ if (epause->tx_pause)
++ newadv |= ADVERTISED_Asym_Pause;
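++
++ /* Resulting advertisement, for reference:
++ * rx=0 tx=0 -> none; rx=0 tx=1 -> Asym_Pause;
++ * rx=1 (any tx) -> Pause | Asym_Pause
++ */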
++
++ oldadv = phy_dev->advertising &
++ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
++
++ /* If there are differences between the old and the new advertised
++ * values, restart PHY autonegotiation and advertise the new values.
++ */
++ if (oldadv != newadv) {
++ phy_dev->advertising &= ~(ADVERTISED_Pause
++ | ADVERTISED_Asym_Pause);
++ phy_dev->advertising |= newadv;
++ if (phy_dev->autoneg) {
++ _errno = phy_start_aneg(phy_dev);
++ if (unlikely(_errno < 0))
++ netdev_err(net_dev, "phy_start_aneg() = %d\n",
++ _errno);
++ }
++ }
++
++ get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
++ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
++ if (unlikely(_errno < 0))
++ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
++
++ return _errno;
++}
++
++#ifdef CONFIG_PM
++static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++
++ wol->supported = 0;
++ wol->wolopts = 0;
++
++ if (!priv->wol || !device_can_wakeup(net_dev->dev.parent))
++ return;
++
++ if (priv->wol & DPAA_WOL_MAGIC) {
++ wol->supported = WAKE_MAGIC;
++ wol->wolopts = WAKE_MAGIC;
++ }
++}
++
++static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++
++ if (priv->mac_dev == NULL) {
++ netdev_info(net_dev, "This is a MAC-less interface\n");
++ return -ENODEV;
++ }
++
++ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
++ netdev_dbg(net_dev, "phy device not initialized\n");
++ return -ENODEV;
++ }
++
++ if (!device_can_wakeup(net_dev->dev.parent) ||
++ (wol->wolopts & ~WAKE_MAGIC))
++ return -EOPNOTSUPP;
++
++ priv->wol = 0;
++
++ if (wol->wolopts & WAKE_MAGIC) {
++ priv->wol = DPAA_WOL_MAGIC;
++ device_set_wakeup_enable(net_dev->dev.parent, 1);
++ } else {
++ device_set_wakeup_enable(net_dev->dev.parent, 0);
++ }
++
++ return 0;
++}
++#endif
++
++static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
++{
++ struct dpa_priv_s *priv;
++
++ priv = netdev_priv(net_dev);
++ if (priv->mac_dev == NULL) {
++ netdev_info(net_dev, "This is a MAC-less interface\n");
++ return -ENODEV;
++ }
++
++ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
++ netdev_err(net_dev, "phy device not initialized\n");
++ return -ENODEV;
++ }
++
++ return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee);
++}
++
++static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
++{
++ struct dpa_priv_s *priv;
++
++ priv = netdev_priv(net_dev);
++ if (priv->mac_dev == NULL) {
++ netdev_info(net_dev, "This is a MAC-less interface\n");
++ return -ENODEV;
++ }
++
++ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
++ netdev_err(net_dev, "phy device not initialized\n");
++ return -ENODEV;
++ }
++
++ return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee);
++}
++
++static int dpa_get_sset_count(struct net_device *net_dev, int type)
++{
++ unsigned int total_stats, num_stats;
++
++ num_stats = num_online_cpus() + 1;
++ total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
++
++ switch (type) {
++ case ETH_SS_STATS:
++ return total_stats;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
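++
++/* Worked example: with 4 online CPUs this reports
++ * (4 + 1) * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN = 5 * 10 + 16 = 66
++ * u64 values: ten per-CPU counters (each with a TOTAL column) plus the
++ * 16 global counters.
++ */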
++
++static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
++ int crr_cpu, u64 bp_count, u64 *data)
++{
++ int num_stat_values = num_cpus + 1;
++ int crr_stat = 0;
++
++ /* update current CPU's stats and also add them to the total values */
++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt;
++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt;
++
++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets;
++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets;
++
++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
++
++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
++
++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
++
++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
++
++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
++
++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
++
++ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
++ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
++
++ data[crr_stat * num_stat_values + crr_cpu] = bp_count;
++ data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
++}
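++
++/* The flat "data" array is laid out column-major per statistic:
++ * data[s * (num_cpus + 1) + c] holds stat s for CPU c, and
++ * data[s * (num_cpus + 1) + num_cpus] holds its total across CPUs;
++ * the global counters are appended after this block.
++ */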
++
++static void dpa_get_ethtool_stats(struct net_device *net_dev,
++ struct ethtool_stats *stats, u64 *data)
++{
++ u64 bp_count, cg_time, cg_num, cg_status;
++ struct dpa_percpu_priv_s *percpu_priv;
++ struct qm_mcr_querycgr query_cgr;
++ struct dpa_rx_errors rx_errors;
++ struct dpa_ern_cnt ern_cnt;
++ struct dpa_priv_s *priv;
++ unsigned int num_cpus, offset;
++ struct dpa_bp *dpa_bp;
++ int total_stats, i;
++
++ total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
++ priv = netdev_priv(net_dev);
++ dpa_bp = priv->dpa_bp;
++ num_cpus = num_online_cpus();
++ bp_count = 0;
++
++ memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
++ memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
++ memset(data, 0, total_stats * sizeof(u64));
++
++ for_each_online_cpu(i) {
++ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
++
++ if (dpa_bp->percpu_count)
++ bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
++
++ rx_errors.dme += percpu_priv->rx_errors.dme;
++ rx_errors.fpe += percpu_priv->rx_errors.fpe;
++ rx_errors.fse += percpu_priv->rx_errors.fse;
++ rx_errors.phe += percpu_priv->rx_errors.phe;
++ rx_errors.cse += percpu_priv->rx_errors.cse;
++
++ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
++ ern_cnt.wred += percpu_priv->ern_cnt.wred;
++ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
++ ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
++ ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
++ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
++ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
++ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
++
++ copy_stats(percpu_priv, num_cpus, i, bp_count, data);
++ }
++
++ offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
++ memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
++
++ offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
++ memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
++
++ /* gather congestion related counters */
++ cg_num = 0;
++ cg_status = 0;
++ cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
++ if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
++ cg_num = priv->cgr_data.cgr_congested_count;
++ cg_status = query_cgr.cgr.cs;
++
++ /* reset congestion stats (like the QMan API does) */
++ priv->cgr_data.congested_jiffies = 0;
++ priv->cgr_data.cgr_congested_count = 0;
++ }
++
++ offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
++ data[offset++] = cg_time;
++ data[offset++] = cg_num;
++ data[offset++] = cg_status;
++}
++
++static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
++{
++ unsigned int i, j, num_cpus, size;
++ char stat_string_cpu[ETH_GSTRING_LEN];
++ u8 *strings;
++
++ strings = data;
++ num_cpus = num_online_cpus();
++ size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
++
++ for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
++ for (j = 0; j < num_cpus; j++) {
++ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", dpa_stats_percpu[i], j);
++ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
++ strings += ETH_GSTRING_LEN;
++ }
++ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", dpa_stats_percpu[i]);
++ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
++ strings += ETH_GSTRING_LEN;
++ }
++ memcpy(strings, dpa_stats_global, size);
++}
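++
++/* The resulting string table mirrors the stats layout, e.g.
++ * "interrupts [CPU 0]", "interrupts [CPU 1]", ..., "interrupts [TOTAL]",
++ * then the next per-CPU counter, and finally the dpa_stats_global names.
++ */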
++
++const struct ethtool_ops dpa_ethtool_ops = {
++ .get_settings = dpa_get_settings,
++ .set_settings = dpa_set_settings,
++ .get_drvinfo = dpa_get_drvinfo,
++ .get_msglevel = dpa_get_msglevel,
++ .set_msglevel = dpa_set_msglevel,
++ .nway_reset = dpa_nway_reset,
++ .get_pauseparam = dpa_get_pauseparam,
++ .set_pauseparam = dpa_set_pauseparam,
++ .self_test = NULL, /* TODO invoke the cold-boot unit-test? */
++ .get_link = ethtool_op_get_link,
++ .get_eee = dpa_get_eee,
++ .set_eee = dpa_set_eee,
++ .get_sset_count = dpa_get_sset_count,
++ .get_ethtool_stats = dpa_get_ethtool_stats,
++ .get_strings = dpa_get_strings,
++#ifdef CONFIG_PM
++ .get_wol = dpa_get_wol,
++ .set_wol = dpa_set_wol,
++#endif
++};
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
+new file mode 100644
+index 00000000..f54a3d67
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
+@@ -0,0 +1,291 @@
++/*
++ * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
++ *
++ * Author: Yangbo Lu <yangbo.lu@freescale.com>
++ *
++ * Copyright 2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++*/
++
++#include <linux/device.h>
++#include <linux/hrtimer.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/timex.h>
++#include <linux/io.h>
++
++#include <linux/ptp_clock_kernel.h>
++
++#include "dpaa_eth.h"
++#include "mac.h"
++
++static struct mac_device *mac_dev;
++static u32 freqCompensation;
++
++/* Bit definitions for the TMR_CTRL register */
++#define ALM1P (1<<31) /* Alarm1 output polarity */
++#define ALM2P (1<<30) /* Alarm2 output polarity */
++#define FS (1<<28) /* FIPER start indication */
++#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
++#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
++#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
++#define TCLK_PERIOD_MASK (0x3ff)
++#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
++#define FRD (1<<14) /* FIPER Realignment Disable */
++#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
++#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
++#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
++#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
++#define COPH (1<<7) /* Generated clock output phase. */
++#define CIPH (1<<6) /* External oscillator input clock phase */
++#define TMSR (1<<5) /* Timer soft reset. */
++#define BYP (1<<3) /* Bypass drift compensated clock */
++#define TE (1<<2) /* 1588 timer enable. */
++#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
++#define CKSEL_MASK (0x3)
++
++/* Bit definitions for the TMR_TEVENT register */
++#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
++#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
++#define ALM2 (1<<17) /* Current time = alarm time register 2 */
++#define ALM1 (1<<16) /* Current time = alarm time register 1 */
++#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
++#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
++#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
++
++/* Bit definitions for the TMR_TEMASK register */
++#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
++#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
++#define ALM2EN (1<<17) /* Timer ALM2 event enable */
++#define ALM1EN (1<<16) /* Timer ALM1 event enable */
++#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
++#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
++
++/* Bit definitions for the TMR_PEVENT register */
++#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
++#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
++#define RXP (1<<0) /* PTP frame has been received */
++
++/* Bit definitions for the TMR_PEMASK register */
++#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
++#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
++#define RXPEN (1<<0) /* Receive PTP packet event enable */
++
++/* Bit definitions for the TMR_STAT register */
++#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
++#define STAT_VEC_MASK (0x3f)
++
++/* Bit definitions for the TMR_PRSC register */
++#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
++#define PRSC_OCK_MASK (0xffff)
++
++
++#define N_EXT_TS 2
++
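++/* Arm the RTC alarm on the next whole-second boundary: read the counter,
++ * jump ~1.5 s ahead, round down to a full second, then back off by one
++ * nominal clock period so the event lands on the second edge (PPS).
++ */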
++static void set_alarm(void)
++{
++ u64 ns;
++
++ if (mac_dev->fm_rtc_get_cnt)
++ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
++ ns += 1500000000ULL;
++ ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
++ ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
++ if (mac_dev->fm_rtc_set_alarm)
++ mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns);
++}
++
++static void set_fipers(void)
++{
++ u64 fiper;
++
++ if (mac_dev->fm_rtc_disable)
++ mac_dev->fm_rtc_disable(mac_dev->fm_dev);
++
++ set_alarm();
++ fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
++ if (mac_dev->fm_rtc_set_fiper)
++ mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper);
++
++ if (mac_dev->fm_rtc_enable)
++ mac_dev->fm_rtc_enable(mac_dev->fm_dev);
++}
++
++/* PTP clock operations */
++
++static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
++{
++ u64 adj;
++ u32 diff, tmr_add;
++ int neg_adj = 0;
++
++ if (ppb < 0) {
++ neg_adj = 1;
++ ppb = -ppb;
++ }
++
++ tmr_add = freqCompensation;
++ adj = tmr_add;
++ adj *= ppb;
++ diff = div_u64(adj, 1000000000ULL);
++
++ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
++
++ if (mac_dev->fm_rtc_set_drift)
++ mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add);
++
++ return 0;
++}
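++
++/* Worked example (illustrative): with tmr_add = 1 << 30 and ppb = 1000,
++ * diff = div_u64((1ULL << 30) * 1000, 10^9) = 1073, so the drift register
++ * shifts by ~1000 parts per billion of its nominal value, as intended.
++ */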
++
++static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta)
++{
++ s64 now;
++
++ if (mac_dev->fm_rtc_get_cnt)
++ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now);
++
++ now += delta;
++
++ if (mac_dev->fm_rtc_set_cnt)
++ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now);
++ set_fipers();
++
++ return 0;
++}
++
++static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
++{
++ u64 ns;
++ u32 remainder;
++
++ if (mac_dev->fm_rtc_get_cnt)
++ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
++
++ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
++ ts->tv_nsec = remainder;
++ return 0;
++}
++
++static int ptp_dpa_settime(struct ptp_clock_info *ptp,
++ const struct timespec64 *ts)
++{
++ u64 ns;
++
++ ns = ts->tv_sec * 1000000000ULL;
++ ns += ts->tv_nsec;
++
++ if (mac_dev->fm_rtc_set_cnt)
++ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns);
++ set_fipers();
++ return 0;
++}
++
++static int ptp_dpa_enable(struct ptp_clock_info *ptp,
++ struct ptp_clock_request *rq, int on)
++{
++ u32 bit;
++
++ switch (rq->type) {
++ case PTP_CLK_REQ_EXTTS:
++ switch (rq->extts.index) {
++ case 0:
++ bit = ETS1EN;
++ break;
++ case 1:
++ bit = ETS2EN;
++ break;
++ default:
++ return -EINVAL;
++ }
++ if (on) {
++ if (mac_dev->fm_rtc_enable_interrupt)
++ mac_dev->fm_rtc_enable_interrupt(
++ mac_dev->fm_dev, bit);
++ } else {
++ if (mac_dev->fm_rtc_disable_interrupt)
++ mac_dev->fm_rtc_disable_interrupt(
++ mac_dev->fm_dev, bit);
++ }
++ return 0;
++
++ case PTP_CLK_REQ_PPS:
++ if (on) {
++ if (mac_dev->fm_rtc_enable_interrupt)
++ mac_dev->fm_rtc_enable_interrupt(
++ mac_dev->fm_dev, PP1EN);
++ } else {
++ if (mac_dev->fm_rtc_disable_interrupt)
++ mac_dev->fm_rtc_disable_interrupt(
++ mac_dev->fm_dev, PP1EN);
++ }
++ return 0;
++
++ default:
++ break;
++ }
++
++ return -EOPNOTSUPP;
++}
++
++static struct ptp_clock_info ptp_dpa_caps = {
++ .owner = THIS_MODULE,
++ .name = "dpaa clock",
++ .max_adj = 512000,
++ .n_alarm = 0,
++ .n_ext_ts = N_EXT_TS,
++ .n_per_out = 0,
++ .pps = 1,
++ .adjfreq = ptp_dpa_adjfreq,
++ .adjtime = ptp_dpa_adjtime,
++ .gettime64 = ptp_dpa_gettime,
++ .settime64 = ptp_dpa_settime,
++ .enable = ptp_dpa_enable,
++};
++
++static int __init __cold dpa_ptp_load(void)
++{
++ struct device *ptp_dev;
++ struct timespec64 now;
++ struct ptp_clock *clock = ptp_priv.clock;
++ int dpa_phc_index;
++ int err;
++
++ if (!(ptp_priv.of_dev && ptp_priv.mac_dev))
++ return -ENODEV;
++
++ ptp_dev = &ptp_priv.of_dev->dev;
++ mac_dev = ptp_priv.mac_dev;
++
++ if (mac_dev->fm_rtc_get_drift)
++ mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation);
++
++ getnstimeofday64(&now);
++ ptp_dpa_settime(&ptp_dpa_caps, &now);
++
++	clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev);
++	if (IS_ERR(clock)) {
++		err = PTR_ERR(clock);
++		return err;
++	}
++	/* keep the handle so dpa_ptp_unload() can unregister the clock */
++	ptp_priv.clock = clock;
++	dpa_phc_index = ptp_clock_index(clock);
++	return 0;
++}
++module_init(dpa_ptp_load);
++
++static void __exit __cold dpa_ptp_unload(void)
++{
++ struct ptp_clock *clock = ptp_priv.clock;
++
++ if (mac_dev->fm_rtc_disable_interrupt)
++ mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff);
++ ptp_clock_unregister(clock);
++}
++module_exit(dpa_ptp_unload);
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
+new file mode 100644
+index 00000000..2c5652d9
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
+@@ -0,0 +1,907 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++ KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": " fmt
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/io.h>
++#include <linux/of_platform.h>
++#include <linux/of_mdio.h>
++#include <linux/phy.h>
++#include <linux/netdevice.h>
++
++#include "dpaa_eth.h"
++#include "mac.h"
++#include "lnxwrp_fsl_fman.h"
++
++#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
++
++#include "fsl_fman_dtsec.h"
++#include "fsl_fman_tgec.h"
++#include "fsl_fman_memac.h"
++#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h"
++
++#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
++
++MODULE_DESCRIPTION(MAC_DESCRIPTION);
++
++struct mac_priv_s {
++ struct fm_mac_dev *fm_mac;
++};
++
++const char *mac_driver_description __initconst = MAC_DESCRIPTION;
++const size_t mac_sizeof_priv[] = {
++ [DTSEC] = sizeof(struct mac_priv_s),
++ [XGMAC] = sizeof(struct mac_priv_s),
++ [MEMAC] = sizeof(struct mac_priv_s)
++};
++
++static const enet_mode_t _100[] = {
++ [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
++ [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
++};
++
++static const enet_mode_t _1000[] = {
++ [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
++ [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
++ [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000,
++ [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
++ [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
++ [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
++ [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
++ [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
++ [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
++};
++
++static enet_mode_t __cold __attribute__((nonnull))
++macdev2enetinterface(const struct mac_device *mac_dev)
++{
++ switch (mac_dev->max_speed) {
++ case SPEED_100:
++ return _100[mac_dev->phy_if];
++ case SPEED_1000:
++ return _1000[mac_dev->phy_if];
++ case SPEED_2500:
++ return e_ENET_MODE_SGMII_2500;
++ case SPEED_10000:
++ return e_ENET_MODE_XGMII_10000;
++ default:
++ return e_ENET_MODE_MII_100;
++ }
++}
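++
++/* Example: a MAC described with phy-connection-type = "rgmii" and a
++ * 1000 Mb/s max speed resolves to e_ENET_MODE_RGMII_1000; combinations
++ * the tables above do not cover fall back to e_ENET_MODE_MII_100.
++ */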
++
++static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception)
++{
++ struct mac_device *mac_dev;
++
++ mac_dev = (struct mac_device *)_mac_dev;
++
++	if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
++		/* mask the exception so only the first RX FIFO
++		 * overflow is reported
++		 */
++ fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
++ e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
++ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n",
++ exception);
++ }
++
++ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__,
++ exception);
++}
++
++static int __cold init(struct mac_device *mac_dev)
++{
++ int _errno;
++ struct mac_priv_s *priv;
++ t_FmMacParams param;
++ uint32_t version;
++
++ priv = macdev_priv(mac_dev);
++
++ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
++ mac_dev->dev, mac_dev->res->start, 0x2000);
++ param.enetMode = macdev2enetinterface(mac_dev);
++ memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
++ sizeof(mac_dev->addr)));
++ param.macId = mac_dev->cell_index;
++ param.h_Fm = (handle_t)mac_dev->fm;
++ param.mdioIrq = NO_IRQ;
++ param.f_Exception = mac_exception;
++ param.f_Event = mac_exception;
++ param.h_App = mac_dev;
++
++ priv->fm_mac = fm_mac_config(&param);
++ if (unlikely(priv->fm_mac == NULL)) {
++ _errno = -EINVAL;
++ goto _return;
++ }
++
++ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
++ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
++ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
++
++ _errno = fm_mac_config_max_frame_length(priv->fm_mac,
++ fm_get_max_frm());
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++
++ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
++ /* 10G always works with pad and CRC */
++ _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++
++ _errno = fm_mac_config_half_duplex(priv->fm_mac,
++ mac_dev->half_duplex);
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++ } else {
++ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++ }
++
++ _errno = fm_mac_init(priv->fm_mac);
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++
++#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
++ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
++ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
++ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
++ e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++ }
++#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
++
++ /* For 10G MAC, disable Tx ECC exception */
++ if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
++ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
++ e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++ }
++
++ _errno = fm_mac_get_version(priv->fm_mac, &version);
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++
++ dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
++ ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
++ "dTSEC" : "XGEC"), version);
++
++ goto _return;
++
++_return_fm_mac_free:
++ fm_mac_free(mac_dev->get_mac_handle(mac_dev));
++
++_return:
++ return _errno;
++}
++
++static int __cold memac_init(struct mac_device *mac_dev)
++{
++ int _errno;
++ struct mac_priv_s *priv;
++ t_FmMacParams param;
++
++ priv = macdev_priv(mac_dev);
++
++ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
++ mac_dev->dev, mac_dev->res->start, 0x2000);
++ param.enetMode = macdev2enetinterface(mac_dev);
++ memcpy(&param.addr, mac_dev->addr, sizeof(mac_dev->addr));
++ param.macId = mac_dev->cell_index;
++ param.h_Fm = (handle_t)mac_dev->fm;
++ param.mdioIrq = NO_IRQ;
++ param.f_Exception = mac_exception;
++ param.f_Event = mac_exception;
++ param.h_App = mac_dev;
++
++ priv->fm_mac = fm_mac_config(&param);
++ if (unlikely(priv->fm_mac == NULL)) {
++ _errno = -EINVAL;
++ goto _return;
++ }
++
++ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
++ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
++ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
++
++ _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++
++ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++
++ _errno = fm_mac_init(priv->fm_mac);
++ if (unlikely(_errno < 0))
++ goto _return_fm_mac_free;
++
++ dev_info(mac_dev->dev, "FMan MEMAC\n");
++
++ goto _return;
++
++_return_fm_mac_free:
++ fm_mac_free(priv->fm_mac);
++
++_return:
++ return _errno;
++}
++
++static int __cold start(struct mac_device *mac_dev)
++{
++ int _errno;
++ struct phy_device *phy_dev = mac_dev->phy_dev;
++
++ _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
++
++ if (!_errno && phy_dev)
++ phy_start(phy_dev);
++
++ return _errno;
++}
++
++static int __cold stop(struct mac_device *mac_dev)
++{
++ if (mac_dev->phy_dev)
++ phy_stop(mac_dev->phy_dev);
++
++ return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
++}
++
++static int __cold set_multi(struct net_device *net_dev,
++ struct mac_device *mac_dev)
++{
++ struct mac_priv_s *mac_priv;
++ struct mac_address *old_addr, *tmp;
++ struct netdev_hw_addr *ha;
++ int _errno;
++
++ mac_priv = macdev_priv(mac_dev);
++
++ /* Clear previous address list */
++ list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
++ _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
++ (t_EnetAddr *)old_addr->addr);
++ if (_errno < 0)
++ return _errno;
++
++ list_del(&old_addr->list);
++ kfree(old_addr);
++ }
++
++ /* Add all the addresses from the new list */
++ netdev_for_each_mc_addr(ha, net_dev) {
++ _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
++ (t_EnetAddr *)ha->addr);
++ if (_errno < 0)
++ return _errno;
++
++ tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
++ if (!tmp) {
++ dev_err(mac_dev->dev, "Out of memory\n");
++ return -ENOMEM;
++ }
++ memcpy(tmp->addr, ha->addr, ETH_ALEN);
++ list_add(&tmp->list, &mac_dev->mc_addr_list);
++ }
++ return 0;
++}
++
++/* Avoid redundant calls to FMD if the MAC driver already holds the
++ * desired active PAUSE settings; otherwise, push the new active settings
++ * down to FMan.
++ */
++int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
++{
++ struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
++ int _errno = 0;
++
++ if (unlikely(rx != mac_dev->rx_pause_active)) {
++ _errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
++ if (likely(_errno == 0))
++ mac_dev->rx_pause_active = rx;
++ }
++
++ if (unlikely(tx != mac_dev->tx_pause_active)) {
++ _errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
++ if (likely(_errno == 0))
++ mac_dev->tx_pause_active = tx;
++ }
++
++ return _errno;
++}
++EXPORT_SYMBOL(set_mac_active_pause);
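++
++/* Typical call site (this is what adjust_link() below does): resolve the
++ * new PAUSE state after link renegotiation, then push only the deltas:
++ *
++ *   bool rx, tx;
++ *
++ *   get_pause_cfg(mac_dev, &rx, &tx);
++ *   err = set_mac_active_pause(mac_dev, rx, tx);
++ */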
++
++/* Determine the MAC RX/TX PAUSE frame settings based on PHY
++ * autonegotiation or values set by ethtool.
++ */
++void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
++{
++ struct phy_device *phy_dev = mac_dev->phy_dev;
++ u16 lcl_adv, rmt_adv;
++ u8 flowctrl;
++
++ *rx_pause = *tx_pause = false;
++
++ if (!phy_dev->duplex)
++ return;
++
++ /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
++ * are those set by ethtool.
++ */
++ if (!mac_dev->autoneg_pause) {
++ *rx_pause = mac_dev->rx_pause_req;
++ *tx_pause = mac_dev->tx_pause_req;
++ return;
++ }
++
++ /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
++ * settings depend on the result of the link negotiation.
++ */
++
++ /* get local capabilities */
++ lcl_adv = 0;
++ if (phy_dev->advertising & ADVERTISED_Pause)
++ lcl_adv |= ADVERTISE_PAUSE_CAP;
++ if (phy_dev->advertising & ADVERTISED_Asym_Pause)
++ lcl_adv |= ADVERTISE_PAUSE_ASYM;
++
++ /* get link partner capabilities */
++ rmt_adv = 0;
++ if (phy_dev->pause)
++ rmt_adv |= LPA_PAUSE_CAP;
++ if (phy_dev->asym_pause)
++ rmt_adv |= LPA_PAUSE_ASYM;
++
++ /* Calculate TX/RX settings based on local and peer advertised
++ * symmetric/asymmetric PAUSE capabilities.
++ */
++ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
++ if (flowctrl & FLOW_CTRL_RX)
++ *rx_pause = true;
++ if (flowctrl & FLOW_CTRL_TX)
++ *tx_pause = true;
++}
++EXPORT_SYMBOL(get_pause_cfg);
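++
++/* Worked example of the resolution above: if both ends advertise
++ * symmetric PAUSE (ADVERTISE_PAUSE_CAP locally, LPA_PAUSE_CAP from the
++ * partner), mii_resolve_flowctrl_fdx() returns FLOW_CTRL_RX |
++ * FLOW_CTRL_TX and both directions are enabled; on a half-duplex link
++ * the function bails out early and both stay disabled.
++ */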
++
++static void adjust_link_void(struct net_device *net_dev)
++{
++}
++
++static void adjust_link(struct net_device *net_dev)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ struct mac_device *mac_dev = priv->mac_dev;
++ struct phy_device *phy_dev = mac_dev->phy_dev;
++ struct fm_mac_dev *fm_mac_dev;
++ bool rx_pause, tx_pause;
++ int _errno;
++
++ fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
++ fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
++ phy_dev->duplex);
++
++ get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
++ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
++ if (unlikely(_errno < 0))
++ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
++}
++
++/* Initializes the driver's PHY state and attaches to the PHY.
++ * Returns 0 on success.
++ */
++static int dtsec_init_phy(struct net_device *net_dev,
++ struct mac_device *mac_dev)
++{
++ struct phy_device *phy_dev;
++
++ if (of_phy_is_fixed_link(mac_dev->phy_node))
++ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
++ 0, mac_dev->phy_if);
++ else
++ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
++ &adjust_link, 0, mac_dev->phy_if);
++ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
++ netdev_err(net_dev, "Could not connect to PHY %s\n",
++ mac_dev->phy_node ?
++ mac_dev->phy_node->full_name :
++ mac_dev->fixed_bus_id);
++ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
++ }
++
++ /* Remove any features not supported by the controller */
++ phy_dev->supported &= mac_dev->if_support;
++ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
++ * as most of the PHY drivers do not enable them by default.
++ */
++ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
++ phy_dev->advertising = phy_dev->supported;
++
++ mac_dev->phy_dev = phy_dev;
++
++ return 0;
++}
++
++static int xgmac_init_phy(struct net_device *net_dev,
++ struct mac_device *mac_dev)
++{
++ struct phy_device *phy_dev;
++
++ if (of_phy_is_fixed_link(mac_dev->phy_node))
++ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
++ 0, mac_dev->phy_if);
++ else
++ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
++ &adjust_link_void, 0, mac_dev->phy_if);
++ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
++ netdev_err(net_dev, "Could not attach to PHY %s\n",
++ mac_dev->phy_node ?
++ mac_dev->phy_node->full_name :
++ mac_dev->fixed_bus_id);
++ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
++ }
++
++ phy_dev->supported &= mac_dev->if_support;
++ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
++ * as most of the PHY drivers do not enable them by default.
++ */
++ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
++ phy_dev->advertising = phy_dev->supported;
++
++ mac_dev->phy_dev = phy_dev;
++
++ return 0;
++}
++
++static int memac_init_phy(struct net_device *net_dev,
++ struct mac_device *mac_dev)
++{
++ struct phy_device *phy_dev;
++
++ if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
++ (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500) ||
++ of_phy_is_fixed_link(mac_dev->phy_node)) {
++ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
++ &adjust_link_void, 0,
++ mac_dev->phy_if);
++ } else {
++ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
++ &adjust_link, 0, mac_dev->phy_if);
++ }
++
++ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
++ netdev_err(net_dev, "Could not connect to PHY %s\n",
++ mac_dev->phy_node ?
++ mac_dev->phy_node->full_name :
++ mac_dev->fixed_bus_id);
++ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
++ }
++
++ /* Remove any features not supported by the controller */
++ phy_dev->supported &= mac_dev->if_support;
++ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
++ * as most of the PHY drivers do not enable them by default.
++ */
++ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
++ phy_dev->advertising = phy_dev->supported;
++
++ mac_dev->phy_dev = phy_dev;
++
++ return 0;
++}
++
++static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
++{
++ int _errno, __errno;
++
++ _errno = fm_mac_disable(fm_mac_dev);
++ __errno = fm_mac_free(fm_mac_dev);
++
++ if (unlikely(__errno < 0))
++ _errno = __errno;
++
++ return _errno;
++}
++
++static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
++{
++	const struct mac_priv_s *priv;
++
++	priv = macdev_priv(mac_dev);
++	return priv->fm_mac;
++}
++
++static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn)
++{
++ struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr;
++ int i = 0, n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index);
++
++ FM_DMP_V32(buf, n, p_mm, tsec_id);
++ FM_DMP_V32(buf, n, p_mm, tsec_id2);
++ FM_DMP_V32(buf, n, p_mm, ievent);
++ FM_DMP_V32(buf, n, p_mm, imask);
++ FM_DMP_V32(buf, n, p_mm, ecntrl);
++ FM_DMP_V32(buf, n, p_mm, ptv);
++ FM_DMP_V32(buf, n, p_mm, tmr_ctrl);
++ FM_DMP_V32(buf, n, p_mm, tmr_pevent);
++ FM_DMP_V32(buf, n, p_mm, tmr_pemask);
++ FM_DMP_V32(buf, n, p_mm, tctrl);
++ FM_DMP_V32(buf, n, p_mm, rctrl);
++ FM_DMP_V32(buf, n, p_mm, maccfg1);
++ FM_DMP_V32(buf, n, p_mm, maccfg2);
++ FM_DMP_V32(buf, n, p_mm, ipgifg);
++ FM_DMP_V32(buf, n, p_mm, hafdup);
++ FM_DMP_V32(buf, n, p_mm, maxfrm);
++
++ FM_DMP_V32(buf, n, p_mm, macstnaddr1);
++ FM_DMP_V32(buf, n, p_mm, macstnaddr2);
++
++ for (i = 0; i < 7; ++i) {
++ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1);
++ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2);
++ }
++
++ FM_DMP_V32(buf, n, p_mm, car1);
++ FM_DMP_V32(buf, n, p_mm, car2);
++
++ return n;
++}
++
++static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
++{
++ struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr;
++ int n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC-%d", h_mac->cell_index);
++
++ FM_DMP_V32(buf, n, p_mm, tgec_id);
++ FM_DMP_V32(buf, n, p_mm, command_config);
++ FM_DMP_V32(buf, n, p_mm, mac_addr_0);
++ FM_DMP_V32(buf, n, p_mm, mac_addr_1);
++ FM_DMP_V32(buf, n, p_mm, maxfrm);
++ FM_DMP_V32(buf, n, p_mm, pause_quant);
++ FM_DMP_V32(buf, n, p_mm, rx_fifo_sections);
++ FM_DMP_V32(buf, n, p_mm, tx_fifo_sections);
++ FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e);
++ FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e);
++ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
++ FM_DMP_V32(buf, n, p_mm, mdio_cfg_status);
++ FM_DMP_V32(buf, n, p_mm, mdio_command);
++ FM_DMP_V32(buf, n, p_mm, mdio_data);
++ FM_DMP_V32(buf, n, p_mm, mdio_regaddr);
++ FM_DMP_V32(buf, n, p_mm, status);
++ FM_DMP_V32(buf, n, p_mm, tx_ipg_len);
++ FM_DMP_V32(buf, n, p_mm, mac_addr_2);
++ FM_DMP_V32(buf, n, p_mm, mac_addr_3);
++ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd);
++ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr);
++ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd);
++ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr);
++ FM_DMP_V32(buf, n, p_mm, imask);
++ FM_DMP_V32(buf, n, p_mm, ievent);
++
++ return n;
++}
++
++static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
++{
++ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
++ int i = 0, n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC-%d", h_mac->cell_index);
++
++ FM_DMP_V32(buf, n, p_mm, command_config);
++ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l);
++ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u);
++ FM_DMP_V32(buf, n, p_mm, maxfrm);
++ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
++ FM_DMP_V32(buf, n, p_mm, ievent);
++ FM_DMP_V32(buf, n, p_mm, tx_ipg_length);
++ FM_DMP_V32(buf, n, p_mm, imask);
++
++ for (i = 0; i < 4; ++i)
++ FM_DMP_V32(buf, n, p_mm, pause_quanta[i]);
++
++ for (i = 0; i < 4; ++i)
++ FM_DMP_V32(buf, n, p_mm, pause_thresh[i]);
++
++ FM_DMP_V32(buf, n, p_mm, rx_pause_status);
++
++ for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) {
++ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l);
++ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u);
++ }
++
++ FM_DMP_V32(buf, n, p_mm, lpwake_timer);
++ FM_DMP_V32(buf, n, p_mm, sleep_timer);
++ FM_DMP_V32(buf, n, p_mm, statn_config);
++ FM_DMP_V32(buf, n, p_mm, if_mode);
++ FM_DMP_V32(buf, n, p_mm, if_status);
++ FM_DMP_V32(buf, n, p_mm, hg_config);
++ FM_DMP_V32(buf, n, p_mm, hg_pause_quanta);
++ FM_DMP_V32(buf, n, p_mm, hg_pause_thresh);
++ FM_DMP_V32(buf, n, p_mm, hgrx_pause_status);
++ FM_DMP_V32(buf, n, p_mm, hg_fifos_status);
++ FM_DMP_V32(buf, n, p_mm, rhm);
++ FM_DMP_V32(buf, n, p_mm, thm);
++
++ return n;
++}
++
++static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn)
++{
++ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
++ int n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC-%d Rx stats", h_mac->cell_index);
++
++ /* Rx Statistics Counter */
++ FM_DMP_V32(buf, n, p_mm, reoct_l);
++ FM_DMP_V32(buf, n, p_mm, reoct_u);
++ FM_DMP_V32(buf, n, p_mm, roct_l);
++ FM_DMP_V32(buf, n, p_mm, roct_u);
++ FM_DMP_V32(buf, n, p_mm, raln_l);
++ FM_DMP_V32(buf, n, p_mm, raln_u);
++ FM_DMP_V32(buf, n, p_mm, rxpf_l);
++ FM_DMP_V32(buf, n, p_mm, rxpf_u);
++ FM_DMP_V32(buf, n, p_mm, rfrm_l);
++ FM_DMP_V32(buf, n, p_mm, rfrm_u);
++ FM_DMP_V32(buf, n, p_mm, rfcs_l);
++ FM_DMP_V32(buf, n, p_mm, rfcs_u);
++ FM_DMP_V32(buf, n, p_mm, rvlan_l);
++ FM_DMP_V32(buf, n, p_mm, rvlan_u);
++ FM_DMP_V32(buf, n, p_mm, rerr_l);
++ FM_DMP_V32(buf, n, p_mm, rerr_u);
++ FM_DMP_V32(buf, n, p_mm, ruca_l);
++ FM_DMP_V32(buf, n, p_mm, ruca_u);
++ FM_DMP_V32(buf, n, p_mm, rmca_l);
++ FM_DMP_V32(buf, n, p_mm, rmca_u);
++ FM_DMP_V32(buf, n, p_mm, rbca_l);
++ FM_DMP_V32(buf, n, p_mm, rbca_u);
++ FM_DMP_V32(buf, n, p_mm, rdrp_l);
++ FM_DMP_V32(buf, n, p_mm, rdrp_u);
++ FM_DMP_V32(buf, n, p_mm, rpkt_l);
++ FM_DMP_V32(buf, n, p_mm, rpkt_u);
++ FM_DMP_V32(buf, n, p_mm, rund_l);
++ FM_DMP_V32(buf, n, p_mm, rund_u);
++ FM_DMP_V32(buf, n, p_mm, r64_l);
++ FM_DMP_V32(buf, n, p_mm, r64_u);
++ FM_DMP_V32(buf, n, p_mm, r127_l);
++ FM_DMP_V32(buf, n, p_mm, r127_u);
++ FM_DMP_V32(buf, n, p_mm, r255_l);
++ FM_DMP_V32(buf, n, p_mm, r255_u);
++ FM_DMP_V32(buf, n, p_mm, r511_l);
++ FM_DMP_V32(buf, n, p_mm, r511_u);
++ FM_DMP_V32(buf, n, p_mm, r1023_l);
++ FM_DMP_V32(buf, n, p_mm, r1023_u);
++ FM_DMP_V32(buf, n, p_mm, r1518_l);
++ FM_DMP_V32(buf, n, p_mm, r1518_u);
++ FM_DMP_V32(buf, n, p_mm, r1519x_l);
++ FM_DMP_V32(buf, n, p_mm, r1519x_u);
++ FM_DMP_V32(buf, n, p_mm, rovr_l);
++ FM_DMP_V32(buf, n, p_mm, rovr_u);
++ FM_DMP_V32(buf, n, p_mm, rjbr_l);
++ FM_DMP_V32(buf, n, p_mm, rjbr_u);
++ FM_DMP_V32(buf, n, p_mm, rfrg_l);
++ FM_DMP_V32(buf, n, p_mm, rfrg_u);
++ FM_DMP_V32(buf, n, p_mm, rcnp_l);
++ FM_DMP_V32(buf, n, p_mm, rcnp_u);
++ FM_DMP_V32(buf, n, p_mm, rdrntp_l);
++ FM_DMP_V32(buf, n, p_mm, rdrntp_u);
++
++ return n;
++}
++
++static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn)
++{
++ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
++ int n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC-%d Tx stats", h_mac->cell_index);
++
++ /* Tx Statistics Counter */
++ FM_DMP_V32(buf, n, p_mm, teoct_l);
++ FM_DMP_V32(buf, n, p_mm, teoct_u);
++ FM_DMP_V32(buf, n, p_mm, toct_l);
++ FM_DMP_V32(buf, n, p_mm, toct_u);
++ FM_DMP_V32(buf, n, p_mm, txpf_l);
++ FM_DMP_V32(buf, n, p_mm, txpf_u);
++ FM_DMP_V32(buf, n, p_mm, tfrm_l);
++ FM_DMP_V32(buf, n, p_mm, tfrm_u);
++ FM_DMP_V32(buf, n, p_mm, tfcs_l);
++ FM_DMP_V32(buf, n, p_mm, tfcs_u);
++ FM_DMP_V32(buf, n, p_mm, tvlan_l);
++ FM_DMP_V32(buf, n, p_mm, tvlan_u);
++ FM_DMP_V32(buf, n, p_mm, terr_l);
++ FM_DMP_V32(buf, n, p_mm, terr_u);
++ FM_DMP_V32(buf, n, p_mm, tuca_l);
++ FM_DMP_V32(buf, n, p_mm, tuca_u);
++ FM_DMP_V32(buf, n, p_mm, tmca_l);
++ FM_DMP_V32(buf, n, p_mm, tmca_u);
++ FM_DMP_V32(buf, n, p_mm, tbca_l);
++ FM_DMP_V32(buf, n, p_mm, tbca_u);
++ FM_DMP_V32(buf, n, p_mm, tpkt_l);
++ FM_DMP_V32(buf, n, p_mm, tpkt_u);
++ FM_DMP_V32(buf, n, p_mm, tund_l);
++ FM_DMP_V32(buf, n, p_mm, tund_u);
++ FM_DMP_V32(buf, n, p_mm, t64_l);
++ FM_DMP_V32(buf, n, p_mm, t64_u);
++ FM_DMP_V32(buf, n, p_mm, t127_l);
++ FM_DMP_V32(buf, n, p_mm, t127_u);
++ FM_DMP_V32(buf, n, p_mm, t255_l);
++ FM_DMP_V32(buf, n, p_mm, t255_u);
++ FM_DMP_V32(buf, n, p_mm, t511_l);
++ FM_DMP_V32(buf, n, p_mm, t511_u);
++ FM_DMP_V32(buf, n, p_mm, t1023_l);
++ FM_DMP_V32(buf, n, p_mm, t1023_u);
++ FM_DMP_V32(buf, n, p_mm, t1518_l);
++ FM_DMP_V32(buf, n, p_mm, t1518_u);
++ FM_DMP_V32(buf, n, p_mm, t1519x_l);
++ FM_DMP_V32(buf, n, p_mm, t1519x_u);
++ FM_DMP_V32(buf, n, p_mm, tcnp_l);
++ FM_DMP_V32(buf, n, p_mm, tcnp_u);
++
++ return n;
++}
++
++int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
++{
++ int n = nn;
++
++ n = h_mac->dump_mac_regs(h_mac, buf, n);
++
++ return n;
++}
++EXPORT_SYMBOL(fm_mac_dump_regs);
++
++int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn)
++{
++ int n = nn;
++
++	if (h_mac->dump_mac_rx_stats)
++ n = h_mac->dump_mac_rx_stats(h_mac, buf, n);
++
++ return n;
++}
++EXPORT_SYMBOL(fm_mac_dump_rx_stats);
++
++int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn)
++{
++ int n = nn;
++
++	if (h_mac->dump_mac_tx_stats)
++ n = h_mac->dump_mac_tx_stats(h_mac, buf, n);
++
++ return n;
++}
++EXPORT_SYMBOL(fm_mac_dump_tx_stats);
++
++static void __cold setup_dtsec(struct mac_device *mac_dev)
++{
++ mac_dev->init_phy = dtsec_init_phy;
++ mac_dev->init = init;
++ mac_dev->start = start;
++ mac_dev->stop = stop;
++ mac_dev->set_promisc = fm_mac_set_promiscuous;
++ mac_dev->change_addr = fm_mac_modify_mac_addr;
++ mac_dev->set_multi = set_multi;
++ mac_dev->uninit = uninit;
++ mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
++ mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
++ mac_dev->get_mac_handle = get_mac_handle;
++ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
++ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
++ mac_dev->fm_rtc_enable = fm_rtc_enable;
++ mac_dev->fm_rtc_disable = fm_rtc_disable;
++ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
++ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
++ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
++ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
++ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
++ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
++ mac_dev->set_wol = fm_mac_set_wol;
++ mac_dev->dump_mac_regs = dtsec_dump_regs;
++}
++
++static void __cold setup_xgmac(struct mac_device *mac_dev)
++{
++ mac_dev->init_phy = xgmac_init_phy;
++ mac_dev->init = init;
++ mac_dev->start = start;
++ mac_dev->stop = stop;
++ mac_dev->set_promisc = fm_mac_set_promiscuous;
++ mac_dev->change_addr = fm_mac_modify_mac_addr;
++ mac_dev->set_multi = set_multi;
++ mac_dev->uninit = uninit;
++ mac_dev->get_mac_handle = get_mac_handle;
++ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
++ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
++ mac_dev->set_wol = fm_mac_set_wol;
++ mac_dev->dump_mac_regs = xgmac_dump_regs;
++}
++
++static void __cold setup_memac(struct mac_device *mac_dev)
++{
++ mac_dev->init_phy = memac_init_phy;
++ mac_dev->init = memac_init;
++ mac_dev->start = start;
++ mac_dev->stop = stop;
++ mac_dev->set_promisc = fm_mac_set_promiscuous;
++ mac_dev->change_addr = fm_mac_modify_mac_addr;
++ mac_dev->set_multi = set_multi;
++ mac_dev->uninit = uninit;
++ mac_dev->get_mac_handle = get_mac_handle;
++ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
++ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
++ mac_dev->fm_rtc_enable = fm_rtc_enable;
++ mac_dev->fm_rtc_disable = fm_rtc_disable;
++ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
++ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
++ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
++ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
++ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
++ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
++ mac_dev->set_wol = fm_mac_set_wol;
++ mac_dev->dump_mac_regs = memac_dump_regs;
++ mac_dev->dump_mac_rx_stats = memac_dump_regs_rx;
++ mac_dev->dump_mac_tx_stats = memac_dump_regs_tx;
++}
++
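++/* Per-MAC-type setup dispatch: mac_probe() indexes this array (and
++ * mac_sizeof_priv[]) with the position of the matched entry in
++ * mac_match[], so the three tables must stay in the same order.
++ */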
++void (*const mac_setup[])(struct mac_device *mac_dev) = {
++ [DTSEC] = setup_dtsec,
++ [XGMAC] = setup_xgmac,
++ [MEMAC] = setup_memac
++};
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/mac.c b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
+new file mode 100644
+index 00000000..60133b02
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
+@@ -0,0 +1,489 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++ KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": " fmt
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_address.h>
++#include <linux/of_platform.h>
++#include <linux/of_net.h>
++#include <linux/of_mdio.h>
++#include <linux/phy_fixed.h>
++#include <linux/device.h>
++#include <linux/phy.h>
++#include <linux/io.h>
++
++#include "lnxwrp_fm_ext.h"
++
++#include "mac.h"
++
++#define DTSEC_SUPPORTED \
++ (SUPPORTED_10baseT_Half \
++ | SUPPORTED_10baseT_Full \
++ | SUPPORTED_100baseT_Half \
++ | SUPPORTED_100baseT_Full \
++ | SUPPORTED_Autoneg \
++ | SUPPORTED_Pause \
++ | SUPPORTED_Asym_Pause \
++ | SUPPORTED_MII)
++
++static const char phy_str[][11] = {
++ [PHY_INTERFACE_MODE_MII] = "mii",
++ [PHY_INTERFACE_MODE_GMII] = "gmii",
++ [PHY_INTERFACE_MODE_SGMII] = "sgmii",
++ [PHY_INTERFACE_MODE_QSGMII] = "qsgmii",
++ [PHY_INTERFACE_MODE_TBI] = "tbi",
++ [PHY_INTERFACE_MODE_RMII] = "rmii",
++ [PHY_INTERFACE_MODE_RGMII] = "rgmii",
++ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
++ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
++ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
++ [PHY_INTERFACE_MODE_RTBI] = "rtbi",
++ [PHY_INTERFACE_MODE_XGMII] = "xgmii",
++ [PHY_INTERFACE_MODE_SGMII_2500] = "sgmii-2500",
++};
++
++static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(phy_str); i++)
++ if (strcmp(str, phy_str[i]) == 0)
++ return (phy_interface_t)i;
++
++ return PHY_INTERFACE_MODE_MII;
++}
++
++static const uint16_t phy2speed[] = {
++ [PHY_INTERFACE_MODE_MII] = SPEED_100,
++ [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
++ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
++ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
++ [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
++ [PHY_INTERFACE_MODE_RMII] = SPEED_100,
++ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
++ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
++ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
++ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
++ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
++ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000,
++ [PHY_INTERFACE_MODE_SGMII_2500] = SPEED_2500,
++};
++
++static struct mac_device * __cold
++alloc_macdev(struct device *dev, size_t sizeof_priv,
++ void (*setup)(struct mac_device *mac_dev))
++{
++ struct mac_device *mac_dev;
++
++ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
++ if (unlikely(mac_dev == NULL))
++ mac_dev = ERR_PTR(-ENOMEM);
++ else {
++ mac_dev->dev = dev;
++ dev_set_drvdata(dev, mac_dev);
++ setup(mac_dev);
++ }
++
++ return mac_dev;
++}
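++
++/* The private area is carved out of the same devm allocation, directly
++ * behind struct mac_device; macdev_priv() in mac.h recovers it by
++ * pointer arithmetic:
++ *
++ *   struct mac_priv_s *priv = macdev_priv(mac_dev);
++ */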
++
++static int __cold free_macdev(struct mac_device *mac_dev)
++{
++ dev_set_drvdata(mac_dev->dev, NULL);
++
++ return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
++}
++
++static const struct of_device_id mac_match[] = {
++ [DTSEC] = {
++ .compatible = "fsl,fman-1g-mac"
++ },
++ [XGMAC] = {
++ .compatible = "fsl,fman-10g-mac"
++ },
++ [MEMAC] = {
++ .compatible = "fsl,fman-memac"
++ },
++ {}
++};
++MODULE_DEVICE_TABLE(of, mac_match);
++
++static int __cold mac_probe(struct platform_device *_of_dev)
++{
++ int _errno, i;
++ struct device *dev;
++ struct device_node *mac_node, *dev_node;
++ struct mac_device *mac_dev;
++ struct platform_device *of_dev;
++ struct resource res;
++ const uint8_t *mac_addr;
++ const char *char_prop;
++ int nph;
++ u32 cell_index;
++ const struct of_device_id *match;
++
++ dev = &_of_dev->dev;
++ mac_node = dev->of_node;
++
++ match = of_match_device(mac_match, dev);
++ if (!match)
++ return -EINVAL;
++
++ for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i;
++ i++)
++ ;
++ BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
++
++ mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
++ if (IS_ERR(mac_dev)) {
++ _errno = PTR_ERR(mac_dev);
++ dev_err(dev, "alloc_macdev() = %d\n", _errno);
++ goto _return;
++ }
++
++ INIT_LIST_HEAD(&mac_dev->mc_addr_list);
++
++ /* Get the FM node */
++ dev_node = of_get_parent(mac_node);
++ if (unlikely(dev_node == NULL)) {
++ dev_err(dev, "of_get_parent(%s) failed\n",
++ mac_node->full_name);
++ _errno = -EINVAL;
++ goto _return_dev_set_drvdata;
++ }
++
++ of_dev = of_find_device_by_node(dev_node);
++ if (unlikely(of_dev == NULL)) {
++ dev_err(dev, "of_find_device_by_node(%s) failed\n",
++ dev_node->full_name);
++ _errno = -EINVAL;
++ goto _return_of_node_put;
++ }
++
++ mac_dev->fm_dev = fm_bind(&of_dev->dev);
++ if (unlikely(mac_dev->fm_dev == NULL)) {
++ dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
++ _errno = -ENODEV;
++ goto _return_of_node_put;
++ }
++
++ mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
++ of_node_put(dev_node);
++
++ /* Get the address of the memory mapped registers */
++ _errno = of_address_to_resource(mac_node, 0, &res);
++ if (unlikely(_errno < 0)) {
++ dev_err(dev, "of_address_to_resource(%s) = %d\n",
++ mac_node->full_name, _errno);
++ goto _return_dev_set_drvdata;
++ }
++
++ mac_dev->res = __devm_request_region(
++ dev,
++ fm_get_mem_region(mac_dev->fm_dev),
++ res.start, res.end + 1 - res.start, "mac");
++ if (unlikely(mac_dev->res == NULL)) {
++		dev_err(dev, "__devm_request_region(mac) failed\n");
++ _errno = -EBUSY;
++ goto _return_dev_set_drvdata;
++ }
++
++ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
++ mac_dev->res->end + 1
++ - mac_dev->res->start);
++ if (unlikely(mac_dev->vaddr == NULL)) {
++ dev_err(dev, "devm_ioremap() failed\n");
++ _errno = -EIO;
++ goto _return_dev_set_drvdata;
++ }
++
++#define TBIPA_OFFSET 0x1c
++#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
++ mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
++ if (mac_dev->tbi_node) {
++ u32 tbiaddr = TBIPA_DEFAULT_ADDR;
++ const __be32 *tbi_reg;
++ void __iomem *addr;
++
++ tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL);
++ if (tbi_reg)
++ tbiaddr = be32_to_cpup(tbi_reg);
++ addr = mac_dev->vaddr + TBIPA_OFFSET;
++ /* TODO: out_be32 does not exist on ARM */
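++		/* on little-endian ARM kernels iowrite32be() would be the
++		 * portable equivalent (untested assumption)
++		 */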
++ out_be32(addr, tbiaddr);
++ }
++
++ if (!of_device_is_available(mac_node)) {
++ devm_iounmap(dev, mac_dev->vaddr);
++ __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
++ res.start, res.end + 1 - res.start);
++ fm_unbind(mac_dev->fm_dev);
++ devm_kfree(dev, mac_dev);
++ dev_set_drvdata(dev, NULL);
++ return -ENODEV;
++ }
++
++ /* Get the cell-index */
++ _errno = of_property_read_u32(mac_node, "cell-index", &cell_index);
++ if (unlikely(_errno)) {
++ dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n",
++ mac_node->full_name);
++ goto _return_dev_set_drvdata;
++ }
++ mac_dev->cell_index = (uint8_t)cell_index;
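++	/* 10G MACs are typically numbered from 8 in the device tree;
++	 * normalize to a zero-based per-type index (inferred from the
++	 * FM_MAX_NUM_OF_1G_MACS offset applied in mac-api.c)
++	 */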
++ if (mac_dev->cell_index >= 8)
++ mac_dev->cell_index -= 8;
++
++ /* Get the MAC address */
++ mac_addr = of_get_mac_address(mac_node);
++ if (unlikely(mac_addr == NULL)) {
++ dev_err(dev, "of_get_mac_address(%s) failed\n",
++ mac_node->full_name);
++ _errno = -EINVAL;
++ goto _return_dev_set_drvdata;
++ }
++ memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
++
++ /* Verify the number of port handles */
++ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
++ if (unlikely(nph < 0)) {
++ dev_err(dev, "Cannot read port handles of mac node %s from device tree\n",
++ mac_node->full_name);
++ _errno = nph;
++ goto _return_dev_set_drvdata;
++ }
++
++ if (nph != ARRAY_SIZE(mac_dev->port_dev)) {
++ dev_err(dev, "Not supported number of port handles of mac node %s from device tree\n",
++ mac_node->full_name);
++ _errno = -EINVAL;
++ goto _return_dev_set_drvdata;
++ }
++
++ for_each_port_device(i, mac_dev->port_dev) {
++ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
++ if (unlikely(dev_node == NULL)) {
++ dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n",
++ mac_node->full_name);
++ _errno = -EINVAL;
++ goto _return_of_node_put;
++ }
++
++ of_dev = of_find_device_by_node(dev_node);
++ if (unlikely(of_dev == NULL)) {
++ dev_err(dev, "of_find_device_by_node(%s) failed\n",
++ dev_node->full_name);
++ _errno = -EINVAL;
++ goto _return_of_node_put;
++ }
++
++ mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
++ if (unlikely(mac_dev->port_dev[i] == NULL)) {
++			dev_err(dev, "fm_port_bind(%s) failed\n",
++ dev_node->full_name);
++ _errno = -EINVAL;
++ goto _return_of_node_put;
++ }
++ of_node_put(dev_node);
++ }
++
++ /* Get the PHY connection type */
++ _errno = of_property_read_string(mac_node, "phy-connection-type",
++ &char_prop);
++	if (unlikely(_errno)) {
++		dev_warn(dev,
++			 "Cannot read PHY connection type of mac node %s from device tree. Defaulting to MII\n",
++			 mac_node->full_name);
++		mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
++		/* keep the strstr() checks below well defined */
++		char_prop = "";
++	} else {
++		mac_dev->phy_if = str2phy(char_prop);
++	}
++
++ mac_dev->link = false;
++ mac_dev->half_duplex = false;
++ mac_dev->speed = phy2speed[mac_dev->phy_if];
++ mac_dev->max_speed = mac_dev->speed;
++ mac_dev->if_support = DTSEC_SUPPORTED;
++ /* We don't support half-duplex in SGMII mode */
++ if (strstr(char_prop, "sgmii") || strstr(char_prop, "qsgmii") ||
++ strstr(char_prop, "sgmii-2500"))
++ mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
++ SUPPORTED_100baseT_Half);
++
++ /* Gigabit support (no half-duplex) */
++ if (mac_dev->max_speed == SPEED_1000 ||
++ mac_dev->max_speed == SPEED_2500)
++ mac_dev->if_support |= SUPPORTED_1000baseT_Full;
++
++ /* The 10G interface only supports one mode */
++ if (strstr(char_prop, "xgmii"))
++ mac_dev->if_support = SUPPORTED_10000baseT_Full;
++
++ /* Get the rest of the PHY information */
++ mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
++ if (!mac_dev->phy_node) {
++ struct phy_device *phy;
++
++		if (!of_phy_is_fixed_link(mac_node)) {
++			dev_err(dev, "Wrong PHY information of mac node %s\n",
++				mac_node->full_name);
++			/* make sure the failure is reported to the caller */
++			_errno = -EINVAL;
++			goto _return_dev_set_drvdata;
++		}
++
++		_errno = of_phy_register_fixed_link(mac_node);
++		if (_errno)
++			goto _return_dev_set_drvdata;
++
++		mac_dev->fixed_link = devm_kzalloc(mac_dev->dev,
++						   sizeof(*mac_dev->fixed_link),
++						   GFP_KERNEL);
++		if (!mac_dev->fixed_link) {
++			_errno = -ENOMEM;
++			goto _return_dev_set_drvdata;
++		}
++
++		mac_dev->phy_node = of_node_get(mac_node);
++		phy = of_phy_find_device(mac_dev->phy_node);
++		if (!phy) {
++			_errno = -EINVAL;
++			goto _return_dev_set_drvdata;
++		}
++
++ mac_dev->fixed_link->link = phy->link;
++ mac_dev->fixed_link->speed = phy->speed;
++ mac_dev->fixed_link->duplex = phy->duplex;
++ mac_dev->fixed_link->pause = phy->pause;
++ mac_dev->fixed_link->asym_pause = phy->asym_pause;
++ }
++
++ _errno = mac_dev->init(mac_dev);
++ if (unlikely(_errno < 0)) {
++ dev_err(dev, "mac_dev->init() = %d\n", _errno);
++ goto _return_dev_set_drvdata;
++ }
++
++	/* PAUSE frame autonegotiation enabled */
++	mac_dev->autoneg_pause = true;
++
++	/* By initializing the active values to false, force FMD to enable
++	 * PAUSE frames on both RX and TX
++	 */
++ mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
++ mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
++ _errno = set_mac_active_pause(mac_dev, true, true);
++ if (unlikely(_errno < 0))
++ dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
++
++ dev_info(dev,
++ "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
++ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
++ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
++
++ goto _return;
++
++_return_of_node_put:
++ of_node_put(dev_node);
++_return_dev_set_drvdata:
++ dev_set_drvdata(dev, NULL);
++_return:
++ return _errno;
++}
++
++static int __cold mac_remove(struct platform_device *of_dev)
++{
++ int i, _errno;
++ struct device *dev;
++ struct mac_device *mac_dev;
++
++ dev = &of_dev->dev;
++ mac_dev = (struct mac_device *)dev_get_drvdata(dev);
++
++ for_each_port_device(i, mac_dev->port_dev)
++ fm_port_unbind(mac_dev->port_dev[i]);
++
++ fm_unbind(mac_dev->fm_dev);
++
++ _errno = free_macdev(mac_dev);
++
++ return _errno;
++}
++
++static struct platform_driver mac_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = mac_match,
++ .owner = THIS_MODULE,
++ },
++ .probe = mac_probe,
++ .remove = mac_remove
++};
++
++static int __init __cold mac_load(void)
++{
++ int _errno;
++
++ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
++ KBUILD_BASENAME".c", __func__);
++
++ pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description);
++
++ _errno = platform_driver_register(&mac_driver);
++ if (unlikely(_errno < 0)) {
++ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n",
++ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
++ goto _return;
++ }
++
++_return:
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME".c", __func__);
++
++ return _errno;
++}
++module_init(mac_load);
++
++static void __exit __cold mac_unload(void)
++{
++ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
++ KBUILD_BASENAME".c", __func__);
++
++ platform_driver_unregister(&mac_driver);
++
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME".c", __func__);
++}
++module_exit(mac_unload);
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/mac.h b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
+new file mode 100644
+index 00000000..b5288f2a
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
+@@ -0,0 +1,135 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __MAC_H
++#define __MAC_H
++
++#include <linux/device.h> /* struct device, BUS_ID_SIZE */
++#include <linux/if_ether.h> /* ETH_ALEN */
++#include <linux/phy.h> /* phy_interface_t, struct phy_device */
++#include <linux/list.h>
++
++#include "lnxwrp_fsl_fman.h" /* struct port_device */
++
++enum {DTSEC, XGMAC, MEMAC};
++
++struct mac_device {
++ struct device *dev;
++ void *priv;
++ uint8_t cell_index;
++ struct resource *res;
++ void __iomem *vaddr;
++ uint8_t addr[ETH_ALEN];
++ bool promisc;
++
++ struct fm *fm_dev;
++ struct fm_port *port_dev[2];
++
++ phy_interface_t phy_if;
++ u32 if_support;
++ bool link;
++ bool half_duplex;
++ uint16_t speed;
++ uint16_t max_speed;
++ struct device_node *phy_node;
++ char fixed_bus_id[MII_BUS_ID_SIZE + 3];
++ struct device_node *tbi_node;
++ struct phy_device *phy_dev;
++ void *fm;
++ /* List of multicast addresses */
++ struct list_head mc_addr_list;
++ struct fixed_phy_status *fixed_link;
++
++ bool autoneg_pause;
++ bool rx_pause_req;
++ bool tx_pause_req;
++ bool rx_pause_active;
++ bool tx_pause_active;
++
++ struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
++ int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
++ int (*init)(struct mac_device *mac_dev);
++ int (*start)(struct mac_device *mac_dev);
++ int (*stop)(struct mac_device *mac_dev);
++ int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
++ int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
++ int (*set_multi)(struct net_device *net_dev,
++ struct mac_device *mac_dev);
++ int (*uninit)(struct fm_mac_dev *fm_mac_dev);
++ int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
++ int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
++ int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
++ int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
++ int (*fm_rtc_enable)(struct fm *fm_dev);
++ int (*fm_rtc_disable)(struct fm *fm_dev);
++ int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
++ int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
++ int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
++ int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
++ int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
++ int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
++ uint64_t fiper);
++#ifdef CONFIG_PTP_1588_CLOCK_DPAA
++ int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
++ int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
++#endif
++ int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
++ bool en);
++ int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
++ int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
++ int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
++};
++
++struct mac_address {
++ uint8_t addr[ETH_ALEN];
++ struct list_head list;
++};
++
++#define get_fm_handle(net_dev) \
++ (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
++
++#define for_each_port_device(i, port_dev) \
++ for (i = 0; i < ARRAY_SIZE(port_dev); i++)
++
++static inline __attribute__((nonnull)) void *macdev_priv(
++ const struct mac_device *mac_dev)
++{
++ return (void *)mac_dev + sizeof(*mac_dev);
++}
++
++extern const char *mac_driver_description;
++extern const size_t mac_sizeof_priv[];
++extern void (*const mac_setup[])(struct mac_device *mac_dev);
++
++int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
++void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
++
++#endif /* __MAC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
+new file mode 100644
+index 00000000..fb084af5
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
+@@ -0,0 +1,848 @@
++/* Copyright 2011-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/* Offline Parsing / Host Command port driver for FSL QorIQ FMan.
++ * Validates device-tree configuration and sets up the offline ports.
++ */
++
++#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
++ KBUILD_BASENAME".c", __LINE__, __func__
++#else
++#define pr_fmt(fmt) \
++ KBUILD_MODNAME ": " fmt
++#endif
++
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <linux/fsl_qman.h>
++
++#include "offline_port.h"
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++
++#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
++/* Manip extra space and data alignment for fragmentation */
++#define FRAG_MANIP_SPACE 128
++#define FRAG_DATA_ALIGN 64
++
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
++MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
++
++
++static const struct of_device_id oh_port_match_table[] = {
++ {
++ .compatible = "fsl,dpa-oh"
++ },
++ {
++ .compatible = "fsl,dpa-oh-shared"
++ },
++ {}
++};
++MODULE_DEVICE_TABLE(of, oh_port_match_table);
++
++#ifdef CONFIG_PM
++
++static int oh_suspend(struct device *dev)
++{
++ struct dpa_oh_config_s *oh_config;
++
++ oh_config = dev_get_drvdata(dev);
++ return fm_port_suspend(oh_config->oh_port);
++}
++
++static int oh_resume(struct device *dev)
++{
++ struct dpa_oh_config_s *oh_config;
++
++ oh_config = dev_get_drvdata(dev);
++ return fm_port_resume(oh_config->oh_port);
++}
++
++static const struct dev_pm_ops oh_pm_ops = {
++ .suspend = oh_suspend,
++ .resume = oh_resume,
++};
++
++#define OH_PM_OPS (&oh_pm_ops)
++
++#else /* CONFIG_PM */
++
++#define OH_PM_OPS NULL
++
++#endif /* CONFIG_PM */
++
++/* Creates Frame Queues */
++static uint32_t oh_fq_create(struct qman_fq *fq,
++ uint32_t fq_id, uint16_t channel,
++ uint16_t wq_id)
++{
++ struct qm_mcc_initfq fq_opts;
++ uint32_t create_flags, init_flags;
++ uint32_t ret = 0;
++
++ if (fq == NULL)
++ return 1;
++
++ /* Set flags for FQ create */
++ create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;
++
++ /* Create frame queue */
++ ret = qman_create_fq(fq_id, create_flags, fq);
++ if (ret != 0)
++ return 1;
++
++ /* Set flags for FQ init */
++ init_flags = QMAN_INITFQ_FLAG_SCHED;
++
++ /* Set FQ init options. Specify destination WQ ID and channel */
++ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
++ fq_opts.fqd.dest.wq = wq_id;
++ fq_opts.fqd.dest.channel = channel;
++
++ /* Initialize frame queue */
++ ret = qman_init_fq(fq, init_flags, &fq_opts);
++ if (ret != 0) {
++ qman_destroy_fq(fq, 0);
++ return 1;
++ }
++
++ return 0;
++}
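++
++/* Illustrative use (fq_id and channel_id are placeholders and the
++ * work-queue number 3 is arbitrary; error handling as in the callers
++ * elsewhere in this driver):
++ *
++ *   struct qman_fq *fq = devm_kzalloc(dev, sizeof(*fq), GFP_KERNEL);
++ *
++ *   if (!fq || oh_fq_create(fq, fq_id, channel_id, 3))
++ *           return -EINVAL;
++ */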
++
++static void dump_fq(struct device *dev, int fqid, uint16_t channel)
++{
++ if (channel) {
++ /* display fqs with a valid (!= 0) destination channel */
++ dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel);
++ }
++}
++
++static void dump_fq_duple(struct device *dev, struct qman_fq *fqs,
++ int fqs_count, uint16_t channel_id)
++{
++ int i;
++	int i;
++
++ dump_fq(dev, (fqs + i)->fqid, channel_id);
++}
++
++static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf)
++{
++ struct list_head *fq_list;
++ struct fq_duple *fqd;
++ int i;
++
++ dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid);
++ dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid);
++
++ /* TX queues (old initialization) */
++ dev_info(dev, "Initialized queues:");
++ for (i = 0; i < conf->egress_cnt; i++)
++ dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt,
++ conf->channel);
++
++ /* initialized ingress queues */
++ list_for_each(fq_list, &conf->fqs_ingress_list) {
++ fqd = list_entry(fq_list, struct fq_duple, fq_list);
++ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
++ }
++
++ /* initialized egress queues */
++ list_for_each(fq_list, &conf->fqs_egress_list) {
++ fqd = list_entry(fq_list, struct fq_duple, fq_list);
++ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
++ }
++}
++
++/* Destroys Frame Queues */
++static void oh_fq_destroy(struct qman_fq *fq)
++{
++ int _errno = 0;
++
++ _errno = qman_retire_fq(fq, NULL);
++ if (unlikely(_errno < 0))
++ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
++ KBUILD_BASENAME".c", __LINE__, __func__,
++ qman_fq_fqid(fq), _errno);
++
++ _errno = qman_oos_fq(fq);
++ if (unlikely(_errno < 0)) {
++ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
++ KBUILD_BASENAME".c", __LINE__, __func__,
++ qman_fq_fqid(fq), _errno);
++ }
++
++ qman_destroy_fq(fq, 0);
++}
++
++/* Allocation code for the OH port's PCD frame queues */
++static int __cold oh_alloc_pcd_fqids(struct device *dev,
++ uint32_t num,
++ uint8_t alignment,
++ uint32_t *base_fqid)
++{
++ dev_crit(dev, "callback not implemented!\n");
++ BUG();
++
++ return 0;
++}
++
++static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
++{
++ dev_crit(dev, "callback not implemented!\n");
++ BUG();
++
++ return 0;
++}
++
++static void oh_set_buffer_layout(struct fm_port *port,
++ struct dpa_buffer_layout_s *layout)
++{
++ struct fm_port_params params;
++
++ layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
++ layout->parse_results = true;
++ layout->hash_results = true;
++ layout->time_stamp = false;
++
++ fm_port_get_buff_layout_ext_params(port, &params);
++ layout->manip_extra_space = params.manip_extra_space;
++ layout->data_align = params.data_align;
++}
++
++static int
++oh_port_probe(struct platform_device *_of_dev)
++{
++ struct device *dpa_oh_dev;
++ struct device_node *dpa_oh_node;
++ int lenp, _errno = 0, fq_idx, duple_idx;
++ int n_size, i, j, ret, duples_count;
++ struct platform_device *oh_of_dev;
++ struct device_node *oh_node, *bpool_node = NULL, *root_node;
++ struct device *oh_dev;
++ struct dpa_oh_config_s *oh_config = NULL;
++ const __be32 *oh_all_queues;
++ const __be32 *channel_ids;
++ const __be32 *oh_tx_queues;
++ uint32_t queues_count;
++ uint32_t crt_fqid_base;
++ uint32_t crt_fq_count;
++ bool frag_enabled = false;
++ struct fm_port_params oh_port_tx_params;
++ struct fm_port_pcd_param oh_port_pcd_params;
++ struct dpa_buffer_layout_s buf_layout;
++
++ /* True if the current partition owns the OH port. */
++ bool init_oh_port;
++
++ const struct of_device_id *match;
++ int crt_ext_pools_count;
++ u32 ext_pool_size;
++ u32 port_id;
++ u32 channel_id;
++
++ int channel_ids_count;
++ int channel_idx;
++ struct fq_duple *fqd;
++ struct list_head *fq_list, *fq_list_tmp;
++
++ const __be32 *bpool_cfg;
++ uint32_t bpid;
++
++ memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
++ dpa_oh_dev = &_of_dev->dev;
++ dpa_oh_node = dpa_oh_dev->of_node;
++ BUG_ON(dpa_oh_node == NULL);
++
++ match = of_match_device(oh_port_match_table, dpa_oh_dev);
++ if (!match)
++ return -EINVAL;
++
++ dev_dbg(dpa_oh_dev, "Probing OH port...\n");
++
++ /* Find the referenced OH node */
++ oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0);
++ if (oh_node == NULL) {
++ dev_err(dpa_oh_dev,
++ "Can't find OH node referenced from node %s\n",
++ dpa_oh_node->full_name);
++ return -EINVAL;
++ }
++ dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n",
++ match->compatible);
++
++ _errno = of_property_read_u32(oh_node, "cell-index", &port_id);
++ if (_errno) {
++ dev_err(dpa_oh_dev, "No port id found in node %s\n",
++ dpa_oh_node->full_name);
++ goto return_kfree;
++ }
++
++ _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id",
++ &channel_id);
++ if (_errno) {
++ dev_err(dpa_oh_dev, "No channel id found in node %s\n",
++ dpa_oh_node->full_name);
++ goto return_kfree;
++ }
++
++ oh_of_dev = of_find_device_by_node(oh_node);
++ BUG_ON(oh_of_dev == NULL);
++ oh_dev = &oh_of_dev->dev;
++
++ /* The OH port must be initialized exactly once.
++ * The following scenarios are of interest:
++ * - the node is Linux-private (will always initialize it);
++ * - the node is shared between two Linux partitions
++ * (only one of them will initialize it);
++ * - the node is shared between a Linux and a LWE partition
++ * (Linux will initialize it) - "fsl,dpa-oh-shared"
++ */
++
++ /* Check if the current partition owns the OH port
++ * and ought to initialize it. It may be the case that we leave this
++ * to another (also Linux) partition.
++ */
++ init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");
++
++ /* If we aren't the "owner" of the OH node, we're done here. */
++ if (!init_oh_port) {
++ dev_dbg(dpa_oh_dev,
++ "Not owning the shared OH port %s, will not initialize it.\n",
++ oh_node->full_name);
++ of_node_put(oh_node);
++ return 0;
++ }
++
++ /* Allocate OH dev private data */
++ oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
++ if (oh_config == NULL) {
++ dev_err(dpa_oh_dev,
++ "Can't allocate private data for OH node %s referenced from node %s!\n",
++ oh_node->full_name, dpa_oh_node->full_name);
++ _errno = -ENOMEM;
++ goto return_kfree;
++ }
++
++ INIT_LIST_HEAD(&oh_config->fqs_ingress_list);
++ INIT_LIST_HEAD(&oh_config->fqs_egress_list);
++
++ /* FQs that enter OH port */
++ lenp = 0;
++ oh_all_queues = of_get_property(dpa_oh_node,
++ "fsl,qman-frame-queues-ingress", &lenp);
++ if (lenp % (2 * sizeof(*oh_all_queues))) {
++ dev_warn(dpa_oh_dev,
++ "Wrong ingress queues format for OH node %s referenced from node %s!\n",
++ oh_node->full_name, dpa_oh_node->full_name);
++ /* just ignore the last unpaired value */
++ }
++
++ duples_count = lenp / (2 * sizeof(*oh_all_queues));
++ dev_err(dpa_oh_dev, "Allocating %d ingress frame queues duples\n",
++ duples_count);
++ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
++ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
++ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
++
++ fqd = devm_kzalloc(dpa_oh_dev,
++ sizeof(struct fq_duple), GFP_KERNEL);
++ if (!fqd) {
++ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
++ oh_node->full_name,
++ dpa_oh_node->full_name);
++ _errno = -ENOMEM;
++ goto return_kfree;
++ }
++
++ fqd->fqs = devm_kzalloc(dpa_oh_dev,
++ crt_fq_count * sizeof(struct qman_fq),
++ GFP_KERNEL);
++ if (!fqd->fqs) {
++ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
++ oh_node->full_name,
++ dpa_oh_node->full_name);
++ _errno = -ENOMEM;
++ goto return_kfree;
++ }
++
++ for (j = 0; j < crt_fq_count; j++)
++ (fqd->fqs + j)->fqid = crt_fqid_base + j;
++ fqd->fqs_count = crt_fq_count;
++ fqd->channel_id = (uint16_t)channel_id;
++ list_add(&fqd->fq_list, &oh_config->fqs_ingress_list);
++ }
++
++ /* create the ingress queues */
++ list_for_each(fq_list, &oh_config->fqs_ingress_list) {
++ fqd = list_entry(fq_list, struct fq_duple, fq_list);
++
++ for (j = 0; j < fqd->fqs_count; j++) {
++ ret = oh_fq_create(fqd->fqs + j,
++ (fqd->fqs + j)->fqid,
++ fqd->channel_id, 3);
++ if (ret != 0) {
++ dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n",
++ (fqd->fqs + j)->fqid,
++ oh_node->full_name,
++ dpa_oh_node->full_name);
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++ }
++ }
++
++ /* FQs that exit OH port */
++ lenp = 0;
++ oh_all_queues = of_get_property(dpa_oh_node,
++ "fsl,qman-frame-queues-egress", &lenp);
++ if (lenp % (2 * sizeof(*oh_all_queues))) {
++ dev_warn(dpa_oh_dev,
++ "Wrong egress queues format for OH node %s referenced from node %s!\n",
++ oh_node->full_name, dpa_oh_node->full_name);
++ /* just ignore the last unpaired value */
++ }
++
++ duples_count = lenp / (2 * sizeof(*oh_all_queues));
++ dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n",
++ duples_count);
++ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
++ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
++ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
++
++ fqd = devm_kzalloc(dpa_oh_dev,
++ sizeof(struct fq_duple), GFP_KERNEL);
++ if (!fqd) {
++ dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
++ oh_node->full_name,
++ dpa_oh_node->full_name);
++ _errno = -ENOMEM;
++ goto return_kfree;
++ }
++
++ fqd->fqs = devm_kzalloc(dpa_oh_dev,
++ crt_fq_count * sizeof(struct qman_fq),
++ GFP_KERNEL);
++ if (!fqd->fqs) {
++ dev_err(dpa_oh_dev,
++ "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
++ oh_node->full_name,
++ dpa_oh_node->full_name);
++ _errno = -ENOMEM;
++ goto return_kfree;
++ }
++
++ for (j = 0; j < crt_fq_count; j++)
++ (fqd->fqs + j)->fqid = crt_fqid_base + j;
++ fqd->fqs_count = crt_fq_count;
++ /* channel ID is specified in another attribute */
++ fqd->channel_id = 0;
++ list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list);
++
++		/* the FQs themselves are created below, once their
++		 * egress channel IDs have been read
++		 */
++
++ }
++
++ /* channel_ids for FQs that exit OH port */
++ lenp = 0;
++ channel_ids = of_get_property(dpa_oh_node,
++ "fsl,qman-channel-ids-egress", &lenp);
++
++ channel_ids_count = lenp / (sizeof(*channel_ids));
++ if (channel_ids_count != duples_count) {
++ dev_warn(dpa_oh_dev,
++ "Not all egress queues have a channel id for OH node %s referenced from node %s!\n",
++ oh_node->full_name, dpa_oh_node->full_name);
++ /* just ignore the queues that do not have a Channel ID */
++ }
++
++ channel_idx = 0;
++ list_for_each(fq_list, &oh_config->fqs_egress_list) {
++ if (channel_idx + 1 > channel_ids_count)
++ break;
++ fqd = list_entry(fq_list, struct fq_duple, fq_list);
++ fqd->channel_id =
++ (uint16_t)be32_to_cpu(channel_ids[channel_idx++]);
++ }
++
++ /* create egress queues */
++ list_for_each(fq_list, &oh_config->fqs_egress_list) {
++ fqd = list_entry(fq_list, struct fq_duple, fq_list);
++
++ if (fqd->channel_id == 0) {
++ /* missing channel id in dts */
++ continue;
++ }
++
++ for (j = 0; j < fqd->fqs_count; j++) {
++ ret = oh_fq_create(fqd->fqs + j,
++ (fqd->fqs + j)->fqid,
++ fqd->channel_id, 3);
++ if (ret != 0) {
++ dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n",
++ (fqd->fqs + j)->fqid,
++ oh_node->full_name,
++ dpa_oh_node->full_name);
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++ }
++ }
++
++ /* Read FQ ids/nums for the DPA OH node */
++ oh_all_queues = of_get_property(dpa_oh_node,
++ "fsl,qman-frame-queues-oh", &lenp);
++ if (oh_all_queues == NULL) {
++ dev_err(dpa_oh_dev,
++ "No frame queues have been defined for OH node %s referenced from node %s\n",
++ oh_node->full_name, dpa_oh_node->full_name);
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++
++ /* Check that the OH error and default FQs are there */
++ BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
++ queues_count = lenp / (2 * sizeof(*oh_all_queues));
++ if (queues_count != 2) {
++ dev_err(dpa_oh_dev,
++ "Error and Default queues must be defined for OH node %s referenced from node %s\n",
++ oh_node->full_name, dpa_oh_node->full_name);
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++
++ /* Read the FQIDs defined for this OH port */
++ dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
++ fq_idx = 0;
++
++ /* Error FQID - must be present */
++ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
++ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
++ if (crt_fq_count != 1) {
++ dev_err(dpa_oh_dev,
++ "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n",
++ oh_node->full_name, dpa_oh_node->full_name,
++ crt_fq_count);
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++ oh_config->error_fqid = crt_fqid_base;
++ dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
++ oh_config->error_fqid, oh_node->full_name);
++
++ /* Default FQID - must be present */
++ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
++ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
++ if (crt_fq_count != 1) {
++ dev_err(dpa_oh_dev,
++ "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n",
++ oh_node->full_name, dpa_oh_node->full_name,
++ crt_fq_count);
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++ oh_config->default_fqid = crt_fqid_base;
++ dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
++ oh_config->default_fqid, oh_node->full_name);
++
++ /* TX FQID - presence is optional */
++ oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx",
++ &lenp);
++ if (oh_tx_queues == NULL) {
++ dev_dbg(dpa_oh_dev,
++ "No tx queues have been defined for OH node %s referenced from node %s\n",
++ oh_node->full_name, dpa_oh_node->full_name);
++ goto config_port;
++ }
++
++ /* Check that queues-tx has only a base and a count defined */
++ BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
++ queues_count = lenp / (2 * sizeof(*oh_tx_queues));
++ if (queues_count != 1) {
++ dev_err(dpa_oh_dev,
++ "TX queues must be defined in only one <base count> tuple for OH node %s referenced from node %s\n",
++ oh_node->full_name, dpa_oh_node->full_name);
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++
++ fq_idx = 0;
++ crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]);
++ crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]);
++ oh_config->egress_cnt = crt_fq_count;
++
++ /* Allocate TX queues */
++ dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
++ oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
++ crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
++ if (oh_config->egress_fqs == NULL) {
++ dev_err(dpa_oh_dev,
++ "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n",
++ oh_node->full_name, dpa_oh_node->full_name);
++ _errno = -ENOMEM;
++ goto return_kfree;
++ }
++
++ /* Create TX queues */
++ for (i = 0; i < crt_fq_count; i++) {
++ ret = oh_fq_create(oh_config->egress_fqs + i,
++ crt_fqid_base + i, (uint16_t)channel_id, 3);
++ if (ret != 0) {
++ dev_err(dpa_oh_dev,
++ "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n",
++ crt_fqid_base + i, oh_node->full_name,
++ dpa_oh_node->full_name);
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++ }
++
++config_port:
++ /* Get a handle to the fm_port so we can set
++ * its configuration params
++ */
++ oh_config->oh_port = fm_port_bind(oh_dev);
++ if (oh_config->oh_port == NULL) {
++ dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
++ oh_node->full_name);
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++
++ oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
++
++ /* read the pool handlers */
++ crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node,
++ "fsl,bman-buffer-pools", NULL);
++ if (crt_ext_pools_count <= 0) {
++ dev_info(dpa_oh_dev,
++ "OH port %s has no buffer pool. Fragmentation will not be enabled\n",
++ oh_node->full_name);
++ goto init_port;
++ }
++
++	/* used for reading ext_pool_size */
++ root_node = of_find_node_by_path("/");
++ if (root_node == NULL) {
++ dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++
++ n_size = of_n_size_cells(root_node);
++ of_node_put(root_node);
++
++ dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n",
++ crt_ext_pools_count);
++
++ oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count;
++
++ for (i = 0; i < crt_ext_pools_count; i++) {
++ bpool_node = of_parse_phandle(dpa_oh_node,
++ "fsl,bman-buffer-pools", i);
++ if (bpool_node == NULL) {
++ dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++
++ _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid);
++ if (_errno) {
++ dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n");
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++
++ oh_port_tx_params.pool_param[i].id = (uint8_t)bpid;
++ dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid);
++
++ bpool_cfg = of_get_property(bpool_node,
++ "fsl,bpool-ethernet-cfg", &lenp);
++ if (bpool_cfg == NULL) {
++ dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
++ _errno = -EINVAL;
++ goto return_kfree;
++ }
++
++ ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
++ oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size;
++ dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
++ ext_pool_size);
++ of_node_put(bpool_node);
++
++ }
++
++ if (buf_layout.data_align != FRAG_DATA_ALIGN ||
++ buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
++ goto init_port;
++
++ frag_enabled = true;
++ dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d",
++ port_id);
++
++init_port:
++ of_node_put(oh_node);
++ /* Set Tx params */
++ dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
++ oh_config->error_fqid, oh_config->default_fqid, (&buf_layout),
++ frag_enabled);
++ /* Set PCD params */
++ oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
++ oh_port_pcd_params.cbf = oh_free_pcd_fqids;
++ oh_port_pcd_params.dev = dpa_oh_dev;
++ fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
++
++ dev_set_drvdata(dpa_oh_dev, oh_config);
++
++ /* Enable the OH port */
++ _errno = fm_port_enable(oh_config->oh_port);
++ if (_errno)
++ goto return_kfree;
++
++ dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
++
++ /* print of all referenced & created queues */
++ dump_oh_config(dpa_oh_dev, oh_config);
++
++ return 0;
++
++return_kfree:
++ if (bpool_node)
++ of_node_put(bpool_node);
++ if (oh_node)
++ of_node_put(oh_node);
++	/* oh_config is still NULL if we failed before allocating it */
++	if (oh_config) {
++		if (oh_config->egress_fqs)
++			devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
++
++		list_for_each_safe(fq_list, fq_list_tmp,
++				   &oh_config->fqs_ingress_list) {
++			fqd = list_entry(fq_list, struct fq_duple, fq_list);
++			list_del(fq_list);
++			devm_kfree(dpa_oh_dev, fqd->fqs);
++			devm_kfree(dpa_oh_dev, fqd);
++		}
++
++		list_for_each_safe(fq_list, fq_list_tmp,
++				   &oh_config->fqs_egress_list) {
++			fqd = list_entry(fq_list, struct fq_duple, fq_list);
++			list_del(fq_list);
++			devm_kfree(dpa_oh_dev, fqd->fqs);
++			devm_kfree(dpa_oh_dev, fqd);
++		}
++
++		devm_kfree(dpa_oh_dev, oh_config);
++	}
++ return _errno;
++}
++
++static int __cold oh_port_remove(struct platform_device *_of_dev)
++{
++ int _errno = 0, i;
++ struct dpa_oh_config_s *oh_config;
++
++ pr_info("Removing OH port...\n");
++
++ oh_config = dev_get_drvdata(&_of_dev->dev);
++ if (oh_config == NULL) {
++ pr_err(KBUILD_MODNAME
++ ": %s:%hu:%s(): No OH config in device private data!\n",
++ KBUILD_BASENAME".c", __LINE__, __func__);
++ _errno = -ENODEV;
++ goto return_error;
++ }
++
++ if (oh_config->egress_fqs)
++ for (i = 0; i < oh_config->egress_cnt; i++)
++ oh_fq_destroy(oh_config->egress_fqs + i);
++
++ if (oh_config->oh_port == NULL) {
++ pr_err(KBUILD_MODNAME
++ ": %s:%hu:%s(): No fm port in device private data!\n",
++ KBUILD_BASENAME".c", __LINE__, __func__);
++ _errno = -EINVAL;
++ goto free_egress_fqs;
++ }
++
++ _errno = fm_port_disable(oh_config->oh_port);
++
++free_egress_fqs:
++ if (oh_config->egress_fqs)
++ devm_kfree(&_of_dev->dev, oh_config->egress_fqs);
++ devm_kfree(&_of_dev->dev, oh_config);
++ dev_set_drvdata(&_of_dev->dev, NULL);
++
++return_error:
++ return _errno;
++}
++
++static struct platform_driver oh_port_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = oh_port_match_table,
++ .owner = THIS_MODULE,
++ .pm = OH_PM_OPS,
++ },
++ .probe = oh_port_probe,
++ .remove = oh_port_remove
++};
++
++static int __init __cold oh_port_load(void)
++{
++ int _errno;
++
++ pr_info(OH_MOD_DESCRIPTION "\n");
++
++ _errno = platform_driver_register(&oh_port_driver);
++ if (_errno < 0) {
++ pr_err(KBUILD_MODNAME
++ ": %s:%hu:%s(): platform_driver_register() = %d\n",
++ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
++ }
++
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME".c", __func__);
++ return _errno;
++}
++module_init(oh_port_load);
++
++static void __exit __cold oh_port_unload(void)
++{
++ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
++ KBUILD_BASENAME".c", __func__);
++
++ platform_driver_unregister(&oh_port_driver);
++
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME".c", __func__);
++}
++module_exit(oh_port_unload);
+diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
+new file mode 100644
+index 00000000..432ee88d
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
+@@ -0,0 +1,59 @@
++/* Copyright 2011 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __OFFLINE_PORT_H
++#define __OFFLINE_PORT_H
++
++struct fm_port;
++struct qman_fq;
++
++/* fqs are defined in duples (base_fq, fq_count) */
++struct fq_duple {
++ struct qman_fq *fqs;
++ int fqs_count;
++ uint16_t channel_id;
++ struct list_head fq_list;
++};
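++
++/* Illustrative example (the values are hypothetical): a device tree
++ * property such as
++ *	fsl,qman-frame-queues-ingress = <0x100 8 0x200 4>;
++ * describes two duples, one covering FQIDs 0x100..0x107 and one covering
++ * FQIDs 0x200..0x203, each turned into an fq_duple on a driver list.
++ */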
++
++/* OH port configuration */
++struct dpa_oh_config_s {
++ uint32_t error_fqid;
++ uint32_t default_fqid;
++ struct fm_port *oh_port;
++ uint32_t egress_cnt;
++ struct qman_fq *egress_fqs;
++ uint16_t channel;
++
++ struct list_head fqs_ingress_list;
++ struct list_head fqs_egress_list;
++};
++
++#endif /* __OFFLINE_PORT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Kconfig b/drivers/net/ethernet/freescale/sdk_fman/Kconfig
+new file mode 100644
+index 00000000..d98c0989
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Kconfig
+@@ -0,0 +1,153 @@
++menu "Frame Manager support"
++
++menuconfig FSL_SDK_FMAN
++ bool "Freescale Frame Manager (datapath) support - SDK driver"
++ depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && !FSL_FMAN
++ default y
++ ---help---
++ If unsure, say Y.
++
++if FSL_SDK_FMAN
++
++config FSL_SDK_FMAN_TEST
++ bool "FMan test module"
++ default n
++ select FSL_DPAA_HOOKS
++ ---help---
++ This option compiles test code for FMan.
++
++menu "FMAN Processor support"
++choice
++ depends on FSL_SDK_FMAN
++ prompt "Processor Type"
++
++config FMAN_ARM
++ bool "LS1043"
++ depends on ARM64 || ARM
++ ---help---
++ Choose "LS1043" for the ARM platforms:
++ LS1043
++
++config FMAN_P3040_P4080_P5020
++ bool "P3040 P4080 5020"
++
++config FMAN_P1023
++ bool "P1023"
++
++config FMAN_V3H
++ bool "FmanV3H"
++ ---help---
++ Choose "FmanV3H" for Fman rev3H:
++ B4860, T4240, T4160, etc
++
++config FMAN_V3L
++ bool "FmanV3L"
++ ---help---
++ Choose "FmanV3L" for Fman rev3L:
++ T1040, T1042, T1020, T1022, T1023, T1024, etc
++
++endchoice
++endmenu
++
++config FMAN_MIB_CNT_OVF_IRQ_EN
++ bool "Enable the dTSEC MIB counters overflow interrupt"
++ default n
++ ---help---
++	  Enable the dTSEC MIB counters overflow interrupt to get
++	  accurate MIB counter values. When enabled, it compensates
++	  for counter overflow, but reduces performance and triggers
++	  error messages in HV setups.
++
++config FSL_FM_MAX_FRAME_SIZE
++ int "Maximum L2 frame size"
++ depends on FSL_SDK_FMAN
++ range 64 9600
++ default "1522"
++ help
++ Configure this in relation to the maximum possible MTU of your
++ network configuration. In particular, one would need to
++ increase this value in order to use jumbo frames.
++ FSL_FM_MAX_FRAME_SIZE must accommodate the Ethernet FCS (4 bytes)
++ and one ETH+VLAN header (18 bytes), to a total of 22 bytes in
++ excess of the desired L3 MTU.
++
++ Note that having too large a FSL_FM_MAX_FRAME_SIZE (much larger
++ than the actual MTU) may lead to buffer exhaustion, especially
++ in the case of badly fragmented datagrams on the Rx path.
++ Conversely, having a FSL_FM_MAX_FRAME_SIZE smaller than the actual
++ MTU will lead to frames being dropped.
++
++ This can be overridden by specifying "fsl_fm_max_frm" in
++ the kernel bootargs:
++ * in Hypervisor-based scenarios, by adding a "chosen" node
++ with the "bootargs" property specifying
++ "fsl_fm_max_frm=<YourValue>";
++ * in non-Hypervisor-based scenarios, via u-boot's env, by
++ modifying the "bootargs" env variable.
++
++config FSL_FM_RX_EXTRA_HEADROOM
++ int "Add extra headroom at beginning of data buffers"
++ depends on FSL_SDK_FMAN
++ range 16 384
++ default "64"
++ help
++ Configure this to tell the Frame Manager to reserve some extra
++ space at the beginning of a data buffer on the receive path,
++ before Internal Context fields are copied. This is in addition
++ to the private data area already reserved for driver internal
++ use. The provided value must be a multiple of 16.
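++	  For example, 64 (the default), 80 or 128 are acceptable values,
++	  whereas 72 is not.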
++
++ This setting can be overridden by specifying
++ "fsl_fm_rx_extra_headroom" in the kernel bootargs:
++ * in Hypervisor-based scenarios, by adding a "chosen" node
++ with the "bootargs" property specifying
++ "fsl_fm_rx_extra_headroom=<YourValue>";
++ * in non-Hypervisor-based scenarios, via u-boot's env, by
++ modifying the "bootargs" env variable.
++
++config FMAN_PFC
++ bool "FMan PFC support (EXPERIMENTAL)"
++ depends on ( FMAN_V3H || FMAN_V3L || FMAN_ARM) && FSL_SDK_FMAN
++ default n
++ help
++ This option enables PFC support on FMan v3 ports.
++ Data Center Bridging defines Classes of Service that are
++ flow-controlled using PFC pause frames.
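++	  Each pause quantum corresponds to 512 bit times on the wire, so
++	  the default quanta value of 65535 used below requests the
++	  longest pause a peer can be asked for.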
++
++if FMAN_PFC
++config FMAN_PFC_COS_COUNT
++ int "Number of PFC Classes of Service"
++ depends on FMAN_PFC && FSL_SDK_FMAN
++ range 1 4
++ default "3"
++ help
++ The number of Classes of Service controlled by PFC.
++
++config FMAN_PFC_QUANTA_0
++ int "The pause quanta for PFC CoS 0"
++ depends on FMAN_PFC && FSL_SDK_FMAN
++ range 0 65535
++ default "65535"
++
++config FMAN_PFC_QUANTA_1
++ int "The pause quanta for PFC CoS 1"
++ depends on FMAN_PFC && FSL_SDK_FMAN
++ range 0 65535
++ default "65535"
++
++config FMAN_PFC_QUANTA_2
++ int "The pause quanta for PFC CoS 2"
++ depends on FMAN_PFC && FSL_SDK_FMAN
++ range 0 65535
++ default "65535"
++
++config FMAN_PFC_QUANTA_3
++ int "The pause quanta for PFC CoS 3"
++ depends on FMAN_PFC && FSL_SDK_FMAN
++ range 0 65535
++ default "65535"
++endif
++
++endif # FSL_SDK_FMAN
++
++endmenu
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Makefile b/drivers/net/ethernet/freescale/sdk_fman/Makefile
+new file mode 100644
+index 00000000..25ce7e6a
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Makefile
+@@ -0,0 +1,11 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++#
++obj-y += etc/
++obj-y += Peripherals/FM/
++obj-y += src/
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/Makefile b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/Makefile
+new file mode 100644
+index 00000000..d0e76727
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/Makefile
+@@ -0,0 +1,15 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
++
++ccflags-y += -I$(NCSW_FM_INC)
++
++obj-y += fsl-ncsw-Hc.o
++
++fsl-ncsw-Hc-objs := hc.o
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/hc.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/hc.c
+new file mode 100644
+index 00000000..363c8f95
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/hc.c
+@@ -0,0 +1,1232 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "sprint_ext.h"
++#include "string_ext.h"
++
++#include "fm_common.h"
++#include "fm_hc.h"
++
++
++/**************************************************************************//**
++ @Description defaults
++*//***************************************************************************/
++#define DEFAULT_dataMemId 0
++
++#define HC_HCOR_OPCODE_PLCR_PRFL 0x0
++#define HC_HCOR_OPCODE_KG_SCM 0x1
++#define HC_HCOR_OPCODE_SYNC 0x2
++#define HC_HCOR_OPCODE_CC 0x3
++#define HC_HCOR_OPCODE_CC_AGE_MASK 0x4
++#define HC_HCOR_OPCODE_CC_CAPWAP_REASSM_TIMEOUT 0x5
++#define HC_HCOR_OPCODE_CC_REASSM_TIMEOUT 0x10
++#define HC_HCOR_OPCODE_CC_IP_FRAG_INITIALIZATION 0x11
++#define HC_HCOR_OPCODE_CC_UPDATE_WITH_AGING 0x13
++#define HC_HCOR_ACTION_REG_REASSM_TIMEOUT_ACTIVE_SHIFT 24
++#define HC_HCOR_EXTRA_REG_REASSM_TIMEOUT_TSBS_SHIFT 24
++#define HC_HCOR_EXTRA_REG_CC_AGING_ADD 0x80000000
++#define HC_HCOR_EXTRA_REG_CC_AGING_REMOVE 0x40000000
++#define HC_HCOR_EXTRA_REG_CC_AGING_CHANGE_MASK 0xC0000000
++#define HC_HCOR_EXTRA_REG_CC_REMOVE_INDX_SHIFT 24
++#define HC_HCOR_EXTRA_REG_CC_REMOVE_INDX_MASK 0x1F000000
++#define HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_SHIFT 16
++#define HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_MASK 0xF
++#define HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_CMD_SHIFT 24
++#define HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_BPID 16
++
++#define HC_HCOR_GBL 0x20000000
++
++#define HC_HCOR_KG_SCHEME_COUNTER 0x00000400
++
++#if (DPAA_VERSION == 10)
++#define HC_HCOR_KG_SCHEME_REGS_MASK 0xFFFFF800
++#else
++#define HC_HCOR_KG_SCHEME_REGS_MASK 0xFFFFFE00
++#endif /* (DPAA_VERSION == 10) */
++
++#define SIZE_OF_HC_FRAME_PORT_REGS (sizeof(t_HcFrame)-sizeof(struct fman_kg_scheme_regs)+sizeof(t_FmPcdKgPortRegs))
++#define SIZE_OF_HC_FRAME_SCHEME_REGS sizeof(t_HcFrame)
++#define SIZE_OF_HC_FRAME_PROFILES_REGS (sizeof(t_HcFrame)-sizeof(struct fman_kg_scheme_regs)+sizeof(t_FmPcdPlcrProfileRegs))
++#define SIZE_OF_HC_FRAME_PROFILE_CNT (sizeof(t_HcFrame)-sizeof(t_FmPcdPlcrProfileRegs)+sizeof(uint32_t))
++#define SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC 16
++
++#define HC_CMD_POOL_SIZE (INTG_MAX_NUM_OF_CORES)
++
++#define BUILD_FD(len) \
++do { \
++ memset(&fmFd, 0, sizeof(t_DpaaFD)); \
++ DPAA_FD_SET_ADDR(&fmFd, p_HcFrame); \
++ DPAA_FD_SET_OFFSET(&fmFd, 0); \
++ DPAA_FD_SET_LENGTH(&fmFd, len); \
++} while (0)
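++
++/* Note: BUILD_FD deliberately relies on local variables named fmFd and
++ * p_HcFrame being in scope at the call site; the FmHcPcd*() routines
++ * below all declare both before invoking it.
++ */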
++
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++typedef struct t_FmPcdKgPortRegs {
++ volatile uint32_t spReg;
++ volatile uint32_t cppReg;
++} t_FmPcdKgPortRegs;
++
++typedef struct t_HcFrame {
++ volatile uint32_t opcode;
++ volatile uint32_t actionReg;
++ volatile uint32_t extraReg;
++ volatile uint32_t commandSequence;
++ union {
++ struct fman_kg_scheme_regs schemeRegs;
++ struct fman_kg_scheme_regs schemeRegsWithoutCounter;
++ t_FmPcdPlcrProfileRegs profileRegs;
++ volatile uint32_t singleRegForWrite; /* for writing SP, CPP, profile counter */
++ t_FmPcdKgPortRegs portRegsForRead;
++ volatile uint32_t clsPlanEntries[CLS_PLAN_NUM_PER_GRP];
++ t_FmPcdCcCapwapReassmTimeoutParams ccCapwapReassmTimeout;
++ t_FmPcdCcReassmTimeoutParams ccReassmTimeout;
++ } hcSpecificData;
++} t_HcFrame;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++typedef struct t_FmHc {
++ t_Handle h_FmPcd;
++ t_Handle h_HcPortDev;
++ t_FmPcdQmEnqueueCallback *f_QmEnqueue; /**< A callback for enqueuing frames to the QM */
++ t_Handle h_QmArg; /**< A handle to the QM module */
++ uint8_t dataMemId; /**< Memory partition ID for data buffers */
++
++ uint32_t seqNum[HC_CMD_POOL_SIZE]; /* FIFO of seqNum to use when
++ taking buffer */
++ uint32_t nextSeqNumLocation; /* seqNum location in seqNum[] for next buffer */
++ volatile bool enqueued[HC_CMD_POOL_SIZE]; /* HC is active - frame is enqueued
++ and not confirmed yet */
++ t_HcFrame *p_Frm[HC_CMD_POOL_SIZE];
++} t_FmHc;
++
++
++static t_Error FillBufPool(t_FmHc *p_FmHc)
++{
++ uint32_t i;
++
++ ASSERT_COND(p_FmHc);
++
++ for (i = 0; i < HC_CMD_POOL_SIZE; i++)
++ {
++#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
++        p_FmHc->p_Frm[i] = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + (16 - (sizeof(t_HcFrame) % 16))),
++ p_FmHc->dataMemId,
++ 16);
++#else
++ p_FmHc->p_Frm[i] = (t_HcFrame *)XX_MallocSmart(sizeof(t_HcFrame),
++ p_FmHc->dataMemId,
++ 16);
++#endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */
++ if (!p_FmHc->p_Frm[i])
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM HC frames!"));
++ }
++
++ /* Initialize FIFO of seqNum to use during GetBuf */
++ for (i = 0; i < HC_CMD_POOL_SIZE; i++)
++ {
++ p_FmHc->seqNum[i] = i;
++ }
++ p_FmHc->nextSeqNumLocation = 0;
++
++ return E_OK;
++}
++
++static __inline__ t_HcFrame * GetBuf(t_FmHc *p_FmHc, uint32_t *p_SeqNum)
++{
++ uint32_t intFlags;
++
++ ASSERT_COND(p_FmHc);
++
++ intFlags = FmPcdLock(p_FmHc->h_FmPcd);
++
++ if (p_FmHc->nextSeqNumLocation == HC_CMD_POOL_SIZE)
++ {
++ /* No more buffers */
++ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
++ return NULL;
++ }
++
++ *p_SeqNum = p_FmHc->seqNum[p_FmHc->nextSeqNumLocation];
++ p_FmHc->nextSeqNumLocation++;
++
++ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
++ return p_FmHc->p_Frm[*p_SeqNum];
++}
++
++static __inline__ void PutBuf(t_FmHc *p_FmHc, t_HcFrame *p_Buf, uint32_t seqNum)
++{
++ uint32_t intFlags;
++
++ UNUSED(p_Buf);
++
++ intFlags = FmPcdLock(p_FmHc->h_FmPcd);
++ ASSERT_COND(p_FmHc->nextSeqNumLocation);
++ p_FmHc->nextSeqNumLocation--;
++ p_FmHc->seqNum[p_FmHc->nextSeqNumLocation] = seqNum;
++ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
++}
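++
++/* Typical host-command lifecycle, as an illustrative sketch only (error
++ * handling elided; see FmHcPcdKgSetScheme() below for a complete case):
++ *
++ *     p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ *     memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ *     p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | ...);
++ *     p_HcFrame->commandSequence = seqNum;
++ *     BUILD_FD(sizeof(t_HcFrame));
++ *     err = EnQFrm(p_FmHc, &fmFd, seqNum);
++ *     PutBuf(p_FmHc, p_HcFrame, seqNum);
++ */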
++
++static __inline__ t_Error EnQFrm(t_FmHc *p_FmHc, t_DpaaFD *p_FmFd, uint32_t seqNum)
++{
++ t_Error err = E_OK;
++ uint32_t intFlags;
++ uint32_t timeout=100;
++
++ intFlags = FmPcdLock(p_FmHc->h_FmPcd);
++ ASSERT_COND(!p_FmHc->enqueued[seqNum]);
++ p_FmHc->enqueued[seqNum] = TRUE;
++ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
++ DBG(TRACE, ("Send Hc, SeqNum %d, buff@0x%x, fd offset 0x%x",
++ seqNum,
++ DPAA_FD_GET_ADDR(p_FmFd),
++ DPAA_FD_GET_OFFSET(p_FmFd)));
++ err = p_FmHc->f_QmEnqueue(p_FmHc->h_QmArg, (void *)p_FmFd);
++ if (err)
++ RETURN_ERROR(MINOR, err, ("HC enqueue failed"));
++
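++    /* Busy-wait until FmHcTxConf() clears the enqueued flag: 100
++     * iterations of 100 us give a ~10 ms confirmation budget.
++     */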
++ while (p_FmHc->enqueued[seqNum] && --timeout)
++ XX_UDelay(100);
++
++ if (!timeout)
++ RETURN_ERROR(MINOR, E_TIMEOUT, ("HC Callback, timeout exceeded"));
++
++ return err;
++}
++
++
++t_Handle FmHcConfigAndInit(t_FmHcParams *p_FmHcParams)
++{
++ t_FmHc *p_FmHc;
++ t_FmPortParams fmPortParam;
++ t_Error err;
++
++ p_FmHc = (t_FmHc *)XX_Malloc(sizeof(t_FmHc));
++ if (!p_FmHc)
++ {
++ REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC obj"));
++ return NULL;
++ }
++    memset(p_FmHc, 0, sizeof(t_FmHc));
++
++ p_FmHc->h_FmPcd = p_FmHcParams->h_FmPcd;
++ p_FmHc->f_QmEnqueue = p_FmHcParams->params.f_QmEnqueue;
++ p_FmHc->h_QmArg = p_FmHcParams->params.h_QmArg;
++ p_FmHc->dataMemId = DEFAULT_dataMemId;
++
++ err = FillBufPool(p_FmHc);
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ FmHcFree(p_FmHc);
++ return NULL;
++ }
++
++ if (!FmIsMaster(p_FmHcParams->h_Fm))
++ return (t_Handle)p_FmHc;
++
++ memset(&fmPortParam, 0, sizeof(fmPortParam));
++ fmPortParam.baseAddr = p_FmHcParams->params.portBaseAddr;
++ fmPortParam.portType = e_FM_PORT_TYPE_OH_HOST_COMMAND;
++ fmPortParam.portId = p_FmHcParams->params.portId;
++ fmPortParam.liodnBase = p_FmHcParams->params.liodnBase;
++ fmPortParam.h_Fm = p_FmHcParams->h_Fm;
++
++ fmPortParam.specificParams.nonRxParams.errFqid = p_FmHcParams->params.errFqid;
++ fmPortParam.specificParams.nonRxParams.dfltFqid = p_FmHcParams->params.confFqid;
++ fmPortParam.specificParams.nonRxParams.qmChannel = p_FmHcParams->params.qmChannel;
++
++ p_FmHc->h_HcPortDev = FM_PORT_Config(&fmPortParam);
++ if (!p_FmHc->h_HcPortDev)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM HC port!"));
++ XX_Free(p_FmHc);
++ return NULL;
++ }
++
++ err = FM_PORT_ConfigMaxFrameLength(p_FmHc->h_HcPortDev,
++ (uint16_t)sizeof(t_HcFrame));
++
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MAJOR, err, ("FM HC port init!"));
++ FmHcFree(p_FmHc);
++ return NULL;
++ }
++
++ /* final init */
++ err = FM_PORT_Init(p_FmHc->h_HcPortDev);
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MAJOR, err, ("FM HC port init!"));
++ FmHcFree(p_FmHc);
++ return NULL;
++ }
++
++ err = FM_PORT_Enable(p_FmHc->h_HcPortDev);
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MAJOR, err, ("FM HC port enable!"));
++ FmHcFree(p_FmHc);
++ return NULL;
++ }
++
++ return (t_Handle)p_FmHc;
++}
++
++void FmHcFree(t_Handle h_FmHc)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ int i;
++
++ if (!p_FmHc)
++ return;
++
++ for (i=0; i<HC_CMD_POOL_SIZE; i++)
++ if (p_FmHc->p_Frm[i])
++ XX_FreeSmart(p_FmHc->p_Frm[i]);
++ else
++ break;
++
++ if (p_FmHc->h_HcPortDev)
++ FM_PORT_Free(p_FmHc->h_HcPortDev);
++
++ XX_Free(p_FmHc);
++}
++
++/*****************************************************************************/
++t_Error FmHcSetFramesDataMemory(t_Handle h_FmHc,
++ uint8_t memId)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ int i;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmHc, E_INVALID_HANDLE);
++
++ p_FmHc->dataMemId = memId;
++
++ for (i=0; i<HC_CMD_POOL_SIZE; i++)
++ if (p_FmHc->p_Frm[i])
++ XX_FreeSmart(p_FmHc->p_Frm[i]);
++
++ return FillBufPool(p_FmHc);
++}
++
++void FmHcTxConf(t_Handle h_FmHc, t_DpaaFD *p_Fd)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ uint32_t intFlags;
++
++ ASSERT_COND(p_FmHc);
++
++ intFlags = FmPcdLock(p_FmHc->h_FmPcd);
++ p_HcFrame = (t_HcFrame *)PTR_MOVE(DPAA_FD_GET_ADDR(p_Fd), DPAA_FD_GET_OFFSET(p_Fd));
++
++ DBG(TRACE, ("Hc Conf, SeqNum %d, FD@0x%x, fd offset 0x%x",
++ p_HcFrame->commandSequence, DPAA_FD_GET_ADDR(p_Fd), DPAA_FD_GET_OFFSET(p_Fd)));
++
++ if (!(p_FmHc->enqueued[p_HcFrame->commandSequence]))
++        REPORT_ERROR(MINOR, E_INVALID_FRAME, ("Not a Host-Command frame received!"));
++ else
++ p_FmHc->enqueued[p_HcFrame->commandSequence] = FALSE;
++ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
++}
++
++t_Error FmHcPcdKgSetScheme(t_Handle h_FmHc,
++ t_Handle h_Scheme,
++ struct fman_kg_scheme_regs *p_SchemeRegs,
++ bool updateCounter)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_Error err = E_OK;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint8_t physicalSchemeId;
++ uint32_t seqNum;
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++
++ physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
++
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
++ p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, updateCounter);
++ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
++ memcpy(&p_HcFrame->hcSpecificData.schemeRegs, p_SchemeRegs, sizeof(struct fman_kg_scheme_regs));
++ if (!updateCounter)
++ {
++ p_HcFrame->hcSpecificData.schemeRegs.kgse_dv0 = p_SchemeRegs->kgse_dv0;
++ p_HcFrame->hcSpecificData.schemeRegs.kgse_dv1 = p_SchemeRegs->kgse_dv1;
++ p_HcFrame->hcSpecificData.schemeRegs.kgse_ccbs = p_SchemeRegs->kgse_ccbs;
++ p_HcFrame->hcSpecificData.schemeRegs.kgse_mv = p_SchemeRegs->kgse_mv;
++ }
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FmHcPcdKgDeleteScheme(t_Handle h_FmHc, t_Handle h_Scheme)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_Error err = E_OK;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint8_t physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
++ uint32_t seqNum;
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
++ p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE);
++ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
++ memset(&p_HcFrame->hcSpecificData.schemeRegs, 0, sizeof(struct fman_kg_scheme_regs));
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FmHcPcdKgCcGetSetParams(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t requiredAction, uint32_t value)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_Error err = E_OK;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint8_t relativeSchemeId;
++ uint8_t physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
++ uint32_t tmpReg32 = 0;
++ uint32_t seqNum;
++
++ /* Scheme is locked by calling routine */
++ /* WARNING - this lock will not be efficient if other HC routine will attempt to change
++ * "kgse_mode" or "kgse_om" without locking scheme !
++ */
++
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId);
++ if ( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++
++ if (!FmPcdKgGetRequiredActionFlag(p_FmHc->h_FmPcd, relativeSchemeId) ||
++ !(FmPcdKgGetRequiredAction(p_FmHc->h_FmPcd, relativeSchemeId) & requiredAction))
++ {
++ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) &&
++ (FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_PLCR))
++ {
++ if ((FmPcdKgIsDirectPlcr(p_FmHc->h_FmPcd, relativeSchemeId) == FALSE) ||
++ (FmPcdKgIsDistrOnPlcrProfile(p_FmHc->h_FmPcd, relativeSchemeId) == TRUE))
++                RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this configuration the policer profile cannot use distribution and has to be shared"));
++ err = FmPcdPlcrCcGetSetParams(p_FmHc->h_FmPcd, FmPcdKgGetRelativeProfileId(p_FmHc->h_FmPcd, relativeSchemeId), requiredAction);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ else /* From here we deal with KG-Schemes only */
++ {
++ /* Pre change general code */
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
++ p_HcFrame->actionReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
++ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
++ p_HcFrame->commandSequence = seqNum;
++ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
++ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ /* specific change */
++ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) &&
++ ((FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_DONE) &&
++ (FmPcdKgGetDoneAction(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_ENQ_FRAME)))
++ {
++ tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode;
++ ASSERT_COND(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME));
++ p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32 | NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
++ }
++
++ if ((requiredAction & UPDATE_KG_NIA_CC_WA) &&
++ (FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_CC))
++ {
++ tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode;
++ ASSERT_COND(tmpReg32 & (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC));
++ tmpReg32 &= ~NIA_FM_CTL_AC_CC;
++ p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32 | NIA_FM_CTL_AC_PRE_CC;
++ }
++
++ if (requiredAction & UPDATE_KG_OPT_MODE)
++ p_HcFrame->hcSpecificData.schemeRegs.kgse_om = value;
++
++ if (requiredAction & UPDATE_KG_NIA)
++ {
++ tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode;
++ tmpReg32 &= ~(NIA_ENG_MASK | NIA_AC_MASK);
++ tmpReg32 |= value;
++ p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32;
++ }
++
++ /* Post change general code */
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
++ p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
++ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
++
++ BUILD_FD(sizeof(t_HcFrame));
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++ }
++
++ return E_OK;
++}
++
++uint32_t FmHcPcdKgGetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_Error err;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint32_t retVal;
++ uint8_t relativeSchemeId;
++ uint8_t physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
++ uint32_t seqNum;
++
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId);
++ if ( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++ return 0;
++ }
++
++ /* first read scheme and check that it is valid */
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ {
++ REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ return 0;
++ }
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
++ p_HcFrame->actionReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
++ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++ if (err != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ return 0;
++ }
++
++ if (!FmPcdKgHwSchemeIsValid(p_HcFrame->hcSpecificData.schemeRegs.kgse_mode))
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is invalid"));
++ return 0;
++ }
++
++ retVal = p_HcFrame->hcSpecificData.schemeRegs.kgse_spc;
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ return retVal;
++}
++
++t_Error FmHcPcdKgSetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t value)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_Error err = E_OK;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint8_t relativeSchemeId, physicalSchemeId;
++ uint32_t seqNum;
++
++ physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId);
++ if ( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++
++ /* first read scheme and check that it is valid */
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
++ p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE);
++ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_COUNTER;
++ /* write counter */
++ p_HcFrame->hcSpecificData.singleRegForWrite = value;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ return err;
++}
++
++t_Error FmHcPcdKgSetClsPlan(t_Handle h_FmHc, t_FmPcdKgInterModuleClsPlanSet *p_Set)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint8_t i, idx;
++ uint32_t seqNum;
++ t_Error err = E_OK;
++
++ ASSERT_COND(p_FmHc);
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++
++ for (i = p_Set->baseEntry; i < (p_Set->baseEntry+p_Set->numOfClsPlanEntries); i+=8)
++ {
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
++ p_HcFrame->actionReg = FmPcdKgBuildWriteClsPlanBlockActionReg((uint8_t)(i / CLS_PLAN_NUM_PER_GRP));
++ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
++
++ idx = (uint8_t)(i - p_Set->baseEntry);
++ ASSERT_COND(idx < FM_PCD_MAX_NUM_OF_CLS_PLANS);
++ memcpy(&p_HcFrame->hcSpecificData.clsPlanEntries, &p_Set->vectors[idx], CLS_PLAN_NUM_PER_GRP*sizeof(uint32_t));
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++
++ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++ }
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ return err;
++}
++
++t_Error FmHcPcdKgDeleteClsPlan(t_Handle h_FmHc, uint8_t grpId)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet;
++
++ p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet));
++ if (!p_ClsPlanSet)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Classification plan set"));
++
++ memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet));
++
++ p_ClsPlanSet->baseEntry = FmPcdKgGetClsPlanGrpBase(p_FmHc->h_FmPcd, grpId);
++ p_ClsPlanSet->numOfClsPlanEntries = FmPcdKgGetClsPlanGrpSize(p_FmHc->h_FmPcd, grpId);
++ ASSERT_COND(p_ClsPlanSet->numOfClsPlanEntries <= FM_PCD_MAX_NUM_OF_CLS_PLANS);
++
++ if (FmHcPcdKgSetClsPlan(p_FmHc, p_ClsPlanSet) != E_OK)
++ {
++ XX_Free(p_ClsPlanSet);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++ }
++
++ XX_Free(p_ClsPlanSet);
++ FmPcdKgDestroyClsPlanGrp(p_FmHc->h_FmPcd, grpId);
++
++ return E_OK;
++}
++
++t_Error FmHcPcdCcCapwapTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcCapwapReassmTimeoutParams *p_CcCapwapReassmTimeoutParams )
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ t_Error err;
++ uint32_t seqNum;
++
++    SANITY_CHECK_RETURN_ERROR(h_FmHc, E_INVALID_HANDLE);
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_CAPWAP_REASSM_TIMEOUT);
++ memcpy(&p_HcFrame->hcSpecificData.ccCapwapReassmTimeout, p_CcCapwapReassmTimeoutParams, sizeof(t_FmPcdCcCapwapReassmTimeoutParams));
++ p_HcFrame->commandSequence = seqNum;
++ BUILD_FD(sizeof(t_HcFrame));
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ return err;
++}
++
++t_Error FmHcPcdCcIpFragScratchPollCmd(t_Handle h_FmHc, bool fill, t_FmPcdCcFragScratchPoolCmdParams *p_FmPcdCcFragScratchPoolCmdParams)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ t_Error err;
++ uint32_t seqNum;
++
++    SANITY_CHECK_RETURN_ERROR(h_FmHc, E_INVALID_HANDLE);
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_IP_FRAG_INITIALIZATION);
++ p_HcFrame->actionReg = (uint32_t)(((fill == TRUE) ? 0 : 1) << HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_CMD_SHIFT);
++ p_HcFrame->actionReg |= p_FmPcdCcFragScratchPoolCmdParams->bufferPoolId << HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_BPID;
++ if (fill == TRUE)
++ {
++ p_HcFrame->extraReg = p_FmPcdCcFragScratchPoolCmdParams->numOfBuffers;
++ }
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ p_FmPcdCcFragScratchPoolCmdParams->numOfBuffers = p_HcFrame->extraReg;
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ return E_OK;
++}
++
++t_Error FmHcPcdCcTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcReassmTimeoutParams *p_CcReassmTimeoutParams, uint8_t *p_Result)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ t_Error err;
++ uint32_t seqNum;
++
++    SANITY_CHECK_RETURN_ERROR(h_FmHc, E_INVALID_HANDLE);
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_REASSM_TIMEOUT);
++ p_HcFrame->actionReg = (uint32_t)((p_CcReassmTimeoutParams->activate ? 0 : 1) << HC_HCOR_ACTION_REG_REASSM_TIMEOUT_ACTIVE_SHIFT);
++ p_HcFrame->extraReg = (p_CcReassmTimeoutParams->tsbs << HC_HCOR_EXTRA_REG_REASSM_TIMEOUT_TSBS_SHIFT) | p_CcReassmTimeoutParams->iprcpt;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ *p_Result = (uint8_t)
++ ((p_HcFrame->actionReg >> HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_SHIFT) & HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_MASK);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ return E_OK;
++}
++
++t_Error FmHcPcdPlcrCcGetSetParams(t_Handle h_FmHc,uint16_t absoluteProfileId, uint32_t requiredAction)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ t_Error err;
++ uint32_t tmpReg32 = 0;
++ uint32_t requiredActionTmp, requiredActionFlag;
++ uint32_t seqNum;
++
++    SANITY_CHECK_RETURN_ERROR(h_FmHc, E_INVALID_HANDLE);
++
++ /* Profile is locked by calling routine */
++    /* WARNING - this lock will not be effective if another HC routine
++     * attempts to change "fmpl_pegnia", "fmpl_peynia" or "fmpl_pernia"
++     * without locking the profile!
++     */
++
++ requiredActionTmp = FmPcdPlcrGetRequiredAction(p_FmHc->h_FmPcd, absoluteProfileId);
++ requiredActionFlag = FmPcdPlcrGetRequiredActionFlag(p_FmHc->h_FmPcd, absoluteProfileId);
++
++ if (!requiredActionFlag || !(requiredActionTmp & requiredAction))
++ {
++ if (requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
++ {
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ /* first read scheme and check that it is valid */
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
++ p_HcFrame->actionReg = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId);
++ p_HcFrame->extraReg = 0x00008000;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
++
++ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_pegnia;
++ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
++ }
++
++ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
++
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
++ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
++ p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(TRUE, FALSE, FALSE);
++ p_HcFrame->extraReg = 0x00008000;
++ p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32;
++
++ BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);
++
++ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_peynia;
++ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
++ }
++
++ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
++
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
++ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
++ p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(FALSE, TRUE, FALSE);
++ p_HcFrame->extraReg = 0x00008000;
++ p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32;
++
++ BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);
++
++ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_pernia;
++ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
++ }
++
++ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
++
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
++ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
++ p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(FALSE, FALSE, TRUE);
++ p_HcFrame->extraReg = 0x00008000;
++ p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32;
++
++ BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);
++
++ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ }
++ }
++
++ return E_OK;
++}
++
++t_Error FmHcPcdPlcrSetProfile(t_Handle h_FmHc, t_Handle h_Profile, t_FmPcdPlcrProfileRegs *p_PlcrRegs)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_Error err = E_OK;
++ uint16_t profileIndx;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint32_t seqNum;
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++
++ profileIndx = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
++
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
++ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionRegs(profileIndx);
++ p_HcFrame->extraReg = 0x00008000;
++ memcpy(&p_HcFrame->hcSpecificData.profileRegs, p_PlcrRegs, sizeof(t_FmPcdPlcrProfileRegs));
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FmHcPcdPlcrDeleteProfile(t_Handle h_FmHc, t_Handle h_Profile)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ uint16_t absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
++ t_Error err = E_OK;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint32_t seqNum;
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
++ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
++ p_HcFrame->actionReg |= 0x00008000;
++ p_HcFrame->extraReg = 0x00008000;
++ memset(&p_HcFrame->hcSpecificData.profileRegs, 0, sizeof(t_FmPcdPlcrProfileRegs));
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FmHcPcdPlcrSetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value)
++{
++
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ uint16_t absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
++ t_Error err = E_OK;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint32_t seqNum;
++
++    /* build a write command for the selected profile counter */
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
++ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
++ p_HcFrame->actionReg |= FmPcdPlcrBuildCounterProfileReg(counter);
++ p_HcFrame->extraReg = 0x00008000;
++ p_HcFrame->hcSpecificData.singleRegForWrite = value;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++uint32_t FmHcPcdPlcrGetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ uint16_t absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
++ t_Error err;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ uint32_t retVal = 0;
++ uint32_t seqNum;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);
++
++    /* read back the profile registers */
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ {
++ REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ return 0;
++ }
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
++ p_HcFrame->actionReg = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId);
++ p_HcFrame->extraReg = 0x00008000;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++ if (err != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ return 0;
++ }
++
++ switch (counter)
++ {
++ case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER:
++ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_pegpc;
++ break;
++ case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER:
++ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_peypc;
++ break;
++ case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER:
++ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perpc;
++ break;
++ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER:
++ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perypc;
++ break;
++ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER:
++ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perrpc;
++ break;
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ return retVal;
++}
++
++t_Error FmHcKgWriteSp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t spReg, bool add)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ t_Error err = E_OK;
++ uint32_t seqNum;
++
++ ASSERT_COND(p_FmHc);
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++ /* first read SP register */
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
++ p_HcFrame->actionReg = FmPcdKgBuildReadPortSchemeBindActionReg(hardwarePortId);
++ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(SIZE_OF_HC_FRAME_PORT_REGS);
++
++ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
++ {
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ /* spReg is the first reg, so we can use it both for read and for write */
++ if (add)
++ p_HcFrame->hcSpecificData.portRegsForRead.spReg |= spReg;
++ else
++ p_HcFrame->hcSpecificData.portRegsForRead.spReg &= ~spReg;
++
++ p_HcFrame->actionReg = FmPcdKgBuildWritePortSchemeBindActionReg(hardwarePortId);
++
++ BUILD_FD(sizeof(t_HcFrame));
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FmHcKgWriteCpp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t cppReg)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ t_Error err = E_OK;
++ uint32_t seqNum;
++
++ ASSERT_COND(p_FmHc);
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++    /* write the port's classification plan (CPP) register */
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
++ p_HcFrame->actionReg = FmPcdKgBuildWritePortClsPlanBindActionReg(hardwarePortId);
++ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
++ p_HcFrame->hcSpecificData.singleRegForWrite = cppReg;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FmHcPcdCcDoDynamicChange(t_Handle h_FmHc, uint32_t oldAdAddrOffset, uint32_t newAdAddrOffset)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ t_Error err = E_OK;
++ uint32_t seqNum;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmHc, E_INVALID_HANDLE);
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC);
++ p_HcFrame->actionReg = newAdAddrOffset;
++ p_HcFrame->actionReg |= 0xc0000000;
++ p_HcFrame->extraReg = oldAdAddrOffset;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FmHcPcdSync(t_Handle h_FmHc)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ t_HcFrame *p_HcFrame;
++ t_DpaaFD fmFd;
++ t_Error err = E_OK;
++ uint32_t seqNum;
++
++ ASSERT_COND(p_FmHc);
++
++ p_HcFrame = GetBuf(p_FmHc, &seqNum);
++ if (!p_HcFrame)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
++ memset(p_HcFrame, 0, sizeof(t_HcFrame));
++    /* build and issue a sync command */
++ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_SYNC);
++ p_HcFrame->actionReg = 0;
++ p_HcFrame->extraReg = 0;
++ p_HcFrame->commandSequence = seqNum;
++
++ BUILD_FD(sizeof(t_HcFrame));
++
++ err = EnQFrm(p_FmHc, &fmFd, seqNum);
++
++ PutBuf(p_FmHc, p_HcFrame, seqNum);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Handle FmHcGetPort(t_Handle h_FmHc)
++{
++ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
++ return p_FmHc->h_HcPortDev;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/Makefile b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/Makefile
+new file mode 100644
+index 00000000..f6b090da
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/Makefile
+@@ -0,0 +1,28 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
++
++ccflags-y += -I$(NCSW_FM_INC)
++
++obj-y += fsl-ncsw-MAC.o
++
++fsl-ncsw-MAC-objs := dtsec.o dtsec_mii_acc.o fm_mac.o tgec.o tgec_mii_acc.o \
++ fman_dtsec.o fman_dtsec_mii_acc.o fman_memac.o \
++ fman_tgec.o fman_crc32.o
++
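++# mEMAC objects are built only for FMan v3 (V3H/V3L) and ARM-based SoCs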
++ifeq ($(CONFIG_FMAN_V3H),y)
++fsl-ncsw-MAC-objs += memac.o memac_mii_acc.o fman_memac_mii_acc.o
++endif
++ifeq ($(CONFIG_FMAN_V3L),y)
++fsl-ncsw-MAC-objs += memac.o memac_mii_acc.o fman_memac_mii_acc.o
++endif
++ifeq ($(CONFIG_FMAN_ARM),y)
++fsl-ncsw-MAC-objs += memac.o memac_mii_acc.o fman_memac_mii_acc.o
++endif
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c
+new file mode 100644
+index 00000000..f853825f
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c
+@@ -0,0 +1,1464 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File dtsec.c
++
++ @Description FMan dTSEC driver
++*//***************************************************************************/
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "xx_ext.h"
++#include "endian_ext.h"
++#include "debug_ext.h"
++#include "crc_mac_addr_ext.h"
++
++#include "fm_common.h"
++#include "dtsec.h"
++#include "fsl_fman_dtsec.h"
++#include "fsl_fman_dtsec_mii_acc.h"
++
++/*****************************************************************************/
++/* Internal routines */
++/*****************************************************************************/
++
++static t_Error CheckInitParameters(t_Dtsec *p_Dtsec)
++{
++ if (ENET_SPEED_FROM_MODE(p_Dtsec->enetMode) >= e_ENET_SPEED_10000)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet 1G MAC driver only supports 1G or lower speeds"));
++ if (p_Dtsec->macId >= FM_MAX_NUM_OF_1G_MACS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("macId can not be greater than the number of 1G MACs"));
++ if (p_Dtsec->addr == 0)
++        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet MAC must have a valid MAC address"));
++ if ((ENET_SPEED_FROM_MODE(p_Dtsec->enetMode) >= e_ENET_SPEED_1000) &&
++ p_Dtsec->p_DtsecDriverParam->halfdup_on)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet MAC 1G can't work in half duplex"));
++ if (p_Dtsec->p_DtsecDriverParam->halfdup_on && (p_Dtsec->p_DtsecDriverParam)->loopback)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("LoopBack is not supported in halfDuplex mode"));
++#ifdef FM_RX_PREAM_4_ERRATA_DTSEC_A001
++ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev <= 6) /* fixed for rev3 */
++ if (p_Dtsec->p_DtsecDriverParam->rx_preamble)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("preambleRxEn"));
++#endif /* FM_RX_PREAM_4_ERRATA_DTSEC_A001 */
++ if (((p_Dtsec->p_DtsecDriverParam)->tx_preamble || (p_Dtsec->p_DtsecDriverParam)->rx_preamble) &&( (p_Dtsec->p_DtsecDriverParam)->preamble_len != 0x7))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Preamble length should be 0x7 bytes"));
++ if ((p_Dtsec->p_DtsecDriverParam)->halfdup_on &&
++ (p_Dtsec->p_DtsecDriverParam->tx_time_stamp_en || p_Dtsec->p_DtsecDriverParam->rx_time_stamp_en))
++        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dTSEC in half duplex mode requires 1588 timestamping to be disabled"));
++ if ((p_Dtsec->p_DtsecDriverParam)->rx_flow && (p_Dtsec->p_DtsecDriverParam)->rx_ctrl_acc )
++        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Receive control frames are not passed to system memory, so they cannot be accepted"));
++ if ((p_Dtsec->p_DtsecDriverParam)->rx_prepend > MAX_PACKET_ALIGNMENT)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("packetAlignmentPadding can't be greater than %d ",MAX_PACKET_ALIGNMENT ));
++ if (((p_Dtsec->p_DtsecDriverParam)->non_back_to_back_ipg1 > MAX_INTER_PACKET_GAP) ||
++ ((p_Dtsec->p_DtsecDriverParam)->non_back_to_back_ipg2 > MAX_INTER_PACKET_GAP) ||
++ ((p_Dtsec->p_DtsecDriverParam)->back_to_back_ipg > MAX_INTER_PACKET_GAP))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inter packet gap can't be greater than %d ",MAX_INTER_PACKET_GAP ));
++ if ((p_Dtsec->p_DtsecDriverParam)->halfdup_alt_backoff_val > MAX_INTER_PALTERNATE_BEB)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("alternateBackoffVal can't be greater than %d ",MAX_INTER_PALTERNATE_BEB ));
++ if ((p_Dtsec->p_DtsecDriverParam)->halfdup_retransmit > MAX_RETRANSMISSION)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("maxRetransmission can't be greater than %d ",MAX_RETRANSMISSION ));
++ if ((p_Dtsec->p_DtsecDriverParam)->halfdup_coll_window > MAX_COLLISION_WINDOW)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("collisionWindow can't be greater than %d ",MAX_COLLISION_WINDOW ));
++
++ /* If Auto negotiation process is disabled, need to */
++ /* Set up the PHY using the MII Management Interface */
++ if (p_Dtsec->p_DtsecDriverParam->tbipa > MAX_PHYS)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("PHY address (should be 0-%d)", MAX_PHYS));
++ if (!p_Dtsec->f_Exception)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("uninitialized f_Exception"));
++ if (!p_Dtsec->f_Event)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("uninitialized f_Event"));
++
++#ifdef FM_LEN_CHECK_ERRATA_FMAN_SW002
++ if (p_Dtsec->p_DtsecDriverParam->rx_len_check)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("LengthCheck!"));
++#endif /* FM_LEN_CHECK_ERRATA_FMAN_SW002 */
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static uint32_t GetMacAddrHashCode(uint64_t ethAddr)
++{
++ uint32_t crc;
++
++ /* CRC calculation */
++ GET_MAC_ADDR_CRC(ethAddr, crc);
++
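++    /* The dTSEC hash registers are indexed by the bit-reversed CRC, so mirror it before use. */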
++ crc = GetMirror32(crc);
++
++ return crc;
++}
++
++/* ......................................................................... */
++
++static void UpdateStatistics(t_Dtsec *p_Dtsec)
++{
++ uint32_t car1, car2;
++
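++    /* CAR1/CAR2 latch a bit each time a hardware MIB counter wraps; fold every latched wrap into the 64-bit software shadow counters. */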
++ fman_dtsec_get_clear_carry_regs(p_Dtsec->p_MemMap, &car1, &car2);
++
++ if (car1)
++ {
++ if (car1 & CAR1_TR64)
++ p_Dtsec->internalStatistics.tr64 += VAL22BIT;
++ if (car1 & CAR1_TR127)
++ p_Dtsec->internalStatistics.tr127 += VAL22BIT;
++ if (car1 & CAR1_TR255)
++ p_Dtsec->internalStatistics.tr255 += VAL22BIT;
++ if (car1 & CAR1_TR511)
++ p_Dtsec->internalStatistics.tr511 += VAL22BIT;
++ if (car1 & CAR1_TRK1)
++ p_Dtsec->internalStatistics.tr1k += VAL22BIT;
++ if (car1 & CAR1_TRMAX)
++ p_Dtsec->internalStatistics.trmax += VAL22BIT;
++ if (car1 & CAR1_TRMGV)
++ p_Dtsec->internalStatistics.trmgv += VAL22BIT;
++ if (car1 & CAR1_RBYT)
++ p_Dtsec->internalStatistics.rbyt += (uint64_t)VAL32BIT;
++ if (car1 & CAR1_RPKT)
++ p_Dtsec->internalStatistics.rpkt += VAL22BIT;
++ if (car1 & CAR1_RMCA)
++ p_Dtsec->internalStatistics.rmca += VAL22BIT;
++ if (car1 & CAR1_RBCA)
++ p_Dtsec->internalStatistics.rbca += VAL22BIT;
++ if (car1 & CAR1_RXPF)
++ p_Dtsec->internalStatistics.rxpf += VAL16BIT;
++ if (car1 & CAR1_RALN)
++ p_Dtsec->internalStatistics.raln += VAL16BIT;
++ if (car1 & CAR1_RFLR)
++ p_Dtsec->internalStatistics.rflr += VAL16BIT;
++ if (car1 & CAR1_RCDE)
++ p_Dtsec->internalStatistics.rcde += VAL16BIT;
++ if (car1 & CAR1_RCSE)
++ p_Dtsec->internalStatistics.rcse += VAL16BIT;
++ if (car1 & CAR1_RUND)
++ p_Dtsec->internalStatistics.rund += VAL16BIT;
++ if (car1 & CAR1_ROVR)
++ p_Dtsec->internalStatistics.rovr += VAL16BIT;
++ if (car1 & CAR1_RFRG)
++ p_Dtsec->internalStatistics.rfrg += VAL16BIT;
++ if (car1 & CAR1_RJBR)
++ p_Dtsec->internalStatistics.rjbr += VAL16BIT;
++ if (car1 & CAR1_RDRP)
++ p_Dtsec->internalStatistics.rdrp += VAL16BIT;
++ }
++ if (car2)
++ {
++ if (car2 & CAR2_TFCS)
++ p_Dtsec->internalStatistics.tfcs += VAL12BIT;
++ if (car2 & CAR2_TBYT)
++ p_Dtsec->internalStatistics.tbyt += (uint64_t)VAL32BIT;
++ if (car2 & CAR2_TPKT)
++ p_Dtsec->internalStatistics.tpkt += VAL22BIT;
++ if (car2 & CAR2_TMCA)
++ p_Dtsec->internalStatistics.tmca += VAL22BIT;
++ if (car2 & CAR2_TBCA)
++ p_Dtsec->internalStatistics.tbca += VAL22BIT;
++ if (car2 & CAR2_TXPF)
++ p_Dtsec->internalStatistics.txpf += VAL16BIT;
++ if (car2 & CAR2_TDRP)
++ p_Dtsec->internalStatistics.tdrp += VAL16BIT;
++ }
++}
++
++/* .............................................................................. */
++
++static uint16_t DtsecGetMaxFrameLength(t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_VALUE(p_Dtsec, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE, 0);
++
++ return fman_dtsec_get_max_frame_len(p_Dtsec->p_MemMap);
++}
++
++/* .............................................................................. */
++
++static void DtsecIsr(t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ uint32_t event;
++ struct dtsec_regs *p_DtsecMemMap = p_Dtsec->p_MemMap;
++
++ /* do not handle MDIO events */
++ event = fman_dtsec_get_event(p_DtsecMemMap, (uint32_t)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN)));
++
++ event &= fman_dtsec_get_interrupt_mask(p_DtsecMemMap);
++
++ fman_dtsec_ack_event(p_DtsecMemMap, event);
++
++ if (event & DTSEC_IMASK_BREN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_BAB_RX);
++ if (event & DTSEC_IMASK_RXCEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_RX_CTL);
++ if (event & DTSEC_IMASK_MSROEN)
++ UpdateStatistics(p_Dtsec);
++ if (event & DTSEC_IMASK_GTSCEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
++ if (event & DTSEC_IMASK_BTEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_BAB_TX);
++ if (event & DTSEC_IMASK_TXCEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_CTL);
++ if (event & DTSEC_IMASK_TXEEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_ERR);
++ if (event & DTSEC_IMASK_LCEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_LATE_COL);
++ if (event & DTSEC_IMASK_CRLEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_COL_RET_LMT);
++ if (event & DTSEC_IMASK_XFUNEN)
++ {
++#ifdef FM_TX_LOCKUP_ERRATA_DTSEC6
++ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
++ {
++ uint32_t tpkt1, tmpReg1, tpkt2, tmpReg2, i;
++ /* a. Write 0x00E0_0C00 to DTSEC_ID */
++            /* This is a read-only register */
++
++ /* b. Read and save the value of TPKT */
++ tpkt1 = GET_UINT32(p_DtsecMemMap->tpkt);
++
++ /* c. Read the register at dTSEC address offset 0x32C */
++ tmpReg1 = GET_UINT32(*(uint32_t*)((uint8_t*)p_DtsecMemMap + 0x32c));
++
++ /* d. Compare bits [9:15] to bits [25:31] of the register at address offset 0x32C. */
++            if (((tmpReg1 >> 16) & 0x007F) != (tmpReg1 & 0x007F))
++ {
++ /* If they are not equal, save the value of this register and wait for at least
++ * MAXFRM*16 ns */
++                XX_UDelay((uint32_t)(MAX(DtsecGetMaxFrameLength(p_Dtsec)*16/1000, 1)));
++ }
++
++ /* e. Read and save TPKT again and read the register at dTSEC address offset
++ 0x32C again*/
++ tpkt2 = GET_UINT32(p_DtsecMemMap->tpkt);
++ tmpReg2 = GET_UINT32(*(uint32_t*)((uint8_t*)p_DtsecMemMap + 0x32c));
++
++ /* f. Compare the value of TPKT saved in step b to value read in step e. Also
++ compare bits [9:15] of the register at offset 0x32C saved in step d to the value
++ of bits [9:15] saved in step e. If the two registers values are unchanged, then
++ the transmit portion of the dTSEC controller is locked up and the user should
++ proceed to the recover sequence. */
++ if ((tpkt1 == tpkt2) && ((tmpReg1 & 0x007F0000) == (tmpReg2 & 0x007F0000)))
++ {
++ /* recover sequence */
++
++ /* a.Write a 1 to RCTRL[GRS]*/
++
++ WRITE_UINT32(p_DtsecMemMap->rctrl, GET_UINT32(p_DtsecMemMap->rctrl) | RCTRL_GRS);
++
++ /* b.Wait until IEVENT[GRSC]=1, or at least 100 us has elapsed. */
++ for (i = 0 ; i < 100 ; i++ )
++ {
++ if (GET_UINT32(p_DtsecMemMap->ievent) & DTSEC_IMASK_GRSCEN)
++ break;
++ XX_UDelay(1);
++ }
++ if (GET_UINT32(p_DtsecMemMap->ievent) & DTSEC_IMASK_GRSCEN)
++ WRITE_UINT32(p_DtsecMemMap->ievent, DTSEC_IMASK_GRSCEN);
++ else
++ DBG(INFO,("Rx lockup due to dTSEC Tx lockup"));
++
++ /* c.Write a 1 to bit n of FM_RSTC (offset 0x0CC of FPM)*/
++ FmResetMac(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MAC_1G, p_Dtsec->fmMacControllerDriver.macId);
++
++ /* d.Wait 4 Tx clocks (32 ns) */
++ XX_UDelay(1);
++
++ /* e.Write a 0 to bit n of FM_RSTC. */
++ /* cleared by FMAN */
++ }
++ }
++#endif /* FM_TX_LOCKUP_ERRATA_DTSEC6 */
++
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_FIFO_UNDRN);
++ }
++ if (event & DTSEC_IMASK_MAGEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_MAG_PCKT);
++ if (event & DTSEC_IMASK_GRSCEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
++ if (event & DTSEC_IMASK_TDPEEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_DATA_ERR);
++ if (event & DTSEC_IMASK_RDPEEN)
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_RX_DATA_ERR);
++
++ /* - masked interrupts */
++ ASSERT_COND(!(event & DTSEC_IMASK_ABRTEN));
++ ASSERT_COND(!(event & DTSEC_IMASK_IFERREN));
++}
++
++static void DtsecMdioIsr(t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ uint32_t event;
++ struct dtsec_regs *p_DtsecMemMap = p_Dtsec->p_MemMap;
++
++ event = GET_UINT32(p_DtsecMemMap->ievent);
++ /* handle only MDIO events */
++ event &= (DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN);
++ if (event)
++ {
++ event &= GET_UINT32(p_DtsecMemMap->imask);
++
++ WRITE_UINT32(p_DtsecMemMap->ievent, event);
++
++ if (event & DTSEC_IMASK_MMRDEN)
++ p_Dtsec->f_Event(p_Dtsec->h_App, e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET);
++ if (event & DTSEC_IMASK_MMWREN)
++ p_Dtsec->f_Event(p_Dtsec->h_App, e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET);
++ }
++}
++
++static void Dtsec1588Isr(t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ uint32_t event;
++ struct dtsec_regs *p_DtsecMemMap = p_Dtsec->p_MemMap;
++
++ if (p_Dtsec->ptpTsuEnabled)
++ {
++ event = fman_dtsec_check_and_clear_tmr_event(p_DtsecMemMap);
++
++ if (event)
++ {
++ ASSERT_COND(event & TMR_PEVENT_TSRE);
++ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_1588_TS_RX_ERR);
++ }
++ }
++}
++
++/* ........................................................................... */
++
++static void FreeInitResources(t_Dtsec *p_Dtsec)
++{
++ if (p_Dtsec->mdioIrq != NO_IRQ)
++ {
++ XX_DisableIntr(p_Dtsec->mdioIrq);
++ XX_FreeIntr(p_Dtsec->mdioIrq);
++ }
++ FmUnregisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC, p_Dtsec->macId, e_FM_INTR_TYPE_ERR);
++ FmUnregisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC, p_Dtsec->macId, e_FM_INTR_TYPE_NORMAL);
++
++ /* release the driver's group hash table */
++ FreeHashTable(p_Dtsec->p_MulticastAddrHash);
++ p_Dtsec->p_MulticastAddrHash = NULL;
++
++ /* release the driver's individual hash table */
++ FreeHashTable(p_Dtsec->p_UnicastAddrHash);
++ p_Dtsec->p_UnicastAddrHash = NULL;
++}
++
++/* ........................................................................... */
++
++static t_Error GracefulStop(t_Dtsec *p_Dtsec, e_CommMode mode)
++{
++ struct dtsec_regs *p_MemMap;
++
++ ASSERT_COND(p_Dtsec);
++
++ p_MemMap = p_Dtsec->p_MemMap;
++ ASSERT_COND(p_MemMap);
++
++    /* Assert the graceful stop bits */
++ if (mode & e_COMM_MODE_RX)
++ {
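++        /* Assert GRS; affected revisions need a short settle delay afterwards (errata workarounds below). */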
++ fman_dtsec_stop_rx(p_MemMap);
++
++#ifdef FM_GRS_ERRATA_DTSEC_A002
++ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
++ XX_UDelay(100);
++#else /* FM_GRS_ERRATA_DTSEC_A002 */
++#ifdef FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839
++ XX_UDelay(10);
++#endif /* FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839 */
++#endif /* FM_GRS_ERRATA_DTSEC_A002 */
++ }
++
++ if (mode & e_COMM_MODE_TX)
++#if defined(FM_GTS_ERRATA_DTSEC_A004) || defined(FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012)
++ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
++ DBG(INFO, ("GTS not supported due to DTSEC_A004 errata."));
++#else /* not defined(FM_GTS_ERRATA_DTSEC_A004) ||... */
++#ifdef FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014
++ DBG(INFO, ("GTS not supported due to DTSEC_A0014 errata."));
++#else /* FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014 */
++ fman_dtsec_stop_tx(p_MemMap);
++#endif /* FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014 */
++#endif /* defined(FM_GTS_ERRATA_DTSEC_A004) ||... */
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error GracefulRestart(t_Dtsec *p_Dtsec, e_CommMode mode)
++{
++ struct dtsec_regs *p_MemMap;
++
++ ASSERT_COND(p_Dtsec);
++ p_MemMap = p_Dtsec->p_MemMap;
++ ASSERT_COND(p_MemMap);
++
++    /* Clear the graceful stop bits */
++ if (mode & e_COMM_MODE_TX)
++ fman_dtsec_start_tx(p_MemMap);
++
++ if (mode & e_COMM_MODE_RX)
++ fman_dtsec_start_rx(p_MemMap);
++
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* dTSEC Configs modification functions */
++/*****************************************************************************/
++
++/* .............................................................................. */
++
++static t_Error DtsecConfigLoopback(t_Handle h_Dtsec, bool newVal)
++{
++
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->p_DtsecDriverParam->loopback = newVal;
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecConfigMaxFrameLength(t_Handle h_Dtsec, uint16_t newVal)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->p_DtsecDriverParam->maximum_frame = newVal;
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecConfigPadAndCrc(t_Handle h_Dtsec, bool newVal)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->p_DtsecDriverParam->tx_pad_crc = newVal;
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecConfigHalfDuplex(t_Handle h_Dtsec, bool newVal)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->p_DtsecDriverParam->halfdup_on = newVal;
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecConfigTbiPhyAddr(t_Handle h_Dtsec, uint8_t newVal)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->p_DtsecDriverParam->tbi_phy_addr = newVal;
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecConfigLengthCheck(t_Handle h_Dtsec, bool newVal)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->p_DtsecDriverParam->rx_len_check = newVal;
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecConfigException(t_Handle h_Dtsec, e_FmMacExceptions exception, bool enable)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ if (exception != e_FM_MAC_EX_1G_1588_TS_RX_ERR)
++ {
++ GET_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_Dtsec->exceptions |= bitMask;
++ else
++ p_Dtsec->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++ }
++ else
++ {
++ if (!p_Dtsec->ptpTsuEnabled)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exception valid for 1588 only"));
++
++ if (enable)
++ p_Dtsec->enTsuErrExeption = TRUE;
++ else
++ p_Dtsec->enTsuErrExeption = FALSE;
++ }
++
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* dTSEC Run Time API functions */
++/*****************************************************************************/
++
++/* .............................................................................. */
++
++static t_Error DtsecEnable(t_Handle h_Dtsec, e_CommMode mode)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ fman_dtsec_enable(p_Dtsec->p_MemMap,
++ (bool)!!(mode & e_COMM_MODE_RX),
++ (bool)!!(mode & e_COMM_MODE_TX));
++
++ GracefulRestart(p_Dtsec, mode);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecDisable (t_Handle h_Dtsec, e_CommMode mode)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ GracefulStop(p_Dtsec, mode);
++
++ fman_dtsec_disable(p_Dtsec->p_MemMap,
++ (bool)!!(mode & e_COMM_MODE_RX),
++ (bool)!!(mode & e_COMM_MODE_TX));
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecSetTxPauseFrames(t_Handle h_Dtsec,
++ uint8_t priority,
++ uint16_t pauseTime,
++ uint16_t threshTime)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ UNUSED(priority);UNUSED(threshTime);
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++#ifdef FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003
++ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
++ if (0 < pauseTime && pauseTime <= 320)
++ RETURN_ERROR(MINOR, E_INVALID_VALUE,
++                         ("The pause-time value %d is illegal due to errata dTSEC-A003;"
++                          " the value must be greater than 320.", pauseTime));
++#endif /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 */
++
++ fman_dtsec_set_tx_pause_frames(p_Dtsec->p_MemMap, pauseTime);
++ return E_OK;
++}
++
++/* .............................................................................. */
++/* Backward compatibility; will be removed in the future. */
++static t_Error DtsecTxMacPause(t_Handle h_Dtsec, uint16_t pauseTime)
++{
++ return DtsecSetTxPauseFrames(h_Dtsec, 0, pauseTime, 0);
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecRxIgnoreMacPause(t_Handle h_Dtsec, bool en)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ bool accept_pause = !en;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ fman_dtsec_handle_rx_pause(p_Dtsec->p_MemMap, accept_pause);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecEnable1588TimeStamp(t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->ptpTsuEnabled = TRUE;
++ fman_dtsec_set_ts(p_Dtsec->p_MemMap, TRUE);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecDisable1588TimeStamp(t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->ptpTsuEnabled = FALSE;
++ fman_dtsec_set_ts(p_Dtsec->p_MemMap, FALSE);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecGetStatistics(t_Handle h_Dtsec, t_FmMacStatistics *p_Statistics)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ struct dtsec_regs *p_DtsecMemMap;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_Statistics, E_NULL_POINTER);
++
++ p_DtsecMemMap = p_Dtsec->p_MemMap;
++
++ if (p_Dtsec->statisticsLevel == e_FM_MAC_NONE_STATISTICS)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Statistics disabled"));
++
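++    /* Preset everything to all-ones, presumably so counters not collected at the current statistics level read back as "unavailable". */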
++ memset(p_Statistics, 0xff, sizeof(t_FmMacStatistics));
++
++ if (p_Dtsec->statisticsLevel == e_FM_MAC_FULL_STATISTICS)
++ {
++ p_Statistics->eStatPkts64 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR64)
++ + p_Dtsec->internalStatistics.tr64;
++ p_Statistics->eStatPkts65to127 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR127)
++ + p_Dtsec->internalStatistics.tr127;
++ p_Statistics->eStatPkts128to255 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR255)
++ + p_Dtsec->internalStatistics.tr255;
++ p_Statistics->eStatPkts256to511 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR511)
++ + p_Dtsec->internalStatistics.tr511;
++ p_Statistics->eStatPkts512to1023 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR1K)
++ + p_Dtsec->internalStatistics.tr1k;
++ p_Statistics->eStatPkts1024to1518 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TRMAX)
++ + p_Dtsec->internalStatistics.trmax;
++ p_Statistics->eStatPkts1519to1522 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TRMGV)
++ + p_Dtsec->internalStatistics.trmgv;
++
++ /* MIB II */
++ p_Statistics->ifInOctets = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RBYT)
++ + p_Dtsec->internalStatistics.rbyt;
++ p_Statistics->ifInPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RPKT)
++ + p_Dtsec->internalStatistics.rpkt;
++ p_Statistics->ifInUcastPkts = 0;
++ p_Statistics->ifInMcastPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RMCA)
++ + p_Dtsec->internalStatistics.rmca;
++ p_Statistics->ifInBcastPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RBCA)
++ + p_Dtsec->internalStatistics.rbca;
++ p_Statistics->ifOutOctets = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TBYT)
++ + p_Dtsec->internalStatistics.tbyt;
++ p_Statistics->ifOutPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TPKT)
++ + p_Dtsec->internalStatistics.tpkt;
++ p_Statistics->ifOutUcastPkts = 0;
++ p_Statistics->ifOutMcastPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TMCA)
++ + p_Dtsec->internalStatistics.tmca;
++ p_Statistics->ifOutBcastPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TBCA)
++ + p_Dtsec->internalStatistics.tbca;
++ }
++
++ p_Statistics->eStatFragments = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RFRG)
++ + p_Dtsec->internalStatistics.rfrg;
++ p_Statistics->eStatJabbers = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RJBR)
++ + p_Dtsec->internalStatistics.rjbr;
++ p_Statistics->eStatsDropEvents = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RDRP)
++ + p_Dtsec->internalStatistics.rdrp;
++ p_Statistics->eStatCRCAlignErrors = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RALN)
++ + p_Dtsec->internalStatistics.raln;
++ p_Statistics->eStatUndersizePkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RUND)
++ + p_Dtsec->internalStatistics.rund;
++ p_Statistics->eStatOversizePkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_ROVR)
++ + p_Dtsec->internalStatistics.rovr;
++ p_Statistics->reStatPause = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RXPF)
++ + p_Dtsec->internalStatistics.rxpf;
++ p_Statistics->teStatPause = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TXPF)
++ + p_Dtsec->internalStatistics.txpf;
++ p_Statistics->ifInDiscards = p_Statistics->eStatsDropEvents;
++ p_Statistics->ifInErrors = p_Statistics->eStatsDropEvents + p_Statistics->eStatCRCAlignErrors
++ + fman_dtsec_get_stat_counter(p_DtsecMemMap,E_DTSEC_STAT_RFLR) + p_Dtsec->internalStatistics.rflr
++ + fman_dtsec_get_stat_counter(p_DtsecMemMap,E_DTSEC_STAT_RCDE) + p_Dtsec->internalStatistics.rcde
++ + fman_dtsec_get_stat_counter(p_DtsecMemMap,E_DTSEC_STAT_RCSE) + p_Dtsec->internalStatistics.rcse;
++
++ p_Statistics->ifOutDiscards = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TDRP)
++ + p_Dtsec->internalStatistics.tdrp;
++    p_Statistics->ifOutErrors = p_Statistics->ifOutDiscards /* frames transmitted with error */
++ + fman_dtsec_get_stat_counter(p_DtsecMemMap,E_DTSEC_STAT_TFCS)
++ + p_Dtsec->internalStatistics.tfcs;
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecModifyMacAddress (t_Handle h_Dtsec, t_EnetAddr *p_EnetAddr)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ /* Initialize MAC Station Address registers (1 & 2) */
++    /* The station address has to be byte-swapped (big endian to little endian) */
++ p_Dtsec->addr = ENET_ADDR_TO_UINT64(*p_EnetAddr);
++ fman_dtsec_set_mac_address(p_Dtsec->p_MemMap, (uint8_t *)(*p_EnetAddr));
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecResetCounters (t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ /* clear HW counters */
++ fman_dtsec_reset_stat(p_Dtsec->p_MemMap);
++
++ /* clear SW counters holding carries */
++ memset(&p_Dtsec->internalStatistics, 0, sizeof(t_InternalStatistics));
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecAddExactMatchMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *) h_Dtsec;
++ uint64_t ethAddr;
++ uint8_t paddrNum;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ if (ethAddr & GROUP_ADDRESS)
++ /* Multicast address has no effect in PADDR */
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Multicast address"));
++
++ /* Make sure no PADDR contains this address */
++ for (paddrNum = 0; paddrNum < DTSEC_NUM_OF_PADDRS; paddrNum++)
++ if (p_Dtsec->indAddrRegUsed[paddrNum])
++ if (p_Dtsec->paddr[paddrNum] == ethAddr)
++ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
++
++ /* Find first unused PADDR */
++ for (paddrNum = 0; paddrNum < DTSEC_NUM_OF_PADDRS; paddrNum++)
++ if (!(p_Dtsec->indAddrRegUsed[paddrNum]))
++ {
++ /* mark this PADDR as used */
++ p_Dtsec->indAddrRegUsed[paddrNum] = TRUE;
++ /* store address */
++ p_Dtsec->paddr[paddrNum] = ethAddr;
++
++ /* put in hardware */
++ fman_dtsec_add_addr_in_paddr(p_Dtsec->p_MemMap, (uint64_t)PTR_TO_UINT(&ethAddr), paddrNum);
++ p_Dtsec->numOfIndAddrInRegs++;
++
++ return E_OK;
++ }
++
++ /* No free PADDR */
++ RETURN_ERROR(MAJOR, E_FULL, NO_MSG);
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecDelExactMatchMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *) h_Dtsec;
++ uint64_t ethAddr;
++ uint8_t paddrNum;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ /* Find used PADDR containing this address */
++ for (paddrNum = 0; paddrNum < DTSEC_NUM_OF_PADDRS; paddrNum++)
++ {
++ if ((p_Dtsec->indAddrRegUsed[paddrNum]) &&
++ (p_Dtsec->paddr[paddrNum] == ethAddr))
++ {
++ /* mark this PADDR as not used */
++ p_Dtsec->indAddrRegUsed[paddrNum] = FALSE;
++ /* clear in hardware */
++ fman_dtsec_clear_addr_in_paddr(p_Dtsec->p_MemMap, paddrNum);
++ p_Dtsec->numOfIndAddrInRegs--;
++
++ return E_OK;
++ }
++ }
++
++ RETURN_ERROR(MAJOR, E_NOT_FOUND, NO_MSG);
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecAddHashMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ t_EthHashEntry *p_HashEntry;
++ uint64_t ethAddr;
++ int32_t bucket;
++ uint32_t crc;
++ bool mcast, ghtx;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ ghtx = (bool)((fman_dtsec_get_rctrl(p_Dtsec->p_MemMap) & RCTRL_GHTX) ? TRUE : FALSE);
++ mcast = (bool)((ethAddr & MAC_GROUP_ADDRESS) ? TRUE : FALSE);
++
++ if (ghtx && !mcast) /* Cannot handle unicast mac addr when GHTX is on */
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Could not compute hash bucket"));
++
++ crc = GetMacAddrHashCode(ethAddr);
++
++ /* considering the 9 highest order bits in crc H[8:0]:
++ * if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
++ * and H[5:1] (next 5 bits) identify the hash bit
++     * if ghtx = 1 H[8:5] (highest order 4 bits) identify the hash register
++ * and H[4:0] (next 5 bits) identify the hash bit.
++ *
++ * In bucket index output the low 5 bits identify the hash register bit,
++ * while the higher 4 bits identify the hash register
++ */
++
++ if (ghtx)
++ bucket = (int32_t)((crc >> 23) & 0x1ff);
++ else {
++ bucket = (int32_t)((crc >> 24) & 0xff);
++ /* if !ghtx and mcast the bit must be set in gaddr instead of igaddr. */
++ if (mcast)
++ bucket += 0x100;
++ }
++
++ fman_dtsec_set_bucket(p_Dtsec->p_MemMap, bucket, TRUE);
++
++ /* Create element to be added to the driver hash table */
++ p_HashEntry = (t_EthHashEntry *)XX_Malloc(sizeof(t_EthHashEntry));
++ p_HashEntry->addr = ethAddr;
++ INIT_LIST(&p_HashEntry->node);
++
++ if (ethAddr & MAC_GROUP_ADDRESS)
++ /* Group Address */
++ LIST_AddToTail(&(p_HashEntry->node), &(p_Dtsec->p_MulticastAddrHash->p_Lsts[bucket]));
++ else
++ LIST_AddToTail(&(p_HashEntry->node), &(p_Dtsec->p_UnicastAddrHash->p_Lsts[bucket]));
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecDelHashMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ t_List *p_Pos;
++ t_EthHashEntry *p_HashEntry = NULL;
++ uint64_t ethAddr;
++ int32_t bucket;
++ uint32_t crc;
++ bool mcast, ghtx;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ ghtx = (bool)((fman_dtsec_get_rctrl(p_Dtsec->p_MemMap) & RCTRL_GHTX) ? TRUE : FALSE);
++ mcast = (bool)((ethAddr & MAC_GROUP_ADDRESS) ? TRUE : FALSE);
++
++ if (ghtx && !mcast) /* Cannot handle unicast mac addr when GHTX is on */
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Could not compute hash bucket"));
++
++ crc = GetMacAddrHashCode(ethAddr);
++
++ if (ghtx)
++ bucket = (int32_t)((crc >> 23) & 0x1ff);
++ else {
++ bucket = (int32_t)((crc >> 24) & 0xff);
++ /* if !ghtx and mcast the bit must be set in gaddr instead of igaddr. */
++ if (mcast)
++ bucket += 0x100;
++ }
++
++ if (ethAddr & MAC_GROUP_ADDRESS)
++ {
++ /* Group Address */
++ LIST_FOR_EACH(p_Pos, &(p_Dtsec->p_MulticastAddrHash->p_Lsts[bucket]))
++ {
++ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos);
++ if (p_HashEntry->addr == ethAddr)
++ {
++ LIST_DelAndInit(&p_HashEntry->node);
++ XX_Free(p_HashEntry);
++ break;
++ }
++ }
++ if (LIST_IsEmpty(&p_Dtsec->p_MulticastAddrHash->p_Lsts[bucket]))
++ fman_dtsec_set_bucket(p_Dtsec->p_MemMap, bucket, FALSE);
++ }
++ else
++ {
++ /* Individual Address */
++ LIST_FOR_EACH(p_Pos, &(p_Dtsec->p_UnicastAddrHash->p_Lsts[bucket]))
++ {
++ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos);
++ if (p_HashEntry->addr == ethAddr)
++ {
++ LIST_DelAndInit(&p_HashEntry->node);
++ XX_Free(p_HashEntry);
++ break;
++ }
++ }
++ if (LIST_IsEmpty(&p_Dtsec->p_UnicastAddrHash->p_Lsts[bucket]))
++ fman_dtsec_set_bucket(p_Dtsec->p_MemMap, bucket, FALSE);
++ }
++
++ /* address does not exist */
++ ASSERT_COND(p_HashEntry != NULL);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecSetPromiscuous(t_Handle h_Dtsec, bool newVal)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ fman_dtsec_set_uc_promisc(p_Dtsec->p_MemMap, newVal);
++ fman_dtsec_set_mc_promisc(p_Dtsec->p_MemMap, newVal);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecSetStatistics(t_Handle h_Dtsec, e_FmMacStatisticsLevel statisticsLevel)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->statisticsLevel = statisticsLevel;
++
++ err = (t_Error)fman_dtsec_set_stat_level(p_Dtsec->p_MemMap,
++ (enum dtsec_stat_level)statisticsLevel);
++ if (err != E_OK)
++ return err;
++
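++    /* MSROEN raises an interrupt when a MIB counter carries; keep it enabled whenever statistics are gathered so DtsecIsr() can fold the wrap into the software counters. */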
++ switch (statisticsLevel)
++ {
++ case (e_FM_MAC_NONE_STATISTICS):
++ p_Dtsec->exceptions &= ~DTSEC_IMASK_MSROEN;
++ break;
++ case (e_FM_MAC_PARTIAL_STATISTICS):
++ p_Dtsec->exceptions |= DTSEC_IMASK_MSROEN;
++ break;
++ case (e_FM_MAC_FULL_STATISTICS):
++ p_Dtsec->exceptions |= DTSEC_IMASK_MSROEN;
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecSetWakeOnLan(t_Handle h_Dtsec, bool en)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ fman_dtsec_set_wol(p_Dtsec->p_MemMap, en);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecAdjustLink(t_Handle h_Dtsec, e_EnetSpeed speed, bool fullDuplex)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ int err;
++ enum enet_interface enet_interface;
++ enum enet_speed enet_speed;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ p_Dtsec->enetMode = MAKE_ENET_MODE(ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode), speed);
++ enet_interface = (enum enet_interface) ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode);
++ enet_speed = (enum enet_speed) ENET_SPEED_FROM_MODE(p_Dtsec->enetMode);
++ p_Dtsec->halfDuplex = !fullDuplex;
++
++ err = fman_dtsec_adjust_link(p_Dtsec->p_MemMap, enet_interface, enet_speed, fullDuplex);
++
++ if (err == -EINVAL)
++ RETURN_ERROR(MAJOR, E_CONFLICT, ("Ethernet interface does not support Half Duplex mode"));
++
++ return (t_Error)err;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecRestartAutoneg(t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ uint16_t tmpReg16;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
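++    /* Restart TBI auto-negotiation: keep 1000 Mb/s full duplex and toggle the AN restart bit. */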
++ DTSEC_MII_ReadPhyReg(p_Dtsec, p_Dtsec->tbi_phy_addr, 0, &tmpReg16);
++
++ tmpReg16 &= ~( PHY_CR_SPEED0 | PHY_CR_SPEED1 );
++ tmpReg16 |= (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
++
++ DTSEC_MII_WritePhyReg(p_Dtsec, p_Dtsec->tbi_phy_addr, 0, tmpReg16);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecGetId(t_Handle h_Dtsec, uint32_t *macId)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ *macId = p_Dtsec->macId;
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecGetVersion(t_Handle h_Dtsec, uint32_t *macVersion)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ *macVersion = fman_dtsec_get_revision(p_Dtsec->p_MemMap);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error DtsecSetException(t_Handle h_Dtsec, e_FmMacExceptions exception, bool enable)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++
++ if (exception != e_FM_MAC_EX_1G_1588_TS_RX_ERR)
++ {
++ GET_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_Dtsec->exceptions |= bitMask;
++ else
++ p_Dtsec->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ if (enable)
++ fman_dtsec_enable_interrupt(p_Dtsec->p_MemMap, bitMask);
++ else
++ fman_dtsec_disable_interrupt(p_Dtsec->p_MemMap, bitMask);
++ }
++ else
++ {
++ if (!p_Dtsec->ptpTsuEnabled)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exception valid for 1588 only"));
++
++ if (enable)
++ {
++ p_Dtsec->enTsuErrExeption = TRUE;
++ fman_dtsec_enable_tmr_interrupt(p_Dtsec->p_MemMap);
++ }
++ else
++ {
++ p_Dtsec->enTsuErrExeption = FALSE;
++ fman_dtsec_disable_tmr_interrupt(p_Dtsec->p_MemMap);
++ }
++ }
++
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* dTSEC Init & Free API */
++/*****************************************************************************/
++
++/* .............................................................................. */
++
++static t_Error DtsecInit(t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ struct dtsec_cfg *p_DtsecDriverParam;
++ t_Error err;
++ uint16_t maxFrmLn;
++ enum enet_interface enet_interface;
++ enum enet_speed enet_speed;
++ t_EnetAddr ethAddr;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->fmMacControllerDriver.h_Fm, E_INVALID_HANDLE);
++
++ FM_GetRevision(p_Dtsec->fmMacControllerDriver.h_Fm, &p_Dtsec->fmMacControllerDriver.fmRevInfo);
++ CHECK_INIT_PARAMETERS(p_Dtsec, CheckInitParameters);
++
++ p_DtsecDriverParam = p_Dtsec->p_DtsecDriverParam;
++ p_Dtsec->halfDuplex = p_DtsecDriverParam->halfdup_on;
++
++ enet_interface = (enum enet_interface)ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode);
++ enet_speed = (enum enet_speed)ENET_SPEED_FROM_MODE(p_Dtsec->enetMode);
++ MAKE_ENET_ADDR_FROM_UINT64(p_Dtsec->addr, ethAddr);
++
++ err = (t_Error)fman_dtsec_init(p_Dtsec->p_MemMap,
++ p_DtsecDriverParam,
++ enet_interface,
++ enet_speed,
++ (uint8_t*)ethAddr,
++ p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev,
++ p_Dtsec->fmMacControllerDriver.fmRevInfo.minorRev,
++ p_Dtsec->exceptions);
++ if (err)
++ {
++ FreeInitResources(p_Dtsec);
++ RETURN_ERROR(MAJOR, err, ("This DTSEC version does not support the required i/f mode"));
++ }
++
++ if (ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode) == e_ENET_IF_SGMII)
++ {
++ uint16_t tmpReg16;
++
++ /* Configure the TBI PHY Control Register */
++ tmpReg16 = PHY_TBICON_CLK_SEL | PHY_TBICON_SRESET;
++ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 17, tmpReg16);
++
++ tmpReg16 = PHY_TBICON_CLK_SEL;
++ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 17, tmpReg16);
++
++ tmpReg16 = (PHY_CR_PHY_RESET | PHY_CR_ANE | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
++ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 0, tmpReg16);
++
++ if (p_Dtsec->enetMode & ENET_IF_SGMII_BASEX)
++ tmpReg16 = PHY_TBIANA_1000X;
++ else
++ tmpReg16 = PHY_TBIANA_SGMII;
++ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 4, tmpReg16);
++
++ tmpReg16 = (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
++
++ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 0, tmpReg16);
++ }
++
++ /* Max Frame Length */
++ maxFrmLn = fman_dtsec_get_max_frame_len(p_Dtsec->p_MemMap);
++ err = FmSetMacMaxFrame(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MAC_1G,
++ p_Dtsec->fmMacControllerDriver.macId, maxFrmLn);
++ if (err)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ p_Dtsec->p_MulticastAddrHash = AllocHashTable(EXTENDED_HASH_TABLE_SIZE);
++ if (!p_Dtsec->p_MulticastAddrHash)
++ {
++ FreeInitResources(p_Dtsec);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MC hash table allocation FAILED"));
++ }
++
++ p_Dtsec->p_UnicastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
++ if (!p_Dtsec->p_UnicastAddrHash)
++ {
++ FreeInitResources(p_Dtsec);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("UC hash table is FAILED"));
++ }
++
++ /* register err intr handler for dtsec to FPM (err)*/
++ FmRegisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm,
++ e_FM_MOD_1G_MAC,
++ p_Dtsec->macId,
++ e_FM_INTR_TYPE_ERR,
++ DtsecIsr,
++ p_Dtsec);
++ /* register 1588 intr handler for TMR to FPM (normal)*/
++ FmRegisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm,
++ e_FM_MOD_1G_MAC,
++ p_Dtsec->macId,
++ e_FM_INTR_TYPE_NORMAL,
++ Dtsec1588Isr,
++ p_Dtsec);
++ /* register normal intr handler for dtsec to main interrupt controller. */
++ if (p_Dtsec->mdioIrq != NO_IRQ)
++ {
++ XX_SetIntr(p_Dtsec->mdioIrq, DtsecMdioIsr, p_Dtsec);
++ XX_EnableIntr(p_Dtsec->mdioIrq);
++ }
++
++ XX_Free(p_DtsecDriverParam);
++ p_Dtsec->p_DtsecDriverParam = NULL;
++
++ err = DtsecSetStatistics(h_Dtsec, e_FM_MAC_FULL_STATISTICS);
++ if (err)
++ {
++ FreeInitResources(p_Dtsec);
++ RETURN_ERROR(MAJOR, err, ("Undefined statistics level"));
++ }
++
++ return E_OK;
++}
++
++/* ........................................................................... */
++
++static t_Error DtsecFree(t_Handle h_Dtsec)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++
++ if (p_Dtsec->p_DtsecDriverParam)
++ {
++ /* Called after config */
++ XX_Free(p_Dtsec->p_DtsecDriverParam);
++ p_Dtsec->p_DtsecDriverParam = NULL;
++ }
++ else
++ /* Called after init */
++ FreeInitResources(p_Dtsec);
++
++ XX_Free(p_Dtsec);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static void InitFmMacControllerDriver(t_FmMacControllerDriver *p_FmMacControllerDriver)
++{
++ p_FmMacControllerDriver->f_FM_MAC_Init = DtsecInit;
++ p_FmMacControllerDriver->f_FM_MAC_Free = DtsecFree;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetStatistics = DtsecSetStatistics;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback = DtsecConfigLoopback;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength = DtsecConfigMaxFrameLength;
++
++ p_FmMacControllerDriver->f_FM_MAC_ConfigWan = NULL; /* Not supported on dTSEC */
++
++ p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc = DtsecConfigPadAndCrc;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex = DtsecConfigHalfDuplex;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck = DtsecConfigLengthCheck;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigTbiPhyAddr = DtsecConfigTbiPhyAddr;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigException = DtsecConfigException;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit = NULL;
++
++ p_FmMacControllerDriver->f_FM_MAC_Enable = DtsecEnable;
++ p_FmMacControllerDriver->f_FM_MAC_Disable = DtsecDisable;
++ p_FmMacControllerDriver->f_FM_MAC_Resume = NULL;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetException = DtsecSetException;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous = DtsecSetPromiscuous;
++ p_FmMacControllerDriver->f_FM_MAC_AdjustLink = DtsecAdjustLink;
++ p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan = DtsecSetWakeOnLan;
++ p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg = DtsecRestartAutoneg;
++
++ p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp = DtsecEnable1588TimeStamp;
++ p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp = DtsecDisable1588TimeStamp;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = DtsecTxMacPause;
++ p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = DtsecSetTxPauseFrames;
++ p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames = DtsecRxIgnoreMacPause;
++
++ p_FmMacControllerDriver->f_FM_MAC_ResetCounters = DtsecResetCounters;
++ p_FmMacControllerDriver->f_FM_MAC_GetStatistics = DtsecGetStatistics;
++
++ p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr = DtsecModifyMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr = DtsecAddHashMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr = DtsecDelHashMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr = DtsecAddExactMatchMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr = DtsecDelExactMatchMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_GetId = DtsecGetId;
++ p_FmMacControllerDriver->f_FM_MAC_GetVersion = DtsecGetVersion;
++ p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength = DtsecGetMaxFrameLength;
++
++ p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg = DTSEC_MII_WritePhyReg;
++ p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg = DTSEC_MII_ReadPhyReg;
++
++}
++
++
++/*****************************************************************************/
++/* dTSEC Config Main Entry */
++/*****************************************************************************/
++
++/* .............................................................................. */
++
++t_Handle DTSEC_Config(t_FmMacParams *p_FmMacParam)
++{
++ t_Dtsec *p_Dtsec;
++ struct dtsec_cfg *p_DtsecDriverParam;
++ uintptr_t baseAddr;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_NULL_POINTER, NULL);
++
++ baseAddr = p_FmMacParam->baseAddr;
++
++ /* allocate memory for the dTSEC driver data structure. */
++ p_Dtsec = (t_Dtsec *)XX_Malloc(sizeof(t_Dtsec));
++ if (!p_Dtsec)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("dTSEC driver structure"));
++ return NULL;
++ }
++ memset(p_Dtsec, 0, sizeof(t_Dtsec));
++ InitFmMacControllerDriver(&p_Dtsec->fmMacControllerDriver);
++
++ /* allocate memory for the dTSEC driver parameters data structure. */
++ p_DtsecDriverParam = (struct dtsec_cfg *) XX_Malloc(sizeof(struct dtsec_cfg));
++ if (!p_DtsecDriverParam)
++ {
++ XX_Free(p_Dtsec);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("dTSEC driver parameters"));
++ return NULL;
++ }
++ memset(p_DtsecDriverParam, 0, sizeof(struct dtsec_cfg));
++
++ /* Plant parameter structure pointer */
++ p_Dtsec->p_DtsecDriverParam = p_DtsecDriverParam;
++
++ fman_dtsec_defconfig(p_DtsecDriverParam);
++
++ p_Dtsec->p_MemMap = (struct dtsec_regs *)UINT_TO_PTR(baseAddr);
++ p_Dtsec->p_MiiMemMap = (struct dtsec_mii_reg *)UINT_TO_PTR(baseAddr + DTSEC_TO_MII_OFFSET);
++ p_Dtsec->addr = ENET_ADDR_TO_UINT64(p_FmMacParam->addr);
++ p_Dtsec->enetMode = p_FmMacParam->enetMode;
++ p_Dtsec->macId = p_FmMacParam->macId;
++ p_Dtsec->exceptions = DEFAULT_exceptions;
++ p_Dtsec->mdioIrq = p_FmMacParam->mdioIrq;
++ p_Dtsec->f_Exception = p_FmMacParam->f_Exception;
++ p_Dtsec->f_Event = p_FmMacParam->f_Event;
++ p_Dtsec->h_App = p_FmMacParam->h_App;
++ p_Dtsec->ptpTsuEnabled = p_Dtsec->p_DtsecDriverParam->ptp_tsu_en;
++ p_Dtsec->enTsuErrExeption = p_Dtsec->p_DtsecDriverParam->ptp_exception_en;
++ p_Dtsec->tbi_phy_addr = p_Dtsec->p_DtsecDriverParam->tbi_phy_addr;
++
++ return p_Dtsec;
++}
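++
++/* Typical lifecycle (illustrative sketch; error handling omitted). On
++ * DPAA 1.0 the FM_MAC wrapper in fm_mac.c dispatches sub-10G MACs here, so a
++ * caller would do:
++ *
++ *     t_Handle h_Mac = FM_MAC_Config(&macParams); // allocates t_Dtsec, loads defaults
++ *     // ... optional FM_MAC_ConfigXxx() overrides while the driver-param
++ *     // structure is still attached ...
++ *     FM_MAC_Init(h_Mac);  // reaches DtsecInit() via f_FM_MAC_Init
++ *     ...
++ *     FM_MAC_Free(h_Mac);  // reaches DtsecFree() via f_FM_MAC_Free
++ */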
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h
+new file mode 100644
+index 00000000..c26f40cc
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h
+@@ -0,0 +1,228 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File dtsec.h
++
++ @Description FM dTSEC ...
++*//***************************************************************************/
++#ifndef __DTSEC_H
++#define __DTSEC_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++#include "enet_ext.h"
++
++#include "dtsec_mii_acc.h"
++#include "fm_mac.h"
++
++
++#define DEFAULT_exceptions \
++ ((uint32_t)(DTSEC_IMASK_BREN | \
++ DTSEC_IMASK_RXCEN | \
++ DTSEC_IMASK_BTEN | \
++ DTSEC_IMASK_TXCEN | \
++ DTSEC_IMASK_TXEEN | \
++ DTSEC_IMASK_ABRTEN | \
++ DTSEC_IMASK_LCEN | \
++ DTSEC_IMASK_CRLEN | \
++ DTSEC_IMASK_XFUNEN | \
++ DTSEC_IMASK_IFERREN | \
++ DTSEC_IMASK_MAGEN | \
++ DTSEC_IMASK_TDPEEN | \
++ DTSEC_IMASK_RDPEEN))
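++
++/* Interrupt sources unmasked by default at init. Note that
++ * e_FM_MAC_EX_1G_1588_TS_RX_ERR has no IMASK bit; it is routed through the
++ * separate timer (TMR) interrupt instead (see DtsecSetException()). */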
++
++#define GET_EXCEPTION_FLAG(bitMask, exception) switch (exception) { \
++ case e_FM_MAC_EX_1G_BAB_RX: \
++ bitMask = DTSEC_IMASK_BREN; break; \
++ case e_FM_MAC_EX_1G_RX_CTL: \
++ bitMask = DTSEC_IMASK_RXCEN; break; \
++ case e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET: \
++ bitMask = DTSEC_IMASK_GTSCEN; break; \
++ case e_FM_MAC_EX_1G_BAB_TX: \
++ bitMask = DTSEC_IMASK_BTEN; break; \
++ case e_FM_MAC_EX_1G_TX_CTL: \
++ bitMask = DTSEC_IMASK_TXCEN; break; \
++ case e_FM_MAC_EX_1G_TX_ERR: \
++ bitMask = DTSEC_IMASK_TXEEN; break; \
++ case e_FM_MAC_EX_1G_LATE_COL: \
++ bitMask = DTSEC_IMASK_LCEN; break; \
++ case e_FM_MAC_EX_1G_COL_RET_LMT: \
++ bitMask = DTSEC_IMASK_CRLEN; break; \
++ case e_FM_MAC_EX_1G_TX_FIFO_UNDRN: \
++ bitMask = DTSEC_IMASK_XFUNEN; break; \
++ case e_FM_MAC_EX_1G_MAG_PCKT: \
++ bitMask = DTSEC_IMASK_MAGEN; break; \
++ case e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET: \
++ bitMask = DTSEC_IMASK_MMRDEN; break; \
++ case e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET: \
++ bitMask = DTSEC_IMASK_MMWREN; break; \
++ case e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET: \
++ bitMask = DTSEC_IMASK_GRSCEN; break; \
++ case e_FM_MAC_EX_1G_TX_DATA_ERR: \
++ bitMask = DTSEC_IMASK_TDPEEN; break; \
++ case e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL: \
++ bitMask = DTSEC_IMASK_MSROEN; break; \
++ default: bitMask = 0; break; }
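++
++/* Usage sketch (illustrative): DtsecSetException() uses this macro to map an
++ * abstract MAC exception onto its DTSEC_IMASK_* bit, e.g.:
++ *
++ *     uint32_t bitMask = 0;
++ *     GET_EXCEPTION_FLAG(bitMask, e_FM_MAC_EX_1G_BAB_RX);
++ *     // bitMask == DTSEC_IMASK_BREN; a result of 0 means "undefined exception"
++ */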
++
++
++#define MAX_PACKET_ALIGNMENT 31
++#define MAX_INTER_PACKET_GAP 0x7f
++#define MAX_INTER_PALTERNATE_BEB 0x0f
++#define MAX_RETRANSMISSION 0x0f
++#define MAX_COLLISION_WINDOW 0x03ff
++
++
++/********************* From mac ext ******************************************/
++typedef uint32_t t_ErrorDisable;
++
++#define ERROR_DISABLE_TRANSMIT 0x00400000
++#define ERROR_DISABLE_LATE_COLLISION 0x00040000
++#define ERROR_DISABLE_COLLISION_RETRY_LIMIT 0x00020000
++#define ERROR_DISABLE_TxFIFO_UNDERRUN 0x00010000
++#define ERROR_DISABLE_TxABORT 0x00008000
++#define ERROR_DISABLE_INTERFACE 0x00004000
++#define ERROR_DISABLE_TxDATA_PARITY 0x00000002
++#define ERROR_DISABLE_RxDATA_PARITY 0x00000001
++
++/*****************************************************************************/
++#define DTSEC_NUM_OF_PADDRS 15 /* number of pattern match registers (entries) */
++
++#define GROUP_ADDRESS 0x0000010000000000LL /* Group address bit indication */
++
++#define HASH_TABLE_SIZE 256 /* Hash table size (32 bits * 8 regs) */
++#define EXTENDED_HASH_TABLE_SIZE 512 /* Extended Hash table size (32 bits * 16 regs) */
++
++#define DTSEC_TO_MII_OFFSET 0x1000 /* offset of the MII management registers from the dTSEC base */
++
++#define MAX_PHYS 32 /* maximum number of phys */
++
++#define VAL32BIT 0x100000000LL
++#define VAL22BIT 0x00400000
++#define VAL16BIT 0x00010000
++#define VAL12BIT 0x00001000
++
++/* CAR1/2 bits */
++#define CAR1_TR64 0x80000000
++#define CAR1_TR127 0x40000000
++#define CAR1_TR255 0x20000000
++#define CAR1_TR511 0x10000000
++#define CAR1_TRK1 0x08000000
++#define CAR1_TRMAX 0x04000000
++#define CAR1_TRMGV 0x02000000
++
++#define CAR1_RBYT 0x00010000
++#define CAR1_RPKT 0x00008000
++#define CAR1_RMCA 0x00002000
++#define CAR1_RBCA 0x00001000
++#define CAR1_RXPF 0x00000400
++#define CAR1_RALN 0x00000100
++#define CAR1_RFLR 0x00000080
++#define CAR1_RCDE 0x00000040
++#define CAR1_RCSE 0x00000020
++#define CAR1_RUND 0x00000010
++#define CAR1_ROVR 0x00000008
++#define CAR1_RFRG 0x00000004
++#define CAR1_RJBR 0x00000002
++#define CAR1_RDRP 0x00000001
++
++#define CAR2_TFCS 0x00040000
++#define CAR2_TBYT 0x00002000
++#define CAR2_TPKT 0x00001000
++#define CAR2_TMCA 0x00000800
++#define CAR2_TBCA 0x00000400
++#define CAR2_TXPF 0x00000200
++#define CAR2_TDRP 0x00000001
++
++typedef struct t_InternalStatistics
++{
++ uint64_t tr64;
++ uint64_t tr127;
++ uint64_t tr255;
++ uint64_t tr511;
++ uint64_t tr1k;
++ uint64_t trmax;
++ uint64_t trmgv;
++ uint64_t rfrg;
++ uint64_t rjbr;
++ uint64_t rdrp;
++ uint64_t raln;
++ uint64_t rund;
++ uint64_t rovr;
++ uint64_t rxpf;
++ uint64_t txpf;
++ uint64_t rbyt;
++ uint64_t rpkt;
++ uint64_t rmca;
++ uint64_t rbca;
++ uint64_t rflr;
++ uint64_t rcde;
++ uint64_t rcse;
++ uint64_t tbyt;
++ uint64_t tpkt;
++ uint64_t tmca;
++ uint64_t tbca;
++ uint64_t tdrp;
++ uint64_t tfcs;
++} t_InternalStatistics;
++
++typedef struct {
++ t_FmMacControllerDriver fmMacControllerDriver;
++ t_Handle h_App; /**< Handle to the upper layer application */
++ struct dtsec_regs *p_MemMap; /**< pointer to dTSEC memory mapped registers. */
++ struct dtsec_mii_reg *p_MiiMemMap; /**< pointer to dTSEC MII memory mapped registers. */
++ uint64_t addr; /**< MAC address of device; */
++ e_EnetMode enetMode; /**< Ethernet physical interface */
++ t_FmMacExceptionCallback *f_Exception;
++ int mdioIrq;
++ t_FmMacExceptionCallback *f_Event;
++ bool indAddrRegUsed[DTSEC_NUM_OF_PADDRS]; /**< Whether a particular individual address recognition register is being used */
++ uint64_t paddr[DTSEC_NUM_OF_PADDRS]; /**< MAC address for particular individual address recognition register */
++ uint8_t numOfIndAddrInRegs; /**< Number of individual addresses in registers for this station. */
++ bool halfDuplex;
++ t_InternalStatistics internalStatistics;
++ t_EthHash *p_MulticastAddrHash; /* pointer to driver's global address hash table */
++ t_EthHash *p_UnicastAddrHash; /* pointer to driver's individual address hash table */
++ uint8_t macId;
++ uint8_t tbi_phy_addr;
++ uint32_t exceptions;
++ bool ptpTsuEnabled;
++ bool enTsuErrExeption;
++ e_FmMacStatisticsLevel statisticsLevel;
++ struct dtsec_cfg *p_DtsecDriverParam;
++} t_Dtsec;
++
++
++#endif /* __DTSEC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c
+new file mode 100644
+index 00000000..87da25ff
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c
+@@ -0,0 +1,97 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File dtsec_mii_acc.c
++
++ @Description FM dTSEC MII register access ...
++*//***************************************************************************/
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "fm_mac.h"
++#include "dtsec.h"
++#include "fsl_fman_dtsec_mii_acc.h"
++
++
++/*****************************************************************************/
++t_Error DTSEC_MII_WritePhyReg(t_Handle h_Dtsec,
++ uint8_t phyAddr,
++ uint8_t reg,
++ uint16_t data)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ struct dtsec_mii_reg *miiregs;
++ uint16_t dtsec_freq;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MiiMemMap, E_INVALID_HANDLE);
++
++ dtsec_freq = (uint16_t)(p_Dtsec->fmMacControllerDriver.clkFreq >> 1);
++ miiregs = p_Dtsec->p_MiiMemMap;
++
++ err = (t_Error)fman_dtsec_mii_write_reg(miiregs, phyAddr, reg, data, dtsec_freq);
++
++ return err;
++}
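++
++/* Note: dtsec_freq above is half the FMan clock (clkFreq >> 1); the
++ * low-level fman_dtsec_mii_* helpers are assumed to derive the MDC
++ * management-clock divider from this reference frequency. */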
++
++/*****************************************************************************/
++t_Error DTSEC_MII_ReadPhyReg(t_Handle h_Dtsec,
++ uint8_t phyAddr,
++ uint8_t reg,
++ uint16_t *p_Data)
++{
++ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
++ struct dtsec_mii_reg *miiregs;
++ uint16_t dtsec_freq;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MiiMemMap, E_INVALID_HANDLE);
++
++ dtsec_freq = (uint16_t)(p_Dtsec->fmMacControllerDriver.clkFreq >> 1);
++ miiregs = p_Dtsec->p_MiiMemMap;
++
++ err = fman_dtsec_mii_read_reg(miiregs, phyAddr, reg, p_Data, dtsec_freq);
++
++ if (*p_Data == 0xffff)
++ RETURN_ERROR(MINOR, E_NO_DEVICE,
++ ("Read wrong data (0xffff): phyAddr 0x%x, reg 0x%x",
++ phyAddr, reg));
++ if (err)
++ RETURN_ERROR(MINOR, (t_Error)err, NO_MSG);
++
++ return E_OK;
++}
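++
++/* Note: an all-ones read (0xffff) is what an MDIO bus typically returns when
++ * no PHY responds at the given address (the data line floats high), which is
++ * why it is reported as E_NO_DEVICE rather than passed back as data. Caller
++ * sketch (illustrative; register 2 is the Clause 22 PHY identifier 1):
++ *
++ *     uint16_t id1;
++ *     if (DTSEC_MII_ReadPhyReg(h_Dtsec, phyAddr, 2, &id1) == E_OK)
++ *         ;  // id1 holds the upper bits of the PHY identifier
++ */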
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h
+new file mode 100644
+index 00000000..75cc658a
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h
+@@ -0,0 +1,42 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DTSEC_MII_ACC_H
++#define __DTSEC_MII_ACC_H
++
++#include "std_ext.h"
++
++
++t_Error DTSEC_MII_WritePhyReg(t_Handle h_Dtsec, uint8_t phyAddr, uint8_t reg, uint16_t data);
++t_Error DTSEC_MII_ReadPhyReg(t_Handle h_Dtsec, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
++
++#endif /* __DTSEC_MII_ACC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c
+new file mode 100644
+index 00000000..20bf150a
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c
+@@ -0,0 +1,658 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_mac.c
++
++ @Description FM MAC ...
++*//***************************************************************************/
++#include "std_ext.h"
++#include "string_ext.h"
++#include "sprint_ext.h"
++#include "error_ext.h"
++#include "fm_ext.h"
++
++#include "fm_common.h"
++#include "fm_mac.h"
++
++
++/* ......................................................................... */
++
++t_Handle FM_MAC_Config (t_FmMacParams *p_FmMacParam)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver;
++ uint16_t fmClkFreq;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_INVALID_HANDLE, NULL);
++
++ fmClkFreq = FmGetClockFreq(p_FmMacParam->h_Fm);
++ if (fmClkFreq == 0)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Can't get clock for MAC!"));
++ return NULL;
++ }
++
++#if (DPAA_VERSION == 10)
++ if (ENET_SPEED_FROM_MODE(p_FmMacParam->enetMode) < e_ENET_SPEED_10000)
++ p_FmMacControllerDriver = (t_FmMacControllerDriver *)DTSEC_Config(p_FmMacParam);
++ else
++#if FM_MAX_NUM_OF_10G_MACS > 0
++ p_FmMacControllerDriver = (t_FmMacControllerDriver *)TGEC_Config(p_FmMacParam);
++#else
++ p_FmMacControllerDriver = NULL;
++#endif /* FM_MAX_NUM_OF_10G_MACS > 0 */
++#else
++ p_FmMacControllerDriver = (t_FmMacControllerDriver *)MEMAC_Config(p_FmMacParam);
++#endif /* (DPAA_VERSION == 10) */
++
++ if (!p_FmMacControllerDriver)
++ return NULL;
++
++ p_FmMacControllerDriver->h_Fm = p_FmMacParam->h_Fm;
++ p_FmMacControllerDriver->enetMode = p_FmMacParam->enetMode;
++ p_FmMacControllerDriver->macId = p_FmMacParam->macId;
++ p_FmMacControllerDriver->resetOnInit = DEFAULT_resetOnInit;
++
++ p_FmMacControllerDriver->clkFreq = fmClkFreq;
++
++ return (t_Handle)p_FmMacControllerDriver;
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_Init (t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->resetOnInit &&
++ !p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit &&
++ (FmResetMac(p_FmMacControllerDriver->h_Fm,
++ ((ENET_INTERFACE_FROM_MODE(p_FmMacControllerDriver->enetMode) == e_ENET_IF_XGMII) ?
++ e_FM_MAC_10G : e_FM_MAC_1G),
++ p_FmMacControllerDriver->macId) != E_OK))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't reset MAC!"));
++
++ if (p_FmMacControllerDriver->f_FM_MAC_Init)
++ return p_FmMacControllerDriver->f_FM_MAC_Init(h_FmMac);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_Free (t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_Free)
++ return p_FmMacControllerDriver->f_FM_MAC_Free(h_FmMac);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigResetOnInit (t_Handle h_FmMac, bool enable)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit(h_FmMac, enable);
++
++ p_FmMacControllerDriver->resetOnInit = enable;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigLoopback (t_Handle h_FmMac, bool newVal)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback(h_FmMac, newVal);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigMaxFrameLength (t_Handle h_FmMac, uint16_t newVal)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength(h_FmMac, newVal);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigWan (t_Handle h_FmMac, bool flag)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigWan)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigWan(h_FmMac, flag);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigPadAndCrc (t_Handle h_FmMac, bool newVal)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc(h_FmMac, newVal);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigHalfDuplex (t_Handle h_FmMac, bool newVal)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex(h_FmMac,newVal);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigTbiPhyAddr (t_Handle h_FmMac, uint8_t newVal)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigTbiPhyAddr)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigTbiPhyAddr(h_FmMac,newVal);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigLengthCheck (t_Handle h_FmMac, bool newVal)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck(h_FmMac,newVal);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigException (t_Handle h_FmMac, e_FmMacExceptions ex, bool enable)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigException)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigException(h_FmMac, ex, enable);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++/* ......................................................................... */
++
++t_Error FM_MAC_ConfigSkipFman11Workaround (t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ConfigSkipFman11Workaround)
++ return p_FmMacControllerDriver->f_FM_MAC_ConfigSkipFman11Workaround(h_FmMac);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++
++
++/*****************************************************************************/
++/* Run Time Control */
++/*****************************************************************************/
++
++/* ......................................................................... */
++
++t_Error FM_MAC_Enable (t_Handle h_FmMac, e_CommMode mode)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_Enable)
++ return p_FmMacControllerDriver->f_FM_MAC_Enable(h_FmMac, mode);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_Disable (t_Handle h_FmMac, e_CommMode mode)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_Disable)
++ return p_FmMacControllerDriver->f_FM_MAC_Disable(h_FmMac, mode);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MAC_Resume (t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_Resume)
++ return p_FmMacControllerDriver->f_FM_MAC_Resume(h_FmMac);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_Enable1588TimeStamp (t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp)
++ return p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp(h_FmMac);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_Disable1588TimeStamp (t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp)
++ return p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp(h_FmMac);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_SetTxAutoPauseFrames(t_Handle h_FmMac,
++ uint16_t pauseTime)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames)
++ return p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames(h_FmMac,
++ pauseTime);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_SetTxPauseFrames(t_Handle h_FmMac,
++ uint8_t priority,
++ uint16_t pauseTime,
++ uint16_t threshTime)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames)
++ return p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames(h_FmMac,
++ priority,
++ pauseTime,
++ threshTime);
++
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_SetRxIgnorePauseFrames (t_Handle h_FmMac, bool en)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames)
++ return p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames(h_FmMac, en);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_SetWakeOnLan (t_Handle h_FmMac, bool en)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan)
++ return p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan(h_FmMac, en);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ResetCounters (t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ResetCounters)
++ return p_FmMacControllerDriver->f_FM_MAC_ResetCounters(h_FmMac);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_SetException(t_Handle h_FmMac, e_FmMacExceptions ex, bool enable)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_SetException)
++ return p_FmMacControllerDriver->f_FM_MAC_SetException(h_FmMac, ex, enable);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_SetStatistics (t_Handle h_FmMac, e_FmMacStatisticsLevel statisticsLevel)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_SetStatistics)
++ return p_FmMacControllerDriver->f_FM_MAC_SetStatistics(h_FmMac, statisticsLevel);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_GetStatistics (t_Handle h_FmMac, t_FmMacStatistics *p_Statistics)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_GetStatistics)
++ return p_FmMacControllerDriver->f_FM_MAC_GetStatistics(h_FmMac, p_Statistics);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_ModifyMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr)
++ return p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr(h_FmMac, p_EnetAddr);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_AddHashMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr)
++ return p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr(h_FmMac, p_EnetAddr);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_RemoveHashMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr)
++ return p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr(h_FmMac, p_EnetAddr);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_AddExactMatchMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr)
++ return p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr(h_FmMac, p_EnetAddr);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_RemovelExactMatchMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr)
++ return p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr(h_FmMac, p_EnetAddr);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_GetVesrion (t_Handle h_FmMac, uint32_t *macVersion)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_GetVersion)
++ return p_FmMacControllerDriver->f_FM_MAC_GetVersion(h_FmMac, macVersion);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_GetId (t_Handle h_FmMac, uint32_t *macId)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_GetId)
++ return p_FmMacControllerDriver->f_FM_MAC_GetId(h_FmMac, macId);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_SetPromiscuous (t_Handle h_FmMac, bool newVal)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous)
++ return p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous(h_FmMac, newVal);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_AdjustLink(t_Handle h_FmMac, e_EnetSpeed speed, bool fullDuplex)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_AdjustLink)
++ return p_FmMacControllerDriver->f_FM_MAC_AdjustLink(h_FmMac, speed, fullDuplex);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_RestartAutoneg(t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg)
++ return p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg(h_FmMac);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_MII_WritePhyReg (t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t data)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg)
++ return p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg(h_FmMac, phyAddr, reg, data);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++t_Error FM_MAC_MII_ReadPhyReg(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg)
++ return p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg(h_FmMac, phyAddr, reg, p_Data);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++/* ......................................................................... */
++
++uint16_t FM_MAC_GetMaxFrameLength(t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmMacControllerDriver, E_INVALID_HANDLE, 0);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength)
++ return p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength(h_FmMac);
++
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++ return 0;
++}
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++/*****************************************************************************/
++t_Error FM_MAC_DumpRegs(t_Handle h_FmMac)
++{
++ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacControllerDriver->f_FM_MAC_DumpRegs)
++ return p_FmMacControllerDriver->f_FM_MAC_DumpRegs(h_FmMac);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++#endif /* (defined(DEBUG_ERRORS) && ... */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h
+new file mode 100644
+index 00000000..77b9a89d
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h
+@@ -0,0 +1,225 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_mac.h
++
++ @Description FM MAC ...
++*//***************************************************************************/
++#ifndef __FM_MAC_H
++#define __FM_MAC_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++#include "fm_mac_ext.h"
++#include "fm_common.h"
++
++
++#define __ERR_MODULE__ MODULE_FM_MAC
++
++/**************************************************************************//**
++ @Description defaults
++*//***************************************************************************/
++
++
++#define DEFAULT_halfDuplex FALSE
++#define DEFAULT_padAndCrcEnable TRUE
++#define DEFAULT_resetOnInit FALSE
++
++
++typedef struct {
++ uint64_t addr; /* Ethernet Address */
++ t_List node;
++} t_EthHashEntry;
++#define ETH_HASH_ENTRY_OBJ(ptr) LIST_OBJECT(ptr, t_EthHashEntry, node)
++
++typedef struct {
++ uint16_t size;
++ t_List *p_Lsts;
++} t_EthHash;
++
++typedef struct {
++ t_Error (*f_FM_MAC_Init) (t_Handle h_FmMac);
++ t_Error (*f_FM_MAC_Free) (t_Handle h_FmMac);
++
++ t_Error (*f_FM_MAC_SetStatistics) (t_Handle h_FmMac, e_FmMacStatisticsLevel statisticsLevel);
++ t_Error (*f_FM_MAC_ConfigLoopback) (t_Handle h_FmMac, bool newVal);
++ t_Error (*f_FM_MAC_ConfigMaxFrameLength) (t_Handle h_FmMac, uint16_t newVal);
++ t_Error (*f_FM_MAC_ConfigWan) (t_Handle h_FmMac, bool flag);
++ t_Error (*f_FM_MAC_ConfigPadAndCrc) (t_Handle h_FmMac, bool newVal);
++ t_Error (*f_FM_MAC_ConfigHalfDuplex) (t_Handle h_FmMac, bool newVal);
++ t_Error (*f_FM_MAC_ConfigLengthCheck) (t_Handle h_FmMac, bool newVal);
++ t_Error (*f_FM_MAC_ConfigTbiPhyAddr) (t_Handle h_FmMac, uint8_t newVal);
++ t_Error (*f_FM_MAC_ConfigException) (t_Handle h_FmMac, e_FmMacExceptions, bool enable);
++ t_Error (*f_FM_MAC_ConfigResetOnInit) (t_Handle h_FmMac, bool enable);
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++ t_Error (*f_FM_MAC_ConfigSkipFman11Workaround) (t_Handle h_FmMac);
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++
++ t_Error (*f_FM_MAC_SetException) (t_Handle h_FmMac, e_FmMacExceptions ex, bool enable);
++
++ t_Error (*f_FM_MAC_Enable) (t_Handle h_FmMac, e_CommMode mode);
++ t_Error (*f_FM_MAC_Disable) (t_Handle h_FmMac, e_CommMode mode);
++ t_Error (*f_FM_MAC_Resume) (t_Handle h_FmMac);
++ t_Error (*f_FM_MAC_Enable1588TimeStamp) (t_Handle h_FmMac);
++ t_Error (*f_FM_MAC_Disable1588TimeStamp) (t_Handle h_FmMac);
++ t_Error (*f_FM_MAC_Reset) (t_Handle h_FmMac, bool wait);
++
++ t_Error (*f_FM_MAC_SetTxAutoPauseFrames) (t_Handle h_FmMac,
++ uint16_t pauseTime);
++ t_Error (*f_FM_MAC_SetTxPauseFrames) (t_Handle h_FmMac,
++ uint8_t priority,
++ uint16_t pauseTime,
++ uint16_t threshTime);
++ t_Error (*f_FM_MAC_SetRxIgnorePauseFrames) (t_Handle h_FmMac, bool en);
++
++ t_Error (*f_FM_MAC_ResetCounters) (t_Handle h_FmMac);
++ t_Error (*f_FM_MAC_GetStatistics) (t_Handle h_FmMac, t_FmMacStatistics *p_Statistics);
++
++ t_Error (*f_FM_MAC_ModifyMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++ t_Error (*f_FM_MAC_AddHashMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++ t_Error (*f_FM_MAC_RemoveHashMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++ t_Error (*f_FM_MAC_AddExactMatchMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++ t_Error (*f_FM_MAC_RemovelExactMatchMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++
++ t_Error (*f_FM_MAC_SetPromiscuous) (t_Handle h_FmMac, bool newVal);
++ t_Error (*f_FM_MAC_AdjustLink) (t_Handle h_FmMac, e_EnetSpeed speed, bool fullDuplex);
++ t_Error (*f_FM_MAC_RestartAutoneg) (t_Handle h_FmMac);
++
++ t_Error (*f_FM_MAC_SetWakeOnLan) (t_Handle h_FmMac, bool en);
++
++ t_Error (*f_FM_MAC_GetId) (t_Handle h_FmMac, uint32_t *macId);
++
++ t_Error (*f_FM_MAC_GetVersion) (t_Handle h_FmMac, uint32_t *macVersion);
++
++ uint16_t (*f_FM_MAC_GetMaxFrameLength) (t_Handle h_FmMac);
++
++ t_Error (*f_FM_MAC_MII_WritePhyReg)(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t data);
++ t_Error (*f_FM_MAC_MII_ReadPhyReg)(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_Error (*f_FM_MAC_DumpRegs) (t_Handle h_FmMac);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ t_Handle h_Fm;
++ t_FmRevisionInfo fmRevInfo;
++ e_EnetMode enetMode;
++ uint8_t macId;
++ bool resetOnInit;
++ uint16_t clkFreq;
++} t_FmMacControllerDriver;
++
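++/* Dispatch pattern: each FM_MAC_Xxx() wrapper in fm_mac.c checks the
++ * corresponding f_FM_MAC_Xxx pointer and returns E_NOT_SUPPORTED when the
++ * bound MAC type leaves it NULL (e.g. dTSEC sets f_FM_MAC_ConfigWan to
++ * NULL). */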
++
++#if (DPAA_VERSION == 10)
++t_Handle DTSEC_Config(t_FmMacParams *p_FmMacParam);
++t_Handle TGEC_Config(t_FmMacParams *p_FmMacParams);
++#else
++t_Handle MEMAC_Config(t_FmMacParams *p_FmMacParam);
++#endif /* (DPAA_VERSION == 10) */
++uint16_t FM_MAC_GetMaxFrameLength(t_Handle h_FmMac);
++
++
++/* ........................................................................... */
++
++static __inline__ t_EthHashEntry *DequeueAddrFromHashEntry(t_List *p_AddrLst)
++{
++ t_EthHashEntry *p_HashEntry = NULL;
++ if (!LIST_IsEmpty(p_AddrLst))
++ {
++ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_AddrLst->p_Next);
++ LIST_DelAndInit(&p_HashEntry->node);
++ }
++ return p_HashEntry;
++}
++
++/* ........................................................................... */
++
++static __inline__ void FreeHashTable(t_EthHash *p_Hash)
++{
++ t_EthHashEntry *p_HashEntry;
++ int i = 0;
++
++ if (p_Hash)
++ {
++ if (p_Hash->p_Lsts)
++ {
++ for (i=0; i<p_Hash->size; i++)
++ {
++ p_HashEntry = DequeueAddrFromHashEntry(&p_Hash->p_Lsts[i]);
++ while (p_HashEntry)
++ {
++ XX_Free(p_HashEntry);
++ p_HashEntry = DequeueAddrFromHashEntry(&p_Hash->p_Lsts[i]);
++ }
++ }
++
++ XX_Free(p_Hash->p_Lsts);
++ }
++
++ XX_Free(p_Hash);
++ }
++}
++
++/* ........................................................................... */
++
++static __inline__ t_EthHash * AllocHashTable(uint16_t size)
++{
++ uint32_t i;
++ t_EthHash *p_Hash;
++
++ /* Allocate address hash table */
++ p_Hash = (t_EthHash *)XX_Malloc(sizeof(t_EthHash));
++ if (!p_Hash)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Address hash table"));
++ return NULL;
++ }
++ p_Hash->size = size;
++
++ p_Hash->p_Lsts = (t_List *)XX_Malloc(p_Hash->size*sizeof(t_List));
++ if (!p_Hash->p_Lsts)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Address hash table"));
++ XX_Free(p_Hash);
++ return NULL;
++ }
++
++ for (i=0 ; i<p_Hash->size; i++)
++ INIT_LIST(&p_Hash->p_Lsts[i]);
++
++ return p_Hash;
++}
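++
++/*
++ * Typical lifecycle (editor's sketch, not part of the original driver;
++ * HASH_TABLE_SIZE is a hypothetical caller-chosen constant):
++ *
++ *	t_EthHash *p_Hash = AllocHashTable(HASH_TABLE_SIZE);
++ *	if (!p_Hash)
++ *		... handle E_NO_MEMORY ...
++ *	... queue t_EthHashEntry items on p_Hash->p_Lsts[bucket] and
++ *	pull them back with DequeueAddrFromHashEntry() ...
++ *	FreeHashTable(p_Hash);
++ */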
++
++
++#endif /* __FM_MAC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.c
+new file mode 100644
+index 00000000..b6a4ca25
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.c
+@@ -0,0 +1,119 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "fman_crc32.h"
++#include "common/general.h"
++
++
++/* precomputed CRC values for address hashing */
++static const uint32_t crc_tbl[256] = {
++ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
++ 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
++ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
++ 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
++ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
++ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
++ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
++ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
++ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
++ 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
++ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
++ 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
++ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
++ 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
++ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
++ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
++ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
++ 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
++ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
++ 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
++ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
++ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
++ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
++ 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
++ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
++ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
++ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
++ 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
++ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
++ 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
++ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
++ 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
++ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
++ 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
++ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
++ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
++ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
++ 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
++ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
++ 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
++ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
++ 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
++ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
++};
++
++/* Get the mirrored (bit-reversed) value of a byte. (e.g. binary 11010011 --> 11001011) */
++static inline uint8_t get_mirror8(uint8_t n)
++{
++ uint8_t mirror[16] = {
++ 0x00, 0x08, 0x04, 0x0c, 0x02, 0x0a, 0x06, 0x0e,
++ 0x01, 0x09, 0x05, 0x0d, 0x03, 0x0b, 0x07, 0x0f
++ };
++ return (uint8_t)(((mirror[n & 0x0f] << 4) | (mirror[n >> 4])));
++}
++
++static inline uint32_t get_mirror32(uint32_t n)
++{
++ return ((uint32_t)get_mirror8((uint8_t)(n))<<24) |
++ ((uint32_t)get_mirror8((uint8_t)(n>>8))<<16) |
++ ((uint32_t)get_mirror8((uint8_t)(n>>16))<<8) |
++ ((uint32_t)get_mirror8((uint8_t)(n>>24)));
++}
++
++uint32_t get_mac_addr_crc(uint64_t _addr)
++{
++ uint32_t i;
++ uint8_t data;
++ uint32_t crc;
++
++ /* CRC calculation */
++ crc = 0xffffffff;
++ for (i = 0; i < 6; i++) {
++ data = (uint8_t)(_addr >> ((5-i)*8));
++ crc = crc ^ data;
++ crc = crc_tbl[crc&0xff] ^ (crc>>8);
++ }
++
++ crc = get_mirror32(crc);
++ return crc;
++}
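++
++/*
++ * Illustrative usage (editor's sketch, not part of the original driver):
++ * the mirrored CRC-32 computed above is what the MAC code feeds into the
++ * per-MAC hash filters, e.g. for a dTSEC:
++ *
++ *	uint32_t crc = get_mac_addr_crc(addr64);
++ *	fman_dtsec_set_hash_table(regs, crc, mcast, ghtx);
++ *
++ * (or fman_memac_set_hash_table_entry() for a mEMAC), where addr64 holds
++ * the six MAC-address octets in its low 48 bits, most significant octet
++ * first - the same layout the loop above consumes.
++ */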
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.h
+new file mode 100644
+index 00000000..6e32fdc6
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.h
+@@ -0,0 +1,43 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef __FMAN_CRC32_H
++#define __FMAN_CRC32_H
++
++#include "common/general.h"
++
++
++uint32_t get_mac_addr_crc(uint64_t _addr);
++
++
++#endif /* __FMAN_CRC32_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec.c
+new file mode 100644
+index 00000000..5b092865
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec.c
+@@ -0,0 +1,845 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "fsl_fman_dtsec.h"
++
++
++void fman_dtsec_stop_rx(struct dtsec_regs *regs)
++{
++ /* Assert the graceful stop bit */
++ iowrite32be(ioread32be(&regs->rctrl) | RCTRL_GRS, &regs->rctrl);
++}
++
++void fman_dtsec_stop_tx(struct dtsec_regs *regs)
++{
++ /* Assert the graceful stop bit */
++ iowrite32be(ioread32be(&regs->tctrl) | DTSEC_TCTRL_GTS, &regs->tctrl);
++}
++
++void fman_dtsec_start_tx(struct dtsec_regs *regs)
++{
++ /* clear the graceful stop bit */
++ iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS, &regs->tctrl);
++}
++
++void fman_dtsec_start_rx(struct dtsec_regs *regs)
++{
++ /* clear the graceful stop bit */
++ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
++}
++
++void fman_dtsec_defconfig(struct dtsec_cfg *cfg)
++{
++ cfg->halfdup_on = DEFAULT_HALFDUP_ON;
++ cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
++ cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
++ cfg->halfdup_excess_defer = DEFAULT_HALFDUP_EXCESS_DEFER;
++ cfg->halfdup_no_backoff = DEFAULT_HALFDUP_NO_BACKOFF;
++ cfg->halfdup_bp_no_backoff = DEFAULT_HALFDUP_BP_NO_BACKOFF;
++ cfg->halfdup_alt_backoff_val = DEFAULT_HALFDUP_ALT_BACKOFF_VAL;
++ cfg->halfdup_alt_backoff_en = DEFAULT_HALFDUP_ALT_BACKOFF_EN;
++ cfg->rx_drop_bcast = DEFAULT_RX_DROP_BCAST;
++ cfg->rx_short_frm = DEFAULT_RX_SHORT_FRM;
++ cfg->rx_len_check = DEFAULT_RX_LEN_CHECK;
++ cfg->tx_pad_crc = DEFAULT_TX_PAD_CRC;
++ cfg->tx_crc = DEFAULT_TX_CRC;
++ cfg->rx_ctrl_acc = DEFAULT_RX_CTRL_ACC;
++ cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
++ cfg->tbipa = DEFAULT_TBIPA; /* PHY address 0 is reserved (DPAA RM) */
++ cfg->rx_prepend = DEFAULT_RX_PREPEND;
++ cfg->ptp_tsu_en = DEFAULT_PTP_TSU_EN;
++ cfg->ptp_exception_en = DEFAULT_PTP_EXCEPTION_EN;
++ cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
++ cfg->rx_preamble = DEFAULT_RX_PREAMBLE;
++ cfg->tx_preamble = DEFAULT_TX_PREAMBLE;
++ cfg->loopback = DEFAULT_LOOPBACK;
++ cfg->rx_time_stamp_en = DEFAULT_RX_TIME_STAMP_EN;
++ cfg->tx_time_stamp_en = DEFAULT_TX_TIME_STAMP_EN;
++ cfg->rx_flow = DEFAULT_RX_FLOW;
++ cfg->tx_flow = DEFAULT_TX_FLOW;
++ cfg->rx_group_hash_exd = DEFAULT_RX_GROUP_HASH_EXD;
++ cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
++ cfg->rx_promisc = DEFAULT_RX_PROMISC;
++ cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
++ cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
++ cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
++ cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
++ cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
++ cfg->tbi_phy_addr = DEFAULT_TBI_PHY_ADDR;
++ cfg->wake_on_lan = DEFAULT_WAKE_ON_LAN;
++}
++
++int fman_dtsec_init(struct dtsec_regs *regs, struct dtsec_cfg *cfg,
++ enum enet_interface iface_mode,
++ enum enet_speed iface_speed,
++ uint8_t *macaddr,
++ uint8_t fm_rev_maj,
++ uint8_t fm_rev_min,
++ uint32_t exception_mask)
++{
++ bool is_rgmii = FALSE;
++ bool is_sgmii = FALSE;
++ bool is_qsgmii = FALSE;
++ int i;
++ uint32_t tmp;
++
++ UNUSED(fm_rev_maj); UNUSED(fm_rev_min);
++
++ /* let's start with a soft reset */
++ iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
++ iowrite32be(0, &regs->maccfg1);
++
++ /*************dtsec_id2******************/
++ tmp = ioread32be(&regs->tsec_id2);
++
++ /* check RGMII support */
++ if (iface_mode == E_ENET_IF_RGMII ||
++ iface_mode == E_ENET_IF_RMII)
++ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
++ return -EINVAL;
++
++ if (iface_mode == E_ENET_IF_SGMII ||
++ iface_mode == E_ENET_IF_MII)
++ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
++ return -EINVAL;
++
++ /***************ECNTRL************************/
++
++ is_rgmii = (bool)((iface_mode == E_ENET_IF_RGMII) ? TRUE : FALSE);
++ is_sgmii = (bool)((iface_mode == E_ENET_IF_SGMII) ? TRUE : FALSE);
++ is_qsgmii = (bool)((iface_mode == E_ENET_IF_QSGMII) ? TRUE : FALSE);
++
++ tmp = 0;
++ if (is_rgmii || iface_mode == E_ENET_IF_GMII)
++ tmp |= DTSEC_ECNTRL_GMIIM;
++ if (is_sgmii)
++ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
++ if (is_qsgmii)
++ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
++ DTSEC_ECNTRL_QSGMIIM);
++ if (is_rgmii)
++ tmp |= DTSEC_ECNTRL_RPM;
++ if (iface_speed == E_ENET_SPEED_100)
++ tmp |= DTSEC_ECNTRL_R100M;
++
++ iowrite32be(tmp, &regs->ecntrl);
++ /***************ECNTRL************************/
++
++ /***************TCTRL************************/
++ tmp = 0;
++ if (cfg->halfdup_on)
++ tmp |= DTSEC_TCTRL_THDF;
++ if (cfg->tx_time_stamp_en)
++ tmp |= DTSEC_TCTRL_TTSE;
++
++ iowrite32be(tmp, &regs->tctrl);
++
++ /***************TCTRL************************/
++
++ /***************PTV************************/
++ tmp = 0;
++
++#ifdef FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1
++ if ((fm_rev_maj == 1) && (fm_rev_min == 0))
++ cfg->tx_pause_time += 2;
++#endif /* FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1 */
++
++ if (cfg->tx_pause_time)
++ tmp |= cfg->tx_pause_time;
++ if (cfg->tx_pause_time_extd)
++ tmp |= cfg->tx_pause_time_extd << PTV_PTE_OFST;
++ iowrite32be(tmp, &regs->ptv);
++
++ /***************RCTRL************************/
++ tmp = 0;
++ tmp |= ((uint32_t)(cfg->rx_prepend & 0x0000001f)) << 16;
++ if (cfg->rx_ctrl_acc)
++ tmp |= RCTRL_CFA;
++ if (cfg->rx_group_hash_exd)
++ tmp |= RCTRL_GHTX;
++ if (cfg->rx_time_stamp_en)
++ tmp |= RCTRL_RTSE;
++ if (cfg->rx_drop_bcast)
++ tmp |= RCTRL_BC_REJ;
++ if (cfg->rx_short_frm)
++ tmp |= RCTRL_RSF;
++ if (cfg->rx_promisc)
++ tmp |= RCTRL_PROM;
++
++ iowrite32be(tmp, &regs->rctrl);
++ /***************RCTRL************************/
++
++ /*
++ * Assign a PHY address to the TBI (TBIPA).
++ * This is done even when TBI is not selected, to avoid a conflict
++ * with the external PHY's physical address.
++ */
++ iowrite32be(cfg->tbipa, &regs->tbipa);
++
++ /***************TMR_CTL************************/
++ iowrite32be(0, &regs->tmr_ctrl);
++
++ if (cfg->ptp_tsu_en) {
++ tmp = 0;
++ tmp |= TMR_PEVENT_TSRE;
++ iowrite32be(tmp, &regs->tmr_pevent);
++
++ if (cfg->ptp_exception_en) {
++ tmp = 0;
++ tmp |= TMR_PEMASK_TSREEN;
++ iowrite32be(tmp, &regs->tmr_pemask);
++ }
++ }
++
++ /***************MACCFG1***********************/
++ tmp = 0;
++ if (cfg->loopback)
++ tmp |= MACCFG1_LOOPBACK;
++ if (cfg->rx_flow)
++ tmp |= MACCFG1_RX_FLOW;
++ if (cfg->tx_flow)
++ tmp |= MACCFG1_TX_FLOW;
++ iowrite32be(tmp, &regs->maccfg1);
++
++ /***************MACCFG1***********************/
++
++ /***************MACCFG2***********************/
++ tmp = 0;
++
++ if (iface_speed < E_ENET_SPEED_1000)
++ tmp |= MACCFG2_NIBBLE_MODE;
++ else if (iface_speed == E_ENET_SPEED_1000)
++ tmp |= MACCFG2_BYTE_MODE;
++
++ tmp |= ((uint32_t) cfg->preamble_len & 0x0000000f)
++ << PREAMBLE_LENGTH_SHIFT;
++
++ if (cfg->rx_preamble)
++ tmp |= MACCFG2_PRE_AM_Rx_EN;
++ if (cfg->tx_preamble)
++ tmp |= MACCFG2_PRE_AM_Tx_EN;
++ if (cfg->rx_len_check)
++ tmp |= MACCFG2_LENGTH_CHECK;
++ if (cfg->tx_pad_crc)
++ tmp |= MACCFG2_PAD_CRC_EN;
++ if (cfg->tx_crc)
++ tmp |= MACCFG2_CRC_EN;
++ if (!cfg->halfdup_on)
++ tmp |= MACCFG2_FULL_DUPLEX;
++ iowrite32be(tmp, &regs->maccfg2);
++
++ /***************MACCFG2***********************/
++
++ /***************IPGIFG************************/
++ tmp = (((cfg->non_back_to_back_ipg1 <<
++ IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
++ & IPGIFG_NON_BACK_TO_BACK_IPG_1)
++ | ((cfg->non_back_to_back_ipg2 <<
++ IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
++ & IPGIFG_NON_BACK_TO_BACK_IPG_2)
++ | ((cfg->min_ifg_enforcement <<
++ IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
++ & IPGIFG_MIN_IFG_ENFORCEMENT)
++ | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
++ iowrite32be(tmp, &regs->ipgifg);
++
++ /***************IPGIFG************************/
++
++ /***************HAFDUP************************/
++ tmp = 0;
++
++ if (cfg->halfdup_alt_backoff_en)
++ tmp = (uint32_t)(HAFDUP_ALT_BEB |
++ ((cfg->halfdup_alt_backoff_val & 0x0000000f)
++ << HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT));
++ if (cfg->halfdup_bp_no_backoff)
++ tmp |= HAFDUP_BP_NO_BACKOFF;
++ if (cfg->halfdup_no_backoff)
++ tmp |= HAFDUP_NO_BACKOFF;
++ if (cfg->halfdup_excess_defer)
++ tmp |= HAFDUP_EXCESS_DEFER;
++ tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
++ & HAFDUP_RETRANSMISSION_MAX);
++ tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
++
++ iowrite32be(tmp, &regs->hafdup);
++ /***************HAFDUP************************/
++
++ /***************MAXFRM************************/
++ /* Initialize MAXFRM */
++ iowrite32be(cfg->maximum_frame, &regs->maxfrm);
++
++ /***************MAXFRM************************/
++
++ /***************CAM1************************/
++ iowrite32be(0xffffffff, &regs->cam1);
++ iowrite32be(0xffffffff, &regs->cam2);
++
++ /***************IMASK************************/
++ iowrite32be(exception_mask, &regs->imask);
++ /***************IMASK************************/
++
++ /***************IEVENT************************/
++ iowrite32be(0xffffffff, &regs->ievent);
++
++ /***************MACSTNADDR1/2*****************/
++
++ tmp = (uint32_t)((macaddr[5] << 24) |
++ (macaddr[4] << 16) |
++ (macaddr[3] << 8) |
++ macaddr[2]);
++ iowrite32be(tmp, &regs->macstnaddr1);
++
++ tmp = (uint32_t)((macaddr[1] << 24) |
++ (macaddr[0] << 16));
++ iowrite32be(tmp, &regs->macstnaddr2);
++
++ /***************MACSTNADDR1/2*****************/
++
++ /*****************HASH************************/
++ for (i = 0; i < NUM_OF_HASH_REGS ; i++) {
++ /* Initialize IADDRx */
++ iowrite32be(0, &regs->igaddr[i]);
++ /* Initialize GADDRx */
++ iowrite32be(0, &regs->gaddr[i]);
++ }
++
++ fman_dtsec_reset_stat(regs);
++
++ return 0;
++}
++
++uint16_t fman_dtsec_get_max_frame_len(struct dtsec_regs *regs)
++{
++ return (uint16_t)ioread32be(&regs->maxfrm);
++}
++
++void fman_dtsec_set_max_frame_len(struct dtsec_regs *regs, uint16_t length)
++{
++ iowrite32be(length, &regs->maxfrm);
++}
++
++void fman_dtsec_set_mac_address(struct dtsec_regs *regs, uint8_t *adr)
++{
++ uint32_t tmp;
++
++ tmp = (uint32_t)((adr[5] << 24) |
++ (adr[4] << 16) |
++ (adr[3] << 8) |
++ adr[2]);
++ iowrite32be(tmp, &regs->macstnaddr1);
++
++ tmp = (uint32_t)((adr[1] << 24) |
++ (adr[0] << 16));
++ iowrite32be(tmp, &regs->macstnaddr2);
++}
++
++void fman_dtsec_get_mac_address(struct dtsec_regs *regs, uint8_t *macaddr)
++{
++ uint32_t tmp1, tmp2;
++
++ tmp1 = ioread32be(&regs->macstnaddr1);
++ tmp2 = ioread32be(&regs->macstnaddr2);
++
++ macaddr[0] = (uint8_t)((tmp2 & 0x00ff0000) >> 16);
++ macaddr[1] = (uint8_t)((tmp2 & 0xff000000) >> 24);
++ macaddr[2] = (uint8_t)(tmp1 & 0x000000ff);
++ macaddr[3] = (uint8_t)((tmp1 & 0x0000ff00) >> 8);
++ macaddr[4] = (uint8_t)((tmp1 & 0x00ff0000) >> 16);
++ macaddr[5] = (uint8_t)((tmp1 & 0xff000000) >> 24);
++}
++
++void fman_dtsec_set_hash_table(struct dtsec_regs *regs, uint32_t crc, bool mcast, bool ghtx)
++{
++ int32_t bucket;
++ if (ghtx)
++ bucket = (int32_t)((crc >> 23) & 0x1ff);
++ else {
++ bucket = (int32_t)((crc >> 24) & 0xff);
++ /* If !ghtx and mcast, the bit must be set in gaddr instead of igaddr. */
++ if (mcast)
++ bucket += 0x100;
++ }
++ fman_dtsec_set_bucket(regs, bucket, TRUE);
++}
++
++void fman_dtsec_set_bucket(struct dtsec_regs *regs, int bucket, bool enable)
++{
++ int reg_idx = (bucket >> 5) & 0xf;
++ int bit_idx = bucket & 0x1f;
++ uint32_t bit_mask = 0x80000000 >> bit_idx;
++ uint32_t *reg;
++
++ if (reg_idx > 7)
++ reg = &regs->gaddr[reg_idx-8];
++ else
++ reg = &regs->igaddr[reg_idx];
++
++ if (enable)
++ iowrite32be(ioread32be(reg) | bit_mask, reg);
++ else
++ iowrite32be(ioread32be(reg) & (~bit_mask), reg);
++}
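++
++/*
++ * Worked example (editor's note, not part of the original driver): with
++ * ghtx == FALSE and a multicast address whose CRC is 0x12345678,
++ * fman_dtsec_set_hash_table() computes bucket = ((0x12345678 >> 24) &
++ * 0xff) + 0x100 = 18 + 256 = 274; fman_dtsec_set_bucket() then maps this
++ * to reg_idx = (274 >> 5) & 0xf = 8, i.e. gaddr[0], and sets
++ * bit_mask = 0x80000000 >> (274 & 0x1f) = 0x00002000.
++ */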
++
++void fman_dtsec_reset_filter_table(struct dtsec_regs *regs, bool mcast, bool ucast)
++{
++ int i;
++ bool ghtx;
++
++ ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? TRUE : FALSE);
++
++ if (ucast || (ghtx && mcast)) {
++ for (i = 0; i < NUM_OF_HASH_REGS; i++)
++ iowrite32be(0, &regs->igaddr[i]);
++ }
++ if (mcast) {
++ for (i = 0; i < NUM_OF_HASH_REGS; i++)
++ iowrite32be(0, &regs->gaddr[i]);
++ }
++}
++
++int fman_dtsec_set_tbi_phy_addr(struct dtsec_regs *regs,
++ uint8_t addr)
++{
++ if (addr > 0 && addr < 32)
++ iowrite32be(addr, &regs->tbipa);
++ else
++ return -EINVAL;
++
++ return 0;
++}
++
++void fman_dtsec_set_wol(struct dtsec_regs *regs, bool en)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->maccfg2);
++ if (en)
++ tmp |= MACCFG2_MAGIC_PACKET_EN;
++ else
++ tmp &= ~MACCFG2_MAGIC_PACKET_EN;
++ iowrite32be(tmp, &regs->maccfg2);
++}
++
++int fman_dtsec_adjust_link(struct dtsec_regs *regs,
++ enum enet_interface iface_mode,
++ enum enet_speed speed, bool full_dx)
++{
++ uint32_t tmp;
++
++ UNUSED(iface_mode);
++
++ if ((speed == E_ENET_SPEED_1000) && !full_dx)
++ return -EINVAL;
++
++ tmp = ioread32be(&regs->maccfg2);
++ if (!full_dx)
++ tmp &= ~MACCFG2_FULL_DUPLEX;
++ else
++ tmp |= MACCFG2_FULL_DUPLEX;
++
++ tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
++ if (speed < E_ENET_SPEED_1000)
++ tmp |= MACCFG2_NIBBLE_MODE;
++ else if (speed == E_ENET_SPEED_1000)
++ tmp |= MACCFG2_BYTE_MODE;
++ iowrite32be(tmp, &regs->maccfg2);
++
++ tmp = ioread32be(&regs->ecntrl);
++ if (speed == E_ENET_SPEED_100)
++ tmp |= DTSEC_ECNTRL_R100M;
++ else
++ tmp &= ~DTSEC_ECNTRL_R100M;
++ iowrite32be(tmp, &regs->ecntrl);
++
++ return 0;
++}
++
++void fman_dtsec_set_uc_promisc(struct dtsec_regs *regs, bool enable)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->rctrl);
++
++ if (enable)
++ tmp |= RCTRL_UPROM;
++ else
++ tmp &= ~RCTRL_UPROM;
++
++ iowrite32be(tmp, &regs->rctrl);
++}
++
++void fman_dtsec_set_mc_promisc(struct dtsec_regs *regs, bool enable)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->rctrl);
++
++ if (enable)
++ tmp |= RCTRL_MPROM;
++ else
++ tmp &= ~RCTRL_MPROM;
++
++ iowrite32be(tmp, &regs->rctrl);
++}
++
++bool fman_dtsec_get_clear_carry_regs(struct dtsec_regs *regs,
++ uint32_t *car1, uint32_t *car2)
++{
++ /* read carry registers */
++ *car1 = ioread32be(&regs->car1);
++ *car2 = ioread32be(&regs->car2);
++ /* clear carry registers */
++ if (*car1)
++ iowrite32be(*car1, &regs->car1);
++ if (*car2)
++ iowrite32be(*car2, &regs->car2);
++
++ return (bool)((*car1 | *car2) ? TRUE : FALSE);
++}
++
++void fman_dtsec_reset_stat(struct dtsec_regs *regs)
++{
++ /* clear HW counters */
++ iowrite32be(ioread32be(&regs->ecntrl) |
++ DTSEC_ECNTRL_CLRCNT, &regs->ecntrl);
++}
++
++int fman_dtsec_set_stat_level(struct dtsec_regs *regs, enum dtsec_stat_level level)
++{
++ switch (level) {
++ case E_MAC_STAT_NONE:
++ iowrite32be(0xffffffff, &regs->cam1);
++ iowrite32be(0xffffffff, &regs->cam2);
++ iowrite32be(ioread32be(&regs->ecntrl) & ~DTSEC_ECNTRL_STEN,
++ &regs->ecntrl);
++ iowrite32be(ioread32be(&regs->imask) & ~DTSEC_IMASK_MSROEN,
++ &regs->imask);
++ break;
++ case E_MAC_STAT_PARTIAL:
++ iowrite32be(CAM1_ERRORS_ONLY, &regs->cam1);
++ iowrite32be(CAM2_ERRORS_ONLY, &regs->cam2);
++ iowrite32be(ioread32be(&regs->ecntrl) | DTSEC_ECNTRL_STEN,
++ &regs->ecntrl);
++ iowrite32be(ioread32be(&regs->imask) | DTSEC_IMASK_MSROEN,
++ &regs->imask);
++ break;
++ case E_MAC_STAT_MIB_GRP1:
++ iowrite32be((uint32_t)~CAM1_MIB_GRP_1, &regs->cam1);
++ iowrite32be((uint32_t)~CAM2_MIB_GRP_1, &regs->cam2);
++ iowrite32be(ioread32be(&regs->ecntrl) | DTSEC_ECNTRL_STEN,
++ &regs->ecntrl);
++ iowrite32be(ioread32be(&regs->imask) | DTSEC_IMASK_MSROEN,
++ &regs->imask);
++ break;
++ case E_MAC_STAT_FULL:
++ iowrite32be(0, &regs->cam1);
++ iowrite32be(0, &regs->cam2);
++ iowrite32be(ioread32be(&regs->ecntrl) | DTSEC_ECNTRL_STEN,
++ &regs->ecntrl);
++ iowrite32be(ioread32be(&regs->imask) | DTSEC_IMASK_MSROEN,
++ &regs->imask);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++void fman_dtsec_set_ts(struct dtsec_regs *regs, bool en)
++{
++ if (en) {
++ iowrite32be(ioread32be(&regs->rctrl) | RCTRL_RTSE,
++ &regs->rctrl);
++ iowrite32be(ioread32be(&regs->tctrl) | DTSEC_TCTRL_TTSE,
++ &regs->tctrl);
++ } else {
++ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_RTSE,
++ &regs->rctrl);
++ iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_TTSE,
++ &regs->tctrl);
++ }
++}
++
++void fman_dtsec_enable(struct dtsec_regs *regs, bool apply_rx, bool apply_tx)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->maccfg1);
++
++ if (apply_rx)
++ tmp |= MACCFG1_RX_EN ;
++
++ if (apply_tx)
++ tmp |= MACCFG1_TX_EN ;
++
++ iowrite32be(tmp, &regs->maccfg1);
++}
++
++void fman_dtsec_clear_addr_in_paddr(struct dtsec_regs *regs, uint8_t paddr_num)
++{
++ iowrite32be(0, &regs->macaddr[paddr_num].exact_match1);
++ iowrite32be(0, &regs->macaddr[paddr_num].exact_match2);
++}
++
++void fman_dtsec_add_addr_in_paddr(struct dtsec_regs *regs,
++ uint64_t addr,
++ uint8_t paddr_num)
++{
++ uint32_t tmp;
++
++ tmp = (uint32_t)(addr);
++ /* swap */
++ tmp = (((tmp & 0x000000FF) << 24) |
++ ((tmp & 0x0000FF00) << 8) |
++ ((tmp & 0x00FF0000) >> 8) |
++ ((tmp & 0xFF000000) >> 24));
++ iowrite32be(tmp, &regs->macaddr[paddr_num].exact_match1);
++
++ tmp = (uint32_t)(addr>>32);
++ /* swap */
++ tmp = (((tmp & 0x000000FF) << 24) |
++ ((tmp & 0x0000FF00) << 8) |
++ ((tmp & 0x00FF0000) >> 8) |
++ ((tmp & 0xFF000000) >> 24));
++ iowrite32be(tmp, &regs->macaddr[paddr_num].exact_match2);
++}
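++
++/*
++ * Byte-order example (editor's note, not part of the original driver):
++ * for addr = 0x0000AABBCCDDEEFF, the low word 0xCCDDEEFF is byte-swapped
++ * to 0xFFEEDDCC and written to exact_match1, while the high word
++ * 0x0000AABB is byte-swapped to 0xBBAA0000 and written to exact_match2.
++ */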
++
++void fman_dtsec_disable(struct dtsec_regs *regs, bool apply_rx, bool apply_tx)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->maccfg1);
++
++ if (apply_rx)
++ tmp &= ~MACCFG1_RX_EN;
++
++ if (apply_tx)
++ tmp &= ~MACCFG1_TX_EN;
++
++ iowrite32be(tmp, &regs->maccfg1);
++}
++
++void fman_dtsec_set_tx_pause_frames(struct dtsec_regs *regs, uint16_t time)
++{
++ uint32_t ptv = 0;
++
++ /* fixme: don't enable tx pause for half-duplex */
++
++ if (time) {
++ ptv = ioread32be(&regs->ptv);
++ ptv &= 0xffff0000;
++ ptv |= time & 0x0000ffff;
++ iowrite32be(ptv, &regs->ptv);
++
++ /* trigger the transmission of a flow-control pause frame */
++ iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
++ &regs->maccfg1);
++ } else
++ iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
++ &regs->maccfg1);
++}
++
++void fman_dtsec_handle_rx_pause(struct dtsec_regs *regs, bool en)
++{
++ uint32_t tmp;
++
++ /* todo: check if mac is set to full-duplex */
++
++ tmp = ioread32be(&regs->maccfg1);
++ if (en)
++ tmp |= MACCFG1_RX_FLOW;
++ else
++ tmp &= ~MACCFG1_RX_FLOW;
++ iowrite32be(tmp, &regs->maccfg1);
++}
++
++uint32_t fman_dtsec_get_rctrl(struct dtsec_regs *regs)
++{
++ return ioread32be(&regs->rctrl);
++}
++
++uint32_t fman_dtsec_get_revision(struct dtsec_regs *regs)
++{
++ return ioread32be(&regs->tsec_id);
++}
++
++uint32_t fman_dtsec_get_event(struct dtsec_regs *regs, uint32_t ev_mask)
++{
++ return ioread32be(&regs->ievent) & ev_mask;
++}
++
++void fman_dtsec_ack_event(struct dtsec_regs *regs, uint32_t ev_mask)
++{
++ iowrite32be(ev_mask, &regs->ievent);
++}
++
++uint32_t fman_dtsec_get_interrupt_mask(struct dtsec_regs *regs)
++{
++ return ioread32be(&regs->imask);
++}
++
++uint32_t fman_dtsec_check_and_clear_tmr_event(struct dtsec_regs *regs)
++{
++ uint32_t event;
++
++ event = ioread32be(&regs->tmr_pevent);
++ event &= ioread32be(&regs->tmr_pemask);
++
++ if (event)
++ iowrite32be(event, &regs->tmr_pevent);
++ return event;
++}
++
++void fman_dtsec_enable_tmr_interrupt(struct dtsec_regs *regs)
++{
++ iowrite32be(ioread32be(&regs->tmr_pemask) | TMR_PEMASK_TSREEN,
++ &regs->tmr_pemask);
++}
++
++void fman_dtsec_disable_tmr_interrupt(struct dtsec_regs *regs)
++{
++ iowrite32be(ioread32be(&regs->tmr_pemask) & ~TMR_PEMASK_TSREEN,
++ &regs->tmr_pemask);
++}
++
++void fman_dtsec_enable_interrupt(struct dtsec_regs *regs, uint32_t ev_mask)
++{
++ iowrite32be(ioread32be(&regs->imask) | ev_mask, &regs->imask);
++}
++
++void fman_dtsec_disable_interrupt(struct dtsec_regs *regs, uint32_t ev_mask)
++{
++ iowrite32be(ioread32be(&regs->imask) & ~ev_mask, &regs->imask);
++}
++
++uint32_t fman_dtsec_get_stat_counter(struct dtsec_regs *regs,
++ enum dtsec_stat_counters reg_name)
++{
++ uint32_t ret_val;
++
++ switch (reg_name) {
++ case E_DTSEC_STAT_TR64:
++ ret_val = ioread32be(&regs->tr64);
++ break;
++ case E_DTSEC_STAT_TR127:
++ ret_val = ioread32be(&regs->tr127);
++ break;
++ case E_DTSEC_STAT_TR255:
++ ret_val = ioread32be(&regs->tr255);
++ break;
++ case E_DTSEC_STAT_TR511:
++ ret_val = ioread32be(&regs->tr511);
++ break;
++ case E_DTSEC_STAT_TR1K:
++ ret_val = ioread32be(&regs->tr1k);
++ break;
++ case E_DTSEC_STAT_TRMAX:
++ ret_val = ioread32be(&regs->trmax);
++ break;
++ case E_DTSEC_STAT_TRMGV:
++ ret_val = ioread32be(&regs->trmgv);
++ break;
++ case E_DTSEC_STAT_RBYT:
++ ret_val = ioread32be(&regs->rbyt);
++ break;
++ case E_DTSEC_STAT_RPKT:
++ ret_val = ioread32be(&regs->rpkt);
++ break;
++ case E_DTSEC_STAT_RMCA:
++ ret_val = ioread32be(&regs->rmca);
++ break;
++ case E_DTSEC_STAT_RBCA:
++ ret_val = ioread32be(&regs->rbca);
++ break;
++ case E_DTSEC_STAT_RXPF:
++ ret_val = ioread32be(&regs->rxpf);
++ break;
++ case E_DTSEC_STAT_RALN:
++ ret_val = ioread32be(&regs->raln);
++ break;
++ case E_DTSEC_STAT_RFLR:
++ ret_val = ioread32be(&regs->rflr);
++ break;
++ case E_DTSEC_STAT_RCDE:
++ ret_val = ioread32be(&regs->rcde);
++ break;
++ case E_DTSEC_STAT_RCSE:
++ ret_val = ioread32be(&regs->rcse);
++ break;
++ case E_DTSEC_STAT_RUND:
++ ret_val = ioread32be(&regs->rund);
++ break;
++ case E_DTSEC_STAT_ROVR:
++ ret_val = ioread32be(&regs->rovr);
++ break;
++ case E_DTSEC_STAT_RFRG:
++ ret_val = ioread32be(&regs->rfrg);
++ break;
++ case E_DTSEC_STAT_RJBR:
++ ret_val = ioread32be(&regs->rjbr);
++ break;
++ case E_DTSEC_STAT_RDRP:
++ ret_val = ioread32be(&regs->rdrp);
++ break;
++ case E_DTSEC_STAT_TFCS:
++ ret_val = ioread32be(&regs->tfcs);
++ break;
++ case E_DTSEC_STAT_TBYT:
++ ret_val = ioread32be(&regs->tbyt);
++ break;
++ case E_DTSEC_STAT_TPKT:
++ ret_val = ioread32be(&regs->tpkt);
++ break;
++ case E_DTSEC_STAT_TMCA:
++ ret_val = ioread32be(&regs->tmca);
++ break;
++ case E_DTSEC_STAT_TBCA:
++ ret_val = ioread32be(&regs->tbca);
++ break;
++ case E_DTSEC_STAT_TXPF:
++ ret_val = ioread32be(&regs->txpf);
++ break;
++ case E_DTSEC_STAT_TNCL:
++ ret_val = ioread32be(&regs->tncl);
++ break;
++ case E_DTSEC_STAT_TDRP:
++ ret_val = ioread32be(&regs->tdrp);
++ break;
++ default:
++ ret_val = 0;
++ }
++
++ return ret_val;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec_mii_acc.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec_mii_acc.c
+new file mode 100644
+index 00000000..8819f8fc
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec_mii_acc.c
+@@ -0,0 +1,163 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "common/general.h"
++#include "fsl_fman_dtsec_mii_acc.h"
++
++
++/**
++ * dtsec_mii_get_div() - calculates the value of the dtsec mii divider
++ * @dtsec_freq: dtsec clock frequency (in MHz)
++ *
++ * This function calculates the dtsec mii clock divider that determines
++ * the MII MDC clock. The MII MDC clock is set to work in the range
++ * of 1.5 to 2.5 MHz.
++ * The output of this function is the value of MIIMCFG[MgmtClk] which
++ * implicitly determines the divider value.
++ * Note: the dTSEC system clock is equal to 1/2 of the FMan clock.
++ *
++ * The table below, which reflects dtsec_mii_get_div() functionality,
++ * shows the relation among dtsec_freq, MgmtClk, the actual divider
++ * and the MII frequency:
++ *
++ * dtsec freq   MgmtClk     div        MII freq MHz
++ * [0.....79]     1      (1/4)(1/8)   [0 to 2.5]
++ * [80...119]     2      (1/6)(1/8)   [1.6 to 2.5]
++ * [120..159]     3      (1/8)(1/8)   [1.8 to 2.5]
++ * [160..199]     4      (1/10)(1/8)  [2.0 to 2.5]
++ * [200..279]     5      (1/14)(1/8)  [1.8 to 2.5]
++ * [280..399]     6      (1/20)(1/8)  [1.7 to 2.5]
++ * [400..560]     7      (1/28)(1/8)  [1.8 to 2.5]
++ * [560..frq]     7      (1/28)(1/8)  [frq/224]
++ *
++ * Returns: the MIIMCFG[MgmtClk] appropriate value
++ */
++
++static uint8_t dtsec_mii_get_div(uint16_t dtsec_freq)
++{
++ uint16_t mgmt_clk;
++
++ if (dtsec_freq < 80) mgmt_clk = 1;
++ else if (dtsec_freq < 120) mgmt_clk = 2;
++ else if (dtsec_freq < 160) mgmt_clk = 3;
++ else if (dtsec_freq < 200) mgmt_clk = 4;
++ else if (dtsec_freq < 280) mgmt_clk = 5;
++ else if (dtsec_freq < 400) mgmt_clk = 6;
++ else mgmt_clk = 7;
++
++ return (uint8_t)mgmt_clk;
++}
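++
++/*
++ * Worked example (editor's note, not part of the original driver): for an
++ * arbitrary dtsec_freq of 166 (MHz), the chain above selects mgmt_clk = 4,
++ * i.e. an effective divider of (1/10)(1/8) = 1/80, giving an MDC clock of
++ * about 166/80 ~= 2.08 MHz - inside the required 1.5 to 2.5 MHz window.
++ */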
++
++void fman_dtsec_mii_reset(struct dtsec_mii_reg *regs)
++{
++ /* Reset the management interface */
++ iowrite32be(ioread32be(&regs->miimcfg) | MIIMCFG_RESET_MGMT,
++ &regs->miimcfg);
++ iowrite32be(ioread32be(&regs->miimcfg) & ~MIIMCFG_RESET_MGMT,
++ &regs->miimcfg);
++}
++
++
++int fman_dtsec_mii_write_reg(struct dtsec_mii_reg *regs, uint8_t addr,
++ uint8_t reg, uint16_t data, uint16_t dtsec_freq)
++{
++ uint32_t tmp;
++
++ /* Setup the MII Mgmt clock speed */
++ iowrite32be((uint32_t)dtsec_mii_get_div(dtsec_freq), &regs->miimcfg);
++ wmb();
++
++ /* Stop the MII management read cycle */
++ iowrite32be(0, &regs->miimcom);
++ /* Dummy read to make sure MIIMCOM is written */
++ tmp = ioread32be(&regs->miimcom);
++ wmb();
++
++ /* Setting up MII Management Address Register */
++ tmp = (uint32_t)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
++ iowrite32be(tmp, &regs->miimadd);
++ wmb();
++
++ /* Setting up MII Management Control Register with data */
++ iowrite32be((uint32_t)data, &regs->miimcon);
++ /* Dummy read to make sure MIIMCON is written */
++ tmp = ioread32be(&regs->miimcon);
++ wmb();
++
++ /* Wait until MII management write is complete */
++ /* todo: a timeout could be useful here */
++ while ((ioread32be(&regs->miimind)) & MIIMIND_BUSY)
++ /* busy wait */;
++
++ return 0;
++}
++
++int fman_dtsec_mii_read_reg(struct dtsec_mii_reg *regs, uint8_t addr,
++ uint8_t reg, uint16_t *data, uint16_t dtsec_freq)
++{
++ uint32_t tmp;
++
++ /* Setup the MII Mgmt clock speed */
++ iowrite32be((uint32_t)dtsec_mii_get_div(dtsec_freq), &regs->miimcfg);
++ wmb();
++
++ /* Setting up the MII Management Address Register */
++ tmp = (uint32_t)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
++ iowrite32be(tmp, &regs->miimadd);
++ wmb();
++
++ /* Perform an MII management read cycle */
++ iowrite32be(MIIMCOM_READ_CYCLE, &regs->miimcom);
++ /* Dummy read to make sure MIIMCOM is written */
++ tmp = ioread32be(&regs->miimcom);
++ wmb();
++
++ /* Wait until MII management read is complete */
++ /* todo: a timeout could be useful here */
++ while ((ioread32be(&regs->miimind)) & MIIMIND_BUSY)
++ /* busy wait */;
++
++ /* Read MII management status */
++ *data = (uint16_t)ioread32be(&regs->miimstat);
++ wmb();
++
++ iowrite32be(0, &regs->miimcom);
++ /* Dummy read to make sure MIIMCOM is written */
++ tmp = ioread32be(&regs->miimcom);
++
++ /* All-ones read data usually means no PHY responded at this address */
++ if (*data == 0xffff)
++ return -ENXIO;
++
++ return 0;
++}
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac.c
+new file mode 100644
+index 00000000..00995a10
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac.c
+@@ -0,0 +1,511 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "fsl_fman_memac.h"
++
++
++uint32_t fman_memac_get_event(struct memac_regs *regs, uint32_t ev_mask)
++{
++ return ioread32be(&regs->ievent) & ev_mask;
++}
++
++uint32_t fman_memac_get_interrupt_mask(struct memac_regs *regs)
++{
++ return ioread32be(&regs->imask);
++}
++
++void fman_memac_ack_event(struct memac_regs *regs, uint32_t ev_mask)
++{
++ iowrite32be(ev_mask, &regs->ievent);
++}
++
++void fman_memac_set_promiscuous(struct memac_regs *regs, bool val)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++
++ if (val)
++ tmp |= CMD_CFG_PROMIS_EN;
++ else
++ tmp &= ~CMD_CFG_PROMIS_EN;
++
++ iowrite32be(tmp, &regs->command_config);
++}
++
++void fman_memac_clear_addr_in_paddr(struct memac_regs *regs,
++ uint8_t paddr_num)
++{
++ if (paddr_num == 0) {
++ iowrite32be(0, &regs->mac_addr0.mac_addr_l);
++ iowrite32be(0, &regs->mac_addr0.mac_addr_u);
++ } else {
++ iowrite32be(0x0, &regs->mac_addr[paddr_num - 1].mac_addr_l);
++ iowrite32be(0x0, &regs->mac_addr[paddr_num - 1].mac_addr_u);
++ }
++}
++
++void fman_memac_add_addr_in_paddr(struct memac_regs *regs,
++ uint8_t *adr,
++ uint8_t paddr_num)
++{
++ uint32_t tmp0, tmp1;
++
++ tmp0 = (uint32_t)(adr[0] |
++ adr[1] << 8 |
++ adr[2] << 16 |
++ adr[3] << 24);
++ tmp1 = (uint32_t)(adr[4] | adr[5] << 8);
++
++ if (paddr_num == 0) {
++ iowrite32be(tmp0, &regs->mac_addr0.mac_addr_l);
++ iowrite32be(tmp1, &regs->mac_addr0.mac_addr_u);
++ } else {
++ iowrite32be(tmp0, &regs->mac_addr[paddr_num-1].mac_addr_l);
++ iowrite32be(tmp1, &regs->mac_addr[paddr_num-1].mac_addr_u);
++ }
++}
++
++void fman_memac_enable(struct memac_regs *regs, bool apply_rx, bool apply_tx)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++
++ if (apply_rx)
++ tmp |= CMD_CFG_RX_EN;
++
++ if (apply_tx)
++ tmp |= CMD_CFG_TX_EN;
++
++ iowrite32be(tmp, &regs->command_config);
++}
++
++void fman_memac_disable(struct memac_regs *regs, bool apply_rx, bool apply_tx)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++
++ if (apply_rx)
++ tmp &= ~CMD_CFG_RX_EN;
++
++ if (apply_tx)
++ tmp &= ~CMD_CFG_TX_EN;
++
++ iowrite32be(tmp, &regs->command_config);
++}
++
++void fman_memac_reset_stat(struct memac_regs *regs)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->statn_config);
++
++ tmp |= STATS_CFG_CLR;
++
++ iowrite32be(tmp, &regs->statn_config);
++
++ while (ioread32be(&regs->statn_config) & STATS_CFG_CLR);
++}
++
++void fman_memac_reset(struct memac_regs *regs)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++
++ tmp |= CMD_CFG_SW_RESET;
++
++ iowrite32be(tmp, &regs->command_config);
++
++ while (ioread32be(&regs->command_config) & CMD_CFG_SW_RESET);
++}
++
++int fman_memac_init(struct memac_regs *regs,
++ struct memac_cfg *cfg,
++ enum enet_interface enet_interface,
++ enum enet_speed enet_speed,
++ bool slow_10g_if,
++ uint32_t exceptions)
++{
++ uint32_t tmp;
++
++ /* Config */
++ tmp = 0;
++ if (cfg->wan_mode_enable)
++ tmp |= CMD_CFG_WAN_MODE;
++ if (cfg->promiscuous_mode_enable)
++ tmp |= CMD_CFG_PROMIS_EN;
++ if (cfg->pause_forward_enable)
++ tmp |= CMD_CFG_PAUSE_FWD;
++ if (cfg->pause_ignore)
++ tmp |= CMD_CFG_PAUSE_IGNORE;
++ if (cfg->tx_addr_ins_enable)
++ tmp |= CMD_CFG_TX_ADDR_INS;
++ if (cfg->loopback_enable)
++ tmp |= CMD_CFG_LOOPBACK_EN;
++ if (cfg->cmd_frame_enable)
++ tmp |= CMD_CFG_CNT_FRM_EN;
++ if (cfg->send_idle_enable)
++ tmp |= CMD_CFG_SEND_IDLE;
++ if (cfg->no_length_check_enable)
++ tmp |= CMD_CFG_NO_LEN_CHK;
++ if (cfg->rx_sfd_any)
++ tmp |= CMD_CFG_SFD_ANY;
++ if (cfg->pad_enable)
++ tmp |= CMD_CFG_TX_PAD_EN;
++ if (cfg->wake_on_lan)
++ tmp |= CMD_CFG_MG;
++
++ tmp |= CMD_CFG_CRC_FWD;
++
++ iowrite32be(tmp, &regs->command_config);
++
++ /* Max Frame Length */
++ iowrite32be((uint32_t)cfg->max_frame_length, &regs->maxfrm);
++
++ /* Pause Time */
++ iowrite32be((uint32_t)cfg->pause_quanta, &regs->pause_quanta[0]);
++ iowrite32be((uint32_t)0, &regs->pause_thresh[0]);
++
++ /* IF_MODE */
++ tmp = 0;
++ switch (enet_interface) {
++ case E_ENET_IF_XGMII:
++ case E_ENET_IF_XFI:
++ tmp |= IF_MODE_XGMII;
++ break;
++ default:
++ tmp |= IF_MODE_GMII;
++ if (enet_interface == E_ENET_IF_RGMII && !cfg->loopback_enable)
++ tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
++ }
++ iowrite32be(tmp, &regs->if_mode);
++
++ /* TX_FIFO_SECTIONS */
++ tmp = 0;
++ if (enet_interface == E_ENET_IF_XGMII ||
++ enet_interface == E_ENET_IF_XFI) {
++ if (slow_10g_if) {
++ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
++ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
++ } else {
++ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
++ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
++ }
++ } else {
++ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
++ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
++ }
++ iowrite32be(tmp, &regs->tx_fifo_sections);
++
++ /* clear all pending events and set-up interrupts */
++ fman_memac_ack_event(regs, 0xffffffff);
++ fman_memac_set_exception(regs, exceptions, TRUE);
++
++ return 0;
++}
++
++void fman_memac_set_exception(struct memac_regs *regs, uint32_t val, bool enable)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->imask);
++ if (enable)
++ tmp |= val;
++ else
++ tmp &= ~val;
++
++ iowrite32be(tmp, &regs->imask);
++}
++
++void fman_memac_reset_filter_table(struct memac_regs *regs)
++{
++ uint32_t i;
++ for (i = 0; i < 64; i++)
++ iowrite32be(i & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
++}
++
++void fman_memac_set_hash_table_entry(struct memac_regs *regs, uint32_t crc)
++{
++ iowrite32be(crc | HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
++}
++
++void fman_memac_set_hash_table(struct memac_regs *regs, uint32_t val)
++{
++ iowrite32be(val, &regs->hashtable_ctrl);
++}
++
++uint16_t fman_memac_get_max_frame_len(struct memac_regs *regs)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->maxfrm);
++
++ return (uint16_t)tmp;
++}
++
++
++void fman_memac_set_tx_pause_frames(struct memac_regs *regs,
++ uint8_t priority,
++ uint16_t pause_time,
++ uint16_t thresh_time)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->tx_fifo_sections);
++
++ if (priority == 0xff) {
++ GET_TX_EMPTY_DEFAULT_VALUE(tmp);
++ iowrite32be(tmp, &regs->tx_fifo_sections);
++
++ tmp = ioread32be(&regs->command_config);
++ tmp &= ~CMD_CFG_PFC_MODE;
++ priority = 0;
++ } else {
++ GET_TX_EMPTY_PFC_VALUE(tmp);
++ iowrite32be(tmp, &regs->tx_fifo_sections);
++
++ tmp = ioread32be(&regs->command_config);
++ tmp |= CMD_CFG_PFC_MODE;
++ }
++
++ iowrite32be(tmp, &regs->command_config);
++
++ tmp = ioread32be(&regs->pause_quanta[priority / 2]);
++ if (priority % 2)
++ tmp &= 0x0000FFFF;
++ else
++ tmp &= 0xFFFF0000;
++ tmp |= ((uint32_t)pause_time << (16 * (priority % 2)));
++ iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
++
++ tmp = ioread32be(&regs->pause_thresh[priority / 2]);
++ if (priority % 2)
++ tmp &= 0x0000FFFF;
++ else
++ tmp &= 0xFFFF0000;
++ tmp |= ((uint32_t)thresh_time<<(16 * (priority % 2)));
++ iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
++}
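++
++/*
++ * Layout note (editor's sketch, not part of the original driver): each
++ * 32-bit pause_quanta/pause_thresh register holds two priorities - even
++ * priorities in the low half-word, odd priorities in the high half-word.
++ * E.g. priority 3 lands in the upper 16 bits of pause_quanta[1] and
++ * pause_thresh[1].
++ */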
++
++void fman_memac_set_rx_ignore_pause_frames(struct memac_regs *regs, bool enable)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++ if (enable)
++ tmp |= CMD_CFG_PAUSE_IGNORE;
++ else
++ tmp &= ~CMD_CFG_PAUSE_IGNORE;
++
++ iowrite32be(tmp, &regs->command_config);
++}
++
++void fman_memac_set_wol(struct memac_regs *regs, bool enable)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++
++ if (enable)
++ tmp |= CMD_CFG_MG;
++ else
++ tmp &= ~CMD_CFG_MG;
++
++ iowrite32be(tmp, &regs->command_config);
++}
++
++#define GET_MEMAC_CNTR_64(bn) \
++ (ioread32be(&regs->bn ## _l) | \
++ ((uint64_t)ioread32be(&regs->bn ## _u) << 32))
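++
++/*
++ * Editor's note (not part of the original driver): the macro pastes the
++ * counter name onto its _l/_u register pair, so GET_MEMAC_CNTR_64(r64)
++ * reads r64_l and r64_u (lower word first) and combines them into one
++ * 64-bit value.
++ */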
++
++uint64_t fman_memac_get_counter(struct memac_regs *regs,
++ enum memac_counters reg_name)
++{
++ uint64_t ret_val;
++
++ switch (reg_name) {
++ case E_MEMAC_COUNTER_R64:
++ ret_val = GET_MEMAC_CNTR_64(r64);
++ break;
++ case E_MEMAC_COUNTER_R127:
++ ret_val = GET_MEMAC_CNTR_64(r127);
++ break;
++ case E_MEMAC_COUNTER_R255:
++ ret_val = GET_MEMAC_CNTR_64(r255);
++ break;
++ case E_MEMAC_COUNTER_R511:
++ ret_val = GET_MEMAC_CNTR_64(r511);
++ break;
++ case E_MEMAC_COUNTER_R1023:
++ ret_val = GET_MEMAC_CNTR_64(r1023);
++ break;
++ case E_MEMAC_COUNTER_R1518:
++ ret_val = GET_MEMAC_CNTR_64(r1518);
++ break;
++ case E_MEMAC_COUNTER_R1519X:
++ ret_val = GET_MEMAC_CNTR_64(r1519x);
++ break;
++ case E_MEMAC_COUNTER_RFRG:
++ ret_val = GET_MEMAC_CNTR_64(rfrg);
++ break;
++ case E_MEMAC_COUNTER_RJBR:
++ ret_val = GET_MEMAC_CNTR_64(rjbr);
++ break;
++ case E_MEMAC_COUNTER_RDRP:
++ ret_val = GET_MEMAC_CNTR_64(rdrp);
++ break;
++ case E_MEMAC_COUNTER_RALN:
++ ret_val = GET_MEMAC_CNTR_64(raln);
++ break;
++ case E_MEMAC_COUNTER_TUND:
++ ret_val = GET_MEMAC_CNTR_64(tund);
++ break;
++ case E_MEMAC_COUNTER_ROVR:
++ ret_val = GET_MEMAC_CNTR_64(rovr);
++ break;
++ case E_MEMAC_COUNTER_RXPF:
++ ret_val = GET_MEMAC_CNTR_64(rxpf);
++ break;
++ case E_MEMAC_COUNTER_TXPF:
++ ret_val = GET_MEMAC_CNTR_64(txpf);
++ break;
++ case E_MEMAC_COUNTER_ROCT:
++ ret_val = GET_MEMAC_CNTR_64(roct);
++ break;
++ case E_MEMAC_COUNTER_RMCA:
++ ret_val = GET_MEMAC_CNTR_64(rmca);
++ break;
++ case E_MEMAC_COUNTER_RBCA:
++ ret_val = GET_MEMAC_CNTR_64(rbca);
++ break;
++ case E_MEMAC_COUNTER_RPKT:
++ ret_val = GET_MEMAC_CNTR_64(rpkt);
++ break;
++ case E_MEMAC_COUNTER_RUCA:
++ ret_val = GET_MEMAC_CNTR_64(ruca);
++ break;
++ case E_MEMAC_COUNTER_RERR:
++ ret_val = GET_MEMAC_CNTR_64(rerr);
++ break;
++ case E_MEMAC_COUNTER_TOCT:
++ ret_val = GET_MEMAC_CNTR_64(toct);
++ break;
++ case E_MEMAC_COUNTER_TMCA:
++ ret_val = GET_MEMAC_CNTR_64(tmca);
++ break;
++ case E_MEMAC_COUNTER_TBCA:
++ ret_val = GET_MEMAC_CNTR_64(tbca);
++ break;
++ case E_MEMAC_COUNTER_TUCA:
++ ret_val = GET_MEMAC_CNTR_64(tuca);
++ break;
++ case E_MEMAC_COUNTER_TERR:
++ ret_val = GET_MEMAC_CNTR_64(terr);
++ break;
++ default:
++ ret_val = 0;
++ }
++
++ return ret_val;
++}
++
++void fman_memac_adjust_link(struct memac_regs *regs,
++ enum enet_interface iface_mode,
++ enum enet_speed speed, bool full_dx)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->if_mode);
++
++ if (full_dx)
++ tmp &= ~IF_MODE_HD;
++ else
++ tmp |= IF_MODE_HD;
++
++ if (iface_mode == E_ENET_IF_RGMII) {
++ /* Configure RGMII in manual mode */
++ tmp &= ~IF_MODE_RGMII_AUTO;
++ tmp &= ~IF_MODE_RGMII_SP_MASK;
++
++ if (full_dx)
++ tmp |= IF_MODE_RGMII_FD;
++ else
++ tmp &= ~IF_MODE_RGMII_FD;
++
++ switch (speed) {
++ case E_ENET_SPEED_1000:
++ tmp |= IF_MODE_RGMII_1000;
++ break;
++ case E_ENET_SPEED_100:
++ tmp |= IF_MODE_RGMII_100;
++ break;
++ case E_ENET_SPEED_10:
++ tmp |= IF_MODE_RGMII_10;
++ break;
++ default:
++ break;
++ }
++ }
++
++ iowrite32be(tmp, &regs->if_mode);
++}
++
++void fman_memac_defconfig(struct memac_cfg *cfg)
++{
++ cfg->reset_on_init = FALSE;
++ cfg->wan_mode_enable = FALSE;
++ cfg->promiscuous_mode_enable = FALSE;
++ cfg->pause_forward_enable = FALSE;
++ cfg->pause_ignore = FALSE;
++ cfg->tx_addr_ins_enable = FALSE;
++ cfg->loopback_enable = FALSE;
++ cfg->cmd_frame_enable = FALSE;
++ cfg->rx_error_discard = FALSE;
++ cfg->send_idle_enable = FALSE;
++ cfg->no_length_check_enable = TRUE;
++ cfg->lgth_check_nostdr = FALSE;
++ cfg->time_stamp_enable = FALSE;
++ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
++ cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
++ cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
++ cfg->pad_enable = TRUE;
++ cfg->phy_tx_ena_on = FALSE;
++ cfg->rx_sfd_any = FALSE;
++ cfg->rx_pbl_fwd = FALSE;
++ cfg->tx_pbl_fwd = FALSE;
++ cfg->debug_mode = FALSE;
++ cfg->wake_on_lan = FALSE;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac_mii_acc.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac_mii_acc.c
+new file mode 100755
+index 00000000..ccda11ec
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac_mii_acc.c
+@@ -0,0 +1,213 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "fsl_fman_memac_mii_acc.h"
++
++static void write_phy_reg_10g(struct memac_mii_access_mem_map *mii_regs,
++ uint8_t phy_addr, uint8_t reg, uint16_t data)
++{
++ uint32_t tmp_reg;
++
++ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
++ /* Leave only MDIO_CLK_DIV bits set on */
++ tmp_reg &= MDIO_CFG_CLK_DIV_MASK;
++ /* Set the maximum MDIO_HOLD value to allow the PHY to see
++ the data signal change */
++ tmp_reg |= MDIO_CFG_HOLD_MASK;
++ /* Add 10G interface mode */
++ tmp_reg |= MDIO_CFG_ENC45;
++ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
++
++ /* Wait for command completion */
++ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
++ udelay(1);
++
++ /* Specify phy and register to be accessed */
++ iowrite32be(phy_addr, &mii_regs->mdio_ctrl);
++ iowrite32be(reg, &mii_regs->mdio_addr);
++ wmb();
++
++ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
++ udelay(1);
++
++ /* Write data */
++ iowrite32be(data, &mii_regs->mdio_data);
++ wmb();
++
++ /* Wait for write transaction end */
++ while ((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY)
++ udelay(1);
++}
++
++static uint32_t read_phy_reg_10g(struct memac_mii_access_mem_map *mii_regs,
++ uint8_t phy_addr, uint8_t reg, uint16_t *data)
++{
++ uint32_t tmp_reg;
++
++ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
++ /* Leave only MDIO_CLK_DIV bits set on */
++ tmp_reg &= MDIO_CFG_CLK_DIV_MASK;
++ /* Set maximum MDIO_HOLD value to allow phy to see
++ change of data signal */
++ tmp_reg |= MDIO_CFG_HOLD_MASK;
++ /* Add 10G interface mode */
++ tmp_reg |= MDIO_CFG_ENC45;
++ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
++
++ /* Wait for command completion */
++ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
++ udelay(1);
++
++ /* Specify phy and register to be accessed */
++ iowrite32be(phy_addr, &mii_regs->mdio_ctrl);
++ iowrite32be(reg, &mii_regs->mdio_addr);
++ wmb();
++
++ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
++ udelay(1);
++
++ /* Read cycle */
++ tmp_reg = phy_addr;
++ tmp_reg |= MDIO_CTL_READ;
++ iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
++ wmb();
++
++ /* Wait for data to be available */
++ while ((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY)
++ udelay(1);
++
++ *data = (uint16_t)ioread32be(&mii_regs->mdio_data);
++
++ /* Check if there was an error */
++ return ioread32be(&mii_regs->mdio_cfg);
++}
++
++static void write_phy_reg_1g(struct memac_mii_access_mem_map *mii_regs,
++ uint8_t phy_addr, uint8_t reg, uint16_t data)
++{
++ uint32_t tmp_reg;
++
++ /* Leave only MDIO_CLK_DIV and MDIO_HOLD bits set on */
++ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
++ tmp_reg &= (MDIO_CFG_CLK_DIV_MASK | MDIO_CFG_HOLD_MASK);
++ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
++
++ /* Wait for command completion */
++ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
++ udelay(1);
++
++ /* Write transaction */
++ tmp_reg = (phy_addr << MDIO_CTL_PHY_ADDR_SHIFT);
++ tmp_reg |= reg;
++ iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
++
++ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
++ udelay(1);
++
++ iowrite32be(data, &mii_regs->mdio_data);
++
++ wmb();
++
++ /* Wait for write transaction to end */
++ while ((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY)
++ udelay(1);
++}
++
++static uint32_t read_phy_reg_1g(struct memac_mii_access_mem_map *mii_regs,
++ uint8_t phy_addr, uint8_t reg, uint16_t *data)
++{
++ uint32_t tmp_reg;
++
++ /* Leave only MDIO_CLK_DIV and MDIO_HOLD bits set on */
++ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
++ tmp_reg &= (MDIO_CFG_CLK_DIV_MASK | MDIO_CFG_HOLD_MASK);
++ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
++
++ /* Wait for command completion */
++ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
++ udelay(1);
++
++ /* Read transaction */
++ tmp_reg = (phy_addr << MDIO_CTL_PHY_ADDR_SHIFT);
++ tmp_reg |= reg;
++ tmp_reg |= MDIO_CTL_READ;
++ iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
++
++ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
++ udelay(1);
++
++ /* Wait for data to be available */
++ while ((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY)
++ udelay(1);
++
++ *data = (uint16_t)ioread32be(&mii_regs->mdio_data);
++
++ /* Check error */
++ return ioread32be(&mii_regs->mdio_cfg);
++}
++
++/*****************************************************************************/
++int fman_memac_mii_write_phy_reg(struct memac_mii_access_mem_map *mii_regs,
++ uint8_t phy_addr, uint8_t reg, uint16_t data,
++ enum enet_speed enet_speed)
++{
++	/* Figure out the interface type - 10G vs 1G.
++	   On a 10G interface both phy_addr and devAddr are present. */
++ if (enet_speed == E_ENET_SPEED_10000)
++ write_phy_reg_10g(mii_regs, phy_addr, reg, data);
++ else
++ write_phy_reg_1g(mii_regs, phy_addr, reg, data);
++
++ return 0;
++}
++
++/*****************************************************************************/
++int fman_memac_mii_read_phy_reg(struct memac_mii_access_mem_map *mii_regs,
++ uint8_t phy_addr, uint8_t reg, uint16_t *data,
++ enum enet_speed enet_speed)
++{
++ uint32_t ans;
++	uint32_t ans;
++	/* Figure out the interface type - 10G vs 1G.
++	   On a 10G interface both phy_addr and devAddr are present. */
++ if (enet_speed == E_ENET_SPEED_10000)
++ ans = read_phy_reg_10g(mii_regs, phy_addr, reg, data);
++ else
++ ans = read_phy_reg_1g(mii_regs, phy_addr, reg, data);
++
++ if (ans & MDIO_CFG_READ_ERR)
++ return -EINVAL;
++ return 0;
++}
++
++/* ......................................................................... */
++
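++
++/* Illustrative usage sketch (added for clarity, not part of the original
++ * driver): how a caller might read-modify-write a PHY register through the
++ * accessors above. The PHY address 4 is hypothetical, and E_ENET_SPEED_1000
++ * is assumed to exist alongside the E_ENET_SPEED_10000 value used above.
++ */
++#if 0 /* example only - not compiled into the driver */
++static int example_toggle_an_enable(struct memac_mii_access_mem_map *mii_regs)
++{
++	uint16_t val;
++	int err;
++
++	/* Read control register 0 of the PHY at (hypothetical) address 4 */
++	err = fman_memac_mii_read_phy_reg(mii_regs, 4, 0, &val,
++					  E_ENET_SPEED_1000);
++	if (err)
++		return err; /* -EINVAL when MDIO_CFG_READ_ERR was set */
++
++	/* Set bit 12 (auto-negotiation enable on a standard PHY) */
++	val |= 0x1000;
++	return fman_memac_mii_write_phy_reg(mii_regs, 4, 0, val,
++					    E_ENET_SPEED_1000);
++}
++#endif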
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_tgec.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_tgec.c
+new file mode 100644
+index 00000000..fff9d5de
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_tgec.c
+@@ -0,0 +1,367 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "fsl_fman_tgec.h"
++
++
++void fman_tgec_set_mac_address(struct tgec_regs *regs, uint8_t *adr)
++{
++ uint32_t tmp0, tmp1;
++
++ tmp0 = (uint32_t)(adr[0] |
++ adr[1] << 8 |
++ adr[2] << 16 |
++ adr[3] << 24);
++ tmp1 = (uint32_t)(adr[4] | adr[5] << 8);
++ iowrite32be(tmp0, &regs->mac_addr_0);
++ iowrite32be(tmp1, &regs->mac_addr_1);
++}
++
++void fman_tgec_reset_stat(struct tgec_regs *regs)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++
++ tmp |= CMD_CFG_STAT_CLR;
++
++ iowrite32be(tmp, &regs->command_config);
++
++	while (ioread32be(&regs->command_config) & CMD_CFG_STAT_CLR)
++		;
++}
++
++#define GET_TGEC_CNTR_64(bn) \
++ (((uint64_t)ioread32be(&regs->bn ## _u) << 32) | \
++ ioread32be(&regs->bn ## _l))
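++
++/* Added note: the ## token pasting above turns GET_TGEC_CNTR_64(r64) into a
++ * read of the regs->r64_u / regs->r64_l register pair, combining the two
++ * 32-bit halves into one 64-bit counter:
++ *
++ *   (((uint64_t)ioread32be(&regs->r64_u) << 32) | ioread32be(&regs->r64_l))
++ *
++ * The halves are read non-atomically, so a counter wrapping between the two
++ * reads could in principle be observed inconsistently.
++ */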
++
++uint64_t fman_tgec_get_counter(struct tgec_regs *regs, enum tgec_counters reg_name)
++{
++ uint64_t ret_val;
++
++ switch (reg_name) {
++ case E_TGEC_COUNTER_R64:
++ ret_val = GET_TGEC_CNTR_64(r64);
++ break;
++ case E_TGEC_COUNTER_R127:
++ ret_val = GET_TGEC_CNTR_64(r127);
++ break;
++ case E_TGEC_COUNTER_R255:
++ ret_val = GET_TGEC_CNTR_64(r255);
++ break;
++ case E_TGEC_COUNTER_R511:
++ ret_val = GET_TGEC_CNTR_64(r511);
++ break;
++ case E_TGEC_COUNTER_R1023:
++ ret_val = GET_TGEC_CNTR_64(r1023);
++ break;
++ case E_TGEC_COUNTER_R1518:
++ ret_val = GET_TGEC_CNTR_64(r1518);
++ break;
++ case E_TGEC_COUNTER_R1519X:
++ ret_val = GET_TGEC_CNTR_64(r1519x);
++ break;
++ case E_TGEC_COUNTER_TRFRG:
++ ret_val = GET_TGEC_CNTR_64(trfrg);
++ break;
++ case E_TGEC_COUNTER_TRJBR:
++ ret_val = GET_TGEC_CNTR_64(trjbr);
++ break;
++ case E_TGEC_COUNTER_RDRP:
++ ret_val = GET_TGEC_CNTR_64(rdrp);
++ break;
++ case E_TGEC_COUNTER_RALN:
++ ret_val = GET_TGEC_CNTR_64(raln);
++ break;
++ case E_TGEC_COUNTER_TRUND:
++ ret_val = GET_TGEC_CNTR_64(trund);
++ break;
++ case E_TGEC_COUNTER_TROVR:
++ ret_val = GET_TGEC_CNTR_64(trovr);
++ break;
++ case E_TGEC_COUNTER_RXPF:
++ ret_val = GET_TGEC_CNTR_64(rxpf);
++ break;
++ case E_TGEC_COUNTER_TXPF:
++ ret_val = GET_TGEC_CNTR_64(txpf);
++ break;
++ case E_TGEC_COUNTER_ROCT:
++ ret_val = GET_TGEC_CNTR_64(roct);
++ break;
++ case E_TGEC_COUNTER_RMCA:
++ ret_val = GET_TGEC_CNTR_64(rmca);
++ break;
++ case E_TGEC_COUNTER_RBCA:
++ ret_val = GET_TGEC_CNTR_64(rbca);
++ break;
++ case E_TGEC_COUNTER_RPKT:
++ ret_val = GET_TGEC_CNTR_64(rpkt);
++ break;
++ case E_TGEC_COUNTER_RUCA:
++ ret_val = GET_TGEC_CNTR_64(ruca);
++ break;
++ case E_TGEC_COUNTER_RERR:
++ ret_val = GET_TGEC_CNTR_64(rerr);
++ break;
++ case E_TGEC_COUNTER_TOCT:
++ ret_val = GET_TGEC_CNTR_64(toct);
++ break;
++ case E_TGEC_COUNTER_TMCA:
++ ret_val = GET_TGEC_CNTR_64(tmca);
++ break;
++ case E_TGEC_COUNTER_TBCA:
++ ret_val = GET_TGEC_CNTR_64(tbca);
++ break;
++ case E_TGEC_COUNTER_TUCA:
++ ret_val = GET_TGEC_CNTR_64(tuca);
++ break;
++ case E_TGEC_COUNTER_TERR:
++ ret_val = GET_TGEC_CNTR_64(terr);
++ break;
++ default:
++ ret_val = 0;
++ }
++
++ return ret_val;
++}
++
++void fman_tgec_enable(struct tgec_regs *regs, bool apply_rx, bool apply_tx)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++ if (apply_rx)
++ tmp |= CMD_CFG_RX_EN;
++ if (apply_tx)
++ tmp |= CMD_CFG_TX_EN;
++ iowrite32be(tmp, &regs->command_config);
++}
++
++void fman_tgec_disable(struct tgec_regs *regs, bool apply_rx, bool apply_tx)
++{
++ uint32_t tmp_reg_32;
++
++ tmp_reg_32 = ioread32be(&regs->command_config);
++ if (apply_rx)
++ tmp_reg_32 &= ~CMD_CFG_RX_EN;
++ if (apply_tx)
++ tmp_reg_32 &= ~CMD_CFG_TX_EN;
++ iowrite32be(tmp_reg_32, &regs->command_config);
++}
++
++void fman_tgec_set_promiscuous(struct tgec_regs *regs, bool val)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++ if (val)
++ tmp |= CMD_CFG_PROMIS_EN;
++ else
++ tmp &= ~CMD_CFG_PROMIS_EN;
++ iowrite32be(tmp, &regs->command_config);
++}
++
++void fman_tgec_reset_filter_table(struct tgec_regs *regs)
++{
++ uint32_t i;
++ for (i = 0; i < 512; i++)
++ iowrite32be(i & ~TGEC_HASH_MCAST_EN, &regs->hashtable_ctrl);
++}
++
++void fman_tgec_set_hash_table_entry(struct tgec_regs *regs, uint32_t crc)
++{
++	uint32_t hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; /* Take the 9 most-significant bits of the CRC */
++ iowrite32be(hash | TGEC_HASH_MCAST_EN, &regs->hashtable_ctrl);
++}
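++
++/* Worked example (added for clarity; assumes the 9-bit layout implied by the
++ * 512-entry table cleared in fman_tgec_reset_filter_table above): for a
++ * frame CRC of 0xDEADBEEF, the 9 most-significant bits are
++ * 0xDEADBEEF >> 23 = 0x1BD, so bucket 0x1BD of the hash table is enabled by
++ * writing 0x1BD | TGEC_HASH_MCAST_EN to hashtable_ctrl.
++ */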
++
++void fman_tgec_set_hash_table(struct tgec_regs *regs, uint32_t value)
++{
++ iowrite32be(value, &regs->hashtable_ctrl);
++}
++
++void fman_tgec_set_tx_pause_frames(struct tgec_regs *regs, uint16_t pause_time)
++{
++ iowrite32be((uint32_t)pause_time, &regs->pause_quant);
++}
++
++void fman_tgec_set_rx_ignore_pause_frames(struct tgec_regs *regs, bool en)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++ if (en)
++ tmp |= CMD_CFG_PAUSE_IGNORE;
++ else
++ tmp &= ~CMD_CFG_PAUSE_IGNORE;
++ iowrite32be(tmp, &regs->command_config);
++}
++
++void fman_tgec_enable_1588_time_stamp(struct tgec_regs *regs, bool en)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->command_config);
++ if (en)
++ tmp |= CMD_CFG_EN_TIMESTAMP;
++ else
++ tmp &= ~CMD_CFG_EN_TIMESTAMP;
++ iowrite32be(tmp, &regs->command_config);
++}
++
++uint32_t fman_tgec_get_event(struct tgec_regs *regs, uint32_t ev_mask)
++{
++ return ioread32be(&regs->ievent) & ev_mask;
++}
++
++void fman_tgec_ack_event(struct tgec_regs *regs, uint32_t ev_mask)
++{
++ iowrite32be(ev_mask, &regs->ievent);
++}
++
++uint32_t fman_tgec_get_interrupt_mask(struct tgec_regs *regs)
++{
++ return ioread32be(&regs->imask);
++}
++
++void fman_tgec_add_addr_in_paddr(struct tgec_regs *regs, uint8_t *adr)
++{
++ uint32_t tmp0, tmp1;
++
++ tmp0 = (uint32_t)(adr[0] |
++ adr[1] << 8 |
++ adr[2] << 16 |
++ adr[3] << 24);
++ tmp1 = (uint32_t)(adr[4] | adr[5] << 8);
++ iowrite32be(tmp0, &regs->mac_addr_2);
++ iowrite32be(tmp1, &regs->mac_addr_3);
++}
++
++void fman_tgec_clear_addr_in_paddr(struct tgec_regs *regs)
++{
++ iowrite32be(0, &regs->mac_addr_2);
++ iowrite32be(0, &regs->mac_addr_3);
++}
++
++uint32_t fman_tgec_get_revision(struct tgec_regs *regs)
++{
++ return ioread32be(&regs->tgec_id);
++}
++
++void fman_tgec_enable_interrupt(struct tgec_regs *regs, uint32_t ev_mask)
++{
++ iowrite32be(ioread32be(&regs->imask) | ev_mask, &regs->imask);
++}
++
++void fman_tgec_disable_interrupt(struct tgec_regs *regs, uint32_t ev_mask)
++{
++ iowrite32be(ioread32be(&regs->imask) & ~ev_mask, &regs->imask);
++}
++
++uint16_t fman_tgec_get_max_frame_len(struct tgec_regs *regs)
++{
++ return (uint16_t) ioread32be(&regs->maxfrm);
++}
++
++void fman_tgec_defconfig(struct tgec_cfg *cfg)
++{
++ cfg->wan_mode_enable = DEFAULT_WAN_MODE_ENABLE;
++ cfg->promiscuous_mode_enable = DEFAULT_PROMISCUOUS_MODE_ENABLE;
++ cfg->pause_forward_enable = DEFAULT_PAUSE_FORWARD_ENABLE;
++ cfg->pause_ignore = DEFAULT_PAUSE_IGNORE;
++ cfg->tx_addr_ins_enable = DEFAULT_TX_ADDR_INS_ENABLE;
++ cfg->loopback_enable = DEFAULT_LOOPBACK_ENABLE;
++ cfg->cmd_frame_enable = DEFAULT_CMD_FRAME_ENABLE;
++ cfg->rx_error_discard = DEFAULT_RX_ERROR_DISCARD;
++ cfg->send_idle_enable = DEFAULT_SEND_IDLE_ENABLE;
++ cfg->no_length_check_enable = DEFAULT_NO_LENGTH_CHECK_ENABLE;
++ cfg->lgth_check_nostdr = DEFAULT_LGTH_CHECK_NOSTDR;
++ cfg->time_stamp_enable = DEFAULT_TIME_STAMP_ENABLE;
++ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
++ cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
++ cfg->pause_quant = DEFAULT_PAUSE_QUANT;
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++ cfg->skip_fman11_workaround = FALSE;
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++}
++
++int fman_tgec_init(struct tgec_regs *regs, struct tgec_cfg *cfg,
++ uint32_t exception_mask)
++{
++ uint32_t tmp;
++
++ /* Config */
++ tmp = 0x40; /* CRC forward */
++ if (cfg->wan_mode_enable)
++ tmp |= CMD_CFG_WAN_MODE;
++ if (cfg->promiscuous_mode_enable)
++ tmp |= CMD_CFG_PROMIS_EN;
++ if (cfg->pause_forward_enable)
++ tmp |= CMD_CFG_PAUSE_FWD;
++ if (cfg->pause_ignore)
++ tmp |= CMD_CFG_PAUSE_IGNORE;
++ if (cfg->tx_addr_ins_enable)
++ tmp |= CMD_CFG_TX_ADDR_INS;
++ if (cfg->loopback_enable)
++ tmp |= CMD_CFG_LOOPBACK_EN;
++ if (cfg->cmd_frame_enable)
++ tmp |= CMD_CFG_CMD_FRM_EN;
++ if (cfg->rx_error_discard)
++ tmp |= CMD_CFG_RX_ER_DISC;
++ if (cfg->send_idle_enable)
++ tmp |= CMD_CFG_SEND_IDLE;
++ if (cfg->no_length_check_enable)
++ tmp |= CMD_CFG_NO_LEN_CHK;
++ if (cfg->time_stamp_enable)
++ tmp |= CMD_CFG_EN_TIMESTAMP;
++ iowrite32be(tmp, &regs->command_config);
++
++ /* Max Frame Length */
++ iowrite32be((uint32_t)cfg->max_frame_length, &regs->maxfrm);
++ /* Pause Time */
++ iowrite32be(cfg->pause_quant, &regs->pause_quant);
++
++ /* clear all pending events and set-up interrupts */
++ fman_tgec_ack_event(regs, 0xffffffff);
++ fman_tgec_enable_interrupt(regs, exception_mask);
++
++ return 0;
++}
++
++void fman_tgec_set_erratum_tx_fifo_corruption_10gmac_a007(struct tgec_regs *regs)
++{
++ uint32_t tmp;
++
++ /* restore the default tx ipg Length */
++ tmp = (ioread32be(&regs->tx_ipg_len) & ~TGEC_TX_IPG_LENGTH_MASK) | 12;
++
++ iowrite32be(tmp, &regs->tx_ipg_len);
++}
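++
++/* Added note: 12 is the standard Ethernet inter-packet gap in bytes; the
++ * erratum workaround above clears the IPG field of tx_ipg_len and restores
++ * that default value. */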
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
+new file mode 100644
+index 00000000..85426c5f
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
+@@ -0,0 +1,1096 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File memac.c
++
++ @Description FM mEMAC driver
++*//***************************************************************************/
++
++#include "std_ext.h"
++#include "string_ext.h"
++#include "error_ext.h"
++#include "xx_ext.h"
++#include "endian_ext.h"
++#include "debug_ext.h"
++
++#include "fm_common.h"
++#include "memac.h"
++
++
++/*****************************************************************************/
++/* Internal routines */
++/*****************************************************************************/
++
++/* ......................................................................... */
++
++static uint32_t GetMacAddrHashCode(uint64_t ethAddr)
++{
++ uint64_t mask1, mask2;
++ uint32_t xorVal = 0;
++ uint8_t i, j;
++
++ for (i=0; i<6; i++)
++ {
++ mask1 = ethAddr & (uint64_t)0x01;
++ ethAddr >>= 1;
++
++ for (j=0; j<7; j++)
++ {
++ mask2 = ethAddr & (uint64_t)0x01;
++ mask1 ^= mask2;
++ ethAddr >>= 1;
++ }
++
++ xorVal |= (mask1 << (5-i));
++ }
++
++ return xorVal;
++}
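++
++/* Added note: the loop above consumes the 48-bit address one byte at a time
++ * (the outer loop runs 6 times, each pass folding 8 bits), XOR-reducing each
++ * byte to a single parity bit placed at position (5 - i). The result is a
++ * 6-bit hash code in the range 0..63, which callers below mask with
++ * HASH_CTRL_ADDR_MASK.
++ */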
++
++/* ......................................................................... */
++
++static void SetupSgmiiInternalPhy(t_Memac *p_Memac, uint8_t phyAddr)
++{
++ uint16_t tmpReg16;
++ e_EnetMode enetMode;
++
++    /* When the higher MACs (i.e. the MACs that should support 10G) are
++       used, speed=10000 is provided for SGMII ports. Temporarily switch
++       the enet mode to 1G so the MII functions work correctly. */
++ enetMode = p_Memac->enetMode;
++
++ /* SGMII mode + AN enable */
++ tmpReg16 = PHY_SGMII_IF_MODE_AN | PHY_SGMII_IF_MODE_SGMII;
++ if ((p_Memac->enetMode) == e_ENET_MODE_SGMII_2500)
++ tmpReg16 = PHY_SGMII_CR_PHY_RESET | PHY_SGMII_IF_SPEED_GIGABIT | PHY_SGMII_IF_MODE_SGMII;
++
++ p_Memac->enetMode = MAKE_ENET_MODE(ENET_INTERFACE_FROM_MODE(p_Memac->enetMode), e_ENET_SPEED_1000);
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x14, tmpReg16);
++
++ /* Device ability according to SGMII specification */
++ tmpReg16 = PHY_SGMII_DEV_ABILITY_SGMII;
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x4, tmpReg16);
++
++ /* Adjust link timer for SGMII -
++ According to Cisco SGMII specification the timer should be 1.6 ms.
++ The link_timer register is configured in units of the clock.
++ - When running as 1G SGMII, Serdes clock is 125 MHz, so
++ unit = 1 / (125*10^6 Hz) = 8 ns.
++ 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2 * 10^5 = 0x30d40
++ - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
++ unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
++ 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5 * 10^5 = 0x7a120.
++       Since the 1G SGMII link_timer value would be too short for 2.5G
++       SGMII, we always program the 2.5G SGMII value here. */
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x13, 0x0007);
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x12, 0xa120);
++
++ /* Restart AN */
++ tmpReg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x0, tmpReg16);
++
++ /* Restore original enet mode */
++ p_Memac->enetMode = enetMode;
++}
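++
++/* Added note on the link timer programming above: the 20-bit value 0x7a120
++ * (1.6 ms at the 2.5G SGMII SerDes clock: 1.6 ms / 3.2 ns = 500000) is split
++ * across two 16-bit PHY registers - register 0x13 takes the upper bits
++ * (0x7a120 >> 16 = 0x0007) and register 0x12 the lower 16
++ * (0x7a120 & 0xffff = 0xa120), matching the two writes above.
++ */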
++
++/* ......................................................................... */
++
++static void SetupSgmiiInternalPhyBaseX(t_Memac *p_Memac, uint8_t phyAddr)
++{
++ uint16_t tmpReg16;
++ e_EnetMode enetMode;
++
++    /* When the higher MACs (i.e. the MACs that should support 10G) are
++       used, speed=10000 is provided for SGMII ports. Temporarily switch
++       the enet mode to 1G so the MII functions work correctly. */
++ enetMode = p_Memac->enetMode;
++ p_Memac->enetMode = MAKE_ENET_MODE(ENET_INTERFACE_FROM_MODE(p_Memac->enetMode), e_ENET_SPEED_1000);
++
++ /* 1000BaseX mode */
++ tmpReg16 = PHY_SGMII_IF_MODE_1000X;
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x14, tmpReg16);
++
++ /* AN Device capability */
++ tmpReg16 = PHY_SGMII_DEV_ABILITY_1000X;
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x4, tmpReg16);
++
++ /* Adjust link timer for SGMII -
++ For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
++ The link_timer register is configured in units of the clock.
++ - When running as 1G SGMII, Serdes clock is 125 MHz, so
++ unit = 1 / (125*10^6 Hz) = 8 ns.
++ 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
++ - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
++ unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
++ 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
++       Since the 1G SGMII link_timer value would be too short for 2.5G
++       SGMII, we always program the 2.5G SGMII value here. */
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x13, 0x002f);
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x12, 0xaf08);
++
++ /* Restart AN */
++ tmpReg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
++ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x0, tmpReg16);
++
++ /* Restore original enet mode */
++ p_Memac->enetMode = enetMode;
++}
++
++/* ......................................................................... */
++
++static t_Error CheckInitParameters(t_Memac *p_Memac)
++{
++ e_FmMacType portType;
++
++ portType = ((ENET_SPEED_FROM_MODE(p_Memac->enetMode) < e_ENET_SPEED_10000) ? e_FM_MAC_1G : e_FM_MAC_10G);
++
++#if (FM_MAX_NUM_OF_10G_MACS > 0)
++ if ((portType == e_FM_MAC_10G) && (p_Memac->macId >= FM_MAX_NUM_OF_10G_MACS))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("10G MAC ID must be less than %d", FM_MAX_NUM_OF_10G_MACS));
++#endif /* (FM_MAX_NUM_OF_10G_MACS > 0) */
++
++ if ((portType == e_FM_MAC_1G) && (p_Memac->macId >= FM_MAX_NUM_OF_1G_MACS))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("1G MAC ID must be less than %d", FM_MAX_NUM_OF_1G_MACS));
++ if (p_Memac->addr == 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet MAC must have a valid MAC address"));
++ if (!p_Memac->f_Exception)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Uninitialized f_Exception"));
++ if (!p_Memac->f_Event)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Uninitialized f_Event"));
++#ifdef FM_LEN_CHECK_ERRATA_FMAN_SW002
++ if (!p_Memac->p_MemacDriverParam->no_length_check_enable)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("LengthCheck!"));
++#endif /* FM_LEN_CHECK_ERRATA_FMAN_SW002 */
++
++ return E_OK;
++}
++
++/* ........................................................................... */
++
++static void MemacErrException(t_Handle h_Memac)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++ uint32_t event, imask;
++
++ event = fman_memac_get_event(p_Memac->p_MemMap, 0xffffffff);
++ imask = fman_memac_get_interrupt_mask(p_Memac->p_MemMap);
++
++    /* imask includes both error and notification/event bits.
++       Keep only the error bits that are enabled in imask.
++       The imask error bits sit at a 16-bit offset from their
++       corresponding locations in ievent - hence the >> 16 */
++ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
++
++ fman_memac_ack_event(p_Memac->p_MemMap, event);
++
++ if (event & MEMAC_IEVNT_TS_ECC_ER)
++ p_Memac->f_Exception(p_Memac->h_App, e_FM_MAC_EX_TS_FIFO_ECC_ERR);
++ if (event & MEMAC_IEVNT_TX_ECC_ER)
++ p_Memac->f_Exception(p_Memac->h_App, e_FM_MAC_EX_10G_1TX_ECC_ER);
++ if (event & MEMAC_IEVNT_RX_ECC_ER)
++ p_Memac->f_Exception(p_Memac->h_App, e_FM_MAC_EX_10G_RX_ECC_ER);
++}
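++
++/* Added illustration of the >> 16 alignment above (bit positions are
++ * illustrative, not taken from the block guide): if an error enable sat at
++ * bit 21 of imask, its flag would sit at bit 5 of ievent, so
++ * (imask & MEMAC_ALL_ERRS_IMASK) >> 16 lines the enable bits up with the
++ * ievent flags before masking the captured events.
++ */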
++
++static void MemacException(t_Handle h_Memac)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++ uint32_t event, imask;
++
++ event = fman_memac_get_event(p_Memac->p_MemMap, 0xffffffff);
++ imask = fman_memac_get_interrupt_mask(p_Memac->p_MemMap);
++
++    /* imask includes both error and notification/event bits.
++       Keep only the error bits that are enabled in imask.
++       The imask error bits sit at a 16-bit offset from their
++       corresponding locations in ievent - hence the >> 16 */
++ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
++
++ fman_memac_ack_event(p_Memac->p_MemMap, event);
++
++ if (event & MEMAC_IEVNT_MGI)
++ p_Memac->f_Exception(p_Memac->h_App, e_FM_MAC_EX_MAGIC_PACKET_INDICATION);
++}
++
++/* ......................................................................... */
++
++static void FreeInitResources(t_Memac *p_Memac)
++{
++ e_FmMacType portType;
++
++ portType =
++ ((ENET_SPEED_FROM_MODE(p_Memac->enetMode) < e_ENET_SPEED_10000) ? e_FM_MAC_1G : e_FM_MAC_10G);
++
++ if (portType == e_FM_MAC_10G)
++ FmUnregisterIntr(p_Memac->fmMacControllerDriver.h_Fm, e_FM_MOD_10G_MAC, p_Memac->macId, e_FM_INTR_TYPE_ERR);
++ else
++ FmUnregisterIntr(p_Memac->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC, p_Memac->macId, e_FM_INTR_TYPE_ERR);
++
++ /* release the driver's group hash table */
++ FreeHashTable(p_Memac->p_MulticastAddrHash);
++ p_Memac->p_MulticastAddrHash = NULL;
++
++ /* release the driver's individual hash table */
++ FreeHashTable(p_Memac->p_UnicastAddrHash);
++ p_Memac->p_UnicastAddrHash = NULL;
++}
++
++
++/*****************************************************************************/
++/* mEMAC API routines */
++/*****************************************************************************/
++
++/* ......................................................................... */
++
++static t_Error MemacEnable(t_Handle h_Memac, e_CommMode mode)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ fman_memac_enable(p_Memac->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacDisable (t_Handle h_Memac, e_CommMode mode)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ fman_memac_disable(p_Memac->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacSetPromiscuous(t_Handle h_Memac, bool newVal)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ fman_memac_set_promiscuous(p_Memac->p_MemMap, newVal);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error MemacAdjustLink(t_Handle h_Memac, e_EnetSpeed speed, bool fullDuplex)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ if ((speed >= e_ENET_SPEED_1000) && (!fullDuplex))
++ RETURN_ERROR(MAJOR, E_CONFLICT,
++ ("Ethernet MAC 1G or 10G does not support half-duplex"));
++
++ fman_memac_adjust_link(p_Memac->p_MemMap,
++ (enum enet_interface)ENET_INTERFACE_FROM_MODE(p_Memac->enetMode),
++ (enum enet_speed)speed,
++ fullDuplex);
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* Memac Configs modification functions */
++/*****************************************************************************/
++
++/* ......................................................................... */
++
++static t_Error MemacConfigLoopback(t_Handle h_Memac, bool newVal)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ p_Memac->p_MemacDriverParam->loopback_enable = newVal;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacConfigWan(t_Handle h_Memac, bool newVal)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ p_Memac->p_MemacDriverParam->wan_mode_enable = newVal;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacConfigMaxFrameLength(t_Handle h_Memac, uint16_t newVal)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ p_Memac->p_MemacDriverParam->max_frame_length = newVal;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacConfigPad(t_Handle h_Memac, bool newVal)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ p_Memac->p_MemacDriverParam->pad_enable = newVal;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacConfigLengthCheck(t_Handle h_Memac, bool newVal)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ p_Memac->p_MemacDriverParam->no_length_check_enable = !newVal;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacConfigException(t_Handle h_Memac, e_FmMacExceptions exception, bool enable)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ GET_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_Memac->exceptions |= bitMask;
++ else
++ p_Memac->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacConfigResetOnInit(t_Handle h_Memac, bool enable)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ p_Memac->p_MemacDriverParam->reset_on_init = enable;
++
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* Memac Run Time API functions */
++/*****************************************************************************/
++
++/* ......................................................................... */
++
++static t_Error MemacSetTxPauseFrames(t_Handle h_Memac,
++ uint8_t priority,
++ uint16_t pauseTime,
++ uint16_t threshTime)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ if (priority != 0xFF)
++ {
++ bool PortConfigured, PreFetchEnabled;
++
++ if (FmGetTnumAgingPeriod(p_Memac->fmMacControllerDriver.h_Fm) == 0)
++ RETURN_ERROR(MAJOR, E_CONFLICT, ("For PFC operation, TNUM aging must be enabled"));
++
++ FmGetPortPreFetchConfiguration(p_Memac->fmMacControllerDriver.h_Fm,
++ p_Memac->fmMacControllerDriver.macId,
++ &PortConfigured,
++ &PreFetchEnabled);
++
++ if ((ENET_SPEED_FROM_MODE(p_Memac->fmMacControllerDriver.enetMode) == e_ENET_SPEED_1000) && !PortConfigured)
++ DBG(INFO, ("For PFC correct operation, prefetch must be configured on the FM Tx PORT"));
++
++ if ((ENET_SPEED_FROM_MODE(p_Memac->fmMacControllerDriver.enetMode) == e_ENET_SPEED_1000) && PortConfigured && !PreFetchEnabled)
++ DBG(WARNING, ("For PFC correct operation, prefetch must be configured on the FM Tx PORT"));
++ }
++
++ fman_memac_set_tx_pause_frames(p_Memac->p_MemMap, priority, pauseTime, threshTime);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacSetTxAutoPauseFrames(t_Handle h_Memac,
++ uint16_t pauseTime)
++{
++ return MemacSetTxPauseFrames(h_Memac, FM_MAC_NO_PFC, pauseTime, 0);
++}
++
++/* ......................................................................... */
++
++static t_Error MemacSetRxIgnorePauseFrames(t_Handle h_Memac, bool en)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ fman_memac_set_rx_ignore_pause_frames(p_Memac->p_MemMap, en);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacSetWakeOnLan(t_Handle h_Memac, bool en)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ fman_memac_set_wol(p_Memac->p_MemMap, en);
++
++ return E_OK;
++}
++
++/* .............................................................................. */
++
++static t_Error MemacEnable1588TimeStamp(t_Handle h_Memac)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++    UNUSED(p_Memac);
++    DBG(WARNING, ("1588 time stamping is always enabled on mEMAC!"));
++
++ return E_OK;
++}
++
++/* Counters handling */
++/* ......................................................................... */
++
++static t_Error MemacGetStatistics(t_Handle h_Memac, t_FmMacStatistics *p_Statistics)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_Statistics, E_NULL_POINTER);
++
++ p_Statistics->eStatPkts64 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R64);
++ p_Statistics->eStatPkts65to127 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R127);
++ p_Statistics->eStatPkts128to255 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R255);
++ p_Statistics->eStatPkts256to511 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R511);
++ p_Statistics->eStatPkts512to1023 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1023);
++ p_Statistics->eStatPkts1024to1518 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1518);
++ p_Statistics->eStatPkts1519to1522 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1519X);
++/* */
++ p_Statistics->eStatFragments = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RFRG);
++ p_Statistics->eStatJabbers = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RJBR);
++
++ p_Statistics->eStatsDropEvents = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RDRP);
++ p_Statistics->eStatCRCAlignErrors = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RALN);
++
++ p_Statistics->eStatUndersizePkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TUND);
++ p_Statistics->eStatOversizePkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_ROVR);
++/* Pause */
++ p_Statistics->reStatPause = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RXPF);
++ p_Statistics->teStatPause = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TXPF);
++
++/* MIB II */
++ p_Statistics->ifInOctets = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_ROCT);
++ p_Statistics->ifInUcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RUCA);
++ p_Statistics->ifInMcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RMCA);
++ p_Statistics->ifInBcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RBCA);
++ p_Statistics->ifInPkts = p_Statistics->ifInUcastPkts
++ + p_Statistics->ifInMcastPkts
++ + p_Statistics->ifInBcastPkts;
++ p_Statistics->ifInDiscards = 0;
++ p_Statistics->ifInErrors = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RERR);
++
++ p_Statistics->ifOutOctets = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TOCT);
++ p_Statistics->ifOutUcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TUCA);
++ p_Statistics->ifOutMcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TMCA);
++ p_Statistics->ifOutBcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TBCA);
++ p_Statistics->ifOutPkts = p_Statistics->ifOutUcastPkts
++ + p_Statistics->ifOutMcastPkts
++ + p_Statistics->ifOutBcastPkts;
++ p_Statistics->ifOutDiscards = 0;
++ p_Statistics->ifOutErrors = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TERR);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacModifyMacAddress (t_Handle h_Memac, t_EnetAddr *p_EnetAddr)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ fman_memac_add_addr_in_paddr(p_Memac->p_MemMap, (uint8_t *)(*p_EnetAddr), 0);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacResetCounters (t_Handle h_Memac)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ fman_memac_reset_stat(p_Memac->p_MemMap);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacAddExactMatchMacAddress(t_Handle h_Memac, t_EnetAddr *p_EthAddr)
++{
++ t_Memac *p_Memac = (t_Memac *) h_Memac;
++ uint64_t ethAddr;
++ uint8_t paddrNum;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ if (ethAddr & GROUP_ADDRESS)
++ /* Multicast address has no effect in PADDR */
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Multicast address"));
++
++ /* Make sure no PADDR contains this address */
++ for (paddrNum = 0; paddrNum < MEMAC_NUM_OF_PADDRS; paddrNum++)
++ if (p_Memac->indAddrRegUsed[paddrNum])
++ if (p_Memac->paddr[paddrNum] == ethAddr)
++ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
++
++ /* Find first unused PADDR */
++ for (paddrNum = 0; paddrNum < MEMAC_NUM_OF_PADDRS; paddrNum++)
++ if (!(p_Memac->indAddrRegUsed[paddrNum]))
++ {
++ /* mark this PADDR as used */
++ p_Memac->indAddrRegUsed[paddrNum] = TRUE;
++ /* store address */
++ p_Memac->paddr[paddrNum] = ethAddr;
++
++ /* put in hardware */
++ fman_memac_add_addr_in_paddr(p_Memac->p_MemMap, (uint8_t*)(*p_EthAddr), paddrNum);
++ p_Memac->numOfIndAddrInRegs++;
++
++ return E_OK;
++ }
++
++ /* No free PADDR */
++ RETURN_ERROR(MAJOR, E_FULL, NO_MSG);
++}
++
++/* ......................................................................... */
++
++static t_Error MemacDelExactMatchMacAddress(t_Handle h_Memac, t_EnetAddr *p_EthAddr)
++{
++ t_Memac *p_Memac = (t_Memac *) h_Memac;
++ uint64_t ethAddr;
++ uint8_t paddrNum;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ /* Find used PADDR containing this address */
++ for (paddrNum = 0; paddrNum < MEMAC_NUM_OF_PADDRS; paddrNum++)
++ {
++ if ((p_Memac->indAddrRegUsed[paddrNum]) &&
++ (p_Memac->paddr[paddrNum] == ethAddr))
++ {
++ /* mark this PADDR as not used */
++ p_Memac->indAddrRegUsed[paddrNum] = FALSE;
++ /* clear in hardware */
++ fman_memac_clear_addr_in_paddr(p_Memac->p_MemMap, paddrNum);
++ p_Memac->numOfIndAddrInRegs--;
++
++ return E_OK;
++ }
++ }
++
++ RETURN_ERROR(MAJOR, E_NOT_FOUND, NO_MSG);
++}
++
++/* ......................................................................... */
++
++static t_Error MemacGetId(t_Handle h_Memac, uint32_t *macId)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ *macId = p_Memac->macId;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++
++static t_Error MemacAddHashMacAddress(t_Handle h_Memac, t_EnetAddr *p_EthAddr)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++ t_EthHashEntry *p_HashEntry;
++ uint32_t hash;
++ uint64_t ethAddr;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ if (!(ethAddr & GROUP_ADDRESS))
++ /* Unicast addresses not supported in hash */
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unicast Address"));
++
++ hash = GetMacAddrHashCode(ethAddr) & HASH_CTRL_ADDR_MASK;
++
++ /* Create element to be added to the driver hash table */
++    p_HashEntry = (t_EthHashEntry *)XX_Malloc(sizeof(t_EthHashEntry));
++    if (!p_HashEntry)
++        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("hash entry"));
++    p_HashEntry->addr = ethAddr;
++ INIT_LIST(&p_HashEntry->node);
++
++ LIST_AddToTail(&(p_HashEntry->node), &(p_Memac->p_MulticastAddrHash->p_Lsts[hash]));
++ fman_memac_set_hash_table(p_Memac->p_MemMap, (hash | HASH_CTRL_MCAST_EN));
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacDelHashMacAddress(t_Handle h_Memac, t_EnetAddr *p_EthAddr)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++ t_EthHashEntry *p_HashEntry = NULL;
++ t_List *p_Pos;
++ uint32_t hash;
++ uint64_t ethAddr;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ hash = GetMacAddrHashCode(ethAddr) & HASH_CTRL_ADDR_MASK;
++
++ LIST_FOR_EACH(p_Pos, &(p_Memac->p_MulticastAddrHash->p_Lsts[hash]))
++ {
++ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos);
++ if (p_HashEntry->addr == ethAddr)
++ {
++ LIST_DelAndInit(&p_HashEntry->node);
++ XX_Free(p_HashEntry);
++ break;
++ }
++ }
++ if (LIST_IsEmpty(&p_Memac->p_MulticastAddrHash->p_Lsts[hash]))
++ fman_memac_set_hash_table(p_Memac->p_MemMap, (hash & ~HASH_CTRL_MCAST_EN));
++
++ return E_OK;
++}
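++
++/* Added note: the hardware can only enable or disable a whole hash bucket,
++ * so the driver mirrors the table in p_MulticastAddrHash and clears the
++ * HASH_CTRL_MCAST_EN bit only once the last address hashing to that bucket
++ * has been removed.
++ */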
++
++
++/* ......................................................................... */
++
++static t_Error MemacSetException(t_Handle h_Memac, e_FmMacExceptions exception, bool enable)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++
++ GET_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_Memac->exceptions |= bitMask;
++ else
++ p_Memac->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ fman_memac_set_exception(p_Memac->p_MemMap, bitMask, enable);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static uint16_t MemacGetMaxFrameLength(t_Handle h_Memac)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_VALUE(p_Memac, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_Memac->p_MemacDriverParam, E_INVALID_STATE, 0);
++
++ return fman_memac_get_max_frame_len(p_Memac->p_MemMap);
++}
++
++static t_Error MemacInitInternalPhy(t_Handle h_Memac)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++ uint8_t i, phyAddr;
++
++ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_SGMII)
++ {
++ /* Configure internal SGMII PHY */
++ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX)
++ SetupSgmiiInternalPhyBaseX(p_Memac, PHY_MDIO_ADDR);
++ else
++ SetupSgmiiInternalPhy(p_Memac, PHY_MDIO_ADDR);
++ }
++ else if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_QSGMII)
++ {
++ /* Configure 4 internal SGMII PHYs */
++ for (i = 0; i < 4; i++)
++ {
++ /* QSGMII PHY address occupies 3 upper bits of 5-bit
++ phyAddress; the lower 2 bits are used to extend
++ register address space and access each one of 4
++ ports inside QSGMII. */
++ phyAddr = (uint8_t)((PHY_MDIO_ADDR << 2) | i);
++ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX)
++ SetupSgmiiInternalPhyBaseX(p_Memac, phyAddr);
++ else
++ SetupSgmiiInternalPhy(p_Memac, phyAddr);
++ }
++ }
++ return E_OK;
++}
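++
++/* Added example of the QSGMII address packing above: with PHY_MDIO_ADDR = 0
++ * the four QSGMII ports sit at MDIO addresses (0 << 2) | i for i = 0..3,
++ * i.e. 0, 1, 2 and 3; a base address of 1 would yield 4, 5, 6 and 7. The
++ * upper 3 bits select the QSGMII block, the lower 2 bits the port inside it.
++ */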
++
++/*****************************************************************************/
++/* mEMAC Init & Free API */
++/*****************************************************************************/
++
++/* ......................................................................... */
++void *g_MemacRegs;
++static t_Error MemacInit(t_Handle h_Memac)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++ struct memac_cfg *p_MemacDriverParam;
++ enum enet_interface enet_interface;
++ enum enet_speed enet_speed;
++ t_EnetAddr ethAddr;
++ e_FmMacType portType;
++ t_Error err;
++ bool slow_10g_if = FALSE;
++    if (p_Memac->macId == 3) /* This is a quick workaround */
++ g_MemacRegs = p_Memac->p_MemMap;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->fmMacControllerDriver.h_Fm, E_INVALID_HANDLE);
++
++ FM_GetRevision(p_Memac->fmMacControllerDriver.h_Fm, &p_Memac->fmMacControllerDriver.fmRevInfo);
++ if (p_Memac->fmMacControllerDriver.fmRevInfo.majorRev == 6 &&
++ p_Memac->fmMacControllerDriver.fmRevInfo.minorRev == 4)
++ slow_10g_if = TRUE;
++
++ CHECK_INIT_PARAMETERS(p_Memac, CheckInitParameters);
++
++ p_MemacDriverParam = p_Memac->p_MemacDriverParam;
++
++ portType =
++ ((ENET_SPEED_FROM_MODE(p_Memac->enetMode) < e_ENET_SPEED_10000) ? e_FM_MAC_1G : e_FM_MAC_10G);
++
++ /* First, reset the MAC if desired. */
++ if (p_MemacDriverParam->reset_on_init)
++ fman_memac_reset(p_Memac->p_MemMap);
++
++ /* MAC Address */
++ MAKE_ENET_ADDR_FROM_UINT64(p_Memac->addr, ethAddr);
++ fman_memac_add_addr_in_paddr(p_Memac->p_MemMap, (uint8_t*)ethAddr, 0);
++
++ enet_interface = (enum enet_interface) ENET_INTERFACE_FROM_MODE(p_Memac->enetMode);
++ enet_speed = (enum enet_speed) ENET_SPEED_FROM_MODE(p_Memac->enetMode);
++
++ fman_memac_init(p_Memac->p_MemMap,
++ p_Memac->p_MemacDriverParam,
++ enet_interface,
++ enet_speed,
++ slow_10g_if,
++ p_Memac->exceptions);
++
++#ifdef FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320
++ {
++ uint32_t tmpReg = 0;
++
++ FM_GetRevision(p_Memac->fmMacControllerDriver.h_Fm, &p_Memac->fmMacControllerDriver.fmRevInfo);
++ /* check the FMAN version - the bug exists only in rev1 */
++ if ((p_Memac->fmMacControllerDriver.fmRevInfo.majorRev == 6) &&
++ (p_Memac->fmMacControllerDriver.fmRevInfo.minorRev == 0))
++ {
++            /* The MAC strips the CRC from received frames - this
++               workaround should decrease the likelihood of the bug
++               appearing */
++ tmpReg = GET_UINT32(p_Memac->p_MemMap->command_config);
++ tmpReg &= ~CMD_CFG_CRC_FWD;
++ WRITE_UINT32(p_Memac->p_MemMap->command_config, tmpReg);
++ /* DBG(WARNING, ("mEMAC strips CRC from received frames as part of A006320 errata workaround"));*/
++ }
++ }
++#endif /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 */
++
++ MemacInitInternalPhy(h_Memac);
++
++ /* Max Frame Length */
++ err = FmSetMacMaxFrame(p_Memac->fmMacControllerDriver.h_Fm,
++ portType,
++ p_Memac->fmMacControllerDriver.macId,
++ p_MemacDriverParam->max_frame_length);
++ if (err)
++        RETURN_ERROR(MAJOR, err, ("setting MAC max frame length FAILED"));
++
++ p_Memac->p_MulticastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
++ if (!p_Memac->p_MulticastAddrHash)
++ {
++ FreeInitResources(p_Memac);
++        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("multicast hash table allocation FAILED"));
++ }
++
++ p_Memac->p_UnicastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
++ if (!p_Memac->p_UnicastAddrHash)
++ {
++ FreeInitResources(p_Memac);
++        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("unicast hash table allocation FAILED"));
++ }
++
++ FmRegisterIntr(p_Memac->fmMacControllerDriver.h_Fm,
++ (portType == e_FM_MAC_10G) ? e_FM_MOD_10G_MAC : e_FM_MOD_1G_MAC,
++ p_Memac->macId,
++ e_FM_INTR_TYPE_ERR,
++ MemacErrException,
++ p_Memac);
++
++ FmRegisterIntr(p_Memac->fmMacControllerDriver.h_Fm,
++ (portType == e_FM_MAC_10G) ? e_FM_MOD_10G_MAC : e_FM_MOD_1G_MAC,
++ p_Memac->macId,
++ e_FM_INTR_TYPE_NORMAL,
++ MemacException,
++ p_Memac);
++
++ XX_Free(p_MemacDriverParam);
++ p_Memac->p_MemacDriverParam = NULL;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error MemacFree(t_Handle h_Memac)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++
++ if (p_Memac->p_MemacDriverParam)
++ {
++ /* Called after config */
++ XX_Free(p_Memac->p_MemacDriverParam);
++ p_Memac->p_MemacDriverParam = NULL;
++ }
++ else
++ /* Called after init */
++ FreeInitResources(p_Memac);
++
++ XX_Free(p_Memac);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static void InitFmMacControllerDriver(t_FmMacControllerDriver *p_FmMacControllerDriver)
++{
++ p_FmMacControllerDriver->f_FM_MAC_Init = MemacInit;
++ p_FmMacControllerDriver->f_FM_MAC_Free = MemacFree;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetStatistics = NULL;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback = MemacConfigLoopback;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength = MemacConfigMaxFrameLength;
++
++ p_FmMacControllerDriver->f_FM_MAC_ConfigWan = MemacConfigWan;
++
++ p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc = MemacConfigPad;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex = NULL; /* half-duplex is detected automatically */
++ p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck = MemacConfigLengthCheck;
++
++ p_FmMacControllerDriver->f_FM_MAC_ConfigException = MemacConfigException;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit = MemacConfigResetOnInit;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetException = MemacSetException;
++
++ p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp = MemacEnable1588TimeStamp; /* always enabled */
++ p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp = NULL;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous = MemacSetPromiscuous;
++ p_FmMacControllerDriver->f_FM_MAC_AdjustLink = MemacAdjustLink;
++ p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg = NULL;
++
++ p_FmMacControllerDriver->f_FM_MAC_Enable = MemacEnable;
++ p_FmMacControllerDriver->f_FM_MAC_Disable = MemacDisable;
++ p_FmMacControllerDriver->f_FM_MAC_Resume = MemacInitInternalPhy;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = MemacSetTxAutoPauseFrames;
++ p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = MemacSetTxPauseFrames;
++ p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames = MemacSetRxIgnorePauseFrames;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan = MemacSetWakeOnLan;
++
++ p_FmMacControllerDriver->f_FM_MAC_ResetCounters = MemacResetCounters;
++ p_FmMacControllerDriver->f_FM_MAC_GetStatistics = MemacGetStatistics;
++
++ p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr = MemacModifyMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr = MemacAddHashMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr = MemacDelHashMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr = MemacAddExactMatchMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr = MemacDelExactMatchMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_GetId = MemacGetId;
++ p_FmMacControllerDriver->f_FM_MAC_GetVersion = NULL;
++ p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength = MemacGetMaxFrameLength;
++
++ p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg = MEMAC_MII_WritePhyReg;
++ p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg = MEMAC_MII_ReadPhyReg;
++}
++
++
++/*****************************************************************************/
++/* mEMAC Config Main Entry */
++/*****************************************************************************/
++
++/* ......................................................................... */
++
++t_Handle MEMAC_Config(t_FmMacParams *p_FmMacParam)
++{
++ t_Memac *p_Memac;
++ struct memac_cfg *p_MemacDriverParam;
++ uintptr_t baseAddr;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_NULL_POINTER, NULL);
++
++ baseAddr = p_FmMacParam->baseAddr;
++ /* Allocate memory for the mEMAC data structure */
++ p_Memac = (t_Memac *)XX_Malloc(sizeof(t_Memac));
++ if (!p_Memac)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("mEMAC driver structure"));
++ return NULL;
++ }
++ memset(p_Memac, 0, sizeof(t_Memac));
++ InitFmMacControllerDriver(&p_Memac->fmMacControllerDriver);
++
++ /* Allocate memory for the mEMAC driver parameters data structure */
++ p_MemacDriverParam = (struct memac_cfg *)XX_Malloc(sizeof(struct memac_cfg));
++ if (!p_MemacDriverParam)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("mEMAC driver parameters"));
++ XX_Free(p_Memac);
++ return NULL;
++ }
++ memset(p_MemacDriverParam, 0, sizeof(struct memac_cfg));
++
++ /* Plant parameter structure pointer */
++ p_Memac->p_MemacDriverParam = p_MemacDriverParam;
++
++ fman_memac_defconfig(p_MemacDriverParam);
++
++ p_Memac->addr = ENET_ADDR_TO_UINT64(p_FmMacParam->addr);
++
++ p_Memac->p_MemMap = (struct memac_regs *)UINT_TO_PTR(baseAddr);
++ p_Memac->p_MiiMemMap = (struct memac_mii_access_mem_map*)UINT_TO_PTR(baseAddr + MEMAC_TO_MII_OFFSET);
++
++ p_Memac->enetMode = p_FmMacParam->enetMode;
++ p_Memac->macId = p_FmMacParam->macId;
++ p_Memac->exceptions = MEMAC_default_exceptions;
++ p_Memac->f_Exception = p_FmMacParam->f_Exception;
++ p_Memac->f_Event = p_FmMacParam->f_Event;
++ p_Memac->h_App = p_FmMacParam->h_App;
++
++ return p_Memac;
++}
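++
++/* Illustrative life-cycle sketch (added for clarity, not part of the
++ * original driver): the expected call order for this module. Real callers
++ * go through the t_FmMacControllerDriver function pointers set up in
++ * InitFmMacControllerDriver(); e_COMM_MODE_RX_AND_TX is assumed to exist
++ * alongside the e_COMM_MODE_RX/TX values used above.
++ */
++#if 0 /* example only - not compiled into the driver */
++static t_Error example_bring_up_memac(t_FmMacParams *p_FmMacParam)
++{
++	t_Handle h_Memac;
++	t_Error err;
++
++	/* 1. Allocate the MAC object and apply default configuration */
++	h_Memac = MEMAC_Config(p_FmMacParam);
++	if (!h_Memac)
++		return E_NO_MEMORY;
++
++	/* 2. Optionally override defaults while p_MemacDriverParam exists */
++	MemacConfigResetOnInit(h_Memac, TRUE);
++
++	/* 3. Commit the configuration to hardware and free the param struct */
++	err = MemacInit(h_Memac);
++	if (err != E_OK)
++		return err;
++
++	/* 4. Enable both Rx and Tx */
++	return MemacEnable(h_Memac, e_COMM_MODE_RX_AND_TX);
++}
++#endif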
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.h
+new file mode 100644
+index 00000000..2fd89dae
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.h
+@@ -0,0 +1,110 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File memac.h
++
++ @Description FM Multirate Ethernet MAC (mEMAC)
++*//***************************************************************************/
++#ifndef __MEMAC_H
++#define __MEMAC_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++
++#include "fsl_fman_memac_mii_acc.h"
++#include "fm_mac.h"
++#include "fsl_fman_memac.h"
++
++
++#define MEMAC_default_exceptions \
++ ((uint32_t)(MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI))
++
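++/* Map a generic e_FmMacExceptions value onto the matching MEMAC_IMASK_*
++   interrupt bit; bitMask is left 0 for exceptions this MAC does not
++   support, which callers are expected to treat as an invalid value. */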
++#define GET_EXCEPTION_FLAG(bitMask, exception) switch (exception){ \
++ case e_FM_MAC_EX_10G_1TX_ECC_ER: \
++ bitMask = MEMAC_IMASK_TECC_ER; break; \
++ case e_FM_MAC_EX_10G_RX_ECC_ER: \
++ bitMask = MEMAC_IMASK_RECC_ER; break; \
++ case e_FM_MAC_EX_TS_FIFO_ECC_ERR: \
++ bitMask = MEMAC_IMASK_TSECC_ER; break; \
++ case e_FM_MAC_EX_MAGIC_PACKET_INDICATION: \
++ bitMask = MEMAC_IMASK_MGI; break; \
++ default: bitMask = 0;break;}
++
++
++typedef struct
++{
++ t_FmMacControllerDriver fmMacControllerDriver; /**< Upper Mac control block */
++ t_Handle h_App; /**< Handle to the upper layer application */
++ struct memac_regs *p_MemMap; /**< Pointer to MAC memory mapped registers */
++ struct memac_mii_access_mem_map *p_MiiMemMap; /**< Pointer to MII memory mapped registers */
++ uint64_t addr; /**< MAC address of device */
++ e_EnetMode enetMode; /**< Ethernet physical interface */
++ t_FmMacExceptionCallback *f_Exception;
++ int mdioIrq;
++ t_FmMacExceptionCallback *f_Event;
++ bool indAddrRegUsed[MEMAC_NUM_OF_PADDRS]; /**< Whether a particular individual address recognition register is being used */
++ uint64_t paddr[MEMAC_NUM_OF_PADDRS]; /**< MAC address for particular individual address recognition register */
++ uint8_t numOfIndAddrInRegs; /**< Number of individual addresses in registers for this station. */
++ t_EthHash *p_MulticastAddrHash; /**< Pointer to driver's global address hash table */
++ t_EthHash *p_UnicastAddrHash; /**< Pointer to driver's individual address hash table */
++ bool debugMode;
++ uint8_t macId;
++ uint32_t exceptions;
++ struct memac_cfg *p_MemacDriverParam;
++} t_Memac;
++
++
++/* Internal PHY access */
++#define PHY_MDIO_ADDR 0
++
++/* Internal PHY Registers - SGMII */
++#define PHY_SGMII_CR_PHY_RESET 0x8000
++#define PHY_SGMII_CR_RESET_AN 0x0200
++#define PHY_SGMII_CR_DEF_VAL 0x1140
++#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001
++#define PHY_SGMII_DEV_ABILITY_1000X 0x01A0
++#define PHY_SGMII_IF_SPEED_GIGABIT 0x0008
++#define PHY_SGMII_IF_MODE_AN 0x0002
++#define PHY_SGMII_IF_MODE_SGMII 0x0001
++#define PHY_SGMII_IF_MODE_1000X 0x0000
++
++
++#define MEMAC_TO_MII_OFFSET 0x030 /* Offset from the MEM map to the MDIO mem map */
++
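++/* PHY register access through the mEMAC's own MDIO block (mapped at
++   MEMAC_TO_MII_OFFSET from the MAC registers); the interface speed
++   derived from enetMode is passed down so the low-level helper can
++   select the appropriate MDIO framing. */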
++t_Error MEMAC_MII_WritePhyReg(t_Handle h_Memac, uint8_t phyAddr, uint8_t reg, uint16_t data);
++t_Error MEMAC_MII_ReadPhyReg(t_Handle h_Memac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
++
++
++#endif /* __MEMAC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c
+new file mode 100644
+index 00000000..56eaffbc
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c
+@@ -0,0 +1,78 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "fm_mac.h"
++#include "memac.h"
++#include "xx_ext.h"
++
++#include "fm_common.h"
++#include "memac_mii_acc.h"
++
++
++/*****************************************************************************/
++t_Error MEMAC_MII_WritePhyReg(t_Handle h_Memac,
++ uint8_t phyAddr,
++ uint8_t reg,
++ uint16_t data)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MiiMemMap, E_INVALID_HANDLE);
++
++ return (t_Error)fman_memac_mii_write_phy_reg(p_Memac->p_MiiMemMap,
++ phyAddr,
++ reg,
++ data,
++ (enum enet_speed)ENET_SPEED_FROM_MODE(p_Memac->enetMode));
++}
++
++/*****************************************************************************/
++t_Error MEMAC_MII_ReadPhyReg(t_Handle h_Memac,
++ uint8_t phyAddr,
++ uint8_t reg,
++ uint16_t *p_Data)
++{
++ t_Memac *p_Memac = (t_Memac *)h_Memac;
++
++ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MiiMemMap, E_INVALID_HANDLE);
++
++ return fman_memac_mii_read_phy_reg(p_Memac->p_MiiMemMap,
++ phyAddr,
++ reg,
++ p_Data,
++ (enum enet_speed)ENET_SPEED_FROM_MODE(p_Memac->enetMode));
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h
+new file mode 100644
+index 00000000..325ec082
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h
+@@ -0,0 +1,73 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef __MEMAC_MII_ACC_H
++#define __MEMAC_MII_ACC_H
++
++#include "std_ext.h"
++
++
++/* MII Management Registers */
++#define MDIO_CFG_CLK_DIV_MASK 0x0080ff80
++#define MDIO_CFG_CLK_DIV_SHIFT 7
++#define MDIO_CFG_HOLD_MASK 0x0000001c
++#define MDIO_CFG_ENC45 0x00000040
++#define MDIO_CFG_READ_ERR 0x00000002
++#define MDIO_CFG_BSY 0x00000001
++
++#define MDIO_CTL_PHY_ADDR_SHIFT 5
++#define MDIO_CTL_READ 0x00008000
++
++#define MDIO_DATA_BSY 0x80000000
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++/*----------------------------------------------------*/
++/* MII Configuration Control Memory Map Registers */
++/*----------------------------------------------------*/
++typedef struct t_MemacMiiAccessMemMap
++{
++ volatile uint32_t mdio_cfg; /* 0x030 */
++ volatile uint32_t mdio_ctrl; /* 0x034 */
++ volatile uint32_t mdio_data; /* 0x038 */
++ volatile uint32_t mdio_addr; /* 0x03c */
++} t_MemacMiiAccessMemMap;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++#endif /* __MEMAC_MII_ACC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.c
+new file mode 100644
+index 00000000..9b136a69
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.c
+@@ -0,0 +1,975 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File tgec.c
++
++ @Description FM 10G MAC ...
++*//***************************************************************************/
++
++#include "std_ext.h"
++#include "string_ext.h"
++#include "error_ext.h"
++#include "xx_ext.h"
++#include "endian_ext.h"
++#include "debug_ext.h"
++#include "crc_mac_addr_ext.h"
++
++#include "fm_common.h"
++#include "fsl_fman_tgec.h"
++#include "tgec.h"
++
++
++/*****************************************************************************/
++/* Internal routines */
++/*****************************************************************************/
++
++static t_Error CheckInitParameters(t_Tgec *p_Tgec)
++{
++ if (ENET_SPEED_FROM_MODE(p_Tgec->enetMode) < e_ENET_SPEED_10000)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet 10G MAC driver only support 10G speed"));
++#if (FM_MAX_NUM_OF_10G_MACS > 0)
++ if (p_Tgec->macId >= FM_MAX_NUM_OF_10G_MACS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("macId of 10G can not be greater than 0"));
++#endif /* (FM_MAX_NUM_OF_10G_MACS > 0) */
++
++ if (p_Tgec->addr == 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet 10G MAC Must have a valid MAC Address"));
++ if (!p_Tgec->f_Exception)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("uninitialized f_Exception"));
++ if (!p_Tgec->f_Event)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("uninitialized f_Event"));
++#ifdef FM_LEN_CHECK_ERRATA_FMAN_SW002
++ if (!p_Tgec->p_TgecDriverParam->no_length_check_enable)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("LengthCheck!"));
++#endif /* FM_LEN_CHECK_ERRATA_FMAN_SW002 */
++ return E_OK;
++}
++
++/* ......................................................................... */
++
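++/* Hash code for a 48-bit MAC address: the CRC produced by
++   GET_MAC_ADDR_CRC, bit-reversed by GetMirror32; callers index the
++   hash table with the top bits of the result. */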
++static uint32_t GetMacAddrHashCode(uint64_t ethAddr)
++{
++ uint32_t crc;
++
++ /* CRC calculation */
++ GET_MAC_ADDR_CRC(ethAddr, crc);
++
++ crc = GetMirror32(crc);
++
++ return crc;
++}
++
++/* ......................................................................... */
++
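++/* Error-interrupt service routine: latch the pending non-MDIO events,
++   filter them against the currently enabled interrupt mask, acknowledge
++   them, then raise one f_Exception callback per event bit. */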
++static void TgecErrException(t_Handle h_Tgec)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ uint32_t event;
++ struct tgec_regs *p_TgecMemMap = p_Tgec->p_MemMap;
++
++ /* do not handle MDIO events */
++ event = fman_tgec_get_event(p_TgecMemMap, ~(TGEC_IMASK_MDIO_SCAN_EVENT | TGEC_IMASK_MDIO_CMD_CMPL));
++ event &= fman_tgec_get_interrupt_mask(p_TgecMemMap);
++
++ fman_tgec_ack_event(p_TgecMemMap, event);
++
++ if (event & TGEC_IMASK_REM_FAULT)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_REM_FAULT);
++ if (event & TGEC_IMASK_LOC_FAULT)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_LOC_FAULT);
++ if (event & TGEC_IMASK_TX_ECC_ER)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_1TX_ECC_ER);
++ if (event & TGEC_IMASK_TX_FIFO_UNFL)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_TX_FIFO_UNFL);
++ if (event & TGEC_IMASK_TX_FIFO_OVFL)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_TX_FIFO_OVFL);
++ if (event & TGEC_IMASK_TX_ER)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_TX_ER);
++ if (event & TGEC_IMASK_RX_FIFO_OVFL)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_FIFO_OVFL);
++ if (event & TGEC_IMASK_RX_ECC_ER)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_ECC_ER);
++ if (event & TGEC_IMASK_RX_JAB_FRM)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_JAB_FRM);
++ if (event & TGEC_IMASK_RX_OVRSZ_FRM)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_OVRSZ_FRM);
++ if (event & TGEC_IMASK_RX_RUNT_FRM)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_RUNT_FRM);
++ if (event & TGEC_IMASK_RX_FRAG_FRM)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_FRAG_FRM);
++ if (event & TGEC_IMASK_RX_LEN_ER)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_LEN_ER);
++ if (event & TGEC_IMASK_RX_CRC_ER)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_CRC_ER);
++ if (event & TGEC_IMASK_RX_ALIGN_ER)
++ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_ALIGN_ER);
++}
++
++/* ......................................................................... */
++
++static void TgecException(t_Handle h_Tgec)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ uint32_t event;
++ struct tgec_regs *p_TgecMemMap = p_Tgec->p_MemMap;
++
++ /* handle only MDIO events */
++ event = fman_tgec_get_event(p_TgecMemMap, (TGEC_IMASK_MDIO_SCAN_EVENT | TGEC_IMASK_MDIO_CMD_CMPL));
++ event &= fman_tgec_get_interrupt_mask(p_TgecMemMap);
++
++ fman_tgec_ack_event(p_TgecMemMap, event);
++
++ if (event & TGEC_IMASK_MDIO_SCAN_EVENT)
++ p_Tgec->f_Event(p_Tgec->h_App, e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO);
++ if (event & TGEC_IMASK_MDIO_CMD_CMPL)
++ p_Tgec->f_Event(p_Tgec->h_App, e_FM_MAC_EX_10G_MDIO_CMD_CMPL);
++}
++
++/* ......................................................................... */
++
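++/* Undo the resource acquisition done at init time: detach the MDIO
++   interrupt if one was taken, unregister the FM error interrupt, and
++   release both driver hash tables. */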
++static void FreeInitResources(t_Tgec *p_Tgec)
++{
++ if (p_Tgec->mdioIrq != NO_IRQ)
++ {
++ XX_DisableIntr(p_Tgec->mdioIrq);
++ XX_FreeIntr(p_Tgec->mdioIrq);
++ }
++
++ FmUnregisterIntr(p_Tgec->fmMacControllerDriver.h_Fm, e_FM_MOD_10G_MAC, p_Tgec->macId, e_FM_INTR_TYPE_ERR);
++
++ /* release the driver's group hash table */
++ FreeHashTable(p_Tgec->p_MulticastAddrHash);
++ p_Tgec->p_MulticastAddrHash = NULL;
++
++ /* release the driver's individual hash table */
++ FreeHashTable(p_Tgec->p_UnicastAddrHash);
++ p_Tgec->p_UnicastAddrHash = NULL;
++}
++
++
++/*****************************************************************************/
++/* 10G MAC API routines */
++/*****************************************************************************/
++
++/* ......................................................................... */
++
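++/* mode is a bitmask: e_COMM_MODE_RX and/or e_COMM_MODE_TX select which
++   datapath directions the enable/disable calls below act on. */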
++static t_Error TgecEnable(t_Handle h_Tgec, e_CommMode mode)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ fman_tgec_enable(p_Tgec->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecDisable (t_Handle h_Tgec, e_CommMode mode)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ fman_tgec_disable(p_Tgec->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecSetPromiscuous(t_Handle h_Tgec, bool newVal)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ fman_tgec_set_promiscuous(p_Tgec->p_MemMap, newVal);
++
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* Tgec Configs modification functions */
++/*****************************************************************************/
++
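++/* The TgecConfig* routines below may only be called between TGEC_Config()
++   and the init stage: they record settings in the staging tgec_cfg
++   structure, whose presence is enforced by the p_TgecDriverParam sanity
++   check in each routine. */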
++/* ......................................................................... */
++
++static t_Error TgecConfigLoopback(t_Handle h_Tgec, bool newVal)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ p_Tgec->p_TgecDriverParam->loopback_enable = newVal;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecConfigWan(t_Handle h_Tgec, bool newVal)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ p_Tgec->p_TgecDriverParam->wan_mode_enable = newVal;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecConfigMaxFrameLength(t_Handle h_Tgec, uint16_t newVal)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ p_Tgec->p_TgecDriverParam->max_frame_length = newVal;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
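++/* newVal enables frame length checking; the hardware configuration field
++   stores the inverse (no_length_check_enable), hence the negation. */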
++static t_Error TgecConfigLengthCheck(t_Handle h_Tgec, bool newVal)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ p_Tgec->p_TgecDriverParam->no_length_check_enable = !newVal;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecConfigException(t_Handle h_Tgec, e_FmMacExceptions exception, bool enable)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ GET_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_Tgec->exceptions |= bitMask;
++ else
++ p_Tgec->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ return E_OK;
++}
++
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++/* ......................................................................... */
++
++static t_Error TgecConfigSkipFman11Workaround(t_Handle h_Tgec)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ p_Tgec->p_TgecDriverParam->skip_fman11_workaround = TRUE;
++
++ return E_OK;
++}
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++
++
++/*****************************************************************************/
++/* Tgec Run Time API functions */
++/*****************************************************************************/
++
++/* ......................................................................... */
++/* Backward compatibility; will be removed in the future. */
++static t_Error TgecTxMacPause(t_Handle h_Tgec, uint16_t pauseTime)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ fman_tgec_set_tx_pause_frames(p_Tgec->p_MemMap, pauseTime);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecSetTxPauseFrames(t_Handle h_Tgec,
++ uint8_t priority,
++ uint16_t pauseTime,
++ uint16_t threshTime)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ UNUSED(priority); UNUSED(threshTime);
++
++ fman_tgec_set_tx_pause_frames(p_Tgec->p_MemMap, pauseTime);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecRxIgnoreMacPause(t_Handle h_Tgec, bool en)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ fman_tgec_set_rx_ignore_pause_frames(p_Tgec->p_MemMap, en);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
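++/* Snapshot the hardware statistics block: RMON size buckets, error and
++   pause counters, and MIB-II interface counters. Aggregate packet counts
++   are derived from the unicast/multicast/broadcast counters, and the
++   discard counters are always reported as zero. */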
++static t_Error TgecGetStatistics(t_Handle h_Tgec, t_FmMacStatistics *p_Statistics)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ struct tgec_regs *p_TgecMemMap;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_Statistics, E_NULL_POINTER);
++
++ p_TgecMemMap = p_Tgec->p_MemMap;
++
++ p_Statistics->eStatPkts64 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R64);
++ p_Statistics->eStatPkts65to127 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R127);
++ p_Statistics->eStatPkts128to255 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R255);
++ p_Statistics->eStatPkts256to511 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R511);
++ p_Statistics->eStatPkts512to1023 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R1023);
++ p_Statistics->eStatPkts1024to1518 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R1518);
++ p_Statistics->eStatPkts1519to1522 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R1519X);
++/* Error statistics */
++ p_Statistics->eStatFragments = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TRFRG);
++ p_Statistics->eStatJabbers = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TRJBR);
++
++ p_Statistics->eStatsDropEvents = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RDRP);
++ p_Statistics->eStatCRCAlignErrors = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RALN);
++
++ p_Statistics->eStatUndersizePkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TRUND);
++ p_Statistics->eStatOversizePkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TROVR);
++/* Pause */
++ p_Statistics->reStatPause = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RXPF);
++ p_Statistics->teStatPause = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TXPF);
++
++/* MIB II */
++ p_Statistics->ifInOctets = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_ROCT);
++ p_Statistics->ifInUcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RUCA);
++ p_Statistics->ifInMcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RMCA);
++ p_Statistics->ifInBcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RBCA);
++ p_Statistics->ifInPkts = p_Statistics->ifInUcastPkts
++ + p_Statistics->ifInMcastPkts
++ + p_Statistics->ifInBcastPkts;
++ p_Statistics->ifInDiscards = 0;
++ p_Statistics->ifInErrors = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RERR);
++
++ p_Statistics->ifOutOctets = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TOCT);
++ p_Statistics->ifOutUcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TUCA);
++ p_Statistics->ifOutMcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TMCA);
++ p_Statistics->ifOutBcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TBCA);
++ p_Statistics->ifOutPkts = p_Statistics->ifOutUcastPkts
++ + p_Statistics->ifOutMcastPkts
++ + p_Statistics->ifOutBcastPkts;
++ p_Statistics->ifOutDiscards = 0;
++ p_Statistics->ifOutErrors = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TERR);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecEnable1588TimeStamp(t_Handle h_Tgec)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ fman_tgec_enable_1588_time_stamp(p_Tgec->p_MemMap, 1);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecDisable1588TimeStamp(t_Handle h_Tgec)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ fman_tgec_enable_1588_time_stamp(p_Tgec->p_MemMap, 0);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecModifyMacAddress (t_Handle h_Tgec, t_EnetAddr *p_EnetAddr)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ p_Tgec->addr = ENET_ADDR_TO_UINT64(*p_EnetAddr);
++ fman_tgec_set_mac_address(p_Tgec->p_MemMap, (uint8_t *)(*p_EnetAddr));
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecResetCounters (t_Handle h_Tgec)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ fman_tgec_reset_stat(p_Tgec->p_MemMap);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
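++/* Install a unicast address in a free individual-address (PADDR)
++   register: multicast addresses are rejected, duplicates return
++   E_ALREADY_EXISTS, and E_FULL is returned once all TGEC_NUM_OF_PADDRS
++   entries are in use. */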
++static t_Error TgecAddExactMatchMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr)
++{
++ t_Tgec *p_Tgec = (t_Tgec *) h_Tgec;
++ uint64_t ethAddr;
++ uint8_t paddrNum;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ if (ethAddr & GROUP_ADDRESS)
++ /* Multicast address has no effect in PADDR */
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Multicast address"));
++
++ /* Make sure no PADDR contains this address */
++ for (paddrNum = 0; paddrNum < TGEC_NUM_OF_PADDRS; paddrNum++)
++ if (p_Tgec->indAddrRegUsed[paddrNum])
++ if (p_Tgec->paddr[paddrNum] == ethAddr)
++ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
++
++ /* Find first unused PADDR */
++ for (paddrNum = 0; paddrNum < TGEC_NUM_OF_PADDRS; paddrNum++)
++ {
++ if (!(p_Tgec->indAddrRegUsed[paddrNum]))
++ {
++ /* mark this PADDR as used */
++ p_Tgec->indAddrRegUsed[paddrNum] = TRUE;
++ /* store address */
++ p_Tgec->paddr[paddrNum] = ethAddr;
++
++ /* put in hardware */
++ fman_tgec_add_addr_in_paddr(p_Tgec->p_MemMap, (uint8_t*)(*p_EthAddr)/* , paddrNum */);
++ p_Tgec->numOfIndAddrInRegs++;
++
++ return E_OK;
++ }
++ }
++
++ /* No free PADDR */
++ RETURN_ERROR(MAJOR, E_FULL, NO_MSG);
++}
++
++/* ......................................................................... */
++
++static t_Error TgecDelExactMatchMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr)
++{
++ t_Tgec *p_Tgec = (t_Tgec *) h_Tgec;
++ uint64_t ethAddr;
++ uint8_t paddrNum;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ /* Find used PADDR containing this address */
++ for (paddrNum = 0; paddrNum < TGEC_NUM_OF_PADDRS; paddrNum++)
++ {
++ if ((p_Tgec->indAddrRegUsed[paddrNum]) &&
++ (p_Tgec->paddr[paddrNum] == ethAddr))
++ {
++ /* mark this PADDR as not used */
++ p_Tgec->indAddrRegUsed[paddrNum] = FALSE;
++ /* clear in hardware */
++ fman_tgec_clear_addr_in_paddr(p_Tgec->p_MemMap /*, paddrNum */);
++ p_Tgec->numOfIndAddrInRegs--;
++
++ return E_OK;
++ }
++ }
++
++ RETURN_ERROR(MAJOR, E_NOT_FOUND, NO_MSG);
++}
++
++/* ......................................................................... */
++
++static t_Error TgecAddHashMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ t_EthHashEntry *p_HashEntry;
++ uint32_t crc;
++ uint32_t hash;
++ uint64_t ethAddr;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
++
++ if (!(ethAddr & GROUP_ADDRESS))
++ /* Unicast addresses not supported in hash */
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unicast Address"));
++
++ /* CRC calculation */
++ crc = GetMacAddrHashCode(ethAddr);
++
++ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; /* Take 9 MSB bits */
++
++ /* Create element to be added to the driver hash table */
++ p_HashEntry = (t_EthHashEntry *)XX_Malloc(sizeof(t_EthHashEntry));
++ if (!p_HashEntry)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("hash table entry"));
++ p_HashEntry->addr = ethAddr;
++ INIT_LIST(&p_HashEntry->node);
++
++ LIST_AddToTail(&(p_HashEntry->node), &(p_Tgec->p_MulticastAddrHash->p_Lsts[hash]));
++ fman_tgec_set_hash_table(p_Tgec->p_MemMap, (hash | TGEC_HASH_MCAST_EN));
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecDelHashMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ t_EthHashEntry *p_HashEntry = NULL;
++ t_List *p_Pos;
++ uint32_t crc;
++ uint32_t hash;
++ uint64_t ethAddr;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ ethAddr = ((*(uint64_t *)p_EthAddr) >> 16);
++
++ /* CRC calculation */
++ crc = GetMacAddrHashCode(ethAddr);
++
++ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; /* Take 9 MSB bits */
++
++ LIST_FOR_EACH(p_Pos, &(p_Tgec->p_MulticastAddrHash->p_Lsts[hash]))
++ {
++ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos);
++ if (p_HashEntry->addr == ethAddr)
++ {
++ LIST_DelAndInit(&p_HashEntry->node);
++ XX_Free(p_HashEntry);
++ break;
++ }
++ }
++ if (LIST_IsEmpty(&p_Tgec->p_MulticastAddrHash->p_Lsts[hash]))
++ fman_tgec_set_hash_table(p_Tgec->p_MemMap, (hash & ~TGEC_HASH_MCAST_EN));
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecGetId(t_Handle h_Tgec, uint32_t *macId)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ UNUSED(p_Tgec);
++ UNUSED(macId);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("TgecGetId Not Supported"));
++}
++
++/* ......................................................................... */
++
++static t_Error TgecGetVersion(t_Handle h_Tgec, uint32_t *macVersion)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ *macVersion = fman_tgec_get_revision(p_Tgec->p_MemMap);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecSetException(t_Handle h_Tgec, e_FmMacExceptions exception, bool enable)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++
++ GET_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_Tgec->exceptions |= bitMask;
++ else
++ p_Tgec->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ if (enable)
++ fman_tgec_enable_interrupt(p_Tgec->p_MemMap, bitMask);
++ else
++ fman_tgec_disable_interrupt(p_Tgec->p_MemMap, bitMask);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static uint16_t TgecGetMaxFrameLength(t_Handle h_Tgec)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_VALUE(p_Tgec, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE, 0);
++
++ return fman_tgec_get_max_frame_len(p_Tgec->p_MemMap);
++}
++
++/* ......................................................................... */
++
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
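++/* Erratum 10GMAC-A004 workaround: briefly enable the MAC in promiscuous
++   mode so Fm10GTxEccWorkaround() can exercise the Tx path, then restore
++   the disabled state and clear the statistics and events raised while
++   the workaround ran. */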
++static t_Error TgecTxEccWorkaround(t_Tgec *p_Tgec)
++{
++ t_Error err;
++
++#if defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)
++ XX_Print("Applying 10G TX ECC workaround (10GMAC-A004) ... ");
++#endif /* (DEBUG_ERRORS > 0) */
++ /* enable and set promiscuous */
++ fman_tgec_enable(p_Tgec->p_MemMap, TRUE, TRUE);
++ fman_tgec_set_promiscuous(p_Tgec->p_MemMap, TRUE);
++ err = Fm10GTxEccWorkaround(p_Tgec->fmMacControllerDriver.h_Fm, p_Tgec->macId);
++ /* disable */
++ fman_tgec_set_promiscuous(p_Tgec->p_MemMap, FALSE);
++ fman_tgec_enable(p_Tgec->p_MemMap, FALSE, FALSE);
++ fman_tgec_reset_stat(p_Tgec->p_MemMap);
++ fman_tgec_ack_event(p_Tgec->p_MemMap, 0xffffffff);
++#if defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)
++ if (err)
++ XX_Print("FAILED!\n");
++ else
++ XX_Print("done.\n");
++#endif /* (DEBUG_ERRORS > 0) */
++
++ return err;
++}
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++
++/*****************************************************************************/
++/* FM Init & Free API */
++/*****************************************************************************/
++
++/* ......................................................................... */
++
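++/* Bring the MAC up: program the station address, apply the errata
++   workarounds, initialize the hardware through fman_tgec_init(), set the
++   FM-level maximum frame length, allocate the multicast/unicast hash
++   tables, hook the error (and optional MDIO) interrupts, and finally
++   release the staging configuration structure. */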
++static t_Error TgecInit(t_Handle h_Tgec)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ struct tgec_cfg *p_TgecDriverParam;
++ t_EnetAddr ethAddr;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->fmMacControllerDriver.h_Fm, E_INVALID_HANDLE);
++
++ FM_GetRevision(p_Tgec->fmMacControllerDriver.h_Fm, &p_Tgec->fmMacControllerDriver.fmRevInfo);
++ CHECK_INIT_PARAMETERS(p_Tgec, CheckInitParameters);
++
++ p_TgecDriverParam = p_Tgec->p_TgecDriverParam;
++
++ MAKE_ENET_ADDR_FROM_UINT64(p_Tgec->addr, ethAddr);
++ fman_tgec_set_mac_address(p_Tgec->p_MemMap, (uint8_t *)ethAddr);
++
++ /* interrupts */
++#ifdef FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005
++ {
++ if (p_Tgec->fmMacControllerDriver.fmRevInfo.majorRev <=2)
++ p_Tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT | TGEC_IMASK_LOC_FAULT);
++ }
++#endif /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 */
++
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++ if (!p_Tgec->p_TgecDriverParam->skip_fman11_workaround &&
++ ((err = TgecTxEccWorkaround(p_Tgec)) != E_OK))
++ {
++ FreeInitResources(p_Tgec);
++ REPORT_ERROR(MINOR, err, ("TgecTxEccWorkaround FAILED"));
++ }
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++
++ err = fman_tgec_init(p_Tgec->p_MemMap, p_TgecDriverParam, p_Tgec->exceptions);
++ if (err)
++ {
++ FreeInitResources(p_Tgec);
++ RETURN_ERROR(MAJOR, err, ("This TGEC version does not support the required i/f mode"));
++ }
++
++ /* Max Frame Length */
++ err = FmSetMacMaxFrame(p_Tgec->fmMacControllerDriver.h_Fm,
++ e_FM_MAC_10G,
++ p_Tgec->fmMacControllerDriver.macId,
++ p_TgecDriverParam->max_frame_length);
++ if (err != E_OK)
++ {
++ FreeInitResources(p_Tgec);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++/* we consider having no IPC a non crasher... */
++
++#ifdef FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007
++ if (p_Tgec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
++ fman_tgec_set_erratum_tx_fifo_corruption_10gmac_a007(p_Tgec->p_MemMap);
++#endif /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 */
++
++ p_Tgec->p_MulticastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
++ if (!p_Tgec->p_MulticastAddrHash)
++ {
++ FreeInitResources(p_Tgec);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("allocation hash table is FAILED"));
++ }
++
++ p_Tgec->p_UnicastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
++ if (!p_Tgec->p_UnicastAddrHash)
++ {
++ FreeInitResources(p_Tgec);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("allocation hash table is FAILED"));
++ }
++
++ FmRegisterIntr(p_Tgec->fmMacControllerDriver.h_Fm,
++ e_FM_MOD_10G_MAC,
++ p_Tgec->macId,
++ e_FM_INTR_TYPE_ERR,
++ TgecErrException,
++ p_Tgec);
++ if (p_Tgec->mdioIrq != NO_IRQ)
++ {
++ XX_SetIntr(p_Tgec->mdioIrq, TgecException, p_Tgec);
++ XX_EnableIntr(p_Tgec->mdioIrq);
++ }
++
++ XX_Free(p_TgecDriverParam);
++ p_Tgec->p_TgecDriverParam = NULL;
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
++static t_Error TgecFree(t_Handle h_Tgec)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++
++ if (p_Tgec->p_TgecDriverParam)
++ {
++ /* Called after config */
++ XX_Free(p_Tgec->p_TgecDriverParam);
++ p_Tgec->p_TgecDriverParam = NULL;
++ }
++ else
++ /* Called after init */
++ FreeInitResources(p_Tgec);
++
++ XX_Free(p_Tgec);
++
++ return E_OK;
++}
++
++/* ......................................................................... */
++
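++/* Populate the generic FM MAC operations table with the TGEC
++   implementations; NULL entries mark operations this 10G MAC does not
++   support (e.g. half duplex or pad/CRC configuration). */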
++static void InitFmMacControllerDriver(t_FmMacControllerDriver *p_FmMacControllerDriver)
++{
++ p_FmMacControllerDriver->f_FM_MAC_Init = TgecInit;
++ p_FmMacControllerDriver->f_FM_MAC_Free = TgecFree;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetStatistics = NULL;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback = TgecConfigLoopback;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength = TgecConfigMaxFrameLength;
++
++ p_FmMacControllerDriver->f_FM_MAC_ConfigWan = TgecConfigWan;
++
++ p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc = NULL; /* TGEC always works with pad+crc */
++ p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex = NULL; /* half-duplex is not supported in xgec */
++ p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck = TgecConfigLengthCheck;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigException = TgecConfigException;
++ p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit = NULL;
++
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++ p_FmMacControllerDriver->f_FM_MAC_ConfigSkipFman11Workaround= TgecConfigSkipFman11Workaround;
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++
++ p_FmMacControllerDriver->f_FM_MAC_SetException = TgecSetExcpetion;
++
++ p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp = TgecEnable1588TimeStamp;
++ p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp = TgecDisable1588TimeStamp;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous = TgecSetPromiscuous;
++ p_FmMacControllerDriver->f_FM_MAC_AdjustLink = NULL;
++ p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan = NULL;
++ p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg = NULL;
++
++ p_FmMacControllerDriver->f_FM_MAC_Enable = TgecEnable;
++ p_FmMacControllerDriver->f_FM_MAC_Disable = TgecDisable;
++ p_FmMacControllerDriver->f_FM_MAC_Resume = NULL;
++
++ p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = TgecTxMacPause;
++ p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = TgecSetTxPauseFrames;
++ p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames = TgecRxIgnoreMacPause;
++
++ p_FmMacControllerDriver->f_FM_MAC_ResetCounters = TgecResetCounters;
++ p_FmMacControllerDriver->f_FM_MAC_GetStatistics = TgecGetStatistics;
++
++ p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr = TgecModifyMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr = TgecAddHashMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr = TgecDelHashMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr = TgecAddExactMatchMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr = TgecDelExactMatchMacAddress;
++ p_FmMacControllerDriver->f_FM_MAC_GetId = TgecGetId;
++ p_FmMacControllerDriver->f_FM_MAC_GetVersion = TgecGetVersion;
++ p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength = TgecGetMaxFrameLength;
++
++ p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg = TGEC_MII_WritePhyReg;
++ p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg = TGEC_MII_ReadPhyReg;
++}
++
++
++/*****************************************************************************/
++/* Tgec Config Main Entry */
++/*****************************************************************************/
++
++/* ......................................................................... */
++
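++/* Allocate and pre-configure a TGEC driver object, mirroring
++   MEMAC_Config(): control block plus a staging tgec_cfg filled with
++   fman_tgec_defconfig() defaults. Returns NULL on allocation failure;
++   no hardware access is performed at this stage. */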
++t_Handle TGEC_Config(t_FmMacParams *p_FmMacParam)
++{
++ t_Tgec *p_Tgec;
++ struct tgec_cfg *p_TgecDriverParam;
++ uintptr_t baseAddr;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_NULL_POINTER, NULL);
++
++ baseAddr = p_FmMacParam->baseAddr;
++ /* Allocate memory for the 10G MAC (TGEC) data structure */
++ p_Tgec = (t_Tgec *)XX_Malloc(sizeof(t_Tgec));
++ if (!p_Tgec)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("10G MAC driver structure"));
++ return NULL;
++ }
++ memset(p_Tgec, 0, sizeof(t_Tgec));
++ InitFmMacControllerDriver(&p_Tgec->fmMacControllerDriver);
++
++ /* Allocate memory for the 10G MAC driver parameters data structure */
++ p_TgecDriverParam = (struct tgec_cfg *) XX_Malloc(sizeof(struct tgec_cfg));
++ if (!p_TgecDriverParam)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("10G MAC driver parameters"));
++ XX_Free(p_Tgec);
++ return NULL;
++ }
++ memset(p_TgecDriverParam, 0, sizeof(struct tgec_cfg));
++
++ /* Plant parameter structure pointer */
++ p_Tgec->p_TgecDriverParam = p_TgecDriverParam;
++
++ fman_tgec_defconfig(p_TgecDriverParam);
++
++ p_Tgec->p_MemMap = (struct tgec_regs *)UINT_TO_PTR(baseAddr);
++ p_Tgec->p_MiiMemMap = (t_TgecMiiAccessMemMap *)UINT_TO_PTR(baseAddr + TGEC_TO_MII_OFFSET);
++ p_Tgec->addr = ENET_ADDR_TO_UINT64(p_FmMacParam->addr);
++ p_Tgec->enetMode = p_FmMacParam->enetMode;
++ p_Tgec->macId = p_FmMacParam->macId;
++ p_Tgec->exceptions = DEFAULT_exceptions;
++ p_Tgec->mdioIrq = p_FmMacParam->mdioIrq;
++ p_Tgec->f_Exception = p_FmMacParam->f_Exception;
++ p_Tgec->f_Event = p_FmMacParam->f_Event;
++ p_Tgec->h_App = p_FmMacParam->h_App;
++
++ return p_Tgec;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.h
+new file mode 100644
+index 00000000..2aa39238
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.h
+@@ -0,0 +1,151 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File tgec.h
++
++ @Description FM 10G MAC ...
++*//***************************************************************************/
++#ifndef __TGEC_H
++#define __TGEC_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++#include "enet_ext.h"
++
++#include "tgec_mii_acc.h"
++#include "fm_mac.h"
++
++
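++/* Default interrupt mask: every event except MDIO command-completion and
++   Rx length-error, which stay disabled until explicitly enabled through
++   the exceptions API. */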
++#define DEFAULT_exceptions \
++ ((uint32_t)(TGEC_IMASK_MDIO_SCAN_EVENT | \
++ TGEC_IMASK_REM_FAULT | \
++ TGEC_IMASK_LOC_FAULT | \
++ TGEC_IMASK_TX_ECC_ER | \
++ TGEC_IMASK_TX_FIFO_UNFL | \
++ TGEC_IMASK_TX_FIFO_OVFL | \
++ TGEC_IMASK_TX_ER | \
++ TGEC_IMASK_RX_FIFO_OVFL | \
++ TGEC_IMASK_RX_ECC_ER | \
++ TGEC_IMASK_RX_JAB_FRM | \
++ TGEC_IMASK_RX_OVRSZ_FRM | \
++ TGEC_IMASK_RX_RUNT_FRM | \
++ TGEC_IMASK_RX_FRAG_FRM | \
++ TGEC_IMASK_RX_CRC_ER | \
++ TGEC_IMASK_RX_ALIGN_ER))
++
++#define GET_EXCEPTION_FLAG(bitMask, exception) switch (exception){ \
++ case e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO: \
++ bitMask = TGEC_IMASK_MDIO_SCAN_EVENT ; break; \
++ case e_FM_MAC_EX_10G_MDIO_CMD_CMPL: \
++ bitMask = TGEC_IMASK_MDIO_CMD_CMPL ; break; \
++ case e_FM_MAC_EX_10G_REM_FAULT: \
++ bitMask = TGEC_IMASK_REM_FAULT ; break; \
++ case e_FM_MAC_EX_10G_LOC_FAULT: \
++ bitMask = TGEC_IMASK_LOC_FAULT ; break; \
++ case e_FM_MAC_EX_10G_1TX_ECC_ER: \
++ bitMask = TGEC_IMASK_TX_ECC_ER ; break; \
++ case e_FM_MAC_EX_10G_TX_FIFO_UNFL: \
++ bitMask = TGEC_IMASK_TX_FIFO_UNFL ; break; \
++ case e_FM_MAC_EX_10G_TX_FIFO_OVFL: \
++ bitMask = TGEC_IMASK_TX_FIFO_OVFL ; break; \
++ case e_FM_MAC_EX_10G_TX_ER: \
++ bitMask = TGEC_IMASK_TX_ER ; break; \
++ case e_FM_MAC_EX_10G_RX_FIFO_OVFL: \
++ bitMask = TGEC_IMASK_RX_FIFO_OVFL ; break; \
++ case e_FM_MAC_EX_10G_RX_ECC_ER: \
++ bitMask = TGEC_IMASK_RX_ECC_ER ; break; \
++ case e_FM_MAC_EX_10G_RX_JAB_FRM: \
++ bitMask = TGEC_IMASK_RX_JAB_FRM ; break; \
++ case e_FM_MAC_EX_10G_RX_OVRSZ_FRM: \
++ bitMask = TGEC_IMASK_RX_OVRSZ_FRM ; break; \
++ case e_FM_MAC_EX_10G_RX_RUNT_FRM: \
++ bitMask = TGEC_IMASK_RX_RUNT_FRM ; break; \
++ case e_FM_MAC_EX_10G_RX_FRAG_FRM: \
++ bitMask = TGEC_IMASK_RX_FRAG_FRM ; break; \
++ case e_FM_MAC_EX_10G_RX_LEN_ER: \
++ bitMask = TGEC_IMASK_RX_LEN_ER ; break; \
++ case e_FM_MAC_EX_10G_RX_CRC_ER: \
++ bitMask = TGEC_IMASK_RX_CRC_ER ; break; \
++ case e_FM_MAC_EX_10G_RX_ALIGN_ER: \
++ bitMask = TGEC_IMASK_RX_ALIGN_ER ; break; \
++ default: bitMask = 0;break;}
++
++#define MAX_PACKET_ALIGNMENT 31
++#define MAX_INTER_PACKET_GAP 0x7f
++#define MAX_INTER_PALTERNATE_BEB 0x0f
++#define MAX_RETRANSMISSION 0x0f
++#define MAX_COLLISION_WINDOW 0x03ff
++
++#define TGEC_NUM_OF_PADDRS 1 /* number of pattern match registers (entries) */
++
++#define GROUP_ADDRESS 0x0000010000000000LL /* Group address bit indication */
++
++#define HASH_TABLE_SIZE 512 /* Hash table size (= 32 bits * 8 regs) */
++
++#define TGEC_TO_MII_OFFSET 0x1030 /* Offset from the MEM map to the MDIO mem map */
++
++/* 10-gigabit Ethernet MAC Controller ID (10GEC_ID) */
++#define TGEC_ID_ID 0xffff0000
++#define TGEC_ID_MAC_VERSION 0x0000FF00
++#define TGEC_ID_MAC_REV 0x000000ff
++
++
++typedef struct {
++ t_FmMacControllerDriver fmMacControllerDriver; /**< Upper Mac control block */
++ t_Handle h_App; /**< Handle to the upper layer application */
++ struct tgec_regs *p_MemMap; /**< pointer to 10G memory mapped registers. */
++ t_TgecMiiAccessMemMap *p_MiiMemMap; /**< pointer to MII memory mapped registers. */
++ uint64_t addr; /**< MAC address of device; */
++ e_EnetMode enetMode; /**< Ethernet physical interface */
++ t_FmMacExceptionCallback *f_Exception;
++ int mdioIrq;
++ t_FmMacExceptionCallback *f_Event;
++ bool indAddrRegUsed[TGEC_NUM_OF_PADDRS]; /**< Whether a particular individual address recognition register is being used */
++ uint64_t paddr[TGEC_NUM_OF_PADDRS]; /**< MAC address for particular individual address recognition register */
++ uint8_t numOfIndAddrInRegs; /**< Number of individual addresses in registers for this station. */
++ t_EthHash *p_MulticastAddrHash; /**< pointer to driver's global address hash table */
++ t_EthHash *p_UnicastAddrHash; /**< pointer to driver's individual address hash table */
++ bool debugMode;
++ uint8_t macId;
++ uint32_t exceptions;
++ struct tgec_cfg *p_TgecDriverParam;
++} t_Tgec;
++
++
++t_Error TGEC_MII_WritePhyReg(t_Handle h_Tgec, uint8_t phyAddr, uint8_t reg, uint16_t data);
++t_Error TGEC_MII_ReadPhyReg(t_Handle h_Tgec, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
++
++
++#endif /* __TGEC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c
+new file mode 100644
+index 00000000..e0fafd1d
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c
+@@ -0,0 +1,139 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "fm_mac.h"
++#include "tgec.h"
++#include "xx_ext.h"
++
++#include "fm_common.h"
++
++
++/*****************************************************************************/
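++/* MDIO write: program the MDC divider (targeting 2.5 MHz, per the
++   computation below), wait for the controller to go idle, latch the PHY
++   and register addresses, then write the data and poll until the
++   transaction completes. */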
++t_Error TGEC_MII_WritePhyReg(t_Handle h_Tgec,
++ uint8_t phyAddr,
++ uint8_t reg,
++ uint16_t data)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ t_TgecMiiAccessMemMap *p_MiiAccess;
++ uint32_t cfgStatusReg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MiiMemMap, E_INVALID_HANDLE);
++
++ p_MiiAccess = p_Tgec->p_MiiMemMap;
++
++ /* Configure MII */
++ cfgStatusReg = GET_UINT32(p_MiiAccess->mdio_cfg_status);
++ cfgStatusReg &= ~MIIMCOM_DIV_MASK;
++ /* (one half of fm clock => 2.5Mhz) */
++ cfgStatusReg |=((((p_Tgec->fmMacControllerDriver.clkFreq*10)/2)/25) << MIIMCOM_DIV_SHIFT);
++ WRITE_UINT32(p_MiiAccess->mdio_cfg_status, cfgStatusReg);
++
++ while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY)
++ XX_UDelay (1);
++
++ WRITE_UINT32(p_MiiAccess->mdio_command, phyAddr);
++
++ WRITE_UINT32(p_MiiAccess->mdio_regaddr, reg);
++
++ CORE_MemoryBarrier();
++
++ while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY)
++ XX_UDelay (1);
++
++ WRITE_UINT32(p_MiiAccess->mdio_data, data);
++
++ CORE_MemoryBarrier();
++
++ while ((GET_UINT32(p_MiiAccess->mdio_data)) & MIIDATA_BUSY)
++ XX_UDelay (1);
++
++ return E_OK;
++}
++
++/*****************************************************************************/
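++/* MDIO read: same setup as the write path, followed by a read-cycle
++   command; the result is fetched from mdio_data and the status register
++   is checked for a read error before returning. */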
++t_Error TGEC_MII_ReadPhyReg(t_Handle h_Tgec,
++ uint8_t phyAddr,
++ uint8_t reg,
++ uint16_t *p_Data)
++{
++ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
++ t_TgecMiiAccessMemMap *p_MiiAccess;
++ uint32_t cfgStatusReg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MiiMemMap, E_INVALID_HANDLE);
++
++ p_MiiAccess = p_Tgec->p_MiiMemMap;
++
++ /* Configure MII */
++ cfgStatusReg = GET_UINT32(p_MiiAccess->mdio_cfg_status);
++ cfgStatusReg &= ~MIIMCOM_DIV_MASK;
++ /* (one half of fm clock => 2.5Mhz) */
++ cfgStatusReg |=((((p_Tgec->fmMacControllerDriver.clkFreq*10)/2)/25) << MIIMCOM_DIV_SHIFT);
++ WRITE_UINT32(p_MiiAccess->mdio_cfg_status, cfgStatusReg);
++
++ while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY)
++ XX_UDelay (1);
++
++ WRITE_UINT32(p_MiiAccess->mdio_command, phyAddr);
++
++ WRITE_UINT32(p_MiiAccess->mdio_regaddr, reg);
++
++ CORE_MemoryBarrier();
++
++ while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY)
++ XX_UDelay (1);
++
++ WRITE_UINT32(p_MiiAccess->mdio_command, (uint32_t)(phyAddr | MIIMCOM_READ_CYCLE));
++
++ CORE_MemoryBarrier();
++
++ while ((GET_UINT32(p_MiiAccess->mdio_data)) & MIIDATA_BUSY)
++ XX_UDelay (1);
++
++ *p_Data = (uint16_t)GET_UINT32(p_MiiAccess->mdio_data);
++
++ cfgStatusReg = GET_UINT32(p_MiiAccess->mdio_cfg_status);
++
++ if (cfgStatusReg & MIIMIND_READ_ERROR)
++ RETURN_ERROR(MINOR, E_INVALID_VALUE,
++ ("Read Error: phyAddr 0x%x, dev 0x%x, reg 0x%x, cfgStatusReg 0x%x",
++ ((phyAddr & 0xe0)>>5), (phyAddr & 0x1f), reg, cfgStatusReg));
++
++ return E_OK;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h
+new file mode 100644
+index 00000000..645cdde5
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h
+@@ -0,0 +1,80 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef __TGEC_MII_ACC_H
++#define __TGEC_MII_ACC_H
++
++#include "std_ext.h"
++
++
++/* MII Management Command Register */
++#define MIIMCOM_READ_POST_INCREMENT 0x00004000
++#define MIIMCOM_READ_CYCLE 0x00008000
++#define MIIMCOM_SCAN_CYCLE 0x00000800
++#define MIIMCOM_PREAMBLE_DISABLE 0x00000400
++
++#define MIIMCOM_MDIO_HOLD_1_REG_CLK 0
++#define MIIMCOM_MDIO_HOLD_2_REG_CLK 1
++#define MIIMCOM_MDIO_HOLD_3_REG_CLK 2
++#define MIIMCOM_MDIO_HOLD_4_REG_CLK 3
++
++#define MIIMCOM_DIV_MASK 0x0000ff00
++#define MIIMCOM_DIV_SHIFT 8
++
++/* MII Management Indicator Register */
++#define MIIMIND_BUSY 0x00000001
++#define MIIMIND_READ_ERROR 0x00000002
++
++#define MIIDATA_BUSY 0x80000000
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++/*----------------------------------------------------*/
++/* MII Configuration Control Memory Map Registers */
++/*----------------------------------------------------*/
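++/* This map must match the device register layout word-for-word, hence the
++ * compiler-specific packing directives around it. */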
++typedef _Packed struct t_TgecMiiAccessMemMap
++{
++ volatile uint32_t mdio_cfg_status; /* 0x030 */
++ volatile uint32_t mdio_command; /* 0x034 */
++ volatile uint32_t mdio_data; /* 0x038 */
++ volatile uint32_t mdio_regaddr; /* 0x03c */
++} _PackedType t_TgecMiiAccessMemMap;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++#endif /* __TGEC_MII_ACC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/Makefile b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/Makefile
+new file mode 100644
+index 00000000..bfa02f5e
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/Makefile
+@@ -0,0 +1,15 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
++
++ccflags-y += -I$(NCSW_FM_INC)
++
++obj-y += fsl-ncsw-macsec.o
++
++fsl-ncsw-macsec-objs := fm_macsec.o fm_macsec_guest.o fm_macsec_master.o fm_macsec_secy.o
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c
+new file mode 100644
+index 00000000..0a1b31f1
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c
+@@ -0,0 +1,237 @@
++/*
++ * Copyright 2008-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++/******************************************************************************
++
++ @File fm_macsec.c
++
++ @Description FM MACSEC driver routines implementation.
++*//***************************************************************************/
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "xx_ext.h"
++#include "string_ext.h"
++#include "sprint_ext.h"
++#include "debug_ext.h"
++
++#include "fm_macsec.h"
++
++
++/****************************************/
++/* API Init unit functions */
++/****************************************/
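++/* FM_MACSEC_Config picks the guest or master back-end; both presumably embed
++ * a t_FmMacsecControllerDriver as their first member, since the returned
++ * handle is cast to that function-pointer table, through which every other
++ * FM_MACSEC_* wrapper below dispatches. */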
++t_Handle FM_MACSEC_Config(t_FmMacsecParams *p_FmMacsecParam)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmMacsecParam, E_INVALID_HANDLE, NULL);
++
++ if (p_FmMacsecParam->guestMode)
++ p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)FM_MACSEC_GUEST_Config(p_FmMacsecParam);
++ else
++ p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)FM_MACSEC_MASTER_Config(p_FmMacsecParam);
++
++ if (!p_FmMacsecControllerDriver)
++ return NULL;
++
++ return (t_Handle)p_FmMacsecControllerDriver;
++}
++
++t_Error FM_MACSEC_Init(t_Handle h_FmMacsec)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_Init)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_Init(h_FmMacsec);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_Free(t_Handle h_FmMacsec)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_Free)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_Free(h_FmMacsec);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_ConfigUnknownSciFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUnknownSciFrameTreatment treatMode)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUnknownSciFrameTreatment)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUnknownSciFrameTreatment(h_FmMacsec, treatMode);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_ConfigInvalidTagsFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigInvalidTagsFrameTreatment)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigInvalidTagsFrameTreatment(h_FmMacsec, deliverUncontrolled);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment(t_Handle h_FmMacsec, bool discardUncontrolled)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment(h_FmMacsec, discardUncontrolled);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_ConfigUntagFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUntagFrameTreatment treatMode)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUntagFrameTreatment)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUntagFrameTreatment(h_FmMacsec, treatMode);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_ConfigPnExhaustionThreshold(t_Handle h_FmMacsec, uint32_t pnExhThr)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigPnExhaustionThreshold)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigPnExhaustionThreshold(h_FmMacsec, pnExhThr);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_ConfigKeysUnreadable(t_Handle h_FmMacsec)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigKeysUnreadable)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigKeysUnreadable(h_FmMacsec);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_ConfigSectagWithoutSCI(t_Handle h_FmMacsec)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigSectagWithoutSCI)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigSectagWithoutSCI(h_FmMacsec);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_ConfigException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigException)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigException(h_FmMacsec, exception, enable);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_GetRevision(t_Handle h_FmMacsec, uint32_t *p_MacsecRevision)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_GetRevision)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_GetRevision(h_FmMacsec, p_MacsecRevision);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++
++t_Error FM_MACSEC_Enable(t_Handle h_FmMacsec)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_Enable)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_Enable(h_FmMacsec);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_Disable(t_Handle h_FmMacsec)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_Disable)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_Disable(h_FmMacsec);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_SetException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable)
++{
++ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
++
++ if (p_FmMacsecControllerDriver->f_FM_MACSEC_SetException)
++ return p_FmMacsecControllerDriver->f_FM_MACSEC_SetException(h_FmMacsec, exception, enable);
++
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h
+new file mode 100644
+index 00000000..fbe51875
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h
+@@ -0,0 +1,203 @@
++/*
++ * Copyright 2008-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File fm_macsec.h
++
++ @Description FM MACSEC internal structures and definitions.
++*//***************************************************************************/
++#ifndef __FM_MACSEC_H
++#define __FM_MACSEC_H
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "fm_macsec_ext.h"
++
++#include "fm_common.h"
++
++
++#define __ERR_MODULE__ MODULE_FM_MACSEC
++
++
++typedef struct
++{
++ t_Error (*f_FM_MACSEC_Init) (t_Handle h_FmMacsec);
++ t_Error (*f_FM_MACSEC_Free) (t_Handle h_FmMacsec);
++
++ t_Error (*f_FM_MACSEC_ConfigUnknownSciFrameTreatment) (t_Handle h_FmMacsec, e_FmMacsecUnknownSciFrameTreatment treatMode);
++ t_Error (*f_FM_MACSEC_ConfigInvalidTagsFrameTreatment) (t_Handle h_FmMacsec, bool deliverUncontrolled);
++ t_Error (*f_FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment) (t_Handle h_FmMacsec, bool discardUncontrolled);
++ t_Error (*f_FM_MACSEC_ConfigChangedTextWithNoEncryptFrameTreatment) (t_Handle h_FmMacsec, bool deliverUncontrolled);
++ t_Error (*f_FM_MACSEC_ConfigUntagFrameTreatment) (t_Handle h_FmMacsec, e_FmMacsecUntagFrameTreatment treatMode);
++ t_Error (*f_FM_MACSEC_ConfigOnlyScbIsSetFrameTreatment) (t_Handle h_FmMacsec, bool deliverUncontrolled);
++ t_Error (*f_FM_MACSEC_ConfigPnExhaustionThreshold) (t_Handle h_FmMacsec, uint32_t pnExhThr);
++ t_Error (*f_FM_MACSEC_ConfigKeysUnreadable) (t_Handle h_FmMacsec);
++ t_Error (*f_FM_MACSEC_ConfigSectagWithoutSCI) (t_Handle h_FmMacsec);
++ t_Error (*f_FM_MACSEC_ConfigException) (t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable);
++
++ t_Error (*f_FM_MACSEC_GetRevision) (t_Handle h_FmMacsec, uint32_t *p_MacsecRevision);
++ t_Error (*f_FM_MACSEC_Enable) (t_Handle h_FmMacsec);
++ t_Error (*f_FM_MACSEC_Disable) (t_Handle h_FmMacsec);
++ t_Error (*f_FM_MACSEC_SetException) (t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable);
++
++} t_FmMacsecControllerDriver;
++
++t_Handle FM_MACSEC_GUEST_Config(t_FmMacsecParams *p_FmMacsecParam);
++t_Handle FM_MACSEC_MASTER_Config(t_FmMacsecParams *p_FmMacsecParams);
++
++/***********************************************************************/
++/* MACSEC internal routines */
++/***********************************************************************/
++
++/**************************************************************************//**
++
++ @Group FM_MACSEC_InterModule_grp FM MACSEC Inter-Module Unit
++
++ @Description    FM MACSEC Inter-Module functions -
++                 These are not User API routines, but routines that other
++                 modules may call. In a single-core environment other modules
++                 invoke them directly instead of going through the XX messaging
++                 mechanism; in a multicore environment those modules may run on
++                 other cores, so these routines may not be called directly.
++
++ @{
++*//***************************************************************************/
++
++#define MAX_NUM_OF_SA_PER_SC 4
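++/* Each secure channel owns up to four secure associations, selected through
++ * e_ScSaId below. */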
++
++typedef enum
++{
++ e_SC_RX = 0,
++ e_SC_TX
++} e_ScType;
++
++typedef enum
++{
++ e_SC_SA_A = 0,
++    e_SC_SA_B,
++    e_SC_SA_C,
++    e_SC_SA_D
++} e_ScSaId;
++
++typedef struct
++{
++ uint32_t scId;
++ macsecSCI_t sci;
++ bool replayProtect;
++ uint32_t replayWindow;
++ e_FmMacsecValidFrameBehavior validateFrames;
++ uint16_t confidentialityOffset;
++ e_FmMacsecSecYCipherSuite cipherSuite;
++} t_RxScParams;
++
++typedef struct
++{
++ uint32_t scId;
++ macsecSCI_t sci;
++ bool protectFrames;
++ e_FmMacsecSciInsertionMode sciInsertionMode;
++ bool confidentialityEnable;
++ uint16_t confidentialityOffset;
++ e_FmMacsecSecYCipherSuite cipherSuite;
++} t_TxScParams;
++
++typedef enum e_FmMacsecGlobalExceptions {
++ e_FM_MACSEC_EX_TX_SC, /**< Tx Sc 0 frame discarded error. */
++ e_FM_MACSEC_EX_ECC /**< MACSEC memory ECC multiple-bit error. */
++} e_FmMacsecGlobalExceptions;
++
++typedef enum e_FmMacsecGlobalEvents {
++ e_FM_MACSEC_EV_TX_SC_NEXT_PN /**< Tx Sc 0 Next Pn exhaustion threshold reached. */
++} e_FmMacsecGlobalEvents;
++
++/**************************************************************************//**
++ @Description Enum for inter-module interrupts registration
++*//***************************************************************************/
++typedef enum e_FmMacsecEventModules{
++ e_FM_MACSEC_MOD_SC_TX,
++ e_FM_MACSEC_MOD_DUMMY_LAST
++} e_FmMacsecEventModules;
++
++typedef enum e_FmMacsecInterModuleEvent {
++ e_FM_MACSEC_EV_SC_TX,
++ e_FM_MACSEC_EV_ERR_SC_TX,
++ e_FM_MACSEC_EV_DUMMY_LAST
++} e_FmMacsecInterModuleEvent;
++
++#define NUM_OF_INTER_MODULE_EVENTS (NUM_OF_TX_SC * 2)
++
++#define GET_MACSEC_MODULE_EVENT(mod, id, intrType, event) \
++ switch(mod){ \
++ case e_FM_MACSEC_MOD_SC_TX: \
++ event = (intrType == e_FM_INTR_TYPE_ERR) ? \
++ e_FM_MACSEC_EV_ERR_SC_TX: \
++ e_FM_MACSEC_EV_SC_TX; \
++            event += (uint8_t)(2 * id); \
++            break; \
++ default:event = e_FM_MACSEC_EV_DUMMY_LAST; \
++ break;}
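++/* Resulting event layout - events interleave two per SC: SC i's normal event
++ * is e_FM_MACSEC_EV_SC_TX + 2*i and its error event
++ * e_FM_MACSEC_EV_ERR_SC_TX + 2*i, which is why NUM_OF_INTER_MODULE_EVENTS
++ * is NUM_OF_TX_SC * 2. */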
++
++void FmMacsecRegisterIntr(t_Handle h_FmMacsec,
++ e_FmMacsecEventModules module,
++ uint8_t modId,
++ e_FmIntrType intrType,
++ void (*f_Isr) (t_Handle h_Arg, uint32_t id),
++ t_Handle h_Arg);
++
++void FmMacsecUnregisterIntr(t_Handle h_FmMacsec,
++ e_FmMacsecEventModules module,
++ uint8_t modId,
++ e_FmIntrType intrType);
++
++t_Error FmMacsecAllocScs(t_Handle h_FmMacsec, e_ScType type, bool isPtp, uint32_t numOfScs, uint32_t *p_ScIds);
++t_Error FmMacsecFreeScs(t_Handle h_FmMacsec, e_ScType type, uint32_t numOfScs, uint32_t *p_ScIds);
++t_Error FmMacsecCreateRxSc(t_Handle h_FmMacsec, t_RxScParams *p_RxScParams);
++t_Error FmMacsecDeleteRxSc(t_Handle h_FmMacsec, uint32_t scId);
++t_Error FmMacsecCreateTxSc(t_Handle h_FmMacsec, t_TxScParams *p_TxScParams);
++t_Error FmMacsecDeleteTxSc(t_Handle h_FmMacsec, uint32_t scId);
++t_Error FmMacsecCreateRxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecAN_t an, uint32_t lowestPn, macsecSAKey_t key);
++t_Error FmMacsecCreateTxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecSAKey_t key);
++t_Error FmMacsecDeleteRxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId);
++t_Error FmMacsecDeleteTxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId);
++t_Error FmMacsecRxSaSetReceive(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, bool enableReceive);
++t_Error FmMacsecRxSaUpdateNextPn(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, uint32_t updtNextPN);
++t_Error FmMacsecRxSaUpdateLowestPn(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, uint32_t updtLowestPN);
++t_Error FmMacsecTxSaSetActive(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecAN_t an);
++t_Error FmMacsecTxSaGetActive(t_Handle h_FmMacsec, uint32_t scId, macsecAN_t *p_An);
++t_Error FmMacsecSetPTP(t_Handle h_FmMacsec, bool enable);
++
++t_Error FmMacsecSetException(t_Handle h_FmMacsec, e_FmMacsecGlobalExceptions exception, uint32_t scId, bool enable);
++t_Error FmMacsecSetEvent(t_Handle h_FmMacsec, e_FmMacsecGlobalEvents event, uint32_t scId, bool enable);
++
++
++
++#endif /* __FM_MACSEC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_guest.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_guest.c
+new file mode 100644
+index 00000000..31d789d0
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_guest.c
+@@ -0,0 +1,59 @@
++/*
++ * Copyright 2008-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File          fm_macsec_guest.c
++
++ @Description   FM MACSEC guest partition driver routines.
++*//***************************************************************************/
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "xx_ext.h"
++#include "string_ext.h"
++#include "sprint_ext.h"
++#include "debug_ext.h"
++#include "fm_macsec.h"
++
++
++/****************************************/
++/* static functions */
++/****************************************/
++
++/****************************************/
++/* API Init unit functions */
++/****************************************/
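++/* Guest-partition MACSEC is not implemented in this drop: the stub below
++ * returns NULL, so FM_MACSEC_Config fails when guestMode is set. */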
++t_Handle FM_MACSEC_GUEST_Config(t_FmMacsecParams *p_FmMacsecParam)
++{
++ UNUSED(p_FmMacsecParam);
++ return NULL;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.c
+new file mode 100644
+index 00000000..623612ac
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.c
+@@ -0,0 +1,1031 @@
++/*
++ * Copyright 2008-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File          fm_macsec_master.c
++
++ @Description   FM MACSEC master partition driver routines implementation.
++*//***************************************************************************/
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "xx_ext.h"
++#include "string_ext.h"
++#include "sprint_ext.h"
++#include "fm_mac_ext.h"
++
++#include "fm_macsec_master.h"
++
++
++extern uint16_t FM_MAC_GetMaxFrameLength(t_Handle FmMac);
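++/* Declared here rather than through a header; MacsecInit uses it to derive
++ * the MACSEC maximum-frame-length register from the attached MAC. */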
++
++
++/****************************************/
++/* static functions */
++/****************************************/
++static t_Error CheckFmMacsecParameters(t_FmMacsec *p_FmMacsec)
++{
++ if (!p_FmMacsec->f_Exception)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided"));
++
++ return E_OK;
++}
++
++static void UnimplementedIsr(t_Handle h_Arg, uint32_t id)
++{
++ UNUSED(h_Arg); UNUSED(id);
++
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unimplemented Isr!"));
++}
++
++static void MacsecEventIsr(t_Handle h_FmMacsec)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t events,event,i;
++
++ SANITY_CHECK_RETURN(p_FmMacsec, E_INVALID_HANDLE);
++
++ events = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->evr);
++ events |= GET_UINT32(p_FmMacsec->p_FmMacsecRegs->ever);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->evr,events);
++
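++    /* Writing the captured bits back to evr appears to acknowledge them
++     * (write-1-to-clear) before the per-SC handlers are dispatched. */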
++ for (i=0; i<NUM_OF_TX_SC; i++)
++ if (events & FM_MACSEC_EV_TX_SC_NEXT_PN(i))
++ {
++ GET_MACSEC_MODULE_EVENT(e_FM_MACSEC_MOD_SC_TX, i, e_FM_INTR_TYPE_NORMAL, event);
++ p_FmMacsec->intrMng[event].f_Isr(p_FmMacsec->intrMng[event].h_SrcHandle, i);
++ }
++}
++
++static void MacsecErrorIsr(t_Handle h_FmMacsec)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t errors,error,i;
++
++ SANITY_CHECK_RETURN(p_FmMacsec, E_INVALID_HANDLE);
++
++ errors = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->err);
++ errors |= GET_UINT32(p_FmMacsec->p_FmMacsecRegs->erer);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->err,errors);
++
++ for (i=0; i<NUM_OF_TX_SC; i++)
++ if (errors & FM_MACSEC_EX_TX_SC(i))
++ {
++ GET_MACSEC_MODULE_EVENT(e_FM_MACSEC_MOD_SC_TX, i, e_FM_INTR_TYPE_ERR, error);
++ p_FmMacsec->intrMng[error].f_Isr(p_FmMacsec->intrMng[error].h_SrcHandle, i);
++ }
++
++ if (errors & FM_MACSEC_EX_ECC)
++ {
++ uint8_t eccType;
++ uint32_t tmpReg;
++
++ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->meec);
++ ASSERT_COND(tmpReg & MECC_CAP);
++ eccType = (uint8_t)((tmpReg & MECC_CET) >> MECC_CET_SHIFT);
++
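++        /* MECC_CAP flags a valid capture; eccType zero appears to denote a
++         * single-bit (corrected) ECC error and non-zero a multi-bit error,
++         * selecting which user exception is raised. */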
++ if (!eccType && (p_FmMacsec->userExceptions & FM_MACSEC_USER_EX_SINGLE_BIT_ECC))
++ p_FmMacsec->f_Exception(p_FmMacsec->h_App,e_FM_MACSEC_EX_SINGLE_BIT_ECC);
++ else if (eccType && (p_FmMacsec->userExceptions & FM_MACSEC_USER_EX_MULTI_BIT_ECC))
++ p_FmMacsec->f_Exception(p_FmMacsec->h_App,e_FM_MACSEC_EX_MULTI_BIT_ECC);
++ else
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->meec,tmpReg);
++ }
++}
++
++static t_Error MacsecInit(t_Handle h_FmMacsec)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_FmMacsecDriverParam *p_FmMacsecDriverParam = NULL;
++ uint32_t tmpReg,i,macId;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ CHECK_INIT_PARAMETERS(p_FmMacsec, CheckFmMacsecParameters);
++
++ p_FmMacsecDriverParam = p_FmMacsec->p_FmMacsecDriverParam;
++
++ for (i=0;i<e_FM_MACSEC_EV_DUMMY_LAST;i++)
++ p_FmMacsec->intrMng[i].f_Isr = UnimplementedIsr;
++
++ tmpReg = 0;
++ tmpReg |= (p_FmMacsecDriverParam->changedTextWithNoEncryptDeliverUncontrolled << CFG_UECT_SHIFT)|
++ (p_FmMacsecDriverParam->onlyScbIsSetDeliverUncontrolled << CFG_ESCBT_SHIFT) |
++ (p_FmMacsecDriverParam->unknownSciTreatMode << CFG_USFT_SHIFT) |
++ (p_FmMacsecDriverParam->invalidTagsDeliverUncontrolled << CFG_ITT_SHIFT) |
++ (p_FmMacsecDriverParam->encryptWithNoChangedTextDiscardUncontrolled << CFG_KFT_SHIFT) |
++ (p_FmMacsecDriverParam->untagTreatMode << CFG_UFT_SHIFT) |
++ (p_FmMacsecDriverParam->keysUnreadable << CFG_KSS_SHIFT) |
++ (p_FmMacsecDriverParam->reservedSc0 << CFG_S0I_SHIFT) |
++ (p_FmMacsecDriverParam->byPassMode << CFG_BYPN_SHIFT);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg, tmpReg);
++
++ tmpReg = FM_MAC_GetMaxFrameLength(p_FmMacsec->h_FmMac);
++ /* At least Ethernet FCS (4 bytes) overhead must be subtracted from MFL.
++ * In addition, the SCI (8 bytes) overhead might be subtracted as well. */
++ tmpReg -= p_FmMacsecDriverParam->mflSubtract;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->mfl, tmpReg);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->tpnet, p_FmMacsecDriverParam->pnExhThr);
++
++ if (!p_FmMacsec->userExceptions)
++ p_FmMacsec->exceptions &= ~FM_MACSEC_EX_ECC;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->erer, p_FmMacsec->exceptions);
++
++ p_FmMacsec->numRxScAvailable = NUM_OF_RX_SC;
++ if (p_FmMacsecDriverParam->reservedSc0)
++        p_FmMacsec->numRxScAvailable--;
++ p_FmMacsec->numTxScAvailable = NUM_OF_TX_SC;
++
++ XX_Free(p_FmMacsecDriverParam);
++ p_FmMacsec->p_FmMacsecDriverParam = NULL;
++
++ FM_MAC_GetId(p_FmMacsec->h_FmMac, &macId);
++ FmRegisterIntr(p_FmMacsec->h_Fm,
++ e_FM_MOD_MACSEC,
++ (uint8_t)macId,
++ e_FM_INTR_TYPE_NORMAL,
++ MacsecEventIsr,
++ p_FmMacsec);
++
++ FmRegisterIntr(p_FmMacsec->h_Fm,
++ e_FM_MOD_MACSEC,
++ 0,
++ e_FM_INTR_TYPE_ERR,
++ MacsecErrorIsr,
++ p_FmMacsec);
++
++ return E_OK;
++}
++
++static t_Error MacsecFree(t_Handle h_FmMacsec)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t macId;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ FM_MAC_GetId(p_FmMacsec->h_FmMac, &macId);
++ FmUnregisterIntr(p_FmMacsec->h_Fm,
++ e_FM_MOD_MACSEC,
++ (uint8_t)macId,
++ e_FM_INTR_TYPE_NORMAL);
++
++ FmUnregisterIntr(p_FmMacsec->h_Fm,
++ e_FM_MOD_MACSEC,
++ 0,
++ e_FM_INTR_TYPE_ERR);
++
++ if (p_FmMacsec->rxScSpinLock)
++ XX_FreeSpinlock(p_FmMacsec->rxScSpinLock);
++ if (p_FmMacsec->txScSpinLock)
++ XX_FreeSpinlock(p_FmMacsec->txScSpinLock);
++
++ XX_Free(p_FmMacsec);
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigUnknownSciFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUnknownSciFrameTreatment treatMode)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ p_FmMacsec->p_FmMacsecDriverParam->unknownSciTreatMode = treatMode;
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigInvalidTagsFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ p_FmMacsec->p_FmMacsecDriverParam->invalidTagsDeliverUncontrolled = deliverUncontrolled;
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigChangedTextWithNoEncryptFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ p_FmMacsec->p_FmMacsecDriverParam->changedTextWithNoEncryptDeliverUncontrolled = deliverUncontrolled;
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigOnlyScbIsSetFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ p_FmMacsec->p_FmMacsecDriverParam->onlyScbIsSetDeliverUncontrolled = deliverUncontrolled;
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigEncryptWithNoChangedTextFrameTreatment(t_Handle h_FmMacsec, bool discardUncontrolled)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ p_FmMacsec->p_FmMacsecDriverParam->encryptWithNoChangedTextDiscardUncontrolled = discardUncontrolled;
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigUntagFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUntagFrameTreatment treatMode)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ p_FmMacsec->p_FmMacsecDriverParam->untagTreatMode = treatMode;
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigPnExhaustionThreshold(t_Handle h_FmMacsec, uint32_t pnExhThr)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ p_FmMacsec->p_FmMacsecDriverParam->pnExhThr = pnExhThr;
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigKeysUnreadable(t_Handle h_FmMacsec)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ p_FmMacsec->p_FmMacsecDriverParam->keysUnreadable = TRUE;
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigSectagWithoutSCI(t_Handle h_FmMacsec)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ p_FmMacsec->p_FmMacsecDriverParam->sectagOverhead -= MACSEC_SCI_SIZE;
++ p_FmMacsec->p_FmMacsecDriverParam->mflSubtract += MACSEC_SCI_SIZE;
++
++ return E_OK;
++}
++
++static t_Error MacsecConfigException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ GET_USER_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_FmMacsec->userExceptions |= bitMask;
++ else
++ p_FmMacsec->userExceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ return E_OK;
++}
++
++static t_Error MacsecGetRevision(t_Handle h_FmMacsec, uint32_t *p_MacsecRevision)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ *p_MacsecRevision = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->ip_rev1);
++
++ return E_OK;
++}
++
++static t_Error MacsecEnable(t_Handle h_FmMacsec)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t tmpReg;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg);
++ tmpReg |= CFG_BYPN;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg,tmpReg);
++
++ return E_OK;
++}
++
++static t_Error MacsecDisable(t_Handle h_FmMacsec)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t tmpReg;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg);
++ tmpReg &= ~CFG_BYPN;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg,tmpReg);
++
++ return E_OK;
++}
++
++static t_Error MacsecSetException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t bitMask;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ GET_USER_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_FmMacsec->userExceptions |= bitMask;
++ else
++ p_FmMacsec->userExceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ if (!p_FmMacsec->userExceptions)
++ p_FmMacsec->exceptions &= ~FM_MACSEC_EX_ECC;
++ else
++ p_FmMacsec->exceptions |= FM_MACSEC_EX_ECC;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->erer, p_FmMacsec->exceptions);
++
++ return E_OK;
++}
++
++static void InitFmMacsecControllerDriver(t_FmMacsecControllerDriver *p_FmMacsecControllerDriver)
++{
++ p_FmMacsecControllerDriver->f_FM_MACSEC_Init = MacsecInit;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_Free = MacsecFree;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUnknownSciFrameTreatment = MacsecConfigUnknownSciFrameTreatment;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigInvalidTagsFrameTreatment = MacsecConfigInvalidTagsFrameTreatment;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment = MacsecConfigEncryptWithNoChangedTextFrameTreatment;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUntagFrameTreatment = MacsecConfigUntagFrameTreatment;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigChangedTextWithNoEncryptFrameTreatment = MacsecConfigChangedTextWithNoEncryptFrameTreatment;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigOnlyScbIsSetFrameTreatment = MacsecConfigOnlyScbIsSetFrameTreatment;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigPnExhaustionThreshold = MacsecConfigPnExhaustionThreshold;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigKeysUnreadable = MacsecConfigKeysUnreadable;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigSectagWithoutSCI = MacsecConfigSectagWithoutSCI;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigException = MacsecConfigException;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_GetRevision = MacsecGetRevision;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_Enable = MacsecEnable;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_Disable = MacsecDisable;
++ p_FmMacsecControllerDriver->f_FM_MACSEC_SetException = MacsecSetException;
++}
++
++/****************************************/
++/* Inter-Module functions */
++/****************************************/
++
++void FmMacsecRegisterIntr(t_Handle h_FmMacsec,
++ e_FmMacsecEventModules module,
++ uint8_t modId,
++ e_FmIntrType intrType,
++ void (*f_Isr) (t_Handle h_Arg, uint32_t id),
++ t_Handle h_Arg)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++    uint8_t     event = 0;
++
++ SANITY_CHECK_RETURN(p_FmMacsec, E_INVALID_HANDLE);
++
++ GET_MACSEC_MODULE_EVENT(module, modId, intrType, event);
++
++ ASSERT_COND(event != e_FM_MACSEC_EV_DUMMY_LAST);
++ p_FmMacsec->intrMng[event].f_Isr = f_Isr;
++ p_FmMacsec->intrMng[event].h_SrcHandle = h_Arg;
++}
++
++void FmMacsecUnregisterIntr(t_Handle h_FmMacsec,
++ e_FmMacsecEventModules module,
++ uint8_t modId,
++ e_FmIntrType intrType)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++    uint8_t     event = 0;
++
++ SANITY_CHECK_RETURN(p_FmMacsec, E_INVALID_HANDLE);
++
++    GET_MACSEC_MODULE_EVENT(module, modId, intrType, event);
++
++ ASSERT_COND(event != e_FM_MACSEC_EV_DUMMY_LAST);
++ p_FmMacsec->intrMng[event].f_Isr = NULL;
++ p_FmMacsec->intrMng[event].h_SrcHandle = NULL;
++}
++
++t_Error FmMacsecAllocScs(t_Handle h_FmMacsec, e_ScType type, bool isPtp, uint32_t numOfScs, uint32_t *p_ScIds)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ bool *p_ScTable;
++ uint32_t *p_ScAvailable,i;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_ScIds, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(numOfScs, E_INVALID_HANDLE);
++
++ if (type == e_SC_RX)
++ {
++ p_ScTable = (bool *)p_FmMacsec->rxScTable;
++ p_ScAvailable = &p_FmMacsec->numRxScAvailable;
++ i = (NUM_OF_RX_SC - 1);
++ }
++ else
++ {
++ p_ScTable = (bool *)p_FmMacsec->txScTable;
++ p_ScAvailable = &p_FmMacsec->numTxScAvailable;
++ i = (NUM_OF_TX_SC - 1);
++    }
++ if (*p_ScAvailable < numOfScs)
++ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Not enough SCs available"));
++
++ if (isPtp)
++ {
++ i = 0;
++ if (p_ScTable[i])
++ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Sc 0 Not available"));
++ }
++
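++    /* Scan from the highest free SC downward so that SC 0 - the one
++     * point-to-point mode relies on - is handed out last; with isPtp the
++     * scan starts at SC 0 and the caller is expected to request one SC. */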
++ for (;numOfScs;i--)
++ {
++ if (p_ScTable[i])
++ continue;
++        numOfScs--;
++ (*p_ScAvailable)--;
++ p_ScIds[numOfScs] = i;
++ p_ScTable[i] = TRUE;
++ }
++
++ return err;
++}
++
++t_Error FmMacsecFreeScs(t_Handle h_FmMacsec, e_ScType type, uint32_t numOfScs, uint32_t *p_ScIds)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ bool *p_ScTable;
++ uint32_t *p_ScAvailable,maxNumOfSc,i;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_ScIds, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(numOfScs, E_INVALID_HANDLE);
++
++ if (type == e_SC_RX)
++ {
++ p_ScTable = (bool *)p_FmMacsec->rxScTable;
++ p_ScAvailable = &p_FmMacsec->numRxScAvailable;
++ maxNumOfSc = NUM_OF_RX_SC;
++ }
++ else
++ {
++ p_ScTable = (bool *)p_FmMacsec->txScTable;
++ p_ScAvailable = &p_FmMacsec->numTxScAvailable;
++ maxNumOfSc = NUM_OF_TX_SC;
++ }
++
++ if ((*p_ScAvailable + numOfScs) > maxNumOfSc)
++        RETURN_ERROR(MINOR, E_FULL, ("Too many SCs"));
++
++ for (i=0;i<numOfScs;i++)
++ {
++ p_ScTable[p_ScIds[i]] = FALSE;
++ (*p_ScAvailable)++;
++ }
++
++ return err;
++
++}
++
++t_Error FmMacsecSetPTP(t_Handle h_FmMacsec, bool enable)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t tmpReg = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++
++ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg);
++ if (enable && (tmpReg & CFG_S0I))
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("MACSEC already in point-to-point mode"));
++
++ if (enable)
++ tmpReg |= CFG_S0I;
++ else
++ tmpReg &= ~CFG_S0I;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg, tmpReg);
++
++ return E_OK;
++}
++
++t_Error FmMacsecCreateRxSc(t_Handle h_FmMacsec, t_RxScParams *p_RxScParams)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_RxScParams, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_RxScParams->scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
++
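++    /* The Rx SC registers are windowed: writing rxsca selects which SC the
++     * rxsccfg, rxsci1h/rxsci2h and rpw accesses below refer to - hence the
++     * spinlock around the whole sequence. */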
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, p_RxScParams->scId);
++ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsccfg);
++ if (tmpReg & RX_SCCFG_SCI_EN_MASK)
++ {
++ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Rx Sc %d must be disable",p_RxScParams->scId));
++ }
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsci1h, GET_SCI_FIRST_HALF(p_RxScParams->sci));
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsci2h, GET_SCI_SECOND_HALF(p_RxScParams->sci));
++ tmpReg |= ((p_RxScParams->replayProtect << RX_SCCFG_RP_SHIFT) & RX_SCCFG_RP_MASK);
++ tmpReg |= ((p_RxScParams->validateFrames << RX_SCCFG_VF_SHIFT) & RX_SCCFG_VF_MASK);
++ tmpReg |= ((p_RxScParams->confidentialityOffset << RX_SCCFG_CO_SHIFT) & RX_SCCFG_CO_MASK);
++ tmpReg |= RX_SCCFG_SCI_EN_MASK;
++ tmpReg |= (p_RxScParams->cipherSuite << RX_SCCFG_CS_SHIFT);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsccfg, tmpReg);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rpw, p_RxScParams->replayWindow);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecDeleteRxSc(t_Handle h_FmMacsec, uint32_t scId)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
++
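++    /* tmpReg is zero here, so the write below clears the whole rxsccfg -
++     * dropping SCI_EN and resetting every other field of the selected SC. */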
++ tmpReg &= ~RX_SCCFG_SCI_EN_MASK;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsccfg, tmpReg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecCreateTxSc(t_Handle h_FmMacsec, t_TxScParams *p_TxScParams)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, intFlags;
++ bool alwaysIncludeSCI = FALSE, useES = FALSE, useSCB = FALSE;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_TxScParams, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_TxScParams->scId < NUM_OF_TX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, p_TxScParams->scId);
++
++ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg);
++ if (tmpReg & TX_SCCFG_SCE_MASK)
++ {
++ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Tx Sc %d must be disable",p_TxScParams->scId));
++ }
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsci1h, GET_SCI_FIRST_HALF(p_TxScParams->sci));
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsci2h, GET_SCI_SECOND_HALF(p_TxScParams->sci));
++ alwaysIncludeSCI = (p_TxScParams->sciInsertionMode == e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_SECTAG);
++ useES = (p_TxScParams->sciInsertionMode == e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_MAC_SA);
++
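++    /* Map the SCI insertion mode onto the SECTAG TCI controls: explicit
++     * SECTAG always carries the SCI (AIS), explicit MAC-SA signals it via
++     * the ES bit instead, and useSCB stays FALSE since single-copy-broadcast
++     * is not exposed through this API. */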
++ tmpReg |= ((p_TxScParams->protectFrames << TX_SCCFG_PF_SHIFT) & TX_SCCFG_PF_MASK);
++ tmpReg |= ((alwaysIncludeSCI << TX_SCCFG_AIS_SHIFT) & TX_SCCFG_AIS_MASK);
++ tmpReg |= ((useES << TX_SCCFG_UES_SHIFT) & TX_SCCFG_UES_MASK);
++ tmpReg |= ((useSCB << TX_SCCFG_USCB_SHIFT) & TX_SCCFG_USCB_MASK);
++ tmpReg |= ((p_TxScParams->confidentialityEnable << TX_SCCFG_CE_SHIFT) & TX_SCCFG_CE_MASK);
++ tmpReg |= ((p_TxScParams->confidentialityOffset << TX_SCCFG_CO_SHIFT) & TX_SCCFG_CO_MASK);
++ tmpReg |= TX_SCCFG_SCE_MASK;
++ tmpReg |= (p_TxScParams->cipherSuite << TX_SCCFG_CS_SHIFT);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg, tmpReg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecDeleteTxSc(t_Handle h_FmMacsec, uint32_t scId)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_TX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
++
++ tmpReg &= ~TX_SCCFG_SCE_MASK;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg, tmpReg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecCreateRxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecAN_t an, uint32_t lowestPn, macsecSAKey_t key)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsanpn, DEFAULT_initNextPn);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsalpn, lowestPn);
++ MemCpy8((void*)p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsak, key, sizeof(macsecSAKey_t));
++
++ tmpReg |= RX_SACFG_ACTIVE;
++ tmpReg |= ((an << RX_SACFG_AN_SHIFT) & RX_SACFG_AN_MASK);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsacs, tmpReg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecCreateTxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecSAKey_t key)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++    SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_TX_SC, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_TX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsanpn, DEFAULT_initNextPn);
++ MemCpy8((void*)p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsak, key, sizeof(macsecSAKey_t));
++
++ tmpReg |= TX_SACFG_ACTIVE;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsacs, tmpReg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecDeleteRxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, i, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsanpn, 0x0);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsalpn, 0x0);
++ for (i=0; i<4; i++)
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsak[i], 0x0);
++
++ tmpReg |= RX_SACFG_ACTIVE;
++ tmpReg &= ~RX_SACFG_EN_MASK;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsacs, tmpReg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecDeleteTxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, i, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++    SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_TX_SC, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_TX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsanpn, 0x0);
++ for (i=0; i<4; i++)
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsak[i], 0x0);
++
++ tmpReg |= TX_SACFG_ACTIVE;
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsacs, tmpReg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecRxSaSetReceive(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, bool enableReceive)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
++ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsacs);
++ if (enableReceive)
++ tmpReg |= RX_SACFG_EN_MASK;
++ else
++ tmpReg &= ~RX_SACFG_EN_MASK;
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsacs, tmpReg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecRxSaUpdateNextPn(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, uint32_t updtNextPN)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsanpn, updtNextPN);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecRxSaUpdateLowestPn(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, uint32_t updtLowestPN)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsalpn, updtLowestPN);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecTxSaSetActive(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecAN_t an)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_TX_SC, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_TX_SC, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
++
++ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg);
++
++ tmpReg |= ((an << TX_SCCFG_AN_SHIFT) & TX_SCCFG_AN_MASK);
++ tmpReg |= ((saId << TX_SCCFG_ASA_SHIFT) & TX_SCCFG_ASA_MASK);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg, tmpReg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
++
++ return err;
++}
++
++t_Error FmMacsecTxSaGetActive(t_Handle h_FmMacsec, uint32_t scId, macsecAN_t *p_An)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ t_Error err = E_OK;
++ uint32_t tmpReg = 0, intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_TX_SC, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_An, E_INVALID_HANDLE);
++
++ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
++
++ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg);
++
++ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
++
++ *p_An = (macsecAN_t)((tmpReg & TX_SCCFG_AN_MASK) >> TX_SCCFG_AN_SHIFT);
++
++ return err;
++}
++
++t_Error FmMacsecSetException(t_Handle h_FmMacsec, e_FmMacsecGlobalExceptions exception, uint32_t scId, bool enable)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t bitMask;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ GET_EXCEPTION_FLAG(bitMask, exception, scId);
++ if (bitMask)
++ {
++ if (enable)
++ p_FmMacsec->exceptions |= bitMask;
++ else
++ p_FmMacsec->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->erer, p_FmMacsec->exceptions);
++
++ return E_OK;
++}
++
++t_Error FmMacsecSetEvent(t_Handle h_FmMacsec, e_FmMacsecGlobalEvents event, uint32_t scId, bool enable)
++{
++ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
++ uint32_t bitMask;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
++
++ GET_EVENT_FLAG(bitMask, event, scId);
++ if (bitMask)
++ {
++ if (enable)
++ p_FmMacsec->events |= bitMask;
++ else
++ p_FmMacsec->events &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined event"));
++
++ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->ever, p_FmMacsec->events);
++
++ return E_OK;
++}
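++
++/* Note added for clarity (not in the original sources): both routines above
++ * keep a software copy of the enabled mask (p_FmMacsec->exceptions/events)
++ * and write the full accumulated mask back to the ERER/EVER register on
++ * every call, rather than read-modify-writing the register itself. */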
++
++/****************************************/
++/* API Init unit functions */
++/****************************************/
++t_Handle FM_MACSEC_MASTER_Config(t_FmMacsecParams *p_FmMacsecParam)
++{
++ t_FmMacsec *p_FmMacsec;
++ uint32_t macId;
++
++ /* Allocate FM MACSEC structure */
++ p_FmMacsec = (t_FmMacsec *) XX_Malloc(sizeof(t_FmMacsec));
++ if (!p_FmMacsec)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC driver structure"));
++ return NULL;
++ }
++ memset(p_FmMacsec, 0, sizeof(t_FmMacsec));
++ InitFmMacsecControllerDriver(&p_FmMacsec->fmMacsecControllerDriver);
++
++ /* Allocate the FM MACSEC driver's parameters structure */
++ p_FmMacsec->p_FmMacsecDriverParam = (t_FmMacsecDriverParam *)XX_Malloc(sizeof(t_FmMacsecDriverParam));
++ if (!p_FmMacsec->p_FmMacsecDriverParam)
++ {
++ XX_Free(p_FmMacsec);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC driver parameters"));
++ return NULL;
++ }
++ memset(p_FmMacsec->p_FmMacsecDriverParam, 0, sizeof(t_FmMacsecDriverParam));
++
++ /* Initialize FM MACSEC parameters which will be kept by the driver */
++ p_FmMacsec->h_Fm = p_FmMacsecParam->h_Fm;
++ p_FmMacsec->h_FmMac = p_FmMacsecParam->nonGuestParams.h_FmMac;
++ p_FmMacsec->p_FmMacsecRegs = (t_FmMacsecRegs *)UINT_TO_PTR(p_FmMacsecParam->nonGuestParams.baseAddr);
++ p_FmMacsec->f_Exception = p_FmMacsecParam->nonGuestParams.f_Exception;
++ p_FmMacsec->h_App = p_FmMacsecParam->nonGuestParams.h_App;
++ p_FmMacsec->userExceptions = DEFAULT_userExceptions;
++ p_FmMacsec->exceptions = DEFAULT_exceptions;
++ p_FmMacsec->events = DEFAULT_events;
++ p_FmMacsec->rxScSpinLock = XX_InitSpinlock();
++ p_FmMacsec->txScSpinLock = XX_InitSpinlock();
++
++ /* Initialize FM MACSEC driver parameters (for initialization phase only) */
++ p_FmMacsec->p_FmMacsecDriverParam->unknownSciTreatMode = DEFAULT_unknownSciFrameTreatment;
++ p_FmMacsec->p_FmMacsecDriverParam->invalidTagsDeliverUncontrolled = DEFAULT_invalidTagsFrameTreatment;
++ p_FmMacsec->p_FmMacsecDriverParam->encryptWithNoChangedTextDiscardUncontrolled = DEFAULT_encryptWithNoChangedTextFrameTreatment;
++ p_FmMacsec->p_FmMacsecDriverParam->untagTreatMode = DEFAULT_untagFrameTreatment;
++ p_FmMacsec->p_FmMacsecDriverParam->keysUnreadable = DEFAULT_keysUnreadable;
++ p_FmMacsec->p_FmMacsecDriverParam->reservedSc0 = DEFAULT_sc0ReservedForPTP;
++ p_FmMacsec->p_FmMacsecDriverParam->byPassMode = !DEFAULT_normalMode;
++ p_FmMacsec->p_FmMacsecDriverParam->pnExhThr = DEFAULT_pnExhThr;
++ p_FmMacsec->p_FmMacsecDriverParam->sectagOverhead = DEFAULT_sectagOverhead;
++ p_FmMacsec->p_FmMacsecDriverParam->mflSubtract = DEFAULT_mflSubtract;
++ /* build the FM MACSEC master IPC address */
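++ /* Clarifying note (added): "FM-%d-MAC-%d-MACSEC-Master" formats to the
++ 24 characters the check below expects only while both the FM id and
++ the MAC id are single-digit; larger ids would trip the Sprint test. */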
++ memset(p_FmMacsec->fmMacsecModuleName, 0, (sizeof(char))*MODULE_NAME_SIZE);
++ FM_MAC_GetId(p_FmMacsec->h_FmMac,&macId);
++ if (Sprint (p_FmMacsec->fmMacsecModuleName, "FM-%d-MAC-%d-MACSEC-Master",
++ FmGetId(p_FmMacsec->h_Fm),macId) != 24)
++ {
++ XX_Free(p_FmMacsec->p_FmMacsecDriverParam);
++ XX_Free(p_FmMacsec);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++ return NULL;
++ }
++ return p_FmMacsec;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.h
+new file mode 100644
+index 00000000..2296a0f1
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.h
+@@ -0,0 +1,479 @@
++/*
++ * Copyright 2008-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File fm_macsec_master.h
++
++ @Description FM MACSEC internal structures and definitions.
++*//***************************************************************************/
++#ifndef __FM_MACSEC_MASTER_H
++#define __FM_MACSEC_MASTER_H
++
++#include "error_ext.h"
++#include "std_ext.h"
++
++#include "fm_macsec.h"
++
++
++#define MACSEC_ICV_SIZE 16
++#define MACSEC_SECTAG_SIZE 16
++#define MACSEC_SCI_SIZE 8
++#define MACSEC_FCS_SIZE 4
++
++/**************************************************************************//**
++ @Description Exceptions
++*//***************************************************************************/
++
++#define FM_MACSEC_EX_TX_SC_0 0x80000000
++#define FM_MACSEC_EX_TX_SC(sc) (FM_MACSEC_EX_TX_SC_0 >> (sc))
++#define FM_MACSEC_EX_ECC 0x00000001
++
++#define GET_EXCEPTION_FLAG(bitMask, exception, id) switch (exception){ \
++ case e_FM_MACSEC_EX_TX_SC: \
++ bitMask = FM_MACSEC_EX_TX_SC(id); break; \
++ case e_FM_MACSEC_EX_ECC: \
++ bitMask = FM_MACSEC_EX_ECC; break; \
++ default: bitMask = 0;break;}
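++
++/* Illustrative expansion (added; values follow from the defines above):
++ * GET_EXCEPTION_FLAG(bitMask, e_FM_MACSEC_EX_TX_SC, 3) leaves
++ * bitMask == FM_MACSEC_EX_TX_SC(3) == (0x80000000 >> 3) == 0x10000000,
++ * while an unknown exception value leaves bitMask == 0, which callers
++ * report as an undefined exception. */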
++
++#define FM_MACSEC_USER_EX_SINGLE_BIT_ECC 0x80000000
++#define FM_MACSEC_USER_EX_MULTI_BIT_ECC 0x40000000
++
++#define GET_USER_EXCEPTION_FLAG(bitMask, exception) switch (exception){ \
++ case e_FM_MACSEC_EX_SINGLE_BIT_ECC: \
++ bitMask = FM_MACSEC_USER_EX_SINGLE_BIT_ECC; break; \
++ case e_FM_MACSEC_EX_MULTI_BIT_ECC: \
++ bitMask = FM_MACSEC_USER_EX_MULTI_BIT_ECC; break; \
++ default: bitMask = 0;break;}
++
++/**************************************************************************//**
++ @Description Events
++*//***************************************************************************/
++
++#define FM_MACSEC_EV_TX_SC_0_NEXT_PN 0x80000000
++#define FM_MACSEC_EV_TX_SC_NEXT_PN(sc) (FM_MACSEC_EV_TX_SC_0_NEXT_PN >> (sc))
++
++#define GET_EVENT_FLAG(bitMask, event, id) switch (event){ \
++ case e_FM_MACSEC_EV_TX_SC_NEXT_PN: \
++ bitMask = FM_MACSEC_EV_TX_SC_NEXT_PN(id); break; \
++ default: bitMask = 0;break;}
++
++/**************************************************************************//**
++ @Description Defaults
++*//***************************************************************************/
++#define DEFAULT_userExceptions (FM_MACSEC_USER_EX_SINGLE_BIT_ECC |\
++ FM_MACSEC_USER_EX_MULTI_BIT_ECC)
++
++#define DEFAULT_exceptions (FM_MACSEC_EX_TX_SC(0) |\
++ FM_MACSEC_EX_TX_SC(1) |\
++ FM_MACSEC_EX_TX_SC(2) |\
++ FM_MACSEC_EX_TX_SC(3) |\
++ FM_MACSEC_EX_TX_SC(4) |\
++ FM_MACSEC_EX_TX_SC(5) |\
++ FM_MACSEC_EX_TX_SC(6) |\
++ FM_MACSEC_EX_TX_SC(7) |\
++ FM_MACSEC_EX_TX_SC(8) |\
++ FM_MACSEC_EX_TX_SC(9) |\
++ FM_MACSEC_EX_TX_SC(10) |\
++ FM_MACSEC_EX_TX_SC(11) |\
++ FM_MACSEC_EX_TX_SC(12) |\
++ FM_MACSEC_EX_TX_SC(13) |\
++ FM_MACSEC_EX_TX_SC(14) |\
++ FM_MACSEC_EX_TX_SC(15) |\
++ FM_MACSEC_EX_ECC )
++
++#define DEFAULT_events (FM_MACSEC_EV_TX_SC_NEXT_PN(0) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(1) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(2) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(3) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(4) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(5) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(6) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(7) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(8) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(9) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(10) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(11) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(12) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(13) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(14) |\
++ FM_MACSEC_EV_TX_SC_NEXT_PN(15) )
++
++#define DEFAULT_unknownSciFrameTreatment e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_BOTH
++#define DEFAULT_invalidTagsFrameTreatment FALSE
++#define DEFAULT_encryptWithNoChangedTextFrameTreatment FALSE
++#define DEFAULT_untagFrameTreatment e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED
++#define DEFAULT_changedTextWithNoEncryptFrameTreatment FALSE
++#define DEFAULT_onlyScbIsSetFrameTreatment FALSE
++#define DEFAULT_keysUnreadable FALSE
++#define DEFAULT_normalMode TRUE
++#define DEFAULT_sc0ReservedForPTP FALSE
++#define DEFAULT_initNextPn 1
++#define DEFAULT_pnExhThr 0xffffffff
++#define DEFAULT_sectagOverhead (MACSEC_ICV_SIZE + MACSEC_SECTAG_SIZE)
++#define DEFAULT_mflSubtract MACSEC_FCS_SIZE
++
++
++/**************************************************************************//**
++ @Description Memory Mapped Registers
++*//***************************************************************************/
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++typedef _Packed struct
++{
++ /* MACsec configuration */
++ volatile uint32_t cfg; /**< MACsec configuration */
++ volatile uint32_t et; /**< MACsec EtherType */
++ volatile uint8_t res1[56]; /**< reserved */
++ volatile uint32_t mfl; /**< Maximum Frame Length */
++ volatile uint32_t tpnet; /**< TX Packet Number exhaustion threshold */
++ volatile uint8_t res2[56]; /**< reserved */
++ volatile uint32_t rxsca; /**< RX SC access select */
++ volatile uint8_t res3[60]; /**< reserved */
++ volatile uint32_t txsca; /**< TX SC access select */
++ volatile uint8_t res4[60]; /**< reserved */
++
++ /* RX configuration, status and statistic */
++ volatile uint32_t rxsci1h; /**< RX Secure Channel Identifier first half */
++ volatile uint32_t rxsci2h; /**< RX Secure Channel Identifier second half */
++ volatile uint8_t res5[8]; /**< reserved */
++ volatile uint32_t ifio1hs; /**< ifInOctets first half Statistic */
++ volatile uint32_t ifio2hs; /**< ifInOctets second half Statistic */
++ volatile uint32_t ifiups; /**< ifInUcastPkts Statistic */
++ volatile uint8_t res6[4]; /**< reserved */
++ volatile uint32_t ifimps; /**< ifInMulticastPkts Statistic */
++ volatile uint32_t ifibps; /**< ifInBroadcastPkts Statistic */
++ volatile uint32_t rxsccfg; /**< RX Secure Channel configuration */
++ volatile uint32_t rpw; /**< replayWindow */
++ volatile uint8_t res7[16]; /**< reserved */
++ volatile uint32_t inov1hs; /**< InOctetsValidated first half Statistic */
++ volatile uint32_t inov2hs; /**< InOctetsValidated second half Statistic */
++ volatile uint32_t inod1hs; /**< InOctetsDecrypted first half Statistic */
++ volatile uint32_t inod2hs; /**< InOctetsDecrypted second half Statistic */
++ volatile uint32_t rxscipus; /**< RX Secure Channel InPktsUnchecked Statistic */
++ volatile uint32_t rxscipds; /**< RX Secure Channel InPktsDelayed Statistic */
++ volatile uint32_t rxscipls; /**< RX Secure Channel InPktsLate Statistic */
++ volatile uint8_t res8[4]; /**< reserved */
++ volatile uint32_t rxaninuss[MAX_NUM_OF_SA_PER_SC]; /**< RX AN 0-3 InNotUsingSA Statistic */
++ volatile uint32_t rxanipuss[MAX_NUM_OF_SA_PER_SC]; /**< RX AN 0-3 InPktsUnusedSA Statistic */
++ _Packed struct
++ {
++ volatile uint32_t rxsacs; /**< RX Security Association configuration and status */
++ volatile uint32_t rxsanpn; /**< RX Security Association nextPN */
++ volatile uint32_t rxsalpn; /**< RX Security Association lowestPN */
++ volatile uint32_t rxsaipos; /**< RX Security Association InPktsOK Statistic */
++ volatile uint32_t rxsak[4]; /**< RX Security Association key (128 bit) */
++ volatile uint32_t rxsah[4]; /**< RX Security Association hash (128 bit) */
++ volatile uint32_t rxsaipis; /**< RX Security Association InPktsInvalid Statistic */
++ volatile uint32_t rxsaipnvs; /**< RX Security Association InPktsNotValid Statistic */
++ volatile uint8_t res9[8]; /**< reserved */
++ } _PackedType fmMacsecRxScSa[NUM_OF_SA_PER_RX_SC];
++
++ /* TX configuration, status and statistic */
++ volatile uint32_t txsci1h; /**< TX Secure Channel Identifier first half */
++ volatile uint32_t txsci2h; /**< TX Secure Channel Identifier second half */
++ volatile uint8_t res10[8]; /**< reserved */
++ volatile uint32_t ifoo1hs; /**< ifOutOctets first half Statistic */
++ volatile uint32_t ifoo2hs; /**< ifOutOctets second half Statistic */
++ volatile uint32_t ifoups; /**< ifOutUcastPkts Statistic */
++ volatile uint32_t opus; /**< OutPktsUntagged Statistic */
++ volatile uint32_t ifomps; /**< ifOutMulticastPkts Statistic */
++ volatile uint32_t ifobps; /**< ifOutBroadcastPkts Statistic */
++ volatile uint32_t txsccfg; /**< TX Secure Channel configuration */
++ volatile uint32_t optls; /**< OutPktsTooLong Statistic */
++ volatile uint8_t res11[16]; /**< reserved */
++ volatile uint32_t oop1hs; /**< OutOctetsProtected first half Statistic */
++ volatile uint32_t oop2hs; /**< OutOctetsProtected second half Statistic */
++ volatile uint32_t ooe1hs; /**< OutOctetsEncrypted first half Statistic */
++ volatile uint32_t ooe2hs; /**< OutOctetsEncrypted second half Statistic */
++ volatile uint8_t res12[48]; /**< reserved */
++ _Packed struct
++ {
++ volatile uint32_t txsacs; /**< TX Security Association configuration and status */
++ volatile uint32_t txsanpn; /**< TX Security Association nextPN */
++ volatile uint32_t txsaopps; /**< TX Security Association OutPktsProtected Statistic */
++ volatile uint32_t txsaopes; /**< TX Security Association OutPktsEncrypted Statistic */
++ volatile uint32_t txsak[4]; /**< TX Security Association key (128 bit) */
++ volatile uint32_t txsah[4]; /**< TX Security Association hash (128 bit) */
++ volatile uint8_t res13[16]; /**< reserved */
++ } _PackedType fmMacsecTxScSa[NUM_OF_SA_PER_TX_SC];
++ volatile uint8_t res14[248]; /**< reserved */
++
++ /* Global configuration and status */
++ volatile uint32_t ip_rev1; /**< MACsec IP Block Revision 1 register */
++ volatile uint32_t ip_rev2; /**< MACsec IP Block Revision 2 register */
++ volatile uint32_t evr; /**< MACsec Event Register */
++ volatile uint32_t ever; /**< MACsec Event Enable Register */
++ volatile uint32_t evfr; /**< MACsec Event Force Register */
++ volatile uint32_t err; /**< MACsec Error Register */
++ volatile uint32_t erer; /**< MACsec Error Enable Register */
++ volatile uint32_t erfr; /**< MACsec Error Force Register */
++ volatile uint8_t res15[40]; /**< reserved */
++ volatile uint32_t meec; /**< MACsec Memory ECC Error Capture Register */
++ volatile uint32_t idle; /**< MACsec Idle status Register */
++ volatile uint8_t res16[184]; /**< reserved */
++ /* DEBUG */
++ volatile uint32_t rxec; /**< MACsec RX error capture Register */
++ volatile uint8_t res17[28]; /**< reserved */
++ volatile uint32_t txec; /**< MACsec TX error capture Register */
++ volatile uint8_t res18[220]; /**< reserved */
++
++ /* Macsec Rx global statistic */
++ volatile uint32_t ifiocp1hs; /**< ifInOctetsCp first half Statistic */
++ volatile uint32_t ifiocp2hs; /**< ifInOctetsCp second half Statistic */
++ volatile uint32_t ifiupcps; /**< ifInUcastPktsCp Statistic */
++ volatile uint8_t res19[4]; /**< reserved */
++ volatile uint32_t ifioup1hs; /**< ifInOctetsUp first half Statistic */
++ volatile uint32_t ifioup2hs; /**< ifInOctetsUp second half Statistic */
++ volatile uint32_t ifiupups; /**< ifInUcastPktsUp Statistic */
++ volatile uint8_t res20[4]; /**< reserved */
++ volatile uint32_t ifimpcps; /**< ifInMulticastPktsCp Statistic */
++ volatile uint32_t ifibpcps; /**< ifInBroadcastPktsCp Statistic */
++ volatile uint32_t ifimpups; /**< ifInMulticastPktsUp Statistic */
++ volatile uint32_t ifibpups; /**< ifInBroadcastPktsUp Statistic */
++ volatile uint32_t ipwts; /**< InPktsWithoutTag Statistic */
++ volatile uint32_t ipkays; /**< InPktsKaY Statistic */
++ volatile uint32_t ipbts; /**< InPktsBadTag Statistic */
++ volatile uint32_t ipsnfs; /**< InPktsSCINotFound Statistic */
++ volatile uint32_t ipuecs; /**< InPktsUnsupportedEC Statistic */
++ volatile uint32_t ipescbs; /**< InPktsEponSingleCopyBroadcast Statistic */
++ volatile uint32_t iptls; /**< InPktsTooLong Statistic */
++ volatile uint8_t res21[52]; /**< reserved */
++
++ /* Macsec Tx global statistic */
++ volatile uint32_t opds; /**< OutPktsDiscarded Statistic */
++#if (DPAA_VERSION >= 11)
++ volatile uint8_t res22[124]; /**< reserved */
++ _Packed struct
++ {
++ volatile uint32_t rxsak[8]; /**< RX Security Association key (128/256 bit) */
++ volatile uint8_t res23[32]; /**< reserved */
++ } _PackedType rxScSaKey[NUM_OF_SA_PER_RX_SC];
++ _Packed struct
++ {
++ volatile uint32_t txsak[8]; /**< TX Security Association key (128/256 bit) */
++ volatile uint8_t res24[32]; /**< reserved */
++ } _PackedType txScSaKey[NUM_OF_SA_PER_TX_SC];
++#endif /* (DPAA_VERSION >= 11) */
++} _PackedType t_FmMacsecRegs;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
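++
++/* Usage note (added, derived from fm_macsec_master.c above): the per-SC
++ * register blocks are windowed, so callers first select a channel through
++ * rxsca/txsca and only then access fmMacsecRxScSa[]/fmMacsecTxScSa[], with
++ * rxScSpinLock/txScSpinLock held around the select-plus-access pair. */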
++
++
++/**************************************************************************//**
++ @Description General defines
++*//***************************************************************************/
++
++#define SCI_HIGH_MASK 0xffffffff00000000LL
++#define SCI_LOW_MASK 0x00000000ffffffffLL
++
++#define LONG_SHIFT 32
++
++#define GET_SCI_FIRST_HALF(sci) (uint32_t)((macsecSCI_t)((macsecSCI_t)(sci) & SCI_HIGH_MASK) >> LONG_SHIFT)
++#define GET_SCI_SECOND_HALF(sci) (uint32_t)((macsecSCI_t)(sci) & SCI_LOW_MASK)
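++
++/* Worked example (added): for sci == 0x0011223344556677ULL,
++ * GET_SCI_FIRST_HALF(sci) == 0x00112233 and
++ * GET_SCI_SECOND_HALF(sci) == 0x44556677, matching the rxsci1h/rxsci2h and
++ * txsci1h/txsci2h register pairs declared above. */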
++
++/**************************************************************************//**
++ @Description Configuration defines
++*//***************************************************************************/
++
++/* masks */
++#define CFG_UECT 0x00000800
++#define CFG_ESCBT 0x00000400
++#define CFG_USFT 0x00000300
++#define CFG_ITT 0x00000080
++#define CFG_KFT 0x00000040
++#define CFG_UFT 0x00000030
++#define CFG_KSS 0x00000004
++#define CFG_BYPN 0x00000002
++#define CFG_S0I 0x00000001
++
++#define ET_TYPE 0x0000ffff
++
++#define MFL_MAX_LEN 0x0000ffff
++
++#define RXSCA_SC_SEL 0x0000000f
++
++#define TXSCA_SC_SEL 0x0000000f
++
++#define IP_REV_1_IP_ID 0xffff0000
++#define IP_REV_1_IP_MJ 0x0000ff00
++#define IP_REV_1_IP_MM 0x000000ff
++
++#define IP_REV_2_IP_INT 0x00ff0000
++#define IP_REV_2_IP_ERR 0x0000ff00
++#define IP_REV_2_IP_CFG 0x000000ff
++
++#define MECC_CAP 0x80000000
++#define MECC_CET 0x40000000
++#define MECC_SERCNT 0x00ff0000
++#define MECC_MEMADDR 0x000001ff
++
++/* shifts */
++#define CFG_UECT_SHIFT (31-20)
++#define CFG_ESCBT_SHIFT (31-21)
++#define CFG_USFT_SHIFT (31-23)
++#define CFG_ITT_SHIFT (31-24)
++#define CFG_KFT_SHIFT (31-25)
++#define CFG_UFT_SHIFT (31-27)
++#define CFG_KSS_SHIFT (31-29)
++#define CFG_BYPN_SHIFT (31-30)
++#define CFG_S0I_SHIFT (31-31)
++
++#define IP_REV_1_IP_ID_SHIFT (31-15)
++#define IP_REV_1_IP_MJ_SHIFT (31-23)
++#define IP_REV_1_IP_MM_SHIFT (31-31)
++
++#define IP_REV_2_IP_INT_SHIFT (31-15)
++#define IP_REV_2_IP_ERR_SHIFT (31-23)
++#define IP_REV_2_IP_CFG_SHIFT (31-31)
++
++#define MECC_CAP_SHIFT (31-0)
++#define MECC_CET_SHIFT (31-1)
++#define MECC_SERCNT_SHIFT (31-15)
++#define MECC_MEMADDR_SHIFT (31-31)
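++
++/* Note (added): the (31-n) idiom converts the big-endian bit numbering used
++ * in the reference manual (bit 0 = MSB) into conventional shift counts from
++ * the LSB; e.g. CFG_UECT sits at documented bit 20, so its shift is
++ * (31-20) == 11, which matches the 0x00000800 mask above. */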
++
++/**************************************************************************//**
++ @Description RX SC defines
++*//***************************************************************************/
++
++/* masks */
++#define RX_SCCFG_SCI_EN_MASK 0x00000800
++#define RX_SCCFG_RP_MASK 0x00000400
++#define RX_SCCFG_VF_MASK 0x00000300
++#define RX_SCCFG_CO_MASK 0x0000003f
++
++/* shifts */
++#define RX_SCCFG_SCI_EN_SHIFT (31-20)
++#define RX_SCCFG_RP_SHIFT (31-21)
++#define RX_SCCFG_VF_SHIFT (31-23)
++#define RX_SCCFG_CO_SHIFT (31-31)
++#define RX_SCCFG_CS_SHIFT (31-7)
++
++/**************************************************************************//**
++ @Description RX SA defines
++*//***************************************************************************/
++
++/* masks */
++#define RX_SACFG_ACTIVE 0x80000000
++#define RX_SACFG_AN_MASK 0x00000006
++#define RX_SACFG_EN_MASK 0x00000001
++
++/* shifts */
++#define RX_SACFG_AN_SHIFT (31-30)
++#define RX_SACFG_EN_SHIFT (31-31)
++
++/**************************************************************************//**
++ @Description TX SC defines
++*//***************************************************************************/
++
++/* masks */
++#define TX_SCCFG_AN_MASK 0x000c0000
++#define TX_SCCFG_ASA_MASK 0x00020000
++#define TX_SCCFG_SCE_MASK 0x00010000
++#define TX_SCCFG_CO_MASK 0x00003f00
++#define TX_SCCFG_CE_MASK 0x00000010
++#define TX_SCCFG_PF_MASK 0x00000008
++#define TX_SCCFG_AIS_MASK 0x00000004
++#define TX_SCCFG_UES_MASK 0x00000002
++#define TX_SCCFG_USCB_MASK 0x00000001
++
++/* shifts */
++#define TX_SCCFG_AN_SHIFT (31-13)
++#define TX_SCCFG_ASA_SHIFT (31-14)
++#define TX_SCCFG_SCE_SHIFT (31-15)
++#define TX_SCCFG_CO_SHIFT (31-23)
++#define TX_SCCFG_CE_SHIFT (31-27)
++#define TX_SCCFG_PF_SHIFT (31-28)
++#define TX_SCCFG_AIS_SHIFT (31-29)
++#define TX_SCCFG_UES_SHIFT (31-30)
++#define TX_SCCFG_USCB_SHIFT (31-31)
++#define TX_SCCFG_CS_SHIFT (31-7)
++
++/**************************************************************************//**
++ @Description TX SA defines
++*//***************************************************************************/
++
++/* masks */
++#define TX_SACFG_ACTIVE 0x80000000
++
++
++typedef struct
++{
++ void (*f_Isr) (t_Handle h_Arg, uint32_t id);
++ t_Handle h_SrcHandle;
++} t_FmMacsecIntrSrc;
++
++typedef struct
++{
++ e_FmMacsecUnknownSciFrameTreatment unknownSciTreatMode;
++ bool invalidTagsDeliverUncontrolled;
++ bool changedTextWithNoEncryptDeliverUncontrolled;
++ bool onlyScbIsSetDeliverUncontrolled;
++ bool encryptWithNoChangedTextDiscardUncontrolled;
++ e_FmMacsecUntagFrameTreatment untagTreatMode;
++ uint32_t pnExhThr;
++ bool keysUnreadable;
++ bool byPassMode;
++ bool reservedSc0;
++ uint32_t sectagOverhead;
++ uint32_t mflSubtract;
++} t_FmMacsecDriverParam;
++
++typedef struct
++{
++ t_FmMacsecControllerDriver fmMacsecControllerDriver;
++ t_Handle h_Fm;
++ t_FmMacsecRegs *p_FmMacsecRegs;
++ t_Handle h_FmMac; /**< A handle to the related FM MAC object */
++ char fmMacsecModuleName[MODULE_NAME_SIZE];
++ t_FmMacsecIntrSrc intrMng[NUM_OF_INTER_MODULE_EVENTS];
++ uint32_t events;
++ uint32_t exceptions;
++ uint32_t userExceptions;
++ t_FmMacsecExceptionsCallback *f_Exception; /**< Exception Callback Routine */
++ t_Handle h_App; /**< A handle to an application layer object; This handle will
++ be passed by the driver upon calling the above callbacks */
++ bool rxScTable[NUM_OF_RX_SC];
++ uint32_t numRxScAvailable;
++ bool txScTable[NUM_OF_TX_SC];
++ uint32_t numTxScAvailable;
++ t_Handle rxScSpinLock;
++ t_Handle txScSpinLock;
++ t_FmMacsecDriverParam *p_FmMacsecDriverParam;
++} t_FmMacsec;
++
++
++#endif /* __FM_MACSEC_MASTER_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.c
+new file mode 100644
+index 00000000..7c72dc98
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.c
+@@ -0,0 +1,883 @@
++/*
++ * Copyright 2008-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File fm_macsec_secy.c
++
++ @Description FM MACSEC SECY driver routines implementation.
++*//***************************************************************************/
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "xx_ext.h"
++#include "string_ext.h"
++#include "sprint_ext.h"
++
++#include "fm_macsec_secy.h"
++
++
++/****************************************/
++/* static functions */
++/****************************************/
++static void FmMacsecSecYExceptionsIsr(t_Handle h_FmMacsecSecY, uint32_t id)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++
++ UNUSED(id);
++ SANITY_CHECK_RETURN(p_FmMacsecSecY, E_INVALID_HANDLE);
++
++ if (p_FmMacsecSecY->exceptions & FM_MACSEC_SECY_EX_FRAME_DISCARDED)
++ p_FmMacsecSecY->f_Exception(p_FmMacsecSecY->h_App, e_FM_MACSEC_SECY_EX_FRAME_DISCARDED);
++}
++
++static void FmMacsecSecYEventsIsr(t_Handle h_FmMacsecSecY, uint32_t id)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++
++ UNUSED(id);
++ SANITY_CHECK_RETURN(p_FmMacsecSecY, E_INVALID_HANDLE);
++
++ if (p_FmMacsecSecY->events & FM_MACSEC_SECY_EV_NEXT_PN)
++ p_FmMacsecSecY->f_Event(p_FmMacsecSecY->h_App, e_FM_MACSEC_SECY_EV_NEXT_PN);
++}
++
++static t_Error CheckFmMacsecSecYParameters(t_FmMacsecSecY *p_FmMacsecSecY)
++{
++ if (!p_FmMacsecSecY->f_Exception)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided"));
++
++ if (!p_FmMacsecSecY->f_Event)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Events callback not provided"));
++
++ if (!p_FmMacsecSecY->numOfRxSc)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Num of Rx Scs must be greater than '0'"));
++
++
++ return E_OK;
++}
++
++static t_Handle FmMacsecSecYCreateSc(t_FmMacsecSecY *p_FmMacsecSecY,
++ macsecSCI_t sci,
++ e_FmMacsecSecYCipherSuite cipherSuite,
++ e_ScType type)
++{
++ t_SecYSc *p_ScTable;
++ void *p_Params;
++ uint32_t numOfSc,i;
++ t_Error err = E_OK;
++ t_RxScParams rxScParams;
++ t_TxScParams txScParams;
++
++ ASSERT_COND(p_FmMacsecSecY);
++ ASSERT_COND(p_FmMacsecSecY->h_FmMacsec);
++
++ if (type == e_SC_RX)
++ {
++ memset(&rxScParams, 0, sizeof(rxScParams));
++ i = (NUM_OF_RX_SC - 1);
++ p_ScTable = p_FmMacsecSecY->p_RxSc;
++ numOfSc = p_FmMacsecSecY->numOfRxSc;
++ rxScParams.confidentialityOffset = p_FmMacsecSecY->confidentialityOffset;
++ rxScParams.replayProtect = p_FmMacsecSecY->replayProtect;
++ rxScParams.replayWindow = p_FmMacsecSecY->replayWindow;
++ rxScParams.validateFrames = p_FmMacsecSecY->validateFrames;
++ rxScParams.cipherSuite = cipherSuite;
++ p_Params = &rxScParams;
++ }
++ else
++ {
++ memset(&txScParams, 0, sizeof(txScParams));
++ i = (NUM_OF_TX_SC - 1);
++ p_ScTable = p_FmMacsecSecY->p_TxSc;
++ numOfSc = p_FmMacsecSecY->numOfTxSc;
++ txScParams.sciInsertionMode = p_FmMacsecSecY->sciInsertionMode;
++ txScParams.protectFrames = p_FmMacsecSecY->protectFrames;
++ txScParams.confidentialityEnable = p_FmMacsecSecY->confidentialityEnable;
++ txScParams.confidentialityOffset = p_FmMacsecSecY->confidentialityOffset;
++ txScParams.cipherSuite = cipherSuite;
++ p_Params = &txScParams;
++ }
++
++ for (i=0;i<numOfSc;i++)
++ if (!p_ScTable[i].inUse)
++ break;
++ if (i == numOfSc)
++ {
++ REPORT_ERROR(MAJOR, E_FULL, ("FM MACSEC SECY SC"));
++ return NULL;
++ }
++
++ if (type == e_SC_RX)
++ {
++ ((t_RxScParams *)p_Params)->scId = p_ScTable[i].scId;
++ ((t_RxScParams *)p_Params)->sci = sci;
++ if ((err = FmMacsecCreateRxSc(p_FmMacsecSecY->h_FmMacsec, (t_RxScParams *)p_Params)) != E_OK)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY RX SC"));
++ return NULL;
++ }
++ }
++ else
++ {
++ ((t_TxScParams *)p_Params)->scId = p_ScTable[i].scId;
++ ((t_TxScParams *)p_Params)->sci = sci;
++ if ((err = FmMacsecCreateTxSc(p_FmMacsecSecY->h_FmMacsec, (t_TxScParams *)p_Params)) != E_OK)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY TX SC"));
++ return NULL;
++ }
++ }
++
++ p_ScTable[i].inUse = TRUE;
++ return &p_ScTable[i];
++}
++
++static t_Error FmMacsecSecYDeleteSc(t_FmMacsecSecY *p_FmMacsecSecY, t_SecYSc *p_FmSecYSc, e_ScType type)
++{
++ t_Error err = E_OK;
++
++ ASSERT_COND(p_FmMacsecSecY);
++ ASSERT_COND(p_FmMacsecSecY->h_FmMacsec);
++ ASSERT_COND(p_FmSecYSc);
++
++ if (type == e_SC_RX)
++ {
++ if ((err = FmMacsecDeleteRxSc(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++ else
++ if ((err = FmMacsecDeleteTxSc(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ p_FmSecYSc->inUse = FALSE;
++
++ return err;
++}
++
++/****************************************/
++/* API Init unit functions */
++/****************************************/
++t_Handle FM_MACSEC_SECY_Config(t_FmMacsecSecYParams *p_FmMacsecSecYParam)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY;
++
++ /* Allocate FM MACSEC structure */
++ p_FmMacsecSecY = (t_FmMacsecSecY *) XX_Malloc(sizeof(t_FmMacsecSecY));
++ if (!p_FmMacsecSecY)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY driver structure"));
++ return NULL;
++ }
++ memset(p_FmMacsecSecY, 0, sizeof(t_FmMacsecSecY));
++
++ /* Allocate the FM MACSEC driver's parameters structure */
++ p_FmMacsecSecY->p_FmMacsecSecYDriverParam = (t_FmMacsecSecYDriverParam *)XX_Malloc(sizeof(t_FmMacsecSecYDriverParam));
++ if (!p_FmMacsecSecY->p_FmMacsecSecYDriverParam)
++ {
++ XX_Free(p_FmMacsecSecY);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY driver parameters"));
++ return NULL;
++ }
++ memset(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, 0, sizeof(t_FmMacsecSecYDriverParam));
++
++ /* Initialize FM MACSEC SECY parameters which will be kept by the driver */
++ p_FmMacsecSecY->h_FmMacsec = p_FmMacsecSecYParam->h_FmMacsec;
++ p_FmMacsecSecY->f_Event = p_FmMacsecSecYParam->f_Event;
++ p_FmMacsecSecY->f_Exception = p_FmMacsecSecYParam->f_Exception;
++ p_FmMacsecSecY->h_App = p_FmMacsecSecYParam->h_App;
++ p_FmMacsecSecY->confidentialityEnable = DEFAULT_confidentialityEnable;
++ p_FmMacsecSecY->confidentialityOffset = DEFAULT_confidentialityOffset;
++ p_FmMacsecSecY->validateFrames = DEFAULT_validateFrames;
++ p_FmMacsecSecY->replayProtect = DEFAULT_replayEnable;
++ p_FmMacsecSecY->replayWindow = DEFAULT_replayWindow;
++ p_FmMacsecSecY->protectFrames = DEFAULT_protectFrames;
++ p_FmMacsecSecY->sciInsertionMode = DEFAULT_sciInsertionMode;
++ p_FmMacsecSecY->isPointToPoint = DEFAULT_ptp;
++ p_FmMacsecSecY->numOfRxSc = p_FmMacsecSecYParam->numReceiveChannels;
++ p_FmMacsecSecY->numOfTxSc = DEFAULT_numOfTxSc;
++ p_FmMacsecSecY->exceptions = DEFAULT_exceptions;
++ p_FmMacsecSecY->events = DEFAULT_events;
++
++ memcpy(&p_FmMacsecSecY->p_FmMacsecSecYDriverParam->txScParams,
++ &p_FmMacsecSecYParam->txScParams,
++ sizeof(t_FmMacsecSecYSCParams));
++ return p_FmMacsecSecY;
++}
++
++t_Error FM_MACSEC_SECY_Init(t_Handle h_FmMacsecSecY)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_FmMacsecSecYDriverParam *p_FmMacsecSecYDriverParam = NULL;
++ uint32_t rxScIds[NUM_OF_RX_SC], txScIds[NUM_OF_TX_SC], i, j;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_HANDLE);
++
++ CHECK_INIT_PARAMETERS(p_FmMacsecSecY, CheckFmMacsecSecYParameters);
++
++ p_FmMacsecSecYDriverParam = p_FmMacsecSecY->p_FmMacsecSecYDriverParam;
++
++ if ((p_FmMacsecSecY->isPointToPoint) &&
++ ((err = FmMacsecSetPTP(p_FmMacsecSecY->h_FmMacsec, TRUE)) != E_OK))
++ RETURN_ERROR(MAJOR, err, ("Can't set Poin-to-Point"));
++
++ /* Rx Sc Allocation */
++ p_FmMacsecSecY->p_RxSc = (t_SecYSc *)XX_Malloc(sizeof(t_SecYSc) * p_FmMacsecSecY->numOfRxSc);
++ if (!p_FmMacsecSecY->p_RxSc)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY RX SC"));
++ memset(p_FmMacsecSecY->p_RxSc, 0, sizeof(t_SecYSc) * p_FmMacsecSecY->numOfRxSc);
++ if ((err = FmMacsecAllocScs(p_FmMacsecSecY->h_FmMacsec, e_SC_RX, p_FmMacsecSecY->isPointToPoint, p_FmMacsecSecY->numOfRxSc, rxScIds)) != E_OK)
++ {
++ if (p_FmMacsecSecY->p_TxSc)
++ XX_Free(p_FmMacsecSecY->p_TxSc);
++ if (p_FmMacsecSecY->p_RxSc)
++ XX_Free(p_FmMacsecSecY->p_RxSc);
++ return ERROR_CODE(err);
++ }
++ for (i=0; i<p_FmMacsecSecY->numOfRxSc; i++)
++ {
++ p_FmMacsecSecY->p_RxSc[i].scId = rxScIds[i];
++ p_FmMacsecSecY->p_RxSc[i].type = e_SC_RX;
++ for (j=0; j<MAX_NUM_OF_SA_PER_SC;j++)
++ p_FmMacsecSecY->p_RxSc[i].sa[j].saId = (e_ScSaId)SECY_AN_FREE_VALUE;
++ }
++
++ /* Tx Sc Allocation */
++ p_FmMacsecSecY->p_TxSc = (t_SecYSc *)XX_Malloc(sizeof(t_SecYSc) * p_FmMacsecSecY->numOfTxSc);
++ if (!p_FmMacsecSecY->p_TxSc)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY TX SC"));
++ memset(p_FmMacsecSecY->p_TxSc, 0, sizeof(t_SecYSc) * p_FmMacsecSecY->numOfTxSc);
++
++ if ((err = FmMacsecAllocScs(p_FmMacsecSecY->h_FmMacsec, e_SC_TX, p_FmMacsecSecY->isPointToPoint, p_FmMacsecSecY->numOfTxSc, txScIds)) != E_OK)
++ {
++ if (p_FmMacsecSecY->p_TxSc)
++ XX_Free(p_FmMacsecSecY->p_TxSc);
++ if (p_FmMacsecSecY->p_RxSc)
++ XX_Free(p_FmMacsecSecY->p_RxSc);
++ return ERROR_CODE(err);
++ }
++ for (i=0; i<p_FmMacsecSecY->numOfTxSc; i++)
++ {
++ p_FmMacsecSecY->p_TxSc[i].scId = txScIds[i];
++ p_FmMacsecSecY->p_TxSc[i].type = e_SC_TX;
++ for (j=0; j<MAX_NUM_OF_SA_PER_SC;j++)
++ p_FmMacsecSecY->p_TxSc[i].sa[j].saId = (e_ScSaId)SECY_AN_FREE_VALUE;
++ FmMacsecRegisterIntr(p_FmMacsecSecY->h_FmMacsec,
++ e_FM_MACSEC_MOD_SC_TX,
++ (uint8_t)txScIds[i],
++ e_FM_INTR_TYPE_ERR,
++ FmMacsecSecYExceptionsIsr,
++ p_FmMacsecSecY);
++ FmMacsecRegisterIntr(p_FmMacsecSecY->h_FmMacsec,
++ e_FM_MACSEC_MOD_SC_TX,
++ (uint8_t)txScIds[i],
++ e_FM_INTR_TYPE_NORMAL,
++ FmMacsecSecYEventsIsr,
++ p_FmMacsecSecY);
++
++ if (p_FmMacsecSecY->exceptions & FM_MACSEC_SECY_EX_FRAME_DISCARDED)
++ FmMacsecSetException(p_FmMacsecSecY->h_FmMacsec, e_FM_MACSEC_EX_TX_SC, txScIds[i], TRUE);
++ if (p_FmMacsecSecY->events & FM_MACSEC_SECY_EV_NEXT_PN)
++ FmMacsecSetEvent(p_FmMacsecSecY->h_FmMacsec, e_FM_MACSEC_EV_TX_SC_NEXT_PN, txScIds[i], TRUE);
++ }
++
++ FmMacsecSecYCreateSc(p_FmMacsecSecY,
++ p_FmMacsecSecYDriverParam->txScParams.sci,
++ p_FmMacsecSecYDriverParam->txScParams.cipherSuite,
++ e_SC_TX);
++ XX_Free(p_FmMacsecSecYDriverParam);
++ p_FmMacsecSecY->p_FmMacsecSecYDriverParam = NULL;
++
++ return E_OK;
++}
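++
++/* Bring-up sketch (added, illustrative only; 'params' and the handle name
++ * are assumptions, not part of this driver):
++ *
++ *     t_Handle h_SecY = FM_MACSEC_SECY_Config(&params);
++ *     FM_MACSEC_SECY_ConfigException(h_SecY,
++ *         e_FM_MACSEC_SECY_EX_FRAME_DISCARDED, TRUE);
++ *     FM_MACSEC_SECY_Init(h_SecY);
++ *
++ * The FM_MACSEC_SECY_Config*() calls are only legal between Config and Init:
++ * they sanity-check that p_FmMacsecSecYDriverParam still exists, and Init
++ * frees it. */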
++
++t_Error FM_MACSEC_SECY_Free(t_Handle h_FmMacsecSecY)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_Error err = E_OK;
++ uint32_t rxScIds[NUM_OF_RX_SC], txScIds[NUM_OF_TX_SC], i;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++
++ if (p_FmMacsecSecY->isPointToPoint)
++ FmMacsecSetPTP(p_FmMacsecSecY->h_FmMacsec, FALSE);
++ if (p_FmMacsecSecY->p_RxSc)
++ {
++ for (i=0; i<p_FmMacsecSecY->numOfRxSc; i++)
++ rxScIds[i] = p_FmMacsecSecY->p_RxSc[i].scId;
++ if ((err = FmMacsecFreeScs(p_FmMacsecSecY->h_FmMacsec, e_SC_RX, p_FmMacsecSecY->numOfRxSc, rxScIds)) != E_OK)
++ return ERROR_CODE(err);
++ XX_Free(p_FmMacsecSecY->p_RxSc);
++ }
++ if (p_FmMacsecSecY->p_TxSc)
++ {
++ FmMacsecSecYDeleteSc(p_FmMacsecSecY, &p_FmMacsecSecY->p_TxSc[0], e_SC_TX);
++
++ for (i=0; i<p_FmMacsecSecY->numOfTxSc; i++) {
++ txScIds[i] = p_FmMacsecSecY->p_TxSc[i].scId;
++ FmMacsecUnregisterIntr(p_FmMacsecSecY->h_FmMacsec,
++ e_FM_MACSEC_MOD_SC_TX,
++ (uint8_t)txScIds[i],
++ e_FM_INTR_TYPE_ERR);
++ FmMacsecUnregisterIntr(p_FmMacsecSecY->h_FmMacsec,
++ e_FM_MACSEC_MOD_SC_TX,
++ (uint8_t)txScIds[i],
++ e_FM_INTR_TYPE_NORMAL);
++
++ if (p_FmMacsecSecY->exceptions & FM_MACSEC_SECY_EX_FRAME_DISCARDED)
++ FmMacsecSetException(p_FmMacsecSecY->h_FmMacsec, e_FM_MACSEC_EX_TX_SC, txScIds[i], FALSE);
++ if (p_FmMacsecSecY->events & FM_MACSEC_SECY_EV_NEXT_PN)
++ FmMacsecSetEvent(p_FmMacsecSecY->h_FmMacsec, e_FM_MACSEC_EV_TX_SC_NEXT_PN, txScIds[i], FALSE);
++ }
++
++ if ((err = FmMacsecFreeScs(p_FmMacsecSecY->h_FmMacsec, e_SC_TX, p_FmMacsecSecY->numOfTxSc, txScIds)) != E_OK)
++ return ERROR_CODE(err);
++ XX_Free(p_FmMacsecSecY->p_TxSc);
++ }
++
++ XX_Free(p_FmMacsecSecY);
++
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_ConfigSciInsertionMode(t_Handle h_FmMacsecSecY, e_FmMacsecSciInsertionMode sciInsertionMode)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++
++ p_FmMacsecSecY->sciInsertionMode = sciInsertionMode;
++
++ return E_OK;
++}
++
++t_Error FM_MACSEC_SECY_ConfigProtectFrames(t_Handle h_FmMacsecSecY, bool protectFrames)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++
++ p_FmMacsecSecY->protectFrames = protectFrames;
++
++ return E_OK;
++}
++
++t_Error FM_MACSEC_SECY_ConfigReplayWindow(t_Handle h_FmMacsecSecY, bool replayProtect, uint32_t replayWindow)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++
++ p_FmMacsecSecY->replayProtect = replayProtect;
++ p_FmMacsecSecY->replayWindow = replayWindow;
++
++ return E_OK;
++}
++
++t_Error FM_MACSEC_SECY_ConfigValidationMode(t_Handle h_FmMacsecSecY, e_FmMacsecValidFrameBehavior validateFrames)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++
++ p_FmMacsecSecY->validateFrames = validateFrames;
++
++ return E_OK;
++}
++
++t_Error FM_MACSEC_SECY_ConfigConfidentiality(t_Handle h_FmMacsecSecY, bool confidentialityEnable, uint16_t confidentialityOffset)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++
++ p_FmMacsecSecY->confidentialityEnable = confidentialityEnable;
++ p_FmMacsecSecY->confidentialityOffset = confidentialityOffset;
++
++ return E_OK;
++}
++
++t_Error FM_MACSEC_SECY_ConfigPointToPoint(t_Handle h_FmMacsecSecY)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++
++ p_FmMacsecSecY->numOfRxSc = 1;
++ p_FmMacsecSecY->isPointToPoint = TRUE;
++ p_FmMacsecSecY->sciInsertionMode = e_FM_MACSEC_SCI_INSERTION_MODE_IMPLICT_PTP;
++
++ return E_OK;
++}
++
++t_Error FM_MACSEC_SECY_ConfigException(t_Handle h_FmMacsecSecY, e_FmMacsecSecYExceptions exception, bool enable)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++
++ GET_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_FmMacsecSecY->exceptions |= bitMask;
++ else
++ p_FmMacsecSecY->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ return E_OK;
++}
++
++t_Error FM_MACSEC_SECY_ConfigEvent(t_Handle h_FmMacsecSecY, e_FmMacsecSecYEvents event, bool enable)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++
++ GET_EVENT_FLAG(bitMask, event);
++ if (bitMask)
++ {
++ if (enable)
++ p_FmMacsecSecY->events |= bitMask;
++ else
++ p_FmMacsecSecY->events &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined event"));
++
++ return E_OK;
++}
++
++t_Handle FM_MACSEC_SECY_CreateRxSc(t_Handle h_FmMacsecSecY, t_FmMacsecSecYSCParams *p_ScParams)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmMacsecSecY, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_ScParams, E_NULL_POINTER, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE, NULL);
++
++ return FmMacsecSecYCreateSc(p_FmMacsecSecY, p_ScParams->sci, p_ScParams->cipherSuite, e_SC_RX);
++}
++
++t_Error FM_MACSEC_SECY_DeleteRxSc(t_Handle h_FmMacsecSecY, t_Handle h_Sc)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++
++ return FmMacsecSecYDeleteSc(p_FmMacsecSecY, p_FmSecYSc, e_SC_RX);
++}
++
++t_Error FM_MACSEC_SECY_CreateRxSa(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t lowestPn, macsecSAKey_t key)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId != SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is already assigned",an));
++
++ if ((err = FmMacsecCreateRxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, (e_ScSaId)p_FmSecYSc->numOfSa, an, lowestPn, key)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ p_FmSecYSc->sa[an].saId = (e_ScSaId)p_FmSecYSc->numOfSa++;
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_DeleteRxSa(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is already deleted",an));
++
++ if ((err = FmMacsecDeleteRxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ p_FmSecYSc->numOfSa--;
++ p_FmSecYSc->sa[an].saId = (e_ScSaId)SECY_AN_FREE_VALUE;
++ /* TODO - check if statistics need to be read */
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_RxSaEnableReceive(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
++
++ if ((err = FmMacsecRxSaSetReceive(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, TRUE)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ p_FmSecYSc->sa[an].active = TRUE;
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_RxSaDisableReceive(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
++
++ if ((err = FmMacsecRxSaSetReceive(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, FALSE)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ p_FmSecYSc->sa[an].active = FALSE;
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_RxSaUpdateNextPn(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t updtNextPN)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
++
++ if ((err = FmMacsecRxSaUpdateNextPn(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, updtNextPN)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_RxSaUpdateLowestPn(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t updtLowestPN)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
++
++ if ((err = FmMacsecRxSaUpdateLowestPn(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, updtLowestPN)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_RxSaModifyKey(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, macsecSAKey_t key)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
++
++ if (p_FmSecYSc->sa[an].active)
++ if ((err = FmMacsecRxSaSetReceive(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, FALSE)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ /* TODO - statistics should be read */
++
++ if ((err = FmMacsecCreateRxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, an, 1, key)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ if (p_FmSecYSc->sa[an].active)
++ if ((err = FmMacsecRxSaSetReceive(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, TRUE)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ return err;
++}
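++
++/* Rekey flow note (added): the routine above mirrors a receive-side rekey:
++ * an active SA is first taken out of receive, the SA entry is rewritten
++ * with the new key (lowest PN restarting at 1), and receive is re-enabled;
++ * reading back statistics at the disable point is still a TODO. */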
++
++
++t_Error FM_MACSEC_SECY_CreateTxSa(t_Handle h_FmMacsecSecY, macsecAN_t an, macsecSAKey_t key)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId != SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, err, ("An %d is already assigned",an));
++
++ if ((err = FmMacsecCreateTxSa(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, (e_ScSaId)p_FmSecYSc->numOfSa, key)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ p_FmSecYSc->sa[an].saId = (e_ScSaId)p_FmSecYSc->numOfSa++;
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_DeleteTxSa(t_Handle h_FmMacsecSecY, macsecAN_t an)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is already deleted",an));
++
++ if ((err = FmMacsecDeleteTxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ p_FmSecYSc->numOfSa--;
++ p_FmSecYSc->sa[an].saId = (e_ScSaId)SECY_AN_FREE_VALUE;
++ /* TODO - check if statistics need to be read */
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_TxSaModifyKey(t_Handle h_FmMacsecSecY, macsecAN_t nextActiveAn, macsecSAKey_t key)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc;
++ macsecAN_t currentAn;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(nextActiveAn < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if ((err = FmMacsecTxSaGetActive(p_FmMacsecSecY->h_FmMacsec,
++ p_FmSecYSc->scId,
++ &currentAn)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
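++ /* Switch transmission to the SA of the next AN, then re-key the previously active SA */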
++ if ((err = FmMacsecTxSaSetActive(p_FmMacsecSecY->h_FmMacsec,
++ p_FmSecYSc->scId,
++ p_FmSecYSc->sa[nextActiveAn].saId,
++ nextActiveAn)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ /* TODO - statistics should be read */
++
++ if ((err = FmMacsecCreateTxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[currentAn].saId, key)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_TxSaSetActive(t_Handle h_FmMacsecSecY, macsecAN_t an)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
++
++ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
++
++ if ((err = FmMacsecTxSaSetActive(p_FmMacsecSecY->h_FmMacsec,
++ p_FmSecYSc->scId,
++ p_FmSecYSc->sa[an].saId,
++ an)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_TxSaGetActive(t_Handle h_FmMacsecSecY, macsecAN_t *p_An)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_An, E_INVALID_HANDLE);
++
++ if ((err = FmMacsecTxSaGetActive(p_FmMacsecSecY->h_FmMacsec,
++ p_FmSecYSc->scId,
++ p_An)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_GetRxScPhysId(t_Handle h_FmMacsecSecY, t_Handle h_Sc, uint32_t *p_ScPhysId)
++{
++ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(((t_FmMacsecSecY *)h_FmMacsecSecY)->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!((t_FmMacsecSecY *)h_FmMacsecSecY)->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++#ifdef DISABLE_SANITY_CHECKS
++ UNUSED(h_FmMacsecSecY);
++#endif /* DISABLE_SANITY_CHECKS */
++
++ *p_ScPhysId = p_FmSecYSc->scId;
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_GetTxScPhysId(t_Handle h_FmMacsecSecY, uint32_t *p_ScPhysId)
++{
++ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
++ t_SecYSc *p_FmSecYSc;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
++ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
++ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
++
++ *p_ScPhysId = p_FmSecYSc->scId;
++ return err;
++}
++
++t_Error FM_MACSEC_SECY_SetException(t_Handle h_FmMacsecSecY, e_FmMacsecExceptions exception, bool enable)
++{
++ UNUSED(h_FmMacsecSecY);UNUSED(exception);UNUSED(enable);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_SECY_SetEvent(t_Handle h_FmMacsecSecY, e_FmMacsecSecYEvents event, bool enable)
++{
++ UNUSED(h_FmMacsecSecY);UNUSED(event);UNUSED(enable);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_SECY_GetStatistics(t_Handle h_FmMacsecSecY, t_FmMacsecSecYStatistics *p_Statistics)
++{
++ UNUSED(h_FmMacsecSecY);UNUSED(p_Statistics);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_SECY_RxScGetStatistics(t_Handle h_FmMacsecSecY, t_Handle h_Sc, t_FmMacsecSecYRxScStatistics *p_Statistics)
++{
++ UNUSED(h_FmMacsecSecY);UNUSED(h_Sc);UNUSED(p_Statistics);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_SECY_RxSaGetStatistics(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, t_FmMacsecSecYRxSaStatistics *p_Statistics)
++{
++ UNUSED(h_FmMacsecSecY);UNUSED(h_Sc);UNUSED(an);UNUSED(p_Statistics);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_SECY_TxScGetStatistics(t_Handle h_FmMacsecSecY, t_FmMacsecSecYTxScStatistics *p_Statistics)
++{
++ UNUSED(h_FmMacsecSecY);UNUSED(p_Statistics);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
++t_Error FM_MACSEC_SECY_TxSaGetStatistics(t_Handle h_FmMacsecSecY, macsecAN_t an, t_FmMacsecSecYTxSaStatistics *p_Statistics)
++{
++ UNUSED(h_FmMacsecSecY);UNUSED(an);UNUSED(p_Statistics);
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++}
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.h
+new file mode 100644
+index 00000000..0cf624e6
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.h
+@@ -0,0 +1,144 @@
++/*
++ * Copyright 2008-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File fm_macsec_secy.h
++
++ @Description FM MACSEC SecY internal structures and definitions.
++*//***************************************************************************/
++#ifndef __FM_MACSEC_SECY_H
++#define __FM_MACSEC_SECY_H
++
++#include "error_ext.h"
++#include "std_ext.h"
++
++#include "fm_macsec.h"
++
++
++/**************************************************************************//**
++ @Description Exceptions
++*//***************************************************************************/
++
++#define FM_MACSEC_SECY_EX_FRAME_DISCARDED 0x80000000
++
++#define GET_EXCEPTION_FLAG(bitMask, exception) switch (exception){ \
++ case e_FM_MACSEC_SECY_EX_FRAME_DISCARDED: \
++ bitMask = FM_MACSEC_SECY_EX_FRAME_DISCARDED; break; \
++ default: bitMask = 0;break;}
++
++/**************************************************************************//**
++ @Description Events
++*//***************************************************************************/
++
++#define FM_MACSEC_SECY_EV_NEXT_PN 0x80000000
++
++#define GET_EVENT_FLAG(bitMask, event) switch (event){ \
++ case e_FM_MACSEC_SECY_EV_NEXT_PN: \
++ bitMask = FM_MACSEC_SECY_EV_NEXT_PN; break; \
++ default: bitMask = 0;break;}
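++
++/* Illustrative use of the flag-mapping macros above (the 'bitMask' variable is
++ * hypothetical):
++ *
++ *     uint32_t bitMask;
++ *     GET_EVENT_FLAG(bitMask, e_FM_MACSEC_SECY_EV_NEXT_PN);
++ *     bitMask now holds FM_MACSEC_SECY_EV_NEXT_PN
++ */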
++
++/**************************************************************************//**
++ @Description Defaults
++*//***************************************************************************/
++
++#define DEFAULT_exceptions (FM_MACSEC_SECY_EX_FRAME_DISCARDED)
++#define DEFAULT_events (FM_MACSEC_SECY_EV_NEXT_PN)
++#define DEFAULT_numOfTxSc 1
++#define DEFAULT_confidentialityEnable FALSE
++#define DEFAULT_confidentialityOffset 0
++#define DEFAULT_sciInsertionMode e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_SECTAG
++#define DEFAULT_validateFrames e_FM_MACSEC_VALID_FRAME_BEHAVIOR_STRICT
++#define DEFAULT_replayEnable FALSE
++#define DEFAULT_replayWindow 0
++#define DEFAULT_protectFrames TRUE
++#define DEFAULT_ptp FALSE
++
++/**************************************************************************//**
++ @Description General defines
++*//***************************************************************************/
++
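++/* saId sentinel marking an association number (AN) entry as unused */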
++#define SECY_AN_FREE_VALUE MAX_NUM_OF_SA_PER_SC
++
++
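++/* Per-SA bookkeeping: driver SA id, active state, and Rx/Tx statistics */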
++typedef struct {
++ e_ScSaId saId;
++ bool active;
++ union {
++ t_FmMacsecSecYRxSaStatistics rxSaStatistics;
++ t_FmMacsecSecYTxSaStatistics txSaStatistics;
++ };
++} t_SecYSa;
++
++typedef struct {
++ bool inUse;
++ uint32_t scId;
++ e_ScType type;
++ uint8_t numOfSa;
++ t_SecYSa sa[MAX_NUM_OF_SA_PER_SC];
++ union {
++ t_FmMacsecSecYRxScStatistics rxScStatistics;
++ t_FmMacsecSecYTxScStatistics txScStatistics;
++ };
++} t_SecYSc;
++
++typedef struct {
++ t_FmMacsecSecYSCParams txScParams; /**< Tx SC Params */
++} t_FmMacsecSecYDriverParam;
++
++typedef struct {
++ t_Handle h_FmMacsec;
++ bool confidentialityEnable; /**< TRUE - confidentiality protection and integrity protection
++ FALSE - no confidentiality protection, only integrity protection*/
++ uint16_t confidentialityOffset; /**< The number of initial octets of each MSDU without confidentiality protection;
++ common values are 0, 30, and 50 */
++ bool replayProtect; /**< replay protection function mode */
++ uint32_t replayWindow; /**< the size of the replay window */
++ e_FmMacsecValidFrameBehavior validateFrames; /**< validation function mode */
++ e_FmMacsecSciInsertionMode sciInsertionMode;
++ bool protectFrames;
++ bool isPointToPoint;
++ e_FmMacsecSecYCipherSuite cipherSuite; /**< Cipher suite to be used for this SecY */
++ uint32_t numOfRxSc; /**< Number of Rx secure channels */
++ uint32_t numOfTxSc; /**< Number of Tx secure channels */
++ t_SecYSc *p_RxSc;
++ t_SecYSc *p_TxSc;
++ uint32_t events;
++ uint32_t exceptions;
++ t_FmMacsecSecYExceptionsCallback *f_Exception; /**< TODO */
++ t_FmMacsecSecYEventsCallback *f_Event; /**< TODO */
++ t_Handle h_App;
++ t_FmMacsecSecYStatistics statistics;
++ t_FmMacsecSecYDriverParam *p_FmMacsecSecYDriverParam;
++} t_FmMacsecSecY;
++
++
++#endif /* __FM_MACSEC_SECY_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Makefile b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Makefile
+new file mode 100644
+index 00000000..619f6608
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Makefile
+@@ -0,0 +1,23 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
++
++ccflags-y += -I$(NCSW_FM_INC)
++
++
++obj-y += fsl-ncsw-PFM1.o
++
++fsl-ncsw-PFM1-objs := fm.o fm_muram.o fman.o
++
++obj-y += MAC/
++obj-y += Pcd/
++obj-y += SP/
++obj-y += Port/
++obj-y += HC/
++obj-y += Rtc/
++obj-y += MACSEC/
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/Makefile b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/Makefile
+new file mode 100644
+index 00000000..62fbd73c
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/Makefile
+@@ -0,0 +1,26 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
++
++ccflags-y += -I$(NCSW_FM_INC)
++
++obj-y += fsl-ncsw-Pcd.o
++
++fsl-ncsw-Pcd-objs := fman_kg.o fman_prs.o fm_cc.o fm_kg.o fm_pcd.o fm_plcr.o fm_prs.o fm_manip.o
++
++ifeq ($(CONFIG_FMAN_V3H),y)
++fsl-ncsw-Pcd-objs += fm_replic.o
++endif
++ifeq ($(CONFIG_FMAN_V3L),y)
++fsl-ncsw-Pcd-objs += fm_replic.o
++endif
++ifeq ($(CONFIG_FMAN_ARM),y)
++fsl-ncsw-Pcd-objs += fm_replic.o
++endif
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h
+new file mode 100644
+index 00000000..335ee681
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h
+@@ -0,0 +1,360 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++ /**************************************************************************//**
++ @File crc64.h
++
++ @Description This file contains the CRC64 table and the __inline__
++ functions used for calculating the CRC.
++*//***************************************************************************/
++#ifndef __CRC64_H
++#define __CRC64_H
++
++#include "std_ext.h"
++
++
++#define BITS_PER_BYTE 8
++
++#define CRC64_EXPON_ECMA_182 0xC96C5795D7870F42ULL
++#define CRC64_DEFAULT_INITVAL 0xFFFFFFFFFFFFFFFFULL
++
++#define CRC64_BYTE_MASK 0xFF
++#define CRC64_TABLE_ENTRIES ( 1 << BITS_PER_BYTE )
++#define CRC64_ODD_MASK 1
++
++
++/**
++ \brief '64 bit crc' Table
++ */
++struct crc64_t {
++ uint64_t initial; /**< Initial seed */
++ uint64_t table[CRC64_TABLE_ENTRIES]; /**< CRC table entries */
++};
++
++
++static struct crc64_t CRC64_ECMA_182 = {
++ CRC64_DEFAULT_INITVAL,
++ {
++ 0x0000000000000000ULL,
++ 0xb32e4cbe03a75f6fULL,
++ 0xf4843657a840a05bULL,
++ 0x47aa7ae9abe7ff34ULL,
++ 0x7bd0c384ff8f5e33ULL,
++ 0xc8fe8f3afc28015cULL,
++ 0x8f54f5d357cffe68ULL,
++ 0x3c7ab96d5468a107ULL,
++ 0xf7a18709ff1ebc66ULL,
++ 0x448fcbb7fcb9e309ULL,
++ 0x0325b15e575e1c3dULL,
++ 0xb00bfde054f94352ULL,
++ 0x8c71448d0091e255ULL,
++ 0x3f5f08330336bd3aULL,
++ 0x78f572daa8d1420eULL,
++ 0xcbdb3e64ab761d61ULL,
++ 0x7d9ba13851336649ULL,
++ 0xceb5ed8652943926ULL,
++ 0x891f976ff973c612ULL,
++ 0x3a31dbd1fad4997dULL,
++ 0x064b62bcaebc387aULL,
++ 0xb5652e02ad1b6715ULL,
++ 0xf2cf54eb06fc9821ULL,
++ 0x41e11855055bc74eULL,
++ 0x8a3a2631ae2dda2fULL,
++ 0x39146a8fad8a8540ULL,
++ 0x7ebe1066066d7a74ULL,
++ 0xcd905cd805ca251bULL,
++ 0xf1eae5b551a2841cULL,
++ 0x42c4a90b5205db73ULL,
++ 0x056ed3e2f9e22447ULL,
++ 0xb6409f5cfa457b28ULL,
++ 0xfb374270a266cc92ULL,
++ 0x48190ecea1c193fdULL,
++ 0x0fb374270a266cc9ULL,
++ 0xbc9d3899098133a6ULL,
++ 0x80e781f45de992a1ULL,
++ 0x33c9cd4a5e4ecdceULL,
++ 0x7463b7a3f5a932faULL,
++ 0xc74dfb1df60e6d95ULL,
++ 0x0c96c5795d7870f4ULL,
++ 0xbfb889c75edf2f9bULL,
++ 0xf812f32ef538d0afULL,
++ 0x4b3cbf90f69f8fc0ULL,
++ 0x774606fda2f72ec7ULL,
++ 0xc4684a43a15071a8ULL,
++ 0x83c230aa0ab78e9cULL,
++ 0x30ec7c140910d1f3ULL,
++ 0x86ace348f355aadbULL,
++ 0x3582aff6f0f2f5b4ULL,
++ 0x7228d51f5b150a80ULL,
++ 0xc10699a158b255efULL,
++ 0xfd7c20cc0cdaf4e8ULL,
++ 0x4e526c720f7dab87ULL,
++ 0x09f8169ba49a54b3ULL,
++ 0xbad65a25a73d0bdcULL,
++ 0x710d64410c4b16bdULL,
++ 0xc22328ff0fec49d2ULL,
++ 0x85895216a40bb6e6ULL,
++ 0x36a71ea8a7ace989ULL,
++ 0x0adda7c5f3c4488eULL,
++ 0xb9f3eb7bf06317e1ULL,
++ 0xfe5991925b84e8d5ULL,
++ 0x4d77dd2c5823b7baULL,
++ 0x64b62bcaebc387a1ULL,
++ 0xd7986774e864d8ceULL,
++ 0x90321d9d438327faULL,
++ 0x231c512340247895ULL,
++ 0x1f66e84e144cd992ULL,
++ 0xac48a4f017eb86fdULL,
++ 0xebe2de19bc0c79c9ULL,
++ 0x58cc92a7bfab26a6ULL,
++ 0x9317acc314dd3bc7ULL,
++ 0x2039e07d177a64a8ULL,
++ 0x67939a94bc9d9b9cULL,
++ 0xd4bdd62abf3ac4f3ULL,
++ 0xe8c76f47eb5265f4ULL,
++ 0x5be923f9e8f53a9bULL,
++ 0x1c4359104312c5afULL,
++ 0xaf6d15ae40b59ac0ULL,
++ 0x192d8af2baf0e1e8ULL,
++ 0xaa03c64cb957be87ULL,
++ 0xeda9bca512b041b3ULL,
++ 0x5e87f01b11171edcULL,
++ 0x62fd4976457fbfdbULL,
++ 0xd1d305c846d8e0b4ULL,
++ 0x96797f21ed3f1f80ULL,
++ 0x2557339fee9840efULL,
++ 0xee8c0dfb45ee5d8eULL,
++ 0x5da24145464902e1ULL,
++ 0x1a083bacedaefdd5ULL,
++ 0xa9267712ee09a2baULL,
++ 0x955cce7fba6103bdULL,
++ 0x267282c1b9c65cd2ULL,
++ 0x61d8f8281221a3e6ULL,
++ 0xd2f6b4961186fc89ULL,
++ 0x9f8169ba49a54b33ULL,
++ 0x2caf25044a02145cULL,
++ 0x6b055fede1e5eb68ULL,
++ 0xd82b1353e242b407ULL,
++ 0xe451aa3eb62a1500ULL,
++ 0x577fe680b58d4a6fULL,
++ 0x10d59c691e6ab55bULL,
++ 0xa3fbd0d71dcdea34ULL,
++ 0x6820eeb3b6bbf755ULL,
++ 0xdb0ea20db51ca83aULL,
++ 0x9ca4d8e41efb570eULL,
++ 0x2f8a945a1d5c0861ULL,
++ 0x13f02d374934a966ULL,
++ 0xa0de61894a93f609ULL,
++ 0xe7741b60e174093dULL,
++ 0x545a57dee2d35652ULL,
++ 0xe21ac88218962d7aULL,
++ 0x5134843c1b317215ULL,
++ 0x169efed5b0d68d21ULL,
++ 0xa5b0b26bb371d24eULL,
++ 0x99ca0b06e7197349ULL,
++ 0x2ae447b8e4be2c26ULL,
++ 0x6d4e3d514f59d312ULL,
++ 0xde6071ef4cfe8c7dULL,
++ 0x15bb4f8be788911cULL,
++ 0xa6950335e42fce73ULL,
++ 0xe13f79dc4fc83147ULL,
++ 0x521135624c6f6e28ULL,
++ 0x6e6b8c0f1807cf2fULL,
++ 0xdd45c0b11ba09040ULL,
++ 0x9aefba58b0476f74ULL,
++ 0x29c1f6e6b3e0301bULL,
++ 0xc96c5795d7870f42ULL,
++ 0x7a421b2bd420502dULL,
++ 0x3de861c27fc7af19ULL,
++ 0x8ec62d7c7c60f076ULL,
++ 0xb2bc941128085171ULL,
++ 0x0192d8af2baf0e1eULL,
++ 0x4638a2468048f12aULL,
++ 0xf516eef883efae45ULL,
++ 0x3ecdd09c2899b324ULL,
++ 0x8de39c222b3eec4bULL,
++ 0xca49e6cb80d9137fULL,
++ 0x7967aa75837e4c10ULL,
++ 0x451d1318d716ed17ULL,
++ 0xf6335fa6d4b1b278ULL,
++ 0xb199254f7f564d4cULL,
++ 0x02b769f17cf11223ULL,
++ 0xb4f7f6ad86b4690bULL,
++ 0x07d9ba1385133664ULL,
++ 0x4073c0fa2ef4c950ULL,
++ 0xf35d8c442d53963fULL,
++ 0xcf273529793b3738ULL,
++ 0x7c0979977a9c6857ULL,
++ 0x3ba3037ed17b9763ULL,
++ 0x888d4fc0d2dcc80cULL,
++ 0x435671a479aad56dULL,
++ 0xf0783d1a7a0d8a02ULL,
++ 0xb7d247f3d1ea7536ULL,
++ 0x04fc0b4dd24d2a59ULL,
++ 0x3886b22086258b5eULL,
++ 0x8ba8fe9e8582d431ULL,
++ 0xcc0284772e652b05ULL,
++ 0x7f2cc8c92dc2746aULL,
++ 0x325b15e575e1c3d0ULL,
++ 0x8175595b76469cbfULL,
++ 0xc6df23b2dda1638bULL,
++ 0x75f16f0cde063ce4ULL,
++ 0x498bd6618a6e9de3ULL,
++ 0xfaa59adf89c9c28cULL,
++ 0xbd0fe036222e3db8ULL,
++ 0x0e21ac88218962d7ULL,
++ 0xc5fa92ec8aff7fb6ULL,
++ 0x76d4de52895820d9ULL,
++ 0x317ea4bb22bfdfedULL,
++ 0x8250e80521188082ULL,
++ 0xbe2a516875702185ULL,
++ 0x0d041dd676d77eeaULL,
++ 0x4aae673fdd3081deULL,
++ 0xf9802b81de97deb1ULL,
++ 0x4fc0b4dd24d2a599ULL,
++ 0xfceef8632775faf6ULL,
++ 0xbb44828a8c9205c2ULL,
++ 0x086ace348f355aadULL,
++ 0x34107759db5dfbaaULL,
++ 0x873e3be7d8faa4c5ULL,
++ 0xc094410e731d5bf1ULL,
++ 0x73ba0db070ba049eULL,
++ 0xb86133d4dbcc19ffULL,
++ 0x0b4f7f6ad86b4690ULL,
++ 0x4ce50583738cb9a4ULL,
++ 0xffcb493d702be6cbULL,
++ 0xc3b1f050244347ccULL,
++ 0x709fbcee27e418a3ULL,
++ 0x3735c6078c03e797ULL,
++ 0x841b8ab98fa4b8f8ULL,
++ 0xadda7c5f3c4488e3ULL,
++ 0x1ef430e13fe3d78cULL,
++ 0x595e4a08940428b8ULL,
++ 0xea7006b697a377d7ULL,
++ 0xd60abfdbc3cbd6d0ULL,
++ 0x6524f365c06c89bfULL,
++ 0x228e898c6b8b768bULL,
++ 0x91a0c532682c29e4ULL,
++ 0x5a7bfb56c35a3485ULL,
++ 0xe955b7e8c0fd6beaULL,
++ 0xaeffcd016b1a94deULL,
++ 0x1dd181bf68bdcbb1ULL,
++ 0x21ab38d23cd56ab6ULL,
++ 0x9285746c3f7235d9ULL,
++ 0xd52f0e859495caedULL,
++ 0x6601423b97329582ULL,
++ 0xd041dd676d77eeaaULL,
++ 0x636f91d96ed0b1c5ULL,
++ 0x24c5eb30c5374ef1ULL,
++ 0x97eba78ec690119eULL,
++ 0xab911ee392f8b099ULL,
++ 0x18bf525d915feff6ULL,
++ 0x5f1528b43ab810c2ULL,
++ 0xec3b640a391f4fadULL,
++ 0x27e05a6e926952ccULL,
++ 0x94ce16d091ce0da3ULL,
++ 0xd3646c393a29f297ULL,
++ 0x604a2087398eadf8ULL,
++ 0x5c3099ea6de60cffULL,
++ 0xef1ed5546e415390ULL,
++ 0xa8b4afbdc5a6aca4ULL,
++ 0x1b9ae303c601f3cbULL,
++ 0x56ed3e2f9e224471ULL,
++ 0xe5c372919d851b1eULL,
++ 0xa26908783662e42aULL,
++ 0x114744c635c5bb45ULL,
++ 0x2d3dfdab61ad1a42ULL,
++ 0x9e13b115620a452dULL,
++ 0xd9b9cbfcc9edba19ULL,
++ 0x6a978742ca4ae576ULL,
++ 0xa14cb926613cf817ULL,
++ 0x1262f598629ba778ULL,
++ 0x55c88f71c97c584cULL,
++ 0xe6e6c3cfcadb0723ULL,
++ 0xda9c7aa29eb3a624ULL,
++ 0x69b2361c9d14f94bULL,
++ 0x2e184cf536f3067fULL,
++ 0x9d36004b35545910ULL,
++ 0x2b769f17cf112238ULL,
++ 0x9858d3a9ccb67d57ULL,
++ 0xdff2a94067518263ULL,
++ 0x6cdce5fe64f6dd0cULL,
++ 0x50a65c93309e7c0bULL,
++ 0xe388102d33392364ULL,
++ 0xa4226ac498dedc50ULL,
++ 0x170c267a9b79833fULL,
++ 0xdcd7181e300f9e5eULL,
++ 0x6ff954a033a8c131ULL,
++ 0x28532e49984f3e05ULL,
++ 0x9b7d62f79be8616aULL,
++ 0xa707db9acf80c06dULL,
++ 0x14299724cc279f02ULL,
++ 0x5383edcd67c06036ULL,
++ 0xe0ada17364673f59ULL
++ }
++};
++
++
++/**
++ \brief Returns the initial CRC64 seed
++ */
++static __inline__ uint64_t crc64_init(void)
++{
++ return CRC64_ECMA_182.initial;
++}
++
++/**
++ \brief Computes the 64-bit CRC
++ \param[in] data Pointer to the data in the frame
++ \param[in] len Length of the data
++ \param[in] seed Initial or previously accumulated CRC value
++ \return The calculated CRC
++ */
++static __inline__ uint64_t crc64_compute(void const *data,
++ uint32_t len,
++ uint64_t seed)
++{
++ uint32_t i;
++ uint64_t crc = seed;
++ uint8_t *bdata = (uint8_t *) data;
++
++ for (i = 0; i < len; i++)
++ crc =
++ CRC64_ECMA_182.
++ table[(crc ^ *bdata++) & CRC64_BYTE_MASK] ^ (crc >> 8);
++
++ return crc;
++}
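++
++/* Illustrative usage (not part of the original file; the buffer names are
++ * hypothetical). Because crc64_compute() takes the running CRC as its seed,
++ * hashing two fragments in sequence equals hashing the whole buffer at once:
++ *
++ *     uint64_t crc = crc64_init();
++ *     crc = crc64_compute(hdr, hdrLen, crc);
++ *     crc = crc64_compute(payload, payloadLen, crc);
++ */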
++
++
++#endif /* __CRC64_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c
+new file mode 100644
+index 00000000..17c933b4
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c
+@@ -0,0 +1,7582 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_cc.c
++
++ @Description FM Coarse Classifier implementation
++ *//***************************************************************************/
++#include <linux/math64.h>
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "debug_ext.h"
++#include "fm_pcd_ext.h"
++#include "fm_muram_ext.h"
++
++#include "fm_common.h"
++#include "fm_pcd.h"
++#include "fm_hc.h"
++#include "fm_cc.h"
++#include "crc64.h"
++
++/****************************************/
++/* static functions */
++/****************************************/
++
++
++static t_Error CcRootTryLock(t_Handle h_FmPcdCcTree)
++{
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
++
++ ASSERT_COND(h_FmPcdCcTree);
++
++ if (FmPcdLockTryLock(p_FmPcdCcTree->p_Lock))
++ return E_OK;
++
++ return ERROR_CODE(E_BUSY);
++}
++
++static void CcRootReleaseLock(t_Handle h_FmPcdCcTree)
++{
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
++
++ ASSERT_COND(h_FmPcdCcTree);
++
++ FmPcdLockUnlock(p_FmPcdCcTree->p_Lock);
++}
++
++static void UpdateNodeOwner(t_FmPcdCcNode *p_CcNode, bool add)
++{
++ uint32_t intFlags;
++
++ ASSERT_COND(p_CcNode);
++
++ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
++
++ if (add)
++ p_CcNode->owners++;
++ else
++ {
++ ASSERT_COND(p_CcNode->owners);
++ p_CcNode->owners--;
++ }
++
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++}
++
++static __inline__ t_FmPcdStatsObj* DequeueStatsObj(t_List *p_List)
++{
++ t_FmPcdStatsObj *p_StatsObj = NULL;
++ t_List *p_Next;
++
++ if (!LIST_IsEmpty(p_List))
++ {
++ p_Next = LIST_FIRST(p_List);
++ p_StatsObj = LIST_OBJECT(p_Next, t_FmPcdStatsObj, node);
++ ASSERT_COND(p_StatsObj);
++ LIST_DelAndInit(p_Next);
++ }
++
++ return p_StatsObj;
++}
++
++static __inline__ void EnqueueStatsObj(t_List *p_List,
++ t_FmPcdStatsObj *p_StatsObj)
++{
++ LIST_AddToTail(&p_StatsObj->node, p_List);
++}
++
++static void FreeStatObjects(t_List *p_List, t_Handle h_FmMuram)
++{
++ t_FmPcdStatsObj *p_StatsObj;
++
++ while (!LIST_IsEmpty(p_List))
++ {
++ p_StatsObj = DequeueStatsObj(p_List);
++ ASSERT_COND(p_StatsObj);
++
++ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd);
++ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsCounters);
++
++ XX_Free(p_StatsObj);
++ }
++}
++
++static t_FmPcdStatsObj* GetStatsObj(t_FmPcdCcNode *p_CcNode)
++{
++ t_FmPcdStatsObj* p_StatsObj;
++ t_Handle h_FmMuram;
++
++ ASSERT_COND(p_CcNode);
++
++ /* If 'maxNumOfKeys' was passed, all statistics objects were preallocated
++ upon node initialization */
++ if (p_CcNode->maxNumOfKeys)
++ {
++ p_StatsObj = DequeueStatsObj(&p_CcNode->availableStatsLst);
++ ASSERT_COND(p_StatsObj);
++
++ /* Clean statistics counters & ADs */
++ MemSet8(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize);
++ }
++ else
++ {
++ h_FmMuram = ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram;
++ ASSERT_COND(h_FmMuram);
++
++ p_StatsObj = XX_Malloc(sizeof(t_FmPcdStatsObj));
++ if (!p_StatsObj)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("statistics object"));
++ return NULL;
++ }
++
++ p_StatsObj->h_StatsAd = (t_Handle)FM_MURAM_AllocMem(
++ h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_StatsObj->h_StatsAd)
++ {
++ XX_Free(p_StatsObj);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics ADs"));
++ return NULL;
++ }
++ MemSet8(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ p_StatsObj->h_StatsCounters = (t_Handle)FM_MURAM_AllocMem(
++ h_FmMuram, p_CcNode->countersArraySize,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_StatsObj->h_StatsCounters)
++ {
++ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd);
++ XX_Free(p_StatsObj);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics counters"));
++ return NULL;
++ }
++ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize);
++ }
++
++ return p_StatsObj;
++}
++
++static void PutStatsObj(t_FmPcdCcNode *p_CcNode, t_FmPcdStatsObj *p_StatsObj)
++{
++ t_Handle h_FmMuram;
++
++ ASSERT_COND(p_CcNode);
++ ASSERT_COND(p_StatsObj);
++
++ /* If 'maxNumOfKeys' was passed, all statistics objects were preallocated
++ upon node initialization and will now be enqueued back to the list */
++ if (p_CcNode->maxNumOfKeys)
++ {
++ /* Clean statistics counters */
++ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize);
++
++ /* Clean statistics ADs */
++ MemSet8(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ EnqueueStatsObj(&p_CcNode->availableStatsLst, p_StatsObj);
++ }
++ else
++ {
++ h_FmMuram = ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram;
++ ASSERT_COND(h_FmMuram);
++
++ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd);
++ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsCounters);
++
++ XX_Free(p_StatsObj);
++ }
++}
++
++static void SetStatsCounters(t_AdOfTypeStats *p_StatsAd,
++ uint32_t statsCountersAddr)
++{
++ uint32_t tmp = (statsCountersAddr & FM_PCD_AD_STATS_COUNTERS_ADDR_MASK);
++
++ WRITE_UINT32(p_StatsAd->statsTableAddr, tmp);
++}
++
++
++static void UpdateStatsAd(t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
++ t_Handle h_Ad, uint64_t physicalMuramBase)
++{
++ t_AdOfTypeStats *p_StatsAd;
++ uint32_t statsCountersAddr, nextActionAddr, tmp;
++#if (DPAA_VERSION >= 11)
++ uint32_t frameLengthRangesAddr;
++#endif /* (DPAA_VERSION >= 11) */
++
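++ /* Build the statistics AD: link the frame-length-ranges table (DPAA 1.1 and
++ up, when present), chain the caller's AD as the 'next action', and attach
++ the statistics counters table */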
++ p_StatsAd = (t_AdOfTypeStats *)p_FmPcdCcStatsParams->h_StatsAd;
++
++ tmp = FM_PCD_AD_STATS_TYPE;
++
++#if (DPAA_VERSION >= 11)
++ if (p_FmPcdCcStatsParams->h_StatsFLRs)
++ {
++ frameLengthRangesAddr = (uint32_t)((XX_VirtToPhys(
++ p_FmPcdCcStatsParams->h_StatsFLRs) - physicalMuramBase));
++ tmp |= (frameLengthRangesAddr & FM_PCD_AD_STATS_FLR_ADDR_MASK);
++ }
++#endif /* (DPAA_VERSION >= 11) */
++ WRITE_UINT32(p_StatsAd->profileTableAddr, tmp);
++
++ nextActionAddr = (uint32_t)((XX_VirtToPhys(h_Ad) - physicalMuramBase));
++ tmp = 0;
++ tmp |= (uint32_t)((nextActionAddr << FM_PCD_AD_STATS_NEXT_ACTION_SHIFT)
++ & FM_PCD_AD_STATS_NEXT_ACTION_MASK);
++ tmp |= (FM_PCD_AD_STATS_NAD_EN | FM_PCD_AD_STATS_OP_CODE);
++
++#if (DPAA_VERSION >= 11)
++ if (p_FmPcdCcStatsParams->h_StatsFLRs)
++ tmp |= FM_PCD_AD_STATS_FLR_EN;
++#endif /* (DPAA_VERSION >= 11) */
++
++ WRITE_UINT32(p_StatsAd->nextActionIndx, tmp);
++
++ statsCountersAddr = (uint32_t)((XX_VirtToPhys(
++ p_FmPcdCcStatsParams->h_StatsCounters) - physicalMuramBase));
++ SetStatsCounters(p_StatsAd, statsCountersAddr);
++}
++
++static void FillAdOfTypeContLookup(t_Handle h_Ad,
++ t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
++ t_Handle h_FmPcd, t_Handle p_CcNode,
++ t_Handle h_Manip, t_Handle h_FrmReplic)
++{
++ t_FmPcdCcNode *p_Node = (t_FmPcdCcNode *)p_CcNode;
++ t_AdOfTypeContLookup *p_AdContLookup = (t_AdOfTypeContLookup *)h_Ad;
++ t_Handle h_TmpAd;
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t tmpReg32;
++ t_Handle p_AdNewPtr = NULL;
++
++ UNUSED(h_Manip);
++ UNUSED(h_FrmReplic);
++
++ /* This routine handles three cases of building a "Continue lookup" type AD.
++ * Case 1: No Manip. The action descriptor is built within the match table.
++ * p_AdResult = p_AdNewPtr;
++ * Case 2: Manip exists. A new AD is created - p_AdNewPtr. It is initialized
++ * either in the FmPcdManipUpdateAdResultForCc routine, or it was already
++ * initialized and is returned here.
++ * p_AdResult (within the match table) will be initialized after
++ * this routine returns and will point to the existing AD.
++ * Case 3: Manip exists. The action descriptor is built within the match table.
++ * FmPcdManipUpdateAdContLookupForCc returns a NULL p_AdNewPtr.
++ */
++
++ /* By default, the "new" pointer is the current one, i.e. the content of the
++ * result AD will be written into the match table itself (case (1)) */
++ p_AdNewPtr = p_AdContLookup;
++
++ /* Initialize an action descriptor, if current statistics mode requires an Ad */
++ if (p_FmPcdCcStatsParams)
++ {
++ ASSERT_COND(p_FmPcdCcStatsParams->h_StatsAd);
++ ASSERT_COND(p_FmPcdCcStatsParams->h_StatsCounters);
++
++ /* Swapping addresses between statistics Ad and the current lookup AD */
++ h_TmpAd = p_FmPcdCcStatsParams->h_StatsAd;
++ p_FmPcdCcStatsParams->h_StatsAd = h_Ad;
++ h_Ad = h_TmpAd;
++
++ p_AdNewPtr = h_Ad;
++ p_AdContLookup = h_Ad;
++
++ /* Init statistics Ad and connect current lookup AD as 'next action' from statistics Ad */
++ UpdateStatsAd(p_FmPcdCcStatsParams, h_Ad, p_FmPcd->physicalMuramBase);
++ }
++
+#if (DPAA_VERSION >= 11)
++ if (h_Manip && h_FrmReplic)
++ FmPcdManipUpdateAdContLookupForCc(
++ h_Manip,
++ h_Ad,
++ &p_AdNewPtr,
++ (uint32_t)((XX_VirtToPhys(
++ FrmReplicGroupGetSourceTableDescriptor(h_FrmReplic))
++ - p_FmPcd->physicalMuramBase)));
++ else
++ if (h_FrmReplic)
++ FrmReplicGroupUpdateAd(h_FrmReplic, h_Ad, &p_AdNewPtr);
++ else
++#endif /* (DPAA_VERSION >= 11) */
++ if (h_Manip)
++ FmPcdManipUpdateAdContLookupForCc(
++ h_Manip,
++ h_Ad,
++ &p_AdNewPtr,
++
++#ifdef FM_CAPWAP_SUPPORT
++ /* no check for the manip opcode - this step can be reached only with capwap_applic_specific */
++ (uint32_t)((XX_VirtToPhys(p_Node->h_AdTable) - p_FmPcd->physicalMuramBase))
++#else /* not FM_CAPWAP_SUPPORT */
++ (uint32_t)((XX_VirtToPhys(p_Node->h_Ad)
++ - p_FmPcd->physicalMuramBase))
++#endif /* not FM_CAPWAP_SUPPORT */
++ );
++
++ /* if (p_AdNewPtr == NULL) --> Done. (case (3)) */
++ if (p_AdNewPtr)
++ {
++ /* cases (1) & (2) */
++ tmpReg32 = 0;
++ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ tmpReg32 |=
++ p_Node->sizeOfExtraction ? ((p_Node->sizeOfExtraction - 1) << 24) :
++ 0;
++ tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Node->h_AdTable)
++ - p_FmPcd->physicalMuramBase);
++ WRITE_UINT32(p_AdContLookup->ccAdBase, tmpReg32);
++
++ tmpReg32 = 0;
++ tmpReg32 |= p_Node->numOfKeys << 24;
++ tmpReg32 |= (p_Node->lclMask ? FM_PCD_AD_CONT_LOOKUP_LCL_MASK : 0);
++ tmpReg32 |=
++ p_Node->h_KeysMatchTable ? (uint32_t)(XX_VirtToPhys(
++ p_Node->h_KeysMatchTable) - p_FmPcd->physicalMuramBase) :
++ 0;
++ WRITE_UINT32(p_AdContLookup->matchTblPtr, tmpReg32);
++
++ tmpReg32 = 0;
++ tmpReg32 |= p_Node->prsArrayOffset << 24;
++ tmpReg32 |= p_Node->offset << 16;
++ tmpReg32 |= p_Node->parseCode;
++ WRITE_UINT32(p_AdContLookup->pcAndOffsets, tmpReg32);
++
++ MemCpy8((void*)&p_AdContLookup->gmask, p_Node->p_GlblMask,
++ CC_GLBL_MASK_SIZE);
++ }
++}
++
++static t_Error AllocAndFillAdForContLookupManip(t_Handle h_CcNode)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint32_t intFlags;
++
++ ASSERT_COND(p_CcNode);
++
++ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
++
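++ /* Allocate and fill the AD only on first use; the spinlock prevents two
++ callers from both observing a NULL h_Ad and double-allocating */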
++ if (!p_CcNode->h_Ad)
++ {
++ if (p_CcNode->maxNumOfKeys)
++ p_CcNode->h_Ad = p_CcNode->h_TmpAd;
++ else
++ p_CcNode->h_Ad = (t_Handle)FM_MURAM_AllocMem(
++ ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram,
++ FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN);
++
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++
++ if (!p_CcNode->h_Ad)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for CC action descriptor"));
++
++ MemSet8(p_CcNode->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ FillAdOfTypeContLookup(p_CcNode->h_Ad, NULL, p_CcNode->h_FmPcd,
++ p_CcNode, NULL, NULL);
++ }
++ else
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++
++ return E_OK;
++}
++
++static t_Error SetRequiredAction1(
++ t_Handle h_FmPcd, uint32_t requiredAction,
++ t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParamsTmp,
++ t_Handle h_AdTmp, uint16_t numOfEntries, t_Handle h_Tree)
++{
++ t_AdOfTypeResult *p_AdTmp = (t_AdOfTypeResult *)h_AdTmp;
++ uint32_t tmpReg32;
++ t_Error err;
++ t_FmPcdCcNode *p_CcNode;
++ int i = 0;
++ uint16_t tmp = 0;
++ uint16_t profileId;
++ uint8_t relativeSchemeId, physicalSchemeId;
++ t_CcNodeInformation ccNodeInfo;
++
++ for (i = 0; i < numOfEntries; i++)
++ {
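++ /* h_AdTmp advances one FM_PCD_CC_AD_ENTRY_SIZE entry per iteration
++ (the first iteration moves by zero, keeping the base pointer) */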
++ if (i == 0)
++ h_AdTmp = PTR_MOVE(h_AdTmp, i*FM_PCD_CC_AD_ENTRY_SIZE);
++ else
++ h_AdTmp = PTR_MOVE(h_AdTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ switch (p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.nextEngine)
++ {
++ case (e_FM_PCD_CC):
++ if (requiredAction)
++ {
++ p_CcNode =
++ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.ccParams.h_CcNode;
++ ASSERT_COND(p_CcNode);
++ if (p_CcNode->shadowAction == requiredAction)
++ break;
++ if ((requiredAction & UPDATE_CC_WITH_TREE)
++ && !(p_CcNode->shadowAction & UPDATE_CC_WITH_TREE))
++ {
++
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = h_Tree;
++ EnqueueNodeInfoToRelevantLst(&p_CcNode->ccTreesLst,
++ &ccNodeInfo, NULL);
++ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
++ UPDATE_CC_WITH_TREE;
++ }
++ if ((requiredAction & UPDATE_CC_SHADOW_CLEAR)
++ && !(p_CcNode->shadowAction & UPDATE_CC_SHADOW_CLEAR))
++ {
++
++ p_CcNode->shadowAction = 0;
++ }
++
++ if ((requiredAction & UPDATE_CC_WITH_DELETE_TREE)
++ && !(p_CcNode->shadowAction
++ & UPDATE_CC_WITH_DELETE_TREE))
++ {
++ DequeueNodeInfoFromRelevantLst(&p_CcNode->ccTreesLst,
++ h_Tree, NULL);
++ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
++ UPDATE_CC_WITH_DELETE_TREE;
++ }
++ if (p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine
++ != e_FM_PCD_INVALID)
++ tmp = (uint8_t)(p_CcNode->numOfKeys + 1);
++ else
++ tmp = p_CcNode->numOfKeys;
++ err = SetRequiredAction1(h_FmPcd, requiredAction,
++ p_CcNode->keyAndNextEngineParams,
++ p_CcNode->h_AdTable, tmp, h_Tree);
++ if (err != E_OK)
++ return err;
++ if (requiredAction != UPDATE_CC_SHADOW_CLEAR)
++ p_CcNode->shadowAction |= requiredAction;
++ }
++ break;
++
++ case (e_FM_PCD_KG):
++ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
++ && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction
++ & UPDATE_NIA_ENQ_WITHOUT_DMA))
++ {
++ physicalSchemeId =
++ FmPcdKgGetSchemeId(
++ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme);
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(
++ h_FmPcd, physicalSchemeId);
++ if (relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++ if (!FmPcdKgIsSchemeValidSw(
++ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Invalid direct scheme."));
++ if (!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++ ("For this action scheme has to be direct."));
++ err =
++ FmPcdKgCcGetSetParams(
++ h_FmPcd,
++ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme,
++ requiredAction, 0);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
++ requiredAction;
++ }
++ break;
++
++ case (e_FM_PCD_PLCR):
++ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
++ && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction
++ & UPDATE_NIA_ENQ_WITHOUT_DMA))
++ {
++ if (!p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.overrideParams)
++ RETURN_ERROR(
++ MAJOR,
++ E_NOT_SUPPORTED,
++ ("In this initialization only overrideFqid can be initialized"));
++ if (!p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.sharedProfile)
++ RETURN_ERROR(
++ MAJOR,
++ E_NOT_SUPPORTED,
++ ("In this initialization only overrideFqid can be initialized"));
++ err =
++ FmPcdPlcrGetAbsoluteIdByProfileParams(
++ h_FmPcd,
++ e_FM_PCD_PLCR_SHARED,
++ NULL,
++ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.newRelativeProfileId,
++ &profileId);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ err = FmPcdPlcrCcGetSetParams(h_FmPcd, profileId,
++ requiredAction);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
++ requiredAction;
++ }
++ break;
++
++ case (e_FM_PCD_DONE):
++ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
++ && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction
++ & UPDATE_NIA_ENQ_WITHOUT_DMA))
++ {
++ tmpReg32 = GET_UINT32(p_AdTmp->nia);
++ if ((tmpReg32 & GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd))
++ != GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("Next engine was previously assigned not as PCD_DONE"));
++ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
++ WRITE_UINT32(p_AdTmp->nia, tmpReg32);
++ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
++ requiredAction;
++ }
++ break;
++
++ default:
++ break;
++ }
++ }
++
++ return E_OK;
++}
++
++static t_Error SetRequiredAction(
++ t_Handle h_FmPcd, uint32_t requiredAction,
++ t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParamsTmp,
++ t_Handle h_AdTmp, uint16_t numOfEntries, t_Handle h_Tree)
++{
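++ /* Two passes: the first applies 'requiredAction' across the CC graph, using
++ each node's shadowAction to avoid revisiting shared nodes; the second pass
++ (UPDATE_CC_SHADOW_CLEAR) resets those shadow marks */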
++ t_Error err = SetRequiredAction1(h_FmPcd, requiredAction,
++ p_CcKeyAndNextEngineParamsTmp, h_AdTmp,
++ numOfEntries, h_Tree);
++ if (err != E_OK)
++ return err;
++ return SetRequiredAction1(h_FmPcd, UPDATE_CC_SHADOW_CLEAR,
++ p_CcKeyAndNextEngineParamsTmp, h_AdTmp,
++ numOfEntries, h_Tree);
++}
++
++static t_Error ReleaseModifiedDataStructure(
++ t_Handle h_FmPcd, t_List *h_FmPcdOldPointersLst,
++ t_List *h_FmPcdNewPointersLst,
++ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams,
++ bool useShadowStructs)
++{
++ t_List *p_Pos;
++ t_Error err = E_OK;
++ t_CcNodeInformation ccNodeInfo, *p_CcNodeInformation;
++ t_Handle h_Muram;
++ t_FmPcdCcNode *p_FmPcdCcNextNode, *p_FmPcdCcWorkingOnNode;
++ t_List *p_UpdateLst;
++ uint32_t intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_AdditionalParams->h_CurrentNode,
++ E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPcdOldPointersLst, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPcdNewPointersLst, E_INVALID_HANDLE);
++
++ /* We don't update the subtree of the new node with the new tree, because that was done in the previous stage */
++ if (p_AdditionalParams->h_NodeForAdd)
++ {
++ p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_AdditionalParams->h_NodeForAdd;
++
++ if (!p_AdditionalParams->tree)
++ p_UpdateLst = &p_FmPcdCcNextNode->ccPrevNodesLst;
++ else
++ p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst;
++
++ p_CcNodeInformation = FindNodeInfoInReleventLst(
++ p_UpdateLst, p_AdditionalParams->h_CurrentNode,
++ p_FmPcdCcNextNode->h_Spinlock);
++
++ if (p_CcNodeInformation)
++ p_CcNodeInformation->index++;
++ else
++ {
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = (t_Handle)p_AdditionalParams->h_CurrentNode;
++ ccNodeInfo.index = 1;
++ EnqueueNodeInfoToRelevantLst(p_UpdateLst, &ccNodeInfo,
++ p_FmPcdCcNextNode->h_Spinlock);
++ }
++ if (p_AdditionalParams->h_ManipForAdd)
++ {
++ p_CcNodeInformation = FindNodeInfoInReleventLst(
++ FmPcdManipGetNodeLstPointedOnThisManip(
++ p_AdditionalParams->h_ManipForAdd),
++ p_AdditionalParams->h_CurrentNode,
++ FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForAdd));
++
++ if (p_CcNodeInformation)
++ p_CcNodeInformation->index++;
++ else
++ {
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode =
++ (t_Handle)p_AdditionalParams->h_CurrentNode;
++ ccNodeInfo.index = 1;
++ EnqueueNodeInfoToRelevantLst(
++ FmPcdManipGetNodeLstPointedOnThisManip(
++ p_AdditionalParams->h_ManipForAdd),
++ &ccNodeInfo,
++ FmPcdManipGetSpinlock(
++ p_AdditionalParams->h_ManipForAdd));
++ }
++ }
++ }
++
++ if (p_AdditionalParams->h_NodeForRmv)
++ {
++ p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_AdditionalParams->h_NodeForRmv;
++
++ if (!p_AdditionalParams->tree)
++ {
++ p_UpdateLst = &p_FmPcdCcNextNode->ccPrevNodesLst;
++ p_FmPcdCcWorkingOnNode =
++ (t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode);
++
++ for (p_Pos = LIST_FIRST(&p_FmPcdCcWorkingOnNode->ccTreesLst);
++ p_Pos != (&p_FmPcdCcWorkingOnNode->ccTreesLst); p_Pos =
++ LIST_NEXT(p_Pos))
++ {
++ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
++
++ ASSERT_COND(p_CcNodeInformation->h_CcNode);
++
++ err =
++ SetRequiredAction(
++ h_FmPcd,
++ UPDATE_CC_WITH_DELETE_TREE,
++ &((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams[p_AdditionalParams->savedKeyIndex],
++ PTR_MOVE(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_AdTable, p_AdditionalParams->savedKeyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
++ 1, p_CcNodeInformation->h_CcNode);
++ }
++ }
++ else
++ {
++ p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst;
++
++ err =
++ SetRequiredAction(
++ h_FmPcd,
++ UPDATE_CC_WITH_DELETE_TREE,
++ &((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams[p_AdditionalParams->savedKeyIndex],
++ UINT_TO_PTR(((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->ccTreeBaseAddr + p_AdditionalParams->savedKeyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
++ 1, p_AdditionalParams->h_CurrentNode);
++ }
++ if (err)
++ return err;
++
++ /* We remove the tree from the subtree of the removed node, since this wasn't done in the previous stage:
++ update ccPrevNodesLst or ccTreeIdLst of the removed node,
++ and update the node owner */
++ p_CcNodeInformation = FindNodeInfoInReleventLst(
++ p_UpdateLst, p_AdditionalParams->h_CurrentNode,
++ p_FmPcdCcNextNode->h_Spinlock);
++
++ ASSERT_COND(p_CcNodeInformation);
++ ASSERT_COND(p_CcNodeInformation->index);
++
++ p_CcNodeInformation->index--;
++
++ if (p_CcNodeInformation->index == 0)
++ DequeueNodeInfoFromRelevantLst(p_UpdateLst,
++ p_AdditionalParams->h_CurrentNode,
++ p_FmPcdCcNextNode->h_Spinlock);
++
++ UpdateNodeOwner(p_FmPcdCcNextNode, FALSE);
++
++ if (p_AdditionalParams->h_ManipForRmv)
++ {
++ p_CcNodeInformation = FindNodeInfoInReleventLst(
++ FmPcdManipGetNodeLstPointedOnThisManip(
++ p_AdditionalParams->h_ManipForRmv),
++ p_AdditionalParams->h_CurrentNode,
++ FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForRmv));
++
++ ASSERT_COND(p_CcNodeInformation);
++ ASSERT_COND(p_CcNodeInformation->index);
++
++ p_CcNodeInformation->index--;
++
++ if (p_CcNodeInformation->index == 0)
++ DequeueNodeInfoFromRelevantLst(
++ FmPcdManipGetNodeLstPointedOnThisManip(
++ p_AdditionalParams->h_ManipForRmv),
++ p_AdditionalParams->h_CurrentNode,
++ FmPcdManipGetSpinlock(
++ p_AdditionalParams->h_ManipForRmv));
++ }
++ }
++
++ if (p_AdditionalParams->h_ManipForRmv)
++ FmPcdManipUpdateOwner(p_AdditionalParams->h_ManipForRmv, FALSE);
++
++ if (p_AdditionalParams->p_StatsObjForRmv)
++ PutStatsObj((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode),
++ p_AdditionalParams->p_StatsObjForRmv);
++
++#if (DPAA_VERSION >= 11)
++ if (p_AdditionalParams->h_FrmReplicForRmv)
++ FrmReplicGroupUpdateOwner(p_AdditionalParams->h_FrmReplicForRmv,
++ FALSE/* remove */);
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (!useShadowStructs)
++ {
++ h_Muram = FmPcdGetMuramHandle(h_FmPcd);
++ ASSERT_COND(h_Muram);
++
++ if ((p_AdditionalParams->tree && !((t_FmPcd *)h_FmPcd)->p_CcShadow)
++ || (!p_AdditionalParams->tree
++ && !((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->maxNumOfKeys))
++ {
++ /* Release the new ADs that were allocated and updated only so they could be copied into the actual ADs */
++ for (p_Pos = LIST_FIRST(h_FmPcdNewPointersLst);
++ p_Pos != (h_FmPcdNewPointersLst); p_Pos = LIST_NEXT(p_Pos))
++ {
++
++ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
++ ASSERT_COND(p_CcNodeInformation->h_CcNode);
++ FM_MURAM_FreeMem(h_Muram, p_CcNodeInformation->h_CcNode);
++ }
++ }
++
++ /* Free the old data structures if needed - new data structures were allocated */
++ if (p_AdditionalParams->p_AdTableOld)
++ FM_MURAM_FreeMem(h_Muram, p_AdditionalParams->p_AdTableOld);
++
++ if (p_AdditionalParams->p_KeysMatchTableOld)
++ FM_MURAM_FreeMem(h_Muram, p_AdditionalParams->p_KeysMatchTableOld);
++ }
++
++ /* Update current modified node with changed fields if it's required*/
++ if (!p_AdditionalParams->tree)
++ {
++ if (p_AdditionalParams->p_AdTableNew)
++ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_AdTable =
++ p_AdditionalParams->p_AdTableNew;
++
++ if (p_AdditionalParams->p_KeysMatchTableNew)
++ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_KeysMatchTable =
++ p_AdditionalParams->p_KeysMatchTableNew;
++
++ /* Locking node's spinlock before updating 'keys and next engine' structure,
++ as it may be used to retrieve keys statistics */
++ intFlags =
++ XX_LockIntrSpinlock(
++ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_Spinlock);
++
++ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->numOfKeys =
++ p_AdditionalParams->numOfKeys;
++
++ memcpy(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams,
++ &p_AdditionalParams->keyAndNextEngineParams,
++ sizeof(t_FmPcdCcKeyAndNextEngineParams) * (CC_MAX_NUM_OF_KEYS));
++
++ XX_UnlockIntrSpinlock(
++ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_Spinlock,
++ intFlags);
++ }
++ else
++ {
++ uint8_t numEntries =
++ ((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->numOfEntries;
++ ASSERT_COND(numEntries < FM_PCD_MAX_NUM_OF_CC_GROUPS);
++ memcpy(&((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams,
++ &p_AdditionalParams->keyAndNextEngineParams,
++ sizeof(t_FmPcdCcKeyAndNextEngineParams) * numEntries);
++ }
++
++ ReleaseLst(h_FmPcdOldPointersLst);
++ ReleaseLst(h_FmPcdNewPointersLst);
++
++ XX_Free(p_AdditionalParams);
++
++ return E_OK;
++}
++
++static t_Error BuildNewAd(
++ t_Handle h_Ad,
++ t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams,
++ t_FmPcdCcNode *p_CcNode,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcdCcNode *p_FmPcdCcNodeTmp;
++ t_Handle h_OrigAd = NULL;
++
++ p_FmPcdCcNodeTmp = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode));
++ if (!p_FmPcdCcNodeTmp)
++ {
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_FmPcdCcNodeTmp"));
++ }
++ memset(p_FmPcdCcNodeTmp, 0, sizeof(t_FmPcdCcNode));
++
++ p_FmPcdCcNodeTmp->numOfKeys = p_FmPcdModifyCcKeyAdditionalParams->numOfKeys;
++ p_FmPcdCcNodeTmp->h_KeysMatchTable =
++ p_FmPcdModifyCcKeyAdditionalParams->p_KeysMatchTableNew;
++ p_FmPcdCcNodeTmp->h_AdTable =
++ p_FmPcdModifyCcKeyAdditionalParams->p_AdTableNew;
++
++ p_FmPcdCcNodeTmp->lclMask = p_CcNode->lclMask;
++ p_FmPcdCcNodeTmp->parseCode = p_CcNode->parseCode;
++ p_FmPcdCcNodeTmp->offset = p_CcNode->offset;
++ p_FmPcdCcNodeTmp->prsArrayOffset = p_CcNode->prsArrayOffset;
++ p_FmPcdCcNodeTmp->ctrlFlow = p_CcNode->ctrlFlow;
++ p_FmPcdCcNodeTmp->ccKeySizeAccExtraction = p_CcNode->ccKeySizeAccExtraction;
++ p_FmPcdCcNodeTmp->sizeOfExtraction = p_CcNode->sizeOfExtraction;
++ p_FmPcdCcNodeTmp->glblMaskSize = p_CcNode->glblMaskSize;
++ p_FmPcdCcNodeTmp->p_GlblMask = p_CcNode->p_GlblMask;
++
++ if (p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_CC)
++ {
++ if (p_FmPcdCcNextEngineParams->h_Manip)
++ {
++ h_OrigAd = p_CcNode->h_Ad;
++ if (AllocAndFillAdForContLookupManip(
++ p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode)
++ != E_OK)
++ {
++ XX_Free(p_FmPcdCcNodeTmp);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++ }
++ }
++ FillAdOfTypeContLookup(h_Ad, NULL, p_CcNode->h_FmPcd, p_FmPcdCcNodeTmp,
++ h_OrigAd ? NULL : p_FmPcdCcNextEngineParams->h_Manip, NULL);
++ }
++
++#if (DPAA_VERSION >= 11)
++ if ((p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_FR)
++ && (p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic))
++ {
++ FillAdOfTypeContLookup(
++ h_Ad, NULL, p_CcNode->h_FmPcd, p_FmPcdCcNodeTmp,
++ p_FmPcdCcNextEngineParams->h_Manip,
++ p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic);
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ XX_Free(p_FmPcdCcNodeTmp);
++
++ return E_OK;
++}
++
++static t_Error DynamicChangeHc(
++ t_Handle h_FmPcd, t_List *h_OldPointersLst, t_List *h_NewPointersLst,
++ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams,
++ bool useShadowStructs)
++{
++ t_List *p_PosOld, *p_PosNew;
++ uint32_t oldAdAddrOffset, newAdAddrOffset;
++ uint16_t i = 0;
++ t_Error err = E_OK;
++ uint8_t numOfModifiedPtr;
++
++ ASSERT_COND(h_FmPcd);
++ ASSERT_COND(h_OldPointersLst);
++ ASSERT_COND(h_NewPointersLst);
++
++ numOfModifiedPtr = (uint8_t)LIST_NumOfObjs(h_OldPointersLst);
++
++ if (numOfModifiedPtr)
++ {
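++ /* A single new AD (the first entry of the new-pointers list) is copied,
++ via host command, over every AD in the old-pointers list */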
++ p_PosNew = LIST_FIRST(h_NewPointersLst);
++ p_PosOld = LIST_FIRST(h_OldPointersLst);
++
++ /* Retrieve address of new AD */
++ newAdAddrOffset = FmPcdCcGetNodeAddrOffsetFromNodeInfo(h_FmPcd,
++ p_PosNew);
++ if (newAdAddrOffset == (uint32_t)ILLEGAL_BASE)
++ {
++ ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst,
++ h_NewPointersLst,
++ p_AdditionalParams, useShadowStructs);
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("New AD address"));
++ }
++
++ for (i = 0; i < numOfModifiedPtr; i++)
++ {
++ /* Retrieve address of current AD */
++ oldAdAddrOffset = FmPcdCcGetNodeAddrOffsetFromNodeInfo(h_FmPcd,
++ p_PosOld);
++ if (oldAdAddrOffset == (uint32_t)ILLEGAL_BASE)
++ {
++ ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst,
++ h_NewPointersLst,
++ p_AdditionalParams,
++ useShadowStructs);
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Old AD address"));
++ }
++
++ /* Invoke host command to copy from new AD to old AD */
++ err = FmHcPcdCcDoDynamicChange(((t_FmPcd *)h_FmPcd)->h_Hc,
++ oldAdAddrOffset, newAdAddrOffset);
++ if (err)
++ {
++ ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst,
++ h_NewPointersLst,
++ p_AdditionalParams,
++ useShadowStructs);
++ RETURN_ERROR(
++ MAJOR,
++ err,
++ ("For part of nodes changes are done - situation is danger"));
++ }
++
++ p_PosOld = LIST_NEXT(p_PosOld);
++ }
++ }
++ return E_OK;
++}
++
++static t_Error DoDynamicChange(
++ t_Handle h_FmPcd, t_List *h_OldPointersLst, t_List *h_NewPointersLst,
++ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams,
++ bool useShadowStructs)
++{
++ t_FmPcdCcNode *p_CcNode =
++ (t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode);
++ t_List *p_PosNew;
++ t_CcNodeInformation *p_CcNodeInfo;
++ t_FmPcdCcNextEngineParams nextEngineParams;
++ t_Handle h_Ad;
++ uint32_t keySize;
++ t_Error err = E_OK;
++ uint8_t numOfModifiedPtr;
++
++ ASSERT_COND(h_FmPcd);
++
++ memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams));
++
++ numOfModifiedPtr = (uint8_t)LIST_NumOfObjs(h_OldPointersLst);
++
++ if (numOfModifiedPtr)
++ {
++
++ p_PosNew = LIST_FIRST(h_NewPointersLst);
++
++ /* Invoke host-command to copy from the new Ad to existing Ads */
++ err = DynamicChangeHc(h_FmPcd, h_OldPointersLst, h_NewPointersLst,
++ p_AdditionalParams, useShadowStructs);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (useShadowStructs)
++ {
++ /* When the host command above has ended, the old structures are 'free' and we can update
++ them by copying from the new shadow structures. */
++ if (p_CcNode->lclMask)
++ keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction);
++ else
++ keySize = p_CcNode->ccKeySizeAccExtraction;
++
++ MemCpy8(p_AdditionalParams->p_KeysMatchTableOld,
++ p_AdditionalParams->p_KeysMatchTableNew,
++ p_CcNode->maxNumOfKeys * keySize * sizeof(uint8_t));
++
++ MemCpy8(
++ p_AdditionalParams->p_AdTableOld,
++ p_AdditionalParams->p_AdTableNew,
++ (uint32_t)((p_CcNode->maxNumOfKeys + 1)
++ * FM_PCD_CC_AD_ENTRY_SIZE));
++
++ /* Retrieve the address of the allocated Ad */
++ p_CcNodeInfo = CC_NODE_F_OBJECT(p_PosNew);
++ h_Ad = p_CcNodeInfo->h_CcNode;
++
++ /* Build a new Ad that holds the old (now updated) structures */
++ p_AdditionalParams->p_KeysMatchTableNew =
++ p_AdditionalParams->p_KeysMatchTableOld;
++ p_AdditionalParams->p_AdTableNew = p_AdditionalParams->p_AdTableOld;
++
++ nextEngineParams.nextEngine = e_FM_PCD_CC;
++ nextEngineParams.params.ccParams.h_CcNode = (t_Handle)p_CcNode;
++
++ BuildNewAd(h_Ad, p_AdditionalParams, p_CcNode, &nextEngineParams);
++
++ /* HC to copy from the new Ad (old updated structures) to current Ad (uses shadow structures) */
++ err = DynamicChangeHc(h_FmPcd, h_OldPointersLst, h_NewPointersLst,
++ p_AdditionalParams, useShadowStructs);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++
++ err = ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst,
++ h_NewPointersLst,
++ p_AdditionalParams, useShadowStructs);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++#ifdef FM_CAPWAP_SUPPORT
++static bool IsCapwapApplSpecific(t_Handle h_Node)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_Node;
++ bool isManipForCapwapApplSpecificBuild = FALSE;
++ int i = 0;
++
++ ASSERT_COND(h_Node);
++    /* This function is assumed to be called only for INDEXED_FLOW_ID, so there is no 'miss' entry */
++ for (i = 0; i < p_CcNode->numOfKeys; i++)
++ {
++ if ( p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip &&
++ FmPcdManipIsCapwapApplSpecific(p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip))
++ {
++ isManipForCapwapApplSpecificBuild = TRUE;
++ break;
++ }
++ }
++ return isManipForCapwapApplSpecificBuild;
++
++}
++#endif /* FM_CAPWAP_SUPPORT */
++
++static t_Error CcUpdateParam(
++ t_Handle h_FmPcd, t_Handle h_PcdParams, t_Handle h_FmPort,
++ t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParams,
++ uint16_t numOfEntries, t_Handle h_Ad, bool validate, uint16_t level,
++ t_Handle h_FmTree, bool modify)
++{
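++    /* Recursively walk the CC graph: update the tree's reassembly manips,
++       then each entry's manip, descending into nested CC nodes. */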
++ t_FmPcdCcNode *p_CcNode;
++ t_Error err;
++ uint16_t tmp = 0;
++ int i = 0;
++ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_FmTree;
++
++ level++;
++
++ if (p_CcTree->h_IpReassemblyManip)
++ {
++ err = FmPcdManipUpdate(h_FmPcd, h_PcdParams, h_FmPort,
++ p_CcTree->h_IpReassemblyManip, NULL, validate,
++ level, h_FmTree, modify);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if (p_CcTree->h_CapwapReassemblyManip)
++ {
++ err = FmPcdManipUpdate(h_FmPcd, h_PcdParams, h_FmPort,
++ p_CcTree->h_CapwapReassemblyManip, NULL, validate,
++ level, h_FmTree, modify);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if (numOfEntries)
++ {
++ for (i = 0; i < numOfEntries; i++)
++ {
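++            /* Advance to the i-th action descriptor; on the first iteration
++               the offset is zero, so h_Ad remains at the table base. */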
++ if (i == 0)
++ h_Ad = PTR_MOVE(h_Ad, i*FM_PCD_CC_AD_ENTRY_SIZE);
++ else
++ h_Ad = PTR_MOVE(h_Ad, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ if (p_CcKeyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ {
++ p_CcNode =
++ p_CcKeyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
++ ASSERT_COND(p_CcNode);
++
++ if (p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip)
++ {
++ err =
++ FmPcdManipUpdate(
++ h_FmPcd,
++ NULL,
++ h_FmPort,
++ p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip,
++ h_Ad, validate, level, h_FmTree, modify);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if (p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine
++ != e_FM_PCD_INVALID)
++                    tmp = (uint16_t)(p_CcNode->numOfKeys + 1);
++ else
++ tmp = p_CcNode->numOfKeys;
++
++ err = CcUpdateParam(h_FmPcd, h_PcdParams, h_FmPort,
++ p_CcNode->keyAndNextEngineParams, tmp,
++ p_CcNode->h_AdTable, validate, level,
++ h_FmTree, modify);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ else
++ {
++ if (p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip)
++ {
++ err =
++ FmPcdManipUpdate(
++ h_FmPcd,
++ NULL,
++ h_FmPort,
++ p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip,
++ h_Ad, validate, level, h_FmTree, modify);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++ }
++ }
++
++ return E_OK;
++}
++
++static ccPrivateInfo_t IcDefineCode(t_FmPcdCcNodeParams *p_CcNodeParam)
++{
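++    /* Map the non-header extraction (action, source) pair onto the internal
++       'private info' classification code. */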
++ switch (p_CcNodeParam->extractCcParams.extractNonHdr.action)
++ {
++ case (e_FM_PCD_ACTION_EXACT_MATCH):
++ switch (p_CcNodeParam->extractCcParams.extractNonHdr.src)
++ {
++ case (e_FM_PCD_EXTRACT_FROM_KEY):
++ return CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH;
++ case (e_FM_PCD_EXTRACT_FROM_HASH):
++ return CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH;
++ default:
++ return CC_PRIVATE_INFO_NONE;
++ }
++
++ case (e_FM_PCD_ACTION_INDEXED_LOOKUP):
++ switch (p_CcNodeParam->extractCcParams.extractNonHdr.src)
++ {
++ case (e_FM_PCD_EXTRACT_FROM_HASH):
++ return CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP;
++ case (e_FM_PCD_EXTRACT_FROM_FLOW_ID):
++ return CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP;
++ default:
++ return CC_PRIVATE_INFO_NONE;
++ }
++
++ default:
++ break;
++ }
++
++ return CC_PRIVATE_INFO_NONE;
++}
++
++static t_CcNodeInformation * DequeueAdditionalInfoFromRelevantLst(
++ t_List *p_List)
++{
++ t_CcNodeInformation *p_CcNodeInfo = NULL;
++
++ if (!LIST_IsEmpty(p_List))
++ {
++ p_CcNodeInfo = CC_NODE_F_OBJECT(p_List->p_Next);
++ LIST_DelAndInit(&p_CcNodeInfo->node);
++ }
++
++ return p_CcNodeInfo;
++}
++
++void ReleaseLst(t_List *p_List)
++{
++ t_CcNodeInformation *p_CcNodeInfo = NULL;
++
++ if (!LIST_IsEmpty(p_List))
++ {
++ p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List);
++ while (p_CcNodeInfo)
++ {
++ XX_Free(p_CcNodeInfo);
++ p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List);
++ }
++ }
++
++ LIST_Del(p_List);
++}
++
++static void DeleteNode(t_FmPcdCcNode *p_CcNode)
++{
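++    /* Free everything the node owns: MURAM tables, ADs, statistics objects,
++       spinlock and bookkeeping lists, and finally the node itself. */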
++ uint32_t i;
++
++ if (!p_CcNode)
++ return;
++
++ if (p_CcNode->p_GlblMask)
++ {
++ XX_Free(p_CcNode->p_GlblMask);
++ p_CcNode->p_GlblMask = NULL;
++ }
++
++ if (p_CcNode->h_KeysMatchTable)
++ {
++ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
++ p_CcNode->h_KeysMatchTable);
++ p_CcNode->h_KeysMatchTable = NULL;
++ }
++
++ if (p_CcNode->h_AdTable)
++ {
++ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
++ p_CcNode->h_AdTable);
++ p_CcNode->h_AdTable = NULL;
++ }
++
++ if (p_CcNode->h_Ad)
++ {
++ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
++ p_CcNode->h_Ad);
++ p_CcNode->h_Ad = NULL;
++ p_CcNode->h_TmpAd = NULL;
++ }
++
++ if (p_CcNode->h_StatsFLRs)
++ {
++ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
++ p_CcNode->h_StatsFLRs);
++ p_CcNode->h_StatsFLRs = NULL;
++ }
++
++ if (p_CcNode->h_Spinlock)
++ {
++ XX_FreeSpinlock(p_CcNode->h_Spinlock);
++ p_CcNode->h_Spinlock = NULL;
++ }
++
++    /* Restore the original counters pointer instead of the shared pointer (common to all hash buckets) */
++ if (p_CcNode->isHashBucket
++ && (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE))
++ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].p_StatsObj->h_StatsCounters =
++ p_CcNode->h_PrivMissStatsCounters;
++
++ /* Releasing all currently used statistics objects, including 'miss' entry */
++ for (i = 0; i < p_CcNode->numOfKeys + 1; i++)
++ if (p_CcNode->keyAndNextEngineParams[i].p_StatsObj)
++ PutStatsObj(p_CcNode,
++ p_CcNode->keyAndNextEngineParams[i].p_StatsObj);
++
++ if (!LIST_IsEmpty(&p_CcNode->availableStatsLst))
++ {
++ t_Handle h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd);
++ ASSERT_COND(h_FmMuram);
++
++ FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
++ }
++
++ LIST_Del(&p_CcNode->availableStatsLst);
++
++ ReleaseLst(&p_CcNode->availableStatsLst);
++ ReleaseLst(&p_CcNode->ccPrevNodesLst);
++ ReleaseLst(&p_CcNode->ccTreeIdLst);
++ ReleaseLst(&p_CcNode->ccTreesLst);
++
++ XX_Free(p_CcNode);
++}
++
++static void DeleteTree(t_FmPcdCcTree *p_FmPcdTree, t_FmPcd *p_FmPcd)
++{
++ if (p_FmPcdTree)
++ {
++ if (p_FmPcdTree->ccTreeBaseAddr)
++ {
++ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcd),
++ UINT_TO_PTR(p_FmPcdTree->ccTreeBaseAddr));
++ p_FmPcdTree->ccTreeBaseAddr = 0;
++ }
++
++ ReleaseLst(&p_FmPcdTree->fmPortsLst);
++
++ XX_Free(p_FmPcdTree);
++ }
++}
++
++static void GetCcExtractKeySize(uint8_t parseCodeRealSize,
++ uint8_t *parseCodeCcSize)
++{
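++    /* Round the real extraction size up to the nearest key size supported
++       by the CC hardware (1, 2, 4, 8, 16, 24, 32, 40, 48 or 56 bytes). */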
++    if ((parseCodeRealSize > 0) && (parseCodeRealSize < 2))
++        *parseCodeCcSize = 1;
++    else if (parseCodeRealSize == 2)
++        *parseCodeCcSize = 2;
++    else if ((parseCodeRealSize > 2) && (parseCodeRealSize <= 4))
++        *parseCodeCcSize = 4;
++    else if ((parseCodeRealSize > 4) && (parseCodeRealSize <= 8))
++        *parseCodeCcSize = 8;
++    else if ((parseCodeRealSize > 8) && (parseCodeRealSize <= 16))
++        *parseCodeCcSize = 16;
++    else if ((parseCodeRealSize > 16) && (parseCodeRealSize <= 24))
++        *parseCodeCcSize = 24;
++    else if ((parseCodeRealSize > 24) && (parseCodeRealSize <= 32))
++        *parseCodeCcSize = 32;
++    else if ((parseCodeRealSize > 32) && (parseCodeRealSize <= 40))
++        *parseCodeCcSize = 40;
++    else if ((parseCodeRealSize > 40) && (parseCodeRealSize <= 48))
++        *parseCodeCcSize = 48;
++    else if ((parseCodeRealSize > 48) && (parseCodeRealSize <= 56))
++        *parseCodeCcSize = 56;
++    else
++        *parseCodeCcSize = 0;
++}
++
++static void GetSizeHeaderField(e_NetHeaderType hdr, t_FmPcdFields field,
++ uint8_t *parseCodeRealSize)
++{
++ switch (hdr)
++ {
++ case (HEADER_TYPE_ETH):
++ switch (field.eth)
++ {
++ case (NET_HEADER_FIELD_ETH_DA):
++ *parseCodeRealSize = 6;
++ break;
++
++ case (NET_HEADER_FIELD_ETH_SA):
++ *parseCodeRealSize = 6;
++ break;
++
++ case (NET_HEADER_FIELD_ETH_TYPE):
++ *parseCodeRealSize = 2;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_PPPoE):
++ switch (field.pppoe)
++ {
++ case (NET_HEADER_FIELD_PPPoE_PID):
++ *parseCodeRealSize = 2;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_VLAN):
++ switch (field.vlan)
++ {
++ case (NET_HEADER_FIELD_VLAN_TCI):
++ *parseCodeRealSize = 2;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported2"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_MPLS):
++ switch (field.mpls)
++ {
++ case (NET_HEADER_FIELD_MPLS_LABEL_STACK):
++ *parseCodeRealSize = 4;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported3"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_IPv4):
++ switch (field.ipv4)
++ {
++ case (NET_HEADER_FIELD_IPv4_DST_IP):
++ case (NET_HEADER_FIELD_IPv4_SRC_IP):
++ *parseCodeRealSize = 4;
++ break;
++
++ case (NET_HEADER_FIELD_IPv4_TOS):
++ case (NET_HEADER_FIELD_IPv4_PROTO):
++ *parseCodeRealSize = 1;
++ break;
++
++ case (NET_HEADER_FIELD_IPv4_DST_IP
++ | NET_HEADER_FIELD_IPv4_SRC_IP):
++ *parseCodeRealSize = 8;
++ break;
++
++ case (NET_HEADER_FIELD_IPv4_TTL):
++ *parseCodeRealSize = 1;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported4"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_IPv6):
++ switch (field.ipv6)
++ {
++ case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL
++ | NET_HEADER_FIELD_IPv6_TC):
++ *parseCodeRealSize = 4;
++ break;
++
++ case (NET_HEADER_FIELD_IPv6_NEXT_HDR):
++ case (NET_HEADER_FIELD_IPv6_HOP_LIMIT):
++ *parseCodeRealSize = 1;
++ break;
++
++ case (NET_HEADER_FIELD_IPv6_DST_IP):
++ case (NET_HEADER_FIELD_IPv6_SRC_IP):
++ *parseCodeRealSize = 16;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported5"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_IP):
++ switch (field.ip)
++ {
++ case (NET_HEADER_FIELD_IP_DSCP):
++ case (NET_HEADER_FIELD_IP_PROTO):
++ *parseCodeRealSize = 1;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported5"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_GRE):
++ switch (field.gre)
++ {
++ case (NET_HEADER_FIELD_GRE_TYPE):
++ *parseCodeRealSize = 2;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported6"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_MINENCAP):
++ switch (field.minencap)
++ {
++ case (NET_HEADER_FIELD_MINENCAP_TYPE):
++ *parseCodeRealSize = 1;
++ break;
++
++ case (NET_HEADER_FIELD_MINENCAP_DST_IP):
++ case (NET_HEADER_FIELD_MINENCAP_SRC_IP):
++ *parseCodeRealSize = 4;
++ break;
++
++ case (NET_HEADER_FIELD_MINENCAP_SRC_IP
++ | NET_HEADER_FIELD_MINENCAP_DST_IP):
++ *parseCodeRealSize = 8;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported7"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_TCP):
++ switch (field.tcp)
++ {
++ case (NET_HEADER_FIELD_TCP_PORT_SRC):
++ case (NET_HEADER_FIELD_TCP_PORT_DST):
++ *parseCodeRealSize = 2;
++ break;
++
++ case (NET_HEADER_FIELD_TCP_PORT_SRC
++ | NET_HEADER_FIELD_TCP_PORT_DST):
++ *parseCodeRealSize = 4;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported8"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ case (HEADER_TYPE_UDP):
++ switch (field.udp)
++ {
++ case (NET_HEADER_FIELD_UDP_PORT_SRC):
++ case (NET_HEADER_FIELD_UDP_PORT_DST):
++ *parseCodeRealSize = 2;
++ break;
++
++ case (NET_HEADER_FIELD_UDP_PORT_SRC
++ | NET_HEADER_FIELD_UDP_PORT_DST):
++ *parseCodeRealSize = 4;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported9"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported10"));
++ *parseCodeRealSize = CC_SIZE_ILLEGAL;
++ break;
++ }
++}
++
++t_Error ValidateNextEngineParams(
++ t_Handle h_FmPcd, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,
++ e_FmPcdCcStatsMode statsMode)
++{
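++    /* Sanity-check a key's next-engine parameters before they are committed:
++       statistics mode, FQID range, KG scheme validity, policer profile, etc. */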
++ uint16_t absoluteProfileId;
++ t_Error err = E_OK;
++ uint8_t relativeSchemeId;
++
++ if ((statsMode == e_FM_PCD_CC_STATS_MODE_NONE)
++ && (p_FmPcdCcNextEngineParams->statisticsEn))
++ RETURN_ERROR(
++ MAJOR,
++ E_CONFLICT,
++            ("Statistics are requested for a key, but statistics mode was set "
++             "to 'NONE' upon initialization"));
++
++ switch (p_FmPcdCcNextEngineParams->nextEngine)
++ {
++ case (e_FM_PCD_INVALID):
++ err = E_NOT_SUPPORTED;
++ break;
++
++ case (e_FM_PCD_DONE):
++ if ((p_FmPcdCcNextEngineParams->params.enqueueParams.action
++ == e_FM_PCD_ENQ_FRAME)
++ && p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid)
++ {
++ if (!p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid)
++ RETURN_ERROR(
++ MAJOR,
++ E_CONFLICT,
++ ("When overrideFqid is set, newFqid must not be zero"));
++ if (p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid
++ & ~0x00FFFFFF)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++                    ("newFqid must be between 1 and 2^24-1"));
++ }
++ break;
++
++ case (e_FM_PCD_KG):
++ relativeSchemeId =
++ FmPcdKgGetRelativeSchemeId(
++ h_FmPcd,
++ FmPcdKgGetSchemeId(
++ p_FmPcdCcNextEngineParams->params.kgParams.h_DirectScheme));
++ if (relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++ if (!FmPcdKgIsSchemeValidSw(
++ p_FmPcdCcNextEngineParams->params.kgParams.h_DirectScheme))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("not valid schemeIndex in KG next engine param"));
++ if (!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("CC Node may point only to a scheme that is always direct."));
++ break;
++
++ case (e_FM_PCD_PLCR):
++ if (p_FmPcdCcNextEngineParams->params.plcrParams.overrideParams)
++ {
++            /* If this is a private policer profile, it may not be initialized yet, therefore no checks are done at this stage */
++ if (p_FmPcdCcNextEngineParams->params.plcrParams.sharedProfile)
++ {
++ err =
++ FmPcdPlcrGetAbsoluteIdByProfileParams(
++ h_FmPcd,
++ e_FM_PCD_PLCR_SHARED,
++ NULL,
++ p_FmPcdCcNextEngineParams->params.plcrParams.newRelativeProfileId,
++ &absoluteProfileId);
++ if (err)
++ RETURN_ERROR(MAJOR, err,
++ ("Shared profile offset is out of range"));
++ if (!FmPcdPlcrIsProfileValid(h_FmPcd, absoluteProfileId))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Invalid profile"));
++ }
++ }
++ break;
++
++ case (e_FM_PCD_HASH):
++ p_FmPcdCcNextEngineParams->nextEngine = e_FM_PCD_CC;
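++            /* fall through - a HASH next engine is handled as a CC node */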
++ case (e_FM_PCD_CC):
++ if (!p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode)
++ RETURN_ERROR(MAJOR, E_NULL_POINTER,
++ ("handler to next Node is NULL"));
++ break;
++
++#if (DPAA_VERSION >= 11)
++ case (e_FM_PCD_FR):
++ if (!p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic)
++ err = E_NOT_SUPPORTED;
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Next engine is not correct"));
++ }
++
++ return err;
++}
++
++static uint8_t GetGenParseCode(e_FmPcdExtractFrom src,
++ uint32_t offset, bool glblMask,
++ uint8_t *parseArrayOffset, bool fromIc,
++ ccPrivateInfo_t icCode)
++{
++ if (!fromIc)
++ {
++ switch (src)
++ {
++ case (e_FM_PCD_EXTRACT_FROM_FRAME_START):
++ if (glblMask)
++ return CC_PC_GENERIC_WITH_MASK;
++ else
++ return CC_PC_GENERIC_WITHOUT_MASK;
++
++ case (e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE):
++ *parseArrayOffset = CC_PC_PR_NEXT_HEADER_OFFSET;
++ if (offset)
++ return CC_PR_OFFSET;
++ else
++ return CC_PR_WITHOUT_OFFSET;
++
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src"));
++ return CC_PC_ILLEGAL;
++ }
++ }
++ else
++ {
++ switch (icCode)
++ {
++ case (CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH):
++ *parseArrayOffset = 0x50;
++ return CC_PC_GENERIC_IC_GMASK;
++
++ case (CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH):
++ *parseArrayOffset = 0x48;
++ return CC_PC_GENERIC_IC_GMASK;
++
++ case (CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP):
++ *parseArrayOffset = 0x48;
++ return CC_PC_GENERIC_IC_HASH_INDEXED;
++
++ case (CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP):
++ *parseArrayOffset = 0x16;
++ return CC_PC_GENERIC_IC_HASH_INDEXED;
++
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src"));
++ break;
++ }
++ }
++
++ return CC_PC_ILLEGAL;
++}
++
++static uint8_t GetFullFieldParseCode(e_NetHeaderType hdr, e_FmPcdHdrIndex index,
++ t_FmPcdFields field)
++{
++ switch (hdr)
++ {
++ case (HEADER_TYPE_NONE):
++ ASSERT_COND(FALSE);
++ return CC_PC_ILLEGAL;
++
++ case (HEADER_TYPE_ETH):
++ switch (field.eth)
++ {
++ case (NET_HEADER_FIELD_ETH_DA):
++ return CC_PC_FF_MACDST;
++ case (NET_HEADER_FIELD_ETH_SA):
++ return CC_PC_FF_MACSRC;
++ case (NET_HEADER_FIELD_ETH_TYPE):
++ return CC_PC_FF_ETYPE;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_VLAN):
++ switch (field.vlan)
++ {
++ case (NET_HEADER_FIELD_VLAN_TCI):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_TCI1;
++ if (index == e_FM_PCD_HDR_INDEX_LAST)
++ return CC_PC_FF_TCI2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_MPLS):
++ switch (field.mpls)
++ {
++ case (NET_HEADER_FIELD_MPLS_LABEL_STACK):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_MPLS1;
++ if (index == e_FM_PCD_HDR_INDEX_LAST)
++ return CC_PC_FF_MPLS_LAST;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS index"));
++ return CC_PC_ILLEGAL;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_IPv4):
++ switch (field.ipv4)
++ {
++ case (NET_HEADER_FIELD_IPv4_DST_IP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPV4DST1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return CC_PC_FF_IPV4DST2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
++ return CC_PC_ILLEGAL;
++ case (NET_HEADER_FIELD_IPv4_TOS):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPV4IPTOS_TC1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return CC_PC_FF_IPV4IPTOS_TC2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
++ return CC_PC_ILLEGAL;
++ case (NET_HEADER_FIELD_IPv4_PROTO):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPV4PTYPE1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return CC_PC_FF_IPV4PTYPE2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
++ return CC_PC_ILLEGAL;
++ case (NET_HEADER_FIELD_IPv4_SRC_IP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPV4SRC1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return CC_PC_FF_IPV4SRC2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
++ return CC_PC_ILLEGAL;
++ case (NET_HEADER_FIELD_IPv4_SRC_IP
++ | NET_HEADER_FIELD_IPv4_DST_IP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPV4SRC1_IPV4DST1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return CC_PC_FF_IPV4SRC2_IPV4DST2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
++ return CC_PC_ILLEGAL;
++ case (NET_HEADER_FIELD_IPv4_TTL):
++ return CC_PC_FF_IPV4TTL;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_IPv6):
++ switch (field.ipv6)
++ {
++ case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL
++ | NET_HEADER_FIELD_IPv6_TC):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return CC_PC_ILLEGAL;
++
++ case (NET_HEADER_FIELD_IPv6_NEXT_HDR):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPV6PTYPE1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return CC_PC_FF_IPV6PTYPE2;
++ if (index == e_FM_PCD_HDR_INDEX_LAST)
++ return CC_PC_FF_IPPID;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return CC_PC_ILLEGAL;
++
++ case (NET_HEADER_FIELD_IPv6_DST_IP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPV6DST1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return CC_PC_FF_IPV6DST2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return CC_PC_ILLEGAL;
++
++ case (NET_HEADER_FIELD_IPv6_SRC_IP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPV6SRC1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return CC_PC_FF_IPV6SRC2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return CC_PC_ILLEGAL;
++
++ case (NET_HEADER_FIELD_IPv6_HOP_LIMIT):
++ return CC_PC_FF_IPV6HOP_LIMIT;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_IP):
++ switch (field.ip)
++ {
++ case (NET_HEADER_FIELD_IP_DSCP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE)
++ || (index == e_FM_PCD_HDR_INDEX_1))
++ return CC_PC_FF_IPDSCP;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP index"));
++ return CC_PC_ILLEGAL;
++
++ case (NET_HEADER_FIELD_IP_PROTO):
++ if (index == e_FM_PCD_HDR_INDEX_LAST)
++ return CC_PC_FF_IPPID;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP index"));
++ return CC_PC_ILLEGAL;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_GRE):
++ switch (field.gre)
++ {
++ case (NET_HEADER_FIELD_GRE_TYPE):
++ return CC_PC_FF_GREPTYPE;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_MINENCAP):
++ switch (field.minencap)
++ {
++ case (NET_HEADER_FIELD_MINENCAP_TYPE):
++ return CC_PC_FF_MINENCAP_PTYPE;
++
++ case (NET_HEADER_FIELD_MINENCAP_DST_IP):
++ return CC_PC_FF_MINENCAP_IPDST;
++
++ case (NET_HEADER_FIELD_MINENCAP_SRC_IP):
++ return CC_PC_FF_MINENCAP_IPSRC;
++
++ case (NET_HEADER_FIELD_MINENCAP_SRC_IP
++ | NET_HEADER_FIELD_MINENCAP_DST_IP):
++ return CC_PC_FF_MINENCAP_IPSRC_IPDST;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_TCP):
++ switch (field.tcp)
++ {
++ case (NET_HEADER_FIELD_TCP_PORT_SRC):
++ return CC_PC_FF_L4PSRC;
++
++ case (NET_HEADER_FIELD_TCP_PORT_DST):
++ return CC_PC_FF_L4PDST;
++
++ case (NET_HEADER_FIELD_TCP_PORT_DST
++ | NET_HEADER_FIELD_TCP_PORT_SRC):
++ return CC_PC_FF_L4PSRC_L4PDST;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_PPPoE):
++ switch (field.pppoe)
++ {
++ case (NET_HEADER_FIELD_PPPoE_PID):
++ return CC_PC_FF_PPPPID;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ case (HEADER_TYPE_UDP):
++ switch (field.udp)
++ {
++ case (NET_HEADER_FIELD_UDP_PORT_SRC):
++ return CC_PC_FF_L4PSRC;
++
++ case (NET_HEADER_FIELD_UDP_PORT_DST):
++ return CC_PC_FF_L4PDST;
++
++ case (NET_HEADER_FIELD_UDP_PORT_DST
++ | NET_HEADER_FIELD_UDP_PORT_SRC):
++ return CC_PC_FF_L4PSRC_L4PDST;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++}
++
++static uint8_t GetPrParseCode(e_NetHeaderType hdr, e_FmPcdHdrIndex hdrIndex,
++ uint32_t offset, bool glblMask,
++ uint8_t *parseArrayOffset)
++{
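++    /* Select the parse-results array offset matching the given header type
++       and index; the return value tells whether an extra offset applies. */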
++ bool offsetRelevant = FALSE;
++
++ if (offset)
++ offsetRelevant = TRUE;
++
++ switch (hdr)
++ {
++ case (HEADER_TYPE_NONE):
++ ASSERT_COND(FALSE);
++ return CC_PC_ILLEGAL;
++
++ case (HEADER_TYPE_ETH):
++ *parseArrayOffset = (uint8_t)CC_PC_PR_ETH_OFFSET;
++ break;
++
++ case (HEADER_TYPE_USER_DEFINED_SHIM1):
++ if (offset || glblMask)
++ *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM1_OFFSET;
++ else
++ return CC_PC_PR_SHIM1;
++ break;
++
++ case (HEADER_TYPE_USER_DEFINED_SHIM2):
++ if (offset || glblMask)
++ *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM2_OFFSET;
++ else
++ return CC_PC_PR_SHIM2;
++ break;
++
++ case (HEADER_TYPE_LLC_SNAP):
++ *parseArrayOffset = CC_PC_PR_USER_LLC_SNAP_OFFSET;
++ break;
++
++ case (HEADER_TYPE_PPPoE):
++ *parseArrayOffset = CC_PC_PR_PPPOE_OFFSET;
++ break;
++
++ case (HEADER_TYPE_MPLS):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
++ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ *parseArrayOffset = CC_PC_PR_MPLS1_OFFSET;
++ else
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
++ *parseArrayOffset = CC_PC_PR_MPLS_LAST_OFFSET;
++ else
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index"));
++ return CC_PC_ILLEGAL;
++ }
++ break;
++
++ case (HEADER_TYPE_IPv4):
++ case (HEADER_TYPE_IPv6):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
++ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ *parseArrayOffset = CC_PC_PR_IP1_OFFSET;
++ else
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_2)
++ *parseArrayOffset = CC_PC_PR_IP_LAST_OFFSET;
++ else
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header index"));
++ return CC_PC_ILLEGAL;
++ }
++ break;
++
++ case (HEADER_TYPE_MINENCAP):
++ *parseArrayOffset = CC_PC_PR_MINENC_OFFSET;
++ break;
++
++ case (HEADER_TYPE_GRE):
++ *parseArrayOffset = CC_PC_PR_GRE_OFFSET;
++ break;
++
++ case (HEADER_TYPE_TCP):
++ case (HEADER_TYPE_UDP):
++ case (HEADER_TYPE_IPSEC_AH):
++ case (HEADER_TYPE_IPSEC_ESP):
++ case (HEADER_TYPE_DCCP):
++ case (HEADER_TYPE_SCTP):
++ *parseArrayOffset = CC_PC_PR_L4_OFFSET;
++ break;
++
++ default:
++            REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal header for this type of operation"));
++ return CC_PC_ILLEGAL;
++ }
++
++ if (offsetRelevant)
++ return CC_PR_OFFSET;
++ else
++ return CC_PR_WITHOUT_OFFSET;
++}
++
++static uint8_t GetFieldParseCode(e_NetHeaderType hdr, t_FmPcdFields field,
++ uint32_t offset, uint8_t *parseArrayOffset,
++ e_FmPcdHdrIndex hdrIndex)
++{
++ bool offsetRelevant = FALSE;
++
++ if (offset)
++ offsetRelevant = TRUE;
++
++ switch (hdr)
++ {
++ case (HEADER_TYPE_NONE):
++ ASSERT_COND(FALSE);
++ break;
++ case (HEADER_TYPE_ETH):
++ switch (field.eth)
++ {
++ case (NET_HEADER_FIELD_ETH_TYPE):
++ *parseArrayOffset = CC_PC_PR_ETYPE_LAST_OFFSET;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++ break;
++
++ case (HEADER_TYPE_VLAN):
++ switch (field.vlan)
++ {
++ case (NET_HEADER_FIELD_VLAN_TCI):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
++ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ *parseArrayOffset = CC_PC_PR_VLAN1_OFFSET;
++ else
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
++ *parseArrayOffset = CC_PC_PR_VLAN2_OFFSET;
++ break;
++
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return CC_PC_ILLEGAL;
++ }
++ break;
++
++ default:
++            REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal header"));
++ return CC_PC_ILLEGAL;
++ }
++
++ if (offsetRelevant)
++ return CC_PR_OFFSET;
++ else
++ return CC_PR_WITHOUT_OFFSET;
++}
++
++static void FillAdOfTypeResult(t_Handle h_Ad,
++ t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
++ t_FmPcd *p_FmPcd,
++ t_FmPcdCcNextEngineParams *p_CcNextEngineParams)
++{
++ t_AdOfTypeResult *p_AdResult = (t_AdOfTypeResult *)h_Ad;
++ t_Handle h_TmpAd;
++ uint32_t tmp = 0, tmpNia = 0;
++ uint16_t profileId;
++ t_Handle p_AdNewPtr = NULL;
++ t_Error err = E_OK;
++
++    /* This routine handles three cases of building a "result" type AD.
++     * Case 1: No Manip. The action descriptor is built within the match table.
++     * Case 2: Manip exists. A new AD is created - p_AdNewPtr. It is initialized
++     * either in the FmPcdManipUpdateAdResultForCc routine or it was already
++     * initialized and is returned here.
++     * p_AdResult (within the match table) will be initialized after
++     * this routine returns and will point to the existing AD.
++     * Case 3: Manip exists. The action descriptor is built within the match table.
++     * FmPcdManipUpdateAdResultForCc returns a NULL p_AdNewPtr.
++     *
++     * If statistics were enabled and the statistics mode of this node requires
++     * a statistics Ad, it will be placed after the result Ad and before the
++     * manip Ad, if a manip Ad exists.
++     */
++
++    /* By default, the "new" pointer is the current one, i.e. the content of the
++     * result AD will be written into the match table itself (case (1)). */
++ p_AdNewPtr = p_AdResult;
++
++ /* Initialize an action descriptor, if current statistics mode requires an Ad */
++ if (p_FmPcdCcStatsParams)
++ {
++ ASSERT_COND(p_FmPcdCcStatsParams->h_StatsAd);
++ ASSERT_COND(p_FmPcdCcStatsParams->h_StatsCounters);
++
++ /* Swapping addresses between statistics Ad and the current lookup AD addresses */
++ h_TmpAd = p_FmPcdCcStatsParams->h_StatsAd;
++ p_FmPcdCcStatsParams->h_StatsAd = h_Ad;
++ h_Ad = h_TmpAd;
++
++ p_AdNewPtr = h_Ad;
++ p_AdResult = h_Ad;
++
++ /* Init statistics Ad and connect current lookup AD as 'next action' from statistics Ad */
++ UpdateStatsAd(p_FmPcdCcStatsParams, h_Ad, p_FmPcd->physicalMuramBase);
++ }
++
++ /* Create manip and return p_AdNewPtr to either a new descriptor or NULL */
++ if (p_CcNextEngineParams->h_Manip)
++ FmPcdManipUpdateAdResultForCc(p_CcNextEngineParams->h_Manip,
++ p_CcNextEngineParams, h_Ad, &p_AdNewPtr);
++
++    /* if (p_AdNewPtr == NULL) --> Done. (case (3)) */
++ if (p_AdNewPtr)
++ {
++ /* case (1) and (2) */
++ switch (p_CcNextEngineParams->nextEngine)
++ {
++ case (e_FM_PCD_DONE):
++ if (p_CcNextEngineParams->params.enqueueParams.action
++ == e_FM_PCD_ENQ_FRAME)
++ {
++ if (p_CcNextEngineParams->params.enqueueParams.overrideFqid)
++ {
++ tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE;
++ tmp |=
++ p_CcNextEngineParams->params.enqueueParams.newFqid;
++#if (DPAA_VERSION >= 11)
++ tmp |=
++ (p_CcNextEngineParams->params.enqueueParams.newRelativeStorageProfileId
++ & FM_PCD_AD_RESULT_VSP_MASK)
++ << FM_PCD_AD_RESULT_VSP_SHIFT;
++#endif /* (DPAA_VERSION >= 11) */
++ }
++ else
++ {
++ tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE;
++ tmp |= FM_PCD_AD_RESULT_PLCR_DIS;
++ }
++ }
++
++ if (p_CcNextEngineParams->params.enqueueParams.action
++ == e_FM_PCD_DROP_FRAME)
++ tmpNia |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd);
++ else
++ tmpNia |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
++ break;
++
++ case (e_FM_PCD_KG):
++ if (p_CcNextEngineParams->params.kgParams.overrideFqid)
++ {
++ tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE;
++ tmp |= p_CcNextEngineParams->params.kgParams.newFqid;
++#if (DPAA_VERSION >= 11)
++ tmp |=
++ (p_CcNextEngineParams->params.kgParams.newRelativeStorageProfileId
++ & FM_PCD_AD_RESULT_VSP_MASK)
++ << FM_PCD_AD_RESULT_VSP_SHIFT;
++#endif /* (DPAA_VERSION >= 11) */
++ }
++ else
++ {
++ tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE;
++ tmp |= FM_PCD_AD_RESULT_PLCR_DIS;
++ }
++ tmpNia = NIA_KG_DIRECT;
++ tmpNia |= NIA_ENG_KG;
++ tmpNia |= NIA_KG_CC_EN;
++ tmpNia |= FmPcdKgGetSchemeId(
++ p_CcNextEngineParams->params.kgParams.h_DirectScheme);
++ break;
++
++ case (e_FM_PCD_PLCR):
++ if (p_CcNextEngineParams->params.plcrParams.overrideParams)
++ {
++ tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE;
++
++                    /* If this is a private policer profile, it may not be initialized yet, therefore no checks are done at this stage */
++ if (p_CcNextEngineParams->params.plcrParams.sharedProfile)
++ {
++ tmpNia |= NIA_PLCR_ABSOLUTE;
++ err = FmPcdPlcrGetAbsoluteIdByProfileParams(
++ (t_Handle)p_FmPcd,
++ e_FM_PCD_PLCR_SHARED,
++ NULL,
++ p_CcNextEngineParams->params.plcrParams.newRelativeProfileId,
++ &profileId);
++
++ if (err != E_OK) {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return;
++ }
++
++ }
++ else
++ profileId =
++ p_CcNextEngineParams->params.plcrParams.newRelativeProfileId;
++
++ tmp |= p_CcNextEngineParams->params.plcrParams.newFqid;
++#if (DPAA_VERSION >= 11)
++ tmp |=
++ (p_CcNextEngineParams->params.plcrParams.newRelativeStorageProfileId
++ & FM_PCD_AD_RESULT_VSP_MASK)
++ << FM_PCD_AD_RESULT_VSP_SHIFT;
++#endif /* (DPAA_VERSION >= 11) */
++ WRITE_UINT32(
++ p_AdResult->plcrProfile,
++ (uint32_t)((uint32_t)profileId << FM_PCD_AD_PROFILEID_FOR_CNTRL_SHIFT));
++ }
++ else
++ tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE;
++
++ tmpNia |=
++ NIA_ENG_PLCR
++ | p_CcNextEngineParams->params.plcrParams.newRelativeProfileId;
++ break;
++
++ default:
++ return;
++        }
++
++        WRITE_UINT32(p_AdResult->fqid, tmp);
++
++ if (p_CcNextEngineParams->h_Manip)
++ {
++ tmp = GET_UINT32(p_AdResult->plcrProfile);
++ tmp |= (uint32_t)(XX_VirtToPhys(p_AdNewPtr)
++ - (p_FmPcd->physicalMuramBase)) >> 4;
++ WRITE_UINT32(p_AdResult->plcrProfile, tmp);
++
++ tmpNia |= FM_PCD_AD_RESULT_EXTENDED_MODE;
++ tmpNia |= FM_PCD_AD_RESULT_NADEN;
++ }
++
++#if (DPAA_VERSION >= 11)
++ tmpNia |= FM_PCD_AD_RESULT_NO_OM_VSPE;
++#endif /* (DPAA_VERSION >= 11) */
++ WRITE_UINT32(p_AdResult->nia, tmpNia);
++ }
++}
++
++static t_Error CcUpdateParams(t_Handle h_FmPcd, t_Handle h_PcdParams,
++ t_Handle h_FmPort, t_Handle h_FmTree,
++ bool validate)
++{
++ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_FmTree;
++
++ return CcUpdateParam(h_FmPcd, h_PcdParams, h_FmPort,
++ p_CcTree->keyAndNextEngineParams,
++ p_CcTree->numOfEntries,
++ UINT_TO_PTR(p_CcTree->ccTreeBaseAddr), validate, 0,
++ h_FmTree, FALSE);
++}
++
++
++static void ReleaseNewNodeCommonPart(
++ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
++{
++ if (p_AdditionalInfo->p_AdTableNew)
++ FM_MURAM_FreeMem(
++ FmPcdGetMuramHandle(
++ ((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd),
++ p_AdditionalInfo->p_AdTableNew);
++
++ if (p_AdditionalInfo->p_KeysMatchTableNew)
++ FM_MURAM_FreeMem(
++ FmPcdGetMuramHandle(
++ ((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd),
++ p_AdditionalInfo->p_KeysMatchTableNew);
++}
++
++static t_Error UpdateGblMask(t_FmPcdCcNode *p_CcNode, uint8_t keySize,
++ uint8_t *p_Mask)
++{
++ uint8_t prvGlblMaskSize = p_CcNode->glblMaskSize;
++
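++    /* Choose between the shared global mask (up to 4 bytes, allowed only for
++       certain parse codes) and a per-key local mask; any mismatch forces the
++       node over to local masks. */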
++ if (p_Mask && !p_CcNode->glblMaskUpdated && (keySize <= 4)
++ && !p_CcNode->lclMask)
++ {
++ if (p_CcNode->parseCode && (p_CcNode->parseCode != CC_PC_FF_TCI1)
++ && (p_CcNode->parseCode != CC_PC_FF_TCI2)
++ && (p_CcNode->parseCode != CC_PC_FF_MPLS1)
++ && (p_CcNode->parseCode != CC_PC_FF_MPLS_LAST)
++ && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1)
++ && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2)
++ && (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1)
++ && (p_CcNode->parseCode != CC_PC_FF_IPDSCP)
++ && (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2))
++ {
++ p_CcNode->glblMaskSize = 0;
++ p_CcNode->lclMask = TRUE;
++ }
++ else
++ {
++ memcpy(p_CcNode->p_GlblMask, p_Mask, (sizeof(uint8_t)) * keySize);
++ p_CcNode->glblMaskUpdated = TRUE;
++ p_CcNode->glblMaskSize = 4;
++ }
++ }
++ else
++ if (p_Mask && (keySize <= 4) && !p_CcNode->lclMask)
++ {
++ if (memcmp(p_CcNode->p_GlblMask, p_Mask, keySize) != 0)
++ {
++ p_CcNode->lclMask = TRUE;
++ p_CcNode->glblMaskSize = 0;
++ }
++ }
++ else
++ if (!p_Mask && p_CcNode->glblMaskUpdated && (keySize <= 4))
++ {
++ uint32_t tmpMask = 0xffffffff;
++ if (memcmp(p_CcNode->p_GlblMask, &tmpMask, 4) != 0)
++ {
++ p_CcNode->lclMask = TRUE;
++ p_CcNode->glblMaskSize = 0;
++ }
++ }
++ else
++ if (p_Mask)
++ {
++ p_CcNode->lclMask = TRUE;
++ p_CcNode->glblMaskSize = 0;
++ }
++
++    /* In static mode (maxNumOfKeys > 0), a local mask is supported
++     only if mask support was enabled at initialization */
++ if (p_CcNode->maxNumOfKeys && (!p_CcNode->maskSupport) && p_CcNode->lclMask)
++ {
++ p_CcNode->lclMask = FALSE;
++ p_CcNode->glblMaskSize = prvGlblMaskSize;
++ return ERROR_CODE(E_NOT_SUPPORTED);
++ }
++
++ return E_OK;
++}
++
++static __inline__ t_Handle GetNewAd(t_Handle h_FmPcdCcNodeOrTree, bool isTree)
++{
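++    /* Trees with a shadow area and nodes created with maxNumOfKeys reuse the
++       pre-allocated shadow AD; otherwise a fresh AD is allocated from MURAM. */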
++ t_FmPcd *p_FmPcd;
++ t_Handle h_Ad;
++
++ if (isTree)
++ p_FmPcd = (t_FmPcd *)(((t_FmPcdCcTree *)h_FmPcdCcNodeOrTree)->h_FmPcd);
++ else
++ p_FmPcd = (t_FmPcd *)(((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->h_FmPcd);
++
++ if ((isTree && p_FmPcd->p_CcShadow)
++ || (!isTree && ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->maxNumOfKeys))
++ {
++ /* The allocated shadow is divided as follows:
++ 0 . . . 16 . . .
++ ---------------------------------------------------
++ | Shadow | Shadow Keys | Shadow Next |
++ | Ad | Match Table | Engine Table |
++ | (16 bytes) | (maximal size) | (maximal size) |
++ ---------------------------------------------------
++ */
++ if (!p_FmPcd->p_CcShadow)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));
++ return NULL;
++ }
++
++ h_Ad = p_FmPcd->p_CcShadow;
++ }
++ else
++ {
++ h_Ad = (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd),
++ FM_PCD_CC_AD_ENTRY_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!h_Ad)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptor"));
++ return NULL;
++ }
++ }
++
++ return h_Ad;
++}
++
++static t_Error BuildNewNodeCommonPart(
++ t_FmPcdCcNode *p_CcNode, int *size,
++ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++
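++    /* With a local mask each entry doubles: key bytes followed by mask bytes. */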
++ if (p_CcNode->lclMask)
++ *size = 2 * p_CcNode->ccKeySizeAccExtraction;
++ else
++ *size = p_CcNode->ccKeySizeAccExtraction;
++
++ if (p_CcNode->maxNumOfKeys == 0)
++ {
++ p_AdditionalInfo->p_AdTableNew = (t_Handle)FM_MURAM_AllocMem(
++ FmPcdGetMuramHandle(p_FmPcd),
++ (uint32_t)((p_AdditionalInfo->numOfKeys + 1)
++ * FM_PCD_CC_AD_ENTRY_SIZE),
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_AdditionalInfo->p_AdTableNew)
++ RETURN_ERROR(
++ MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for CC node action descriptors table"));
++
++ p_AdditionalInfo->p_KeysMatchTableNew = (t_Handle)FM_MURAM_AllocMem(
++ FmPcdGetMuramHandle(p_FmPcd),
++ (uint32_t)(*size * sizeof(uint8_t)
++ * (p_AdditionalInfo->numOfKeys + 1)),
++ FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN);
++ if (!p_AdditionalInfo->p_KeysMatchTableNew)
++ {
++ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
++ p_AdditionalInfo->p_AdTableNew);
++ p_AdditionalInfo->p_AdTableNew = NULL;
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for CC node key match table"));
++ }
++
++ MemSet8(
++ (uint8_t*)p_AdditionalInfo->p_AdTableNew,
++ 0,
++ (uint32_t)((p_AdditionalInfo->numOfKeys + 1)
++ * FM_PCD_CC_AD_ENTRY_SIZE));
++ MemSet8((uint8_t*)p_AdditionalInfo->p_KeysMatchTableNew, 0,
++ *size * sizeof(uint8_t) * (p_AdditionalInfo->numOfKeys + 1));
++ }
++ else
++ {
++ /* The allocated shadow is divided as follows:
++ 0 . . . 16 . . .
++ ---------------------------------------------------
++ | Shadow | Shadow Keys | Shadow Next |
++ | Ad | Match Table | Engine Table |
++ | (16 bytes) | (maximal size) | (maximal size) |
++ ---------------------------------------------------
++ */
++
++ if (!p_FmPcd->p_CcShadow)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));
++
++ p_AdditionalInfo->p_KeysMatchTableNew =
++ PTR_MOVE(p_FmPcd->p_CcShadow, FM_PCD_CC_AD_ENTRY_SIZE);
++ p_AdditionalInfo->p_AdTableNew =
++ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, p_CcNode->keysMatchTableMaxSize);
++
++ MemSet8(
++ (uint8_t*)p_AdditionalInfo->p_AdTableNew,
++ 0,
++ (uint32_t)((p_CcNode->maxNumOfKeys + 1)
++ * FM_PCD_CC_AD_ENTRY_SIZE));
++ MemSet8((uint8_t*)p_AdditionalInfo->p_KeysMatchTableNew, 0,
++ (*size) * sizeof(uint8_t) * (p_CcNode->maxNumOfKeys));
++ }
++
++ p_AdditionalInfo->p_AdTableOld = p_CcNode->h_AdTable;
++ p_AdditionalInfo->p_KeysMatchTableOld = p_CcNode->h_KeysMatchTable;
++
++ return E_OK;
++}
++
++static t_Error BuildNewNodeAddOrMdfyKeyAndNextEngine(
++ t_Handle h_FmPcd, t_FmPcdCcNode *p_CcNode, uint16_t keyIndex,
++ t_FmPcdCcKeyParams *p_KeyParams,
++ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo, bool add)
++{
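++    /* Build the node's new keys match table and AD table with the key at
++       keyIndex added (add == TRUE) or replaced (add == FALSE); the hardware
++       switch-over itself happens later, in the dynamic-change step. */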
++ t_Error err = E_OK;
++ t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
++ t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
++ int size;
++ int i = 0, j = 0;
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t requiredAction = 0;
++ bool prvLclMask;
++ t_CcNodeInformation *p_CcNodeInformation;
++ t_FmPcdCcStatsParams statsParams = { 0 };
++ t_List *p_Pos;
++ t_FmPcdStatsObj *p_StatsObj;
++
++ /* Check that new NIA is legal */
++ err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams,
++ p_CcNode->statisticsMode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ prvLclMask = p_CcNode->lclMask;
++
++    /* Check whether the new key requires an update of the local mask */
++ err = UpdateGblMask(p_CcNode, p_CcNode->ccKeySizeAccExtraction,
++ p_KeyParams->p_Mask);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++
++ /* Update internal data structure with new next engine for the given index */
++ memcpy(&p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams,
++ &p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams));
++
++ memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].key,
++ p_KeyParams->p_Key, p_CcNode->userSizeOfExtraction);
++
++ if ((p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ && p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
++ {
++ err =
++ AllocAndFillAdForContLookupManip(
++ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ if (p_KeyParams->p_Mask)
++ memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask,
++ p_KeyParams->p_Mask, p_CcNode->userSizeOfExtraction);
++ else
++ memset(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask, 0xFF,
++ p_CcNode->userSizeOfExtraction);
++
++ /* Update numOfKeys */
++ if (add)
++        p_AdditionalInfo->numOfKeys = (uint16_t)(p_CcNode->numOfKeys + 1);
++    else
++        p_AdditionalInfo->numOfKeys = (uint16_t)p_CcNode->numOfKeys;
++
++ /* Allocate new tables in MURAM: keys match table and action descriptors table */
++ err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++    /* Check that the manip is legal and determine which requiredAction it needs */
++ if (p_KeyParams->ccNextEngineParams.h_Manip)
++ {
++ err = FmPcdManipCheckParamsForCcNextEngine(
++ &p_KeyParams->ccNextEngineParams, &requiredAction);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction =
++ requiredAction;
++ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction |=
++ UPDATE_CC_WITH_TREE;
++
++    /* Update the new AD table and keys match table according to the new requirements */
++ i = 0;
++ for (j = 0; j < p_AdditionalInfo->numOfKeys; j++)
++ {
++ p_AdTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE);
++
++ if (j == keyIndex)
++ {
++ if (p_KeyParams->ccNextEngineParams.statisticsEn)
++ {
++            /* Allocate a statistics object that holds a statistics AD and counters.
++             For an added key, a new statistics object must be allocated.
++             For a modified key that already had statistics enabled, the existing
++             descriptor is replaced by a new one with nullified counters.
++             */
++ p_StatsObj = GetStatsObj(p_CcNode);
++ ASSERT_COND(p_StatsObj);
++
++ /* Store allocated statistics object */
++ ASSERT_COND(keyIndex < CC_MAX_NUM_OF_KEYS);
++ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj =
++ p_StatsObj;
++
++ statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
++ statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
++#if (DPAA_VERSION >= 11)
++ statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs;
++
++#endif /* (DPAA_VERSION >= 11) */
++
++ /* Building action descriptor for the received new key */
++ NextStepAd(p_AdTableNewTmp, &statsParams,
++ &p_KeyParams->ccNextEngineParams, p_FmPcd);
++ }
++ else
++ {
++ /* Building action descriptor for the received new key */
++ NextStepAd(p_AdTableNewTmp, NULL,
++ &p_KeyParams->ccNextEngineParams, p_FmPcd);
++ }
++
++ /* Copy the received new key into keys match table */
++ p_KeysMatchTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j*size*sizeof(uint8_t));
++
++ MemCpy8((void*)p_KeysMatchTableNewTmp, p_KeyParams->p_Key,
++ p_CcNode->userSizeOfExtraction);
++
++ /* Update mask for the received new key */
++ if (p_CcNode->lclMask)
++ {
++ if (p_KeyParams->p_Mask)
++ {
++ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
++ p_CcNode->ccKeySizeAccExtraction),
++ p_KeyParams->p_Mask,
++ p_CcNode->userSizeOfExtraction);
++ }
++ else
++ if (p_CcNode->ccKeySizeAccExtraction > 4)
++ {
++ MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
++ p_CcNode->ccKeySizeAccExtraction),
++ 0xff, p_CcNode->userSizeOfExtraction);
++ }
++ else
++ {
++ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
++ p_CcNode->ccKeySizeAccExtraction),
++ p_CcNode->p_GlblMask,
++ p_CcNode->userSizeOfExtraction);
++ }
++ }
++
++ /* If key modification requested, the old entry is omitted and replaced by the new parameters */
++ if (!add)
++ i++;
++ }
++ else
++ {
++ /* Copy existing action descriptors to the newly allocated Ad table */
++ p_AdTableOldTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE);
++ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp,
++ FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* Copy existing keys and their masks to the newly allocated keys match table */
++ p_KeysMatchTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j * size * sizeof(uint8_t));
++ p_KeysMatchTableOldTmp =
++ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld, i * size * sizeof(uint8_t));
++
++ if (p_CcNode->lclMask)
++ {
++ if (prvLclMask)
++ {
++ MemCpy8(
++ PTR_MOVE(p_KeysMatchTableNewTmp, p_CcNode->ccKeySizeAccExtraction),
++ PTR_MOVE(p_KeysMatchTableOldTmp, p_CcNode->ccKeySizeAccExtraction),
++ p_CcNode->ccKeySizeAccExtraction);
++ }
++ else
++ {
++ p_KeysMatchTableOldTmp =
++ PTR_MOVE(p_CcNode->h_KeysMatchTable,
++ i * (int)p_CcNode->ccKeySizeAccExtraction * sizeof(uint8_t));
++
++ if (p_CcNode->ccKeySizeAccExtraction > 4)
++ {
++ MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
++ p_CcNode->ccKeySizeAccExtraction),
++ 0xff, p_CcNode->userSizeOfExtraction);
++ }
++ else
++ {
++ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
++ p_CcNode->ccKeySizeAccExtraction),
++ p_CcNode->p_GlblMask,
++ p_CcNode->userSizeOfExtraction);
++ }
++ }
++ }
++
++ MemCpy8(p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp,
++ p_CcNode->ccKeySizeAccExtraction);
++
++ i++;
++ }
++ }
++
++ /* Miss action descriptor */
++ p_AdTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j * FM_PCD_CC_AD_ENTRY_SIZE);
++ p_AdTableOldTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i * FM_PCD_CC_AD_ENTRY_SIZE);
++ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ if (!LIST_IsEmpty(&p_CcNode->ccTreesLst))
++ {
++ LIST_FOR_EACH(p_Pos, &p_CcNode->ccTreesLst)
++ {
++ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
++ ASSERT_COND(p_CcNodeInformation->h_CcNode);
++            /* Update the manipulation that has to be updated from the port parameters. */
++            /* It has to be updated subject to the restrictions defined in the function. */
++ err =
++ SetRequiredAction(
++ p_CcNode->h_FmPcd,
++ p_CcNode->shadowAction
++ | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
++ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
++ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
++ 1, p_CcNodeInformation->h_CcNode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++
++ err =
++ CcUpdateParam(
++ p_CcNode->h_FmPcd,
++ NULL,
++ NULL,
++ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
++ 1,
++ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
++ TRUE, p_CcNodeInformation->index,
++ p_CcNodeInformation->h_CcNode, TRUE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++ }
++
++ if (p_CcNode->lclMask)
++ memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t));
++
++ if (p_KeyParams->ccNextEngineParams.nextEngine == e_FM_PCD_CC)
++ p_AdditionalInfo->h_NodeForAdd =
++ p_KeyParams->ccNextEngineParams.params.ccParams.h_CcNode;
++ if (p_KeyParams->ccNextEngineParams.h_Manip)
++ p_AdditionalInfo->h_ManipForAdd =
++ p_KeyParams->ccNextEngineParams.h_Manip;
++
++#if (DPAA_VERSION >= 11)
++ if ((p_KeyParams->ccNextEngineParams.nextEngine == e_FM_PCD_FR)
++ && (p_KeyParams->ccNextEngineParams.params.frParams.h_FrmReplic))
++ p_AdditionalInfo->h_FrmReplicForAdd =
++ p_KeyParams->ccNextEngineParams.params.frParams.h_FrmReplic;
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (!add)
++ {
++ if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ p_AdditionalInfo->h_NodeForRmv =
++ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;
++
++ if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
++ p_AdditionalInfo->h_ManipForRmv =
++ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;
++
++ /* If statistics were previously enabled, store the old statistics object to be released */
++ if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
++ {
++ p_AdditionalInfo->p_StatsObjForRmv =
++ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
++ }
++
++#if (DPAA_VERSION >= 11)
++ if ((p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
++ == e_FM_PCD_FR)
++ && (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
++ p_AdditionalInfo->h_FrmReplicForRmv =
++ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++ return E_OK;
++}
++
++static t_Error BuildNewNodeRemoveKey(
++ t_FmPcdCcNode *p_CcNode, uint16_t keyIndex,
++ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
++{
++ int i = 0, j = 0;
++ t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
++ t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
++ int size;
++ t_Error err = E_OK;
++
++    /* Save the new numOfKeys */
++    p_AdditionalInfo->numOfKeys = (uint16_t)(p_CcNode->numOfKeys - 1);
++
++    /* Allocate the new keys match table and AD table in memory */
++ err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++    /* Update the new AD table and keys match table according to the new requirements */
++ for (i = 0, j = 0; j < p_CcNode->numOfKeys; i++, j++)
++ {
++ if (j == keyIndex)
++ j++;
++
++ if (j == p_CcNode->numOfKeys)
++ break;
++ p_AdTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, i * FM_PCD_CC_AD_ENTRY_SIZE);
++ p_AdTableOldTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j * FM_PCD_CC_AD_ENTRY_SIZE);
++ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ p_KeysMatchTableOldTmp =
++ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld, j * size * sizeof(uint8_t));
++ p_KeysMatchTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, i * size * sizeof(uint8_t));
++ MemCpy8(p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp,
++ size * sizeof(uint8_t));
++ }
++
++ p_AdTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, i * FM_PCD_CC_AD_ENTRY_SIZE);
++ p_AdTableOldTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j * FM_PCD_CC_AD_ENTRY_SIZE);
++ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ p_AdditionalInfo->h_NodeForRmv =
++ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;
++
++ if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
++ p_AdditionalInfo->h_ManipForRmv =
++ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;
++
++ /* If statistics were previously enabled, store the old statistics object to be released */
++ if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
++ {
++ p_AdditionalInfo->p_StatsObjForRmv =
++ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
++ }
++
++#if (DPAA_VERSION >= 11)
++ if ((p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
++ == e_FM_PCD_FR)
++ && (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
++ p_AdditionalInfo->h_FrmReplicForRmv =
++ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
++#endif /* (DPAA_VERSION >= 11) */
++
++ return E_OK;
++}
++
++static t_Error BuildNewNodeModifyKey(
++ t_FmPcdCcNode *p_CcNode, uint16_t keyIndex, uint8_t *p_Key,
++ uint8_t *p_Mask, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ t_Error err = E_OK;
++ t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
++ t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
++ int size;
++ int i = 0, j = 0;
++ bool prvLclMask;
++    t_FmPcdStatsObj *p_StatsObj, tmpStatsObj;
++
++    p_AdditionalInfo->numOfKeys = p_CcNode->numOfKeys;
++
++ prvLclMask = p_CcNode->lclMask;
++
++    /* Check whether the new key requires an update of the local mask */
++ err = UpdateGblMask(p_CcNode, p_CcNode->ccKeySizeAccExtraction, p_Mask);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++
++ /* Update internal data structure with new next engine for the given index */
++ memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].key, p_Key,
++ p_CcNode->userSizeOfExtraction);
++
++ if (p_Mask)
++ memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask, p_Mask,
++ p_CcNode->userSizeOfExtraction);
++ else
++ memset(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask, 0xFF,
++ p_CcNode->userSizeOfExtraction);
++
++    /* Build the new keys match table and AD table in memory */
++ err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++    /* Fill the new AD table and keys match table */
++ for (j = 0, i = 0; j < p_AdditionalInfo->numOfKeys; j++, i++)
++ {
++ p_AdTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE);
++ p_AdTableOldTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE);
++
++ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ if (j == keyIndex)
++ {
++ ASSERT_COND(keyIndex < CC_MAX_NUM_OF_KEYS);
++ if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
++ {
++            /* As statistics were enabled, we need to update the existing
++             statistics descriptor with new nullified counters. */
++ p_StatsObj = GetStatsObj(p_CcNode);
++ ASSERT_COND(p_StatsObj);
++
++ SetStatsCounters(
++ p_AdTableNewTmp,
++ (uint32_t)((XX_VirtToPhys(p_StatsObj->h_StatsCounters)
++ - p_FmPcd->physicalMuramBase)));
++
++ tmpStatsObj.h_StatsAd = p_StatsObj->h_StatsAd;
++ tmpStatsObj.h_StatsCounters = p_StatsObj->h_StatsCounters;
++
++ /* As we need to replace only the counters, we build a new statistics
++ object that holds the old AD and the new counters - this will be the
++ currently used statistics object.
++ The newly allocated AD is not required and may be released back to
++ the available objects with the previous counters pointer. */
++ p_StatsObj->h_StatsAd =
++ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd;
++
++ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd =
++ tmpStatsObj.h_StatsAd;
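++
++ /* After the swap above, p_StatsObj holds the old statistics AD
++ together with the new (nullified) counters and becomes the object
++ in use, while the previous object now holds the newly allocated AD
++ with the old counters and can be released back to the pool. */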
++
++ /* Store allocated statistics object */
++ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj =
++ p_StatsObj;
++
++ /* As statistics were previously enabled, store the old statistics object to be released */
++ p_AdditionalInfo->p_StatsObjForRmv =
++ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
++ }
++
++ p_KeysMatchTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j * size * sizeof(uint8_t));
++
++ MemCpy8(p_KeysMatchTableNewTmp, p_Key,
++ p_CcNode->userSizeOfExtraction);
++
++ if (p_CcNode->lclMask)
++ {
++ if (p_Mask)
++ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
++ p_CcNode->ccKeySizeAccExtraction),
++ p_Mask, p_CcNode->userSizeOfExtraction);
++ else
++ if (p_CcNode->ccKeySizeAccExtraction > 4)
++ MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
++ p_CcNode->ccKeySizeAccExtraction),
++ 0xff, p_CcNode->userSizeOfExtraction);
++ else
++ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
++ p_CcNode->ccKeySizeAccExtraction),
++ p_CcNode->p_GlblMask,
++ p_CcNode->userSizeOfExtraction);
++ }
++ }
++ else
++ {
++ p_KeysMatchTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j * size * sizeof(uint8_t));
++ p_KeysMatchTableOldTmp =
++ PTR_MOVE(p_CcNode->h_KeysMatchTable, i * size * sizeof(uint8_t));
++
++ if (p_CcNode->lclMask)
++ {
++ if (prvLclMask)
++ MemCpy8(
++ PTR_MOVE(p_KeysMatchTableNewTmp, p_CcNode->ccKeySizeAccExtraction),
++ PTR_MOVE(p_KeysMatchTableOldTmp, p_CcNode->ccKeySizeAccExtraction),
++ p_CcNode->userSizeOfExtraction);
++ else
++ {
++ p_KeysMatchTableOldTmp =
++ PTR_MOVE(p_CcNode->h_KeysMatchTable,
++ i * (int)p_CcNode->ccKeySizeAccExtraction * sizeof(uint8_t));
++
++ if (p_CcNode->ccKeySizeAccExtraction > 4)
++ MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
++ p_CcNode->ccKeySizeAccExtraction),
++ 0xff, p_CcNode->userSizeOfExtraction);
++ else
++ MemCpy8(
++ PTR_MOVE(p_KeysMatchTableNewTmp, p_CcNode->ccKeySizeAccExtraction),
++ p_CcNode->p_GlblMask,
++ p_CcNode->userSizeOfExtraction);
++ }
++ }
++ MemCpy8((void*)p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp,
++ p_CcNode->ccKeySizeAccExtraction);
++ }
++ }
++
++ p_AdTableNewTmp =
++ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j * FM_PCD_CC_AD_ENTRY_SIZE);
++ p_AdTableOldTmp = PTR_MOVE(p_CcNode->h_AdTable, i * FM_PCD_CC_AD_ENTRY_SIZE);
++
++ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ return E_OK;
++}
++
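++/* Replaces the next-engine parameters of the entry at keyIndex on a node or a
++ tree: validates the new parameters, builds a replacement AD (queued on
++ h_NewLst, with the old AD queued on h_OldLst for the subsequent dynamic
++ change) and records the objects - nodes, manips, frame replicators and
++ statistics objects - that have to be added or released. */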
++static t_Error BuildNewNodeModifyNextEngine(
++ t_Handle h_FmPcd, t_Handle h_FmPcdCcNodeOrTree, uint16_t keyIndex,
++ t_FmPcdCcNextEngineParams *p_CcNextEngineParams, t_List *h_OldLst,
++ t_List *h_NewLst, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
++{
++ t_Error err = E_OK;
++ uint32_t requiredAction = 0;
++ t_List *p_Pos;
++ t_CcNodeInformation *p_CcNodeInformation, ccNodeInfo;
++ t_Handle p_Ad;
++ t_FmPcdCcNode *p_FmPcdCcNode1 = NULL;
++ t_FmPcdCcTree *p_FmPcdCcTree = NULL;
++ t_FmPcdStatsObj *p_StatsObj;
++ t_FmPcdCcStatsParams statsParams = { 0 };
++
++ ASSERT_COND(p_CcNextEngineParams);
++
++ /* check that new NIA is legal */
++ if (!p_AdditionalInfo->tree)
++ err = ValidateNextEngineParams(
++ h_FmPcd, p_CcNextEngineParams,
++ ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->statisticsMode);
++ else
++ /* Statistics are not supported for CC root */
++ err = ValidateNextEngineParams(h_FmPcd, p_CcNextEngineParams,
++ e_FM_PCD_CC_STATS_MODE_NONE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ /* Update internal data structure for next engine per index (index - key) */
++ memcpy(&p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams,
++ p_CcNextEngineParams, sizeof(t_FmPcdCcNextEngineParams));
++
++ /* Check that the manip is legal and determine which requiredAction it needs */
++ if (p_CcNextEngineParams->h_Manip)
++ {
++ err = FmPcdManipCheckParamsForCcNextEngine(p_CcNextEngineParams,
++ &requiredAction);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ if (!p_AdditionalInfo->tree)
++ {
++ p_FmPcdCcNode1 = (t_FmPcdCcNode *)h_FmPcdCcNodeOrTree;
++ p_AdditionalInfo->numOfKeys = p_FmPcdCcNode1->numOfKeys;
++ p_Ad = p_FmPcdCcNode1->h_AdTable;
++
++ if (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ p_AdditionalInfo->h_NodeForRmv =
++ p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;
++
++ if (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
++ p_AdditionalInfo->h_ManipForRmv =
++ p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;
++
++#if (DPAA_VERSION >= 11)
++ if ((p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
++ == e_FM_PCD_FR)
++ && (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
++ p_AdditionalInfo->h_FrmReplicForRmv =
++ p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
++#endif /* (DPAA_VERSION >= 11) */
++ }
++ else
++ {
++ p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree;
++ p_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
++
++ if (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ p_AdditionalInfo->h_NodeForRmv =
++ p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;
++
++ if (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
++ p_AdditionalInfo->h_ManipForRmv =
++ p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;
++
++#if (DPAA_VERSION >= 11)
++ if ((p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
++ == e_FM_PCD_FR)
++ && (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
++ p_AdditionalInfo->h_FrmReplicForRmv =
++ p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++ if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_CC)
++ && p_CcNextEngineParams->h_Manip)
++ {
++ err = AllocAndFillAdForContLookupManip(
++ p_CcNextEngineParams->params.ccParams.h_CcNode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ ASSERT_COND(p_Ad);
++
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = PTR_MOVE(p_Ad, keyIndex * FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* If statistics were enabled, this Ad is the statistics Ad. Need to follow its
++ nextAction to retrieve the actual Nia-Ad. If statistics should remain enabled,
++ only the actual Nia-Ad should be modified. */
++ if ((!p_AdditionalInfo->tree)
++ && (((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj)
++ && (p_CcNextEngineParams->statisticsEn))
++ ccNodeInfo.h_CcNode =
++ ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd;
++
++ EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL);
++
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ p_Ad = GetNewAd(h_FmPcdCcNodeOrTree, p_AdditionalInfo->tree);
++ if (!p_Ad)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for CC node action descriptor"));
++ MemSet8((uint8_t *)p_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* If statistics were not enabled before, but requested now - Allocate a statistics
++ object that holds statistics AD and counters. */
++ if ((!p_AdditionalInfo->tree)
++ && (!((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj)
++ && (p_CcNextEngineParams->statisticsEn))
++ {
++ p_StatsObj = GetStatsObj((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree);
++ ASSERT_COND(p_StatsObj);
++
++ /* Store allocated statistics object */
++ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj =
++ p_StatsObj;
++
++ statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
++ statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
++
++#if (DPAA_VERSION >= 11)
++ statsParams.h_StatsFLRs =
++ ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->h_StatsFLRs;
++
++#endif /* (DPAA_VERSION >= 11) */
++
++ NextStepAd(p_Ad, &statsParams, p_CcNextEngineParams, h_FmPcd);
++ }
++ else
++ NextStepAd(p_Ad, NULL, p_CcNextEngineParams, h_FmPcd);
++
++ ccNodeInfo.h_CcNode = p_Ad;
++ EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo, NULL);
++
++ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction =
++ requiredAction;
++ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction |=
++ UPDATE_CC_WITH_TREE;
++
++ if (!p_AdditionalInfo->tree)
++ {
++ ASSERT_COND(p_FmPcdCcNode1);
++ if (!LIST_IsEmpty(&p_FmPcdCcNode1->ccTreesLst))
++ {
++ LIST_FOR_EACH(p_Pos, &p_FmPcdCcNode1->ccTreesLst)
++ {
++ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
++
++ ASSERT_COND(p_CcNodeInformation->h_CcNode);
++ /* Update the manipulation from the parameters of the port;
++ it has to be updated subject to the restrictions defined in this function */
++
++ err =
++ SetRequiredAction(
++ p_FmPcdCcNode1->h_FmPcd,
++ p_FmPcdCcNode1->shadowAction
++ | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
++ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
++ p_Ad, 1, p_CcNodeInformation->h_CcNode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++
++ err = CcUpdateParam(
++ p_FmPcdCcNode1->h_FmPcd, NULL, NULL,
++ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex], 1,
++ p_Ad, TRUE, p_CcNodeInformation->index,
++ p_CcNodeInformation->h_CcNode, TRUE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++ }
++ }
++ else
++ {
++ ASSERT_COND(p_FmPcdCcTree);
++
++ err =
++ SetRequiredAction(
++ h_FmPcd,
++ p_FmPcdCcTree->requiredAction
++ | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
++ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
++ p_Ad, 1, (t_Handle)p_FmPcdCcTree);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++
++ err = CcUpdateParam(h_FmPcd, NULL, NULL,
++ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
++ 1, p_Ad, TRUE, 0, (t_Handle)p_FmPcdCcTree, TRUE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ if (p_CcNextEngineParams->nextEngine == e_FM_PCD_CC)
++ p_AdditionalInfo->h_NodeForAdd =
++ p_CcNextEngineParams->params.ccParams.h_CcNode;
++ if (p_CcNextEngineParams->h_Manip)
++ p_AdditionalInfo->h_ManipForAdd = p_CcNextEngineParams->h_Manip;
++
++ /* If statistics were previously enabled, but now are disabled,
++ store the old statistics object to be released */
++ if ((!p_AdditionalInfo->tree)
++ && (((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj)
++ && (!p_CcNextEngineParams->statisticsEn))
++ {
++ p_AdditionalInfo->p_StatsObjForRmv =
++ ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj;
++
++
++ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj = NULL;
++ }
++#if (DPAA_VERSION >= 11)
++ if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_FR)
++ && (p_CcNextEngineParams->params.frParams.h_FrmReplic))
++ p_AdditionalInfo->h_FrmReplicForAdd =
++ p_CcNextEngineParams->params.frParams.h_FrmReplic;
++#endif /* (DPAA_VERSION >= 11) */
++
++ return E_OK;
++}
++
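++/* Walks the modified node's ccPrevNodesLst and, for every previous node entry
++ that points at it, queues that entry's AD on h_OldLst; the next-engine
++ parameters of the first matching entry are returned through
++ p_NextEngineParams. */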
++static void UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(
++ t_FmPcdCcNode *p_CrntMdfNode, t_List *h_OldLst,
++ t_FmPcdCcNextEngineParams **p_NextEngineParams)
++{
++ t_CcNodeInformation *p_CcNodeInformation;
++ t_FmPcdCcNode *p_NodePtrOnCurrentMdfNode = NULL;
++ t_List *p_Pos;
++ int i = 0;
++ t_Handle p_AdTablePtOnCrntCurrentMdfNode/*, p_AdTableNewModified*/;
++ t_CcNodeInformation ccNodeInfo;
++
++ LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccPrevNodesLst)
++ {
++ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
++ p_NodePtrOnCurrentMdfNode =
++ (t_FmPcdCcNode *)p_CcNodeInformation->h_CcNode;
++
++ ASSERT_COND(p_NodePtrOnCurrentMdfNode);
++
++ /* Search the previous node for the exact index that points to the currently modified node, in order to get its AD */
++ for (i = 0; i < p_NodePtrOnCurrentMdfNode->numOfKeys + 1; i++)
++ {
++ if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ {
++ if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode
++ == (t_Handle)p_CrntMdfNode)
++ {
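++ /* Choose the AD to be replaced: the modified node's own AD if
++ the referring entry has a manip attached, else that entry's
++ statistics AD if statistics are enabled for it, otherwise the
++ plain entry in the previous node's AD table. */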
++ if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
++ p_AdTablePtOnCrntCurrentMdfNode = p_CrntMdfNode->h_Ad;
++ else
++ if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].p_StatsObj)
++ p_AdTablePtOnCrntCurrentMdfNode =
++ p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].p_StatsObj->h_StatsAd;
++ else
++ p_AdTablePtOnCrntCurrentMdfNode =
++ PTR_MOVE(p_NodePtrOnCurrentMdfNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE);
++
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = p_AdTablePtOnCrntCurrentMdfNode;
++ EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL);
++
++ if (!(*p_NextEngineParams))
++ *p_NextEngineParams =
++ &p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams;
++ }
++ }
++ }
++
++ ASSERT_COND(i != p_NodePtrOnCurrentMdfNode->numOfKeys);
++ }
++}
++
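++/* Tree counterpart of the routine above: walks ccTreeIdLst and, for every tree
++ entry that points at the modified node, queues the AD located at the tree
++ base address on h_OldLst. */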
++static void UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(
++ t_FmPcdCcNode *p_CrntMdfNode, t_List *h_OldLst,
++ t_FmPcdCcNextEngineParams **p_NextEngineParams)
++{
++ t_CcNodeInformation *p_CcNodeInformation;
++ t_FmPcdCcTree *p_TreePtrOnCurrentMdfNode = NULL;
++ t_List *p_Pos;
++ int i = 0;
++ t_Handle p_AdTableTmp;
++ t_CcNodeInformation ccNodeInfo;
++
++ LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccTreeIdLst)
++ {
++ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
++ p_TreePtrOnCurrentMdfNode =
++ (t_FmPcdCcTree *)p_CcNodeInformation->h_CcNode;
++
++ ASSERT_COND(p_TreePtrOnCurrentMdfNode);
++
++ /* Search the trees for the exact index that points to the currently modified node, in order to get its AD */
++ for (i = 0; i < p_TreePtrOnCurrentMdfNode->numOfEntries; i++)
++ {
++ if (p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ {
++ if (p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode
++ == (t_Handle)p_CrntMdfNode)
++ {
++ p_AdTableTmp =
++ UINT_TO_PTR(p_TreePtrOnCurrentMdfNode->ccTreeBaseAddr + i*FM_PCD_CC_AD_ENTRY_SIZE);
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = p_AdTableTmp;
++ EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL);
++
++ if (!(*p_NextEngineParams))
++ *p_NextEngineParams =
++ &p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams;
++ }
++ }
++ }
++
++ ASSERT_COND(i == p_TreePtrOnCurrentMdfNode->numOfEntries);
++ }
++}
++
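++/* Allocates a t_FmPcdModifyCcKeyAdditionalParams snapshot of the node's (or
++ tree's) key and next-engine array, shifted according to modifyState: ADD
++ leaves a free slot at keyIndex, REMOVE skips the entry at keyIndex, and a
++ plain change copies the array unchanged so the caller can overwrite the
++ entry afterwards. Returns NULL on any validation failure. */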
++static t_FmPcdModifyCcKeyAdditionalParams * ModifyNodeCommonPart(
++ t_Handle h_FmPcdCcNodeOrTree, uint16_t keyIndex,
++ e_ModifyState modifyState, bool ttlCheck, bool hashCheck, bool tree)
++{
++ t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams;
++ int i = 0, j = 0;
++ bool wasUpdate = FALSE;
++ t_FmPcdCcNode *p_CcNode = NULL;
++ t_FmPcdCcTree *p_FmPcdCcTree;
++ uint16_t numOfKeys;
++ t_FmPcdCcKeyAndNextEngineParams *p_KeyAndNextEngineParams;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcdCcNodeOrTree, E_INVALID_HANDLE, NULL);
++
++ if (!tree)
++ {
++ p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNodeOrTree;
++ numOfKeys = p_CcNode->numOfKeys;
++
++ /* The node has to be pointed to by another node or tree */
++
++ p_KeyAndNextEngineParams = (t_FmPcdCcKeyAndNextEngineParams *)XX_Malloc(
++ sizeof(t_FmPcdCcKeyAndNextEngineParams) * (numOfKeys + 1));
++ if (!p_KeyAndNextEngineParams)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Next engine and required action structure"));
++ return NULL;
++ }
++ memcpy(p_KeyAndNextEngineParams, p_CcNode->keyAndNextEngineParams,
++ (numOfKeys + 1) * sizeof(t_FmPcdCcKeyAndNextEngineParams));
++
++ if (ttlCheck)
++ {
++ if ((p_CcNode->parseCode == CC_PC_FF_IPV4TTL)
++ || (p_CcNode->parseCode == CC_PC_FF_IPV6HOP_LIMIT))
++ {
++ XX_Free(p_KeyAndNextEngineParams);
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("nodeId of CC_PC_FF_IPV4TTL or CC_PC_FF_IPV6HOP_LIMIT can not be used for this operation"));
++ return NULL;
++ }
++ }
++
++ if (hashCheck)
++ {
++ if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)
++ {
++ XX_Free(p_KeyAndNextEngineParams);
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("nodeId of CC_PC_GENERIC_IC_HASH_INDEXED can not be used for this operation"));
++ return NULL;
++ }
++ }
++ }
++ else
++ {
++ p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree;
++ numOfKeys = p_FmPcdCcTree->numOfEntries;
++
++ p_KeyAndNextEngineParams = (t_FmPcdCcKeyAndNextEngineParams *)XX_Malloc(
++ sizeof(t_FmPcdCcKeyAndNextEngineParams)
++ * FM_PCD_MAX_NUM_OF_CC_GROUPS);
++ if (!p_KeyAndNextEngineParams)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Next engine and required action structure"));
++ return NULL;
++ }
++ memcpy(p_KeyAndNextEngineParams,
++ p_FmPcdCcTree->keyAndNextEngineParams,
++ FM_PCD_MAX_NUM_OF_CC_GROUPS
++ * sizeof(t_FmPcdCcKeyAndNextEngineParams));
++ }
++
++ p_FmPcdModifyCcKeyAdditionalParams =
++ (t_FmPcdModifyCcKeyAdditionalParams *)XX_Malloc(
++ sizeof(t_FmPcdModifyCcKeyAdditionalParams));
++ if (!p_FmPcdModifyCcKeyAdditionalParams)
++ {
++ XX_Free(p_KeyAndNextEngineParams);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of internal data structure FAILED"));
++ return NULL;
++ }
++ memset(p_FmPcdModifyCcKeyAdditionalParams, 0,
++ sizeof(t_FmPcdModifyCcKeyAdditionalParams));
++
++ p_FmPcdModifyCcKeyAdditionalParams->h_CurrentNode = h_FmPcdCcNodeOrTree;
++ p_FmPcdModifyCcKeyAdditionalParams->savedKeyIndex = keyIndex;
++
++ while (i < numOfKeys)
++ {
++ if ((j == keyIndex) && !wasUpdate)
++ {
++ if (modifyState == e_MODIFY_STATE_ADD)
++ j++;
++ else
++ if (modifyState == e_MODIFY_STATE_REMOVE)
++ i++;
++ wasUpdate = TRUE;
++ }
++ else
++ {
++ memcpy(&p_FmPcdModifyCcKeyAdditionalParams->keyAndNextEngineParams[j],
++ p_KeyAndNextEngineParams + i,
++ sizeof(t_FmPcdCcKeyAndNextEngineParams));
++ i++;
++ j++;
++ }
++ }
++
++ if (keyIndex == numOfKeys)
++ {
++ if (modifyState == e_MODIFY_STATE_ADD)
++ j++;
++ }
++
++ memcpy(&p_FmPcdModifyCcKeyAdditionalParams->keyAndNextEngineParams[j],
++ p_KeyAndNextEngineParams + numOfKeys,
++ sizeof(t_FmPcdCcKeyAndNextEngineParams));
++
++ XX_Free(p_KeyAndNextEngineParams);
++
++ return p_FmPcdModifyCcKeyAdditionalParams;
++}
++
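++/* Collects every AD that currently points at the modified node into h_OldLst
++ and, if at least one referrer was found, builds a single new AD (queued on
++ h_NewLst) that will point at the modified node's new tables. */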
++static t_Error UpdatePtrWhichPointOnCrntMdfNode(
++ t_FmPcdCcNode *p_CcNode,
++ t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams,
++ t_List *h_OldLst, t_List *h_NewLst)
++{
++ t_FmPcdCcNextEngineParams *p_NextEngineParams = NULL;
++ t_CcNodeInformation ccNodeInfo = { 0 };
++ t_Handle h_NewAd;
++ t_Handle h_OrigAd = NULL;
++
++ /* Building a list of all action descriptors that point to the previous node */
++ if (!LIST_IsEmpty(&p_CcNode->ccPrevNodesLst))
++ UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(p_CcNode, h_OldLst,
++ &p_NextEngineParams);
++
++ if (!LIST_IsEmpty(&p_CcNode->ccTreeIdLst))
++ UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(p_CcNode, h_OldLst,
++ &p_NextEngineParams);
++
++ /* This node must be found as the next engine of one of its previous nodes or trees */
++ if (p_NextEngineParams)
++ {
++ /* Building a new action descriptor that points to the modified node */
++ h_NewAd = GetNewAd(p_CcNode, FALSE);
++ if (!h_NewAd)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ MemSet8(h_NewAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ h_OrigAd = p_CcNode->h_Ad;
++ BuildNewAd(h_NewAd, p_FmPcdModifyCcKeyAdditionalParams, p_CcNode,
++ p_NextEngineParams);
++
++ ccNodeInfo.h_CcNode = h_NewAd;
++ EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo, NULL);
++
++ if (p_NextEngineParams->h_Manip && !h_OrigAd)
++ FmPcdManipUpdateOwner(p_NextEngineParams->h_Manip, FALSE);
++ }
++ return E_OK;
++}
++
++static void UpdateCcRootOwner(t_FmPcdCcTree *p_FmPcdCcTree, bool add)
++{
++ ASSERT_COND(p_FmPcdCcTree);
++
++ /* this routine must be protected by the calling routine! */
++
++ if (add)
++ p_FmPcdCcTree->owners++;
++ else
++ {
++ ASSERT_COND(p_FmPcdCcTree->owners);
++ p_FmPcdCcTree->owners--;
++ }
++}
++
++static t_Error CheckAndSetManipParamsWithCcNodeParams(t_FmPcdCcNode *p_CcNode)
++{
++ t_Error err = E_OK;
++ int i = 0;
++
++ for (i = 0; i < p_CcNode->numOfKeys; i++)
++ {
++ if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
++ {
++ err =
++ FmPcdManipCheckParamsWithCcNodeParams(
++ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip,
++ (t_Handle)p_CcNode);
++ if (err)
++ return err;
++ }
++ }
++
++ return err;
++}
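++
++/* Validates the node's statistics mode against the per-key settings and
++ computes the number of frame length ranges and the size of the counters
++ array; in RMON mode the ranges must be strictly increasing and the last
++ range must be 0xFFFF. */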
++static t_Error ValidateAndCalcStatsParams(t_FmPcdCcNode *p_CcNode,
++ t_FmPcdCcNodeParams *p_CcNodeParam,
++ uint32_t *p_NumOfRanges,
++ uint32_t *p_CountersArraySize)
++{
++ e_FmPcdCcStatsMode statisticsMode = p_CcNode->statisticsMode;
++ uint32_t i;
++
++ UNUSED(p_CcNodeParam);
++
++ switch (statisticsMode)
++ {
++ case e_FM_PCD_CC_STATS_MODE_NONE:
++ for (i = 0; i < p_CcNode->numOfKeys; i++)
++ if (p_CcNodeParam->keysParams.keyParams[i].ccNextEngineParams.statisticsEn)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("Statistics cannot be enabled for key %d when statistics mode was set to 'NONE'", i));
++ return E_OK;
++
++ case e_FM_PCD_CC_STATS_MODE_FRAME:
++ case e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME:
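++ /* Two counters are allocated per key: the counters array always
++ begins with a byte counter, followed by a single frame counter. */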
++ *p_NumOfRanges = 1;
++ *p_CountersArraySize = 2 * FM_PCD_CC_STATS_COUNTER_SIZE;
++ return E_OK;
++
++#if (DPAA_VERSION >= 11)
++ case e_FM_PCD_CC_STATS_MODE_RMON:
++ {
++ uint16_t *p_FrameLengthRanges =
++ p_CcNodeParam->keysParams.frameLengthRanges;
++ uint32_t i;
++
++ if (p_FrameLengthRanges[0] <= 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("First frame length range must be greater than 0"));
++
++ if (p_FrameLengthRanges[0] == 0xFFFF)
++ {
++ *p_NumOfRanges = 1;
++ *p_CountersArraySize = 2 * FM_PCD_CC_STATS_COUNTER_SIZE;
++ return E_OK;
++ }
++
++ for (i = 1; i < FM_PCD_CC_STATS_MAX_NUM_OF_FLR; i++)
++ {
++ if (p_FrameLengthRanges[i - 1] >= p_FrameLengthRanges[i])
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("Frame length range must be larger at least by 1 from preceding range"));
++
++ /* Stop when last range is reached */
++ if (p_FrameLengthRanges[i] == 0xFFFF)
++ break;
++ }
++
++ if ((i >= FM_PCD_CC_STATS_MAX_NUM_OF_FLR)
++ || (p_FrameLengthRanges[i] != 0xFFFF))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("Last Frame length range must be 0xFFFF"));
++
++ *p_NumOfRanges = i + 1;
++
++ /* Allocate an extra counter for byte count, as counters
++ array always begins with byte count */
++ *p_CountersArraySize = (*p_NumOfRanges + 1)
++ * FM_PCD_CC_STATS_COUNTER_SIZE;
++
++ }
++ return E_OK;
++#endif /* (DPAA_VERSION >= 11) */
++
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Statistics mode"));
++ }
++}
++
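++/* Generic parameter check for match-table nodes: validates the statistics
++ settings, the 'miss' next-engine parameters and every key's parameters, and
++ stores keys, masks and next-engine parameters into the node. A keys match
++ table is always allocated for this node type (*isKeyTblAlloc is set to TRUE). */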
++static t_Error CheckParams(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam,
++ t_FmPcdCcNode *p_CcNode, bool *isKeyTblAlloc)
++{
++ int tmp = 0;
++ t_FmPcdCcKeyParams *p_KeyParams;
++ t_Error err;
++ uint32_t requiredAction = 0;
++
++ /* Validate statistics parameters */
++ err = ValidateAndCalcStatsParams(p_CcNode, p_CcNodeParam,
++ &(p_CcNode->numOfStatsFLRs),
++ &(p_CcNode->countersArraySize));
++ if (err)
++ RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters"));
++
++ /* Validate next engine parameters on Miss */
++ err = ValidateNextEngineParams(
++ h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
++ p_CcNode->statisticsMode);
++ if (err)
++ RETURN_ERROR(MAJOR, err,
++ ("For this node MissNextEngineParams are not valid"));
++
++ if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip)
++ {
++ err = FmPcdManipCheckParamsForCcNextEngine(
++ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
++ &requiredAction);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ memcpy(&p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams,
++ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
++ sizeof(t_FmPcdCcNextEngineParams));
++
++ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].requiredAction =
++ requiredAction;
++
++ if ((p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ && p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.h_Manip)
++ {
++ err =
++ AllocAndFillAdForContLookupManip(
++ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.params.ccParams.h_CcNode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++)
++ {
++ p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp];
++
++ if (!p_KeyParams->p_Key)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_Key is not initialized"));
++
++ err = ValidateNextEngineParams(h_FmPcd,
++ &p_KeyParams->ccNextEngineParams,
++ p_CcNode->statisticsMode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++
++ err = UpdateGblMask(p_CcNode, p_CcNodeParam->keysParams.keySize,
++ p_KeyParams->p_Mask);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++
++ if (p_KeyParams->ccNextEngineParams.h_Manip)
++ {
++ err = FmPcdManipCheckParamsForCcNextEngine(
++ &p_KeyParams->ccNextEngineParams, &requiredAction);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ /* Store 'key' parameters - key, mask (if passed by the user) */
++ memcpy(p_CcNode->keyAndNextEngineParams[tmp].key, p_KeyParams->p_Key,
++ p_CcNodeParam->keysParams.keySize);
++
++ if (p_KeyParams->p_Mask)
++ memcpy(p_CcNode->keyAndNextEngineParams[tmp].mask,
++ p_KeyParams->p_Mask, p_CcNodeParam->keysParams.keySize);
++ else
++ memset((void *)(p_CcNode->keyAndNextEngineParams[tmp].mask), 0xFF,
++ p_CcNodeParam->keysParams.keySize);
++
++ /* Store next engine parameters */
++ memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams,
++ &p_KeyParams->ccNextEngineParams,
++ sizeof(t_FmPcdCcNextEngineParams));
++
++ p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction;
++
++ if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip)
++ {
++ err =
++ AllocAndFillAdForContLookupManip(
++ p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++ }
++
++ if (p_CcNode->maxNumOfKeys)
++ {
++ if (p_CcNode->maxNumOfKeys < p_CcNode->numOfKeys)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("Number of keys exceed the provided maximal number of keys"));
++ }
++
++ *isKeyTblAlloc = TRUE;
++
++ return E_OK;
++}
++
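++/* Parameter check for the special IPv4 TTL / IPv6 hop limit node type: exactly
++ one key, whose value is fixed to 0x01 with a full one-byte mask, and no keys
++ match table is allocated for it (*isKeyTblAlloc is set to FALSE). */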
++static t_Error Ipv4TtlOrIpv6HopLimitCheckParams(
++ t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam,
++ t_FmPcdCcNode *p_CcNode, bool *isKeyTblAlloc)
++{
++ int tmp = 0;
++ t_FmPcdCcKeyParams *p_KeyParams;
++ t_Error err;
++ uint8_t key = 0x01;
++ uint32_t requiredAction = 0;
++
++ if (p_CcNode->numOfKeys != 1)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT the maximal supported 'numOfKeys' is 1"));
++
++ if ((p_CcNodeParam->keysParams.maxNumOfKeys)
++ && (p_CcNodeParam->keysParams.maxNumOfKeys != 1))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT the maximal supported 'maxNumOfKeys' is 1"));
++
++ /* Validate statistics parameters */
++ err = ValidateAndCalcStatsParams(p_CcNode, p_CcNodeParam,
++ &(p_CcNode->numOfStatsFLRs),
++ &(p_CcNode->countersArraySize));
++ if (err)
++ RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters"));
++
++ err = ValidateNextEngineParams(
++ h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
++ p_CcNodeParam->keysParams.statisticsMode);
++ if (err)
++ RETURN_ERROR(MAJOR, err,
++ ("For this node MissNextEngineParams are not valid"));
++
++ if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip)
++ {
++ err = FmPcdManipCheckParamsForCcNextEngine(
++ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
++ &requiredAction);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ memcpy(&p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams,
++ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
++ sizeof(t_FmPcdCcNextEngineParams));
++
++ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].requiredAction =
++ requiredAction;
++
++ if ((p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ && p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.h_Manip)
++ {
++ err =
++ AllocAndFillAdForContLookupManip(
++ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.params.ccParams.h_CcNode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++)
++ {
++ p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp];
++
++ if (p_KeyParams->p_Mask)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT p_Mask can not be initialized"));
++
++ if (memcmp(p_KeyParams->p_Key, &key, 1) != 0)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT p_Key has to be 1"));
++
++ err = ValidateNextEngineParams(h_FmPcd,
++ &p_KeyParams->ccNextEngineParams,
++ p_CcNode->statisticsMode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++
++ if (p_KeyParams->ccNextEngineParams.h_Manip)
++ {
++ err = FmPcdManipCheckParamsForCcNextEngine(
++ &p_KeyParams->ccNextEngineParams, &requiredAction);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++
++ /* Store 'key' parameters - key (fixed to 0x01), key size of 1 byte and full mask */
++ p_CcNode->keyAndNextEngineParams[tmp].key[0] = key;
++ p_CcNode->keyAndNextEngineParams[tmp].mask[0] = 0xFF;
++
++ /* Store NextEngine parameters */
++ memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams,
++ &p_KeyParams->ccNextEngineParams,
++ sizeof(t_FmPcdCcNextEngineParams));
++
++ if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip)
++ {
++ err =
++ AllocAndFillAdForContLookupManip(
++ p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++ p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction;
++ }
++
++ *isKeyTblAlloc = FALSE;
++
++ return E_OK;
++}
++
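++/* Parameter check for IC hash-indexed nodes: numOfKeys must be a power of two
++ matching the number of bits set in icIndxMask above its low nibble. For
++ example, icIndxMask = 0x00F0 has 4 set bits after the 4-bit shift, so
++ numOfKeys must be 1 << 4 = 16. Indices selected by the mask must carry
++ initialized next-engine parameters; all others must be left uninitialized. */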
++static t_Error IcHashIndexedCheckParams(t_Handle h_FmPcd,
++ t_FmPcdCcNodeParams *p_CcNodeParam,
++ t_FmPcdCcNode *p_CcNode,
++ bool *isKeyTblAlloc)
++{
++ int tmp = 0, countOnes = 0;
++ t_FmPcdCcKeyParams *p_KeyParams;
++ t_Error err;
++ uint16_t glblMask = p_CcNodeParam->extractCcParams.extractNonHdr.icIndxMask;
++ uint16_t countMask = (uint16_t)(glblMask >> 4);
++ uint32_t requiredAction = 0;
++
++ if (glblMask & 0x000f)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("icIndxMask has to be with last nibble 0"));
++
++ while (countMask)
++ {
++ countOnes++;
++ countMask = (uint16_t)(countMask >> 1);
++ }
++
++ if (!POWER_OF_2(p_CcNode->numOfKeys))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For Node of the type INDEXED numOfKeys has to be powerOfTwo"));
++
++ if (p_CcNode->numOfKeys != ((uint32_t)1 << countOnes))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For Node of the type IC_HASH_INDEXED numOfKeys has to be powerOfTwo"));
++
++ if (p_CcNodeParam->keysParams.maxNumOfKeys
++ && (p_CcNodeParam->keysParams.maxNumOfKeys != p_CcNode->numOfKeys))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For Node of the type INDEXED 'maxNumOfKeys' should be 0 or equal 'numOfKeys'"));
++
++ /* Validate statistics parameters */
++ err = ValidateAndCalcStatsParams(p_CcNode, p_CcNodeParam,
++ &(p_CcNode->numOfStatsFLRs),
++ &(p_CcNode->countersArraySize));
++ if (err)
++ RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters"));
++
++ err = ValidateNextEngineParams(
++ h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
++ p_CcNode->statisticsMode);
++ if (GET_ERROR_TYPE(err) != E_NOT_SUPPORTED)
++ RETURN_ERROR(
++ MAJOR,
++ err,
++ ("MissNextEngineParams for the node of the type IC_INDEX_HASH has to be UnInitialized"));
++
++ for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++)
++ {
++ p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp];
++
++ if (p_KeyParams->p_Mask || p_KeyParams->p_Key)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For Node of the type IC_HASH_INDEXED p_Key or p_Mask has to be NULL"));
++
++ if ((glblMask & (tmp * 16)) == (tmp * 16))
++ {
++ err = ValidateNextEngineParams(h_FmPcd,
++ &p_KeyParams->ccNextEngineParams,
++ p_CcNode->statisticsMode);
++ if (err)
++ RETURN_ERROR(
++ MAJOR,
++ err,
++ ("This index has to be initialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask "));
++
++ if (p_KeyParams->ccNextEngineParams.h_Manip)
++ {
++ err = FmPcdManipCheckParamsForCcNextEngine(
++ &p_KeyParams->ccNextEngineParams, &requiredAction);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ p_CcNode->keyAndNextEngineParams[tmp].requiredAction =
++ requiredAction;
++ }
++
++ memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams,
++ &p_KeyParams->ccNextEngineParams,
++ sizeof(t_FmPcdCcNextEngineParams));
++
++ if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip)
++ {
++ err =
++ AllocAndFillAdForContLookupManip(
++ p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode);
++ if (err)
++ RETURN_ERROR(MAJOR, err, (NO_MSG));
++ }
++ }
++ else
++ {
++ err = ValidateNextEngineParams(h_FmPcd,
++ &p_KeyParams->ccNextEngineParams,
++ p_CcNode->statisticsMode);
++ if (GET_ERROR_TYPE(err) != E_NOT_SUPPORTED)
++ RETURN_ERROR(
++ MAJOR,
++ err,
++ ("This index has to be UnInitialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask"));
++ }
++ }
++
++ *isKeyTblAlloc = FALSE;
++ cpu_to_be16s(&glblMask);
++ memcpy(PTR_MOVE(p_CcNode->p_GlblMask, 2), &glblMask, 2);
++
++ return E_OK;
++}
++
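++/* Runtime-modification entry point: replaces the next-engine parameters of the
++ key at keyIndex and commits the change through DoDynamicChange(), taking the
++ shadow lock when a shadow area is in use (maxNumOfKeys is set). */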
++static t_Error ModifyNextEngineParamNode(
++ t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint16_t keyIndex,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
++ t_FmPcd *p_FmPcd;
++ t_List h_OldPointersLst, h_NewPointersLst;
++ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++
++ if (keyIndex >= p_CcNode->numOfKeys)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("keyIndex > previously cleared last index + 1"));
++
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++
++ INIT_LIST(&h_OldPointersLst);
++ INIT_LIST(&h_NewPointersLst);
++
++ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
++ e_MODIFY_STATE_CHANGE, FALSE,
++ FALSE, FALSE);
++ if (!p_ModifyKeyParams)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ if (p_CcNode->maxNumOfKeys
++ && !TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
++ {
++ XX_Free(p_ModifyKeyParams);
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = BuildNewNodeModifyNextEngine(h_FmPcd, p_CcNode, keyIndex,
++ p_FmPcdCcNextEngineParams,
++ &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams);
++ if (err)
++ {
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams, FALSE);
++
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++
++ ReleaseLst(&h_OldPointersLst);
++ ReleaseLst(&h_NewPointersLst);
++
++ return err;
++}
++
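++/* Looks up a key (and, optionally, a mask) in the node's key table. When no
++ mask is passed, only an entry whose stored mask is a full extraction mask
++ (all 0xFF) is considered a match. Returns E_NOT_FOUND if no entry matches. */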
++static t_Error FindKeyIndex(t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key,
++ uint8_t *p_Mask, uint16_t *p_KeyIndex)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint8_t tmpMask[FM_PCD_MAX_SIZE_OF_KEY];
++ uint16_t i;
++
++ ASSERT_COND(p_Key);
++ ASSERT_COND(p_KeyIndex);
++ ASSERT_COND(keySize < FM_PCD_MAX_SIZE_OF_KEY);
++
++ if (keySize != p_CcNode->userSizeOfExtraction)
++ RETURN_ERROR(
++ MINOR, E_INVALID_VALUE,
++ ("Key size doesn't match the extraction size of the node"));
++
++ /* If the user didn't pass a mask for this key, we'll look for a full extraction mask */
++ if (!p_Mask)
++ memset(tmpMask, 0xFF, keySize);
++
++ for (i = 0; i < p_CcNode->numOfKeys; i++)
++ {
++ /* Comparing received key */
++ if (memcmp(p_Key, p_CcNode->keyAndNextEngineParams[i].key, keySize)
++ == 0)
++ {
++ if (p_Mask)
++ {
++ /* If the user passed a mask for this key, it must match the existing key's mask exactly */
++ if (memcmp(p_Mask, p_CcNode->keyAndNextEngineParams[i].mask,
++ keySize) == 0)
++ {
++ *p_KeyIndex = i;
++ return E_OK;
++ }
++ }
++ else
++ {
++ /* If the user didn't pass a mask, check whether the existing key's mask is a full extraction mask */
++ if (memcmp(tmpMask, p_CcNode->keyAndNextEngineParams[i].mask,
++ keySize) == 0)
++ {
++ *p_KeyIndex = i;
++ return E_OK;
++ }
++ }
++ }
++ }
++
++ return ERROR_CODE(E_NOT_FOUND);
++}
++
++static t_Error CalcAndUpdateCcShadow(t_FmPcdCcNode *p_CcNode,
++ bool isKeyTblAlloc,
++ uint32_t *p_MatchTableSize,
++ uint32_t *p_AdTableSize)
++{
++ uint32_t shadowSize;
++ t_Error err;
++
++ /* Calculate the maximal size of the keys table - each entry consists of a key
++ and, if local mask support is requested, a mask */
++ *p_MatchTableSize = p_CcNode->ccKeySizeAccExtraction * sizeof(uint8_t)
++ * p_CcNode->maxNumOfKeys;
++
++ if (p_CcNode->maskSupport)
++ *p_MatchTableSize *= 2;
++
++ /* Calculate next action descriptors table, including one more entry for miss */
++ *p_AdTableSize = (uint32_t)((p_CcNode->maxNumOfKeys + 1)
++ * FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* Calculate the maximal shadow size of this node.
++ All shadow structures are used by the runtime-modification host commands. If
++ a keys table was allocated for this node, the keys table and next engines table
++ may be modified at runtime (entries added or removed), so shadow tables are required.
++ Otherwise, the only supported runtime modification is a specific next engine update,
++ which requires shadow memory of only a single AD */
++
++ /* Shadow size should be enough to hold the following 3 structures:
++ * 1 - an action descriptor */
++ shadowSize = FM_PCD_CC_AD_ENTRY_SIZE;
++
++ /* 2 - keys match table, if was allocated for the current node */
++ if (isKeyTblAlloc)
++ shadowSize += *p_MatchTableSize;
++
++ /* 3 - next action descriptors table */
++ shadowSize += *p_AdTableSize;
++
++ /* Update shadow to the calculated size */
++ err = FmPcdUpdateCcShadow(p_CcNode->h_FmPcd, (uint32_t)shadowSize,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (err != E_OK)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node shadow"));
++ }
++
++ return E_OK;
++}
++
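++/* Preallocates this node's pool of statistics objects, each holding one MURAM
++ statistics AD and one MURAM counters array; on any failure, all objects
++ allocated so far are released. */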
++static t_Error AllocStatsObjs(t_FmPcdCcNode *p_CcNode)
++{
++ t_FmPcdStatsObj *p_StatsObj;
++ t_Handle h_FmMuram, h_StatsAd, h_StatsCounters;
++ uint32_t i;
++
++ h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd);
++ if (!h_FmMuram)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM"));
++
++ /* Allocate statistics ADs and statistics counters. Extra pairs (AD + counters)
++ are allocated for the 'miss' entry and to support runtime modifications */
++ for (i = 0; i < p_CcNode->maxNumOfKeys + 2; i++)
++ {
++ /* Allocate list object structure */
++ p_StatsObj = XX_Malloc(sizeof(t_FmPcdStatsObj));
++ if (!p_StatsObj)
++ {
++ FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Statistics object"));
++ }
++ memset(p_StatsObj, 0, sizeof(t_FmPcdStatsObj));
++
++ /* Allocate statistics AD from MURAM */
++ h_StatsAd = (t_Handle)FM_MURAM_AllocMem(h_FmMuram,
++ FM_PCD_CC_AD_ENTRY_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!h_StatsAd)
++ {
++ FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
++ XX_Free(p_StatsObj);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for statistics ADs"));
++ }
++ MemSet8(h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* Allocate statistics counters from MURAM */
++ h_StatsCounters = (t_Handle)FM_MURAM_AllocMem(
++ h_FmMuram, p_CcNode->countersArraySize,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!h_StatsCounters)
++ {
++ FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
++ FM_MURAM_FreeMem(h_FmMuram, h_StatsAd);
++ XX_Free(p_StatsObj);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for statistics counters"));
++ }
++ MemSet8(h_StatsCounters, 0, p_CcNode->countersArraySize);
++
++ p_StatsObj->h_StatsAd = h_StatsAd;
++ p_StatsObj->h_StatsCounters = h_StatsCounters;
++
++ EnqueueStatsObj(&p_CcNode->availableStatsLst, p_StatsObj);
++ }
++
++ return E_OK;
++}
++
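++/* Reads the statistics counters of a single key: the first counter is the byte
++ count, followed by one frame counter per frame length range; frameCount is
++ accumulated across all ranges. */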
++static t_Error MatchTableGetKeyStatistics(
++ t_FmPcdCcNode *p_CcNode, uint16_t keyIndex,
++ t_FmPcdCcKeyStatistics *p_KeyStatistics)
++{
++ uint32_t *p_StatsCounters, i;
++
++ if (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Statistics were not enabled for this match table"));
++
++ if (!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Statistics were not enabled for this key"));
++
++ memset(p_KeyStatistics, 0, sizeof(t_FmPcdCcKeyStatistics));
++
++ p_StatsCounters =
++ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsCounters;
++ ASSERT_COND(p_StatsCounters);
++
++ p_KeyStatistics->byteCount = GET_UINT32(*p_StatsCounters);
++
++ for (i = 1; i <= p_CcNode->numOfStatsFLRs; i++)
++ {
++ p_StatsCounters =
++ PTR_MOVE(p_StatsCounters, FM_PCD_CC_STATS_COUNTER_SIZE);
++
++ p_KeyStatistics->frameCount += GET_UINT32(*p_StatsCounters);
++
++#if (DPAA_VERSION >= 11)
++ p_KeyStatistics->frameLengthRangeCount[i - 1] =
++ GET_UINT32(*p_StatsCounters);
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++ return E_OK;
++}
++
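++/* Main construction routine for a match-table CC node: validates the parameters
++ per extraction type, allocates the MURAM keys match table and AD table (plus
++ shadow and statistics resources when maxNumOfKeys is set), fills one key and
++ one next-step AD per entry plus one more AD for the 'miss' entry, and
++ registers this node with the nodes and manips it points to. */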
++static t_Error MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNode *p_CcNode,
++ t_FmPcdCcNodeParams *p_CcNodeParam)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_FmPcdCcNode *p_FmPcdCcNextNode;
++ t_Error err = E_OK;
++ uint32_t tmp, keySize;
++ bool glblMask = FALSE;
++ t_FmPcdCcKeyParams *p_KeyParams;
++ t_Handle h_FmMuram, p_KeysMatchTblTmp, p_AdTableTmp;
++#if (DPAA_VERSION >= 11)
++ t_Handle h_StatsFLRs;
++#endif /* (DPAA_VERSION >= 11) */
++ bool fullField = FALSE;
++ ccPrivateInfo_t icCode = CC_PRIVATE_INFO_NONE;
++ bool isKeyTblAlloc, fromIc = FALSE;
++ uint32_t matchTableSize, adTableSize;
++ t_CcNodeInformation ccNodeInfo, *p_CcInformation;
++ t_FmPcdStatsObj *p_StatsObj;
++ t_FmPcdCcStatsParams statsParams = { 0 };
++ t_Handle h_Manip;
++
++ ASSERT_COND(h_FmPcd);
++ ASSERT_COND(p_CcNode);
++ ASSERT_COND(p_CcNodeParam);
++
++ p_CcNode->p_GlblMask = (t_Handle)XX_Malloc(
++ CC_GLBL_MASK_SIZE * sizeof(uint8_t));
++ /* Guard against allocation failure before zeroing the mask */
++ if (!p_CcNode->p_GlblMask)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC node global mask"));
++ memset(p_CcNode->p_GlblMask, 0, CC_GLBL_MASK_SIZE * sizeof(uint8_t));
++
++ p_CcNode->h_FmPcd = h_FmPcd;
++ p_CcNode->numOfKeys = p_CcNodeParam->keysParams.numOfKeys;
++ p_CcNode->maxNumOfKeys = p_CcNodeParam->keysParams.maxNumOfKeys;
++ p_CcNode->maskSupport = p_CcNodeParam->keysParams.maskSupport;
++ p_CcNode->statisticsMode = p_CcNodeParam->keysParams.statisticsMode;
++
++ /* For backward compatibility - even if statistics mode is nullified,
++ we'll fix it to frame mode so we can support per-key request for
++ statistics using 'statisticsEn' in next engine parameters */
++ if (!p_CcNode->maxNumOfKeys
++ && (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE))
++ p_CcNode->statisticsMode = e_FM_PCD_CC_STATS_MODE_FRAME;
++
++ h_FmMuram = FmPcdGetMuramHandle(h_FmPcd);
++ if (!h_FmMuram)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM"));
++
++ INIT_LIST(&p_CcNode->ccPrevNodesLst);
++ INIT_LIST(&p_CcNode->ccTreeIdLst);
++ INIT_LIST(&p_CcNode->ccTreesLst);
++ INIT_LIST(&p_CcNode->availableStatsLst);
++
++ p_CcNode->h_Spinlock = XX_InitSpinlock();
++ if (!p_CcNode->h_Spinlock)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC node spinlock"));
++ }
++
++ if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_BY_HDR)
++ && ((p_CcNodeParam->extractCcParams.extractByHdr.hdr
++ == HEADER_TYPE_IPv4)
++ || (p_CcNodeParam->extractCcParams.extractByHdr.hdr
++ == HEADER_TYPE_IPv6))
++ && (p_CcNodeParam->extractCcParams.extractByHdr.type
++ == e_FM_PCD_EXTRACT_FULL_FIELD)
++ && ((p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv6
++ == NET_HEADER_FIELD_IPv6_HOP_LIMIT)
++ || (p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv4
++ == NET_HEADER_FIELD_IPv4_TTL)))
++ {
++ err = Ipv4TtlOrIpv6HopLimitCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode,
++ &isKeyTblAlloc);
++ glblMask = FALSE;
++ }
++ else
++ if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_NON_HDR)
++ && ((p_CcNodeParam->extractCcParams.extractNonHdr.src
++ == e_FM_PCD_EXTRACT_FROM_KEY)
++ || (p_CcNodeParam->extractCcParams.extractNonHdr.src
++ == e_FM_PCD_EXTRACT_FROM_HASH)
++ || (p_CcNodeParam->extractCcParams.extractNonHdr.src
++ == e_FM_PCD_EXTRACT_FROM_FLOW_ID)))
++ {
++ if ((p_CcNodeParam->extractCcParams.extractNonHdr.src
++ == e_FM_PCD_EXTRACT_FROM_FLOW_ID)
++ && (p_CcNodeParam->extractCcParams.extractNonHdr.offset != 0))
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("In the case of the extraction from e_FM_PCD_EXTRACT_FROM_FLOW_ID offset has to be 0"));
++ }
++
++ icCode = IcDefineCode(p_CcNodeParam);
++ fromIc = TRUE;
++ if (icCode == CC_PRIVATE_INFO_NONE)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("user asked extraction from IC and field in internal context or action wasn't initialized in the right way"));
++ }
++
++ if ((icCode == CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP)
++ || (icCode == CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP))
++ {
++ err = IcHashIndexedCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode,
++ &isKeyTblAlloc);
++ glblMask = TRUE;
++ }
++ else
++ {
++ err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode,
++ &isKeyTblAlloc);
++ if (p_CcNode->glblMaskSize)
++ glblMask = TRUE;
++ }
++ }
++ else
++ {
++ err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc);
++ if (p_CcNode->glblMaskSize)
++ glblMask = TRUE;
++ }
++
++ if (err)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ switch (p_CcNodeParam->extractCcParams.type)
++ {
++ case (e_FM_PCD_EXTRACT_BY_HDR):
++ switch (p_CcNodeParam->extractCcParams.extractByHdr.type)
++ {
++ case (e_FM_PCD_EXTRACT_FULL_FIELD):
++ p_CcNode->parseCode =
++ GetFullFieldParseCode(
++ p_CcNodeParam->extractCcParams.extractByHdr.hdr,
++ p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex,
++ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField);
++ GetSizeHeaderField(
++ p_CcNodeParam->extractCcParams.extractByHdr.hdr,
++ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField,
++ &p_CcNode->sizeOfExtraction);
++ fullField = TRUE;
++ if ((p_CcNode->parseCode != CC_PC_FF_TCI1)
++ && (p_CcNode->parseCode != CC_PC_FF_TCI2)
++ && (p_CcNode->parseCode != CC_PC_FF_MPLS1)
++ && (p_CcNode->parseCode != CC_PC_FF_MPLS_LAST)
++ && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1)
++ && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2)
++ && (p_CcNode->parseCode
++ != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1)
++ && (p_CcNode->parseCode != CC_PC_FF_IPDSCP)
++ && (p_CcNode->parseCode
++ != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2)
++ && glblMask)
++ {
++ glblMask = FALSE;
++ p_CcNode->glblMaskSize = 4;
++ p_CcNode->lclMask = TRUE;
++ }
++ break;
++
++ case (e_FM_PCD_EXTRACT_FROM_HDR):
++ p_CcNode->sizeOfExtraction =
++ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.size;
++ p_CcNode->offset =
++ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset;
++ p_CcNode->userOffset =
++ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset;
++ p_CcNode->parseCode =
++ GetPrParseCode(
++ p_CcNodeParam->extractCcParams.extractByHdr.hdr,
++ p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex,
++ p_CcNode->offset, glblMask,
++ &p_CcNode->prsArrayOffset);
++ break;
++
++ case (e_FM_PCD_EXTRACT_FROM_FIELD):
++ p_CcNode->offset =
++ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset;
++ p_CcNode->userOffset =
++ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset;
++ p_CcNode->sizeOfExtraction =
++ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.size;
++ p_CcNode->parseCode =
++ GetFieldParseCode(
++ p_CcNodeParam->extractCcParams.extractByHdr.hdr,
++ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.field,
++ p_CcNode->offset,
++ &p_CcNode->prsArrayOffset,
++ p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex);
++ break;
++
++ default:
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ break;
++
++ case (e_FM_PCD_EXTRACT_NON_HDR):
++ /* get the field code for the generic extract */
++ p_CcNode->sizeOfExtraction =
++ p_CcNodeParam->extractCcParams.extractNonHdr.size;
++ p_CcNode->offset =
++ p_CcNodeParam->extractCcParams.extractNonHdr.offset;
++ p_CcNode->userOffset =
++ p_CcNodeParam->extractCcParams.extractNonHdr.offset;
++ p_CcNode->parseCode = GetGenParseCode(
++ p_CcNodeParam->extractCcParams.extractNonHdr.src,
++ p_CcNode->offset, glblMask, &p_CcNode->prsArrayOffset,
++ fromIc, icCode);
++
++ if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)
++ {
++ if ((p_CcNode->offset + p_CcNode->sizeOfExtraction) > 8)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_SELECTION,
++ ("when node of the type CC_PC_GENERIC_IC_HASH_INDEXED offset + size can not be bigger then size of HASH 64 bits (8 bytes)"));
++ }
++ }
++ if ((p_CcNode->parseCode == CC_PC_GENERIC_IC_GMASK)
++ || (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED))
++ {
++ p_CcNode->offset += p_CcNode->prsArrayOffset;
++ p_CcNode->prsArrayOffset = 0;
++ }
++ break;
++
++ default:
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ if (p_CcNode->parseCode == CC_PC_ILLEGAL)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("illegal extraction type"));
++ }
++
++ if ((p_CcNode->sizeOfExtraction > FM_PCD_MAX_SIZE_OF_KEY)
++ || !p_CcNode->sizeOfExtraction)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("sizeOfExatrction can not be greater than 56 and not 0"));
++ }
++
++ if (p_CcNodeParam->keysParams.keySize != p_CcNode->sizeOfExtraction)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("keySize has to be equal to sizeOfExtraction"));
++ }
++
++ p_CcNode->userSizeOfExtraction = p_CcNode->sizeOfExtraction;
++
++ if (!glblMask)
++ memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t));
++
++ err = CheckAndSetManipParamsWithCcNodeParams(p_CcNode);
++ if (err != E_OK)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, err,
++ ("Manip parameters are not consistent with this CC node"));
++ }
++
++ /* Calculating matching table entry size by rounding up the user-defined size of extraction to valid entry size */
++ GetCcExtractKeySize(p_CcNode->sizeOfExtraction,
++ &p_CcNode->ccKeySizeAccExtraction);
++
++ /* If local mask is used, it is stored next to each key in the keys match table */
++ if (p_CcNode->lclMask)
++ keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction);
++ else
++ keySize = p_CcNode->ccKeySizeAccExtraction;
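++
++ /* For example, with a ccKeySizeAccExtraction of 8 bytes and local masks
++ in use, each keys match table entry occupies 16 bytes: the key followed
++ by its mask. */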
++
++ /* Update CC shadow with maximal size required by this node */
++ if (p_CcNode->maxNumOfKeys)
++ {
++ err = CalcAndUpdateCcShadow(p_CcNode, isKeyTblAlloc, &matchTableSize,
++ &adTableSize);
++ if (err != E_OK)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ p_CcNode->keysMatchTableMaxSize = matchTableSize;
++
++ if (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE)
++ {
++ err = AllocStatsObjs(p_CcNode);
++ if (err != E_OK)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++
++ /* If a manipulation is initialized before this node, it will use the table
++ descriptor in the AD table of the previous node, and this node will need an
++ extra AD as its own table descriptor. */
++ p_CcNode->h_TmpAd = (t_Handle)FM_MURAM_AllocMem(
++ h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_CcNode->h_TmpAd)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for CC action descriptor"));
++ }
++ }
++ else
++ {
++ matchTableSize = (uint32_t)(keySize * sizeof(uint8_t)
++ * (p_CcNode->numOfKeys + 1));
++ adTableSize = (uint32_t)(FM_PCD_CC_AD_ENTRY_SIZE
++ * (p_CcNode->numOfKeys + 1));
++ }
++
++#if (DPAA_VERSION >= 11)
++ switch (p_CcNode->statisticsMode)
++ {
++
++ case e_FM_PCD_CC_STATS_MODE_RMON:
++ /* If RMON statistics or RMON conditional statistics modes are requested,
++ allocate frame length ranges array */
++ p_CcNode->h_StatsFLRs = FM_MURAM_AllocMem(
++ h_FmMuram,
++ (uint32_t)(p_CcNode->numOfStatsFLRs)
++ * FM_PCD_CC_STATS_FLR_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++
++ if (!p_CcNode->h_StatsFLRs)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(
++ MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for CC frame length ranges array"));
++ }
++
++ /* Initialize using value received from the user */
++ for (tmp = 0; tmp < p_CcNode->numOfStatsFLRs; tmp++)
++ {
++ uint16_t flr =
++ cpu_to_be16(p_CcNodeParam->keysParams.frameLengthRanges[tmp]);
++
++ h_StatsFLRs =
++ PTR_MOVE(p_CcNode->h_StatsFLRs, tmp * FM_PCD_CC_STATS_FLR_SIZE);
++
++ MemCpy8(h_StatsFLRs,
++ &flr,
++ FM_PCD_CC_STATS_FLR_SIZE);
++ }
++ break;
++
++ default:
++ break;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ /* Allocate keys match table. Not required for some CC nodes, for example for IPv4 TTL
++ identification, IPv6 hop count identification, etc. */
++ if (isKeyTblAlloc)
++ {
++ p_CcNode->h_KeysMatchTable = (t_Handle)FM_MURAM_AllocMem(
++ h_FmMuram, matchTableSize, FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN);
++ if (!p_CcNode->h_KeysMatchTable)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for CC node key match table"));
++ }
++ MemSet8((uint8_t *)p_CcNode->h_KeysMatchTable, 0, matchTableSize);
++ }
++
++ /* Allocate action descriptors table */
++ p_CcNode->h_AdTable = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, adTableSize,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_CcNode->h_AdTable)
++ {
++ DeleteNode(p_CcNode);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for CC node action descriptors table"));
++ }
++ MemSet8((uint8_t *)p_CcNode->h_AdTable, 0, adTableSize);
++
++ p_KeysMatchTblTmp = p_CcNode->h_KeysMatchTable;
++ p_AdTableTmp = p_CcNode->h_AdTable;
++
++ /* For each key, create the key and the next step AD */
++ for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++)
++ {
++ p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp];
++
++ if (p_KeysMatchTblTmp)
++ {
++ /* Copy the key */
++ MemCpy8((void*)p_KeysMatchTblTmp, p_KeyParams->p_Key,
++ p_CcNode->sizeOfExtraction);
++
++ /* Copy the key mask or initialize it to 0xFF..F */
++ if (p_CcNode->lclMask && p_KeyParams->p_Mask)
++ {
++ MemCpy8(PTR_MOVE(p_KeysMatchTblTmp,
++ p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */
++ p_KeyParams->p_Mask, p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */
++ }
++ else
++ if (p_CcNode->lclMask)
++ {
++ MemSet8(PTR_MOVE(p_KeysMatchTblTmp,
++ p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */
++ 0xff, p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */
++ }
++
++ p_KeysMatchTblTmp =
++ PTR_MOVE(p_KeysMatchTblTmp, keySize * sizeof(uint8_t));
++ }
++
++ /* Create the next action descriptor in the match table */
++ if (p_KeyParams->ccNextEngineParams.statisticsEn)
++ {
++ p_StatsObj = GetStatsObj(p_CcNode);
++ ASSERT_COND(p_StatsObj);
++
++ statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
++ statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
++#if (DPAA_VERSION >= 11)
++ statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs;
++
++#endif /* (DPAA_VERSION >= 11) */
++ NextStepAd(p_AdTableTmp, &statsParams,
++ &p_KeyParams->ccNextEngineParams, p_FmPcd);
++
++ p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj;
++ }
++ else
++ {
++ NextStepAd(p_AdTableTmp, NULL, &p_KeyParams->ccNextEngineParams,
++ p_FmPcd);
++
++ p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL;
++ }
++
++ p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++ }
++
++ /* Update next engine for the 'miss' entry */
++ if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.statisticsEn)
++ {
++ p_StatsObj = GetStatsObj(p_CcNode);
++ ASSERT_COND(p_StatsObj);
++
++ /* All 'bucket' nodes of a hash table should share the same statistics counters,
++ allocated by the hash table. So, if this node is a bucket of a hash table,
++ we'll replace the locally allocated counters with the shared counters. */
++ if (p_CcNode->isHashBucket)
++ {
++ ASSERT_COND(p_CcNode->h_MissStatsCounters);
++
++ /* Store original counters pointer and replace it with mutual preallocated pointer */
++ p_CcNode->h_PrivMissStatsCounters = p_StatsObj->h_StatsCounters;
++ p_StatsObj->h_StatsCounters = p_CcNode->h_MissStatsCounters;
++ }
++
++ statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
++ statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
++#if (DPAA_VERSION >= 11)
++ statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs;
++
++#endif /* (DPAA_VERSION >= 11) */
++
++ NextStepAd(p_AdTableTmp, &statsParams,
++ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
++ p_FmPcd);
++
++ p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj;
++ }
++ else
++ {
++ NextStepAd(p_AdTableTmp, NULL,
++ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
++ p_FmPcd);
++
++ p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL;
++ }
++
++ /* This parameter will be used to initialize the "key length" field in the action descriptor
++ that points to this node and it should be 0 for full field extraction */
++ if (fullField == TRUE)
++ p_CcNode->sizeOfExtraction = 0;
++
++ for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++)
++ {
++ if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ {
++ p_FmPcdCcNextNode =
++ (t_FmPcdCcNode*)p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode;
++ p_CcInformation = FindNodeInfoInReleventLst(
++ &p_FmPcdCcNextNode->ccPrevNodesLst, (t_Handle)p_CcNode,
++ p_FmPcdCcNextNode->h_Spinlock);
++ if (!p_CcInformation)
++ {
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = (t_Handle)p_CcNode;
++ ccNodeInfo.index = 1;
++ EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccPrevNodesLst,
++ &ccNodeInfo,
++ p_FmPcdCcNextNode->h_Spinlock);
++ }
++ else
++ p_CcInformation->index++;
++
++ if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip)
++ {
++ h_Manip =
++ p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip;
++ p_CcInformation = FindNodeInfoInReleventLst(
++ FmPcdManipGetNodeLstPointedOnThisManip(h_Manip),
++ (t_Handle)p_CcNode, FmPcdManipGetSpinlock(h_Manip));
++ if (!p_CcInformation)
++ {
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = (t_Handle)p_CcNode;
++ ccNodeInfo.index = 1;
++ EnqueueNodeInfoToRelevantLst(
++ FmPcdManipGetNodeLstPointedOnThisManip(h_Manip),
++ &ccNodeInfo, FmPcdManipGetSpinlock(h_Manip));
++ }
++ else
++ p_CcInformation->index++;
++ }
++ }
++ }
++
++ p_AdTableTmp = p_CcNode->h_AdTable;
++
++ if (!FmPcdLockTryLockAll(h_FmPcd))
++ {
++ FM_PCD_MatchTableDelete((t_Handle)p_CcNode);
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ /* Required action for each next engine */
++ for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++)
++ {
++ if (p_CcNode->keyAndNextEngineParams[tmp].requiredAction)
++ {
++ err = SetRequiredAction(
++ h_FmPcd,
++ p_CcNode->keyAndNextEngineParams[tmp].requiredAction,
++ &p_CcNode->keyAndNextEngineParams[tmp], p_AdTableTmp, 1,
++ NULL);
++ if (err)
++ {
++ FmPcdLockUnlockAll(h_FmPcd);
++ FM_PCD_MatchTableDelete((t_Handle)p_CcNode);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++ }
++ }
++
++ FmPcdLockUnlockAll(h_FmPcd);
++
++ return E_OK;
++}
++/************************** End of static functions **************************/
++
++/*****************************************************************************/
++/* Inter-module API routines */
++/*****************************************************************************/
++
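++/* Searches p_List, under the protection of h_Spinlock, for the
++   t_CcNodeInformation entry whose h_CcNode matches h_Info;
++   returns that entry, or NULL if none is found. */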
++t_CcNodeInformation* FindNodeInfoInReleventLst(t_List *p_List, t_Handle h_Info,
++ t_Handle h_Spinlock)
++{
++ t_CcNodeInformation *p_CcInformation;
++ t_List *p_Pos;
++ uint32_t intFlags;
++
++ intFlags = XX_LockIntrSpinlock(h_Spinlock);
++
++ for (p_Pos = LIST_FIRST(p_List); p_Pos != (p_List);
++ p_Pos = LIST_NEXT(p_Pos))
++ {
++ p_CcInformation = CC_NODE_F_OBJECT(p_Pos);
++
++ ASSERT_COND(p_CcInformation->h_CcNode);
++
++ if (p_CcInformation->h_CcNode == h_Info)
++ {
++ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
++ return p_CcInformation;
++ }
++ }
++
++ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
++
++ return NULL;
++}
++
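++/* Allocates a copy of *p_CcInfo and appends it to p_List; the list update
++   is protected by h_Spinlock when one is provided. Reports E_NO_MEMORY
++   if the allocation fails. */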
++void EnqueueNodeInfoToRelevantLst(t_List *p_List, t_CcNodeInformation *p_CcInfo,
++ t_Handle h_Spinlock)
++{
++ t_CcNodeInformation *p_CcInformation;
++ uint32_t intFlags = 0;
++
++ p_CcInformation = (t_CcNodeInformation *)XX_Malloc(
++ sizeof(t_CcNodeInformation));
++
++ if (p_CcInformation)
++ {
++ memset(p_CcInformation, 0, sizeof(t_CcNodeInformation));
++ memcpy(p_CcInformation, p_CcInfo, sizeof(t_CcNodeInformation));
++ INIT_LIST(&p_CcInformation->node);
++
++ if (h_Spinlock)
++ intFlags = XX_LockIntrSpinlock(h_Spinlock);
++
++ LIST_AddToTail(&p_CcInformation->node, p_List);
++
++ if (h_Spinlock)
++ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
++ }
++ else
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC Node Information"));
++}
++
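++/* Removes from p_List, and frees, the entry whose h_CcNode matches h_Info;
++   the list update is protected by h_Spinlock when one is provided. */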
++void DequeueNodeInfoFromRelevantLst(t_List *p_List, t_Handle h_Info,
++ t_Handle h_Spinlock)
++{
++ t_CcNodeInformation *p_CcInformation = NULL;
++ uint32_t intFlags = 0;
++ t_List *p_Pos;
++
++ if (h_Spinlock)
++ intFlags = XX_LockIntrSpinlock(h_Spinlock);
++
++    if (LIST_IsEmpty(p_List))
++    {
++        if (h_Spinlock)
++            XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
++        return;
++    }
++
++ for (p_Pos = LIST_FIRST(p_List); p_Pos != (p_List);
++ p_Pos = LIST_NEXT(p_Pos))
++ {
++ p_CcInformation = CC_NODE_F_OBJECT(p_Pos);
++ ASSERT_COND(p_CcInformation);
++ ASSERT_COND(p_CcInformation->h_CcNode);
++ if (p_CcInformation->h_CcNode == h_Info)
++ break;
++ }
++
++ if (p_CcInformation)
++ {
++ LIST_DelAndInit(&p_CcInformation->node);
++ XX_Free(p_CcInformation);
++ }
++
++ if (h_Spinlock)
++ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
++}
++
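++/* Fills the action descriptor h_Ad according to the selected next engine:
++   a "result" type AD for KG/PLCR/DONE, and a "continue lookup" type AD
++   for CC (and, on DPAA 1.1 and above, for frame replication). */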
++void NextStepAd(t_Handle h_Ad, t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,
++ t_FmPcd *p_FmPcd)
++{
++ switch (p_FmPcdCcNextEngineParams->nextEngine)
++ {
++ case (e_FM_PCD_KG):
++ case (e_FM_PCD_PLCR):
++ case (e_FM_PCD_DONE):
++ /* if NIA is not CC, create a "result" type AD */
++ FillAdOfTypeResult(h_Ad, p_FmPcdCcStatsParams, p_FmPcd,
++ p_FmPcdCcNextEngineParams);
++ break;
++#if (DPAA_VERSION >= 11)
++ case (e_FM_PCD_FR):
++ if (p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic)
++ {
++ FillAdOfTypeContLookup(
++ h_Ad, p_FmPcdCcStatsParams, p_FmPcd,
++ p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode,
++ p_FmPcdCcNextEngineParams->h_Manip,
++ p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic);
++ FrmReplicGroupUpdateOwner(
++ p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic,
++ TRUE/* add */);
++ }
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++
++ case (e_FM_PCD_CC):
++            /* NIA is CC: create a "continue lookup" type AD to proceed with the CC lookup */
++ FillAdOfTypeContLookup(
++ h_Ad, p_FmPcdCcStatsParams, p_FmPcd,
++ p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode,
++ p_FmPcdCcNextEngineParams->h_Manip, NULL);
++
++ UpdateNodeOwner(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode,
++ TRUE);
++ break;
++
++ default:
++ return;
++ }
++}
++
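++/* Attaches IP reassembly to a CC tree: reserves the tree's last entry for
++   IPv4 (and the one before it for IPv6, when present), points them at ADs
++   that invoke h_IpReassemblyManip, and optionally builds the reassembly
++   schemes. */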
++t_Error FmPcdCcTreeAddIPR(t_Handle h_FmPcd, t_Handle h_FmTree,
++ t_Handle h_NetEnv, t_Handle h_IpReassemblyManip,
++ bool createSchemes)
++{
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
++ t_FmPcdCcNextEngineParams nextEngineParams;
++ t_NetEnvParams netEnvParams;
++ t_Handle h_Ad;
++ bool isIpv6Present;
++ uint8_t ipv4GroupId, ipv6GroupId;
++ t_Error err;
++
++ ASSERT_COND(p_FmPcdCcTree);
++
++ /* this routine must be protected by the calling routine! */
++
++ memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams));
++ memset(&netEnvParams, 0, sizeof(t_NetEnvParams));
++
++ h_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
++
++ isIpv6Present = FmPcdManipIpReassmIsIpv6Hdr(h_IpReassemblyManip);
++
++ if (isIpv6Present
++ && (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS - 2)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need two free entries for IPR"));
++
++ if (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need two free entries for IPR"));
++
++ nextEngineParams.nextEngine = e_FM_PCD_DONE;
++ nextEngineParams.h_Manip = h_IpReassemblyManip;
++
++ /* Lock tree */
++ err = CcRootTryLock(p_FmPcdCcTree);
++ if (err)
++ return ERROR_CODE(E_BUSY);
++
++ if (p_FmPcdCcTree->h_IpReassemblyManip == h_IpReassemblyManip)
++ {
++ CcRootReleaseLock(p_FmPcdCcTree);
++ return E_OK;
++ }
++
++ if ((p_FmPcdCcTree->h_IpReassemblyManip)
++ && (p_FmPcdCcTree->h_IpReassemblyManip != h_IpReassemblyManip))
++ {
++ CcRootReleaseLock(p_FmPcdCcTree);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("This tree was previously updated with different IPR"));
++ }
++
++ /* Initialize IPR for the first time for this tree */
++ if (isIpv6Present)
++ {
++ ipv6GroupId = p_FmPcdCcTree->numOfGrps++;
++ p_FmPcdCcTree->fmPcdGroupParam[ipv6GroupId].baseGroupEntry =
++ (FM_PCD_MAX_NUM_OF_CC_GROUPS - 2);
++
++ if (createSchemes)
++ {
++ err = FmPcdManipBuildIpReassmScheme(h_FmPcd, h_NetEnv,
++ p_FmPcdCcTree,
++ h_IpReassemblyManip, FALSE,
++ ipv6GroupId);
++ if (err)
++ {
++ p_FmPcdCcTree->numOfGrps--;
++ CcRootReleaseLock(p_FmPcdCcTree);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++
++ NextStepAd(
++ PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-2) * FM_PCD_CC_AD_ENTRY_SIZE),
++ NULL, &nextEngineParams, h_FmPcd);
++ }
++
++ ipv4GroupId = p_FmPcdCcTree->numOfGrps++;
++ p_FmPcdCcTree->fmPcdGroupParam[ipv4GroupId].totalBitsMask = 0;
++ p_FmPcdCcTree->fmPcdGroupParam[ipv4GroupId].baseGroupEntry =
++ (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1);
++
++ if (createSchemes)
++ {
++ err = FmPcdManipBuildIpReassmScheme(h_FmPcd, h_NetEnv, p_FmPcdCcTree,
++ h_IpReassemblyManip, TRUE,
++ ipv4GroupId);
++ if (err)
++ {
++ p_FmPcdCcTree->numOfGrps--;
++ if (isIpv6Present)
++ {
++ p_FmPcdCcTree->numOfGrps--;
++ FmPcdManipDeleteIpReassmSchemes(h_IpReassemblyManip);
++ }
++ CcRootReleaseLock(p_FmPcdCcTree);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++
++ NextStepAd(
++ PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-1) * FM_PCD_CC_AD_ENTRY_SIZE),
++ NULL, &nextEngineParams, h_FmPcd);
++
++ p_FmPcdCcTree->h_IpReassemblyManip = h_IpReassemblyManip;
++
++ CcRootReleaseLock(p_FmPcdCcTree);
++
++ return E_OK;
++}
++
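++/* Attaches CAPWAP reassembly to a CC tree: reserves the tree's last entry,
++   points it at an AD that invokes h_ReassemblyManip, and optionally builds
++   the reassembly scheme. */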
++t_Error FmPcdCcTreeAddCPR(t_Handle h_FmPcd, t_Handle h_FmTree,
++ t_Handle h_NetEnv, t_Handle h_ReassemblyManip,
++ bool createSchemes)
++{
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
++ t_FmPcdCcNextEngineParams nextEngineParams;
++ t_NetEnvParams netEnvParams;
++ t_Handle h_Ad;
++ uint8_t groupId;
++ t_Error err;
++
++ ASSERT_COND(p_FmPcdCcTree);
++
++ /* this routine must be protected by the calling routine! */
++ memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams));
++ memset(&netEnvParams, 0, sizeof(t_NetEnvParams));
++
++ h_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
++
++ if (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need one free entries for CPR"));
++
++ nextEngineParams.nextEngine = e_FM_PCD_DONE;
++ nextEngineParams.h_Manip = h_ReassemblyManip;
++
++ /* Lock tree */
++ err = CcRootTryLock(p_FmPcdCcTree);
++ if (err)
++ return ERROR_CODE(E_BUSY);
++
++ if (p_FmPcdCcTree->h_CapwapReassemblyManip == h_ReassemblyManip)
++ {
++ CcRootReleaseLock(p_FmPcdCcTree);
++ return E_OK;
++ }
++
++ if ((p_FmPcdCcTree->h_CapwapReassemblyManip)
++ && (p_FmPcdCcTree->h_CapwapReassemblyManip != h_ReassemblyManip))
++ {
++ CcRootReleaseLock(p_FmPcdCcTree);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("This tree was previously updated with different CPR"));
++ }
++
++ groupId = p_FmPcdCcTree->numOfGrps++;
++ p_FmPcdCcTree->fmPcdGroupParam[groupId].baseGroupEntry =
++ (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1);
++
++ if (createSchemes)
++ {
++ err = FmPcdManipBuildCapwapReassmScheme(h_FmPcd, h_NetEnv,
++ p_FmPcdCcTree,
++ h_ReassemblyManip, groupId);
++ if (err)
++ {
++ p_FmPcdCcTree->numOfGrps--;
++ CcRootReleaseLock(p_FmPcdCcTree);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++
++ NextStepAd(
++ PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-1) * FM_PCD_CC_AD_ENTRY_SIZE),
++ NULL, &nextEngineParams, h_FmPcd);
++
++ p_FmPcdCcTree->h_CapwapReassemblyManip = h_ReassemblyManip;
++
++ CcRootReleaseLock(p_FmPcdCcTree);
++
++ return E_OK;
++}
++
++t_Handle FmPcdCcTreeGetSavedManipParams(t_Handle h_FmTree)
++{
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
++
++ ASSERT_COND(p_FmPcdCcTree);
++
++ return p_FmPcdCcTree->h_FmPcdCcSavedManipParams;
++}
++
++void FmPcdCcTreeSetSavedManipParams(t_Handle h_FmTree,
++ t_Handle h_SavedManipParams)
++{
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
++
++ ASSERT_COND(p_FmPcdCcTree);
++
++ p_FmPcdCcTree->h_FmPcdCcSavedManipParams = h_SavedManipParams;
++}
++
++uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++
++ ASSERT_COND(p_CcNode);
++
++ return p_CcNode->parseCode;
++}
++
++uint8_t FmPcdCcGetOffset(t_Handle h_CcNode)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++
++ ASSERT_COND(p_CcNode);
++
++ return p_CcNode->offset;
++}
++
++uint16_t FmPcdCcGetNumOfKeys(t_Handle h_CcNode)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++
++ ASSERT_COND(p_CcNode);
++
++ return p_CcNode->numOfKeys;
++}
++
++t_Error FmPcdCcModifyNextEngineParamTree(
++ t_Handle h_FmPcd, t_Handle h_FmPcdCcTree, uint8_t grpId, uint8_t index,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
++ t_FmPcd *p_FmPcd;
++ t_List h_OldPointersLst, h_NewPointersLst;
++ uint16_t keyIndex;
++ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((grpId <= 7), E_INVALID_VALUE);
++
++ if (grpId >= p_FmPcdCcTree->numOfGrps)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE,
++ ("grpId you asked > numOfGroup of relevant tree"));
++
++ if (index >= p_FmPcdCcTree->fmPcdGroupParam[grpId].numOfEntriesInGroup)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("index > numOfEntriesInGroup"));
++
++ p_FmPcd = (t_FmPcd *)h_FmPcd;
++
++ INIT_LIST(&h_OldPointersLst);
++ INIT_LIST(&h_NewPointersLst);
++
++ keyIndex = (uint16_t)(p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry
++ + index);
++
++ p_ModifyKeyParams = ModifyNodeCommonPart(p_FmPcdCcTree, keyIndex,
++ e_MODIFY_STATE_CHANGE, FALSE,
++ FALSE, TRUE);
++ if (!p_ModifyKeyParams)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ p_ModifyKeyParams->tree = TRUE;
++
++ if (p_FmPcd->p_CcShadow
++ && !TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
++ {
++ XX_Free(p_ModifyKeyParams);
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = BuildNewNodeModifyNextEngine(p_FmPcd, p_FmPcdCcTree, keyIndex,
++ p_FmPcdCcNextEngineParams,
++ &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams);
++ if (err)
++ {
++ XX_Free(p_ModifyKeyParams);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams, FALSE);
++
++ if (p_FmPcd->p_CcShadow)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++
++ ReleaseLst(&h_OldPointersLst);
++ ReleaseLst(&h_NewPointersLst);
++
++ return err;
++}
++
++t_Error FmPcdCcRemoveKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
++ uint16_t keyIndex)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
++ t_FmPcd *p_FmPcd;
++ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
++ t_List h_OldPointersLst, h_NewPointersLst;
++ bool useShadowStructs = FALSE;
++ t_Error err = E_OK;
++
++ if (keyIndex >= p_CcNode->numOfKeys)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("impossible to remove key when numOfKeys <= keyIndex"));
++
++ if (p_CcNode->h_FmPcd != h_FmPcd)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("handler to FmPcd is different from the handle provided at node initialization time"));
++
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++
++ INIT_LIST(&h_OldPointersLst);
++ INIT_LIST(&h_NewPointersLst);
++
++ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
++ e_MODIFY_STATE_REMOVE, TRUE, TRUE,
++ FALSE);
++ if (!p_ModifyKeyParams)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ if (p_CcNode->maxNumOfKeys)
++ {
++ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
++ {
++ XX_Free(p_ModifyKeyParams);
++ return ERROR_CODE(E_BUSY);
++ }
++
++ useShadowStructs = TRUE;
++ }
++
++ err = BuildNewNodeRemoveKey(p_CcNode, keyIndex, p_ModifyKeyParams);
++ if (err)
++ {
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
++ &h_OldPointersLst,
++ &h_NewPointersLst);
++ if (err)
++ {
++ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams, useShadowStructs);
++
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++
++ ReleaseLst(&h_OldPointersLst);
++ ReleaseLst(&h_NewPointersLst);
++
++ return err;
++}
++
++t_Error FmPcdCcModifyKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
++ uint16_t keyIndex, uint8_t keySize, uint8_t *p_Key,
++ uint8_t *p_Mask)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
++ t_FmPcd *p_FmPcd;
++ t_List h_OldPointersLst, h_NewPointersLst;
++ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
++ uint16_t tmpKeyIndex;
++ bool useShadowStructs = FALSE;
++ t_Error err = E_OK;
++
++ if (keyIndex >= p_CcNode->numOfKeys)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("keyIndex > previously cleared last index + 1"));
++
++ if (keySize != p_CcNode->userSizeOfExtraction)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("size for ModifyKey has to be the same as defined in SetNode"));
++
++ if (p_CcNode->h_FmPcd != h_FmPcd)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("handler to FmPcd is different from the handle provided at node initialization time"));
++
++ err = FindKeyIndex(h_FmPcdCcNode, keySize, p_Key, p_Mask, &tmpKeyIndex);
++ if (GET_ERROR_TYPE(err) != E_NOT_FOUND)
++ RETURN_ERROR(
++ MINOR,
++ E_ALREADY_EXISTS,
++ ("The received key and mask pair was already found in the match table of the provided node"));
++
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++
++ INIT_LIST(&h_OldPointersLst);
++ INIT_LIST(&h_NewPointersLst);
++
++ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
++ e_MODIFY_STATE_CHANGE, TRUE, TRUE,
++ FALSE);
++ if (!p_ModifyKeyParams)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ if (p_CcNode->maxNumOfKeys)
++ {
++ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
++ {
++ XX_Free(p_ModifyKeyParams);
++ return ERROR_CODE(E_BUSY);
++ }
++
++ useShadowStructs = TRUE;
++ }
++
++ err = BuildNewNodeModifyKey(p_CcNode, keyIndex, p_Key, p_Mask,
++ p_ModifyKeyParams);
++ if (err)
++ {
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
++ &h_OldPointersLst,
++ &h_NewPointersLst);
++ if (err)
++ {
++ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams, useShadowStructs);
++
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++
++ ReleaseLst(&h_OldPointersLst);
++ ReleaseLst(&h_NewPointersLst);
++
++ return err;
++}
++
++t_Error FmPcdCcModifyMissNextEngineParamNode(
++ t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
++ t_FmPcd *p_FmPcd;
++ t_List h_OldPointersLst, h_NewPointersLst;
++ uint16_t keyIndex;
++ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_VALUE);
++
++ keyIndex = p_CcNode->numOfKeys;
++
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++
++ INIT_LIST(&h_OldPointersLst);
++ INIT_LIST(&h_NewPointersLst);
++
++ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
++ e_MODIFY_STATE_CHANGE, FALSE, TRUE,
++ FALSE);
++ if (!p_ModifyKeyParams)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ if (p_CcNode->maxNumOfKeys
++ && !TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
++ {
++ XX_Free(p_ModifyKeyParams);
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = BuildNewNodeModifyNextEngine(h_FmPcd, p_CcNode, keyIndex,
++ p_FmPcdCcNextEngineParams,
++ &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams);
++ if (err)
++ {
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams, FALSE);
++
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++
++ ReleaseLst(&h_OldPointersLst);
++ ReleaseLst(&h_NewPointersLst);
++
++ return err;
++}
++
++t_Error FmPcdCcAddKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
++ uint16_t keyIndex, uint8_t keySize,
++ t_FmPcdCcKeyParams *p_FmPcdCcKeyParams)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
++ t_FmPcd *p_FmPcd;
++ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
++ t_List h_OldPointersLst, h_NewPointersLst;
++ bool useShadowStructs = FALSE;
++ uint16_t tmpKeyIndex;
++ t_Error err = E_OK;
++
++ if (keyIndex > p_CcNode->numOfKeys)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
++ ("keyIndex > previously cleared last index + 1"));
++
++ if (keySize != p_CcNode->userSizeOfExtraction)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("keySize has to be defined as it was defined in initialization step"));
++
++ if (p_CcNode->h_FmPcd != h_FmPcd)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("handler to FmPcd is different from the handle provided at node initialization time"));
++
++ if (p_CcNode->maxNumOfKeys)
++ {
++ if (p_CcNode->numOfKeys == p_CcNode->maxNumOfKeys)
++ RETURN_ERROR(
++ MAJOR,
++ E_FULL,
++ ("number of keys exceeds the maximal number of keys provided at node initialization time"));
++ }
++ else
++ if (p_CcNode->numOfKeys == FM_PCD_MAX_NUM_OF_KEYS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("number of keys can not be larger than %d", FM_PCD_MAX_NUM_OF_KEYS));
++
++ err = FindKeyIndex(h_FmPcdCcNode, keySize, p_FmPcdCcKeyParams->p_Key,
++ p_FmPcdCcKeyParams->p_Mask, &tmpKeyIndex);
++ if (GET_ERROR_TYPE(err) != E_NOT_FOUND)
++ RETURN_ERROR(
++ MAJOR,
++ E_ALREADY_EXISTS,
++ ("The received key and mask pair was already found in the match table of the provided node"));
++
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++
++ INIT_LIST(&h_OldPointersLst);
++ INIT_LIST(&h_NewPointersLst);
++
++ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
++ e_MODIFY_STATE_ADD, TRUE, TRUE,
++ FALSE);
++ if (!p_ModifyKeyParams)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ if (p_CcNode->maxNumOfKeys)
++ {
++ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
++ {
++ XX_Free(p_ModifyKeyParams);
++ return ERROR_CODE(E_BUSY);
++ }
++
++ useShadowStructs = TRUE;
++ }
++
++ err = BuildNewNodeAddOrMdfyKeyAndNextEngine(h_FmPcd, p_CcNode, keyIndex,
++ p_FmPcdCcKeyParams,
++ p_ModifyKeyParams, TRUE);
++ if (err)
++ {
++ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
++ &h_OldPointersLst,
++ &h_NewPointersLst);
++ if (err)
++ {
++ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams, useShadowStructs);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++
++ ReleaseLst(&h_OldPointersLst);
++ ReleaseLst(&h_NewPointersLst);
++
++ return err;
++}
++
++t_Error FmPcdCcModifyKeyAndNextEngine(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
++ uint16_t keyIndex, uint8_t keySize,
++ t_FmPcdCcKeyParams *p_FmPcdCcKeyParams)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
++ t_FmPcd *p_FmPcd;
++ t_List h_OldPointersLst, h_NewPointersLst;
++ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
++ uint16_t tmpKeyIndex;
++ bool useShadowStructs = FALSE;
++ t_Error err = E_OK;
++
++ if (keyIndex > p_CcNode->numOfKeys)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("keyIndex > previously cleared last index + 1"));
++
++ if (keySize != p_CcNode->userSizeOfExtraction)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("keySize has to be defined as it was defined in initialization step"));
++
++ if (p_CcNode->h_FmPcd != h_FmPcd)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("handler to FmPcd is different from the handle provided at node initialization time"));
++
++ err = FindKeyIndex(h_FmPcdCcNode, keySize, p_FmPcdCcKeyParams->p_Key,
++ p_FmPcdCcKeyParams->p_Mask, &tmpKeyIndex);
++ if (GET_ERROR_TYPE(err) != E_NOT_FOUND)
++ RETURN_ERROR(
++ MINOR,
++ E_ALREADY_EXISTS,
++ ("The received key and mask pair was already found in the match table of the provided node"));
++
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++
++ INIT_LIST(&h_OldPointersLst);
++ INIT_LIST(&h_NewPointersLst);
++
++ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
++ e_MODIFY_STATE_CHANGE, TRUE, TRUE,
++ FALSE);
++ if (!p_ModifyKeyParams)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ if (p_CcNode->maxNumOfKeys)
++ {
++ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
++ {
++ XX_Free(p_ModifyKeyParams);
++ return ERROR_CODE(E_BUSY);
++ }
++
++ useShadowStructs = TRUE;
++ }
++
++ err = BuildNewNodeAddOrMdfyKeyAndNextEngine(h_FmPcd, p_CcNode, keyIndex,
++ p_FmPcdCcKeyParams,
++ p_ModifyKeyParams, FALSE);
++ if (err)
++ {
++ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
++ &h_OldPointersLst,
++ &h_NewPointersLst);
++ if (err)
++ {
++ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
++ XX_Free(p_ModifyKeyParams);
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
++ p_ModifyKeyParams, useShadowStructs);
++
++ if (p_CcNode->maxNumOfKeys)
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++
++ ReleaseLst(&h_OldPointersLst);
++ ReleaseLst(&h_NewPointersLst);
++
++ return err;
++}
++
++uint32_t FmPcdCcGetNodeAddrOffsetFromNodeInfo(t_Handle h_FmPcd,
++ t_Handle h_Pointer)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_CcNodeInformation *p_CcNodeInfo;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE,
++ (uint32_t)ILLEGAL_BASE);
++
++ p_CcNodeInfo = CC_NODE_F_OBJECT(h_Pointer);
++
++ return (uint32_t)(XX_VirtToPhys(p_CcNodeInfo->h_CcNode)
++ - p_FmPcd->physicalMuramBase);
++}
++
++t_Error FmPcdCcGetGrpParams(t_Handle h_FmPcdCcTree, uint8_t grpId,
++ uint32_t *p_GrpBits, uint8_t *p_GrpBase)
++{
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);
++
++ if (grpId >= p_FmPcdCcTree->numOfGrps)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE,
++ ("grpId you asked > numOfGroup of relevant tree"));
++
++ *p_GrpBits = p_FmPcdCcTree->fmPcdGroupParam[grpId].totalBitsMask;
++ *p_GrpBase = p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry;
++
++ return E_OK;
++}
++
++t_Error FmPcdCcBindTree(t_Handle h_FmPcd, t_Handle h_PcdParams,
++ t_Handle h_FmPcdCcTree, uint32_t *p_Offset,
++ t_Handle h_FmPort)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);
++
++ /* this routine must be protected by the calling routine by locking all PCD modules! */
++
++ err = CcUpdateParams(h_FmPcd, h_PcdParams, h_FmPort, h_FmPcdCcTree, TRUE);
++
++ if (err == E_OK)
++ UpdateCcRootOwner(p_FmPcdCcTree, TRUE);
++
++ *p_Offset = (uint32_t)(XX_VirtToPhys(
++ UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr))
++ - p_FmPcd->physicalMuramBase);
++
++ return err;
++}
++
++t_Error FmPcdCcUnbindTree(t_Handle h_FmPcd, t_Handle h_FmPcdCcTree)
++{
++ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
++
++ /* this routine must be protected by the calling routine by locking all PCD modules! */
++
++ UNUSED(h_FmPcd);
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);
++
++ UpdateCcRootOwner(p_FmPcdCcTree, FALSE);
++
++ return E_OK;
++}
++
++t_Error FmPcdCcNodeTreeTryLock(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
++ t_List *p_List)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
++ t_List *p_Pos, *p_Tmp;
++ t_CcNodeInformation *p_CcNodeInfo, nodeInfo;
++ uint32_t intFlags;
++ t_Error err = E_OK;
++
++ intFlags = FmPcdLock(h_FmPcd);
++
++ LIST_FOR_EACH(p_Pos, &p_CcNode->ccTreesLst)
++ {
++ p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
++ ASSERT_COND(p_CcNodeInfo->h_CcNode);
++
++ err = CcRootTryLock(p_CcNodeInfo->h_CcNode);
++
++ if (err)
++ {
++ LIST_FOR_EACH(p_Tmp, &p_CcNode->ccTreesLst)
++ {
++ if (p_Tmp == p_Pos)
++ break;
++
++ CcRootReleaseLock(p_CcNodeInfo->h_CcNode);
++ }
++ break;
++ }
++
++ memset(&nodeInfo, 0, sizeof(t_CcNodeInformation));
++ nodeInfo.h_CcNode = p_CcNodeInfo->h_CcNode;
++ EnqueueNodeInfoToRelevantLst(p_List, &nodeInfo, NULL);
++ }
++
++ FmPcdUnlock(h_FmPcd, intFlags);
++ CORE_MemoryBarrier();
++
++ return err;
++}
++
++void FmPcdCcNodeTreeReleaseLock(t_Handle h_FmPcd, t_List *p_List)
++{
++ t_List *p_Pos;
++ t_CcNodeInformation *p_CcNodeInfo;
++ t_Handle h_FmPcdCcTree;
++ uint32_t intFlags;
++
++ intFlags = FmPcdLock(h_FmPcd);
++
++ LIST_FOR_EACH(p_Pos, p_List)
++ {
++ p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
++ h_FmPcdCcTree = p_CcNodeInfo->h_CcNode;
++ CcRootReleaseLock(h_FmPcdCcTree);
++ }
++
++ ReleaseLst(p_List);
++
++ FmPcdUnlock(h_FmPcd, intFlags);
++ CORE_MemoryBarrier();
++}
++
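++/* Makes sure the PCD-wide CC shadow buffer in MURAM is at least 'size'
++   bytes, aligned to 'align' (a power of 2). When a larger size or stricter
++   alignment is requested, the shadow is reallocated; if that fails, a
++   buffer with the previous parameters is restored and E_NO_MEMORY is
++   returned. */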
++t_Error FmPcdUpdateCcShadow(t_FmPcd *p_FmPcd, uint32_t size, uint32_t align)
++{
++ uint32_t intFlags;
++ uint32_t newSize = 0, newAlign = 0;
++ bool allocFail = FALSE;
++
++ ASSERT_COND(p_FmPcd);
++
++ if (!size)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("size must be larger then 0"));
++
++ if (!POWER_OF_2(align))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("alignment must be power of 2"));
++
++ newSize = p_FmPcd->ccShadowSize;
++ newAlign = p_FmPcd->ccShadowAlign;
++
++ /* Check if current shadow is large enough to hold the requested size */
++ if (size > p_FmPcd->ccShadowSize)
++ newSize = size;
++
++ /* Check if current shadow matches the requested alignment */
++ if (align > p_FmPcd->ccShadowAlign)
++ newAlign = align;
++
++ /* If a bigger shadow size or bigger shadow alignment are required,
++ a new shadow will be allocated */
++ if ((newSize != p_FmPcd->ccShadowSize)
++ || (newAlign != p_FmPcd->ccShadowAlign))
++ {
++ intFlags = FmPcdLock(p_FmPcd);
++
++ if (p_FmPcd->p_CcShadow)
++ {
++ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcd), p_FmPcd->p_CcShadow);
++ p_FmPcd->ccShadowSize = 0;
++ p_FmPcd->ccShadowAlign = 0;
++ }
++
++ p_FmPcd->p_CcShadow = FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd),
++ newSize, newAlign);
++ if (!p_FmPcd->p_CcShadow)
++ {
++ allocFail = TRUE;
++
++ /* If new shadow size allocation failed,
++ re-allocate with previous parameters */
++ p_FmPcd->p_CcShadow = FM_MURAM_AllocMem(
++ FmPcdGetMuramHandle(p_FmPcd), p_FmPcd->ccShadowSize,
++ p_FmPcd->ccShadowAlign);
++ }
++
++ FmPcdUnlock(p_FmPcd, intFlags);
++
++ if (allocFail)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for CC Shadow memory"));
++
++ p_FmPcd->ccShadowSize = newSize;
++ p_FmPcd->ccShadowAlign = newAlign;
++ }
++
++ return E_OK;
++}
++
++#if (DPAA_VERSION >= 11)
++void FmPcdCcGetAdTablesThatPointOnReplicGroup(t_Handle h_Node,
++ t_Handle h_ReplicGroup,
++ t_List *p_AdTables,
++ uint32_t *p_NumOfAdTables)
++{
++ t_FmPcdCcNode *p_CurrentNode = (t_FmPcdCcNode *)h_Node;
++ int i = 0;
++ void * p_AdTable;
++ t_CcNodeInformation ccNodeInfo;
++
++ ASSERT_COND(h_Node);
++ *p_NumOfAdTables = 0;
++
++    /* Search the current node for the entries that point to this replicator group and collect their ADs */
++ for (i = 0; i < p_CurrentNode->numOfKeys + 1; i++)
++ {
++ if ((p_CurrentNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_FR)
++ && ((p_CurrentNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic
++ == (t_Handle)h_ReplicGroup)))
++ {
++ /* save the current ad table in the list */
++ /* this entry uses the input replicator group */
++ p_AdTable =
++ PTR_MOVE(p_CurrentNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE);
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = p_AdTable;
++ EnqueueNodeInfoToRelevantLst(p_AdTables, &ccNodeInfo, NULL);
++ (*p_NumOfAdTables)++;
++ }
++ }
++
++ ASSERT_COND(i != p_CurrentNode->numOfKeys);
++}
++#endif /* (DPAA_VERSION >= 11) */
++/*********************** End of inter-module routines ************************/
++
++/****************************************/
++/* API Init unit functions */
++/****************************************/
++
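++/* Builds a CC root (tree): validates the per-group parameters, allocates
++   the tree's AD table in MURAM, fills one AD per group entry, and registers
++   the tree with every CC node it points to. Returns the tree handle, or
++   NULL on failure. */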
++t_Handle FM_PCD_CcRootBuild(t_Handle h_FmPcd,
++ t_FmPcdCcTreeParams *p_PcdGroupsParam)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_Error err = E_OK;
++ int i = 0, j = 0, k = 0;
++ t_FmPcdCcTree *p_FmPcdCcTree;
++ uint8_t numOfEntries;
++ t_Handle p_CcTreeTmp;
++ t_FmPcdCcGrpParams *p_FmPcdCcGroupParams;
++ t_FmPcdCcKeyAndNextEngineParams *p_Params, *p_KeyAndNextEngineParams;
++ t_NetEnvParams netEnvParams;
++ uint8_t lastOne = 0;
++ uint32_t requiredAction = 0;
++ t_FmPcdCcNode *p_FmPcdCcNextNode;
++ t_CcNodeInformation ccNodeInfo, *p_CcInformation;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_PcdGroupsParam, E_INVALID_HANDLE, NULL);
++
++ if (p_PcdGroupsParam->numOfGrps > FM_PCD_MAX_NUM_OF_CC_GROUPS)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfGrps should not exceed %d", FM_PCD_MAX_NUM_OF_CC_GROUPS));
++ return NULL;
++ }
++
++ p_FmPcdCcTree = (t_FmPcdCcTree*)XX_Malloc(sizeof(t_FmPcdCcTree));
++ if (!p_FmPcdCcTree)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("PCD tree structure"));
++ return NULL;
++ }
++ memset(p_FmPcdCcTree, 0, sizeof(t_FmPcdCcTree));
++ p_FmPcdCcTree->h_FmPcd = h_FmPcd;
++
++    p_Params = (t_FmPcdCcKeyAndNextEngineParams*)XX_Malloc(
++            FM_PCD_MAX_NUM_OF_CC_GROUPS
++                    * sizeof(t_FmPcdCcKeyAndNextEngineParams));
++    if (!p_Params)
++    {
++        DeleteTree(p_FmPcdCcTree, p_FmPcd);
++        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC tree key and next engine params"));
++        return NULL;
++    }
++    memset(p_Params,
++           0,
++           FM_PCD_MAX_NUM_OF_CC_GROUPS
++                   * sizeof(t_FmPcdCcKeyAndNextEngineParams));
++
++ INIT_LIST(&p_FmPcdCcTree->fmPortsLst);
++
++#ifdef FM_CAPWAP_SUPPORT
++ if ((p_PcdGroupsParam->numOfGrps == 1) &&
++ (p_PcdGroupsParam->ccGrpParams[0].numOfDistinctionUnits == 0) &&
++ (p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].nextEngine == e_FM_PCD_CC) &&
++ p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode &&
++ IsCapwapApplSpecific(p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode))
++ {
++ p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip = FmPcdManipApplSpecificBuild();
++ if (!p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip)
++ {
++ DeleteTree(p_FmPcdCcTree,p_FmPcd);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++ return NULL;
++ }
++ }
++#endif /* FM_CAPWAP_SUPPORT */
++
++ numOfEntries = 0;
++ p_FmPcdCcTree->netEnvId = FmPcdGetNetEnvId(p_PcdGroupsParam->h_NetEnv);
++
++ for (i = 0; i < p_PcdGroupsParam->numOfGrps; i++)
++ {
++ p_FmPcdCcGroupParams = &p_PcdGroupsParam->ccGrpParams[i];
++
++ if (p_FmPcdCcGroupParams->numOfDistinctionUnits
++ > FM_PCD_MAX_NUM_OF_CC_UNITS)
++ {
++ DeleteTree(p_FmPcdCcTree, p_FmPcd);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("numOfDistinctionUnits (group %d) should not exceed %d", i, FM_PCD_MAX_NUM_OF_CC_UNITS));
++ return NULL;
++ }
++
++ p_FmPcdCcTree->fmPcdGroupParam[i].baseGroupEntry = numOfEntries;
++ p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup = (uint8_t)(0x01
++ << p_FmPcdCcGroupParams->numOfDistinctionUnits);
++ numOfEntries += p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
++ if (numOfEntries > FM_PCD_MAX_NUM_OF_CC_GROUPS)
++ {
++ DeleteTree(p_FmPcdCcTree, p_FmPcd);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries can not be larger than %d", FM_PCD_MAX_NUM_OF_CC_GROUPS));
++ return NULL;
++ }
++
++ if (lastOne)
++ {
++ if (p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup > lastOne)
++ {
++ DeleteTree(p_FmPcdCcTree, p_FmPcd);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, E_CONFLICT, ("numOfEntries per group must be set in descending order"));
++ return NULL;
++ }
++ }
++
++ lastOne = p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
++
++ netEnvParams.netEnvId = p_FmPcdCcTree->netEnvId;
++ netEnvParams.numOfDistinctionUnits =
++ p_FmPcdCcGroupParams->numOfDistinctionUnits;
++
++ memcpy(netEnvParams.unitIds, &p_FmPcdCcGroupParams->unitIds,
++ (sizeof(uint8_t)) * p_FmPcdCcGroupParams->numOfDistinctionUnits);
++
++ err = PcdGetUnitsVector(p_FmPcd, &netEnvParams);
++ if (err)
++ {
++ DeleteTree(p_FmPcdCcTree, p_FmPcd);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return NULL;
++ }
++
++ p_FmPcdCcTree->fmPcdGroupParam[i].totalBitsMask = netEnvParams.vector;
++ for (j = 0; j < p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
++ j++)
++ {
++ err = ValidateNextEngineParams(
++ h_FmPcd,
++ &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
++ e_FM_PCD_CC_STATS_MODE_NONE);
++ if (err)
++ {
++ DeleteTree(p_FmPcdCcTree, p_FmPcd);
++ XX_Free(p_Params);
++                REPORT_ERROR(MAJOR, err, NO_MSG);
++ return NULL;
++ }
++
++ if (p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j].h_Manip)
++ {
++ err = FmPcdManipCheckParamsForCcNextEngine(
++ &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
++ &requiredAction);
++ if (err)
++ {
++ DeleteTree(p_FmPcdCcTree, p_FmPcd);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++ return NULL;
++ }
++ }
++ p_KeyAndNextEngineParams = p_Params + k;
++
++ memcpy(&p_KeyAndNextEngineParams->nextEngineParams,
++ &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
++ sizeof(t_FmPcdCcNextEngineParams));
++
++ if ((p_KeyAndNextEngineParams->nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ && p_KeyAndNextEngineParams->nextEngineParams.h_Manip)
++ {
++ err =
++ AllocAndFillAdForContLookupManip(
++ p_KeyAndNextEngineParams->nextEngineParams.params.ccParams.h_CcNode);
++ if (err)
++ {
++ DeleteTree(p_FmPcdCcTree, p_FmPcd);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree"));
++ return NULL;
++ }
++ }
++
++ requiredAction |= UPDATE_CC_WITH_TREE;
++ p_KeyAndNextEngineParams->requiredAction = requiredAction;
++
++ k++;
++ }
++ }
++
++ p_FmPcdCcTree->numOfEntries = (uint8_t)k;
++ p_FmPcdCcTree->numOfGrps = p_PcdGroupsParam->numOfGrps;
++
++ p_FmPcdCcTree->ccTreeBaseAddr =
++ PTR_TO_UINT(FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd),
++ (uint32_t)( FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE),
++ FM_PCD_CC_TREE_ADDR_ALIGN));
++ if (!p_FmPcdCcTree->ccTreeBaseAddr)
++ {
++ DeleteTree(p_FmPcdCcTree, p_FmPcd);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree"));
++ return NULL;
++ }
++ MemSet8(
++ UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr), 0,
++ (uint32_t)(FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE));
++
++ p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
++
++ for (i = 0; i < numOfEntries; i++)
++ {
++ p_KeyAndNextEngineParams = p_Params + i;
++
++ NextStepAd(p_CcTreeTmp, NULL,
++ &p_KeyAndNextEngineParams->nextEngineParams, p_FmPcd);
++
++ p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ memcpy(&p_FmPcdCcTree->keyAndNextEngineParams[i],
++ p_KeyAndNextEngineParams,
++ sizeof(t_FmPcdCcKeyAndNextEngineParams));
++
++ if (p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ {
++ p_FmPcdCcNextNode =
++ (t_FmPcdCcNode*)p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
++ p_CcInformation = FindNodeInfoInReleventLst(
++ &p_FmPcdCcNextNode->ccTreeIdLst, (t_Handle)p_FmPcdCcTree,
++ p_FmPcdCcNextNode->h_Spinlock);
++
++ if (!p_CcInformation)
++ {
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcTree;
++ ccNodeInfo.index = 1;
++ EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccTreeIdLst,
++ &ccNodeInfo,
++ p_FmPcdCcNextNode->h_Spinlock);
++ }
++ else
++ p_CcInformation->index++;
++ }
++ }
++
++ FmPcdIncNetEnvOwners(h_FmPcd, p_FmPcdCcTree->netEnvId);
++ p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ FM_PCD_CcRootDelete(p_FmPcdCcTree);
++ XX_Free(p_Params);
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return NULL;
++ }
++
++ for (i = 0; i < numOfEntries; i++)
++ {
++ if (p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction)
++ {
++ err = SetRequiredAction(
++ h_FmPcd,
++ p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction,
++ &p_FmPcdCcTree->keyAndNextEngineParams[i], p_CcTreeTmp, 1,
++ p_FmPcdCcTree);
++ if (err)
++ {
++ FmPcdLockUnlockAll(p_FmPcd);
++ FM_PCD_CcRootDelete(p_FmPcdCcTree);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
++ return NULL;
++ }
++ p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
++ }
++ }
++
++ FmPcdLockUnlockAll(p_FmPcd);
++ p_FmPcdCcTree->p_Lock = FmPcdAcquireLock(p_FmPcd);
++ if (!p_FmPcdCcTree->p_Lock)
++ {
++ FM_PCD_CcRootDelete(p_FmPcdCcTree);
++ XX_Free(p_Params);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM CC lock"));
++ return NULL;
++ }
++
++ XX_Free(p_Params);
++
++ return p_FmPcdCcTree;
++}
++
++t_Error FM_PCD_CcRootDelete(t_Handle h_CcTree)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree;
++ int i = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_CcTree, E_INVALID_STATE);
++ p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ FmPcdDecNetEnvOwners(p_FmPcd, p_CcTree->netEnvId);
++
++ if (p_CcTree->owners)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_SELECTION,
++ ("the tree with this ID can not be removed because this tree is occupied, first - unbind this tree"));
++
++ /* Delete ip-reassembly schemes if exist */
++ if (p_CcTree->h_IpReassemblyManip)
++ {
++ FmPcdManipDeleteIpReassmSchemes(p_CcTree->h_IpReassemblyManip);
++ FmPcdManipUpdateOwner(p_CcTree->h_IpReassemblyManip, FALSE);
++ }
++
++ /* Delete capwap-reassembly schemes if exist */
++ if (p_CcTree->h_CapwapReassemblyManip)
++ {
++ FmPcdManipDeleteCapwapReassmSchemes(p_CcTree->h_CapwapReassemblyManip);
++ FmPcdManipUpdateOwner(p_CcTree->h_CapwapReassemblyManip, FALSE);
++ }
++
++ for (i = 0; i < p_CcTree->numOfEntries; i++)
++ {
++ if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ UpdateNodeOwner(
++ p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode,
++ FALSE);
++
++ if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
++ FmPcdManipUpdateOwner(
++ p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip,
++ FALSE);
++
++#ifdef FM_CAPWAP_SUPPORT
++ if ((p_CcTree->numOfGrps == 1) &&
++ (p_CcTree->fmPcdGroupParam[0].numOfEntriesInGroup == 1) &&
++ (p_CcTree->keyAndNextEngineParams[0].nextEngineParams.nextEngine == e_FM_PCD_CC) &&
++ p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode &&
++ IsCapwapApplSpecific(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode))
++ {
++ if (FM_PCD_ManipNodeDelete(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.h_Manip) != E_OK)
++ return E_INVALID_STATE;
++ }
++#endif /* FM_CAPWAP_SUPPORT */
++
++#if (DPAA_VERSION >= 11)
++ if ((p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_FR)
++ && (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic))
++ FrmReplicGroupUpdateOwner(
++ p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic,
++ FALSE);
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++ if (p_CcTree->p_Lock)
++ FmPcdReleaseLock(p_CcTree->h_FmPcd, p_CcTree->p_Lock);
++
++ DeleteTree(p_CcTree, p_FmPcd);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_CcRootModifyNextEngine(
++ t_Handle h_CcTree, uint8_t grpId, uint8_t index,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcTree, E_INVALID_STATE);
++ p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FmPcdCcModifyNextEngineParamTree(p_FmPcd, p_CcTree, grpId, index,
++ p_FmPcdCcNextEngineParams);
++ FmPcdLockUnlockAll(p_FmPcd);
++
++ if (err)
++ {
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ return E_OK;
++}
++
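++/* Creates a match table (CC node) from the user parameters; returns the
++   node handle, or NULL on failure or when the PCD lock is contended. */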
++t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd,
++ t_FmPcdCcNodeParams *p_CcNodeParam)
++{
++ t_FmPcdCcNode *p_CcNode;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_CcNodeParam, E_NULL_POINTER, NULL);
++
++ p_CcNode = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode));
++ if (!p_CcNode)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
++ return NULL;
++ }
++ memset(p_CcNode, 0, sizeof(t_FmPcdCcNode));
++
++ err = MatchTableSet(h_FmPcd, p_CcNode, p_CcNodeParam);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ break;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return NULL;
++
++ default:
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return NULL;
++ }
++
++ return p_CcNode;
++}
++
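++/* Illustrative call sequence for the match-table API (a sketch only; the
++ * handles and parameter values below are placeholders, not defined here):
++ *
++ *     t_FmPcdCcNodeParams nodeParams;
++ *     t_FmPcdCcKeyParams keyParams;
++ *     t_Handle h_Node;
++ *     t_Error err;
++ *
++ *     memset(&nodeParams, 0, sizeof(nodeParams));
++ *     memset(&keyParams, 0, sizeof(keyParams));
++ *     // ... fill the extraction and keysParams fields ...
++ *
++ *     h_Node = FM_PCD_MatchTableSet(h_FmPcd, &nodeParams);
++ *     if (h_Node)
++ *     {
++ *         err = FM_PCD_MatchTableAddKey(h_Node, FM_PCD_LAST_KEY_INDEX,
++ *                                       keySize, &keyParams);
++ *         // E_BUSY means the PCD lock was contended and the call
++ *         // may simply be retried
++ *         FM_PCD_MatchTableDelete(h_Node);
++ *     }
++ */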
++t_Error FM_PCD_MatchTableDelete(t_Handle h_CcNode)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ int i = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode->h_FmPcd, E_INVALID_HANDLE);
++
++ if (p_CcNode->owners)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("This node cannot be removed because it is occupied; first unbind this node"));
++
++ for (i = 0; i < p_CcNode->numOfKeys; i++)
++ if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ UpdateNodeOwner(
++ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode,
++ FALSE);
++
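++    /* After the loop above i == numOfKeys, so this check releases the
++       CC node referenced by the Miss entry, if any */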
++ if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ UpdateNodeOwner(
++ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode,
++ FALSE);
++
++ /* Handle also Miss entry */
++ for (i = 0; i < p_CcNode->numOfKeys + 1; i++)
++ {
++ if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
++ FmPcdManipUpdateOwner(
++ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip,
++ FALSE);
++
++#if (DPAA_VERSION >= 11)
++ if ((p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_FR)
++ && (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic))
++ {
++ FrmReplicGroupUpdateOwner(
++ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic,
++ FALSE);
++ }
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++ DeleteNode(p_CcNode);
++
++ return E_OK;
++}
++
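++/* Adds a key at keyIndex (or appends it, when keyIndex is
++   FM_PCD_LAST_KEY_INDEX) to a match table; returns E_BUSY when the PCD
++   lock is contended, in which case the call may be retried. */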
++t_Error FM_PCD_MatchTableAddKey(t_Handle h_CcNode, uint16_t keyIndex,
++ uint8_t keySize,
++ t_FmPcdCcKeyParams *p_KeyParams)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (keyIndex == FM_PCD_LAST_KEY_INDEX)
++ keyIndex = p_CcNode->numOfKeys;
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FmPcdCcAddKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_KeyParams);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
++t_Error FM_PCD_MatchTableRemoveKey(t_Handle h_CcNode, uint16_t keyIndex)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FmPcdCcRemoveKey(p_FmPcd, p_CcNode, keyIndex);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
++t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode, uint16_t keyIndex,
++ uint8_t keySize, uint8_t *p_Key,
++ uint8_t *p_Mask)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FmPcdCcModifyKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_Key, p_Mask);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
++t_Error FM_PCD_MatchTableModifyNextEngine(
++ t_Handle h_CcNode, uint16_t keyIndex,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = ModifyNextEngineParamNode(p_FmPcd, p_CcNode, keyIndex,
++ p_FmPcdCcNextEngineParams);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
++t_Error FM_PCD_MatchTableModifyMissNextEngine(
++ t_Handle h_CcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FmPcdCcModifyMissNextEngineParamNode(p_FmPcd, p_CcNode,
++ p_FmPcdCcNextEngineParams);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
++t_Error FM_PCD_MatchTableModifyKeyAndNextEngine(t_Handle h_CcNode,
++ uint16_t keyIndex,
++ uint8_t keySize,
++ t_FmPcdCcKeyParams *p_KeyParams)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FmPcdCcModifyKeyAndNextEngine(p_FmPcd, p_CcNode, keyIndex, keySize,
++ p_KeyParams);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
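++/* Looks up the given key/mask pair in the match table and removes it;
++   fails if the pair is not present. */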
++t_Error FM_PCD_MatchTableFindNRemoveKey(t_Handle h_CcNode, uint8_t keySize,
++ uint8_t *p_Key, uint8_t *p_Mask)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint16_t keyIndex;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
++ if (GET_ERROR_TYPE(err) != E_OK)
++ {
++ FmPcdLockUnlockAll(p_FmPcd);
++ RETURN_ERROR(
++ MAJOR,
++ err,
++ ("The received key and mask pair was not found in the match table of the provided node"));
++ }
++
++ err = FmPcdCcRemoveKey(p_FmPcd, p_CcNode, keyIndex);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
++t_Error FM_PCD_MatchTableFindNModifyNextEngine(
++ t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint16_t keyIndex;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
++ if (GET_ERROR_TYPE(err) != E_OK)
++ {
++ FmPcdLockUnlockAll(p_FmPcd);
++ RETURN_ERROR(
++ MAJOR,
++ err,
++ ("The received key and mask pair was not found in the match table of the provided node"));
++ }
++
++ err = ModifyNextEngineParamNode(p_FmPcd, p_CcNode, keyIndex,
++ p_FmPcdCcNextEngineParams);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
++t_Error FM_PCD_MatchTableFindNModifyKeyAndNextEngine(
++ t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask,
++ t_FmPcdCcKeyParams *p_KeyParams)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint16_t keyIndex;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
++ if (GET_ERROR_TYPE(err) != E_OK)
++ {
++ FmPcdLockUnlockAll(p_FmPcd);
++ RETURN_ERROR(
++ MAJOR,
++ err,
++ ("The received key and mask pair was not found in the match table of the provided node"));
++ }
++
++ err = FmPcdCcModifyKeyAndNextEngine(p_FmPcd, h_CcNode, keyIndex, keySize,
++ p_KeyParams);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
++t_Error FM_PCD_MatchTableFindNModifyKey(t_Handle h_CcNode, uint8_t keySize,
++ uint8_t *p_Key, uint8_t *p_Mask,
++ uint8_t *p_NewKey, uint8_t *p_NewMask)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ t_List h_List;
++ uint16_t keyIndex;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_NewKey, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ INIT_LIST(&h_List);
++
++ err = FmPcdCcNodeTreeTryLock(p_FmPcd, p_CcNode, &h_List);
++ if (err)
++ {
++ DBG(TRACE, ("Node's trees lock failed"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
++ if (GET_ERROR_TYPE(err) != E_OK)
++ {
++ FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List);
++ RETURN_ERROR(MAJOR, err,
++ ("The received key and mask pair was not found in the "
++ "match table of the provided node"));
++ }
++
++ err = FmPcdCcModifyKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_NewKey,
++ p_NewMask);
++
++ FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List);
++
++    switch (GET_ERROR_TYPE(err))
++    {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
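All of the Find-N-Modify variants above share one contract: take the node's tree (or PCD-wide) lock, perform the change, unlock, and map the result through GET_ERROR_TYPE(), where E_BUSY means the locks were contended and the call may simply be retried. A minimal caller-side sketch, assuming an existing h_CcNode and a hypothetical pair of 2-byte keys:

    uint8_t oldKey[2] = { 0x12, 0x34 }, newKey[2] = { 0x56, 0x78 };
    t_Error err;

    do {
        /* retry while the node's tree locks are held elsewhere */
        err = FM_PCD_MatchTableFindNModifyKey(h_CcNode, 2, oldKey, NULL,
                                              newKey, NULL);
    } while (GET_ERROR_TYPE(err) == E_BUSY);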
++
++t_Error FM_PCD_MatchTableGetNextEngine(
++ t_Handle h_CcNode, uint16_t keyIndex,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++
++ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
++
++ if (keyIndex >= p_CcNode->numOfKeys)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("keyIndex exceeds current number of keys"));
++
++ if (keyIndex > (FM_PCD_MAX_NUM_OF_KEYS - 1))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("keyIndex can not be larger than %d", (FM_PCD_MAX_NUM_OF_KEYS - 1)));
++
++ memcpy(p_FmPcdCcNextEngineParams,
++ &p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams,
++ sizeof(t_FmPcdCcNextEngineParams));
++
++ return E_OK;
++}
++
++
++uint32_t FM_PCD_MatchTableGetKeyCounter(t_Handle h_CcNode, uint16_t keyIndex)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint32_t *p_StatsCounters, frameCount;
++ uint32_t intFlags;
++
++ SANITY_CHECK_RETURN_VALUE(p_CcNode, E_INVALID_HANDLE, 0);
++
++ if (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this match table"));
++ return 0;
++ }
++
++ if ((p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_FRAME)
++ && (p_CcNode->statisticsMode
++ != e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Frame count is not supported in the statistics mode of this match table"));
++ return 0;
++ }
++
++ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
++
++ if (keyIndex >= p_CcNode->numOfKeys)
++ {
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("The provided keyIndex exceeds the number of keys in this match table"));
++ return 0;
++ }
++
++ if (!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
++ {
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this key"));
++ return 0;
++ }
++
++ p_StatsCounters =
++ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsCounters;
++ ASSERT_COND(p_StatsCounters);
++
++ /* The first counter is byte counter, so we need to advance to the next counter */
++ frameCount = GET_UINT32(*(uint32_t *)(PTR_MOVE(p_StatsCounters,
++ FM_PCD_CC_STATS_COUNTER_SIZE)));
++
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++
++ return frameCount;
++}
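PTR_MOVE() above is plain byte-pointer arithmetic: the byte counter occupies the first FM_PCD_CC_STATS_COUNTER_SIZE bytes of the statistics object and the frame counter sits in the next slot. An equivalent sketch of the frame-count read, for illustration only:

    uint32_t *p_FrameCnt = (uint32_t *)((uint8_t *)p_StatsCounters
                                        + FM_PCD_CC_STATS_COUNTER_SIZE);
    frameCount = GET_UINT32(*p_FrameCnt);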
++
++t_Error FM_PCD_MatchTableGetKeyStatistics(
++ t_Handle h_CcNode, uint16_t keyIndex,
++ t_FmPcdCcKeyStatistics *p_KeyStatistics)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint32_t intFlags;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER);
++
++ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
++
++ if (keyIndex >= p_CcNode->numOfKeys)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("The provided keyIndex exceeds the number of keys in this match table"));
++
++ err = MatchTableGetKeyStatistics(p_CcNode, keyIndex, p_KeyStatistics);
++
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_MatchTableGetMissStatistics(
++ t_Handle h_CcNode, t_FmPcdCcKeyStatistics *p_MissStatistics)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint32_t intFlags;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER);
++
++ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
++
++ err = MatchTableGetKeyStatistics(p_CcNode, p_CcNode->numOfKeys,
++ p_MissStatistics);
++
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_MatchTableFindNGetKeyStatistics(
++ t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask,
++ t_FmPcdCcKeyStatistics *p_KeyStatistics)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint16_t keyIndex;
++ uint32_t intFlags;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER);
++
++ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
++
++ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
++ if (GET_ERROR_TYPE(err) != E_OK)
++ {
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++ RETURN_ERROR(MAJOR, err,
++ ("The received key and mask pair was not found in the "
++ "match table of the provided node"));
++ }
++
++ ASSERT_COND(keyIndex < p_CcNode->numOfKeys);
++
++ err = MatchTableGetKeyStatistics(p_CcNode, keyIndex, p_KeyStatistics);
++
++ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
++
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_MatchTableGetIndexedHashBucket(t_Handle h_CcNode,
++ uint8_t keySize, uint8_t *p_Key,
++ uint8_t hashShift,
++ t_Handle *p_CcNodeBucketHandle,
++ uint8_t *p_BucketIndex,
++ uint16_t *p_LastIndex)
++{
++ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
++ uint16_t glblMask;
++ uint64_t crc64 = 0;
++
++ SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(
++ p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED,
++ E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_CcNodeBucketHandle, E_NULL_POINTER);
++
++ memcpy(&glblMask, PTR_MOVE(p_CcNode->p_GlblMask, 2), 2);
++ be16_to_cpus(&glblMask);
++
++ crc64 = crc64_init();
++ crc64 = crc64_compute(p_Key, keySize, crc64);
++ crc64 >>= hashShift;
++
++ *p_BucketIndex = (uint8_t)(((crc64 >> (8 * (6 - p_CcNode->userOffset)))
++ & glblMask) >> 4);
++ if (*p_BucketIndex >= p_CcNode->numOfKeys)
++ RETURN_ERROR(MINOR, E_NOT_IN_RANGE, ("bucket index!"));
++
++ *p_CcNodeBucketHandle =
++ p_CcNode->keyAndNextEngineParams[*p_BucketIndex].nextEngineParams.params.ccParams.h_CcNode;
++ if (!*p_CcNodeBucketHandle)
++ RETURN_ERROR(MINOR, E_NOT_FOUND, ("bucket!"));
++
++ *p_LastIndex = ((t_FmPcdCcNode *)*p_CcNodeBucketHandle)->numOfKeys;
++
++ return E_OK;
++}
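To see the bucket selection concretely: suppose userOffset is 6 and the stored glblMask is 0x00F0 (four index bits). The shift term 8 * (6 - userOffset) is then zero, so the expression above reduces to taking four bits of the already-shifted CRC64. Values here are illustrative only:

    /* crc64 was already shifted right by hashShift */
    *p_BucketIndex = (uint8_t)((crc64 & 0x00F0) >> 4);   /* 0 .. 15 */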
++
++t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param)
++{
++ t_FmPcdCcNode *p_CcNodeHashTbl;
++ t_FmPcdCcNodeParams *p_IndxHashCcNodeParam, *p_ExactMatchCcNodeParam;
++ t_FmPcdCcNode *p_CcNode;
++ t_Handle h_MissStatsCounters = NULL;
++ t_FmPcdCcKeyParams *p_HashKeyParams;
++ int i;
++ uint16_t numOfSets, numOfWays, countMask, onesCount = 0;
++ bool statsEnForMiss = FALSE;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_Param, E_NULL_POINTER, NULL);
++
++ if (p_Param->maxNumOfKeys == 0)
++ {
++        REPORT_ERROR(MINOR, E_INVALID_VALUE, ("Max number of keys must be greater than 0"));
++ return NULL;
++ }
++
++ if (p_Param->hashResMask == 0)
++ {
++        REPORT_ERROR(MINOR, E_INVALID_VALUE, ("Hash result mask must be non-zero"));
++ return NULL;
++ }
++
++ /*Fix: QorIQ SDK / QSDK-2131*/
++ if (p_Param->ccNextEngineParamsForMiss.nextEngine == e_FM_PCD_INVALID)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Next PCD Engine for on-miss entry is invalid. On-miss entry is always required. You can use e_FM_PCD_DONE."));
++ return NULL;
++ }
++
++#if (DPAA_VERSION >= 11)
++ if (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_RMON)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("RMON statistics mode is not supported for hash table"));
++ return NULL;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ p_ExactMatchCcNodeParam = (t_FmPcdCcNodeParams*)XX_Malloc(
++ sizeof(t_FmPcdCcNodeParams));
++ if (!p_ExactMatchCcNodeParam)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_ExactMatchCcNodeParam"));
++ return NULL;
++ }
++ memset(p_ExactMatchCcNodeParam, 0, sizeof(t_FmPcdCcNodeParams));
++
++ p_IndxHashCcNodeParam = (t_FmPcdCcNodeParams*)XX_Malloc(
++ sizeof(t_FmPcdCcNodeParams));
++ if (!p_IndxHashCcNodeParam)
++ {
++ XX_Free(p_ExactMatchCcNodeParam);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_IndxHashCcNodeParam"));
++ return NULL;
++ }
++ memset(p_IndxHashCcNodeParam, 0, sizeof(t_FmPcdCcNodeParams));
++
++ /* Calculate number of sets and number of ways of the hash table */
++ countMask = (uint16_t)(p_Param->hashResMask >> 4);
++ while (countMask)
++ {
++ onesCount++;
++ countMask = (uint16_t)(countMask >> 1);
++ }
++
++ numOfSets = (uint16_t)(1 << onesCount);
++ numOfWays = (uint16_t)DIV_CEIL(p_Param->maxNumOfKeys, numOfSets);
++
++ if (p_Param->maxNumOfKeys % numOfSets)
++        DBG(INFO, ("'maxNumOfKeys' is not a multiple of the number of hash sets, so the number of ways will be rounded up"));
++
++ if ((p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_FRAME)
++ || (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME))
++ {
++ /* Allocating a statistics counters table that will be used by all
++ 'miss' entries of the hash table */
++ h_MissStatsCounters = (t_Handle)FM_MURAM_AllocMem(
++ FmPcdGetMuramHandle(h_FmPcd), 2 * FM_PCD_CC_STATS_COUNTER_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!h_MissStatsCounters)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics table for hash miss"));
++ XX_Free(p_IndxHashCcNodeParam);
++ XX_Free(p_ExactMatchCcNodeParam);
++ return NULL;
++ }
++ memset(h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE));
++
++ /* Always enable statistics for 'miss', so that a statistics AD will be
++ initialized from the start. We'll store the requested 'statistics enable'
++ value and it will be used when statistics are read by the user. */
++ statsEnForMiss = p_Param->ccNextEngineParamsForMiss.statisticsEn;
++ p_Param->ccNextEngineParamsForMiss.statisticsEn = TRUE;
++ }
++
++ /* Building exact-match node params, will be used to create the hash buckets */
++ p_ExactMatchCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR;
++
++ p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.src =
++ e_FM_PCD_EXTRACT_FROM_KEY;
++ p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.action =
++ e_FM_PCD_ACTION_EXACT_MATCH;
++ p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.offset = 0;
++ p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.size =
++ p_Param->matchKeySize;
++
++ p_ExactMatchCcNodeParam->keysParams.maxNumOfKeys = numOfWays;
++ p_ExactMatchCcNodeParam->keysParams.maskSupport = FALSE;
++ p_ExactMatchCcNodeParam->keysParams.statisticsMode =
++ p_Param->statisticsMode;
++ p_ExactMatchCcNodeParam->keysParams.numOfKeys = 0;
++ p_ExactMatchCcNodeParam->keysParams.keySize = p_Param->matchKeySize;
++ p_ExactMatchCcNodeParam->keysParams.ccNextEngineParamsForMiss =
++ p_Param->ccNextEngineParamsForMiss;
++
++ p_HashKeyParams = p_IndxHashCcNodeParam->keysParams.keyParams;
++
++ for (i = 0; i < numOfSets; i++)
++ {
++ /* Each exact-match node will be marked as a 'bucket' and provided with
++ a pointer to statistics counters, to be used for 'miss' entry
++ statistics */
++ p_CcNode = (t_FmPcdCcNode *)XX_Malloc(sizeof(t_FmPcdCcNode));
++ if (!p_CcNode)
++ break;
++ memset(p_CcNode, 0, sizeof(t_FmPcdCcNode));
++
++ p_CcNode->isHashBucket = TRUE;
++ p_CcNode->h_MissStatsCounters = h_MissStatsCounters;
++
++ err = MatchTableSet(h_FmPcd, p_CcNode, p_ExactMatchCcNodeParam);
++ if (err)
++ break;
++
++ p_HashKeyParams[i].ccNextEngineParams.nextEngine = e_FM_PCD_CC;
++ p_HashKeyParams[i].ccNextEngineParams.statisticsEn = FALSE;
++ p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode =
++ p_CcNode;
++ }
++
++ if (i < numOfSets)
++ {
++ for (i = i - 1; i >= 0; i--)
++ FM_PCD_MatchTableDelete(
++ p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode);
++
++ FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters);
++
++ REPORT_ERROR(MAJOR, E_NULL_POINTER, NO_MSG);
++ XX_Free(p_IndxHashCcNodeParam);
++ XX_Free(p_ExactMatchCcNodeParam);
++ return NULL;
++ }
++
++ /* Creating indexed-hash CC node */
++ p_IndxHashCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR;
++ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.src =
++ e_FM_PCD_EXTRACT_FROM_HASH;
++ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.action =
++ e_FM_PCD_ACTION_INDEXED_LOOKUP;
++ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.icIndxMask =
++ p_Param->hashResMask;
++ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.offset =
++ p_Param->hashShift;
++ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.size = 2;
++
++ p_IndxHashCcNodeParam->keysParams.maxNumOfKeys = numOfSets;
++ p_IndxHashCcNodeParam->keysParams.maskSupport = FALSE;
++ p_IndxHashCcNodeParam->keysParams.statisticsMode =
++ e_FM_PCD_CC_STATS_MODE_NONE;
++ /* Number of keys of this node is number of sets of the hash */
++ p_IndxHashCcNodeParam->keysParams.numOfKeys = numOfSets;
++ p_IndxHashCcNodeParam->keysParams.keySize = 2;
++
++ p_CcNodeHashTbl = FM_PCD_MatchTableSet(h_FmPcd, p_IndxHashCcNodeParam);
++
++ if (p_CcNodeHashTbl)
++ {
++ p_CcNodeHashTbl->kgHashShift = p_Param->kgHashShift;
++
++        /* Store in the hash table the counters allocated for the buckets'
++           'miss' entries, and whether 'miss' statistics were enabled. */
++ p_CcNodeHashTbl->h_MissStatsCounters = h_MissStatsCounters;
++ p_CcNodeHashTbl->statsEnForMiss = statsEnForMiss;
++ }
++
++ XX_Free(p_IndxHashCcNodeParam);
++ XX_Free(p_ExactMatchCcNodeParam);
++
++ return p_CcNodeHashTbl;
++}
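A worked example of the sets/ways arithmetic in FM_PCD_HashTableSet() above, with hypothetical parameters hashResMask = 0x01F0 and maxNumOfKeys = 100:

    countMask = 0x01F0 >> 4;           /* 0x1F, so onesCount == 5        */
    numOfSets = 1 << 5;                /* 32 hash buckets                */
    numOfWays = DIV_CEIL(100, 32);     /* 4 keys per bucket, rounded up  */

The table is therefore built as 32 exact-match buckets of up to 4 ways each, and the DBG() note fires because 100 is not a multiple of 32.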
++
++t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl)
++{
++ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
++ t_Handle h_FmPcd;
++ t_Handle *p_HashBuckets, h_MissStatsCounters;
++ uint16_t i, numOfBuckets;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
++
++ /* Store all hash buckets before the hash is freed */
++ numOfBuckets = p_HashTbl->numOfKeys;
++
++ p_HashBuckets = (t_Handle *)XX_Malloc(numOfBuckets * sizeof(t_Handle));
++ if (!p_HashBuckets)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++
++ for (i = 0; i < numOfBuckets; i++)
++ p_HashBuckets[i] =
++ p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
++
++ h_FmPcd = p_HashTbl->h_FmPcd;
++ h_MissStatsCounters = p_HashTbl->h_MissStatsCounters;
++
++ /* Free the hash */
++ err = FM_PCD_MatchTableDelete(p_HashTbl);
++
++ /* Free each hash bucket */
++ for (i = 0; i < numOfBuckets; i++)
++ err |= FM_PCD_MatchTableDelete(p_HashBuckets[i]);
++
++ XX_Free(p_HashBuckets);
++
++ /* Free statistics counters for 'miss', if these were allocated */
++ if (h_MissStatsCounters)
++ FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters);
++
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_HashTableAddKey(t_Handle h_HashTbl, uint8_t keySize,
++ t_FmPcdCcKeyParams *p_KeyParams)
++{
++ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
++ t_Handle h_HashBucket;
++ uint8_t bucketIndex;
++ uint16_t lastIndex;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_KeyParams->p_Key, E_NULL_POINTER);
++
++ if (p_KeyParams->p_Mask)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++                     ("Key masks are not supported for hash tables"));
++
++ err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize,
++ p_KeyParams->p_Key,
++ p_HashTbl->kgHashShift,
++ &h_HashBucket, &bucketIndex,
++ &lastIndex);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return FM_PCD_MatchTableAddKey(h_HashBucket, FM_PCD_LAST_KEY_INDEX, keySize,
++ p_KeyParams);
++}
++
++t_Error FM_PCD_HashTableRemoveKey(t_Handle h_HashTbl, uint8_t keySize,
++ uint8_t *p_Key)
++{
++ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
++ t_Handle h_HashBucket;
++ uint8_t bucketIndex;
++ uint16_t lastIndex;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++
++ err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_Key,
++ p_HashTbl->kgHashShift,
++ &h_HashBucket, &bucketIndex,
++ &lastIndex);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return FM_PCD_MatchTableFindNRemoveKey(h_HashBucket, keySize, p_Key, NULL);
++}
++
++t_Error FM_PCD_HashTableModifyNextEngine(
++ t_Handle h_HashTbl, uint8_t keySize, uint8_t *p_Key,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
++ t_Handle h_HashBucket;
++ uint8_t bucketIndex;
++ uint16_t lastIndex;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
++
++ err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_Key,
++ p_HashTbl->kgHashShift,
++ &h_HashBucket, &bucketIndex,
++ &lastIndex);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return FM_PCD_MatchTableFindNModifyNextEngine(h_HashBucket, keySize, p_Key,
++ NULL,
++ p_FmPcdCcNextEngineParams);
++}
++
++t_Error FM_PCD_HashTableModifyMissNextEngine(
++ t_Handle h_HashTbl,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
++ t_Handle h_HashBucket;
++ uint8_t i;
++ bool nullifyMissStats = FALSE;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(h_HashTbl, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
++
++ if ((!p_HashTbl->h_MissStatsCounters)
++ && (p_FmPcdCcNextEngineParams->statisticsEn))
++ RETURN_ERROR(
++ MAJOR,
++ E_CONFLICT,
++                ("Statistics are requested for a key, but statistics mode was set "
++                 "to 'NONE' upon initialization"));
++
++ if (p_HashTbl->h_MissStatsCounters)
++ {
++ if ((!p_HashTbl->statsEnForMiss)
++ && (p_FmPcdCcNextEngineParams->statisticsEn))
++ nullifyMissStats = TRUE;
++
++ if ((p_HashTbl->statsEnForMiss)
++ && (!p_FmPcdCcNextEngineParams->statisticsEn))
++ {
++ p_HashTbl->statsEnForMiss = FALSE;
++ p_FmPcdCcNextEngineParams->statisticsEn = TRUE;
++ }
++ }
++
++ for (i = 0; i < p_HashTbl->numOfKeys; i++)
++ {
++ h_HashBucket =
++ p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
++
++ err = FM_PCD_MatchTableModifyMissNextEngine(h_HashBucket,
++ p_FmPcdCcNextEngineParams);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if (nullifyMissStats)
++ {
++        memset(p_HashTbl->h_MissStatsCounters, 0,
++               (2 * FM_PCD_CC_STATS_COUNTER_SIZE));
++ p_HashTbl->statsEnForMiss = TRUE;
++ }
++
++ return E_OK;
++}
++
++
++t_Error FM_PCD_HashTableGetMissNextEngine(
++ t_Handle h_HashTbl,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
++{
++ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
++ t_FmPcdCcNode *p_HashBucket;
++
++ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
++
++ /* Miss next engine of each bucket was initialized with the next engine of the hash table */
++ p_HashBucket =
++ p_HashTbl->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode;
++
++ memcpy(p_FmPcdCcNextEngineParams,
++ &p_HashBucket->keyAndNextEngineParams[p_HashBucket->numOfKeys].nextEngineParams,
++ sizeof(t_FmPcdCcNextEngineParams));
++
++ return E_OK;
++}
++
++t_Error FM_PCD_HashTableFindNGetKeyStatistics(
++ t_Handle h_HashTbl, uint8_t keySize, uint8_t *p_Key,
++ t_FmPcdCcKeyStatistics *p_KeyStatistics)
++{
++ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
++ t_Handle h_HashBucket;
++ uint8_t bucketIndex;
++ uint16_t lastIndex;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER);
++
++ err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_Key,
++ p_HashTbl->kgHashShift,
++ &h_HashBucket, &bucketIndex,
++ &lastIndex);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return FM_PCD_MatchTableFindNGetKeyStatistics(h_HashBucket, keySize, p_Key,
++ NULL, p_KeyStatistics);
++}
++
++t_Error FM_PCD_HashTableGetMissStatistics(
++ t_Handle h_HashTbl, t_FmPcdCcKeyStatistics *p_MissStatistics)
++{
++ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
++ t_Handle h_HashBucket;
++
++ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER);
++
++ if (!p_HashTbl->statsEnForMiss)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Statistics were not enabled for miss"));
++
++ h_HashBucket =
++ p_HashTbl->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode;
++
++ return FM_PCD_MatchTableGetMissStatistics(h_HashBucket, p_MissStatistics);
++}
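Taken together, the functions above give the hash table a simple lifecycle: create it, add and remove keys (the bucket is derived from the key itself, so no index is passed), query statistics, and delete it. A minimal caller sketch with hypothetical parameter values, setting only fields the code above actually reads:

    t_FmPcdHashTableParams params;
    t_FmPcdCcKeyParams keyParams;
    t_Handle h_HashTbl;
    uint8_t key[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    memset(&params, 0, sizeof(params));
    params.maxNumOfKeys = 64;
    params.matchKeySize = 6;
    params.hashResMask = 0x03F0;                   /* six index bits */
    params.statisticsMode = e_FM_PCD_CC_STATS_MODE_FRAME;
    params.ccNextEngineParamsForMiss.nextEngine = e_FM_PCD_DONE;
    h_HashTbl = FM_PCD_HashTableSet(h_FmPcd, &params);

    memset(&keyParams, 0, sizeof(keyParams));
    keyParams.p_Key = key;                         /* masks are rejected */
    keyParams.ccNextEngineParams.nextEngine = e_FM_PCD_DONE;
    FM_PCD_HashTableAddKey(h_HashTbl, 6, &keyParams);

    FM_PCD_HashTableRemoveKey(h_HashTbl, 6, key);
    FM_PCD_HashTableDelete(h_HashTbl);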
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h
+new file mode 100644
+index 00000000..3456bb56
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h
+@@ -0,0 +1,399 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_cc.h
++
++ @Description FM PCD CC ...
++*//***************************************************************************/
++#ifndef __FM_CC_H
++#define __FM_CC_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++
++#include "fm_pcd.h"
++
++
++/***********************************************************************/
++/* Coarse classification defines */
++/***********************************************************************/
++
++#define CC_MAX_NUM_OF_KEYS (FM_PCD_MAX_NUM_OF_KEYS + 1)
++
++#define CC_PC_FF_MACDST 0x00
++#define CC_PC_FF_MACSRC 0x01
++#define CC_PC_FF_ETYPE 0x02
++
++#define CC_PC_FF_TCI1 0x03
++#define CC_PC_FF_TCI2 0x04
++
++#define CC_PC_FF_MPLS1 0x06
++#define CC_PC_FF_MPLS_LAST 0x07
++
++#define CC_PC_FF_IPV4DST1 0x08
++#define CC_PC_FF_IPV4DST2 0x16
++#define CC_PC_FF_IPV4IPTOS_TC1 0x09
++#define CC_PC_FF_IPV4IPTOS_TC2 0x17
++#define CC_PC_FF_IPV4PTYPE1 0x0A
++#define CC_PC_FF_IPV4PTYPE2 0x18
++#define CC_PC_FF_IPV4SRC1 0x0b
++#define CC_PC_FF_IPV4SRC2 0x19
++#define CC_PC_FF_IPV4SRC1_IPV4DST1 0x0c
++#define CC_PC_FF_IPV4SRC2_IPV4DST2 0x1a
++#define CC_PC_FF_IPV4TTL 0x29
++
++
++#define CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1 0x0d /*TODO - CLASS - what is it? TOS*/
++#define CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2 0x1b
++#define CC_PC_FF_IPV6PTYPE1 0x0e
++#define CC_PC_FF_IPV6PTYPE2 0x1c
++#define CC_PC_FF_IPV6DST1 0x0f
++#define CC_PC_FF_IPV6DST2 0x1d
++#define CC_PC_FF_IPV6SRC1 0x10
++#define CC_PC_FF_IPV6SRC2 0x1e
++#define CC_PC_FF_IPV6HOP_LIMIT 0x2a
++#define CC_PC_FF_IPPID 0x24
++#define CC_PC_FF_IPDSCP 0x76
++
++#define CC_PC_FF_GREPTYPE 0x11
++
++#define CC_PC_FF_MINENCAP_PTYPE 0x12
++#define CC_PC_FF_MINENCAP_IPDST 0x13
++#define CC_PC_FF_MINENCAP_IPSRC 0x14
++#define CC_PC_FF_MINENCAP_IPSRC_IPDST 0x15
++
++#define CC_PC_FF_L4PSRC 0x1f
++#define CC_PC_FF_L4PDST 0x20
++#define CC_PC_FF_L4PSRC_L4PDST 0x21
++
++#define CC_PC_FF_PPPPID 0x05
++
++#define CC_PC_PR_SHIM1 0x22
++#define CC_PC_PR_SHIM2 0x23
++
++#define CC_PC_GENERIC_WITHOUT_MASK 0x27
++#define CC_PC_GENERIC_WITH_MASK 0x28
++#define CC_PC_GENERIC_IC_GMASK 0x2B
++#define CC_PC_GENERIC_IC_HASH_INDEXED 0x2C
++#define CC_PC_GENERIC_IC_AGING_MASK 0x2D
++
++#define CC_PR_OFFSET 0x25
++#define CC_PR_WITHOUT_OFFSET 0x26
++
++#define CC_PC_PR_ETH_OFFSET 19
++#define CC_PC_PR_USER_DEFINED_SHIM1_OFFSET 16
++#define CC_PC_PR_USER_DEFINED_SHIM2_OFFSET 17
++#define CC_PC_PR_USER_LLC_SNAP_OFFSET 20
++#define CC_PC_PR_VLAN1_OFFSET 21
++#define CC_PC_PR_VLAN2_OFFSET 22
++#define CC_PC_PR_PPPOE_OFFSET 24
++#define CC_PC_PR_MPLS1_OFFSET 25
++#define CC_PC_PR_MPLS_LAST_OFFSET 26
++#define CC_PC_PR_IP1_OFFSET 27
++#define CC_PC_PR_IP_LAST_OFFSET 28
++#define CC_PC_PR_MINENC_OFFSET 28
++#define CC_PC_PR_L4_OFFSET 30
++#define CC_PC_PR_GRE_OFFSET 29
++#define CC_PC_PR_ETYPE_LAST_OFFSET 23
++#define CC_PC_PR_NEXT_HEADER_OFFSET 31
++
++#define CC_PC_ILLEGAL 0xff
++#define CC_SIZE_ILLEGAL 0
++
++#define FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN 16
++#define FM_PCD_CC_AD_TABLE_ALIGN 16
++#define FM_PCD_CC_AD_ENTRY_SIZE 16
++#define FM_PCD_CC_NUM_OF_KEYS 255
++#define FM_PCD_CC_TREE_ADDR_ALIGN 256
++
++#define FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE 0x00000000
++#define FM_PCD_AD_RESULT_DATA_FLOW_TYPE 0x80000000
++#define FM_PCD_AD_RESULT_PLCR_DIS 0x20000000
++#define FM_PCD_AD_RESULT_EXTENDED_MODE 0x80000000
++#define FM_PCD_AD_RESULT_NADEN 0x20000000
++#define FM_PCD_AD_RESULT_STATISTICS_EN 0x40000000
++
++#define FM_PCD_AD_CONT_LOOKUP_TYPE 0x40000000
++#define FM_PCD_AD_CONT_LOOKUP_LCL_MASK 0x00800000
++
++#define FM_PCD_AD_STATS_TYPE 0x40000000
++#define FM_PCD_AD_STATS_FLR_ADDR_MASK 0x00FFFFFF
++#define FM_PCD_AD_STATS_COUNTERS_ADDR_MASK 0x00FFFFFF
++#define FM_PCD_AD_STATS_NEXT_ACTION_MASK 0xFFFF0000
++#define FM_PCD_AD_STATS_NEXT_ACTION_SHIFT 12
++#define FM_PCD_AD_STATS_NAD_EN 0x00008000
++#define FM_PCD_AD_STATS_OP_CODE 0x00000036
++#define FM_PCD_AD_STATS_FLR_EN 0x00004000
++#define FM_PCD_AD_STATS_COND_EN 0x00002000
++
++
++
++#define FM_PCD_AD_BYPASS_TYPE 0xc0000000
++
++#define FM_PCD_AD_TYPE_MASK 0xc0000000
++#define FM_PCD_AD_OPCODE_MASK 0x0000000f
++
++#define FM_PCD_AD_PROFILEID_FOR_CNTRL_SHIFT 16
++#if (DPAA_VERSION >= 11)
++#define FM_PCD_AD_RESULT_VSP_SHIFT 24
++#define FM_PCD_AD_RESULT_NO_OM_VSPE 0x02000000
++#define FM_PCD_AD_RESULT_VSP_MASK 0x3f
++#define FM_PCD_AD_NCSPFQIDM_MASK 0x80000000
++#endif /* (DPAA_VERSION >= 11) */
++
++#define GLBL_MASK_FOR_HASH_INDEXED 0xfff00000
++#define CC_GLBL_MASK_SIZE 4
++#define CC_AGING_MASK_SIZE 4
++
++typedef uint32_t ccPrivateInfo_t; /**< private info of CC */
++
++#define CC_PRIVATE_INFO_NONE 0
++#define CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP 0x80000000
++#define CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH 0x40000000
++#define CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH 0x20000000
++#define CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP 0x10000000
++
++#define CC_BUILD_AGING_MASK(numOfKeys) ((((1LL << ((numOfKeys) + 1)) - 1)) << (31 - (numOfKeys)))
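The aging mask packs (numOfKeys + 1) one-bits at the top of a 32-bit word. A worked expansion, for illustration:

    CC_BUILD_AGING_MASK(3) == ((1LL << 4) - 1) << (31 - 3)   /* 0xF0000000 */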
++/***********************************************************************/
++/* Memory map */
++/***********************************************************************/
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++typedef struct
++{
++ volatile uint32_t fqid;
++ volatile uint32_t plcrProfile;
++ volatile uint32_t nia;
++ volatile uint32_t res;
++} t_AdOfTypeResult;
++
++typedef struct
++{
++ volatile uint32_t ccAdBase;
++ volatile uint32_t matchTblPtr;
++ volatile uint32_t pcAndOffsets;
++ volatile uint32_t gmask;
++} t_AdOfTypeContLookup;
++
++typedef struct
++{
++ volatile uint32_t profileTableAddr;
++ volatile uint32_t reserved;
++ volatile uint32_t nextActionIndx;
++ volatile uint32_t statsTableAddr;
++} t_AdOfTypeStats;
++
++typedef union
++{
++ volatile t_AdOfTypeResult adResult;
++ volatile t_AdOfTypeContLookup adContLookup;
++} t_Ad;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++/***********************************************************************/
++/* Driver's internal structures */
++/***********************************************************************/
++
++typedef struct t_FmPcdStatsObj
++{
++ t_Handle h_StatsAd;
++ t_Handle h_StatsCounters;
++ t_List node;
++} t_FmPcdStatsObj;
++
++typedef struct
++{
++ uint8_t key[FM_PCD_MAX_SIZE_OF_KEY];
++ uint8_t mask[FM_PCD_MAX_SIZE_OF_KEY];
++
++ t_FmPcdCcNextEngineParams nextEngineParams;
++ uint32_t requiredAction;
++ uint32_t shadowAction;
++
++ t_FmPcdStatsObj *p_StatsObj;
++
++} t_FmPcdCcKeyAndNextEngineParams;
++
++typedef struct
++{
++ t_Handle p_Ad;
++ e_FmPcdEngine fmPcdEngine;
++ bool adAllocated;
++ bool isTree;
++
++ uint32_t myInfo;
++ t_List *h_CcNextNodesLst;
++ t_Handle h_AdditionalInfo;
++ t_Handle h_Node;
++} t_FmPcdModifyCcAdditionalParams;
++
++typedef struct
++{
++ t_Handle p_AdTableNew;
++ t_Handle p_KeysMatchTableNew;
++ t_Handle p_AdTableOld;
++ t_Handle p_KeysMatchTableOld;
++ uint16_t numOfKeys;
++ t_Handle h_CurrentNode;
++ uint16_t savedKeyIndex;
++ t_Handle h_NodeForAdd;
++ t_Handle h_NodeForRmv;
++ t_Handle h_ManipForRmv;
++ t_Handle h_ManipForAdd;
++ t_FmPcdStatsObj *p_StatsObjForRmv;
++#if (DPAA_VERSION >= 11)
++ t_Handle h_FrmReplicForAdd;
++ t_Handle h_FrmReplicForRmv;
++#endif /* (DPAA_VERSION >= 11) */
++ bool tree;
++
++ t_FmPcdCcKeyAndNextEngineParams keyAndNextEngineParams[CC_MAX_NUM_OF_KEYS];
++} t_FmPcdModifyCcKeyAdditionalParams;
++
++typedef struct
++{
++ t_Handle h_Manip;
++ t_Handle h_CcNode;
++} t_CcNextEngineInfo;
++
++typedef struct
++{
++ uint16_t numOfKeys;
++ uint16_t maxNumOfKeys;
++
++ bool maskSupport;
++ uint32_t keysMatchTableMaxSize;
++
++ e_FmPcdCcStatsMode statisticsMode;
++ uint32_t numOfStatsFLRs;
++ uint32_t countersArraySize;
++
++ bool isHashBucket; /**< Valid for match table node that is a bucket of a hash table only */
++    t_Handle h_MissStatsCounters; /**< Valid for a hash table node and for a match table that is a bucket;
++                                         Holds the statistics counters allocated by the hash table, which
++                                         are shared by all hash table buckets; */
++ t_Handle h_PrivMissStatsCounters; /**< Valid for match table node that is a bucket of a hash table only;
++ Holds the statistics counters that were allocated for this node
++ and replaced by the shared counters (allocated by the hash table); */
++    bool statsEnForMiss; /**< Valid for hash table node only; TRUE if statistics are currently
++                                         enabled for hash 'miss', FALSE otherwise; this parameter affects the
++                                         statistics count returned to the user; a statistics AD is always
++                                         present for 'miss' in all hash buckets; */
++ bool glblMaskUpdated;
++ t_Handle p_GlblMask;
++ bool lclMask;
++ uint8_t parseCode;
++ uint8_t offset;
++ uint8_t prsArrayOffset;
++ bool ctrlFlow;
++ uint16_t owners;
++
++ uint8_t ccKeySizeAccExtraction;
++ uint8_t sizeOfExtraction;
++ uint8_t glblMaskSize;
++
++ t_Handle h_KeysMatchTable;
++ t_Handle h_AdTable;
++ t_Handle h_StatsAds;
++ t_Handle h_TmpAd;
++ t_Handle h_Ad;
++ t_Handle h_StatsFLRs;
++
++ t_List availableStatsLst;
++
++ t_List ccPrevNodesLst;
++
++ t_List ccTreeIdLst;
++ t_List ccTreesLst;
++
++ t_Handle h_FmPcd;
++ uint32_t shadowAction;
++ uint8_t userSizeOfExtraction;
++ uint8_t userOffset;
++ uint8_t kgHashShift; /* used in hash-table */
++
++ t_Handle h_Spinlock;
++
++ t_FmPcdCcKeyAndNextEngineParams keyAndNextEngineParams[CC_MAX_NUM_OF_KEYS];
++} t_FmPcdCcNode;
++
++typedef struct
++{
++ t_FmPcdCcNode *p_FmPcdCcNode;
++ bool occupied;
++ uint16_t owners;
++ volatile bool lock;
++} t_FmPcdCcNodeArray;
++
++typedef struct
++{
++ uint8_t numOfEntriesInGroup;
++ uint32_t totalBitsMask;
++ uint8_t baseGroupEntry;
++} t_FmPcdCcGroupParam;
++
++typedef struct
++{
++ t_Handle h_FmPcd;
++ uint8_t netEnvId;
++ uintptr_t ccTreeBaseAddr;
++ uint8_t numOfGrps;
++ t_FmPcdCcGroupParam fmPcdGroupParam[FM_PCD_MAX_NUM_OF_CC_GROUPS];
++ t_List fmPortsLst;
++ t_FmPcdLock *p_Lock;
++ uint8_t numOfEntries;
++ uint16_t owners;
++ t_Handle h_FmPcdCcSavedManipParams;
++ bool modifiedState;
++ uint32_t requiredAction;
++ t_Handle h_IpReassemblyManip;
++ t_Handle h_CapwapReassemblyManip;
++
++ t_FmPcdCcKeyAndNextEngineParams keyAndNextEngineParams[FM_PCD_MAX_NUM_OF_CC_GROUPS];
++} t_FmPcdCcTree;
++
++
++t_Error FmPcdCcNodeTreeTryLock(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_List *p_List);
++void FmPcdCcNodeTreeReleaseLock(t_Handle h_FmPcd, t_List *p_List);
++t_Error FmPcdUpdateCcShadow (t_FmPcd *p_FmPcd, uint32_t size, uint32_t align);
++
++
++#endif /* __FM_CC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c
+new file mode 100644
+index 00000000..f183d2f9
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c
+@@ -0,0 +1,3242 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_kg.c
++
++ @Description FM PCD ...
++*//***************************************************************************/
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "debug_ext.h"
++#include "net_ext.h"
++#include "fm_port_ext.h"
++
++#include "fm_common.h"
++#include "fm_pcd.h"
++#include "fm_hc.h"
++#include "fm_pcd_ipc.h"
++#include "fm_kg.h"
++#include "fsl_fman_kg.h"
++
++
++/****************************************/
++/* static functions */
++/****************************************/
++
++static uint32_t KgHwLock(t_Handle h_FmPcdKg)
++{
++ ASSERT_COND(h_FmPcdKg);
++ return XX_LockIntrSpinlock(((t_FmPcdKg *)h_FmPcdKg)->h_HwSpinlock);
++}
++
++static void KgHwUnlock(t_Handle h_FmPcdKg, uint32_t intFlags)
++{
++ ASSERT_COND(h_FmPcdKg);
++ XX_UnlockIntrSpinlock(((t_FmPcdKg *)h_FmPcdKg)->h_HwSpinlock, intFlags);
++}
++
++static uint32_t KgSchemeLock(t_Handle h_Scheme)
++{
++ ASSERT_COND(h_Scheme);
++ return FmPcdLockSpinlock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock);
++}
++
++static void KgSchemeUnlock(t_Handle h_Scheme, uint32_t intFlags)
++{
++ ASSERT_COND(h_Scheme);
++ FmPcdUnlockSpinlock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock, intFlags);
++}
++
++static bool KgSchemeFlagTryLock(t_Handle h_Scheme)
++{
++ ASSERT_COND(h_Scheme);
++ return FmPcdLockTryLock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock);
++}
++
++static void KgSchemeFlagUnlock(t_Handle h_Scheme)
++{
++ ASSERT_COND(h_Scheme);
++ FmPcdLockUnlock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock);
++}
++
++static t_Error WriteKgarWait(t_FmPcd *p_FmPcd, uint32_t fmkg_ar)
++{
++
++ struct fman_kg_regs *regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
++
++ if (fman_kg_write_ar_wait(regs, fmkg_ar))
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Keygen scheme access violation"));
++
++ return E_OK;
++}
++
++static e_FmPcdKgExtractDfltSelect GetGenericSwDefault(t_FmPcdKgExtractDflt swDefaults[], uint8_t numOfSwDefaults, uint8_t code)
++{
++ int i;
++
++ switch (code)
++ {
++ case (KG_SCH_GEN_PARSE_RESULT_N_FQID):
++ case (KG_SCH_GEN_DEFAULT):
++ case (KG_SCH_GEN_NEXTHDR):
++ for (i=0 ; i<numOfSwDefaults ; i++)
++ if (swDefaults[i].type == e_FM_PCD_KG_GENERIC_NOT_FROM_DATA)
++ return swDefaults[i].dfltSelect;
++ break;
++ case (KG_SCH_GEN_SHIM1):
++ case (KG_SCH_GEN_SHIM2):
++ case (KG_SCH_GEN_IP_PID_NO_V):
++ case (KG_SCH_GEN_ETH_NO_V):
++ case (KG_SCH_GEN_SNAP_NO_V):
++ case (KG_SCH_GEN_VLAN1_NO_V):
++ case (KG_SCH_GEN_VLAN2_NO_V):
++ case (KG_SCH_GEN_ETH_TYPE_NO_V):
++ case (KG_SCH_GEN_PPP_NO_V):
++ case (KG_SCH_GEN_MPLS1_NO_V):
++ case (KG_SCH_GEN_MPLS_LAST_NO_V):
++ case (KG_SCH_GEN_L3_NO_V):
++ case (KG_SCH_GEN_IP2_NO_V):
++ case (KG_SCH_GEN_GRE_NO_V):
++ case (KG_SCH_GEN_L4_NO_V):
++ for (i=0 ; i<numOfSwDefaults ; i++)
++ if (swDefaults[i].type == e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V)
++ return swDefaults[i].dfltSelect;
++ break;
++ case (KG_SCH_GEN_START_OF_FRM):
++ case (KG_SCH_GEN_ETH):
++ case (KG_SCH_GEN_SNAP):
++ case (KG_SCH_GEN_VLAN1):
++ case (KG_SCH_GEN_VLAN2):
++ case (KG_SCH_GEN_ETH_TYPE):
++ case (KG_SCH_GEN_PPP):
++ case (KG_SCH_GEN_MPLS1):
++ case (KG_SCH_GEN_MPLS2):
++ case (KG_SCH_GEN_MPLS3):
++ case (KG_SCH_GEN_MPLS_LAST):
++ case (KG_SCH_GEN_IPV4):
++ case (KG_SCH_GEN_IPV6):
++ case (KG_SCH_GEN_IPV4_TUNNELED):
++ case (KG_SCH_GEN_IPV6_TUNNELED):
++ case (KG_SCH_GEN_MIN_ENCAP):
++ case (KG_SCH_GEN_GRE):
++ case (KG_SCH_GEN_TCP):
++ case (KG_SCH_GEN_UDP):
++ case (KG_SCH_GEN_IPSEC_AH):
++ case (KG_SCH_GEN_SCTP):
++ case (KG_SCH_GEN_DCCP):
++ case (KG_SCH_GEN_IPSEC_ESP):
++ for (i=0 ; i<numOfSwDefaults ; i++)
++ if (swDefaults[i].type == e_FM_PCD_KG_GENERIC_FROM_DATA)
++ return swDefaults[i].dfltSelect;
++ break;
++ default:
++ break;
++ }
++
++ return e_FM_PCD_KG_DFLT_ILLEGAL;
++}
++
++static uint8_t GetGenCode(e_FmPcdExtractFrom src, uint8_t *p_Offset)
++{
++ *p_Offset = 0;
++
++ switch (src)
++ {
++ case (e_FM_PCD_EXTRACT_FROM_FRAME_START):
++ return KG_SCH_GEN_START_OF_FRM;
++ case (e_FM_PCD_EXTRACT_FROM_DFLT_VALUE):
++ return KG_SCH_GEN_DEFAULT;
++ case (e_FM_PCD_EXTRACT_FROM_PARSE_RESULT):
++ return KG_SCH_GEN_PARSE_RESULT_N_FQID;
++ case (e_FM_PCD_EXTRACT_FROM_ENQ_FQID):
++ *p_Offset = 32;
++ return KG_SCH_GEN_PARSE_RESULT_N_FQID;
++ case (e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE):
++ return KG_SCH_GEN_NEXTHDR;
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src"));
++ return 0;
++ }
++}
++
++static uint8_t GetGenHdrCode(e_NetHeaderType hdr, e_FmPcdHdrIndex hdrIndex, bool ignoreProtocolValidation)
++{
++ if (!ignoreProtocolValidation)
++ switch (hdr)
++ {
++ case (HEADER_TYPE_NONE):
++ ASSERT_COND(FALSE);
++ case (HEADER_TYPE_ETH):
++ return KG_SCH_GEN_ETH;
++ case (HEADER_TYPE_LLC_SNAP):
++ return KG_SCH_GEN_SNAP;
++ case (HEADER_TYPE_PPPoE):
++ return KG_SCH_GEN_PPP;
++ case (HEADER_TYPE_MPLS):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_GEN_MPLS1;
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_2)
++ return KG_SCH_GEN_MPLS2;
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_3)
++ return KG_SCH_GEN_MPLS3;
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
++ return KG_SCH_GEN_MPLS_LAST;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index"));
++ return 0;
++ case (HEADER_TYPE_IPv4):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_GEN_IPV4;
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_GEN_IPV4_TUNNELED;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 header index"));
++ return 0;
++ case (HEADER_TYPE_IPv6):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_GEN_IPV6;
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_GEN_IPV6_TUNNELED;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 header index"));
++ return 0;
++ case (HEADER_TYPE_GRE):
++ return KG_SCH_GEN_GRE;
++ case (HEADER_TYPE_TCP):
++ return KG_SCH_GEN_TCP;
++ case (HEADER_TYPE_UDP):
++ return KG_SCH_GEN_UDP;
++ case (HEADER_TYPE_IPSEC_AH):
++ return KG_SCH_GEN_IPSEC_AH;
++ case (HEADER_TYPE_IPSEC_ESP):
++ return KG_SCH_GEN_IPSEC_ESP;
++ case (HEADER_TYPE_SCTP):
++ return KG_SCH_GEN_SCTP;
++ case (HEADER_TYPE_DCCP):
++ return KG_SCH_GEN_DCCP;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ else
++ switch (hdr)
++ {
++ case (HEADER_TYPE_NONE):
++ ASSERT_COND(FALSE);
++ case (HEADER_TYPE_ETH):
++ return KG_SCH_GEN_ETH_NO_V;
++ case (HEADER_TYPE_LLC_SNAP):
++ return KG_SCH_GEN_SNAP_NO_V;
++ case (HEADER_TYPE_PPPoE):
++ return KG_SCH_GEN_PPP_NO_V;
++ case (HEADER_TYPE_MPLS):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_GEN_MPLS1_NO_V;
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
++ return KG_SCH_GEN_MPLS_LAST_NO_V;
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_3) )
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Indexed MPLS Extraction not supported"));
++ else
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index"));
++ return 0;
++ case (HEADER_TYPE_IPv4):
++ case (HEADER_TYPE_IPv6):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_GEN_L3_NO_V;
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_GEN_IP2_NO_V;
++                REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header index"));
++                return 0;
++ case (HEADER_TYPE_MINENCAP):
++ return KG_SCH_GEN_IP2_NO_V;
++ case (HEADER_TYPE_USER_DEFINED_L3):
++ return KG_SCH_GEN_L3_NO_V;
++ case (HEADER_TYPE_GRE):
++ return KG_SCH_GEN_GRE_NO_V;
++ case (HEADER_TYPE_TCP):
++ case (HEADER_TYPE_UDP):
++ case (HEADER_TYPE_IPSEC_AH):
++ case (HEADER_TYPE_IPSEC_ESP):
++ case (HEADER_TYPE_SCTP):
++ case (HEADER_TYPE_DCCP):
++ return KG_SCH_GEN_L4_NO_V;
++ case (HEADER_TYPE_USER_DEFINED_SHIM1):
++ return KG_SCH_GEN_SHIM1;
++ case (HEADER_TYPE_USER_DEFINED_SHIM2):
++ return KG_SCH_GEN_SHIM2;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++}
++static t_GenericCodes GetGenFieldCode(e_NetHeaderType hdr, t_FmPcdFields field, bool ignoreProtocolValidation, e_FmPcdHdrIndex hdrIndex)
++{
++ if (!ignoreProtocolValidation)
++ switch (hdr)
++ {
++ case (HEADER_TYPE_NONE):
++ ASSERT_COND(FALSE);
++ break;
++ case (HEADER_TYPE_ETH):
++ switch (field.eth)
++ {
++ case (NET_HEADER_FIELD_ETH_TYPE):
++ return KG_SCH_GEN_ETH_TYPE;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ break;
++ case (HEADER_TYPE_VLAN):
++ switch (field.vlan)
++ {
++ case (NET_HEADER_FIELD_VLAN_TCI):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_GEN_VLAN1;
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
++ return KG_SCH_GEN_VLAN2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal VLAN header index"));
++ return 0;
++ }
++ break;
++ case (HEADER_TYPE_MPLS):
++ case (HEADER_TYPE_IPSEC_AH):
++ case (HEADER_TYPE_IPSEC_ESP):
++ case (HEADER_TYPE_LLC_SNAP):
++ case (HEADER_TYPE_PPPoE):
++ case (HEADER_TYPE_IPv4):
++ case (HEADER_TYPE_IPv6):
++ case (HEADER_TYPE_GRE):
++ case (HEADER_TYPE_MINENCAP):
++ case (HEADER_TYPE_USER_DEFINED_L3):
++ case (HEADER_TYPE_TCP):
++ case (HEADER_TYPE_UDP):
++ case (HEADER_TYPE_SCTP):
++ case (HEADER_TYPE_DCCP):
++ case (HEADER_TYPE_USER_DEFINED_L4):
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ default:
++ break;
++
++ }
++ else
++ switch (hdr)
++ {
++ case (HEADER_TYPE_NONE):
++ ASSERT_COND(FALSE);
++ break;
++ case (HEADER_TYPE_ETH):
++ switch (field.eth)
++ {
++ case (NET_HEADER_FIELD_ETH_TYPE):
++ return KG_SCH_GEN_ETH_TYPE_NO_V;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ break;
++ case (HEADER_TYPE_VLAN):
++ switch (field.vlan)
++ {
++ case (NET_HEADER_FIELD_VLAN_TCI) :
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_GEN_VLAN1_NO_V;
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
++ return KG_SCH_GEN_VLAN2_NO_V;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal VLAN header index"));
++ return 0;
++ }
++ break;
++ case (HEADER_TYPE_IPv4):
++ switch (field.ipv4)
++ {
++ case (NET_HEADER_FIELD_IPv4_PROTO):
++ return KG_SCH_GEN_IP_PID_NO_V;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ break;
++ case (HEADER_TYPE_IPv6):
++ switch (field.ipv6)
++ {
++ case (NET_HEADER_FIELD_IPv6_NEXT_HDR):
++ return KG_SCH_GEN_IP_PID_NO_V;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ break;
++ case (HEADER_TYPE_MPLS):
++ case (HEADER_TYPE_LLC_SNAP):
++ case (HEADER_TYPE_PPPoE):
++ case (HEADER_TYPE_GRE):
++ case (HEADER_TYPE_MINENCAP):
++ case (HEADER_TYPE_USER_DEFINED_L3):
++ case (HEADER_TYPE_TCP):
++ case (HEADER_TYPE_UDP):
++ case (HEADER_TYPE_IPSEC_AH):
++ case (HEADER_TYPE_IPSEC_ESP):
++ case (HEADER_TYPE_SCTP):
++ case (HEADER_TYPE_DCCP):
++ case (HEADER_TYPE_USER_DEFINED_L4):
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ default:
++ break;
++ }
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Header not supported"));
++ return 0;
++}
++
++static t_KnownFieldsMasks GetKnownProtMask(t_FmPcd *p_FmPcd, e_NetHeaderType hdr, e_FmPcdHdrIndex index, t_FmPcdFields field)
++{
++ UNUSED(p_FmPcd);
++
++ switch (hdr)
++ {
++ case (HEADER_TYPE_NONE):
++ ASSERT_COND(FALSE);
++ break;
++ case (HEADER_TYPE_ETH):
++ switch (field.eth)
++ {
++ case (NET_HEADER_FIELD_ETH_DA):
++ return KG_SCH_KN_MACDST;
++ case (NET_HEADER_FIELD_ETH_SA):
++ return KG_SCH_KN_MACSRC;
++ case (NET_HEADER_FIELD_ETH_TYPE):
++ return KG_SCH_KN_ETYPE;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_LLC_SNAP):
++ switch (field.llcSnap)
++ {
++ case (NET_HEADER_FIELD_LLC_SNAP_TYPE):
++ return KG_SCH_KN_ETYPE;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_VLAN):
++ switch (field.vlan)
++ {
++ case (NET_HEADER_FIELD_VLAN_TCI):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_TCI1;
++ if (index == e_FM_PCD_HDR_INDEX_LAST)
++ return KG_SCH_KN_TCI2;
++ else
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_MPLS):
++ switch (field.mpls)
++ {
++ case (NET_HEADER_FIELD_MPLS_LABEL_STACK):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_MPLS1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return KG_SCH_KN_MPLS2;
++ if (index == e_FM_PCD_HDR_INDEX_LAST)
++ return KG_SCH_KN_MPLS_LAST;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS index"));
++ return 0;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_IPv4):
++ switch (field.ipv4)
++ {
++ case (NET_HEADER_FIELD_IPv4_SRC_IP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_IPSRC1;
++ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_KN_IPSRC2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
++ return 0;
++ case (NET_HEADER_FIELD_IPv4_DST_IP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_IPDST1;
++ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_KN_IPDST2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
++ return 0;
++ case (NET_HEADER_FIELD_IPv4_PROTO):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_PTYPE1;
++ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_KN_PTYPE2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
++ return 0;
++ case (NET_HEADER_FIELD_IPv4_TOS):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_IPTOS_TC1;
++ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_KN_IPTOS_TC2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
++ return 0;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_IPv6):
++ switch (field.ipv6)
++ {
++ case (NET_HEADER_FIELD_IPv6_SRC_IP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_IPSRC1;
++ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_KN_IPSRC2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return 0;
++ case (NET_HEADER_FIELD_IPv6_DST_IP):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_IPDST1;
++ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_KN_IPDST2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return 0;
++ case (NET_HEADER_FIELD_IPv6_NEXT_HDR):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_PTYPE1;
++ if (index == e_FM_PCD_HDR_INDEX_2)
++ return KG_SCH_KN_PTYPE2;
++                    if (index == e_FM_PCD_HDR_INDEX_LAST)
++                    {
++#ifdef FM_KG_NO_IPPID_SUPPORT
++                        if (p_FmPcd->fmRevInfo.majorRev < 6)
++                            return KG_SCH_KN_PTYPE2;
++#endif /* FM_KG_NO_IPPID_SUPPORT */
++                        return KG_SCH_KN_IPPID;
++                    }
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return 0;
++ case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return (KG_SCH_KN_IPV6FL1 | KG_SCH_KN_IPTOS_TC1);
++ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
++ return (KG_SCH_KN_IPV6FL2 | KG_SCH_KN_IPTOS_TC2);
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return 0;
++ case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_TC):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_IPTOS_TC1;
++ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_KN_IPTOS_TC2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return 0;
++ case (NET_HEADER_FIELD_IPv6_FL):
++ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
++ return KG_SCH_KN_IPV6FL1;
++ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
++ return KG_SCH_KN_IPV6FL2;
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
++ return 0;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_GRE):
++ switch (field.gre)
++ {
++ case (NET_HEADER_FIELD_GRE_TYPE):
++ return KG_SCH_KN_GREPTYPE;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_MINENCAP):
++ switch (field.minencap)
++ {
++ case (NET_HEADER_FIELD_MINENCAP_SRC_IP):
++ return KG_SCH_KN_IPSRC2;
++ case (NET_HEADER_FIELD_MINENCAP_DST_IP):
++ return KG_SCH_KN_IPDST2;
++ case (NET_HEADER_FIELD_MINENCAP_TYPE):
++ return KG_SCH_KN_PTYPE2;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_TCP):
++ switch (field.tcp)
++ {
++ case (NET_HEADER_FIELD_TCP_PORT_SRC):
++ return KG_SCH_KN_L4PSRC;
++ case (NET_HEADER_FIELD_TCP_PORT_DST):
++ return KG_SCH_KN_L4PDST;
++ case (NET_HEADER_FIELD_TCP_FLAGS):
++ return KG_SCH_KN_TFLG;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_UDP):
++ switch (field.udp)
++ {
++ case (NET_HEADER_FIELD_UDP_PORT_SRC):
++ return KG_SCH_KN_L4PSRC;
++ case (NET_HEADER_FIELD_UDP_PORT_DST):
++ return KG_SCH_KN_L4PDST;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_IPSEC_AH):
++ switch (field.ipsecAh)
++ {
++ case (NET_HEADER_FIELD_IPSEC_AH_SPI):
++ return KG_SCH_KN_IPSEC_SPI;
++ case (NET_HEADER_FIELD_IPSEC_AH_NH):
++ return KG_SCH_KN_IPSEC_NH;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_IPSEC_ESP):
++ switch (field.ipsecEsp)
++ {
++ case (NET_HEADER_FIELD_IPSEC_ESP_SPI):
++ return KG_SCH_KN_IPSEC_SPI;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_SCTP):
++ switch (field.sctp)
++ {
++ case (NET_HEADER_FIELD_SCTP_PORT_SRC):
++ return KG_SCH_KN_L4PSRC;
++ case (NET_HEADER_FIELD_SCTP_PORT_DST):
++ return KG_SCH_KN_L4PDST;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_DCCP):
++ switch (field.dccp)
++ {
++ case (NET_HEADER_FIELD_DCCP_PORT_SRC):
++ return KG_SCH_KN_L4PSRC;
++ case (NET_HEADER_FIELD_DCCP_PORT_DST):
++ return KG_SCH_KN_L4PDST;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ case (HEADER_TYPE_PPPoE):
++ switch (field.pppoe)
++ {
++ case (NET_HEADER_FIELD_PPPoE_PID):
++ return KG_SCH_KN_PPPID;
++ case (NET_HEADER_FIELD_PPPoE_SID):
++ return KG_SCH_KN_PPPSID;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++ }
++ default:
++ break;
++
++ }
++
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
++ return 0;
++}
++
++
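++/* Map a one-hot KG_SCH_KN_* bit mask to its field id by counting
++ * positions from the MSB (bit 31 maps to id 0, bit 30 to id 1, ...). */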
++static uint8_t GetKnownFieldId(uint32_t bitMask)
++{
++ uint8_t cnt = 0;
++
++ while (bitMask)
++ if (bitMask & 0x80000000)
++ break;
++ else
++ {
++ cnt++;
++ bitMask <<= 1;
++ }
++ return cnt;
++
++}
++
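++/* Build the 8-bit mask applied to an extracted OR byte so that only the
++ * bits overlapping the 24-bit FQID (fqid == TRUE) or the 8-bit policer
++ * profile id (fqid == FALSE) pass through at the given bit offset.
++ * E.g. bitOffset 3 -> 0x07; fqid with bitOffset 26 -> 0xFC;
++ * non-fqid with bitOffset 10 -> 0xFC. */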
++static uint8_t GetExtractedOrMask(uint8_t bitOffset, bool fqid)
++{
++ uint8_t i, mask, numOfOnesToClear, walking1Mask = 1;
++
++ /* bitOffset 1-7 --> mask 0x1-0x7F */
++ if (bitOffset<8)
++ {
++ mask = 0;
++ for (i = 0 ; i < bitOffset ; i++, walking1Mask <<= 1)
++ mask |= walking1Mask;
++ }
++ else
++ {
++ mask = 0xFF;
++ numOfOnesToClear = 0;
++ if (fqid && bitOffset>24)
++ /* bitOffset 25-31 --> mask 0xFE-0x80 */
++ numOfOnesToClear = (uint8_t)(bitOffset-24);
++ else
++ /* bitOffset 9-15 --> mask 0xFE-0x80 */
++ if (!fqid && bitOffset>8)
++ numOfOnesToClear = (uint8_t)(bitOffset-8);
++ for (i = 0 ; i < numOfOnesToClear ; i++, walking1Mask <<= 1)
++ mask &= ~walking1Mask;
++ /* bitOffset 8-24 for FQID, 8 for PP --> no mask (0xFF)*/
++ }
++ return mask;
++}
++
++static void IncSchemeOwners(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort)
++{
++ t_FmPcdKg *p_FmPcdKg;
++ t_FmPcdKgScheme *p_Scheme;
++ uint32_t intFlags;
++ uint8_t relativeSchemeId;
++ int i;
++
++ p_FmPcdKg = p_FmPcd->p_FmPcdKg;
++
++ /* for each scheme - update owners counters */
++ for (i = 0; i < p_BindPort->numOfSchemes; i++)
++ {
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]);
++ ASSERT_COND(relativeSchemeId < FM_PCD_KG_NUM_OF_SCHEMES);
++
++ p_Scheme = &p_FmPcdKg->schemes[relativeSchemeId];
++
++ /* increment owners number */
++ intFlags = KgSchemeLock(p_Scheme);
++ p_Scheme->owners++;
++ KgSchemeUnlock(p_Scheme, intFlags);
++ }
++}
++
++static void DecSchemeOwners(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort)
++{
++ t_FmPcdKg *p_FmPcdKg;
++ t_FmPcdKgScheme *p_Scheme;
++ uint32_t intFlags;
++ uint8_t relativeSchemeId;
++ int i;
++
++ p_FmPcdKg = p_FmPcd->p_FmPcdKg;
++
++ /* for each scheme - update owners counters */
++ for (i = 0; i < p_BindPort->numOfSchemes; i++)
++ {
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]);
++ ASSERT_COND(relativeSchemeId < FM_PCD_KG_NUM_OF_SCHEMES);
++
++ p_Scheme = &p_FmPcdKg->schemes[relativeSchemeId];
++
++        /* decrement owners number */
++ ASSERT_COND(p_Scheme->owners);
++ intFlags = KgSchemeLock(p_Scheme);
++ p_Scheme->owners--;
++ KgSchemeUnlock(p_Scheme, intFlags);
++ }
++}
++
++static void UpdateRequiredActionFlag(t_FmPcdKgScheme *p_Scheme, bool set)
++{
++ /* this routine is locked by the calling routine */
++ ASSERT_COND(p_Scheme);
++ ASSERT_COND(p_Scheme->valid);
++
++ if (set)
++ p_Scheme->requiredActionFlag = TRUE;
++ else
++ {
++ p_Scheme->requiredAction = 0;
++ p_Scheme->requiredActionFlag = FALSE;
++ }
++}
++
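++/* Update a port's scheme-partition register: read the current binding
++ * through the KG action register (KGAR), modify it via fman_kg_write_sp()
++ * and write it back, all under the common KG hardware lock. When a host
++ * command handle exists, the operation is delegated to it instead. */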
++static t_Error KgWriteSp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint32_t spReg, bool add)
++{
++ struct fman_kg_regs *p_KgRegs;
++
++ uint32_t tmpKgarReg = 0, intFlags;
++ t_Error err = E_OK;
++
++ /* The calling routine had locked the port, so for each port only one core can access
++ * (so we don't need a lock here) */
++
++ if (p_FmPcd->h_Hc)
++ return FmHcKgWriteSp(p_FmPcd->h_Hc, hardwarePortId, spReg, add);
++
++ p_KgRegs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
++
++ tmpKgarReg = FmPcdKgBuildReadPortSchemeBindActionReg(hardwarePortId);
++ /* lock a common KG reg */
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ err = WriteKgarWait(p_FmPcd, tmpKgarReg);
++ if (err)
++ {
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ fman_kg_write_sp(p_KgRegs, spReg, add);
++
++ tmpKgarReg = FmPcdKgBuildWritePortSchemeBindActionReg(hardwarePortId);
++
++ err = WriteKgarWait(p_FmPcd, tmpKgarReg);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++ return err;
++}
++
++static t_Error KgWriteCpp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint32_t cppReg)
++{
++ struct fman_kg_regs *p_KgRegs;
++ uint32_t tmpKgarReg, intFlags;
++ t_Error err;
++
++ p_KgRegs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
++
++ if (p_FmPcd->h_Hc)
++ {
++ err = FmHcKgWriteCpp(p_FmPcd->h_Hc, hardwarePortId, cppReg);
++ return err;
++ }
++
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ fman_kg_write_cpp(p_KgRegs, cppReg);
++ tmpKgarReg = FmPcdKgBuildWritePortClsPlanBindActionReg(hardwarePortId);
++ err = WriteKgarWait(p_FmPcd, tmpKgarReg);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++
++ return err;
++}
++
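++/* Encode the classification plan pointer register: the base entry in units
++ * of 8 entries, and (sizeOfGrp/8 - 1) as the mask field. E.g. baseEntry 16,
++ * sizeOfGrp 32 -> base field 2, mask field 3. */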
++static uint32_t BuildCppReg(t_FmPcd *p_FmPcd, uint8_t clsPlanGrpId)
++{
++ uint32_t tmpKgpeCpp;
++
++ tmpKgpeCpp = (uint32_t)(p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].baseEntry / 8);
++ tmpKgpeCpp |= (uint32_t)(((p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].sizeOfGrp / 8) - 1) << FM_KG_PE_CPP_MASK_SHIFT);
++
++ return tmpKgpeCpp;
++}
++
++static t_Error BindPortToClsPlanGrp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId)
++{
++ uint32_t tmpKgpeCpp = 0;
++
++ tmpKgpeCpp = BuildCppReg(p_FmPcd, clsPlanGrpId);
++ return KgWriteCpp(p_FmPcd, hardwarePortId, tmpKgpeCpp);
++}
++
++static void UnbindPortToClsPlanGrp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId)
++{
++ KgWriteCpp(p_FmPcd, hardwarePortId, 0);
++}
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++static uint32_t __attribute__((unused)) ReadClsPlanBlockActionReg(uint8_t grpId)
++{
++ return (uint32_t)(FM_KG_KGAR_GO |
++ FM_KG_KGAR_READ |
++ FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
++ DUMMY_PORT_ID |
++ ((uint32_t)grpId << FM_PCD_KG_KGAR_NUM_SHIFT) |
++ FM_PCD_KG_KGAR_WSEL_MASK);
++
++ /* if we ever want to write 1 by 1, use:
++ sel = (uint8_t)(0x01 << (7- (entryId % CLS_PLAN_NUM_PER_GRP)));
++ */
++}
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
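++/* KG error ISR: fetches the event and scheme bitmaps; a key-size overflow
++ * is reported per scheme, scanning the bitmap from the MSB (bit 31 is
++ * scheme 0, hence the (31 - index) translation below). */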
++static void PcdKgErrorException(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint32_t event,schemeIndexes = 0, index = 0;
++ struct fman_kg_regs *p_KgRegs;
++
++ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
++ p_KgRegs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
++ fman_kg_get_event(p_KgRegs, &event, &schemeIndexes);
++
++ if (event & FM_EX_KG_DOUBLE_ECC)
++ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC);
++ if (event & FM_EX_KG_KEYSIZE_OVERFLOW)
++ {
++ if (schemeIndexes)
++ {
++ while (schemeIndexes)
++ {
++ if (schemeIndexes & 0x1)
++ p_FmPcd->f_FmPcdIndexedException(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, (uint16_t)(31 - index));
++ schemeIndexes >>= 1;
++ index+=1;
++ }
++ }
++ else /* this should happen only when interrupt is forced. */
++ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW);
++ }
++}
++
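++/* Guest-partition init: scheme ids are allocated by the master partition,
++ * requested over IPC with FM_PCD_ALLOC_KG_SCHEMES. */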
++static t_Error KgInitGuest(t_FmPcd *p_FmPcd)
++{
++ t_Error err = E_OK;
++ t_FmPcdIpcKgSchemesParams kgAlloc;
++ uint32_t replyLength;
++ t_FmPcdIpcReply reply;
++ t_FmPcdIpcMsg msg;
++
++ ASSERT_COND(p_FmPcd->guestId != NCSW_MASTER_ID);
++
++ /* in GUEST_PARTITION, we use the IPC */
++ memset(&reply, 0, sizeof(reply));
++ memset(&msg, 0, sizeof(msg));
++ memset(&kgAlloc, 0, sizeof(t_FmPcdIpcKgSchemesParams));
++ kgAlloc.numOfSchemes = p_FmPcd->p_FmPcdKg->numOfSchemes;
++ kgAlloc.guestId = p_FmPcd->guestId;
++ msg.msgId = FM_PCD_ALLOC_KG_SCHEMES;
++ memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc));
++ replyLength = sizeof(uint32_t) + p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t);
++ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(kgAlloc),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if (replyLength != (sizeof(uint32_t) + p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ memcpy(p_FmPcd->p_FmPcdKg->schemesIds, (uint8_t*)(reply.replyBody),p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t));
++
++ return (t_Error)reply.error;
++}
++
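++/* Master-partition init: program the KG registers, register the error
++ * interrupt handler, enable per-scheme interrupts and allocate schemes
++ * locally. */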
++static t_Error KgInitMaster(t_FmPcd *p_FmPcd)
++{
++ t_Error err = E_OK;
++ struct fman_kg_regs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
++
++ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
++
++ if (p_FmPcd->exceptions & FM_EX_KG_DOUBLE_ECC)
++ FmEnableRamsEcc(p_FmPcd->h_Fm);
++
++ fman_kg_init(p_Regs, p_FmPcd->exceptions, GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd));
++
++ /* register even if no interrupts enabled, to allow future enablement */
++ FmRegisterIntr(p_FmPcd->h_Fm,
++ e_FM_MOD_KG,
++ 0,
++ e_FM_INTR_TYPE_ERR,
++ PcdKgErrorException,
++ p_FmPcd);
++
++ fman_kg_enable_scheme_interrupts(p_Regs);
++
++ if (p_FmPcd->p_FmPcdKg->numOfSchemes)
++ {
++ err = FmPcdKgAllocSchemes(p_FmPcd,
++ p_FmPcd->p_FmPcdKg->numOfSchemes,
++ p_FmPcd->guestId,
++ p_FmPcd->p_FmPcdKg->schemesIds);
++ if (err)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ return E_OK;
++}
++
++static void ValidateSchemeSw(t_FmPcdKgScheme *p_Scheme)
++{
++ ASSERT_COND(!p_Scheme->valid);
++ if (p_Scheme->netEnvId != ILLEGAL_NETENV)
++ FmPcdIncNetEnvOwners(p_Scheme->h_FmPcd, p_Scheme->netEnvId);
++ p_Scheme->valid = TRUE;
++}
++
++static t_Error InvalidateSchemeSw(t_FmPcdKgScheme *p_Scheme)
++{
++ if (p_Scheme->owners)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete a scheme that has ports bound to"));
++
++ if (p_Scheme->netEnvId != ILLEGAL_NETENV)
++ FmPcdDecNetEnvOwners(p_Scheme->h_FmPcd, p_Scheme->netEnvId);
++ p_Scheme->valid = FALSE;
++
++ return E_OK;
++}
++
++static t_Error BuildSchemeRegs(t_FmPcdKgScheme *p_Scheme,
++ t_FmPcdKgSchemeParams *p_SchemeParams,
++ struct fman_kg_scheme_regs *p_SchemeRegs)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)(p_Scheme->h_FmPcd);
++ uint32_t grpBits = 0;
++ uint8_t grpBase;
++ bool direct=TRUE, absolute=FALSE;
++ uint16_t profileId=0, numOfProfiles=0, relativeProfileId;
++ t_Error err = E_OK;
++ int i = 0;
++ t_NetEnvParams netEnvParams;
++ uint32_t tmpReg, fqbTmp = 0, ppcTmp = 0, selectTmp, maskTmp, knownTmp, genTmp;
++ t_FmPcdKgKeyExtractAndHashParams *p_KeyAndHash = NULL;
++ uint8_t j, curr, idx;
++ uint8_t id, shift=0, code=0, offset=0, size=0;
++ t_FmPcdExtractEntry *p_Extract = NULL;
++ t_FmPcdKgExtractedOrParams *p_ExtractOr;
++ bool generic = FALSE;
++ t_KnownFieldsMasks bitMask;
++ e_FmPcdKgExtractDfltSelect swDefault = (e_FmPcdKgExtractDfltSelect)0;
++ t_FmPcdKgSchemesExtracts *p_LocalExtractsArray;
++ uint8_t numOfSwDefaults = 0;
++ t_FmPcdKgExtractDflt swDefaults[NUM_OF_SW_DEFAULTS];
++ uint8_t currGenId = 0;
++
++ memset(swDefaults, 0, NUM_OF_SW_DEFAULTS*sizeof(t_FmPcdKgExtractDflt));
++ memset(p_SchemeRegs, 0, sizeof(struct fman_kg_scheme_regs));
++
++ if (p_SchemeParams->netEnvParams.numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("numOfDistinctionUnits should not exceed %d", FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS));
++
++ /* by netEnv parameters, get match vector */
++ if (!p_SchemeParams->alwaysDirect)
++ {
++ p_Scheme->netEnvId = FmPcdGetNetEnvId(p_SchemeParams->netEnvParams.h_NetEnv);
++ netEnvParams.netEnvId = p_Scheme->netEnvId;
++ netEnvParams.numOfDistinctionUnits = p_SchemeParams->netEnvParams.numOfDistinctionUnits;
++ memcpy(netEnvParams.unitIds, p_SchemeParams->netEnvParams.unitIds, (sizeof(uint8_t))*p_SchemeParams->netEnvParams.numOfDistinctionUnits);
++ err = PcdGetUnitsVector(p_FmPcd, &netEnvParams);
++ if (err)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++ p_Scheme->matchVector = netEnvParams.vector;
++ }
++ else
++ {
++ p_Scheme->matchVector = SCHEME_ALWAYS_DIRECT;
++ p_Scheme->netEnvId = ILLEGAL_NETENV;
++ }
++
++ if (p_SchemeParams->nextEngine == e_FM_PCD_INVALID)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next Engine of the scheme is not Valid"));
++
++ if (p_SchemeParams->bypassFqidGeneration)
++ {
++#ifdef FM_KG_NO_BYPASS_FQID_GEN
++ if ((p_FmPcd->fmRevInfo.majorRev != 4) && (p_FmPcd->fmRevInfo.majorRev < 6))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("bypassFqidGeneration."));
++#endif /* FM_KG_NO_BYPASS_FQID_GEN */
++ if (p_SchemeParams->baseFqid)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("baseFqid set for a scheme that does not generate an FQID"));
++ }
++ else
++ if (!p_SchemeParams->baseFqid)
++ DBG(WARNING, ("baseFqid is 0."));
++
++ if (p_SchemeParams->nextEngine == e_FM_PCD_PLCR)
++ {
++ direct = p_SchemeParams->kgNextEngineParams.plcrProfile.direct;
++ p_Scheme->directPlcr = direct;
++ absolute = (bool)(p_SchemeParams->kgNextEngineParams.plcrProfile.sharedProfile ? TRUE : FALSE);
++ if (!direct && absolute)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Indirect policing is not available when profile is shared."));
++
++ if (direct)
++ {
++ profileId = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.directRelativeProfileId;
++ numOfProfiles = 1;
++ }
++ else
++ {
++ profileId = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase;
++ shift = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.fqidOffsetShift;
++ numOfProfiles = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.numOfProfiles;
++ }
++ }
++
++ if (p_SchemeParams->nextEngine == e_FM_PCD_CC)
++ {
++#ifdef FM_KG_NO_BYPASS_PLCR_PROFILE_GEN
++ if ((p_SchemeParams->kgNextEngineParams.cc.plcrNext) && (p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration))
++ {
++ if ((p_FmPcd->fmRevInfo.majorRev != 4) && (p_FmPcd->fmRevInfo.majorRev < 6))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("bypassPlcrProfileGeneration."));
++ }
++#endif /* FM_KG_NO_BYPASS_PLCR_PROFILE_GEN */
++
++ err = FmPcdCcGetGrpParams(p_SchemeParams->kgNextEngineParams.cc.h_CcTree,
++ p_SchemeParams->kgNextEngineParams.cc.grpId,
++ &grpBits,
++ &grpBase);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ p_Scheme->ccUnits = grpBits;
++
++ if ((p_SchemeParams->kgNextEngineParams.cc.plcrNext) &&
++ (!p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration))
++ {
++ if (p_SchemeParams->kgNextEngineParams.cc.plcrProfile.sharedProfile)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Shared profile may not be used after Coarse classification."));
++ absolute = FALSE;
++ direct = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.direct;
++ if (direct)
++ {
++ profileId = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.directRelativeProfileId;
++ numOfProfiles = 1;
++ }
++ else
++ {
++ profileId = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase;
++ shift = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.fqidOffsetShift;
++ numOfProfiles = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.numOfProfiles;
++ }
++ }
++ }
++
++ /* if policer is used directly after KG, or after CC */
++ if ((p_SchemeParams->nextEngine == e_FM_PCD_PLCR) ||
++ ((p_SchemeParams->nextEngine == e_FM_PCD_CC) &&
++ (p_SchemeParams->kgNextEngineParams.cc.plcrNext) &&
++ (!p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration)))
++ {
++ /* if private policer profile, it may be uninitialized yet, therefore no checks are done at this stage */
++ if (absolute)
++ {
++ /* for absolute direct policy only, */
++ relativeProfileId = profileId;
++ err = FmPcdPlcrGetAbsoluteIdByProfileParams((t_Handle)p_FmPcd,e_FM_PCD_PLCR_SHARED,NULL, relativeProfileId, &profileId);
++ if (err)
++ RETURN_ERROR(MAJOR, err, ("Shared profile not valid offset"));
++ if (!FmPcdPlcrIsProfileValid(p_FmPcd, profileId))
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Shared profile not valid."));
++ p_Scheme->relativeProfileId = profileId;
++ }
++ else
++ {
++ /* save relative profile id's for later check */
++ p_Scheme->nextRelativePlcrProfile = TRUE;
++ p_Scheme->relativeProfileId = profileId;
++ p_Scheme->numOfProfiles = numOfProfiles;
++ }
++ }
++ else
++ {
++        /* if the policer is NOT going to be used after KG at all, then when bypassFqidGeneration
++           is set we do not need numOfUsedExtractedOrs and hashDistributionNumOfFqids */
++ if (p_SchemeParams->bypassFqidGeneration && p_SchemeParams->numOfUsedExtractedOrs)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("numOfUsedExtractedOrs is set in a scheme that does not generate FQID or policer profile ID"));
++ if (p_SchemeParams->bypassFqidGeneration &&
++ p_SchemeParams->useHash &&
++ p_SchemeParams->keyExtractAndHashParams.hashDistributionNumOfFqids)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("hashDistributionNumOfFqids is set in a scheme that does not generate FQID or policer profile ID"));
++ }
++
++ /* configure all 21 scheme registers */
++ tmpReg = KG_SCH_MODE_EN;
++ switch (p_SchemeParams->nextEngine)
++ {
++ case (e_FM_PCD_PLCR):
++ /* add to mode register - NIA */
++ tmpReg |= KG_SCH_MODE_NIA_PLCR;
++ tmpReg |= NIA_ENG_PLCR;
++ tmpReg |= (uint32_t)(p_SchemeParams->kgNextEngineParams.plcrProfile.sharedProfile ? NIA_PLCR_ABSOLUTE:0);
++ /* initialize policer profile command - */
++ /* configure kgse_ppc */
++ if (direct)
++ /* use profileId as base, other fields are 0 */
++ p_SchemeRegs->kgse_ppc = (uint32_t)profileId;
++ else
++ {
++ if (shift > MAX_PP_SHIFT)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_PP_SHIFT));
++
++ if (!numOfProfiles || !POWER_OF_2(numOfProfiles))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2"));
++
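++                /* kgse_ppc layout: the fqidOffsetShift is replicated into the
++                   high and low shift fields, (numOfProfiles - 1) forms the mask
++                   and profileId the base; e.g. shift 2 with 8 profiles and
++                   base 4 gives mask 7, base 4. */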
++ ppcTmp = ((uint32_t)shift << KG_SCH_PP_SHIFT_HIGH_SHIFT) & KG_SCH_PP_SHIFT_HIGH;
++ ppcTmp |= ((uint32_t)shift << KG_SCH_PP_SHIFT_LOW_SHIFT) & KG_SCH_PP_SHIFT_LOW;
++ ppcTmp |= ((uint32_t)(numOfProfiles-1) << KG_SCH_PP_MASK_SHIFT);
++ ppcTmp |= (uint32_t)profileId;
++
++ p_SchemeRegs->kgse_ppc = ppcTmp;
++ }
++ break;
++ case (e_FM_PCD_CC):
++ /* mode reg - define NIA */
++ tmpReg |= (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC);
++
++ p_SchemeRegs->kgse_ccbs = grpBits;
++ tmpReg |= (uint32_t)(grpBase << KG_SCH_MODE_CCOBASE_SHIFT);
++
++ if (p_SchemeParams->kgNextEngineParams.cc.plcrNext)
++ {
++ if (!p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration)
++ {
++ /* find out if absolute or relative */
++ if (absolute)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("It is illegal to request a shared profile in a scheme that is in a KG->CC->PLCR flow"));
++ if (direct)
++ {
++ /* mask = 0, base = directProfileId */
++ p_SchemeRegs->kgse_ppc = (uint32_t)profileId;
++ }
++ else
++ {
++ if (shift > MAX_PP_SHIFT)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_PP_SHIFT));
++ if (!numOfProfiles || !POWER_OF_2(numOfProfiles))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2"));
++
++ ppcTmp = ((uint32_t)shift << KG_SCH_PP_SHIFT_HIGH_SHIFT) & KG_SCH_PP_SHIFT_HIGH;
++ ppcTmp |= ((uint32_t)shift << KG_SCH_PP_SHIFT_LOW_SHIFT) & KG_SCH_PP_SHIFT_LOW;
++ ppcTmp |= ((uint32_t)(numOfProfiles-1) << KG_SCH_PP_MASK_SHIFT);
++ ppcTmp |= (uint32_t)profileId;
++
++ p_SchemeRegs->kgse_ppc = ppcTmp;
++ }
++ }
++ }
++ break;
++ case (e_FM_PCD_DONE):
++ if (p_SchemeParams->kgNextEngineParams.doneAction == e_FM_PCD_DROP_FRAME)
++ tmpReg |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd);
++ else
++ tmpReg |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Next engine not supported"));
++ }
++ p_SchemeRegs->kgse_mode = tmpReg;
++
++ p_SchemeRegs->kgse_mv = p_Scheme->matchVector;
++
++#if (DPAA_VERSION >= 11)
++ if (p_SchemeParams->overrideStorageProfile)
++ {
++ p_SchemeRegs->kgse_om |= KG_SCH_OM_VSPE;
++
++ if (p_SchemeParams->storageProfile.direct)
++ {
++ profileId = p_SchemeParams->storageProfile.profileSelect.directRelativeProfileId;
++ shift = 0;
++ numOfProfiles = 1;
++ }
++ else
++ {
++ profileId = p_SchemeParams->storageProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase;
++ shift = p_SchemeParams->storageProfile.profileSelect.indirectProfile.fqidOffsetShift;
++ numOfProfiles = p_SchemeParams->storageProfile.profileSelect.indirectProfile.numOfProfiles;
++ }
++ if (shift > MAX_SP_SHIFT)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_SP_SHIFT));
++
++ if (!numOfProfiles || !POWER_OF_2(numOfProfiles))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2"));
++
++ tmpReg = (uint32_t)shift << KG_SCH_VSP_SHIFT;
++ tmpReg |= ((uint32_t)(numOfProfiles-1) << KG_SCH_VSP_MASK_SHIFT);
++ tmpReg |= (uint32_t)profileId;
++
++
++ p_SchemeRegs->kgse_vsp = tmpReg;
++
++ p_Scheme->vspe = TRUE;
++
++ }
++ else
++ p_SchemeRegs->kgse_vsp = KG_SCH_VSP_NO_KSP_EN;
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (p_SchemeParams->useHash)
++ {
++ p_KeyAndHash = &p_SchemeParams->keyExtractAndHashParams;
++
++ if (p_KeyAndHash->numOfUsedExtracts >= FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfUsedExtracts out of range"));
++
++ /* configure kgse_dv0 */
++ p_SchemeRegs->kgse_dv0 = p_KeyAndHash->privateDflt0;
++
++ /* configure kgse_dv1 */
++ p_SchemeRegs->kgse_dv1 = p_KeyAndHash->privateDflt1;
++
++ if (!p_SchemeParams->bypassFqidGeneration)
++ {
++ if (!p_KeyAndHash->hashDistributionNumOfFqids || !POWER_OF_2(p_KeyAndHash->hashDistributionNumOfFqids))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashDistributionNumOfFqids must not be 0 and must be a power of 2"));
++ if ((p_KeyAndHash->hashDistributionNumOfFqids-1) & p_SchemeParams->baseFqid)
++                DBG(WARNING, ("baseFqid unaligned. Distribution may result in fewer than hashDistributionNumOfFqids queues."));
++ }
++
++ /* configure kgse_ekdv */
++ tmpReg = 0;
++ for ( i=0 ;i<p_KeyAndHash->numOfUsedDflts ; i++)
++ {
++ switch (p_KeyAndHash->dflts[i].type)
++ {
++ case (e_FM_PCD_KG_MAC_ADDR):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_MAC_ADDR_SHIFT);
++ break;
++ case (e_FM_PCD_KG_TCI):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_TCI_SHIFT);
++ break;
++ case (e_FM_PCD_KG_ENET_TYPE):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_ENET_TYPE_SHIFT);
++ break;
++ case (e_FM_PCD_KG_PPP_SESSION_ID):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PPP_SESSION_ID_SHIFT);
++ break;
++ case (e_FM_PCD_KG_PPP_PROTOCOL_ID):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PPP_PROTOCOL_ID_SHIFT);
++ break;
++ case (e_FM_PCD_KG_MPLS_LABEL):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_MPLS_LABEL_SHIFT);
++ break;
++ case (e_FM_PCD_KG_IP_ADDR):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IP_ADDR_SHIFT);
++ break;
++ case (e_FM_PCD_KG_PROTOCOL_TYPE):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PROTOCOL_TYPE_SHIFT);
++ break;
++ case (e_FM_PCD_KG_IP_TOS_TC):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IP_TOS_TC_SHIFT);
++ break;
++ case (e_FM_PCD_KG_IPV6_FLOW_LABEL):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_L4_PORT_SHIFT);
++ break;
++ case (e_FM_PCD_KG_IPSEC_SPI):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IPSEC_SPI_SHIFT);
++ break;
++ case (e_FM_PCD_KG_L4_PORT):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_L4_PORT_SHIFT);
++ break;
++ case (e_FM_PCD_KG_TCP_FLAG):
++ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_TCP_FLAG_SHIFT);
++ break;
++ case (e_FM_PCD_KG_GENERIC_FROM_DATA):
++ swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_FROM_DATA;
++ swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect;
++ numOfSwDefaults ++;
++ break;
++ case (e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V):
++ swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V;
++ swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect;
++ numOfSwDefaults ++;
++ break;
++ case (e_FM_PCD_KG_GENERIC_NOT_FROM_DATA):
++ swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_NOT_FROM_DATA;
++ swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect;
++ numOfSwDefaults ++;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ }
++ p_SchemeRegs->kgse_ekdv = tmpReg;
++
++ p_LocalExtractsArray = (t_FmPcdKgSchemesExtracts *)XX_Malloc(sizeof(t_FmPcdKgSchemesExtracts));
++ if (!p_LocalExtractsArray)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
++
++ /* configure kgse_ekfc and kgse_gec */
++ knownTmp = 0;
++ for ( i=0 ;i<p_KeyAndHash->numOfUsedExtracts ; i++)
++ {
++ p_Extract = &p_KeyAndHash->extractArray[i];
++ switch (p_Extract->type)
++ {
++ case (e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO):
++ knownTmp |= KG_SCH_KN_PORT_ID;
++ /* save in driver structure */
++ p_LocalExtractsArray->extractsArray[i].id = GetKnownFieldId(KG_SCH_KN_PORT_ID);
++ p_LocalExtractsArray->extractsArray[i].known = TRUE;
++ break;
++ case (e_FM_PCD_EXTRACT_BY_HDR):
++ switch (p_Extract->extractByHdr.hdr)
++ {
++#if (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ case (HEADER_TYPE_UDP_LITE):
++ p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP;
++ break;
++#endif /* (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++ case (HEADER_TYPE_UDP_ENCAP_ESP):
++ switch (p_Extract->extractByHdr.type)
++ {
++ case (e_FM_PCD_EXTRACT_FROM_HDR):
++ /* case where extraction from ESP only */
++ if (p_Extract->extractByHdr.extractByHdrType.fromHdr.offset >= UDP_HEADER_SIZE)
++ {
++ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
++ p_Extract->extractByHdr.extractByHdrType.fromHdr.offset -= UDP_HEADER_SIZE;
++ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
++ }
++ else
++ {
++ p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP;
++ p_Extract->extractByHdr.ignoreProtocolValidation = FALSE;
++ }
++ break;
++ case (e_FM_PCD_EXTRACT_FROM_FIELD):
++ switch (p_Extract->extractByHdr.extractByHdrType.fromField.field.udpEncapEsp)
++ {
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC):
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST):
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN):
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM):
++ p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP;
++ break;
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI):
++ p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR;
++ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
++ /*p_Extract->extractByHdr.extractByHdrType.fromField.offset += ESP_SPI_OFFSET;*/
++ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
++ break;
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM):
++ p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR;
++ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
++ p_Extract->extractByHdr.extractByHdrType.fromField.offset += ESP_SEQ_NUM_OFFSET;
++ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
++ break;
++ }
++ break;
++ case (e_FM_PCD_EXTRACT_FULL_FIELD):
++ switch (p_Extract->extractByHdr.extractByHdrType.fullField.udpEncapEsp)
++ {
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC):
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST):
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN):
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM):
++ p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP;
++ break;
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI):
++ p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR;
++ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
++ p_Extract->extractByHdr.extractByHdrType.fromHdr.size = ESP_SPI_SIZE;
++ p_Extract->extractByHdr.extractByHdrType.fromHdr.offset = ESP_SPI_OFFSET;
++ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
++ break;
++ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM):
++ p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR;
++ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
++ p_Extract->extractByHdr.extractByHdrType.fromHdr.size = ESP_SEQ_NUM_SIZE;
++ p_Extract->extractByHdr.extractByHdrType.fromHdr.offset = ESP_SEQ_NUM_OFFSET;
++ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
++ break;
++ }
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++ switch (p_Extract->extractByHdr.type)
++ {
++ case (e_FM_PCD_EXTRACT_FROM_HDR):
++ generic = TRUE;
++ /* get the header code for the generic extract */
++ code = GetGenHdrCode(p_Extract->extractByHdr.hdr, p_Extract->extractByHdr.hdrIndex, p_Extract->extractByHdr.ignoreProtocolValidation);
++ /* set generic register fields */
++ offset = p_Extract->extractByHdr.extractByHdrType.fromHdr.offset;
++ size = p_Extract->extractByHdr.extractByHdrType.fromHdr.size;
++ break;
++ case (e_FM_PCD_EXTRACT_FROM_FIELD):
++ generic = TRUE;
++ /* get the field code for the generic extract */
++ code = GetGenFieldCode(p_Extract->extractByHdr.hdr,
++ p_Extract->extractByHdr.extractByHdrType.fromField.field, p_Extract->extractByHdr.ignoreProtocolValidation,p_Extract->extractByHdr.hdrIndex);
++ offset = p_Extract->extractByHdr.extractByHdrType.fromField.offset;
++ size = p_Extract->extractByHdr.extractByHdrType.fromField.size;
++ break;
++ case (e_FM_PCD_EXTRACT_FULL_FIELD):
++ if (!p_Extract->extractByHdr.ignoreProtocolValidation)
++ {
++ /* if we have a known field for it - use it, otherwise use generic */
++ bitMask = GetKnownProtMask(p_FmPcd, p_Extract->extractByHdr.hdr, p_Extract->extractByHdr.hdrIndex,
++ p_Extract->extractByHdr.extractByHdrType.fullField);
++ if (bitMask)
++ {
++ knownTmp |= bitMask;
++ /* save in driver structure */
++ p_LocalExtractsArray->extractsArray[i].id = GetKnownFieldId(bitMask);
++ p_LocalExtractsArray->extractsArray[i].known = TRUE;
++ }
++ else
++ generic = TRUE;
++ }
++ else
++ generic = TRUE;
++ if (generic)
++ {
++ /* tmp - till we cover more headers under generic */
++ XX_Free(p_LocalExtractsArray);
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Full header selection not supported"));
++ }
++ break;
++ default:
++ XX_Free(p_LocalExtractsArray);
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ break;
++ case (e_FM_PCD_EXTRACT_NON_HDR):
++ /* use generic */
++ generic = TRUE;
++ offset = 0;
++ /* get the field code for the generic extract */
++ code = GetGenCode(p_Extract->extractNonHdr.src, &offset);
++ offset += p_Extract->extractNonHdr.offset;
++ size = p_Extract->extractNonHdr.size;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ if (generic)
++ {
++ /* set generic register fields */
++ if (currGenId >= FM_KG_NUM_OF_GENERIC_REGS)
++ {
++ XX_Free(p_LocalExtractsArray);
++ RETURN_ERROR(MAJOR, E_FULL, ("Generic registers are fully used"));
++ }
++ if (!code)
++ {
++ XX_Free(p_LocalExtractsArray);
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG);
++ }
++
++ genTmp = KG_SCH_GEN_VALID;
++ genTmp |= (uint32_t)(code << KG_SCH_GEN_HT_SHIFT);
++ genTmp |= offset;
++ if ((size > MAX_KG_SCH_SIZE) || (size < 1))
++ {
++ XX_Free(p_LocalExtractsArray);
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal extraction (size out of range)"));
++ }
++ genTmp |= (uint32_t)((size - 1) << KG_SCH_GEN_SIZE_SHIFT);
++ swDefault = GetGenericSwDefault(swDefaults, numOfSwDefaults, code);
++ if (swDefault == e_FM_PCD_KG_DFLT_ILLEGAL)
++ DBG(WARNING, ("No sw default configured"));
++ else
++ genTmp |= swDefault << KG_SCH_GEN_DEF_SHIFT;
++
++ genTmp |= KG_SCH_GEN_MASK;
++ p_SchemeRegs->kgse_gec[currGenId] = genTmp;
++ /* save in driver structure */
++ p_LocalExtractsArray->extractsArray[i].id = currGenId++;
++ p_LocalExtractsArray->extractsArray[i].known = FALSE;
++ generic = FALSE;
++ }
++ }
++ p_SchemeRegs->kgse_ekfc = knownTmp;
++
++ selectTmp = 0;
++ maskTmp = 0xFFFFFFFF;
++ /* configure kgse_bmch, kgse_bmcl and kgse_fqb */
++
++ if (p_KeyAndHash->numOfUsedMasks > FM_PCD_KG_NUM_OF_EXTRACT_MASKS)
++ {
++ XX_Free(p_LocalExtractsArray);
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Only %d masks supported", FM_PCD_KG_NUM_OF_EXTRACT_MASKS));
++ }
++ for ( i=0 ;i<p_KeyAndHash->numOfUsedMasks ; i++)
++ {
++ /* Get the relative id of the extract (for known 0-0x1f, for generic 0-7) */
++ id = p_LocalExtractsArray->extractsArray[p_KeyAndHash->masks[i].extractArrayIndex].id;
++ /* Get the shift of the select field (depending on i) */
++ GET_MASK_SEL_SHIFT(shift,i);
++ if (p_LocalExtractsArray->extractsArray[p_KeyAndHash->masks[i].extractArrayIndex].known)
++ selectTmp |= id << shift;
++ else
++ selectTmp |= (id + MASK_FOR_GENERIC_BASE_ID) << shift;
++
++ /* Get the shift of the offset field (depending on i) - may
++ be in kgse_bmch or in kgse_fqb (depending on i) */
++ GET_MASK_OFFSET_SHIFT(shift,i);
++ if (i<=1)
++ selectTmp |= p_KeyAndHash->masks[i].offset << shift;
++ else
++ fqbTmp |= p_KeyAndHash->masks[i].offset << shift;
++
++ /* Get the shift of the mask field (depending on i) */
++ GET_MASK_SHIFT(shift,i);
++ /* pass all bits */
++ maskTmp |= KG_SCH_BITMASK_MASK << shift;
++ /* clear bits that need masking */
++ maskTmp &= ~(0xFF << shift) ;
++ /* set mask bits */
++ maskTmp |= (p_KeyAndHash->masks[i].mask << shift) ;
++ }
++ p_SchemeRegs->kgse_bmch = selectTmp;
++ p_SchemeRegs->kgse_bmcl = maskTmp;
++        /* kgse_fqb will be written at the end of the routine */
++
++ /* configure kgse_hc */
++ if (p_KeyAndHash->hashShift > MAX_HASH_SHIFT)
++ {
++ XX_Free(p_LocalExtractsArray);
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashShift must not be larger than %d", MAX_HASH_SHIFT));
++ }
++ if (p_KeyAndHash->hashDistributionFqidsShift > MAX_DIST_FQID_SHIFT)
++ {
++ XX_Free(p_LocalExtractsArray);
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashDistributionFqidsShift must not be larger than %d", MAX_DIST_FQID_SHIFT));
++ }
++
++ tmpReg = 0;
++
++ tmpReg |= ((p_KeyAndHash->hashDistributionNumOfFqids - 1) << p_KeyAndHash->hashDistributionFqidsShift);
++ tmpReg |= p_KeyAndHash->hashShift << KG_SCH_HASH_CONFIG_SHIFT_SHIFT;
++
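++        /* symmetric hashing requires that source/destination pairs (MAC,
++           IPv4/v6 and L4 ports) be extracted together, otherwise swapping
++           the two addresses would not hash to the same value */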
++ if (p_KeyAndHash->symmetricHash)
++ {
++ if ((!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_MACSRC) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_MACDST)) ||
++ (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPSRC1) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPDST1)) ||
++ (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPSRC2) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPDST2)) ||
++ (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_L4PSRC) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_L4PDST)))
++ {
++ XX_Free(p_LocalExtractsArray);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("symmetricHash set but src/dest extractions missing"));
++ }
++ tmpReg |= KG_SCH_HASH_CONFIG_SYM;
++ }
++ p_SchemeRegs->kgse_hc = tmpReg;
++
++ /* build the return array describing the order of the extractions */
++
++        /* The last currGenId places of the array are reserved for generic
++           extracts, which always come last. The insertion sort below orders
++           the known extracts into orderedArray[0] ..
++           orderedArray[p_KeyAndHash->numOfUsedExtracts - currGenId - 1].
++           For placing the generic extractions we use:
++           num_of_generic = currGenId
++           num_of_known = p_KeyAndHash->numOfUsedExtracts - currGenId
++           first_generic_index = num_of_known */
++ curr = 0;
++ for (i=0;i<p_KeyAndHash->numOfUsedExtracts ; i++)
++ {
++ if (p_LocalExtractsArray->extractsArray[i].known)
++ {
++ ASSERT_COND(curr<(p_KeyAndHash->numOfUsedExtracts - currGenId));
++ j = curr;
++                /* id is the extract id (port id = 0, mac src = 1, etc.); the value in the array
++                   is the original index in the user's extractions array */
++                /* compare the id of the current extract with the id of the extract at the
++                   orderedArray[j-1] location */
++ while ((j > 0) && (p_LocalExtractsArray->extractsArray[i].id <
++ p_LocalExtractsArray->extractsArray[p_Scheme->orderedArray[j-1]].id))
++ {
++ p_Scheme->orderedArray[j] =
++ p_Scheme->orderedArray[j-1];
++ j--;
++ }
++ p_Scheme->orderedArray[j] = (uint8_t)i;
++ curr++;
++ }
++ else
++ {
++ /* index is first_generic_index + generic index (id) */
++ idx = (uint8_t)(p_KeyAndHash->numOfUsedExtracts - currGenId + p_LocalExtractsArray->extractsArray[i].id);
++ ASSERT_COND(idx < FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY);
++ p_Scheme->orderedArray[idx]= (uint8_t)i;
++ }
++ }
++ XX_Free(p_LocalExtractsArray);
++ }
++ else
++ {
++ /* clear all unused registers: */
++ p_SchemeRegs->kgse_ekfc = 0;
++ p_SchemeRegs->kgse_ekdv = 0;
++ p_SchemeRegs->kgse_bmch = 0;
++ p_SchemeRegs->kgse_bmcl = 0;
++ p_SchemeRegs->kgse_hc = 0;
++ p_SchemeRegs->kgse_dv0 = 0;
++ p_SchemeRegs->kgse_dv1 = 0;
++ }
++
++ if (p_SchemeParams->bypassFqidGeneration)
++ p_SchemeRegs->kgse_hc |= KG_SCH_HASH_CONFIG_NO_FQID;
++
++ /* configure kgse_spc */
++ if ( p_SchemeParams->schemeCounter.update)
++ p_SchemeRegs->kgse_spc = p_SchemeParams->schemeCounter.value;
++
++
++ /* check that are enough generic registers */
++ if (p_SchemeParams->numOfUsedExtractedOrs + currGenId > FM_KG_NUM_OF_GENERIC_REGS)
++ RETURN_ERROR(MAJOR, E_FULL, ("Generic registers are fully used"));
++
++ /* extracted OR mask on Qid */
++ for ( i=0 ;i<p_SchemeParams->numOfUsedExtractedOrs ; i++)
++ {
++
++ p_Scheme->extractedOrs = TRUE;
++ /* configure kgse_gec[i] */
++ p_ExtractOr = &p_SchemeParams->extractedOrs[i];
++ switch (p_ExtractOr->type)
++ {
++ case (e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO):
++ code = KG_SCH_GEN_PARSE_RESULT_N_FQID;
++ offset = 0;
++ break;
++ case (e_FM_PCD_EXTRACT_BY_HDR):
++ /* get the header code for the generic extract */
++ code = GetGenHdrCode(p_ExtractOr->extractByHdr.hdr, p_ExtractOr->extractByHdr.hdrIndex, p_ExtractOr->extractByHdr.ignoreProtocolValidation);
++ /* set generic register fields */
++ offset = p_ExtractOr->extractionOffset;
++ break;
++ case (e_FM_PCD_EXTRACT_NON_HDR):
++ /* get the field code for the generic extract */
++ offset = 0;
++ code = GetGenCode(p_ExtractOr->src, &offset);
++ offset += p_ExtractOr->extractionOffset;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ /* set generic register fields */
++ if (!code)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG);
++ genTmp = KG_SCH_GEN_EXTRACT_TYPE | KG_SCH_GEN_VALID;
++ genTmp |= (uint32_t)(code << KG_SCH_GEN_HT_SHIFT);
++ genTmp |= offset;
++ if (!!p_ExtractOr->bitOffsetInFqid == !!p_ExtractOr->bitOffsetInPlcrProfile)
++            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("extracted byte must affect either FQID or Policer profile"));
++
++ /************************************************************************************
++ bitOffsetInFqid and bitOffsetInPolicerProfile are translated to rotate parameter
++ in the following way:
++
++ Driver API and implementation:
++ ==============================
++      FQID: the extracted OR byte may be shifted right 1-31 bits to affect parts of the FQID.
++      If shifted fewer than 8 bits, or more than 24 bits, a mask is set on the bits that
++      do not overlap the FQID.
++ ------------------------
++ | FQID (24) |
++ ------------------------
++ --------
++ | | extracted OR byte
++ --------
++
++      Policer Profile: the extracted OR byte may be shifted right 1-15 bits to affect parts of the
++      PP id. Unless shifted exactly 8 bits to overlap the PP id, a mask is set on the bits that
++      do not overlap the PP id.
++
++ --------
++ | PP (8) |
++ --------
++ --------
++ | | extracted OR byte
++ --------
++
++ HW implementation
++ =================
++      FQID and PP form a 32 bit word as described below. The extracted byte is located
++      as the highest byte of that word and may be rotated to affect any part of the FQID
++      or the PP.
++ ------------------------ --------
++ | FQID (24) || PP (8) |
++ ------------------------ --------
++ --------
++ | | extracted OR byte
++ --------
++
++ ************************************************************************************/
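++        /* Examples of the resulting rotate field: bitOffsetInFqid 4 -> 28
++           (4+24), bitOffsetInFqid 12 -> 4 (12-8), bitOffsetInPlcrProfile 4
++           -> 20 (4+16). */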
++
++ if (p_ExtractOr->bitOffsetInFqid)
++ {
++ if (p_ExtractOr->bitOffsetInFqid > MAX_KG_SCH_FQID_BIT_OFFSET )
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal extraction (bitOffsetInFqid out of range)"));
++ if (p_ExtractOr->bitOffsetInFqid<8)
++ genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInFqid+24) << KG_SCH_GEN_SIZE_SHIFT);
++ else
++ genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInFqid-8) << KG_SCH_GEN_SIZE_SHIFT);
++ p_ExtractOr->mask &= GetExtractedOrMask(p_ExtractOr->bitOffsetInFqid, TRUE);
++ }
++ else /* effect policer profile */
++ {
++ if (p_ExtractOr->bitOffsetInPlcrProfile > MAX_KG_SCH_PP_BIT_OFFSET )
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal extraction (bitOffsetInPlcrProfile out of range)"));
++ p_Scheme->bitOffsetInPlcrProfile = p_ExtractOr->bitOffsetInPlcrProfile;
++ genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInPlcrProfile+16) << KG_SCH_GEN_SIZE_SHIFT);
++ p_ExtractOr->mask &= GetExtractedOrMask(p_ExtractOr->bitOffsetInPlcrProfile, FALSE);
++ }
++
++ genTmp |= (uint32_t)(p_ExtractOr->extractionOffset << KG_SCH_GEN_DEF_SHIFT);
++ /* clear bits that need masking */
++ genTmp &= ~KG_SCH_GEN_MASK ;
++ /* set mask bits */
++ genTmp |= (uint32_t)(p_ExtractOr->mask << KG_SCH_GEN_MASK_SHIFT);
++ p_SchemeRegs->kgse_gec[currGenId++] = genTmp;
++
++ }
++ /* clear all unused GEC registers */
++ for ( i=currGenId ;i<FM_KG_NUM_OF_GENERIC_REGS ; i++)
++ p_SchemeRegs->kgse_gec[i] = 0;
++
++ /* add base Qid for this scheme */
++ /* add configuration for kgse_fqb */
++ if (p_SchemeParams->baseFqid & ~0x00FFFFFF)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("baseFqid must be between 1 and 2^24-1"));
++
++ fqbTmp |= p_SchemeParams->baseFqid;
++ p_SchemeRegs->kgse_fqb = fqbTmp;
++
++ p_Scheme->nextEngine = p_SchemeParams->nextEngine;
++ p_Scheme->doneAction = p_SchemeParams->kgNextEngineParams.doneAction;
++
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* Inter-module API routines */
++/*****************************************************************************/
++
++t_Error FmPcdKgBuildClsPlanGrp(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_Grp, t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmPcdKgClsPlanGrp *p_ClsPlanGrp;
++ t_FmPcdIpcKgClsPlanParams kgAlloc;
++ t_Error err = E_OK;
++ uint32_t oredVectors = 0;
++ int i, j;
++
++ /* this routine is protected by the calling routine ! */
++ if (p_Grp->numOfOptions >= FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Too many classification plan basic options selected."));
++
++ /* find a new clsPlan group */
++ for (i = 0; i < FM_MAX_NUM_OF_PORTS; i++)
++ if (!p_FmPcd->p_FmPcdKg->clsPlanGrps[i].used)
++ break;
++ if (i == FM_MAX_NUM_OF_PORTS)
++ RETURN_ERROR(MAJOR, E_FULL,("No classification plan groups available."));
++
++ p_FmPcd->p_FmPcdKg->clsPlanGrps[i].used = TRUE;
++
++ p_Grp->clsPlanGrpId = (uint8_t)i;
++
++ if (p_Grp->numOfOptions == 0)
++ p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId = (uint8_t)i;
++
++ p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[i];
++ p_ClsPlanGrp->netEnvId = p_Grp->netEnvId;
++ p_ClsPlanGrp->owners = 0;
++ FmPcdSetClsPlanGrpId(p_FmPcd, p_Grp->netEnvId, p_Grp->clsPlanGrpId);
++ if (p_Grp->numOfOptions != 0)
++ FmPcdIncNetEnvOwners(p_FmPcd, p_Grp->netEnvId);
++
++ p_ClsPlanGrp->sizeOfGrp = (uint16_t)(1 << p_Grp->numOfOptions);
++ /* a minimal group of 8 is required */
++ if (p_ClsPlanGrp->sizeOfGrp < CLS_PLAN_NUM_PER_GRP)
++ p_ClsPlanGrp->sizeOfGrp = CLS_PLAN_NUM_PER_GRP;
++ if (p_FmPcd->guestId == NCSW_MASTER_ID)
++ {
++ err = KgAllocClsPlanEntries(h_FmPcd, p_ClsPlanGrp->sizeOfGrp, p_FmPcd->guestId, &p_ClsPlanGrp->baseEntry);
++
++ if (err)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, NO_MSG);
++ }
++ else
++ {
++ t_FmPcdIpcMsg msg;
++ uint32_t replyLength;
++ t_FmPcdIpcReply reply;
++
++ /* in GUEST_PARTITION, we use the IPC, to also set a private driver group if required */
++ memset(&reply, 0, sizeof(reply));
++ memset(&msg, 0, sizeof(msg));
++ memset(&kgAlloc, 0, sizeof(kgAlloc));
++ kgAlloc.guestId = p_FmPcd->guestId;
++ kgAlloc.numOfClsPlanEntries = p_ClsPlanGrp->sizeOfGrp;
++ msg.msgId = FM_PCD_ALLOC_KG_CLSPLAN;
++ memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc));
++ replyLength = (sizeof(uint32_t) + sizeof(p_ClsPlanGrp->baseEntry));
++ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(kgAlloc),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (replyLength != (sizeof(uint32_t) + sizeof(p_ClsPlanGrp->baseEntry)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ if ((t_Error)reply.error != E_OK)
++ RETURN_ERROR(MINOR, (t_Error)reply.error, NO_MSG);
++
++ p_ClsPlanGrp->baseEntry = *(uint8_t*)(reply.replyBody);
++ }
++
++ /* build classification plan entries parameters */
++ p_ClsPlanSet->baseEntry = p_ClsPlanGrp->baseEntry;
++ p_ClsPlanSet->numOfClsPlanEntries = p_ClsPlanGrp->sizeOfGrp;
++
++ oredVectors = 0;
++ for (i = 0; i<p_Grp->numOfOptions; i++)
++ {
++ oredVectors |= p_Grp->optVectors[i];
++ /* save an array of used options - the indexes represent the power of 2 index */
++ p_ClsPlanGrp->optArray[i] = p_Grp->options[i];
++ }
++    /* set the classification plan relevant entries so that all bits
++     * relevant to the list of options are cleared
++     */
++ for (j = 0; j<p_ClsPlanGrp->sizeOfGrp; j++)
++ p_ClsPlanSet->vectors[j] = ~oredVectors;
++
++ for (i = 0; i<p_Grp->numOfOptions; i++)
++ {
++        /* option i gets place 2^i in the clsPlan array. All entries that
++         * have bit i set should have that option's vector bits set (all
++         * option bits start out cleared). So each option
++         * has one location that is exclusively its own (1,2,4,8...), representing
++         * the presence of that option only, and other locations that represent a
++         * combination of options.
++ * e.g:
++ * If ethernet-BC is option 1 it gets entry 2 in the table. Entry 2
++ * now represents a frame with ethernet-BC header - so the bit
++ * representing ethernet-BC should be set and all other option bits
++ * should be cleared.
++ * Entries 2,3,6,7,10... also have ethernet-BC and therefore have bit
++ * vector[1] set, but they also have other bits set:
++ * 3=1+2, options 0 and 1
++ * 6=2+4, options 1 and 2
++ * 7=1+2+4, options 0,1,and 2
++ * 10=2+8, options 1 and 3
++ * etc.
++ * */
++
++ /* now for each option (i), we set their bits in all entries (j)
++ * that contain bit 2^i.
++ */
++ for (j = 0; j<p_ClsPlanGrp->sizeOfGrp; j++)
++ {
++ if (j & (1<<i))
++ p_ClsPlanSet->vectors[j] |= p_Grp->optVectors[i];
++ }
++ }
++
++ return E_OK;
++}
++
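++/* Tear down a classification plan group: refuses while ports are still
++ * bound; entries are freed locally on the master partition or released
++ * over IPC (FM_PCD_FREE_KG_CLSPLAN) on a guest. */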
++void FmPcdKgDestroyClsPlanGrp(t_Handle h_FmPcd, uint8_t grpId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmPcdIpcKgClsPlanParams kgAlloc;
++ t_Error err;
++ t_FmPcdIpcMsg msg;
++ uint32_t replyLength;
++ t_FmPcdIpcReply reply;
++
++ /* check that no port is bound to this clsPlan */
++ if (p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].owners)
++ {
++ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete a clsPlan grp that has ports bound to"));
++ return;
++ }
++
++ FmPcdSetClsPlanGrpId(p_FmPcd, p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].netEnvId, ILLEGAL_CLS_PLAN);
++
++ if (grpId == p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId)
++ p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId = ILLEGAL_CLS_PLAN;
++ else
++ FmPcdDecNetEnvOwners(p_FmPcd, p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].netEnvId);
++
++ /* free blocks */
++ if (p_FmPcd->guestId == NCSW_MASTER_ID)
++ KgFreeClsPlanEntries(h_FmPcd,
++ p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].sizeOfGrp,
++ p_FmPcd->guestId,
++ p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].baseEntry);
++ else /* in GUEST_PARTITION, we use the IPC, to also set a private driver group if required */
++ {
++ memset(&reply, 0, sizeof(reply));
++ memset(&msg, 0, sizeof(msg));
++ kgAlloc.guestId = p_FmPcd->guestId;
++ kgAlloc.numOfClsPlanEntries = p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].sizeOfGrp;
++ kgAlloc.clsPlanBase = p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].baseEntry;
++ msg.msgId = FM_PCD_FREE_KG_CLSPLAN;
++ memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc));
++ replyLength = sizeof(uint32_t);
++ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(kgAlloc),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ return;
++ }
++ if (replyLength != sizeof(uint32_t))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return;
++ }
++ if ((t_Error)reply.error != E_OK)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Free KG clsPlan failed"));
++ return;
++ }
++ }
++
++ /* clear clsPlan driver structure */
++ memset(&p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId], 0, sizeof(t_FmPcdKgClsPlanGrp));
++}
++
++t_Error FmPcdKgBuildBindPortToSchemes(t_Handle h_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort, uint32_t *p_SpReg, bool add)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t j, schemesPerPortVector = 0;
++ t_FmPcdKgScheme *p_Scheme;
++ uint8_t i, relativeSchemeId;
++ uint32_t tmp, walking1Mask;
++ uint8_t swPortIndex = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
++
++ /* for each scheme */
++ for (i = 0; i<p_BindPort->numOfSchemes; i++)
++ {
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]);
++ if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++
++ if (add)
++ {
++ p_Scheme = &p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId];
++ if (!FmPcdKgIsSchemeValidSw(p_Scheme))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Requested scheme is invalid."));
++ /* check netEnvId of the port against the scheme netEnvId */
++ if ((p_Scheme->netEnvId != p_BindPort->netEnvId) && (p_Scheme->netEnvId != ILLEGAL_NETENV))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port may not be bound to requested scheme - differ in netEnvId"));
++
++ /* if next engine is private port policer profile, we need to check that it is valid */
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, p_BindPort->hardwarePortId);
++ if (p_Scheme->nextRelativePlcrProfile)
++ {
++ for (j = 0;j<p_Scheme->numOfProfiles;j++)
++ {
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].h_FmPort);
++ if (p_Scheme->relativeProfileId+j >= p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Relative profile not in range"));
++ if (!FmPcdPlcrIsProfileValid(p_FmPcd, (uint16_t)(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase + p_Scheme->relativeProfileId + j)))
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Relative profile not valid."));
++ }
++ }
++ if (!p_BindPort->useClsPlan)
++ {
++            /* This check may be redundant, as the port is assigned to the whole NetEnv */
++
++            /* If this port does not use a clsPlan, it may not be bound to schemes with units that
++               contain cls plan options. Schemes that are used only directly should not be checked.
++               It also may not be bound to schemes that go to CC with units that are options - so we OR
++               the match vector and the grpBits (= ccUnits) */
++ if ((p_Scheme->matchVector != SCHEME_ALWAYS_DIRECT) || p_Scheme->ccUnits)
++ {
++ uint8_t netEnvId;
++ walking1Mask = 0x80000000;
++ netEnvId = (p_Scheme->netEnvId == ILLEGAL_NETENV)? p_BindPort->netEnvId:p_Scheme->netEnvId;
++ tmp = (p_Scheme->matchVector == SCHEME_ALWAYS_DIRECT)? 0:p_Scheme->matchVector;
++ tmp |= p_Scheme->ccUnits;
++ while (tmp)
++ {
++ if (tmp & walking1Mask)
++ {
++ tmp &= ~walking1Mask;
++ if (!PcdNetEnvIsUnitWithoutOpts(p_FmPcd, netEnvId, walking1Mask))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port (without clsPlan) may not be bound to requested scheme - uses clsPlan options"));
++ }
++ walking1Mask >>= 1;
++ }
++ }
++ }
++ }
++ /* build vector */
++ schemesPerPortVector |= 1 << (31 - p_BindPort->schemesIds[i]);
++ }
++
++ *p_SpReg = schemesPerPortVector;
++
++ return E_OK;
++}
++
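++/* Bind a port to a set of schemes: validate and build the scheme-per-port
++ * vector, write it to the port's SP register and bump the owner count of
++ * every scheme involved. */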
++t_Error FmPcdKgBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t spReg;
++ t_Error err = E_OK;
++
++ err = FmPcdKgBuildBindPortToSchemes(h_FmPcd, p_SchemeBind, &spReg, TRUE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ err = KgWriteSp(p_FmPcd, p_SchemeBind->hardwarePortId, spReg, TRUE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ IncSchemeOwners(p_FmPcd, p_SchemeBind);
++
++ return E_OK;
++}
++
++t_Error FmPcdKgUnbindPortToSchemes(t_Handle h_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t spReg;
++ t_Error err = E_OK;
++
++ err = FmPcdKgBuildBindPortToSchemes(p_FmPcd, p_SchemeBind, &spReg, FALSE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ err = KgWriteSp(p_FmPcd, p_SchemeBind->hardwarePortId, spReg, FALSE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ DecSchemeOwners(p_FmPcd, p_SchemeBind);
++
++ return E_OK;
++}
++
++bool FmPcdKgIsSchemeValidSw(t_Handle h_Scheme)
++{
++ t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme*)h_Scheme;
++
++ return p_Scheme->valid;
++}
++
++bool KgIsSchemeAlwaysDirect(t_Handle h_FmPcd, uint8_t schemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ if (p_FmPcd->p_FmPcdKg->schemes[schemeId].matchVector == SCHEME_ALWAYS_DIRECT)
++ return TRUE;
++ else
++ return FALSE;
++}
++
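++/* Allocate numOfSchemes free scheme entries for the given guest. Runs only
++ * on the master partition (directly or via IPC); on shortage, all entries
++ * taken so far are rolled back. */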
++t_Error FmPcdKgAllocSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint8_t i, j;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE);
++
++ /* This routine is issued only on master core of master partition -
++ either directly or through IPC, so no need for lock */
++
++ for (j = 0, i = 0; i < FM_PCD_KG_NUM_OF_SCHEMES && j < numOfSchemes; i++)
++ {
++ if (!p_FmPcd->p_FmPcdKg->schemesMng[i].allocated)
++ {
++ p_FmPcd->p_FmPcdKg->schemesMng[i].allocated = TRUE;
++ p_FmPcd->p_FmPcdKg->schemesMng[i].ownerId = guestId;
++ p_SchemesIds[j] = i;
++ j++;
++ }
++ }
++
++ if (j != numOfSchemes)
++ {
++        /* roll back all entries allocated so far, including index 0 */
++        while (j--)
++        {
++            p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[j]].allocated = FALSE;
++            p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[j]].ownerId = 0;
++            p_SchemesIds[j] = 0;
++        }
++
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("No schemes found"));
++ }
++
++ return E_OK;
++}
++
++t_Error FmPcdKgFreeSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint8_t i;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE);
++
++ /* This routine is issued only on master core of master partition -
++ either directly or through IPC */
++
++ for (i = 0; i < numOfSchemes; i++)
++ {
++ if (!p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].allocated)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Scheme was not previously allocated"));
++ }
++ if (p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].ownerId != guestId)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Scheme is not owned by caller. "));
++ }
++ p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].allocated = FALSE;
++ p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].ownerId = 0;
++ }
++
++ return E_OK;
++}
++
++t_Error KgAllocClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t *p_First)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint8_t numOfBlocks, blocksFound=0, first=0;
++ uint8_t i, j;
++
++ /* This routine is issued only on master core of master partition -
++ either directly or through IPC, so no need for lock */
++
++ if (!numOfClsPlanEntries)
++ return E_OK;
++
++ if ((numOfClsPlanEntries % CLS_PLAN_NUM_PER_GRP) || (!POWER_OF_2(numOfClsPlanEntries)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfClsPlanEntries must be a power of 2 and divisible by 8"));
++
++ numOfBlocks = (uint8_t)(numOfClsPlanEntries/CLS_PLAN_NUM_PER_GRP);
++
++    /* try to find consecutive blocks */
++ first = 0;
++ for (i = 0; i < FM_PCD_MAX_NUM_OF_CLS_PLANS/CLS_PLAN_NUM_PER_GRP;)
++ {
++ if (!p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated)
++ {
++ blocksFound++;
++ i++;
++ if (blocksFound == numOfBlocks)
++ break;
++ }
++ else
++ {
++ blocksFound = 0;
++ /* advance i to the next aligned address */
++ first = i = (uint8_t)(first + numOfBlocks);
++ }
++ }
++
++ if (blocksFound == numOfBlocks)
++ {
++ *p_First = (uint8_t)(first * CLS_PLAN_NUM_PER_GRP);
++ for (j = first; j < (first + numOfBlocks); j++)
++ {
++ p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[j].allocated = TRUE;
++ p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[j].ownerId = guestId;
++ }
++ return E_OK;
++ }
++ else
++ RETURN_ERROR(MINOR, E_FULL, ("No resources for clsPlan"));
++}
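++
++/* Illustrative sketch (hypothetical names, example only): the search above
++ * looks for a naturally aligned run of free blocks, where the run length is
++ * a power of two; a compact standalone equivalent:
++ */
++#if 0
++static int ExampleFindAlignedRun(const int *busy, int total, int run)
++{
++    int first, i;
++
++    for (first = 0; (first + run) <= total; first += run) /* aligned starts */
++    {
++        for (i = 0; (i < run) && !busy[first + i]; i++)
++            ;
++        if (i == run)
++            return first;       /* found <run> consecutive free blocks */
++    }
++    return -1;
++}
++#endif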
++
++void KgFreeClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t base)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint8_t numOfBlocks;
++ uint8_t i, baseBlock;
++
++#ifdef DISABLE_ASSERTIONS
++    UNUSED(guestId);
++#endif /* DISABLE_ASSERTIONS */
++
++ /* This routine is issued only on master core of master partition -
++ either directly or through IPC, so no need for lock */
++
++ numOfBlocks = (uint8_t)(numOfClsPlanEntries/CLS_PLAN_NUM_PER_GRP);
++ ASSERT_COND(!(base%CLS_PLAN_NUM_PER_GRP));
++
++ baseBlock = (uint8_t)(base/CLS_PLAN_NUM_PER_GRP);
++ for (i=baseBlock;i<baseBlock+numOfBlocks;i++)
++ {
++ ASSERT_COND(p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated);
++ ASSERT_COND(guestId == p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].ownerId);
++ p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated = FALSE;
++ p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].ownerId = 0;
++ }
++}
++
++void KgEnable(t_FmPcd *p_FmPcd)
++{
++ struct fman_kg_regs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
++
++ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
++ fman_kg_enable(p_Regs);
++}
++
++void KgDisable(t_FmPcd *p_FmPcd)
++{
++ struct fman_kg_regs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
++
++ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
++ fman_kg_disable(p_Regs);
++}
++
++void KgSetClsPlan(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanSet *p_Set)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ struct fman_kg_cp_regs *p_FmPcdKgPortRegs;
++ uint32_t tmpKgarReg = 0, intFlags;
++ uint16_t i, j;
++
++ /* This routine is protected by the calling routine ! */
++ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
++ p_FmPcdKgPortRegs = &p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->clsPlanRegs;
++
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ for (i=p_Set->baseEntry;i<p_Set->baseEntry+p_Set->numOfClsPlanEntries;i+=8)
++ {
++ tmpKgarReg = FmPcdKgBuildWriteClsPlanBlockActionReg((uint8_t)(i / CLS_PLAN_NUM_PER_GRP));
++
++ for (j = i; j < i+8; j++)
++ {
++ ASSERT_COND(IN_RANGE(0, (j - p_Set->baseEntry), FM_PCD_MAX_NUM_OF_CLS_PLANS-1));
++ WRITE_UINT32(p_FmPcdKgPortRegs->kgcpe[j % CLS_PLAN_NUM_PER_GRP],p_Set->vectors[j - p_Set->baseEntry]);
++ }
++
++ if (WriteKgarWait(p_FmPcd, tmpKgarReg) != E_OK)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("WriteKgarWait FAILED"));
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++ return;
++ }
++ }
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++}
++
++t_Handle KgConfig( t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams)
++{
++ t_FmPcdKg *p_FmPcdKg;
++
++ UNUSED(p_FmPcd);
++
++ if (p_FmPcdParams->numOfSchemes > FM_PCD_KG_NUM_OF_SCHEMES)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("numOfSchemes should not exceed %d", FM_PCD_KG_NUM_OF_SCHEMES));
++ return NULL;
++ }
++
++ p_FmPcdKg = (t_FmPcdKg *)XX_Malloc(sizeof(t_FmPcdKg));
++ if (!p_FmPcdKg)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Keygen allocation FAILED"));
++ return NULL;
++ }
++ memset(p_FmPcdKg, 0, sizeof(t_FmPcdKg));
++
++
++ if (FmIsMaster(p_FmPcd->h_Fm))
++ {
++ p_FmPcdKg->p_FmPcdKgRegs = (struct fman_kg_regs *)UINT_TO_PTR(FmGetPcdKgBaseAddr(p_FmPcdParams->h_Fm));
++ p_FmPcd->exceptions |= DEFAULT_fmPcdKgErrorExceptions;
++ p_FmPcdKg->p_IndirectAccessRegs = (u_FmPcdKgIndirectAccessRegs *)&p_FmPcdKg->p_FmPcdKgRegs->fmkg_indirect[0];
++ }
++
++ p_FmPcdKg->numOfSchemes = p_FmPcdParams->numOfSchemes;
++ if ((p_FmPcd->guestId == NCSW_MASTER_ID) && !p_FmPcdKg->numOfSchemes)
++ {
++ p_FmPcdKg->numOfSchemes = FM_PCD_KG_NUM_OF_SCHEMES;
++ DBG(WARNING, ("numOfSchemes was defined 0 by user, re-defined by driver to FM_PCD_KG_NUM_OF_SCHEMES"));
++ }
++
++ p_FmPcdKg->emptyClsPlanGrpId = ILLEGAL_CLS_PLAN;
++
++ return p_FmPcdKg;
++}
++
++t_Error KgInit(t_FmPcd *p_FmPcd)
++{
++ t_Error err = E_OK;
++
++ p_FmPcd->p_FmPcdKg->h_HwSpinlock = XX_InitSpinlock();
++ if (!p_FmPcd->p_FmPcdKg->h_HwSpinlock)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM KG HW spinlock"));
++
++ if (p_FmPcd->guestId == NCSW_MASTER_ID)
++ err = KgInitMaster(p_FmPcd);
++ else
++ err = KgInitGuest(p_FmPcd);
++
++ if (err != E_OK)
++ {
++ if (p_FmPcd->p_FmPcdKg->h_HwSpinlock)
++ XX_FreeSpinlock(p_FmPcd->p_FmPcdKg->h_HwSpinlock);
++ }
++
++ return err;
++}
++
++t_Error KgFree(t_FmPcd *p_FmPcd)
++{
++ t_FmPcdIpcKgSchemesParams kgAlloc;
++ t_Error err = E_OK;
++ t_FmPcdIpcMsg msg;
++ uint32_t replyLength;
++ t_FmPcdIpcReply reply;
++
++ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_KG, 0, e_FM_INTR_TYPE_ERR);
++
++ if (p_FmPcd->guestId == NCSW_MASTER_ID)
++ {
++ err = FmPcdKgFreeSchemes(p_FmPcd,
++ p_FmPcd->p_FmPcdKg->numOfSchemes,
++ p_FmPcd->guestId,
++ p_FmPcd->p_FmPcdKg->schemesIds);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (p_FmPcd->p_FmPcdKg->h_HwSpinlock)
++ XX_FreeSpinlock(p_FmPcd->p_FmPcdKg->h_HwSpinlock);
++
++ return E_OK;
++ }
++
++ /* guest */
++ memset(&reply, 0, sizeof(reply));
++ memset(&msg, 0, sizeof(msg));
++ kgAlloc.numOfSchemes = p_FmPcd->p_FmPcdKg->numOfSchemes;
++ kgAlloc.guestId = p_FmPcd->guestId;
++ ASSERT_COND(kgAlloc.numOfSchemes < FM_PCD_KG_NUM_OF_SCHEMES);
++ memcpy(kgAlloc.schemesIds, p_FmPcd->p_FmPcdKg->schemesIds, (sizeof(uint8_t))*kgAlloc.numOfSchemes);
++ msg.msgId = FM_PCD_FREE_KG_SCHEMES;
++ memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc));
++ replyLength = sizeof(uint32_t);
++ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(kgAlloc),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++
++ if (p_FmPcd->p_FmPcdKg->h_HwSpinlock)
++ XX_FreeSpinlock(p_FmPcd->p_FmPcdKg->h_HwSpinlock);
++
++ return (t_Error)reply.error;
++}
++
++t_Error FmPcdKgSetOrBindToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t netEnvId, protocolOpt_t *p_OptArray, uint8_t *p_ClsPlanGrpId, bool *p_IsEmptyClsPlanGrp)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_FmPcdKgInterModuleClsPlanGrpParams grpParams, *p_GrpParams;
++ t_FmPcdKgClsPlanGrp *p_ClsPlanGrp;
++ t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet;
++ t_Error err;
++
++ /* This function is issued only from FM_PORT_SetPcd which locked all PCD modules,
++ so no need for lock here */
++
++ memset(&grpParams, 0, sizeof(grpParams));
++ grpParams.clsPlanGrpId = ILLEGAL_CLS_PLAN;
++ p_GrpParams = &grpParams;
++
++ p_GrpParams->netEnvId = netEnvId;
++
++ /* Get from the NetEnv the information of the clsPlan (can be already created,
++ * or needs to build) */
++ err = PcdGetClsPlanGrpParams(h_FmPcd, p_GrpParams);
++ if (err)
++ RETURN_ERROR(MINOR,err,NO_MSG);
++
++ if (p_GrpParams->grpExists)
++ {
++ /* this group was already updated (at least) in SW */
++ *p_ClsPlanGrpId = p_GrpParams->clsPlanGrpId;
++ }
++ else
++ {
++ p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet));
++ if (!p_ClsPlanSet)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Classification plan set"));
++ memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet));
++ /* Build (in SW) the clsPlan parameters, including the vectors to be written to HW */
++ err = FmPcdKgBuildClsPlanGrp(h_FmPcd, p_GrpParams, p_ClsPlanSet);
++ if (err)
++ {
++ XX_Free(p_ClsPlanSet);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++ *p_ClsPlanGrpId = p_GrpParams->clsPlanGrpId;
++
++ if (p_FmPcd->h_Hc)
++ {
++ /* write clsPlan entries to memory */
++ err = FmHcPcdKgSetClsPlan(p_FmPcd->h_Hc, p_ClsPlanSet);
++ if (err)
++ {
++ XX_Free(p_ClsPlanSet);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++ else
++ /* write clsPlan entries to memory */
++ KgSetClsPlan(p_FmPcd, p_ClsPlanSet);
++
++ XX_Free(p_ClsPlanSet);
++ }
++
++ /* Set caller parameters */
++
++ /* mark if this is an empty classification group */
++ if (*p_ClsPlanGrpId == p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId)
++ *p_IsEmptyClsPlanGrp = TRUE;
++ else
++ *p_IsEmptyClsPlanGrp = FALSE;
++
++ p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[*p_ClsPlanGrpId];
++
++ /* increment owners number */
++ p_ClsPlanGrp->owners++;
++
++ /* copy options array for port */
++ memcpy(p_OptArray, &p_FmPcd->p_FmPcdKg->clsPlanGrps[*p_ClsPlanGrpId].optArray, FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)*sizeof(protocolOpt_t));
++
++ /* bind port to the new or existing group */
++ err = BindPortToClsPlanGrp(p_FmPcd, hardwarePortId, p_GrpParams->clsPlanGrpId);
++ if (err)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++t_Error FmPcdKgDeleteOrUnbindPortToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_FmPcdKgClsPlanGrp *p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId];
++ t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet;
++ t_Error err;
++
++ /* This function is issued only from FM_PORT_DeletePcd which locked all PCD modules,
++ so no need for lock here */
++
++ UnbindPortToClsPlanGrp(p_FmPcd, hardwarePortId);
++
++ /* decrement owners number */
++ ASSERT_COND(p_ClsPlanGrp->owners);
++ p_ClsPlanGrp->owners--;
++
++ if (!p_ClsPlanGrp->owners)
++ {
++ if (p_FmPcd->h_Hc)
++ {
++ err = FmHcPcdKgDeleteClsPlan(p_FmPcd->h_Hc, clsPlanGrpId);
++ return err;
++ }
++ else
++ {
++ /* clear clsPlan entries in memory */
++ p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet));
++ if (!p_ClsPlanSet)
++ {
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Classification plan set"));
++ }
++ memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet));
++
++ p_ClsPlanSet->baseEntry = p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].baseEntry;
++ p_ClsPlanSet->numOfClsPlanEntries = p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].sizeOfGrp;
++ KgSetClsPlan(p_FmPcd, p_ClsPlanSet);
++ XX_Free(p_ClsPlanSet);
++
++ FmPcdKgDestroyClsPlanGrp(h_FmPcd, clsPlanGrpId);
++ }
++ }
++ return E_OK;
++}
++
++uint32_t FmPcdKgGetRequiredAction(t_Handle h_FmPcd, uint8_t schemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
++
++ return p_FmPcd->p_FmPcdKg->schemes[schemeId].requiredAction;
++}
++
++uint32_t FmPcdKgGetRequiredActionFlag(t_Handle h_FmPcd, uint8_t schemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
++
++ return p_FmPcd->p_FmPcdKg->schemes[schemeId].requiredActionFlag;
++}
++
++bool FmPcdKgIsDirectPlcr(t_Handle h_FmPcd, uint8_t schemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
++
++ return p_FmPcd->p_FmPcdKg->schemes[schemeId].directPlcr;
++}
++
++
++uint16_t FmPcdKgGetRelativeProfileId(t_Handle h_FmPcd, uint8_t schemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
++
++ return p_FmPcd->p_FmPcdKg->schemes[schemeId].relativeProfileId;
++}
++
++bool FmPcdKgIsDistrOnPlcrProfile(t_Handle h_FmPcd, uint8_t schemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
++
++ if ((p_FmPcd->p_FmPcdKg->schemes[schemeId].extractedOrs &&
++ p_FmPcd->p_FmPcdKg->schemes[schemeId].bitOffsetInPlcrProfile) ||
++ p_FmPcd->p_FmPcdKg->schemes[schemeId].nextRelativePlcrProfile)
++ return TRUE;
++ else
++ return FALSE;
++
++}
++
++e_FmPcdEngine FmPcdKgGetNextEngine(t_Handle h_FmPcd, uint8_t relativeSchemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].valid);
++
++ return p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine;
++}
++
++e_FmPcdDoneAction FmPcdKgGetDoneAction(t_Handle h_FmPcd, uint8_t schemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
++
++ return p_FmPcd->p_FmPcdKg->schemes[schemeId].doneAction;
++}
++
++void FmPcdKgUpdateRequiredAction(t_Handle h_Scheme, uint32_t requiredAction)
++{
++ t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme *)h_Scheme;
++
++ /* this routine is protected by calling routine */
++
++ ASSERT_COND(p_Scheme->valid);
++
++ p_Scheme->requiredAction |= requiredAction;
++}
++
++bool FmPcdKgHwSchemeIsValid(uint32_t schemeModeReg)
++{
++ return (bool)!!(schemeModeReg & KG_SCH_MODE_EN);
++}
++
++uint32_t FmPcdKgBuildWriteSchemeActionReg(uint8_t schemeId, bool updateCounter)
++{
++ return (uint32_t)(((uint32_t)schemeId << FM_PCD_KG_KGAR_NUM_SHIFT) |
++ FM_KG_KGAR_GO |
++ FM_KG_KGAR_WRITE |
++ FM_KG_KGAR_SEL_SCHEME_ENTRY |
++ DUMMY_PORT_ID |
++ (updateCounter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT:0));
++}
++
++uint32_t FmPcdKgBuildReadSchemeActionReg(uint8_t schemeId)
++{
++ return (uint32_t)(((uint32_t)schemeId << FM_PCD_KG_KGAR_NUM_SHIFT) |
++ FM_KG_KGAR_GO |
++ FM_KG_KGAR_READ |
++ FM_KG_KGAR_SEL_SCHEME_ENTRY |
++ DUMMY_PORT_ID |
++ FM_KG_KGAR_SCM_WSEL_UPDATE_CNT);
++
++}
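++
++/* Usage sketch (example only): callers compose the action word and then
++ * poll for completion, e.g. to write scheme 3 without touching its counter:
++ *
++ *     tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(3, FALSE);
++ *     WriteKgarWait(p_FmPcd, tmpKgarReg);
++ */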
++
++uint32_t FmPcdKgBuildWriteClsPlanBlockActionReg(uint8_t grpId)
++{
++ return (uint32_t)(FM_KG_KGAR_GO |
++ FM_KG_KGAR_WRITE |
++ FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
++ DUMMY_PORT_ID |
++ ((uint32_t)grpId << FM_PCD_KG_KGAR_NUM_SHIFT) |
++ FM_PCD_KG_KGAR_WSEL_MASK);
++
++ /* if we ever want to write 1 by 1, use:
++ sel = (uint8_t)(0x01 << (7- (entryId % CLS_PLAN_NUM_PER_GRP)));
++ */
++}
++
++uint32_t FmPcdKgBuildWritePortSchemeBindActionReg(uint8_t hardwarePortId)
++{
++
++ return (uint32_t)(FM_KG_KGAR_GO |
++ FM_KG_KGAR_WRITE |
++ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
++ hardwarePortId |
++ FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP);
++}
++
++uint32_t FmPcdKgBuildReadPortSchemeBindActionReg(uint8_t hardwarePortId)
++{
++
++ return (uint32_t)(FM_KG_KGAR_GO |
++ FM_KG_KGAR_READ |
++ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
++ hardwarePortId |
++ FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP);
++}
++
++uint32_t FmPcdKgBuildWritePortClsPlanBindActionReg(uint8_t hardwarePortId)
++{
++
++ return (uint32_t)(FM_KG_KGAR_GO |
++ FM_KG_KGAR_WRITE |
++ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
++ hardwarePortId |
++ FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP);
++}
++
++uint8_t FmPcdKgGetClsPlanGrpBase(t_Handle h_FmPcd, uint8_t clsPlanGrp)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ return p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrp].baseEntry;
++}
++
++uint16_t FmPcdKgGetClsPlanGrpSize(t_Handle h_FmPcd, uint8_t clsPlanGrp)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ return p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrp].sizeOfGrp;
++}
++
++
++uint8_t FmPcdKgGetSchemeId(t_Handle h_Scheme)
++{
++ return ((t_FmPcdKgScheme*)h_Scheme)->schemeId;
++
++}
++
++#if (DPAA_VERSION >= 11)
++bool FmPcdKgGetVspe(t_Handle h_Scheme)
++{
++ return ((t_FmPcdKgScheme*)h_Scheme)->vspe;
++
++}
++#endif /* (DPAA_VERSION >= 11) */
++
++uint8_t FmPcdKgGetRelativeSchemeId(t_Handle h_FmPcd, uint8_t schemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint8_t i;
++
++ for (i = 0;i<p_FmPcd->p_FmPcdKg->numOfSchemes;i++)
++ if (p_FmPcd->p_FmPcdKg->schemesIds[i] == schemeId)
++ return i;
++
++ if (i == p_FmPcd->p_FmPcdKg->numOfSchemes)
++ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("Scheme is out of partition range"));
++
++ return FM_PCD_KG_NUM_OF_SCHEMES;
++}
++
++t_Handle FmPcdKgGetSchemeHandle(t_Handle h_FmPcd, uint8_t relativeSchemeId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(p_FmPcd);
++
++ /* check that schemeId is in range */
++ if (relativeSchemeId >= p_FmPcd->p_FmPcdKg->numOfSchemes)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("relative-scheme-id %d!", relativeSchemeId));
++ return NULL;
++ }
++
++ if (!FmPcdKgIsSchemeValidSw(&p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId]))
++ return NULL;
++
++ return &p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId];
++}
++
++bool FmPcdKgIsSchemeHasOwners(t_Handle h_Scheme)
++{
++ return (((t_FmPcdKgScheme*)h_Scheme)->owners == 0)?FALSE:TRUE;
++}
++
++t_Error FmPcdKgCcGetSetParams(t_Handle h_FmPcd, t_Handle h_Scheme, uint32_t requiredAction, uint32_t value)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint8_t relativeSchemeId, physicalSchemeId;
++ uint32_t tmpKgarReg, tmpReg32 = 0, intFlags;
++ t_Error err;
++ t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme*)h_Scheme;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0);
++
++ /* Calling function locked all PCD modules, so no need to lock here */
++
++ if (!FmPcdKgIsSchemeValidSw(h_Scheme))
++ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid"));
++
++ if (p_FmPcd->h_Hc)
++ {
++ err = FmHcPcdKgCcGetSetParams(p_FmPcd->h_Hc, h_Scheme, requiredAction, value);
++
++ UpdateRequiredActionFlag(h_Scheme,TRUE);
++ FmPcdKgUpdateRequiredAction(h_Scheme,requiredAction);
++ return err;
++ }
++
++ physicalSchemeId = p_Scheme->schemeId;
++
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId);
++ if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++
++ if (!p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].requiredActionFlag ||
++ !(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].requiredAction & requiredAction))
++ {
++ if (requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
++ {
++ switch (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine)
++ {
++ case (e_FM_PCD_DONE):
++ if (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].doneAction == e_FM_PCD_ENQ_FRAME)
++ {
++ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode);
++ ASSERT_COND(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME));
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, tmpReg32 | NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA);
++ /* call indirect command for scheme write */
++ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++ }
++ break;
++ case (e_FM_PCD_PLCR):
++ if (!p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].directPlcr ||
++ (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].extractedOrs &&
++ p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].bitOffsetInPlcrProfile) ||
++ p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextRelativePlcrProfile)
++ {
++                            RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this case the policer profile cannot use distribution and must be shared"));
++ }
++ err = FmPcdPlcrCcGetSetParams(h_FmPcd, p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].relativeProfileId, requiredAction);
++ if (err)
++ {
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ break;
++ default:
++                        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("In this case the next engine after the scheme must be either PLCR or ENQ_FRAME"));
++ }
++ }
++ if (requiredAction & UPDATE_KG_NIA_CC_WA)
++ {
++ if (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine == e_FM_PCD_CC)
++ {
++ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode);
++ ASSERT_COND(tmpReg32 & (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC));
++ tmpReg32 &= ~NIA_FM_CTL_AC_CC;
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, tmpReg32 | NIA_FM_CTL_AC_PRE_CC);
++ /* call indirect command for scheme write */
++ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++ }
++ }
++ if (requiredAction & UPDATE_KG_OPT_MODE)
++ {
++ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_om, value);
++ /* call indirect command for scheme write */
++ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++ }
++ if (requiredAction & UPDATE_KG_NIA)
++ {
++ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode);
++ tmpReg32 &= ~(NIA_ENG_MASK | NIA_AC_MASK);
++ tmpReg32 |= value;
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, tmpReg32);
++ /* call indirect command for scheme write */
++ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++ }
++ }
++
++ UpdateRequiredActionFlag(h_Scheme, TRUE);
++ FmPcdKgUpdateRequiredAction(h_Scheme, requiredAction);
++
++ return E_OK;
++}
++/*********************** End of inter-module routines ************************/
++
++
++/****************************************/
++/* API routines */
++/****************************************/
++
++t_Handle FM_PCD_KgSchemeSet(t_Handle h_FmPcd, t_FmPcdKgSchemeParams *p_SchemeParams)
++{
++ t_FmPcd *p_FmPcd;
++ struct fman_kg_scheme_regs schemeRegs;
++ struct fman_kg_scheme_regs *p_MemRegs;
++ uint8_t i;
++ t_Error err = E_OK;
++ uint32_t tmpKgarReg;
++ uint32_t intFlags;
++ uint8_t physicalSchemeId, relativeSchemeId = 0;
++ t_FmPcdKgScheme *p_Scheme;
++
++ if (p_SchemeParams->modify)
++ {
++ p_Scheme = (t_FmPcdKgScheme *)p_SchemeParams->id.h_Scheme;
++ p_FmPcd = p_Scheme->h_FmPcd;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, NULL);
++
++ if (!FmPcdKgIsSchemeValidSw(p_Scheme))
++ {
++ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS,
++ ("Scheme is invalid"));
++ return NULL;
++ }
++
++ if (!KgSchemeFlagTryLock(p_Scheme))
++ {
++ DBG(TRACE, ("Scheme Try Lock - BUSY"));
++ /* Signal to caller BUSY condition */
++ p_SchemeParams->id.h_Scheme = NULL;
++ return NULL;
++ }
++ }
++ else
++ {
++ p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, NULL);
++
++ relativeSchemeId = p_SchemeParams->id.relativeSchemeId;
++ /* check that schemeId is in range */
++ if (relativeSchemeId >= p_FmPcd->p_FmPcdKg->numOfSchemes)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("relative-scheme-id %d!", relativeSchemeId));
++ return NULL;
++ }
++
++ p_Scheme = &p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId];
++ if (FmPcdKgIsSchemeValidSw(p_Scheme))
++ {
++ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS,
++ ("Scheme id (%d)!", relativeSchemeId));
++ return NULL;
++ }
++        /* Clear all fields, scheme may have been previously used */
++ memset(p_Scheme, 0, sizeof(t_FmPcdKgScheme));
++
++ p_Scheme->schemeId = p_FmPcd->p_FmPcdKg->schemesIds[relativeSchemeId];
++ p_Scheme->h_FmPcd = p_FmPcd;
++
++ p_Scheme->p_Lock = FmPcdAcquireLock(p_FmPcd);
++ if (!p_Scheme->p_Lock)
++ REPORT_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM KG Scheme lock obj!"));
++ }
++
++ err = BuildSchemeRegs((t_Handle)p_Scheme, p_SchemeParams, &schemeRegs);
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ if (p_SchemeParams->modify)
++ KgSchemeFlagUnlock(p_Scheme);
++ if (!p_SchemeParams->modify &&
++ p_Scheme->p_Lock)
++ FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
++ return NULL;
++ }
++
++ if (p_FmPcd->h_Hc)
++ {
++ err = FmHcPcdKgSetScheme(p_FmPcd->h_Hc,
++ (t_Handle)p_Scheme,
++ &schemeRegs,
++ p_SchemeParams->schemeCounter.update);
++ if (p_SchemeParams->modify)
++ KgSchemeFlagUnlock(p_Scheme);
++ if (err)
++ {
++ if (!p_SchemeParams->modify &&
++ p_Scheme->p_Lock)
++ FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
++ return NULL;
++ }
++ if (!p_SchemeParams->modify)
++ ValidateSchemeSw(p_Scheme);
++ return (t_Handle)p_Scheme;
++ }
++
++ physicalSchemeId = p_Scheme->schemeId;
++
++ /* configure all 21 scheme registers */
++ p_MemRegs = &p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs;
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ WRITE_UINT32(p_MemRegs->kgse_ppc, schemeRegs.kgse_ppc);
++ WRITE_UINT32(p_MemRegs->kgse_ccbs, schemeRegs.kgse_ccbs);
++ WRITE_UINT32(p_MemRegs->kgse_mode, schemeRegs.kgse_mode);
++ WRITE_UINT32(p_MemRegs->kgse_mv, schemeRegs.kgse_mv);
++ WRITE_UINT32(p_MemRegs->kgse_dv0, schemeRegs.kgse_dv0);
++ WRITE_UINT32(p_MemRegs->kgse_dv1, schemeRegs.kgse_dv1);
++ WRITE_UINT32(p_MemRegs->kgse_ekdv, schemeRegs.kgse_ekdv);
++ WRITE_UINT32(p_MemRegs->kgse_ekfc, schemeRegs.kgse_ekfc);
++ WRITE_UINT32(p_MemRegs->kgse_bmch, schemeRegs.kgse_bmch);
++ WRITE_UINT32(p_MemRegs->kgse_bmcl, schemeRegs.kgse_bmcl);
++ WRITE_UINT32(p_MemRegs->kgse_hc, schemeRegs.kgse_hc);
++ WRITE_UINT32(p_MemRegs->kgse_spc, schemeRegs.kgse_spc);
++ WRITE_UINT32(p_MemRegs->kgse_fqb, schemeRegs.kgse_fqb);
++ WRITE_UINT32(p_MemRegs->kgse_om, schemeRegs.kgse_om);
++ WRITE_UINT32(p_MemRegs->kgse_vsp, schemeRegs.kgse_vsp);
++ for (i=0 ; i<FM_KG_NUM_OF_GENERIC_REGS ; i++)
++ WRITE_UINT32(p_MemRegs->kgse_gec[i], schemeRegs.kgse_gec[i]);
++
++ /* call indirect command for scheme write */
++ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, p_SchemeParams->schemeCounter.update);
++
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++
++ if (!p_SchemeParams->modify)
++ ValidateSchemeSw(p_Scheme);
++ else
++ KgSchemeFlagUnlock(p_Scheme);
++
++ return (t_Handle)p_Scheme;
++}
++
++t_Error FM_PCD_KgSchemeDelete(t_Handle h_Scheme)
++{
++ t_FmPcd *p_FmPcd;
++ uint8_t physicalSchemeId;
++ uint32_t tmpKgarReg, intFlags;
++ t_Error err = E_OK;
++ t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme *)h_Scheme;
++
++ SANITY_CHECK_RETURN_ERROR(h_Scheme, E_INVALID_HANDLE);
++
++ p_FmPcd = (t_FmPcd*)(p_Scheme->h_FmPcd);
++
++ UpdateRequiredActionFlag(h_Scheme, FALSE);
++
++ /* check that no port is bound to this scheme */
++ err = InvalidateSchemeSw(h_Scheme);
++ if (err)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ if (p_FmPcd->h_Hc)
++ {
++ err = FmHcPcdKgDeleteScheme(p_FmPcd->h_Hc, h_Scheme);
++ if (p_Scheme->p_Lock)
++ FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
++ return err;
++ }
++
++ physicalSchemeId = ((t_FmPcdKgScheme *)h_Scheme)->schemeId;
++
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ /* clear mode register, including enable bit */
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, 0);
++
++ /* call indirect command for scheme write */
++ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
++
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++
++ if (p_Scheme->p_Lock)
++ FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
++
++ return E_OK;
++}
++
++uint32_t FM_PCD_KgSchemeGetCounter(t_Handle h_Scheme)
++{
++ t_FmPcd *p_FmPcd;
++ uint32_t tmpKgarReg, spc, intFlags;
++ uint8_t physicalSchemeId;
++
++ SANITY_CHECK_RETURN_VALUE(h_Scheme, E_INVALID_HANDLE, 0);
++
++ p_FmPcd = (t_FmPcd*)(((t_FmPcdKgScheme *)h_Scheme)->h_FmPcd);
++ if (p_FmPcd->h_Hc)
++ return FmHcPcdKgGetSchemeCounter(p_FmPcd->h_Hc, h_Scheme);
++
++ physicalSchemeId = ((t_FmPcdKgScheme *)h_Scheme)->schemeId;
++
++ if (FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId) == FM_PCD_KG_NUM_OF_SCHEMES)
++ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++
++ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ if (!(GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode) & KG_SCH_MODE_EN))
++ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid"));
++ spc = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_spc);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++
++ return spc;
++}
++
++t_Error FM_PCD_KgSchemeSetCounter(t_Handle h_Scheme, uint32_t value)
++{
++ t_FmPcd *p_FmPcd;
++ uint32_t tmpKgarReg, intFlags;
++ uint8_t physicalSchemeId;
++
++ SANITY_CHECK_RETURN_VALUE(h_Scheme, E_INVALID_HANDLE, 0);
++
++ p_FmPcd = (t_FmPcd*)(((t_FmPcdKgScheme *)h_Scheme)->h_FmPcd);
++
++ if (!FmPcdKgIsSchemeValidSw(h_Scheme))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Requested scheme is invalid."));
++
++ if (p_FmPcd->h_Hc)
++ return FmHcPcdKgSetSchemeCounter(p_FmPcd->h_Hc, h_Scheme, value);
++
++ physicalSchemeId = ((t_FmPcdKgScheme *)h_Scheme)->schemeId;
++ /* check that schemeId is in range */
++ if (FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId) == FM_PCD_KG_NUM_OF_SCHEMES)
++ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++
++ /* read specified scheme into scheme registers */
++ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
++ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ if (!(GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode) & KG_SCH_MODE_EN))
++ {
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid"));
++ }
++
++ /* change counter value */
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_spc, value);
++
++ /* call indirect command for scheme write */
++ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE);
++
++ WriteKgarWait(p_FmPcd, tmpKgarReg);
++ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_KgSetAdditionalDataAfterParsing(t_Handle h_FmPcd, uint8_t payloadOffset)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ struct fman_kg_regs *p_Regs;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, E_NULL_POINTER);
++
++ p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
++ if (!FmIsMaster(p_FmPcd->h_Fm))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_KgSetAdditionalDataAfterParsing - guest mode!"));
++
++ WRITE_UINT32(p_Regs->fmkg_fdor,payloadOffset);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_KgSetDfltValue(t_Handle h_FmPcd, uint8_t valueId, uint32_t value)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ struct fman_kg_regs *p_Regs;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(((valueId == 0) || (valueId == 1)), E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, E_NULL_POINTER);
++
++ p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
++
++ if (!FmIsMaster(p_FmPcd->h_Fm))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_KgSetDfltValue - guest mode!"));
++
++ if (valueId == 0)
++ WRITE_UINT32(p_Regs->fmkg_gdv0r,value);
++ else
++ WRITE_UINT32(p_Regs->fmkg_gdv1r,value);
++ return E_OK;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h
+new file mode 100644
+index 00000000..cb7521a1
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h
+@@ -0,0 +1,206 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_kg.h
++
++ @Description FM KG private header
++*//***************************************************************************/
++#ifndef __FM_KG_H
++#define __FM_KG_H
++
++#include "std_ext.h"
++
++/***********************************************************************/
++/* Keygen defines */
++/***********************************************************************/
++/* masks */
++#if (DPAA_VERSION >= 11)
++#define KG_SCH_VSP_SHIFT_MASK 0x0003f000
++#define KG_SCH_OM_VSPE 0x00000001
++#define KG_SCH_VSP_NO_KSP_EN 0x80000000
++
++#define MAX_SP_SHIFT 23
++#define KG_SCH_VSP_MASK_SHIFT 12
++#define KG_SCH_VSP_SHIFT 24
++#endif /* (DPAA_VERSION >= 11) */
++
++typedef uint32_t t_KnownFieldsMasks;
++#define KG_SCH_KN_PORT_ID 0x80000000
++#define KG_SCH_KN_MACDST 0x40000000
++#define KG_SCH_KN_MACSRC 0x20000000
++#define KG_SCH_KN_TCI1 0x10000000
++#define KG_SCH_KN_TCI2 0x08000000
++#define KG_SCH_KN_ETYPE 0x04000000
++#define KG_SCH_KN_PPPSID 0x02000000
++#define KG_SCH_KN_PPPID 0x01000000
++#define KG_SCH_KN_MPLS1 0x00800000
++#define KG_SCH_KN_MPLS2 0x00400000
++#define KG_SCH_KN_MPLS_LAST 0x00200000
++#define KG_SCH_KN_IPSRC1 0x00100000
++#define KG_SCH_KN_IPDST1 0x00080000
++#define KG_SCH_KN_PTYPE1 0x00040000
++#define KG_SCH_KN_IPTOS_TC1 0x00020000
++#define KG_SCH_KN_IPV6FL1 0x00010000
++#define KG_SCH_KN_IPSRC2 0x00008000
++#define KG_SCH_KN_IPDST2 0x00004000
++#define KG_SCH_KN_PTYPE2 0x00002000
++#define KG_SCH_KN_IPTOS_TC2 0x00001000
++#define KG_SCH_KN_IPV6FL2 0x00000800
++#define KG_SCH_KN_GREPTYPE 0x00000400
++#define KG_SCH_KN_IPSEC_SPI 0x00000200
++#define KG_SCH_KN_IPSEC_NH 0x00000100
++#define KG_SCH_KN_IPPID 0x00000080
++#define KG_SCH_KN_L4PSRC 0x00000004
++#define KG_SCH_KN_L4PDST 0x00000002
++#define KG_SCH_KN_TFLG 0x00000001
++
++typedef uint8_t t_GenericCodes;
++#define KG_SCH_GEN_SHIM1 0x70
++#define KG_SCH_GEN_DEFAULT 0x10
++#define KG_SCH_GEN_PARSE_RESULT_N_FQID 0x20
++#define KG_SCH_GEN_START_OF_FRM 0x40
++#define KG_SCH_GEN_SHIM2 0x71
++#define KG_SCH_GEN_IP_PID_NO_V 0x72
++#define KG_SCH_GEN_ETH 0x03
++#define KG_SCH_GEN_ETH_NO_V 0x73
++#define KG_SCH_GEN_SNAP 0x04
++#define KG_SCH_GEN_SNAP_NO_V 0x74
++#define KG_SCH_GEN_VLAN1 0x05
++#define KG_SCH_GEN_VLAN1_NO_V 0x75
++#define KG_SCH_GEN_VLAN2 0x06
++#define KG_SCH_GEN_VLAN2_NO_V 0x76
++#define KG_SCH_GEN_ETH_TYPE 0x07
++#define KG_SCH_GEN_ETH_TYPE_NO_V 0x77
++#define KG_SCH_GEN_PPP 0x08
++#define KG_SCH_GEN_PPP_NO_V 0x78
++#define KG_SCH_GEN_MPLS1 0x09
++#define KG_SCH_GEN_MPLS2 0x19
++#define KG_SCH_GEN_MPLS3 0x29
++#define KG_SCH_GEN_MPLS1_NO_V 0x79
++#define KG_SCH_GEN_MPLS_LAST 0x0a
++#define KG_SCH_GEN_MPLS_LAST_NO_V 0x7a
++#define KG_SCH_GEN_IPV4 0x0b
++#define KG_SCH_GEN_IPV6 0x1b
++#define KG_SCH_GEN_L3_NO_V 0x7b
++#define KG_SCH_GEN_IPV4_TUNNELED 0x0c
++#define KG_SCH_GEN_IPV6_TUNNELED 0x1c
++#define KG_SCH_GEN_MIN_ENCAP 0x2c
++#define KG_SCH_GEN_IP2_NO_V 0x7c
++#define KG_SCH_GEN_GRE 0x0d
++#define KG_SCH_GEN_GRE_NO_V 0x7d
++#define KG_SCH_GEN_TCP 0x0e
++#define KG_SCH_GEN_UDP 0x1e
++#define KG_SCH_GEN_IPSEC_AH 0x2e
++#define KG_SCH_GEN_SCTP 0x3e
++#define KG_SCH_GEN_DCCP 0x4e
++#define KG_SCH_GEN_IPSEC_ESP 0x6e
++#define KG_SCH_GEN_L4_NO_V 0x7e
++#define KG_SCH_GEN_NEXTHDR 0x7f
++/* shifts */
++#define KG_SCH_PP_SHIFT_HIGH_SHIFT 27
++#define KG_SCH_PP_SHIFT_LOW_SHIFT 12
++#define KG_SCH_PP_MASK_SHIFT 16
++#define KG_SCH_MODE_CCOBASE_SHIFT 24
++#define KG_SCH_DEF_MAC_ADDR_SHIFT 30
++#define KG_SCH_DEF_TCI_SHIFT 28
++#define KG_SCH_DEF_ENET_TYPE_SHIFT 26
++#define KG_SCH_DEF_PPP_SESSION_ID_SHIFT 24
++#define KG_SCH_DEF_PPP_PROTOCOL_ID_SHIFT 22
++#define KG_SCH_DEF_MPLS_LABEL_SHIFT 20
++#define KG_SCH_DEF_IP_ADDR_SHIFT 18
++#define KG_SCH_DEF_PROTOCOL_TYPE_SHIFT 16
++#define KG_SCH_DEF_IP_TOS_TC_SHIFT 14
++#define KG_SCH_DEF_IPV6_FLOW_LABEL_SHIFT 12
++#define KG_SCH_DEF_IPSEC_SPI_SHIFT 10
++#define KG_SCH_DEF_L4_PORT_SHIFT 8
++#define KG_SCH_DEF_TCP_FLAG_SHIFT 6
++#define KG_SCH_HASH_CONFIG_SHIFT_SHIFT 24
++#define KG_SCH_GEN_MASK_SHIFT 16
++#define KG_SCH_GEN_HT_SHIFT 8
++#define KG_SCH_GEN_SIZE_SHIFT 24
++#define KG_SCH_GEN_DEF_SHIFT 29
++#define FM_PCD_KG_KGAR_NUM_SHIFT 16
++
++/* others */
++#define NUM_OF_SW_DEFAULTS 3
++#define MAX_PP_SHIFT 23
++#define MAX_KG_SCH_SIZE 16
++#define MASK_FOR_GENERIC_BASE_ID 0x20
++#define MAX_HASH_SHIFT 40
++#define MAX_KG_SCH_FQID_BIT_OFFSET 31
++#define MAX_KG_SCH_PP_BIT_OFFSET 15
++#define MAX_DIST_FQID_SHIFT 23
++
++#define GET_MASK_SEL_SHIFT(shift,i) \
++switch (i) { \
++ case (0):shift = 26;break; \
++ case (1):shift = 20;break; \
++ case (2):shift = 10;break; \
++ case (3):shift = 4;break; \
++ default: \
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); \
++}
++
++#define GET_MASK_OFFSET_SHIFT(shift,i) \
++switch (i) { \
++ case (0):shift = 16;break; \
++ case (1):shift = 0;break; \
++ case (2):shift = 28;break; \
++ case (3):shift = 24;break; \
++ default: \
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); \
++}
++
++#define GET_MASK_SHIFT(shift,i) \
++switch (i) { \
++ case (0):shift = 24;break; \
++ case (1):shift = 16;break; \
++ case (2):shift = 8;break; \
++ case (3):shift = 0;break; \
++ default: \
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); \
++}
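++
++/* Usage sketch (example only): these lookup macros expand in place and bail
++ * out through RETURN_ERROR() on an out-of-range index:
++ *
++ *     uint8_t shift;
++ *     GET_MASK_SHIFT(shift, 2);    =>  shift is now 8
++ */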
++
++/***********************************************************************/
++/* Keygen defines */
++/***********************************************************************/
++
++#define KG_DOUBLE_MEANING_REGS_OFFSET 0x100
++#define NO_VALIDATION 0x70
++#define KG_ACTION_REG_TO 1024
++#define KG_MAX_PROFILE 255
++#define SCHEME_ALWAYS_DIRECT 0xFFFFFFFF
++
++
++#endif /* __FM_KG_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.c
+new file mode 100644
+index 00000000..113777e5
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.c
+@@ -0,0 +1,5571 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_manip.c
++
++ @Description FM PCD manip ...
++ *//***************************************************************************/
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "debug_ext.h"
++#include "fm_pcd_ext.h"
++#include "fm_port_ext.h"
++#include "fm_muram_ext.h"
++#include "memcpy_ext.h"
++
++#include "fm_common.h"
++#include "fm_hc.h"
++#include "fm_manip.h"
++
++/****************************************/
++/* static functions */
++/****************************************/
++static t_Handle GetManipInfo(t_FmPcdManip *p_Manip, e_ManipInfo manipInfo)
++{
++ t_FmPcdManip *p_CurManip = p_Manip;
++
++ if (!MANIP_IS_UNIFIED(p_Manip))
++ p_CurManip = p_Manip;
++ else
++ {
++ /* go to first unified */
++ while (MANIP_IS_UNIFIED_NON_FIRST(p_CurManip))
++ p_CurManip = p_CurManip->h_PrevManip;
++ }
++
++ switch (manipInfo)
++ {
++ case (e_MANIP_HMCT):
++ return p_CurManip->p_Hmct;
++ case (e_MANIP_HMTD):
++ return p_CurManip->h_Ad;
++ case (e_MANIP_HANDLER_TABLE_OWNER):
++ return (t_Handle)p_CurManip;
++ default:
++ return NULL;
++ }
++}
++
++static uint16_t GetHmctSize(t_FmPcdManip *p_Manip)
++{
++ uint16_t size = 0;
++ t_FmPcdManip *p_CurManip = p_Manip;
++
++ if (!MANIP_IS_UNIFIED(p_Manip))
++ return p_Manip->tableSize;
++
++ /* accumulate sizes, starting with the first node */
++ while (MANIP_IS_UNIFIED_NON_FIRST(p_CurManip))
++ p_CurManip = p_CurManip->h_PrevManip;
++
++ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
++ {
++ size += p_CurManip->tableSize;
++ p_CurManip = (t_FmPcdManip *)p_CurManip->h_NextManip;
++ }
++ size += p_CurManip->tableSize; /* add last size */
++
++ return (size);
++}
++
++static uint16_t GetDataSize(t_FmPcdManip *p_Manip)
++{
++ uint16_t size = 0;
++ t_FmPcdManip *p_CurManip = p_Manip;
++
++ if (!MANIP_IS_UNIFIED(p_Manip))
++ return p_Manip->dataSize;
++
++ /* accumulate sizes, starting with the first node */
++ while (MANIP_IS_UNIFIED_NON_FIRST(p_CurManip))
++ p_CurManip = p_CurManip->h_PrevManip;
++
++ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
++ {
++ size += p_CurManip->dataSize;
++ p_CurManip = (t_FmPcdManip *)p_CurManip->h_NextManip;
++ }
++ size += p_CurManip->dataSize; /* add last size */
++
++ return (size);
++}
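++
++/* Both size getters above rewind a "unified" manip chain to its first node
++ * and then accumulate a per-node field; a generic sketch of that walk
++ * (hypothetical node type, example only):
++ */
++#if 0
++struct example_node { struct example_node *p_Prev, *p_Next; uint16_t size; };
++
++static uint16_t ExampleChainSize(struct example_node *p_Node)
++{
++    uint16_t total = 0;
++
++    while (p_Node->p_Prev)                  /* rewind to the first node */
++        p_Node = p_Node->p_Prev;
++    for (; p_Node; p_Node = p_Node->p_Next) /* accumulate along the chain */
++        total = (uint16_t)(total + p_Node->size);
++    return total;
++}
++#endif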
++
++static t_Error CalculateTableSize(t_FmPcdManipParams *p_FmPcdManipParams,
++ uint16_t *p_TableSize, uint8_t *p_DataSize)
++{
++ uint8_t localDataSize, remain, tableSize = 0, dataSize = 0;
++
++ if (p_FmPcdManipParams->u.hdr.rmv)
++ {
++ switch (p_FmPcdManipParams->u.hdr.rmvParams.type)
++ {
++ case (e_FM_PCD_MANIP_RMV_GENERIC):
++ tableSize += HMCD_BASIC_SIZE;
++ break;
++ case (e_FM_PCD_MANIP_RMV_BY_HDR):
++ switch (p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.type)
++ {
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2):
++#if (DPAA_VERSION >= 11)
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP):
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START):
++#endif /* (DPAA_VERSION >= 11) */
++ tableSize += HMCD_BASIC_SIZE;
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("Unknown byHdr.type"));
++ }
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("Unknown rmvParams.type"));
++ }
++ }
++
++ if (p_FmPcdManipParams->u.hdr.insrt)
++ {
++ switch (p_FmPcdManipParams->u.hdr.insrtParams.type)
++ {
++ case (e_FM_PCD_MANIP_INSRT_GENERIC):
++ remain =
++ (uint8_t)(p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size
++ % 4);
++ if (remain)
++ localDataSize =
++ (uint8_t)(p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size
++ + 4 - remain);
++ else
++ localDataSize =
++ p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size;
++ tableSize += (uint8_t)(HMCD_BASIC_SIZE + localDataSize);
++ break;
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR):
++ {
++ switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.type)
++ {
++
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2):
++ tableSize += HMCD_BASIC_SIZE + HMCD_PTR_SIZE;
++ switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.specificL2)
++ {
++ case (e_FM_PCD_MANIP_HDR_INSRT_MPLS):
++ case (e_FM_PCD_MANIP_HDR_INSRT_PPPOE):
++ dataSize +=
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.size;
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++ }
++ break;
++#if (DPAA_VERSION >= 11)
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_IP):
++ tableSize +=
++ (HMCD_BASIC_SIZE + HMCD_PTR_SIZE
++ + HMCD_PARAM_SIZE
++ + p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size);
++ dataSize += 2;
++ break;
++
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP):
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE):
++ tableSize += (HMCD_BASIC_SIZE + HMCD_L4_HDR_SIZE);
++
++ break;
++
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP):
++ tableSize +=
++ (HMCD_BASIC_SIZE
++ + p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size);
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("Unknown byHdr.type"));
++ }
++ }
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("Unknown insrtParams.type"));
++ }
++ }
++
++ if (p_FmPcdManipParams->u.hdr.fieldUpdate)
++ {
++ switch (p_FmPcdManipParams->u.hdr.fieldUpdateParams.type)
++ {
++ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN):
++ tableSize += HMCD_BASIC_SIZE;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
++ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN)
++ {
++ tableSize += HMCD_PTR_SIZE;
++ dataSize += DSCP_TO_VLAN_TABLE_SIZE;
++ }
++ break;
++ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4):
++ tableSize += HMCD_BASIC_SIZE;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_ID)
++ {
++ tableSize += HMCD_PARAM_SIZE;
++ dataSize += 2;
++ }
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_SRC)
++ tableSize += HMCD_IPV4_ADDR_SIZE;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_DST)
++ tableSize += HMCD_IPV4_ADDR_SIZE;
++ break;
++ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6):
++ tableSize += HMCD_BASIC_SIZE;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV6_SRC)
++ tableSize += HMCD_IPV6_ADDR_SIZE;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV6_DST)
++ tableSize += HMCD_IPV6_ADDR_SIZE;
++ break;
++ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP):
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
++ == HDR_MANIP_TCP_UDP_CHECKSUM)
++ /* we implement this case with the update-checksum descriptor */
++ tableSize += HMCD_BASIC_SIZE;
++ else
++ /* we implement this case with the TCP/UDP-update descriptor */
++ tableSize += HMCD_BASIC_SIZE + HMCD_PARAM_SIZE;
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("Unknown fieldUpdateParams.type"));
++ }
++ }
++
++ if (p_FmPcdManipParams->u.hdr.custom)
++ {
++ switch (p_FmPcdManipParams->u.hdr.customParams.type)
++ {
++ case (e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE):
++ {
++ tableSize += HMCD_BASIC_SIZE + HMCD_PARAM_SIZE + HMCD_PARAM_SIZE;
++ dataSize +=
++ p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.hdrSize;
++ if ((p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType
++ == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4)
++ && (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.updateIpv4Id))
++ dataSize += 2;
++ }
++ break;
++ case (e_FM_PCD_MANIP_HDR_CUSTOM_GEN_FIELD_REPLACE):
++ tableSize += HMCD_BASIC_SIZE + HMCD_PARAM_SIZE;
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("Unknown customParams.type"));
++ }
++ }
++
++ *p_TableSize = tableSize;
++ *p_DataSize = dataSize;
++
++ return E_OK;
++}
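++
++/* The generic-insert size above is padded up to a 4-byte multiple; the
++ * remainder arithmetic is equivalent to the usual mask form (sketch,
++ * example only):
++ */
++#if 0
++static uint8_t ExampleRoundUp4(uint8_t size)
++{
++    return (uint8_t)((size + 3) & ~3);  /* e.g. 5 -> 8, 8 -> 8 */
++}
++#endif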
++
++static t_Error GetPrOffsetByHeaderOrField(t_FmManipHdrInfo *p_HdrInfo,
++ uint8_t *parseArrayOffset)
++{
++ e_NetHeaderType hdr = p_HdrInfo->hdr;
++ e_FmPcdHdrIndex hdrIndex = p_HdrInfo->hdrIndex;
++ bool byField = p_HdrInfo->byField;
++ t_FmPcdFields field;
++
++ if (byField)
++ field = p_HdrInfo->fullField;
++
++ if (byField)
++ {
++ switch (hdr)
++ {
++ case (HEADER_TYPE_ETH):
++ switch (field.eth)
++ {
++ case (NET_HEADER_FIELD_ETH_TYPE):
++ *parseArrayOffset = CC_PC_PR_ETYPE_LAST_OFFSET;
++ break;
++ default:
++ RETURN_ERROR(
++ MAJOR,
++ E_NOT_SUPPORTED,
++ ("Header manipulation of the type Ethernet with this field not supported"));
++ }
++ break;
++ case (HEADER_TYPE_VLAN):
++ switch (field.vlan)
++ {
++ case (NET_HEADER_FIELD_VLAN_TCI):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
++ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ *parseArrayOffset = CC_PC_PR_VLAN1_OFFSET;
++ else
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
++ *parseArrayOffset = CC_PC_PR_VLAN2_OFFSET;
++ break;
++ default:
++ RETURN_ERROR(
++ MAJOR,
++ E_NOT_SUPPORTED,
++ ("Header manipulation of the type VLAN with this field not supported"));
++ }
++ break;
++ default:
++ RETURN_ERROR(
++ MAJOR,
++ E_NOT_SUPPORTED,
++ ("Header manipulation of this header by field not supported"));
++ }
++ }
++ else
++ {
++ switch (hdr)
++ {
++ case (HEADER_TYPE_ETH):
++ *parseArrayOffset = (uint8_t)CC_PC_PR_ETH_OFFSET;
++ break;
++ case (HEADER_TYPE_USER_DEFINED_SHIM1):
++ *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM1_OFFSET;
++ break;
++ case (HEADER_TYPE_USER_DEFINED_SHIM2):
++ *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM2_OFFSET;
++ break;
++ case (HEADER_TYPE_LLC_SNAP):
++ *parseArrayOffset = CC_PC_PR_USER_LLC_SNAP_OFFSET;
++ break;
++ case (HEADER_TYPE_PPPoE):
++ *parseArrayOffset = CC_PC_PR_PPPOE_OFFSET;
++ break;
++ case (HEADER_TYPE_MPLS):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
++ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ *parseArrayOffset = CC_PC_PR_MPLS1_OFFSET;
++ else
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
++ *parseArrayOffset = CC_PC_PR_MPLS_LAST_OFFSET;
++ break;
++ case (HEADER_TYPE_IPv4):
++ case (HEADER_TYPE_IPv6):
++ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
++ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
++ *parseArrayOffset = CC_PC_PR_IP1_OFFSET;
++ else
++ if (hdrIndex == e_FM_PCD_HDR_INDEX_2)
++ *parseArrayOffset = CC_PC_PR_IP_LAST_OFFSET;
++ break;
++ case (HEADER_TYPE_MINENCAP):
++ *parseArrayOffset = CC_PC_PR_MINENC_OFFSET;
++ break;
++ case (HEADER_TYPE_GRE):
++ *parseArrayOffset = CC_PC_PR_GRE_OFFSET;
++ break;
++ case (HEADER_TYPE_TCP):
++ case (HEADER_TYPE_UDP):
++ case (HEADER_TYPE_IPSEC_AH):
++ case (HEADER_TYPE_IPSEC_ESP):
++ case (HEADER_TYPE_DCCP):
++ case (HEADER_TYPE_SCTP):
++ *parseArrayOffset = CC_PC_PR_L4_OFFSET;
++ break;
++ case (HEADER_TYPE_CAPWAP):
++ case (HEADER_TYPE_CAPWAP_DTLS):
++ *parseArrayOffset = CC_PC_PR_NEXT_HEADER_OFFSET;
++ break;
++ default:
++ RETURN_ERROR(
++ MAJOR,
++ E_NOT_SUPPORTED,
++ ("Header manipulation of this header is not supported"));
++ }
++ }
++ return E_OK;
++}
++
++static t_Error BuildHmct(t_FmPcdManip *p_Manip,
++ t_FmPcdManipParams *p_FmPcdManipParams,
++ uint8_t *p_DestHmct, uint8_t *p_DestData, bool new)
++{
++ uint32_t *p_TmpHmct = (uint32_t*)p_DestHmct, *p_LocalData;
++ uint32_t tmpReg = 0, *p_Last = NULL, tmp_ipv6_addr;
++ uint8_t remain, i, size = 0, origSize, *p_UsrData = NULL, *p_TmpData =
++ p_DestData;
++ t_Handle h_FmPcd = p_Manip->h_FmPcd;
++ uint8_t j = 0;
++
++ if (p_FmPcdManipParams->u.hdr.rmv)
++ {
++ if (p_FmPcdManipParams->u.hdr.rmvParams.type
++ == e_FM_PCD_MANIP_RMV_GENERIC)
++ {
++ /* initialize HMCD */
++ tmpReg = (uint32_t)(HMCD_OPCODE_GENERIC_RMV) << HMCD_OC_SHIFT;
++ /* tmp, should be conditional */
++ tmpReg |= p_FmPcdManipParams->u.hdr.rmvParams.u.generic.offset
++ << HMCD_RMV_OFFSET_SHIFT;
++ tmpReg |= p_FmPcdManipParams->u.hdr.rmvParams.u.generic.size
++ << HMCD_RMV_SIZE_SHIFT;
++ }
++ else
++ if (p_FmPcdManipParams->u.hdr.rmvParams.type
++ == e_FM_PCD_MANIP_RMV_BY_HDR)
++ {
++ switch (p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.type)
++ {
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2):
++ {
++ uint8_t hmcdOpt;
++
++ /* initialize HMCD */
++ tmpReg = (uint32_t)(HMCD_OPCODE_L2_RMV) << HMCD_OC_SHIFT;
++
++ switch (p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.u.specificL2)
++ {
++ case (e_FM_PCD_MANIP_HDR_RMV_ETHERNET):
++ hmcdOpt = HMCD_RMV_L2_ETHERNET;
++ break;
++ case (e_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS):
++ hmcdOpt = HMCD_RMV_L2_STACKED_QTAGS;
++ break;
++ case (e_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS):
++ hmcdOpt = HMCD_RMV_L2_ETHERNET_AND_MPLS;
++ break;
++ case (e_FM_PCD_MANIP_HDR_RMV_MPLS):
++ hmcdOpt = HMCD_RMV_L2_MPLS;
++ break;
++ case (e_FM_PCD_MANIP_HDR_RMV_PPPOE):
++ hmcdOpt = HMCD_RMV_L2_PPPOE;
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++ }
++ tmpReg |= hmcdOpt << HMCD_L2_MODE_SHIFT;
++ break;
++ }
++#if (DPAA_VERSION >= 11)
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP):
++ tmpReg = (uint32_t)(HMCD_OPCODE_CAPWAP_RMV)
++ << HMCD_OC_SHIFT;
++ break;
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START):
++ {
++ uint8_t prsArrayOffset;
++ t_Error err = E_OK;
++
++ tmpReg = (uint32_t)(HMCD_OPCODE_RMV_TILL)
++ << HMCD_OC_SHIFT;
++
++ err =
++ GetPrOffsetByHeaderOrField(
++ &p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.u.hdrInfo,
++ &prsArrayOffset);
++ ASSERT_COND(!err);
++ /* was previously checked */
++
++ tmpReg |= ((uint32_t)prsArrayOffset << 16);
++ }
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("manip header remove by hdr type!"));
++ }
++ }
++
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++ /* advance to next command */
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++ }
++
++ if (p_FmPcdManipParams->u.hdr.insrt)
++ {
++ if (p_FmPcdManipParams->u.hdr.insrtParams.type
++ == e_FM_PCD_MANIP_INSRT_GENERIC)
++ {
++ /* initialize HMCD */
++ if (p_FmPcdManipParams->u.hdr.insrtParams.u.generic.replace)
++ tmpReg = (uint32_t)(HMCD_OPCODE_GENERIC_REPLACE)
++ << HMCD_OC_SHIFT;
++ else
++ tmpReg = (uint32_t)(HMCD_OPCODE_GENERIC_INSRT) << HMCD_OC_SHIFT;
++
++ tmpReg |= p_FmPcdManipParams->u.hdr.insrtParams.u.generic.offset
++ << HMCD_INSRT_OFFSET_SHIFT;
++ tmpReg |= p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size
++ << HMCD_INSRT_SIZE_SHIFT;
++
++ size = p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size;
++ p_UsrData = p_FmPcdManipParams->u.hdr.insrtParams.u.generic.p_Data;
++
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++
++ /* initialize data to be inserted */
++ /* if size is not a multiple of 4, pad with 0's */
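++ /* e.g. a 6-byte insert is bounced through an 8-byte zero-filled buffer
++ * so that only whole 32-bit words are ever copied into the table */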
++ origSize = size;
++ remain = (uint8_t)(size % 4);
++ if (remain)
++ {
++ size += (uint8_t)(4 - remain);
++ p_LocalData = (uint32_t *)XX_Malloc(size);
++ memset((uint8_t *)p_LocalData, 0, size);
++ memcpy((uint8_t *)p_LocalData, p_UsrData, origSize);
++ }
++ else
++ p_LocalData = (uint32_t*)p_UsrData;
++
++ /* initialize data and advance pointer to next command */
++ MemCpy8(p_TmpHmct, p_LocalData, size);
++ p_TmpHmct += size / sizeof(uint32_t);
++
++ if (remain)
++ XX_Free(p_LocalData);
++ }
++
++ else
++ if (p_FmPcdManipParams->u.hdr.insrtParams.type
++ == e_FM_PCD_MANIP_INSRT_BY_HDR)
++ {
++ switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.type)
++ {
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2):
++ {
++ uint8_t hmcdOpt;
++
++ /* initialize HMCD */
++ tmpReg = (uint32_t)(HMCD_OPCODE_L2_INSRT)
++ << HMCD_OC_SHIFT;
++
++ switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.specificL2)
++ {
++ case (e_FM_PCD_MANIP_HDR_INSRT_MPLS):
++ if (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.update)
++ hmcdOpt = HMCD_INSRT_N_UPDATE_L2_MPLS;
++ else
++ hmcdOpt = HMCD_INSRT_L2_MPLS;
++ break;
++ case (e_FM_PCD_MANIP_HDR_INSRT_PPPOE):
++ hmcdOpt = HMCD_INSRT_L2_PPPOE;
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
++ }
++ tmpReg |= hmcdOpt << HMCD_L2_MODE_SHIFT;
++
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++
++ /* set size and pointer of user's data */
++ size =
++ (uint8_t)p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.size;
++
++ ASSERT_COND(p_TmpData);
++ MemCpy8(
++ p_TmpData,
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.p_Data,
++ size);
++ tmpReg =
++ (size << HMCD_INSRT_L2_SIZE_SHIFT)
++ | (uint32_t)(XX_VirtToPhys(p_TmpData)
++ - (((t_FmPcd*)h_FmPcd)->physicalMuramBase));
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ p_TmpHmct += HMCD_PTR_SIZE / 4;
++ p_TmpData += size;
++ }
++ break;
++#if (DPAA_VERSION >= 11)
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_IP):
++ tmpReg = (uint32_t)(HMCD_OPCODE_IP_INSRT)
++ << HMCD_OC_SHIFT;
++ if (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.calcL4Checksum)
++ tmpReg |= HMCD_IP_L4_CS_CALC;
++ if (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.mappingMode
++ == e_FM_PCD_MANIP_HDR_QOS_MAPPING_AS_IS)
++ tmpReg |= HMCD_IP_OR_QOS;
++ tmpReg |=
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.lastPidOffset
++ & HMCD_IP_LAST_PID_MASK;
++ tmpReg |=
++ ((p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size
++ << HMCD_IP_SIZE_SHIFT)
++ & HMCD_IP_SIZE_MASK);
++ if (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.dontFragOverwrite)
++ tmpReg |= HMCD_IP_DF_MODE;
++
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++
++ /* set IP id */
++ ASSERT_COND(p_TmpData);
++ WRITE_UINT16(
++ *(uint16_t*)p_TmpData,
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.id);
++ WRITE_UINT32(
++ *p_TmpHmct,
++ (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase)));
++ p_TmpData += 2;
++ p_TmpHmct += HMCD_PTR_SIZE / 4;
++
++ WRITE_UINT8(*p_TmpHmct, p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.lastDstOffset);
++ p_TmpHmct += HMCD_PARAM_SIZE / 4;
++
++ MemCpy8(
++ p_TmpHmct,
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.p_Data,
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size);
++ p_TmpHmct +=
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size
++ / 4;
++ break;
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE):
++ tmpReg = HMCD_INSRT_UDP_LITE;
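++ /* fall through - the UDP-Lite flag is OR-ed with the common UDP insert opcode below */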
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP):
++ tmpReg |= (uint32_t)(HMCD_OPCODE_UDP_INSRT)
++ << HMCD_OC_SHIFT;
++
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++
++ MemCpy8(
++ p_TmpHmct,
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.p_Data,
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size);
++ p_TmpHmct +=
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size
++ / 4;
++ break;
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP):
++ tmpReg = (uint32_t)(HMCD_OPCODE_CAPWAP_INSRT)
++ << HMCD_OC_SHIFT;
++ tmpReg |= HMCD_CAPWAP_INSRT;
++
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++
++ MemCpy8(
++ p_TmpHmct,
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.p_Data,
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size);
++ p_TmpHmct +=
++ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size
++ / 4;
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("manip header insert by header type!"));
++
++ }
++ }
++ }
++
++ if (p_FmPcdManipParams->u.hdr.fieldUpdate)
++ {
++ switch (p_FmPcdManipParams->u.hdr.fieldUpdateParams.type)
++ {
++ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN):
++ /* set opcode */
++ tmpReg = (uint32_t)(HMCD_OPCODE_VLAN_PRI_UPDATE)
++ << HMCD_OC_SHIFT;
++
++ /* set mode & table pointer */
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
++ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN)
++ {
++ /* set Mode */
++ tmpReg |= (uint32_t)(HMCD_VLAN_PRI_UPDATE_DSCP_TO_VPRI)
++ << HMCD_VLAN_PRI_REP_MODE_SHIFT;
++ /* set VPRI default */
++ tmpReg |=
++ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.vpriDefVal;
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++ /* write the table pointer into the Manip descriptor */
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++
++ tmpReg = 0;
++ ASSERT_COND(p_TmpData);
++ for (i = 0; i < HMCD_DSCP_VALUES; i++)
++ {
++ /* first we build from each 8 values a 32bit register */
++ tmpReg |=
++ (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.dscpToVpriTable[i])
++ << (32 - 4 * (j + 1));
++ j++;
++ /* Then we write this register to the next table word
++ * (i=7-->word 0, i=15-->word 1,... i=63-->word 7) */
++ if ((i % 8) == 7)
++ {
++ WRITE_UINT32(*((uint32_t*)p_TmpData + (i+1)/8-1),
++ tmpReg);
++ tmpReg = 0;
++ j = 0;
++ }
++ }
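++ /* Illustration: each 32-bit word packs eight 4-bit VPRI values, with
++ * entry 0 in the top nibble of word 0; the 64 DSCP entries therefore
++ * fill exactly 8 table words. */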
++
++ WRITE_UINT32(
++ *p_TmpHmct,
++ (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)h_FmPcd)->physicalMuramBase)));
++ p_TmpHmct += HMCD_PTR_SIZE / 4;
++
++ p_TmpData += DSCP_TO_VLAN_TABLE_SIZE;
++ }
++ else
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
++ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI)
++ {
++ /* set Mode */
++ /* line commented out as it has no side effect ('0' value). */
++ /*tmpReg |= HMCD_VLAN_PRI_UPDATE << HMCD_VLAN_PRI_REP_MODE_SHIFT*/;
++ /* set VPRI parameter */
++ tmpReg |=
++ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.u.vpri;
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++ }
++ break;
++
++ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4):
++ /* set opcode */
++ tmpReg = (uint32_t)(HMCD_OPCODE_IPV4_UPDATE) << HMCD_OC_SHIFT;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_TTL)
++ tmpReg |= HMCD_IPV4_UPDATE_TTL;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_TOS)
++ {
++ tmpReg |= HMCD_IPV4_UPDATE_TOS;
++ tmpReg |=
++ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.tos
++ << HMCD_IPV4_UPDATE_TOS_SHIFT;
++ }
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_ID)
++ tmpReg |= HMCD_IPV4_UPDATE_ID;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_SRC)
++ tmpReg |= HMCD_IPV4_UPDATE_SRC;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_DST)
++ tmpReg |= HMCD_IPV4_UPDATE_DST;
++ /* write the first 4 bytes of the descriptor */
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_ID)
++ {
++ ASSERT_COND(p_TmpData);
++ WRITE_UINT16(
++ *(uint16_t*)p_TmpData,
++ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.id);
++ WRITE_UINT32(
++ *p_TmpHmct,
++ (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase)));
++ p_TmpData += 2;
++ p_TmpHmct += HMCD_PTR_SIZE / 4;
++ }
++
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_SRC)
++ {
++ WRITE_UINT32(
++ *p_TmpHmct,
++ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.src);
++ p_TmpHmct += HMCD_IPV4_ADDR_SIZE / 4;
++ }
++
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
++ & HDR_MANIP_IPV4_DST)
++ {
++ WRITE_UINT32(
++ *p_TmpHmct,
++ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.dst);
++ p_TmpHmct += HMCD_IPV4_ADDR_SIZE / 4;
++ }
++ break;
++
++ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6):
++ /* set opcode */
++ tmpReg = (uint32_t)(HMCD_OPCODE_IPV6_UPDATE) << HMCD_OC_SHIFT;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
++ & HDR_MANIP_IPV6_HL)
++ tmpReg |= HMCD_IPV6_UPDATE_HL;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
++ & HDR_MANIP_IPV6_TC)
++ {
++ tmpReg |= HMCD_IPV6_UPDATE_TC;
++ tmpReg |=
++ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.trafficClass
++ << HMCD_IPV6_UPDATE_TC_SHIFT;
++ }
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
++ & HDR_MANIP_IPV6_SRC)
++ tmpReg |= HMCD_IPV6_UPDATE_SRC;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
++ & HDR_MANIP_IPV6_DST)
++ tmpReg |= HMCD_IPV6_UPDATE_DST;
++ /* write the first 4 bytes of the descriptor */
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
++ & HDR_MANIP_IPV6_SRC)
++ {
++ for (i = 0; i < NET_HEADER_FIELD_IPv6_ADDR_SIZE; i += 4)
++ {
++ memcpy(&tmp_ipv6_addr,
++ &p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.src[i],
++ sizeof(uint32_t));
++ WRITE_UINT32(*p_TmpHmct, tmp_ipv6_addr);
++ p_TmpHmct += HMCD_PTR_SIZE / 4;
++ }
++ }
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
++ & HDR_MANIP_IPV6_DST)
++ {
++ for (i = 0; i < NET_HEADER_FIELD_IPv6_ADDR_SIZE; i += 4)
++ {
++ memcpy(&tmp_ipv6_addr,
++ &p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.dst[i],
++ sizeof(uint32_t));
++ WRITE_UINT32(*p_TmpHmct, tmp_ipv6_addr);
++ p_TmpHmct += HMCD_PTR_SIZE / 4;
++ }
++ }
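++ /* the 16-byte IPv6 addresses are staged through tmp_ipv6_addr one
++ * 32-bit word at a time, presumably to avoid unaligned reads from the
++ * caller's buffer before each WRITE_UINT32 */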
++ break;
++
++ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP):
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
++ == HDR_MANIP_TCP_UDP_CHECKSUM)
++ {
++ /* we implement this case with the update-checksum descriptor */
++ /* set opcode */
++ tmpReg = (uint32_t)(HMCD_OPCODE_TCP_UDP_CHECKSUM)
++ << HMCD_OC_SHIFT;
++ /* write the first 4 bytes of the descriptor */
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++ }
++ else
++ {
++ /* we implement this case with the TCP/UDP update descriptor */
++ /* set opcode */
++ tmpReg = (uint32_t)(HMCD_OPCODE_TCP_UDP_UPDATE)
++ << HMCD_OC_SHIFT;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
++ & HDR_MANIP_TCP_UDP_DST)
++ tmpReg |= HMCD_TCP_UDP_UPDATE_DST;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
++ & HDR_MANIP_TCP_UDP_SRC)
++ tmpReg |= HMCD_TCP_UDP_UPDATE_SRC;
++ /* write the first 4 bytes of the descriptor */
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++
++ tmpReg = 0;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
++ & HDR_MANIP_TCP_UDP_SRC)
++ tmpReg |=
++ ((uint32_t)p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.src)
++ << HMCD_TCP_UDP_UPDATE_SRC_SHIFT;
++ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
++ & HDR_MANIP_TCP_UDP_DST)
++ tmpReg |=
++ ((uint32_t)p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.dst);
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ p_TmpHmct += HMCD_PTR_SIZE / 4;
++ }
++ break;
++
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("Unknown fieldUpdateParams.type"));
++ }
++ }
++
++ if (p_FmPcdManipParams->u.hdr.custom)
++ {
++ switch (p_FmPcdManipParams->u.hdr.customParams.type)
++ {
++ case (e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE):
++ /* set opcode */
++ tmpReg = (uint32_t)(HMCD_OPCODE_REPLACE_IP) << HMCD_OC_SHIFT;
++
++ if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.decTtlHl)
++ tmpReg |= HMCD_IP_REPLACE_TTL_HL;
++ if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType
++ == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV4_BY_IPV6)
++ /* line commented out as it has no side effect ('0' value). */
++ /*tmpReg |= HMCD_IP_REPLACE_REPLACE_IPV4*/;
++ else
++ if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType
++ == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4)
++ {
++ tmpReg |= HMCD_IP_REPLACE_REPLACE_IPV6;
++ if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.updateIpv4Id)
++ tmpReg |= HMCD_IP_REPLACE_ID;
++ }
++ else
++ RETURN_ERROR(
++ MINOR,
++ E_NOT_SUPPORTED,
++ ("One flag out of HDR_MANIP_IP_REPLACE_IPV4, HDR_MANIP_IP_REPLACE_IPV6 - must be set."));
++
++ /* write the first 4 bytes of the descriptor */
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE / 4;
++
++ size =
++ p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.hdrSize;
++ ASSERT_COND(p_TmpData);
++ MemCpy8(
++ p_TmpData,
++ p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.hdr,
++ size);
++ tmpReg = (uint32_t)(size << HMCD_IP_REPLACE_L3HDRSIZE_SHIFT);
++ tmpReg |= (uint32_t)(XX_VirtToPhys(p_TmpData)
++ - (((t_FmPcd*)h_FmPcd)->physicalMuramBase));
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ p_TmpHmct += HMCD_PTR_SIZE / 4;
++ p_TmpData += size;
++
++ if ((p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType
++ == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4)
++ && (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.updateIpv4Id))
++ {
++ WRITE_UINT16(
++ *(uint16_t*)p_TmpData,
++ p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.id);
++ WRITE_UINT32(
++ *p_TmpHmct,
++ (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)h_FmPcd)->physicalMuramBase)));
++ p_TmpData += 2;
++ }
++ p_TmpHmct += HMCD_PTR_SIZE / 4;
++ break;
++ case (e_FM_PCD_MANIP_HDR_CUSTOM_GEN_FIELD_REPLACE):
++ /* set opcode */
++ tmpReg = (uint32_t)(HMCD_OPCODE_GEN_FIELD_REPLACE) << HMCD_OC_SHIFT;
++ tmpReg |= p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.size << HMCD_GEN_FIELD_SIZE_SHIFT;
++ tmpReg |= p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.srcOffset << HMCD_GEN_FIELD_SRC_OFF_SHIFT;
++ tmpReg |= p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.dstOffset << HMCD_GEN_FIELD_DST_OFF_SHIFT;
++ if (p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.mask)
++ tmpReg |= HMCD_GEN_FIELD_MASK_EN;
++
++ /* write the first 4 bytes of the descriptor */
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ /* save a pointer to the "last" indication word */
++ p_Last = p_TmpHmct;
++
++ p_TmpHmct += HMCD_BASIC_SIZE/4;
++
++ if (p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.mask)
++ {
++ tmpReg = p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.mask << HMCD_GEN_FIELD_MASK_SHIFT;
++ tmpReg |= p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.maskOffset << HMCD_GEN_FIELD_MASK_OFF_SHIFT;
++ /* write the next 4 bytes of the descriptor */
++ WRITE_UINT32(*p_TmpHmct, tmpReg);
++ }
++ p_TmpHmct += HMCD_PARAM_SIZE/4;
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("Unknown customParams.type"));
++ }
++ }
++
++ /* If this node has a nextManip and no parsing is required, the old table
++ must be copied into the new table and the old table should then be freed */
++ if (p_FmPcdManipParams->h_NextManip
++ && (p_Manip->nextManipType == e_FM_PCD_MANIP_HDR)
++ && (MANIP_DONT_REPARSE(p_Manip)))
++ {
++ if (new)
++ {
++ /* If this is the first time this manip is created, we need to free the
++ * unused memory. In the dynamic-change case, the memory used is either
++ * the CC shadow or the existing table - no allocation, no free */
++ MANIP_UPDATE_UNIFIED_POSITION(p_FmPcdManipParams->h_NextManip);
++
++ p_Manip->unifiedPosition = e_MANIP_UNIFIED_FIRST;
++ }
++ }
++ else
++ {
++ ASSERT_COND(p_Last);
++ /* set the "last" indication on the last command of the current table */
++ WRITE_UINT32(*p_Last, GET_UINT32(*p_Last) | HMCD_LAST);
++ }
++
++ return E_OK;
++}
++
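++/* To summarize the contract above: BuildHmct() serializes the remove /
++ * insert / field-update / custom sub-commands of one manip node into
++ * p_DestHmct, spills variable-length user data into p_DestData, and sets
++ * HMCD_LAST on the final command only when this node is not merged with a
++ * following header-manip node. The 'new' flag distinguishes first-time
++ * creation from a rebuild over existing or shadow memory, where nothing
++ * is allocated or freed. */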
++static t_Error CreateManipActionNew(t_FmPcdManip *p_Manip,
++ t_FmPcdManipParams *p_FmPcdManipParams)
++{
++ t_FmPcdManip *p_CurManip;
++ t_Error err;
++ uint32_t nextSize = 0, totalSize;
++ uint16_t tmpReg;
++ uint8_t *p_OldHmct, *p_TmpHmctPtr, *p_TmpDataPtr;
++
++ /* set Manip structure */
++
++ p_Manip->dontParseAfterManip =
++ p_FmPcdManipParams->u.hdr.dontParseAfterManip;
++
++ if (p_FmPcdManipParams->h_NextManip)
++ { /* Next Header manipulation exists */
++ p_Manip->nextManipType = MANIP_GET_TYPE(p_FmPcdManipParams->h_NextManip);
++
++ if ((p_Manip->nextManipType == e_FM_PCD_MANIP_HDR) && p_Manip->dontParseAfterManip)
++ nextSize = (uint32_t)(GetHmctSize(p_FmPcdManipParams->h_NextManip)
++ + GetDataSize(p_FmPcdManipParams->h_NextManip));
++ else /* either parsing is required or next manip is Frag; no table merging. */
++ p_Manip->cascaded = TRUE;
++ /* pass up the "cascaded" attribute. The whole chain is cascaded
++ * if something is cascaded along the way. */
++ if (MANIP_IS_CASCADED(p_FmPcdManipParams->h_NextManip))
++ p_Manip->cascaded = TRUE;
++ }
++
++ /* Allocate new table */
++ /* calculate table size according to manip parameters */
++ err = CalculateTableSize(p_FmPcdManipParams, &p_Manip->tableSize,
++ &p_Manip->dataSize);
++ if (err)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ totalSize = (uint16_t)(p_Manip->tableSize + p_Manip->dataSize + nextSize);
++
++ p_Manip->p_Hmct = (uint8_t*)FM_MURAM_AllocMem(
++ ((t_FmPcd *)p_Manip->h_FmPcd)->h_FmMuram, totalSize, 4);
++ if (!p_Manip->p_Hmct)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc failed"));
++
++ if (p_Manip->dataSize)
++ p_Manip->p_Data =
++ (uint8_t*)PTR_MOVE(p_Manip->p_Hmct, (p_Manip->tableSize + nextSize));
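++ /* Resulting MURAM layout (illustrative sizes): with tableSize = 32,
++ * nextSize = 16 and dataSize = 8, this node's HMCT occupies bytes 0-31,
++ * the merged next-manip table bytes 32-47, and p_Data starts at byte 48. */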
++
++ /* update shadow size to allow runtime replacement of Header manipulation */
++ /* The allocated shadow is divided as follows:
++ 0 . . . 16 . . .
++ --------------------------------
++ | Shadow | Shadow HMTD |
++ | HMTD | Match Table |
++ | (16 bytes) | (maximal size) |
++ --------------------------------
++ */
++
++ err = FmPcdUpdateCcShadow(p_Manip->h_FmPcd, (uint32_t)(totalSize + 16),
++ (uint16_t)FM_PCD_CC_AD_TABLE_ALIGN);
++ if (err != E_OK)
++ {
++ FM_MURAM_FreeMem(p_Manip->h_FmPcd, p_Manip->p_Hmct);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM allocation for HdrManip node shadow"));
++ }
++
++ if (p_FmPcdManipParams->h_NextManip
++ && (p_Manip->nextManipType == e_FM_PCD_MANIP_HDR)
++ && (MANIP_DONT_REPARSE(p_Manip)))
++ {
++ p_OldHmct = (uint8_t *)GetManipInfo(p_FmPcdManipParams->h_NextManip,
++ e_MANIP_HMCT);
++ p_CurManip = p_FmPcdManipParams->h_NextManip;
++ /* Run till the last Manip (which is the first to configure) */
++ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
++ p_CurManip = p_CurManip->h_NextManip;
++
++ while (p_CurManip)
++ {
++ /* If this is a unified table, point to the part of the table
++ * which is the relative offset in HMCT.
++ */
++ p_TmpHmctPtr = (uint8_t*)PTR_MOVE(p_Manip->p_Hmct,
++ (p_Manip->tableSize +
++ (PTR_TO_UINT(p_CurManip->p_Hmct) -
++ PTR_TO_UINT(p_OldHmct))));
++ if (p_CurManip->p_Data)
++ p_TmpDataPtr = (uint8_t*)PTR_MOVE(p_Manip->p_Hmct,
++ (p_Manip->tableSize +
++ (PTR_TO_UINT(p_CurManip->p_Data) -
++ PTR_TO_UINT(p_OldHmct))));
++ else
++ p_TmpDataPtr = NULL;
++
++ BuildHmct(p_CurManip, &p_CurManip->manipParams, p_TmpHmctPtr,
++ p_TmpDataPtr, FALSE);
++ /* update old manip table pointer */
++ MANIP_SET_HMCT_PTR(p_CurManip, p_TmpHmctPtr);
++ MANIP_SET_DATA_PTR(p_CurManip, p_TmpDataPtr);
++
++ p_CurManip = p_CurManip->h_PrevManip;
++ }
++ /* We copied the HMCT to create a new large HMCT so we can free the old one */
++ FM_MURAM_FreeMem(MANIP_GET_MURAM(p_FmPcdManipParams->h_NextManip),
++ p_OldHmct);
++ }
++
++ /* Fill table */
++ err = BuildHmct(p_Manip, p_FmPcdManipParams, p_Manip->p_Hmct,
++ p_Manip->p_Data, TRUE);
++ if (err)
++ {
++ FM_MURAM_FreeMem(p_Manip->h_FmPcd, p_Manip->p_Hmct);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ /* Build HMTD (table descriptor) */
++ tmpReg = HMTD_CFG_TYPE; /* NADEN = 0 */
++
++ /* add parseAfterManip */
++ if (!p_Manip->dontParseAfterManip)
++ tmpReg |= HMTD_CFG_PRS_AFTER_HM;
++
++ /* create cascade */
++ /*if (p_FmPcdManipParams->h_NextManip
++ && (!MANIP_DONT_REPARSE(p_Manip) || (p_Manip->nextManipType != e_FM_PCD_MANIP_HDR)))*/
++ if (p_Manip->cascaded)
++ {
++ uint16_t nextAd;
++ /* indicate that there's another HM table descriptor */
++ tmpReg |= HMTD_CFG_NEXT_AD_EN;
++ /* get address of next HMTD (table descriptor; h_Ad).
++ * If the next HMTD was removed due to table unification, get the address
++ * of the "next next" as written in the h_Ad of the next h_Manip node.
++ */
++ if (p_Manip->unifiedPosition != e_MANIP_UNIFIED_FIRST)
++ nextAd = (uint16_t)((uint32_t)(XX_VirtToPhys(MANIP_GET_HMTD_PTR(p_FmPcdManipParams->h_NextManip)) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase)) >> 4);
++ else
++ nextAd = ((t_Hmtd *)((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad)->nextAdIdx;
++
++ WRITE_UINT16(((t_Hmtd *)p_Manip->h_Ad)->nextAdIdx, nextAd);
++ }
++
++ WRITE_UINT16(((t_Hmtd *)p_Manip->h_Ad)->cfg, tmpReg);
++ WRITE_UINT32(
++ ((t_Hmtd *)p_Manip->h_Ad)->hmcdBasePtr,
++ (uint32_t)(XX_VirtToPhys(p_Manip->p_Hmct) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase)));
++
++ WRITE_UINT8(((t_Hmtd *)p_Manip->h_Ad)->opCode, HMAN_OC);
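++ /* Note the recurring XX_VirtToPhys(...) - physicalMuramBase pattern:
++ * pointers stored in the HMTD and HMCDs are MURAM-relative offsets
++ * rather than CPU-virtual addresses. */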
++
++ if (p_Manip->unifiedPosition == e_MANIP_UNIFIED_FIRST)
++ {
++ /* The HMTD of the next Manip is never going to be used */
++ if (((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->muramAllocate)
++ FM_MURAM_FreeMem(
++ ((t_FmPcd *)((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_FmPcd)->h_FmMuram,
++ ((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad);
++ else
++ XX_Free(((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad);
++ ((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad = NULL;
++ }
++
++ return E_OK;
++}
++
++static t_Error CreateManipActionShadow(t_FmPcdManip *p_Manip,
++ t_FmPcdManipParams *p_FmPcdManipParams)
++{
++ uint8_t *p_WholeHmct, *p_TmpHmctPtr, newDataSize, *p_TmpDataPtr = NULL;
++ uint16_t newSize;
++ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
++ t_Error err;
++ t_FmPcdManip *p_CurManip = p_Manip;
++
++ err = CalculateTableSize(p_FmPcdManipParams, &newSize, &newDataSize);
++ if (err)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ /* check coherency of new table parameters */
++ if (newSize > p_Manip->tableSize)
++ RETURN_ERROR(
++ MINOR,
++ E_INVALID_VALUE,
++ ("New Hdr Manip configuration requires larger size than current one (command table)."));
++ if (newDataSize > p_Manip->dataSize)
++ RETURN_ERROR(
++ MINOR,
++ E_INVALID_VALUE,
++ ("New Hdr Manip configuration requires larger size than current one (data)."));
++ if (p_FmPcdManipParams->h_NextManip)
++ RETURN_ERROR(
++ MINOR, E_INVALID_VALUE,
++ ("New Hdr Manip configuration can not contain h_NextManip."));
++ if (MANIP_IS_UNIFIED(p_Manip) && (newSize != p_Manip->tableSize))
++ RETURN_ERROR(
++ MINOR,
++ E_INVALID_VALUE,
++ ("New Hdr Manip configuration in a chained manipulation requires different size than current one."));
++ if (p_Manip->dontParseAfterManip
++ != p_FmPcdManipParams->u.hdr.dontParseAfterManip)
++ RETURN_ERROR(
++ MINOR,
++ E_INVALID_VALUE,
++ ("New Hdr Manip configuration differs in dontParseAfterManip value."));
++
++ p_Manip->tableSize = newSize;
++ p_Manip->dataSize = newDataSize;
++
++ /* Build the new table in the shadow */
++ if (!MANIP_IS_UNIFIED(p_Manip))
++ {
++ p_TmpHmctPtr = (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow, 16);
++ if (p_Manip->p_Data)
++ p_TmpDataPtr =
++ (uint8_t*)PTR_MOVE(p_TmpHmctPtr,
++ (PTR_TO_UINT(p_Manip->p_Data) - PTR_TO_UINT(p_Manip->p_Hmct)));
++
++ BuildHmct(p_Manip, p_FmPcdManipParams, p_TmpHmctPtr, p_Manip->p_Data,
++ FALSE);
++ }
++ else
++ {
++ p_WholeHmct = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMCT);
++ ASSERT_COND(p_WholeHmct);
++
++ /* Run till the last Manip (which is the first to configure) */
++ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
++ p_CurManip = p_CurManip->h_NextManip;
++
++ while (p_CurManip)
++ {
++ /* If this is a non-head node in a unified table, point to the part of the shadow
++ * which is the relative offset in HMCT.
++ * Otherwise, point to the beginning of the
++ * shadow table (the first 16 bytes are reserved for the HMTD).
++ */
++ p_TmpHmctPtr =
++ (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow,
++ (16 + PTR_TO_UINT(p_CurManip->p_Hmct) - PTR_TO_UINT(p_WholeHmct)));
++ if (p_CurManip->p_Data)
++ p_TmpDataPtr =
++ (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow,
++ (16 + PTR_TO_UINT(p_CurManip->p_Data) - PTR_TO_UINT(p_WholeHmct)));
++
++ BuildHmct(p_CurManip, &p_CurManip->manipParams, p_TmpHmctPtr,
++ p_TmpDataPtr, FALSE);
++ p_CurManip = p_CurManip->h_PrevManip;
++ }
++ }
++
++ return E_OK;
++}
++
++static t_Error CreateManipActionBackToOrig(
++ t_FmPcdManip *p_Manip, t_FmPcdManipParams *p_FmPcdManipParams)
++{
++ uint8_t *p_WholeHmct = NULL, *p_TmpHmctPtr, *p_TmpDataPtr;
++ t_FmPcdManip *p_CurManip = p_Manip;
++
++ /* Build the new table in the shadow */
++ if (!MANIP_IS_UNIFIED(p_Manip))
++ BuildHmct(p_Manip, p_FmPcdManipParams, p_Manip->p_Hmct, p_Manip->p_Data,
++ FALSE);
++ else
++ {
++ p_WholeHmct = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMCT);
++ ASSERT_COND(p_WholeHmct);
++
++ /* Run till the last Manip (which is the first to configure) */
++ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
++ p_CurManip = p_CurManip->h_NextManip;
++
++ while (p_CurManip)
++ {
++ /* If this is a unified table, point to the part of the table
++ * which is the relative offset in HMCT.
++ */
++ p_TmpHmctPtr = p_CurManip->p_Hmct; /*- (uint32_t)p_WholeHmct*/
++ p_TmpDataPtr = p_CurManip->p_Data; /*- (uint32_t)p_WholeHmct*/
++
++ BuildHmct(p_CurManip, &p_CurManip->manipParams, p_TmpHmctPtr,
++ p_TmpDataPtr, FALSE);
++
++ p_CurManip = p_CurManip->h_PrevManip;
++ }
++ }
++
++ return E_OK;
++}
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++static t_Error UpdateManipIc(t_Handle h_Manip, uint8_t icOffset)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++ t_Handle p_Ad;
++ uint32_t tmpReg32 = 0;
++ SANITY_CHECK_RETURN_ERROR(h_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE);
++
++ switch (p_Manip->opcode)
++ {
++ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++ if (p_Manip->updateParams & INTERNAL_CONTEXT_OFFSET)
++ {
++ tmpReg32 =
++ *(uint32_t *)&((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets;
++ tmpReg32 |= (uint32_t)((uint32_t)icOffset << 16);
++ *(uint32_t *)&((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets =
++ tmpReg32;
++ p_Manip->updateParams &= ~INTERNAL_CONTEXT_OFFSET;
++ p_Manip->icOffset = icOffset;
++ }
++ else
++ {
++ if (p_Manip->icOffset != icOffset)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("this manipulation was updated previously by different value"););
++ }
++ break;
++ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
++ if (p_Manip->h_Frag)
++ {
++ if (p_Manip->updateParams & INTERNAL_CONTEXT_OFFSET)
++ {
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++ tmpReg32 |= GET_UINT32(((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets);
++ tmpReg32 |= (uint32_t)((uint32_t)icOffset << 16);
++ WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets, tmpReg32);
++ p_Manip->updateParams &= ~INTERNAL_CONTEXT_OFFSET;
++ p_Manip->icOffset = icOffset;
++ }
++ else
++ {
++ if (p_Manip->icOffset != icOffset)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("this manipulation was updated previousely by different value"););
++ }
++ }
++ break;
++ }
++
++ return E_OK;
++}
++
++static t_Error UpdateInitMvIntFrameHeaderFromFrameToBufferPrefix(
++ t_Handle h_FmPort, t_FmPcdManip *p_Manip, t_Handle h_Ad, bool validate)
++{
++
++ t_AdOfTypeContLookup *p_Ad = (t_AdOfTypeContLookup *)h_Ad;
++ t_FmPortGetSetCcParams fmPortGetSetCcParams;
++ t_Error err;
++ uint32_t tmpReg32;
++
++ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(
++ (p_Manip->opcode & HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX),
++ E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_Manip->muramAllocate, E_INVALID_STATE);
++
++ if (p_Manip->updateParams)
++ {
++ if ((!(p_Manip->updateParams & OFFSET_OF_PR))
++ || (p_Manip->shadowUpdateParams & OFFSET_OF_PR))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++ ("in this stage parameters from Port has not be updated"));
++
++ fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams;
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_PSO;
++ fmPortGetSetCcParams.setCcParams.psoSize = 16;
++
++ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++ ("Parser result offset wasn't configured previousely"));
++#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
++ ASSERT_COND(!(fmPortGetSetCcParams.getCcParams.prOffset % 16));
++#endif
++ }
++ else
++ if (validate)
++ {
++ if ((!(p_Manip->shadowUpdateParams & OFFSET_OF_PR))
++ || (p_Manip->updateParams & OFFSET_OF_PR))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++ ("in this stage parameters from Port has be updated"));
++ fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams;
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_PSO;
++ fmPortGetSetCcParams.setCcParams.psoSize = 16;
++
++ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++ ("Parser result offset wasn't configured previousely"));
++
++ }
++
++ ASSERT_COND(p_Ad);
++
++ if (p_Manip->updateParams & OFFSET_OF_PR)
++ {
++ tmpReg32 = 0;
++ tmpReg32 |= fmPortGetSetCcParams.getCcParams.prOffset;
++ WRITE_UINT32(p_Ad->matchTblPtr,
++ (GET_UINT32(p_Ad->matchTblPtr) | tmpReg32));
++ p_Manip->updateParams &= ~OFFSET_OF_PR;
++ p_Manip->shadowUpdateParams |= OFFSET_OF_PR;
++ }
++ else
++ if (validate)
++ {
++ tmpReg32 = GET_UINT32(p_Ad->matchTblPtr);
++ if ((uint8_t)tmpReg32 != fmPortGetSetCcParams.getCcParams.prOffset)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("this manipulation was updated previousely by different value"););
++ }
++
++ return E_OK;
++}
++
++static t_Error UpdateModifyCapwapFragmenation(t_FmPcdManip *p_Manip, t_Handle h_Ad, bool validate,t_Handle h_FmTree)
++{
++ t_AdOfTypeContLookup *p_Ad = (t_AdOfTypeContLookup *)h_Ad;
++ t_FmPcdCcSavedManipParams *p_SavedManipParams = NULL;
++ uint32_t tmpReg32 = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Frag,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Manip->frag,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(((p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION) || (p_Manip->opcode == HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER)), E_INVALID_STATE);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Frag;
++
++ if (p_Manip->updateParams)
++ {
++
++ if ((!(p_Manip->updateParams & OFFSET_OF_DATA)) ||
++ ((p_Manip->shadowUpdateParams & OFFSET_OF_DATA)))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has not be updated"));
++ p_SavedManipParams = FmPcdCcTreeGetSavedManipParams(h_FmTree);
++ if (!p_SavedManipParams)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("for this manipulation tree has to be configured previosely with this type"));
++ p_Manip->capwapFragParams.dataOffset = p_SavedManipParams->capwapParams.dataOffset;
++
++ tmpReg32 = GET_UINT32(p_Ad->pcAndOffsets);
++ tmpReg32 |= ((uint32_t)p_Manip->capwapFragParams.dataOffset<< 16);
++ WRITE_UINT32(p_Ad->pcAndOffsets,tmpReg32);
++
++ p_Manip->updateParams &= ~OFFSET_OF_DATA;
++ p_Manip->shadowUpdateParams |= OFFSET_OF_DATA;
++ }
++ else if (validate)
++ {
++
++ p_SavedManipParams = FmPcdCcTreeGetSavedManipParams(h_FmTree);
++ if (!p_SavedManipParams)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("for this manipulation tree has to be configured previosely with this type"));
++ if (p_Manip->capwapFragParams.dataOffset != p_SavedManipParams->capwapParams.dataOffset)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("this manipulation was updated previousely by different value"));
++ }
++
++ return E_OK;
++}
++
++static t_Error UpdateInitCapwapFragmentation(t_Handle h_FmPort,
++ t_FmPcdManip *p_Manip,
++ t_Handle h_Ad,
++ bool validate,
++ t_Handle h_FmTree)
++{
++ t_AdOfTypeContLookup *p_Ad;
++ t_FmPortGetSetCcParams fmPortGetSetCcParams;
++ t_Error err;
++ uint32_t tmpReg32 = 0;
++ t_FmPcdCcSavedManipParams *p_SavedManipParams;
++
++ UNUSED(h_Ad);
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Frag,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Manip->frag,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(((p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION) ||
++ (p_Manip->opcode == HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER)), E_INVALID_STATE);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Frag;
++
++ if (p_Manip->updateParams)
++ {
++ if ((!(p_Manip->updateParams & OFFSET_OF_DATA)) ||
++ ((p_Manip->shadowUpdateParams & OFFSET_OF_DATA)))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has not be updated"));
++ fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams;
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN | UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY;
++ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
++ /* CAPWAP reassembly uses a hardcoded FMAN_CTRL2, so for fragmentation it is better to use FMAN_CTRL1 */
++ fmPortGetSetCcParams.setCcParams.orFmanCtrl = FPM_PORT_FM_CTL1;
++
++ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Data offset wasn't configured previousely"));
++
++ p_SavedManipParams = (t_FmPcdCcSavedManipParams *)XX_Malloc(sizeof(t_FmPcdCcSavedManipParams));
++ p_SavedManipParams->capwapParams.dataOffset = fmPortGetSetCcParams.getCcParams.dataOffset;
++
++#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
++ ASSERT_COND(!(p_SavedManipParams->capwapParams.dataOffset % 16));
++#endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */
++
++ FmPcdCcTreeSetSavedManipParams(h_FmTree, (t_Handle)p_SavedManipParams);
++ }
++ else if (validate)
++ {
++ if ((!(p_Manip->shadowUpdateParams & OFFSET_OF_DATA)) ||
++ ((p_Manip->updateParams & OFFSET_OF_DATA)))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has be updated"));
++ fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams;
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN | UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY;
++ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
++ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Data offset wasn't configured previousely"));
++ }
++
++ if (p_Manip->updateParams)
++ {
++ tmpReg32 = GET_UINT32(p_Ad->pcAndOffsets);
++ tmpReg32 |= ((uint32_t)fmPortGetSetCcParams.getCcParams.dataOffset<< 16);
++ WRITE_UINT32(p_Ad->pcAndOffsets,tmpReg32);
++
++ p_Manip->updateParams &= ~OFFSET_OF_DATA;
++ p_Manip->shadowUpdateParams |= OFFSET_OF_DATA;
++ p_Manip->capwapFragParams.dataOffset = fmPortGetSetCcParams.getCcParams.dataOffset;
++ }
++ else if (validate)
++ {
++ if (p_Manip->capwapFragParams.dataOffset != fmPortGetSetCcParams.getCcParams.dataOffset)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("this manipulation was updated previousely by different value"));
++ }
++
++ return E_OK;
++}
++
++static t_Error UpdateInitCapwapReasm(t_Handle h_FmPcd,
++ t_Handle h_FmPort,
++ t_FmPcdManip *p_Manip,
++ t_Handle h_Ad,
++ bool validate)
++{
++ t_CapwapReasmPram *p_ReassmTbl;
++ t_Error err;
++ t_FmPortGetSetCcParams fmPortGetSetCcParams;
++ uint8_t i = 0;
++ uint16_t size;
++ uint32_t tmpReg32;
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_FmPcdCcCapwapReassmTimeoutParams ccCapwapReassmTimeoutParams;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Frag,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Manip->frag,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Manip->opcode == HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST), E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc,E_INVALID_HANDLE);
++
++ if (p_Manip->h_FmPcd != h_FmPcd)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("handler of PCD previously was initiated by different value"));
++
++ UNUSED(h_Ad);
++
++ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
++ p_ReassmTbl = (t_CapwapReasmPram *)p_Manip->h_Frag;
++
++ if (p_Manip->updateParams)
++ {
++ if ((!(p_Manip->updateParams & NUM_OF_TASKS) &&
++ !(p_Manip->updateParams & OFFSET_OF_DATA) &&
++ !(p_Manip->updateParams & OFFSET_OF_PR) &&
++ !(p_Manip->updateParams & HW_PORT_ID)) ||
++ ((p_Manip->shadowUpdateParams & NUM_OF_TASKS) ||
++ (p_Manip->shadowUpdateParams & OFFSET_OF_DATA) || (p_Manip->shadowUpdateParams & OFFSET_OF_PR) ||
++ (p_Manip->shadowUpdateParams & HW_PORT_ID)))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has not be updated"));
++
++ fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams;
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN;
++ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
++
++ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (fmPortGetSetCcParams.getCcParams.type & NUM_OF_TASKS)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Num of tasks wasn't configured previousely"));
++ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("offset of the data wasn't configured previousely"));
++ if (fmPortGetSetCcParams.getCcParams.type & HW_PORT_ID)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("hwPortId wasn't updated"));
++#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
++ ASSERT_COND((fmPortGetSetCcParams.getCcParams.dataOffset % 16) == 0);
++#endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */
++ }
++ else if (validate)
++ {
++ if ((!(p_Manip->shadowUpdateParams & NUM_OF_TASKS) &&
++ !(p_Manip->shadowUpdateParams & OFFSET_OF_DATA) &&
++ !(p_Manip->shadowUpdateParams & OFFSET_OF_PR) &&
++ !(p_Manip->shadowUpdateParams & HW_PORT_ID)) &&
++ ((p_Manip->updateParams & NUM_OF_TASKS) ||
++ (p_Manip->updateParams & OFFSET_OF_DATA) || (p_Manip->updateParams & OFFSET_OF_PR) ||
++ (p_Manip->updateParams & HW_PORT_ID)))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has be updated"));
++
++ fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams;
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN;
++ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
++
++ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (fmPortGetSetCcParams.getCcParams.type & NUM_OF_TASKS)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("NumOfTasks wasn't configured previously"));
++ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("offset of the data wasn't configured previously"));
++ if (fmPortGetSetCcParams.getCcParams.type & HW_PORT_ID)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("hwPortId wasn't updated"));
++ }
++
++ if (p_Manip->updateParams)
++ {
++ if (p_Manip->updateParams & NUM_OF_TASKS)
++ {
++ /* recommendation of the Microcode team - (maxNumFramesInProcess * 2) */
++ size = (uint16_t)(p_Manip->capwapFragParams.maxNumFramesInProcess*2 + fmPortGetSetCcParams.getCcParams.numOfTasks);
++ if (size > 255)
++ RETURN_ERROR(MAJOR,E_INVALID_VALUE, ("numOfOpenReassmEntries + numOfTasks per port can not be greater than 256"));
++
++ p_Manip->capwapFragParams.numOfTasks = fmPortGetSetCcParams.getCcParams.numOfTasks;
++
++ /*p_ReassmFrmDescrIndxPoolTbl*/
++ p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl =
++ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ (uint32_t)(size + 1),
++ 4);
++ if (!p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP Reassembly frame buffer index pool table"));
++
++ MemSet8(p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl, 0, (uint32_t)(size + 1));
++
++ for ( i = 0; i < size; i++)
++ WRITE_UINT8(*(uint8_t *)PTR_MOVE(p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl, i), (uint8_t)(i+1));
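++ /* The pool is a simple free list of byte-wide indexes: entry i holds
++ * i+1, pointing at the next free frame descriptor, and the MemSet8
++ * above leaves the final entry as the 0 terminator. */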
++
++ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl) - p_FmPcd->physicalMuramBase);
++
++ WRITE_UINT32(p_ReassmTbl->reasmFrmDescIndexPoolTblPtr, tmpReg32);
++
++ /*p_ReassmFrmDescrPoolTbl*/
++ p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl =
++ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ (uint32_t)((size + 1) * FM_PCD_MANIP_CAPWAP_REASM_RFD_SIZE),
++ 4);
++
++ if (!p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP Reassembly frame buffer pool table"));
++
++ MemSet8(p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl, 0, (uint32_t)((size +1)* FM_PCD_MANIP_CAPWAP_REASM_RFD_SIZE));
++
++ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl) - p_FmPcd->physicalMuramBase);
++
++ WRITE_UINT32(p_ReassmTbl->reasmFrmDescPoolTblPtr, tmpReg32);
++
++ /*p_TimeOutTbl*/
++
++ p_Manip->capwapFragParams.p_TimeOutTbl =
++ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ (uint32_t)((size + 1)* FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_ENTRY_SIZE),
++ 4);
++
++ if (!p_Manip->capwapFragParams.p_TimeOutTbl)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP Reassembly timeout table"));
++
++ MemSet8(p_Manip->capwapFragParams.p_TimeOutTbl, 0, (uint16_t)((size + 1)*FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_ENTRY_SIZE));
++
++ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->capwapFragParams.p_TimeOutTbl) - p_FmPcd->physicalMuramBase);
++ WRITE_UINT32(p_ReassmTbl->timeOutTblPtr, tmpReg32);
++
++ p_Manip->updateParams &= ~NUM_OF_TASKS;
++ p_Manip->shadowUpdateParams |= NUM_OF_TASKS;
++ }
++
++ if (p_Manip->updateParams & OFFSET_OF_DATA)
++ {
++ p_Manip->capwapFragParams.dataOffset = fmPortGetSetCcParams.getCcParams.dataOffset;
++ tmpReg32 = GET_UINT32(p_ReassmTbl->mode);
++ tmpReg32|= p_Manip->capwapFragParams.dataOffset;
++ WRITE_UINT32(p_ReassmTbl->mode, tmpReg32);
++ p_Manip->updateParams &= ~OFFSET_OF_DATA;
++ p_Manip->shadowUpdateParams |= OFFSET_OF_DATA;
++ }
++
++ if (!(fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR))
++ {
++ p_Manip->capwapFragParams.prOffset = fmPortGetSetCcParams.getCcParams.prOffset;
++
++ tmpReg32 = GET_UINT32(p_ReassmTbl->mode);
++ tmpReg32|= FM_PCD_MANIP_CAPWAP_REASM_PR_COPY;
++ WRITE_UINT32(p_ReassmTbl->mode, tmpReg32);
++
++ tmpReg32 = GET_UINT32(p_ReassmTbl->intStatsTblPtr);
++ tmpReg32 |= (uint32_t)p_Manip->capwapFragParams.prOffset << 24;
++ WRITE_UINT32(p_ReassmTbl->intStatsTblPtr, tmpReg32);
++ p_Manip->updateParams &= ~OFFSET_OF_PR;
++ p_Manip->shadowUpdateParams |= OFFSET_OF_PR;
++ }
++ else
++ {
++ p_Manip->capwapFragParams.prOffset = 0xff;
++ p_Manip->updateParams &= ~OFFSET_OF_PR;
++ p_Manip->shadowUpdateParams |= OFFSET_OF_PR;
++ }
++
++ p_Manip->capwapFragParams.hwPortId = fmPortGetSetCcParams.getCcParams.hardwarePortId;
++ p_Manip->updateParams &= ~HW_PORT_ID;
++ p_Manip->shadowUpdateParams |= HW_PORT_ID;
++
++ /*timeout hc */
++ ccCapwapReassmTimeoutParams.fqidForTimeOutFrames = p_Manip->capwapFragParams.fqidForTimeOutFrames;
++ ccCapwapReassmTimeoutParams.portIdAndCapwapReassmTbl = (uint32_t)p_Manip->capwapFragParams.hwPortId << 24;
++ ccCapwapReassmTimeoutParams.portIdAndCapwapReassmTbl |= (uint32_t)((XX_VirtToPhys(p_ReassmTbl) - p_FmPcd->physicalMuramBase));
++ ccCapwapReassmTimeoutParams.timeoutRequestTime = (((uint32_t)1<<p_Manip->capwapFragParams.bitFor1Micro) * p_Manip->capwapFragParams.timeoutRoutineRequestTime)/2;
++ return FmHcPcdCcCapwapTimeoutReassm(p_FmPcd->h_Hc,&ccCapwapReassmTimeoutParams);
++ }
++
++ else if (validate)
++ {
++ if (fmPortGetSetCcParams.getCcParams.hardwarePortId != p_Manip->capwapFragParams.hwPortId)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Reassembly manipulation previously was assigned to another port"));
++ if (fmPortGetSetCcParams.getCcParams.numOfTasks != p_Manip->capwapFragParams.numOfTasks)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfTasks for this manipulation previously was defined by another value "));
++
++ if (!(fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR))
++ {
++ if (p_Manip->capwapFragParams.prOffset != fmPortGetSetCcParams.getCcParams.prOffset)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Parse result offset previously was defined by another value "));
++ }
++ else
++ {
++ if (p_Manip->capwapFragParams.prOffset != 0xff)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Parse result offset previously was defined by another value "));
++ }
++ if (fmPortGetSetCcParams.getCcParams.dataOffset != p_Manip->capwapFragParams.dataOffset)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Data offset previously was defined by another value "));
++ }
++
++ return E_OK;
++}
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++
++t_Error FmPcdRegisterReassmPort(t_Handle h_FmPcd, t_Handle h_ReasmCommonPramTbl)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmPcdCcReassmTimeoutParams ccReassmTimeoutParams = { 0 };
++ t_Error err = E_OK;
++ uint8_t result;
++ uint32_t bitFor1Micro, tsbs, log2num;
++
++ ASSERT_COND(p_FmPcd);
++ ASSERT_COND(h_ReasmCommonPramTbl);
++
++ bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm);
++ if (bitFor1Micro == 0)
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Timestamp scale"));
++
++ bitFor1Micro = 32 - bitFor1Micro;
++ LOG2(FM_PCD_MANIP_REASM_TIMEOUT_THREAD_THRESH, log2num);
++ tsbs = bitFor1Micro - log2num;
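++ /* Illustrative numbers: if FmGetTimeStampScale() returned 22, then
++ * bitFor1Micro = 10 (2^10 timestamp ticks per microsecond); with a
++ * thread threshold of, say, 64, log2num = 6 and tsbs = 4. */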
++
++ ccReassmTimeoutParams.iprcpt = (uint32_t)(XX_VirtToPhys(
++ h_ReasmCommonPramTbl) - p_FmPcd->physicalMuramBase);
++ ccReassmTimeoutParams.tsbs = (uint8_t)tsbs;
++ ccReassmTimeoutParams.activate = TRUE;
++ if ((err = FmHcPcdCcTimeoutReassm(p_FmPcd->h_Hc, &ccReassmTimeoutParams,
++ &result)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ switch (result)
++ {
++ case (0):
++ return E_OK;
++ case (1):
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("failed to allocate TNUM"));
++ case (2):
++ RETURN_ERROR(
++ MAJOR, E_NO_MEMORY,
++ ("failed to allocate internal buffer from the HC-Port"));
++ case (3):
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("'Disable Timeout Task' with invalid IPRCPT"));
++ case (4):
++ RETURN_ERROR(MAJOR, E_FULL, ("too many timeout tasks"));
++ case (5):
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("invalid sub command"));
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++ }
++ return E_OK;
++}
++
++static t_Error CreateReassCommonTable(t_FmPcdManip *p_Manip)
++{
++ uint32_t tmpReg32 = 0, i, bitFor1Micro;
++ uint64_t tmpReg64, size;
++ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
++ t_Error err = E_OK;
++
++ bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm);
++ if (bitFor1Micro == 0)
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Timestamp scale"));
++
++ /* Allocation of the Reassembly Common Parameters table. This table is located in the
++ MURAM. Its size is 64 bytes and its base address should be 8-byte aligned. */
++ p_Manip->reassmParams.p_ReassCommonTbl =
++ (t_ReassCommonTbl *)FM_MURAM_AllocMem(
++ p_FmPcd->h_FmMuram,
++ FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_SIZE,
++ FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_ALIGN);
++
++ if (!p_Manip->reassmParams.p_ReassCommonTbl)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM alloc for Reassembly common parameters table"));
++
++ MemSet8(p_Manip->reassmParams.p_ReassCommonTbl, 0,
++ FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_SIZE);
++
++ /* Setting the TimeOut Mode.*/
++ tmpReg32 = 0;
++ if (p_Manip->reassmParams.timeOutMode
++ == e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES)
++ tmpReg32 |= FM_PCD_MANIP_REASM_TIME_OUT_BETWEEN_FRAMES;
++
++ /* Setting TimeOut FQID - Frames that time out are enqueued to this FQID.
++ In order to cause TimeOut frames to be discarded, this queue should be configured accordingly*/
++ tmpReg32 |= p_Manip->reassmParams.fqidForTimeOutFrames;
++ WRITE_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->timeoutModeAndFqid,
++ tmpReg32);
++
++ /* Calculate the size of the IP Reassembly Frame Descriptor pool - the number of frames that are allowed to be reassembled simultaneously + 129. */
++ size = p_Manip->reassmParams.maxNumFramesInProcess + 129;
++
++ /* Allocation of IP Reassembly Frame Descriptor Indexes Pool - This pool resides in the MURAM */
++ p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr =
++ PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ (uint32_t)(size * 2),
++ 256));
++ if (!p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr)
++ RETURN_ERROR(
++ MAJOR, E_NO_MEMORY,
++ ("MURAM alloc for Reassembly frame descriptor indexes pool"));
++
++ MemSet8(UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr),
++ 0, (uint32_t)(size * 2));
++
++ /* The entries in the IP Reassembly Frame Descriptor Indexes Pool contain indexes starting with 1 up to
++ the maximum number of frames that are allowed to be reassembled simultaneously + 128.
++ The last entry in this pool must contain the index zero*/
++ for (i = 0; i < (size - 1); i++)
++ WRITE_UINT16(
++ *(uint16_t *)PTR_MOVE(UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr), (i<<1)),
++ (uint16_t)(i+1));
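++ /* As with the CAPWAP pool above, this builds a free list: 16-bit entry i
++ * holds index i+1, and the zero-filled last entry terminates the chain
++ * as required by the note above. */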
++
++ /* Sets the IP Reassembly Frame Descriptor Indexes Pool offset from MURAM */
++ tmpReg32 = (uint32_t)(XX_VirtToPhys(
++ UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr))
++ - p_FmPcd->physicalMuramBase);
++ WRITE_UINT32(
++ p_Manip->reassmParams.p_ReassCommonTbl->reassFrmDescIndexPoolTblPtr,
++ tmpReg32);
++
++ /* Allocation of the Reassembly Frame Descriptors Pool - This pool resides in external memory.
++ The number of entries in this pool should be equal to the number of entries in IP Reassembly Frame Descriptor Indexes Pool.*/
++ p_Manip->reassmParams.reassFrmDescrPoolTblAddr =
++ PTR_TO_UINT(XX_MallocSmart((uint32_t)(size * 64), p_Manip->reassmParams.dataMemId, 64));
++
++ if (!p_Manip->reassmParams.reassFrmDescrPoolTblAddr)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation FAILED"));
++
++ MemSet8(UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrPoolTblAddr), 0,
++ (uint32_t)(size * 64));
++
++ /* Sets the Reassembly Frame Descriptors Pool and liodn offset*/
++ tmpReg64 = (uint64_t)(XX_VirtToPhys(
++ UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrPoolTblAddr)));
++ tmpReg64 |= ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
++ & FM_PCD_MANIP_REASM_LIODN_MASK)
++ << (uint64_t)FM_PCD_MANIP_REASM_LIODN_SHIFT);
++ tmpReg64 |= ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
++ & FM_PCD_MANIP_REASM_ELIODN_MASK)
++ << (uint64_t)FM_PCD_MANIP_REASM_ELIODN_SHIFT);
++ WRITE_UINT32(
++ p_Manip->reassmParams.p_ReassCommonTbl->liodnAndReassFrmDescPoolPtrHi,
++ (uint32_t)(tmpReg64 >> 32));
++ WRITE_UINT32(
++ p_Manip->reassmParams.p_ReassCommonTbl->reassFrmDescPoolPtrLow,
++ (uint32_t)tmpReg64);
++
++ /* Allocation of the TimeOut table - This table resides in the MURAM.
++ The number of entries in this table is identical to the number of entries in the Reassembly Frame Descriptors Pool. */
++ p_Manip->reassmParams.timeOutTblAddr =
++ PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, (uint32_t)(size * 8),8));
++
++ if (!p_Manip->reassmParams.timeOutTblAddr)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM alloc for Reassembly timeout table"));
++
++ MemSet8(UINT_TO_PTR(p_Manip->reassmParams.timeOutTblAddr), 0,
++ (uint16_t)(size * 8));
++
++ /* Sets the TimeOut table offset from MURAM */
++ tmpReg32 = (uint32_t)(XX_VirtToPhys(
++ UINT_TO_PTR(p_Manip->reassmParams.timeOutTblAddr))
++ - p_FmPcd->physicalMuramBase);
++ WRITE_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->timeOutTblPtr,
++ tmpReg32);
++
++ /* Sets the Expiration Delay */
++ tmpReg32 = 0;
++ tmpReg32 |= (((uint32_t)(1 << bitFor1Micro))
++ * p_Manip->reassmParams.timeoutThresholdForReassmProcess);
++ WRITE_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->expirationDelay,
++ tmpReg32);
++
++ err = FmPcdRegisterReassmPort(p_FmPcd,
++ p_Manip->reassmParams.p_ReassCommonTbl);
++ if (err != E_OK)
++ {
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
++ p_Manip->reassmParams.p_ReassCommonTbl);
++ RETURN_ERROR(MAJOR, err, ("port registration"));
++ }
++
++ return err;
++}
++
++static t_Error CreateReassTable(t_FmPcdManip *p_Manip, e_NetHeaderType hdr)
++{
++ t_FmPcd *p_FmPcd = p_Manip->h_FmPcd;
++ uint32_t tmpReg32, autoLearnHashTblSize;
++ uint32_t numOfWays, setSize, setSizeCode, keySize;
++ uint32_t waySize, numOfSets, numOfEntries;
++ uint64_t tmpReg64;
++ uint16_t minFragSize;
++ uint16_t maxReassemSize;
++ uintptr_t *p_AutoLearnHashTblAddr, *p_AutoLearnSetLockTblAddr;
++ t_ReassTbl **p_ReassTbl;
++
++ switch (hdr)
++ {
++ case HEADER_TYPE_IPv4:
++ p_ReassTbl = &p_Manip->reassmParams.ip.p_Ipv4ReassTbl;
++ p_AutoLearnHashTblAddr =
++ &p_Manip->reassmParams.ip.ipv4AutoLearnHashTblAddr;
++ p_AutoLearnSetLockTblAddr =
++ &p_Manip->reassmParams.ip.ipv4AutoLearnSetLockTblAddr;
++ minFragSize = p_Manip->reassmParams.ip.minFragSize[0];
++ maxReassemSize = 0;
++ numOfWays = p_Manip->reassmParams.ip.numOfFramesPerHashEntry[0];
++ keySize = 4 + 4 + 1 + 2; /* 3-tuple + IP-Id */
++ break;
++ case HEADER_TYPE_IPv6:
++ p_ReassTbl = &p_Manip->reassmParams.ip.p_Ipv6ReassTbl;
++ p_AutoLearnHashTblAddr =
++ &p_Manip->reassmParams.ip.ipv6AutoLearnHashTblAddr;
++ p_AutoLearnSetLockTblAddr =
++ &p_Manip->reassmParams.ip.ipv6AutoLearnSetLockTblAddr;
++ minFragSize = p_Manip->reassmParams.ip.minFragSize[1];
++ maxReassemSize = 0;
++ numOfWays = p_Manip->reassmParams.ip.numOfFramesPerHashEntry[1];
++ keySize = 16 + 16 + 4; /* 2-tuple + IP-Id */
++ if (numOfWays > e_FM_PCD_MANIP_SIX_WAYS_HASH)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("num of ways"));
++ break;
++ case HEADER_TYPE_CAPWAP:
++ p_ReassTbl = &p_Manip->reassmParams.capwap.p_ReassTbl;
++ p_AutoLearnHashTblAddr =
++ &p_Manip->reassmParams.capwap.autoLearnHashTblAddr;
++ p_AutoLearnSetLockTblAddr =
++ &p_Manip->reassmParams.capwap.autoLearnSetLockTblAddr;
++ minFragSize = 0;
++ maxReassemSize = p_Manip->reassmParams.capwap.maxRessembledsSize;
++ numOfWays = p_Manip->reassmParams.capwap.numOfFramesPerHashEntry;
++ keySize = 4;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("header type"));
++ }
++ keySize += 2; /* 2 bytes reserved for RFDIndex */
++#if (DPAA_VERSION >= 11)
++ keySize += 2; /* 2 bytes reserved */
++#endif /* (DPAA_VERSION >= 11) */
++ waySize = ROUND_UP(keySize, 8);
++
++ /* Allocates the Reassembly Parameters Table - This table is located in the MURAM.*/
++ *p_ReassTbl = (t_ReassTbl *)FM_MURAM_AllocMem(
++ p_FmPcd->h_FmMuram, FM_PCD_MANIP_REASM_TABLE_SIZE,
++ FM_PCD_MANIP_REASM_TABLE_ALIGN);
++ if (!*p_ReassTbl)
++ RETURN_ERROR( MAJOR, E_NO_MEMORY,
++ ("MURAM alloc for Reassembly specific parameters table"));
++ memset(*p_ReassTbl, 0, sizeof(t_ReassTbl));
++
++ /* Sets the Reassembly common Parameters table offset from MURAM in the Reassembly Table descriptor*/
++ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->reassmParams.p_ReassCommonTbl)
++ - p_FmPcd->physicalMuramBase);
++ WRITE_UINT32((*p_ReassTbl)->reassCommonPrmTblPtr, tmpReg32);
++
++ /* Calculate set size (set size is rounded-up to next power of 2) */
++ NEXT_POWER_OF_2(numOfWays * waySize, setSize);
++
++ /* Get set size code */
++ LOG2(setSize, setSizeCode);
++
++ /* Sets ways number and set size code */
++ WRITE_UINT16((*p_ReassTbl)->waysNumAndSetSize,
++ (uint16_t)((numOfWays << 8) | setSizeCode));
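++    /* Continuing the example: 8 ways of 16 bytes give numOfWays * waySize = 128,
++     already a power of 2, so setSize = 128 and setSizeCode = LOG2(128) = 7;
++     waysNumAndSetSize is then (8 << 8) | 7 = 0x0807. */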
++
++ /* It is recommended that the total number of entries in this table
++ (number of sets * number of ways) will be twice the number of frames that
++ are expected to be reassembled simultaneously.*/
++ numOfEntries = (uint32_t)(p_Manip->reassmParams.maxNumFramesInProcess * 2);
++
++ /* sets number calculation - number of entries = number of sets * number of ways */
++ numOfSets = numOfEntries / numOfWays;
++
++ /* Sets AutoLearnHashKeyMask*/
++ NEXT_POWER_OF_2(numOfSets, numOfSets);
++
++ WRITE_UINT16((*p_ReassTbl)->autoLearnHashKeyMask,
++ (uint16_t)(numOfSets - 1));
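++    /* Example: maxNumFramesInProcess = 1024 gives numOfEntries = 2048; with 8 ways,
++     numOfSets = 256, already a power of 2, so autoLearnHashKeyMask = 255 (0xFF). */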
++
++ /* Allocation of Reassembly Automatic Learning Hash Table - This table resides in external memory.
++ The size of this table is determined by the number of sets and the set size.
++ Table size = set size * number of sets
++ This table base address should be aligned to SetSize.*/
++ autoLearnHashTblSize = numOfSets * setSize;
++
++ *p_AutoLearnHashTblAddr =
++ PTR_TO_UINT(XX_MallocSmart(autoLearnHashTblSize, p_Manip->reassmParams.dataMemId, setSize));
++ if (!*p_AutoLearnHashTblAddr)
++ {
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, *p_ReassTbl);
++ *p_ReassTbl = NULL;
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation FAILED"));
++ }
++ MemSet8(UINT_TO_PTR(*p_AutoLearnHashTblAddr), 0, autoLearnHashTblSize);
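++    /* With the example values above, the table occupies 256 sets * 128 bytes = 32 KiB
++     of external memory, and its base must be aligned to setSize (128 bytes). */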
++
++ /* Sets the Reassembly Automatic Learning Hash Table and liodn offset */
++ tmpReg64 = ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
++ & FM_PCD_MANIP_REASM_LIODN_MASK)
++ << (uint64_t)FM_PCD_MANIP_REASM_LIODN_SHIFT);
++ tmpReg64 |= ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
++ & FM_PCD_MANIP_REASM_ELIODN_MASK)
++ << (uint64_t)FM_PCD_MANIP_REASM_ELIODN_SHIFT);
++ tmpReg64 |= XX_VirtToPhys(UINT_TO_PTR(*p_AutoLearnHashTblAddr));
++ WRITE_UINT32( (*p_ReassTbl)->liodnAlAndAutoLearnHashTblPtrHi,
++ (uint32_t)(tmpReg64 >> 32));
++ WRITE_UINT32((*p_ReassTbl)->autoLearnHashTblPtrLow, (uint32_t)tmpReg64);
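++    /* The 64-bit value just written combines the LIODN and eLIODN bits taken from
++     dataLiodnOffset with the table's physical address: the upper 32 bits land in
++     liodnAlAndAutoLearnHashTblPtrHi, the lower 32 in autoLearnHashTblPtrLow. */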
++
++ /* Allocation of the Set Lock table - This table resides in external memory
++ The size of this table is (number of sets in the Reassembly Automatic Learning Hash table)*4 bytes.
++ This table resides in external memory and its base address should be 4-byte aligned */
++ *p_AutoLearnSetLockTblAddr =
++ PTR_TO_UINT(XX_MallocSmart((uint32_t)(numOfSets * 4), p_Manip->reassmParams.dataMemId, 4));
++ if (!*p_AutoLearnSetLockTblAddr)
++ {
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, *p_ReassTbl);
++ *p_ReassTbl = NULL;
++ XX_FreeSmart(UINT_TO_PTR(*p_AutoLearnHashTblAddr));
++ *p_AutoLearnHashTblAddr = 0;
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation FAILED"));
++ }
++ MemSet8(UINT_TO_PTR(*p_AutoLearnSetLockTblAddr), 0, (numOfSets * 4));
++
++ /* sets Set Lock table pointer and liodn offset*/
++ tmpReg64 = ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
++ & FM_PCD_MANIP_REASM_LIODN_MASK)
++ << (uint64_t)FM_PCD_MANIP_REASM_LIODN_SHIFT);
++ tmpReg64 |= ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
++ & FM_PCD_MANIP_REASM_ELIODN_MASK)
++ << (uint64_t)FM_PCD_MANIP_REASM_ELIODN_SHIFT);
++ tmpReg64 |= XX_VirtToPhys(UINT_TO_PTR(*p_AutoLearnSetLockTblAddr));
++ WRITE_UINT32( (*p_ReassTbl)->liodnSlAndAutoLearnSetLockTblPtrHi,
++ (uint32_t)(tmpReg64 >> 32));
++ WRITE_UINT32((*p_ReassTbl)->autoLearnSetLockTblPtrLow, (uint32_t)tmpReg64);
++
++ /* Sets user's requested minimum fragment size (in Bytes) for First/Middle fragment */
++ WRITE_UINT16((*p_ReassTbl)->minFragSize, minFragSize);
++
++ WRITE_UINT16((*p_ReassTbl)->maxReassemblySize, maxReassemSize);
++
++ return E_OK;
++}
++
++static t_Error UpdateInitReasm(t_Handle h_FmPcd, t_Handle h_PcdParams,
++ t_Handle h_FmPort, t_FmPcdManip *p_Manip,
++ t_Handle h_Ad, bool validate)
++{
++ t_FmPortGetSetCcParams fmPortGetSetCcParams;
++ uint32_t tmpReg32;
++ t_Error err;
++ t_FmPortPcdParams *p_PcdParams = (t_FmPortPcdParams *)h_PcdParams;
++#if (DPAA_VERSION >= 11)
++ t_FmPcdCtrlParamsPage *p_ParamsPage;
++#endif /* (DPAA_VERSION >= 11) */
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Manip->frag, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(
++ (p_Manip->opcode == HMAN_OC_IP_REASSEMBLY) || (p_Manip->opcode == HMAN_OC_CAPWAP_REASSEMBLY),
++ E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Manip->updateParams || h_PcdParams,
++ E_INVALID_HANDLE);
++
++ UNUSED(h_Ad);
++
++ if (!p_Manip->updateParams)
++ return E_OK;
++
++ if (p_Manip->h_FmPcd != h_FmPcd)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++                ("PCD handle differs from the one used at initialization"));
++
++ if (p_Manip->updateParams)
++ {
++ if ((!(p_Manip->updateParams
++ & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK)))
++ || ((p_Manip->shadowUpdateParams
++ & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK))))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++                    ("at this stage the port parameters have not been updated"));
++
++ fmPortGetSetCcParams.setCcParams.type = 0;
++ if (p_Manip->opcode == HMAN_OC_CAPWAP_REASSEMBLY)
++ {
++ fmPortGetSetCcParams.setCcParams.type |= UPDATE_OFP_DPTE;
++ fmPortGetSetCcParams.setCcParams.ofpDpde = 0xF;
++ }
++ fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams | FM_REV;
++ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
++ != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if (fmPortGetSetCcParams.getCcParams.type
++ & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK | FM_REV))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("offset of the data wasn't configured previously"));
++ if (p_Manip->updateParams
++ & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK))
++ {
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint8_t *p_Ptr, i, totalNumOfTnums;
++
++ totalNumOfTnums =
++ (uint8_t)(fmPortGetSetCcParams.getCcParams.numOfTasks
++ + fmPortGetSetCcParams.getCcParams.numOfExtraTasks);
++
++ p_Manip->reassmParams.internalBufferPoolAddr =
++ PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ (uint32_t)(totalNumOfTnums * BMI_FIFO_UNITS),
++ BMI_FIFO_UNITS));
++ if (!p_Manip->reassmParams.internalBufferPoolAddr)
++ RETURN_ERROR(
++ MAJOR, E_NO_MEMORY,
++ ("MURAM alloc for Reassembly internal buffers pool"));
++ MemSet8(
++ UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolAddr),
++ 0, (uint32_t)(totalNumOfTnums * BMI_FIFO_UNITS));
++
++ p_Manip->reassmParams.internalBufferPoolManagementIndexAddr =
++ PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ (uint32_t)(5 + totalNumOfTnums),
++ 4));
++ if (!p_Manip->reassmParams.internalBufferPoolManagementIndexAddr)
++ RETURN_ERROR(
++ MAJOR,
++ E_NO_MEMORY,
++ ("MURAM alloc for Reassembly internal buffers management"));
++
++ p_Ptr =
++ (uint8_t*)UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolManagementIndexAddr);
++ WRITE_UINT32(
++ *(uint32_t*)p_Ptr,
++ (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolAddr)) - p_FmPcd->physicalMuramBase));
++ for (i = 0, p_Ptr += 4; i < totalNumOfTnums; i++, p_Ptr++)
++ WRITE_UINT8(*p_Ptr, i);
++ WRITE_UINT8(*p_Ptr, 0xFF);
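++            /* Resulting management-index layout: bytes 0-3 hold the MURAM offset of
++             the internal buffer pool, the next totalNumOfTnums bytes hold the free
++             indexes 0..N-1, and the final 0xFF byte presumably acts as an
++             end-of-ring marker. */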
++
++ tmpReg32 =
++ (4 << FM_PCD_MANIP_REASM_COMMON_INT_BUFFER_IDX_SHIFT)
++ | ((uint32_t)(XX_VirtToPhys(
++ UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolManagementIndexAddr))
++ - p_FmPcd->physicalMuramBase));
++ WRITE_UINT32(
++ p_Manip->reassmParams.p_ReassCommonTbl->internalBufferManagement,
++ tmpReg32);
++
++ p_Manip->updateParams &= ~(NUM_OF_TASKS | NUM_OF_EXTRA_TASKS
++ | DISCARD_MASK);
++ p_Manip->shadowUpdateParams |= (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS
++ | DISCARD_MASK);
++ }
++ }
++
++ if (p_Manip->opcode == HMAN_OC_CAPWAP_REASSEMBLY)
++ {
++ if (p_Manip->reassmParams.capwap.h_Scheme)
++ {
++ p_PcdParams->p_KgParams->h_Schemes[p_PcdParams->p_KgParams->numOfSchemes] =
++ p_Manip->reassmParams.capwap.h_Scheme;
++ p_PcdParams->p_KgParams->numOfSchemes++;
++ }
++
++ }
++ else
++ {
++ if (p_Manip->reassmParams.ip.h_Ipv4Scheme)
++ {
++ p_PcdParams->p_KgParams->h_Schemes[p_PcdParams->p_KgParams->numOfSchemes] =
++ p_Manip->reassmParams.ip.h_Ipv4Scheme;
++ p_PcdParams->p_KgParams->numOfSchemes++;
++ }
++ if (p_Manip->reassmParams.ip.h_Ipv6Scheme)
++ {
++ p_PcdParams->p_KgParams->h_Schemes[p_PcdParams->p_KgParams->numOfSchemes] =
++ p_Manip->reassmParams.ip.h_Ipv6Scheme;
++ p_PcdParams->p_KgParams->numOfSchemes++;
++ }
++#if (DPAA_VERSION >= 11)
++ if (fmPortGetSetCcParams.getCcParams.revInfo.majorRev >= 6)
++ {
++ if ((err = FmPortSetGprFunc(h_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
++ (void**)&p_ParamsPage)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ tmpReg32 = NIA_ENG_KG;
++ if (p_Manip->reassmParams.ip.h_Ipv4Scheme)
++ {
++ tmpReg32 |= NIA_KG_DIRECT;
++ tmpReg32 |= NIA_KG_CC_EN;
++ tmpReg32 |= FmPcdKgGetSchemeId(
++ p_Manip->reassmParams.ip.h_Ipv4Scheme);
++ WRITE_UINT32(p_ParamsPage->iprIpv4Nia, tmpReg32);
++ }
++ if (p_Manip->reassmParams.ip.h_Ipv6Scheme)
++ {
++ tmpReg32 &= ~NIA_AC_MASK;
++ tmpReg32 |= NIA_KG_DIRECT;
++ tmpReg32 |= NIA_KG_CC_EN;
++ tmpReg32 |= FmPcdKgGetSchemeId(
++ p_Manip->reassmParams.ip.h_Ipv6Scheme);
++ WRITE_UINT32(p_ParamsPage->iprIpv6Nia, tmpReg32);
++ }
++ }
++#else
++ if (fmPortGetSetCcParams.getCcParams.revInfo.majorRev < 6)
++ {
++ WRITE_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->discardMask,
++ fmPortGetSetCcParams.getCcParams.discardMask);
++ }
++#endif /* (DPAA_VERSION >= 11) */
++ }
++ return E_OK;
++}
++
++#if (DPAA_VERSION == 10)
++static t_Error FmPcdFragHcScratchPoolFill(t_Handle h_FmPcd, uint8_t scratchBpid)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmPcdCcFragScratchPoolCmdParams fmPcdCcFragScratchPoolCmdParams;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ memset(&fmPcdCcFragScratchPoolCmdParams, 0, sizeof(t_FmPcdCcFragScratchPoolCmdParams));
++
++ fmPcdCcFragScratchPoolCmdParams.numOfBuffers = NUM_OF_SCRATCH_POOL_BUFFERS;
++ fmPcdCcFragScratchPoolCmdParams.bufferPoolId = scratchBpid;
++ if ((err = FmHcPcdCcIpFragScratchPollCmd(p_FmPcd->h_Hc, TRUE, &fmPcdCcFragScratchPoolCmdParams)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (fmPcdCcFragScratchPoolCmdParams.numOfBuffers != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Fill scratch pool failed,"
++ "Failed to release %d buffers to the BM (missing FBPRs)",
++ fmPcdCcFragScratchPoolCmdParams.numOfBuffers));
++
++ return E_OK;
++}
++
++static t_Error FmPcdFragHcScratchPoolEmpty(t_Handle h_FmPcd, uint8_t scratchBpid)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmPcdCcFragScratchPoolCmdParams fmPcdCcFragScratchPoolCmdParams;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ memset(&fmPcdCcFragScratchPoolCmdParams, 0, sizeof(t_FmPcdCcFragScratchPoolCmdParams));
++
++ fmPcdCcFragScratchPoolCmdParams.bufferPoolId = scratchBpid;
++ if ((err = FmHcPcdCcIpFragScratchPollCmd(p_FmPcd->h_Hc, FALSE, &fmPcdCcFragScratchPoolCmdParams)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return E_OK;
++}
++#endif /* (DPAA_VERSION == 10) */
++
++static void ReleaseManipHandler(t_FmPcdManip *p_Manip, t_FmPcd *p_FmPcd)
++{
++ if (p_Manip->h_Ad)
++ {
++ if (p_Manip->muramAllocate)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->h_Ad);
++ else
++ XX_Free(p_Manip->h_Ad);
++ p_Manip->h_Ad = NULL;
++ }
++ if (p_Manip->p_Template)
++ {
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->p_Template);
++ p_Manip->p_Template = NULL;
++ }
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ if (p_Manip->h_Frag)
++ {
++ if (p_Manip->capwapFragParams.p_AutoLearnHashTbl)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
++ p_Manip->capwapFragParams.p_AutoLearnHashTbl);
++ if (p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
++ p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl);
++ if (p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
++ p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl);
++ if (p_Manip->capwapFragParams.p_TimeOutTbl)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
++ p_Manip->capwapFragParams.p_TimeOutTbl);
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->h_Frag);
++
++ }
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ if (p_Manip->frag)
++ {
++ if (p_Manip->fragParams.p_Frag)
++ {
++#if (DPAA_VERSION == 10)
++ FmPcdFragHcScratchPoolEmpty((t_Handle)p_FmPcd, p_Manip->fragParams.scratchBpid);
++#endif /* (DPAA_VERSION == 10) */
++
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->fragParams.p_Frag);
++ }
++ }
++ else
++ if (p_Manip->reassm)
++ {
++ FmPcdUnregisterReassmPort(p_FmPcd,
++ p_Manip->reassmParams.p_ReassCommonTbl);
++
++ if (p_Manip->reassmParams.timeOutTblAddr)
++ FM_MURAM_FreeMem(
++ p_FmPcd->h_FmMuram,
++ UINT_TO_PTR(p_Manip->reassmParams.timeOutTblAddr));
++ if (p_Manip->reassmParams.reassFrmDescrPoolTblAddr)
++ XX_FreeSmart(
++ UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrPoolTblAddr));
++ if (p_Manip->reassmParams.p_ReassCommonTbl)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
++ p_Manip->reassmParams.p_ReassCommonTbl);
++ if (p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr)
++ FM_MURAM_FreeMem(
++ p_FmPcd->h_FmMuram,
++ UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr));
++ if (p_Manip->reassmParams.internalBufferPoolManagementIndexAddr)
++ FM_MURAM_FreeMem(
++ p_FmPcd->h_FmMuram,
++ UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolManagementIndexAddr));
++ if (p_Manip->reassmParams.internalBufferPoolAddr)
++ FM_MURAM_FreeMem(
++ p_FmPcd->h_FmMuram,
++ UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolAddr));
++ if (p_Manip->reassmParams.hdr == HEADER_TYPE_CAPWAP)
++ {
++
++ }
++ else
++ {
++ if (p_Manip->reassmParams.ip.ipv4AutoLearnHashTblAddr)
++ XX_FreeSmart(
++ UINT_TO_PTR(p_Manip->reassmParams.ip.ipv4AutoLearnHashTblAddr));
++ if (p_Manip->reassmParams.ip.ipv6AutoLearnHashTblAddr)
++ XX_FreeSmart(
++ UINT_TO_PTR(p_Manip->reassmParams.ip.ipv6AutoLearnHashTblAddr));
++ if (p_Manip->reassmParams.ip.ipv4AutoLearnSetLockTblAddr)
++ XX_FreeSmart(
++ UINT_TO_PTR(p_Manip->reassmParams.ip.ipv4AutoLearnSetLockTblAddr));
++ if (p_Manip->reassmParams.ip.ipv6AutoLearnSetLockTblAddr)
++ XX_FreeSmart(
++ UINT_TO_PTR(p_Manip->reassmParams.ip.ipv6AutoLearnSetLockTblAddr));
++ if (p_Manip->reassmParams.ip.p_Ipv4ReassTbl)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
++ p_Manip->reassmParams.ip.p_Ipv4ReassTbl);
++ if (p_Manip->reassmParams.ip.p_Ipv6ReassTbl)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
++ p_Manip->reassmParams.ip.p_Ipv6ReassTbl);
++ if (p_Manip->reassmParams.ip.h_Ipv6Ad)
++ XX_FreeSmart(p_Manip->reassmParams.ip.h_Ipv6Ad);
++ if (p_Manip->reassmParams.ip.h_Ipv4Ad)
++ XX_FreeSmart(p_Manip->reassmParams.ip.h_Ipv4Ad);
++ }
++ }
++
++ if (p_Manip->p_StatsTbl)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->p_StatsTbl);
++}
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++static t_Error CheckManipParamsAndSetType(t_FmPcdManip *p_Manip, t_FmPcdManipParams *p_ManipParams)
++{
++ if (p_ManipParams->u.hdr.rmv)
++ {
++ switch (p_ManipParams->u.hdr.rmvParams.type)
++ {
++ case (e_FM_PCD_MANIP_RMV_BY_HDR):
++ switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.type)
++ {
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START) :
++ if (p_ManipParams->u.hdr.rmvParams.u.byHdr.u.fromStartByHdr.include)
++ {
++ switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.u.fromStartByHdr.hdrInfo.hdr)
++ {
++ case (HEADER_TYPE_CAPWAP_DTLS) :
++ p_Manip->opcode = HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST;
++ p_Manip->muramAllocate = TRUE;
++ if (p_ManipParams->u.hdr.insrt)
++                                    RETURN_ERROR(MAJOR, E_INVALID_STATE, ("insert manipulation cannot follow a CAPWAP_DTLS_HDR remove"));
++ if (p_ManipParams->fragOrReasm)
++ {
++ if (!p_ManipParams->fragOrReasmParams.frag)
++ {
++ switch (p_ManipParams->fragOrReasmParams.hdr)
++ {
++ case (HEADER_TYPE_CAPWAP):
++ p_Manip->opcode = HMAN_OC_CAPWAP_REASSEMBLY;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("unsupported header for Reassembly"));
++ }
++ }
++ else
++                                            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("frag cannot be TRUE for this type of manipulation"));
++ }
++ break;
++ default:
++                                    RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid network header for the remove location"));
++ }
++ }
++ else
++ {
++ switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.u.fromStartByHdr.hdrInfo.hdr)
++ {
++ case (HEADER_TYPE_CAPWAP_DTLS) :
++ case (HEADER_TYPE_CAPWAP) :
++ if (p_ManipParams->fragOrReasm || p_ManipParams->u.hdr.insrt)
++                                        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("insert and fragOrReasm cannot be TRUE for remove type e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_TILL_CAPWAP"));
++ p_Manip->opcode = HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR;
++ p_Manip->muramAllocate = TRUE;
++ p_ManipParams->u.hdr.insrt = TRUE; //internal frame header
++ break;
++ default :
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation"));
++ }
++ }
++ break;
++ default :
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation"));
++ }
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation"));
++ }
++ }
++ else if (p_ManipParams->u.hdr.insrt)
++ {
++ switch (p_ManipParams->u.hdr.insrtParams.type)
++ {
++ case (e_FM_PCD_MANIP_INSRT_BY_TEMPLATE) :
++
++ p_Manip->opcode = HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER;
++ p_Manip->muramAllocate = FALSE;
++ if (p_ManipParams->fragOrReasm)
++ {
++ if (p_ManipParams->fragOrReasmParams.frag)
++ {
++ switch (p_ManipParams->fragOrReasmParams.hdr)
++ {
++ case (HEADER_TYPE_CAPWAP):
++ p_Manip->opcode = HMAN_OC_CAPWAP_FRAGMENTATION;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid header for fragmentation"));
++ }
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,("can not reach this point"));
++ }
++ break;
++
++ default:
++                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("unsupported type for insert-only manipulation"));
++ }
++ }
++ else if (p_ManipParams->fragOrReasm)
++ {
++ if (p_ManipParams->fragOrReasmParams.frag)
++ {
++ switch (p_ManipParams->fragOrReasmParams.hdr)
++ {
++ case (HEADER_TYPE_CAPWAP):
++ p_Manip->opcode = HMAN_OC_CAPWAP_FRAGMENTATION;
++ p_Manip->muramAllocate = FALSE;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported header for fragmentation"));
++ }
++ }
++ else
++ {
++ switch (p_ManipParams->fragOrReasmParams.hdr)
++ {
++ case (HEADER_TYPE_CAPWAP):
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Reassembly has to be with additional operation - rmv = TRUE, type of remove - e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_INCLUDE_SPECIFIC_LOCATION,type = e_FM_PCD_MANIP_LOC_BY_HDR, hdr = HEADER_TYPE_CAPWAP_DTLS"));
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported header for reassembly"));
++ }
++ }
++
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("User didn't ask for any manipulation"));
++
++ p_Manip->insrt = p_ManipParams->u.hdr.insrt;
++ p_Manip->rmv = p_ManipParams->u.hdr.rmv;
++
++ return E_OK;
++}
++
++#else /* not (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++static t_Error CheckManipParamsAndSetType(t_FmPcdManip *p_Manip,
++ t_FmPcdManipParams *p_ManipParams)
++{
++ switch (p_ManipParams->type)
++ {
++ case e_FM_PCD_MANIP_HDR:
++ /* Check that next-manip is not already used */
++ if (p_ManipParams->h_NextManip)
++ {
++ if (!MANIP_IS_FIRST(p_ManipParams->h_NextManip))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++ ("h_NextManip is already a part of another chain"));
++ if ((MANIP_GET_TYPE(p_ManipParams->h_NextManip)
++ != e_FM_PCD_MANIP_HDR) &&
++ (MANIP_GET_TYPE(p_ManipParams->h_NextManip)
++ != e_FM_PCD_MANIP_FRAG))
++ RETURN_ERROR(
++ MAJOR,
++ E_NOT_SUPPORTED,
++                            ("a Header Manipulation node supports h_NextManip only of type Header Manipulation or Fragmentation"));
++ }
++
++ if (p_ManipParams->u.hdr.rmv)
++ {
++ switch (p_ManipParams->u.hdr.rmvParams.type)
++ {
++ case (e_FM_PCD_MANIP_RMV_BY_HDR):
++ switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.type)
++ {
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2):
++ break;
++#if (DPAA_VERSION >= 11)
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP):
++ break;
++ case (e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START):
++ {
++ t_Error err;
++ uint8_t prsArrayOffset;
++
++ err =
++ GetPrOffsetByHeaderOrField(
++ &p_ManipParams->u.hdr.rmvParams.u.byHdr.u.hdrInfo,
++ &prsArrayOffset);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ break;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("invalid type of remove manipulation"));
++ }
++ break;
++ case (e_FM_PCD_MANIP_RMV_GENERIC):
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("invalid type of remove manipulation"));
++ }
++ p_Manip->opcode = HMAN_OC;
++ p_Manip->muramAllocate = TRUE;
++ p_Manip->rmv = TRUE;
++ }
++ else
++ if (p_ManipParams->u.hdr.insrt)
++ {
++ switch (p_ManipParams->u.hdr.insrtParams.type)
++ {
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR):
++ {
++ switch (p_ManipParams->u.hdr.insrtParams.u.byHdr.type)
++ {
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2):
++ /* nothing to check */
++ break;
++#if (DPAA_VERSION >= 11)
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_IP):
++ if (p_ManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size
++ % 4)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("IP inserted header must be of size which is a multiple of four bytes"));
++ break;
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP):
++ if (p_ManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size
++ % 4)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("CAPWAP inserted header must be of size which is a multiple of four bytes"));
++ break;
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP):
++ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE):
++ if (p_ManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size
++ != 8)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("Inserted header must be of size 8"));
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("unsupported insert by header type"));
++ }
++ }
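++                    /* no break: e_FM_PCD_MANIP_INSRT_BY_HDR falls through to the generic case */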
++ case (e_FM_PCD_MANIP_INSRT_GENERIC):
++ break;
++ default:
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++                            ("unsupported type for insert-only manipulation"));
++ }
++ p_Manip->opcode = HMAN_OC;
++ p_Manip->muramAllocate = TRUE;
++ p_Manip->insrt = TRUE;
++ }
++ else
++ if (p_ManipParams->u.hdr.fieldUpdate)
++ {
++ /* Check parameters */
++ if (p_ManipParams->u.hdr.fieldUpdateParams.type
++ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN)
++ {
++ if ((p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
++ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI)
++ && (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.u.vpri
++ > 7))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("vpri should get values of 0-7 "));
++ if (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
++ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN)
++ {
++ int i;
++
++ if (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.vpriDefVal
++ > 7)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("vpriDefVal should get values of 0-7 "));
++ for (i = 0; i < FM_PCD_MANIP_DSCP_TO_VLAN_TRANS;
++ i++)
++ if (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.dscpToVpriTable[i]
++ & 0xf0)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("dscpToVpriTabl value out of range (0-15)"));
++ }
++
++ }
++
++ p_Manip->opcode = HMAN_OC;
++ p_Manip->muramAllocate = TRUE;
++ p_Manip->fieldUpdate = TRUE;
++ }
++ else
++ if (p_ManipParams->u.hdr.custom)
++ {
++ if (p_ManipParams->u.hdr.customParams.type == e_FM_PCD_MANIP_HDR_CUSTOM_GEN_FIELD_REPLACE)
++ {
++
++ if ((p_ManipParams->u.hdr.customParams.u.genFieldReplace.size == 0) ||
++ (p_ManipParams->u.hdr.customParams.u.genFieldReplace.size > 8))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("size should get values of 1-8 "));
++
++ if (p_ManipParams->u.hdr.customParams.u.genFieldReplace.srcOffset > 7)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("srcOffset should be <= 7"));
++
++ if ((p_ManipParams->u.hdr.customParams.u.genFieldReplace.srcOffset +
++ p_ManipParams->u.hdr.customParams.u.genFieldReplace.size) > 8)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("(srcOffset + size) should be <= 8"));
++
++ if ((p_ManipParams->u.hdr.customParams.u.genFieldReplace.dstOffset +
++ p_ManipParams->u.hdr.customParams.u.genFieldReplace.size) > 256)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("(dstOffset + size) should be <= 256"));
++
++ }
++
++ p_Manip->opcode = HMAN_OC;
++ p_Manip->muramAllocate = TRUE;
++ p_Manip->custom = TRUE;
++ }
++ break;
++ case e_FM_PCD_MANIP_REASSEM:
++ if (p_ManipParams->h_NextManip)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++ ("next manip with reassembly"));
++ switch (p_ManipParams->u.reassem.hdr)
++ {
++ case (HEADER_TYPE_IPv4):
++ p_Manip->reassmParams.hdr = HEADER_TYPE_IPv4;
++ p_Manip->opcode = HMAN_OC_IP_REASSEMBLY;
++ break;
++ case (HEADER_TYPE_IPv6):
++ p_Manip->reassmParams.hdr = HEADER_TYPE_IPv6;
++ p_Manip->opcode = HMAN_OC_IP_REASSEMBLY;
++ break;
++#if (DPAA_VERSION >= 11)
++ case (HEADER_TYPE_CAPWAP):
++ p_Manip->reassmParams.hdr = HEADER_TYPE_CAPWAP;
++ p_Manip->opcode = HMAN_OC_CAPWAP_REASSEMBLY;
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++ ("header for reassembly"));
++ }
++ break;
++ case e_FM_PCD_MANIP_FRAG:
++ if (p_ManipParams->h_NextManip)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++ ("next manip with fragmentation"));
++ switch (p_ManipParams->u.frag.hdr)
++ {
++ case (HEADER_TYPE_IPv4):
++ case (HEADER_TYPE_IPv6):
++ p_Manip->opcode = HMAN_OC_IP_FRAGMENTATION;
++ break;
++#if (DPAA_VERSION >= 11)
++ case (HEADER_TYPE_CAPWAP):
++ p_Manip->opcode = HMAN_OC_CAPWAP_FRAGMENTATION;
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++ ("header for fragmentation"));
++ }
++ p_Manip->muramAllocate = TRUE;
++ break;
++ case e_FM_PCD_MANIP_SPECIAL_OFFLOAD:
++ switch (p_ManipParams->u.specialOffload.type)
++ {
++ case (e_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC):
++ p_Manip->opcode = HMAN_OC_IPSEC_MANIP;
++ p_Manip->muramAllocate = TRUE;
++ break;
++#if (DPAA_VERSION >= 11)
++ case (e_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP):
++ p_Manip->opcode = HMAN_OC_CAPWAP_MANIP;
++ p_Manip->muramAllocate = TRUE;
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++ ("special offload type"));
++ }
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("manip type"));
++ }
++
++ return E_OK;
++}
++#endif /* not (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++
++static t_Error UpdateIndxStats(t_Handle h_FmPcd,
++ t_Handle h_FmPort,
++ t_FmPcdManip *p_Manip)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint32_t tmpReg32 = 0;
++ t_AdOfTypeContLookup *p_Ad;
++ t_FmPortGetSetCcParams fmPortGetSetCcParams;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++ if (p_Manip->h_FmPcd != h_FmPcd)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++                     ("PCD handle differs from the one used at initialization"));
++
++ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
++
++ if (!p_Manip->p_StatsTbl)
++ {
++
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNDN;
++ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_CC;
++ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ tmpReg32 = GET_UINT32(p_Ad->ccAdBase);
++
++ p_Manip->p_StatsTbl =
++ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ (uint32_t)p_Manip->owner * FM_PCD_MANIP_INDEXED_STATS_ENTRY_SIZE,
++ 4);
++ if (!p_Manip->p_StatsTbl)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Manipulation indexed statistics table"));
++
++ MemSet8(p_Manip->p_StatsTbl, 0, (uint32_t)(p_Manip->owner * 4));
++
++ tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Manip->p_StatsTbl) - p_FmPcd->physicalMuramBase);
++
++ if (p_Manip->cnia)
++ tmpReg32 |= FM_PCD_MANIP_INDEXED_STATS_CNIA;
++
++ tmpReg32 |= FM_PCD_MANIP_INDEXED_STATS_DPD;
++ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
++ }
++ else
++ {
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNDN;
++ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_CC;
++ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ return E_OK;
++}
++
++static t_Error RmvHdrTillSpecLocNOrInsrtIntFrmHdr(t_FmPcdManipHdrRmvParams *p_ManipParams, t_FmPcdManip *p_Manip)
++{
++ t_AdOfTypeContLookup *p_Ad;
++ uint32_t tmpReg32 = 0;
++ uint8_t prsArrayOffset = 0;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip,E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_ManipParams,E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++ if (p_Manip->rmv)
++ {
++ err = GetPrOffsetByHeaderOrField(&p_ManipParams->u.byHdr.u.fromStartByHdr.hdrInfo, &prsArrayOffset);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ tmpReg32 |= (uint32_t)prsArrayOffset << 24;
++ tmpReg32 |= HMAN_RMV_HDR;
++ }
++
++ if (p_Manip->insrt)
++ tmpReg32 |= HMAN_INSRT_INT_FRM_HDR;
++
++ tmpReg32 |= (uint32_t)HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR;
++
++ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
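++    /* pcAndOffsets now packs the parse-array offset of the removed header into
++     bits 31-24, the HMAN_RMV_HDR / HMAN_INSRT_INT_FRM_HDR flags when requested,
++     and the HMAN opcode in the low bits. */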
++
++ tmpReg32 = 0;
++ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
++
++ return E_OK;
++}
++
++static t_Error MvIntFrameHeaderFromFrameToBufferPrefix(t_FmPcdManip *p_Manip,
++ bool caamUsed)
++{
++ t_AdOfTypeContLookup *p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++ uint32_t tmpReg32 = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Ad, E_INVALID_HANDLE);
++
++ p_Manip->updateParams |= OFFSET_OF_PR | INTERNAL_CONTEXT_OFFSET;
++
++ tmpReg32 = 0;
++ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ *(uint32_t *)&p_Ad->ccAdBase = tmpReg32;
++
++ tmpReg32 = 0;
++ tmpReg32 |= HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX;
++ tmpReg32 |= (uint32_t)0x16 << 16;
++ *(uint32_t *)&p_Ad->pcAndOffsets = tmpReg32;
++
++ if (caamUsed)
++ *(uint32_t *)&p_Ad->gmask = 0xf0000000;
++
++ return E_OK;
++}
++
++static t_Error CapwapRmvDtlsHdr(t_FmPcd *p_FmPcd, t_FmPcdManip *p_Manip)
++{
++ t_AdOfTypeContLookup *p_Ad;
++ uint32_t tmpReg32 = 0;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++
++ tmpReg32 = 0;
++ tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST;
++ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
++
++ tmpReg32 = 0;
++ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++
++
++ if (p_Manip->h_Frag)
++ {
++ p_Manip->updateParams |= INTERNAL_CONTEXT_OFFSET;
++ tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Manip->h_Frag) - (p_FmPcd->physicalMuramBase));
++ }
++
++ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
++
++ return err;
++}
++
++static t_Error CapwapReassembly(t_CapwapReassemblyParams *p_ManipParams,
++ t_FmPcdManip *p_Manip,
++ t_FmPcd *p_FmPcd,
++ uint8_t poolId)
++{
++ t_Handle p_Table;
++ uint32_t tmpReg32 = 0;
++ int i = 0;
++ uint8_t log2Num;
++ uint8_t numOfSets;
++ uint32_t j = 0;
++ uint32_t bitFor1Micro;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ if (!p_FmPcd->h_Hc)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("hc port has to be initialized in this mode"));
++ if (!POWER_OF_2(p_ManipParams->timeoutRoutineRequestTime))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("timeoutRoutineRequestTime has to be power of 2"));
++ if (!POWER_OF_2(p_ManipParams->maxNumFramesInProcess))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("maxNumFramesInProcess has to be power of 2"));
++ if (!p_ManipParams->timeoutRoutineRequestTime && p_ManipParams->timeoutThresholdForReassmProcess)
++        DBG(WARNING, ("if timeoutRoutineRequestTime is 0, timeoutThresholdForReassmProcess has no effect"));
++ if (p_ManipParams->numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH)
++ {
++ if ((p_ManipParams->maxNumFramesInProcess < 4) ||
++ (p_ManipParams->maxNumFramesInProcess > 512))
++            RETURN_ERROR(MAJOR,E_INVALID_VALUE, ("when numOfFramesPerHashEntry = e_FM_PCD_MANIP_FOUR_WAYS_HASH, maxNumFramesInProcess has to be in the range 4-512"));
++ }
++ else
++ {
++ if ((p_ManipParams->maxNumFramesInProcess < 8) ||
++ (p_ManipParams->maxNumFramesInProcess > 2048))
++            RETURN_ERROR(MAJOR,E_INVALID_VALUE, ("when numOfFramesPerHashEntry = e_FM_PCD_MANIP_EIGHT_WAYS_HASH, maxNumFramesInProcess has to be in the range 8-2048"));
++ }
++
++ bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm);
++ if (bitFor1Micro == 0)
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Timestamp scale"));
++
++ p_Manip->updateParams |= (NUM_OF_TASKS | OFFSET_OF_PR | OFFSET_OF_DATA | HW_PORT_ID);
++
++ p_Manip->h_Frag = (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ FM_PCD_MANIP_CAPWAP_REASM_TABLE_SIZE,
++ FM_PCD_MANIP_CAPWAP_REASM_TABLE_ALIGN);
++ if (!p_Manip->h_Frag)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc CAPWAP reassembly parameters table"));
++
++ MemSet8(p_Manip->h_Frag, 0, FM_PCD_MANIP_CAPWAP_REASM_TABLE_SIZE);
++
++ p_Table = (t_CapwapReasmPram *)p_Manip->h_Frag;
++
++ p_Manip->capwapFragParams.p_AutoLearnHashTbl =
++ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ (uint32_t)(p_ManipParams->maxNumFramesInProcess * 2 * FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE),
++ FM_PCD_MANIP_CAPWAP_REASM_TABLE_ALIGN);
++
++ if (!p_Manip->capwapFragParams.p_AutoLearnHashTbl)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,("MURAM alloc for CAPWAP automatic learning hash table"));
++
++ MemSet8(p_Manip->capwapFragParams.p_AutoLearnHashTbl, 0, (uint32_t)(p_ManipParams->maxNumFramesInProcess * 2 * FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE));
++
++ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->capwapFragParams.p_AutoLearnHashTbl) - p_FmPcd->physicalMuramBase);
++
++ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->autoLearnHashTblPtr, tmpReg32);
++
++ tmpReg32 = 0;
++ if (p_ManipParams->timeOutMode == e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES)
++ tmpReg32 |= FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_BETWEEN_FRAMES;
++ if (p_ManipParams->haltOnDuplicationFrag)
++ tmpReg32 |= FM_PCD_MANIP_CAPWAP_REASM_HALT_ON_DUPLICATE_FRAG;
++ if (p_ManipParams->numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH)
++ {
++ i = 8;
++ tmpReg32 |= FM_PCD_MANIP_CAPWAP_REASM_AUTOMATIC_LEARNIN_HASH_8_WAYS;
++ }
++ else
++ i = 4;
++
++ numOfSets = (uint8_t)((p_ManipParams->maxNumFramesInProcess * 2) / i);
++ LOG2(numOfSets, log2Num);
++ tmpReg32 |= (uint32_t)(log2Num - 1) << 24;
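++    /* Example (hypothetical values): maxNumFramesInProcess = 512 with 8-way hashing
++     gives numOfSets = (512 * 2) / 8 = 128 and LOG2(128) = 7, so the value OR'd
++     into the mode word above is (7 - 1) << 24 = 0x06000000. */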
++
++ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->mode, tmpReg32);
++
++ for (j=0; j<p_ManipParams->maxNumFramesInProcess*2; j++)
++ if (((j / i) % 2)== 0)
++ WRITE_UINT32(*(uint32_t *)PTR_MOVE(p_Manip->capwapFragParams.p_AutoLearnHashTbl, j * FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE), 0x80000000);
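++    /* The loop above sets bit 31 on every entry of each even-indexed set
++     (entries j with (j / i) % 2 == 0); together with the 0x8000 set indexes
++     written just below, this appears to pre-partition the sets between the
++     FMan controller engines. */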
++
++ tmpReg32 = 0x00008000;
++ tmpReg32 |= (uint32_t)poolId << 16;
++ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->bufferPoolIdAndRisc1SetIndexes, tmpReg32);
++ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->risc23SetIndexes, 0x80008000);
++ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->risc4SetIndexesAndExtendedStatsTblPtr, 0x80000000);
++
++ p_Manip->capwapFragParams.maxNumFramesInProcess = p_ManipParams->maxNumFramesInProcess;
++
++ p_Manip->capwapFragParams.sgBpid = poolId;
++
++ p_Manip->capwapFragParams.fqidForTimeOutFrames = p_ManipParams->fqidForTimeOutFrames;
++ p_Manip->capwapFragParams.timeoutRoutineRequestTime = p_ManipParams->timeoutRoutineRequestTime;
++ p_Manip->capwapFragParams.bitFor1Micro = bitFor1Micro;
++
++ tmpReg32 = 0;
++ tmpReg32 |= (((uint32_t)1<<p_Manip->capwapFragParams.bitFor1Micro) * p_ManipParams->timeoutThresholdForReassmProcess);
++ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->expirationDelay, tmpReg32);
++
++ return E_OK;
++}
++
++static t_Error CapwapFragmentation(t_CapwapFragmentationParams *p_ManipParams,
++ t_FmPcdManip *p_Manip,
++ t_FmPcd *p_FmPcd,
++ uint8_t poolId)
++{
++ t_AdOfTypeContLookup *p_Ad;
++ uint32_t tmpReg32 = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
++
++ p_Manip->updateParams |= OFFSET_OF_DATA;
++
++ p_Manip->frag = TRUE;
++
++ p_Manip->h_Frag = (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ FM_PCD_CC_AD_ENTRY_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_Manip->h_Frag)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP fragmentation table descriptor"));
++
++ MemSet8(p_Manip->h_Frag, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Frag;
++
++ tmpReg32 = 0;
++ tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_FRAGMENTATION;
++
++ if (p_ManipParams->headerOptionsCompr)
++ tmpReg32 |= FM_PCD_MANIP_CAPWAP_FRAG_COMPR_OPTION_FIELD_EN;
++ tmpReg32 |= ((uint32_t)poolId << 8);
++ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
++
++ tmpReg32 = 0;
++ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
++
++ p_Manip->sizeForFragmentation = p_ManipParams->sizeForFragmentation;
++ p_Manip->capwapFragParams.sgBpid = poolId;
++
++ return E_OK;
++}
++
++static t_Error IndxStats(t_FmPcdStatsParams *p_StatsParams,t_FmPcdManip *p_Manip,t_FmPcd *p_FmPcd)
++{
++ t_AdOfTypeContLookup *p_Ad;
++ uint32_t tmpReg32 = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
++
++ UNUSED(p_FmPcd);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++
++ tmpReg32 = 0;
++ tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_INDEXED_STATS;
++ if (p_StatsParams->type == e_FM_PCD_STATS_PER_FLOWID)
++ tmpReg32 |= (uint32_t)0x16 << 16;
++ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
++
++ tmpReg32 = 0;
++ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
++
++ return E_OK;
++}
++
++static t_Error InsrtHdrByTempl(t_FmPcdManipHdrInsrtParams *p_ManipParams, t_FmPcdManip *p_Manip, t_FmPcd *p_FmPcd)
++{
++ t_FmPcdManipHdrInsrtByTemplateParams *p_InsrtByTemplate = &p_ManipParams->u.byTemplate;
++ uint8_t tmpReg8 = 0xff;
++ t_AdOfTypeContLookup *p_Ad;
++ bool ipModify = FALSE;
++ uint32_t tmpReg32 = 0, tmpRegNia = 0;
++ uint16_t tmpReg16 = 0;
++ t_Error err = E_OK;
++ uint8_t extraAddedBytes = 0, blockSize = 0, extraAddedBytesAlignedToBlockSize = 0, log2Num = 0;
++ uint8_t *p_Template = NULL;
++
++ SANITY_CHECK_RETURN_ERROR(p_ManipParams,E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_Manip,E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd,E_NULL_POINTER);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++ if (p_Manip->insrt)
++ {
++ if ((!p_InsrtByTemplate->size && p_InsrtByTemplate->modifyOuterIp) ||
++ (!p_InsrtByTemplate->size && p_InsrtByTemplate->modifyOuterVlan))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : asking for header template modifications with no template for insertion (template size)"));
++
++ if (p_InsrtByTemplate->size && p_InsrtByTemplate->modifyOuterIp && (p_InsrtByTemplate->size <= p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : size of template < ipOuterOffset"));
++
++ if (p_InsrtByTemplate->size > 128)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Size of header template for insertion can not be more than 128"));
++
++ if (p_InsrtByTemplate->size)
++ {
++ p_Manip->p_Template = (uint8_t *)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
++ p_InsrtByTemplate->size,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if(!p_Manip->p_Template)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation in MURAM FAILED"));
++
++ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->p_Template) - (p_FmPcd->physicalMuramBase));
++ tmpReg32 |= (uint32_t)p_InsrtByTemplate->size << 24;
++ *(uint32_t *)&p_Ad->matchTblPtr = tmpReg32;
++ }
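++        /* matchTblPtr now packs the template's MURAM offset (relative to
++         physicalMuramBase) in the low bits and its size in bits 31-24. */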
++
++ tmpReg32 = 0;
++
++ p_Template = (uint8_t *)XX_Malloc(p_InsrtByTemplate->size * sizeof(uint8_t));
++
++ if (!p_Template)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("XX_Malloc allocation FAILED"));
++
++ memcpy(p_Template, p_InsrtByTemplate->hdrTemplate, p_InsrtByTemplate->size * sizeof(uint8_t));
++
++ if (p_InsrtByTemplate->modifyOuterIp)
++ {
++ ipModify = TRUE;
++
++ tmpReg8 = (uint8_t)p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset];
++
++ if((tmpReg8 & 0xf0) == 0x40)
++ tmpReg8 = 4;
++ else if((tmpReg8 & 0xf0) == 0x60)
++ tmpReg8 = 6;
++ else
++ tmpReg8 = 0xff;
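++            /* The first byte of an IP header carries the version in its top nibble,
++             so 0x4x identifies IPv4 and 0x6x IPv6; anything else is rejected below. */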
++
++ if (tmpReg8 != 0xff)
++ {
++ if(p_InsrtByTemplate->modifyOuterIpParams.dscpEcn & 0xff00)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : IPV4 present in header template, dscpEcn has to be only 1 byte"));
++ if(p_InsrtByTemplate->modifyOuterIpParams.recalculateLength)
++ {
++
++ if((p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedAlignedToBlockSize + p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedNotAlignedToBlockSize) > 255)
++                        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("extra bytes added cannot exceed 255"));
++ extraAddedBytes = (uint8_t) (p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedAlignedToBlockSize + p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedNotAlignedToBlockSize);
++ blockSize = p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.blockSize;
++ extraAddedBytesAlignedToBlockSize = p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedAlignedToBlockSize;
++                    /* IP header template - IP totalLength:
++                       (1st byte) extraByteForIp = headerTemplateSize - ipOffset + insertedBytesAfterThisStage;
++                       in the SEC case, insertedBytesAfterThisStage = SEC trailer (21/31) + header (13);
++                       (2nd byte) extraByteForIp = headerTemplateSize - ipOffset + insertedBytesAfterThisStage */
++ }
++ if (blockSize)
++ {
++ if (!POWER_OF_2(blockSize))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("inputFrmPaddingUpToBlockSize has to be power of 2"));
++ }
++
++ }
++ if (tmpReg8 == 4)
++ {
++ if ((IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP + p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset) > p_InsrtByTemplate->size)
++                    RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : IP present in header template, user asked for IP modifications but ipOffset + ipTotalLengthFieldOffset in the header template is bigger than the template size"));
++
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_DSCECN_FIELD_OFFSET_FROM_IP] = (uint8_t)p_InsrtByTemplate->modifyOuterIpParams.dscpEcn;
++
++ if (blockSize)
++ blockSize -= 1;
++
++ if ((p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes) > 255)
++                    RETURN_ERROR(MAJOR, E_INVALID_STATE, ("p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes must not exceed 255"));
++
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_TOTALLENGTH_FIELD_OFFSET_FROM_IP + 1] = blockSize; // IPV6 - in AD instead of SEQ IND
++                p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_TOTALLENGTH_FIELD_OFFSET_FROM_IP] = (uint8_t)(p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes); // for IPv6, decrement an additional 40 bytes (the IPv6 header size)
++
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_ID_FIELD_OFFSET_FROM_IP] = 0x00;
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_ID_FIELD_OFFSET_FROM_IP + 1] = extraAddedBytesAlignedToBlockSize;
++
++ /*IP header template - relevant only for ipv4 CheckSum = 0*/
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP] = 0x00;
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP + 1] = 0x00;
++
++ /*UDP checksum has to be 0*/
++ if (p_InsrtByTemplate->modifyOuterIpParams.udpPresent)
++ {
++ if ((p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + UDP_CHECKSUM_FIELD_SIZE) > p_InsrtByTemplate->size)
++                        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : UDP present according to user but (UDP offset + UDP header size) exceeds the size of the header template"));
++
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP ] = 0x00;
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + 1] = 0x00;
++
++ }
++
++ if (p_InsrtByTemplate->modifyOuterIpParams.ipIdentGenId > 7)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("ipIdentGenId has to be one out of 8 sequence number generators (0 - 7) for IP identification field"));
++
++ tmpRegNia |= (uint32_t)p_InsrtByTemplate->modifyOuterIpParams.ipIdentGenId<<24;
++ }
++ else if (tmpReg8 == 6)
++ {
++ /*TODO - add check for maximum value of blockSize;*/
++ if (blockSize)
++ LOG2(blockSize, log2Num);
++ tmpRegNia |= (uint32_t)log2Num << 24;
++
++                // for IPv6, decrement an additional 40 bytes: the IPv6 header size is not included in payloadLength
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv6_PAYLOAD_LENGTH_OFFSET_FROM_IP] = (uint8_t)(p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes - 40);
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv6_PAYLOAD_LENGTH_OFFSET_FROM_IP + 1] = extraAddedBytesAlignedToBlockSize;
++ if (p_InsrtByTemplate->modifyOuterIpParams.udpPresent)
++ {
++ if ((p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + UDP_CHECKSUM_FIELD_SIZE) > p_InsrtByTemplate->size)
++                        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : UDP present according to user but (UDP offset + UDP header size) exceeds the size of the header template"));
++ if (p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv6_NEXT_HEADER_OFFSET_FROM_IP] != 0x88)
++                        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Only IPv6/UDP-Lite is supported"));
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_LENGTH_FIELD_OFFSET_FROM_UDP] = 0x00;
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_LENGTH_FIELD_OFFSET_FROM_UDP + 1] = 0x08;
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP] = 0x00;
++ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + 1] = 0x00;
++ }
++ }
++ else
++                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("unsupported IP version in header template (only IPv4 and IPv6)"));
++ }
++
++ tmpReg32 = tmpReg16 = tmpReg8 = 0;
++ /*TODO - check it*/
++ if (p_InsrtByTemplate->modifyOuterVlan)
++ {
++ if (p_InsrtByTemplate->modifyOuterVlanParams.vpri & ~0x07)
++            RETURN_ERROR(MAJOR, E_INVALID_STATE,("Inconsistent parameters : user asked for VLAN modifications but VPRI exceeds 3 bits"));
++
++ memcpy(&tmpReg16, &p_Template[VLAN_TAG_FIELD_OFFSET_FROM_ETH], 2*(sizeof(uint8_t)));
++ if ((tmpReg16 != 0x9100) && (tmpReg16!= 0x9200) && (tmpReg16 != 0x8100))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,("Inconsistent parameters : user asked for VLAN modifications but Tag Protocol identifier is not VLAN "));
++
++ memcpy(&tmpReg8, &p_Template[14],1*(sizeof(uint8_t)));
++ tmpReg8 &= 0x1f;
++ tmpReg8 |= (uint8_t)(p_InsrtByTemplate->modifyOuterVlanParams.vpri << 5);
++
++ p_Template[14] = tmpReg8;
++ }
++
++ MemCpy8(p_Manip->p_Template, p_Template, p_InsrtByTemplate->size);
++
++ XX_Free(p_Template);
++ }
++
++ tmpReg32 = 0;
++ if (p_Manip->h_Frag)
++ {
++ tmpRegNia |= (uint32_t)(XX_VirtToPhys(p_Manip->h_Frag) - (p_FmPcd->physicalMuramBase));
++ tmpReg32 |= (uint32_t)p_Manip->sizeForFragmentation << 16;
++ }
++ else
++ tmpReg32 = 0xffff0000;
++
++ if (ipModify)
++ tmpReg32 |= (uint32_t)p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset << 8;
++ else
++ tmpReg32 |= (uint32_t)0x0000ff00;
++
++ tmpReg32 |= (uint32_t)HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER;
++ *(uint32_t *)&p_Ad->pcAndOffsets = tmpReg32;
++
++ tmpRegNia |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ *(uint32_t *)&p_Ad->ccAdBase = tmpRegNia;
++
++ return err;
++}
++
++static t_Error CheckStatsParamsAndSetType(t_FmPcdManip *p_Manip, t_FmPcdStatsParams *p_StatsParams)
++{
++
++ switch (p_StatsParams->type)
++ {
++ case (e_FM_PCD_STATS_PER_FLOWID):
++ p_Manip->opcode = HMAN_OC_CAPWAP_INDEXED_STATS;
++ p_Manip->muramAllocate = TRUE;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported statistics type"));
++ }
++
++ return E_OK;
++}
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++
++static t_Error FillReassmManipParams(t_FmPcdManip *p_Manip, e_NetHeaderType hdr)
++{
++ t_AdOfTypeContLookup *p_Ad;
++ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
++ uint32_t tmpReg32;
++ t_Error err = E_OK;
++
++ /* Creates the Reassembly Parameters table. It contains parameters that are specific to either the IPv4 reassembly
++ function or to the IPv6 reassembly function. If both IPv4 reassembly and IPv6 reassembly are required, then
++ two separate IP Reassembly Parameter tables are required.*/
++ if ((err = CreateReassTable(p_Manip, hdr)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ /* Sets the first Ad register (ccAdBase) - Action Descriptor Type and Pointer to the Reassembly Parameters Table offset from MURAM*/
++ tmpReg32 = 0;
++ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++
++ /* Gets the required Action descriptor table pointer */
++ switch (hdr)
++ {
++ case HEADER_TYPE_IPv4:
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->reassmParams.ip.h_Ipv4Ad;
++ tmpReg32 |= (uint32_t)(XX_VirtToPhys(
++ p_Manip->reassmParams.ip.p_Ipv4ReassTbl)
++ - (p_FmPcd->physicalMuramBase));
++ break;
++ case HEADER_TYPE_IPv6:
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->reassmParams.ip.h_Ipv6Ad;
++ tmpReg32 |= (uint32_t)(XX_VirtToPhys(
++ p_Manip->reassmParams.ip.p_Ipv6ReassTbl)
++ - (p_FmPcd->physicalMuramBase));
++ break;
++ case HEADER_TYPE_CAPWAP:
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->reassmParams.capwap.h_Ad;
++ tmpReg32 |= (uint32_t)(XX_VirtToPhys(
++ p_Manip->reassmParams.capwap.p_ReassTbl)
++ - (p_FmPcd->physicalMuramBase));
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("header type"));
++ }
++
++ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
++
++ /* Sets the second Ad register (matchTblPtr) - Buffer pool ID (BPID for V2) and Scatter/Gather table offset*/
++ /* mark the Scatter/Gather table offset to be set later on when the port will be known */
++ p_Manip->updateParams = (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK);
++
++ if ((hdr == HEADER_TYPE_IPv6) || (hdr == HEADER_TYPE_IPv4))
++ {
++#if (DPAA_VERSION == 10)
++ tmpReg32 = (uint32_t)(p_Manip->reassmParams.sgBpid << 8);
++ WRITE_UINT32(p_Ad->matchTblPtr, tmpReg32);
++#endif /* (DPAA_VERSION == 10) */
++#if (DPAA_VERSION >= 11)
++ if (p_Manip->reassmParams.ip.nonConsistentSpFqid != 0)
++ {
++ tmpReg32 = FM_PCD_AD_NCSPFQIDM_MASK
++ | (uint32_t)(p_Manip->reassmParams.ip.nonConsistentSpFqid);
++ WRITE_UINT32(p_Ad->gmask, tmpReg32);
++ }
++#endif /* (DPAA_VERSION >= 11) */
++ /* Sets the third Ad register (pcAndOffsets)- IP Reassemble Operation Code*/
++ tmpReg32 = 0;
++ tmpReg32 |= (uint32_t)HMAN_OC_IP_REASSEMBLY;
++ }
++#if (DPAA_VERSION >= 11)
++ else
++ if (hdr == HEADER_TYPE_CAPWAP)
++ {
++ tmpReg32 = 0;
++ tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_REASSEMBLY;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
++
++ p_Manip->reassm = TRUE;
++
++ return E_OK;
++}
++
++static t_Error SetIpv4ReassmManip(t_FmPcdManip *p_Manip)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
++
++    /* Allocation of IPv4 Action descriptor */
++ p_Manip->reassmParams.ip.h_Ipv4Ad = (t_Handle)XX_MallocSmart(
++ FM_PCD_CC_AD_ENTRY_SIZE, p_Manip->reassmParams.dataMemId,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_Manip->reassmParams.ip.h_Ipv4Ad)
++ {
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("Allocation of IPv4 table descriptor"));
++ }
++
++ memset(p_Manip->reassmParams.ip.h_Ipv4Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* Fill reassembly manipulation parameter in the IP Reassembly Action Descriptor */
++ return FillReassmManipParams(p_Manip, HEADER_TYPE_IPv4);
++}
++
++static t_Error SetIpv6ReassmManip(t_FmPcdManip *p_Manip)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
++
++    /* Allocation of IPv6 Action descriptor */
++ p_Manip->reassmParams.ip.h_Ipv6Ad = (t_Handle)XX_MallocSmart(
++ FM_PCD_CC_AD_ENTRY_SIZE, p_Manip->reassmParams.dataMemId,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_Manip->reassmParams.ip.h_Ipv6Ad)
++ {
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("Allocation of IPv6 table descriptor"));
++ }
++
++ memset(p_Manip->reassmParams.ip.h_Ipv6Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* Fill reassembly manipulation parameter in the IP Reassembly Action Descriptor */
++ return FillReassmManipParams(p_Manip, HEADER_TYPE_IPv6);
++}
++
++static t_Error IpReassembly(t_FmPcdManipReassemParams *p_ManipReassmParams,
++ t_FmPcdManip *p_Manip)
++{
++ uint32_t maxSetNumber = 10000;
++ t_FmPcdManipReassemIpParams reassmManipParams =
++ p_ManipReassmParams->u.ipReassem;
++ t_Error res;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(((t_FmPcd *)p_Manip->h_FmPcd)->h_Hc,
++ E_INVALID_HANDLE);
++
++    /* Validate the user's parameters. */
++ if ((reassmManipParams.timeoutThresholdForReassmProcess < 1000)
++ || (reassmManipParams.timeoutThresholdForReassmProcess > 8000000))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("timeoutThresholdForReassmProcess should be 1msec - 8sec"));
++ /* It is recommended that the total number of entries in this table (number of sets * number of ways)
++ will be twice the number of frames that are expected to be reassembled simultaneously.*/
++ if (reassmManipParams.maxNumFramesInProcess
++ > (reassmManipParams.maxNumFramesInProcess * maxSetNumber / 2))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++                ("maxNumFramesInProcess has to be less than (maximum set number * number of ways / 2)"));
++
++ if ((p_ManipReassmParams->hdr == HEADER_TYPE_IPv6)
++ && (reassmManipParams.minFragSize[1] < 256))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("minFragSize[1] must be >= 256"));
++
++ /* Saves user's reassembly manipulation parameters */
++ p_Manip->reassmParams.ip.relativeSchemeId[0] =
++ reassmManipParams.relativeSchemeId[0];
++ p_Manip->reassmParams.ip.relativeSchemeId[1] =
++ reassmManipParams.relativeSchemeId[1];
++ p_Manip->reassmParams.ip.numOfFramesPerHashEntry[0] =
++ reassmManipParams.numOfFramesPerHashEntry[0];
++ p_Manip->reassmParams.ip.numOfFramesPerHashEntry[1] =
++ reassmManipParams.numOfFramesPerHashEntry[1];
++ p_Manip->reassmParams.ip.minFragSize[0] = reassmManipParams.minFragSize[0];
++ p_Manip->reassmParams.ip.minFragSize[1] = reassmManipParams.minFragSize[1];
++ p_Manip->reassmParams.maxNumFramesInProcess =
++ reassmManipParams.maxNumFramesInProcess;
++ p_Manip->reassmParams.timeOutMode = reassmManipParams.timeOutMode;
++ p_Manip->reassmParams.fqidForTimeOutFrames =
++ reassmManipParams.fqidForTimeOutFrames;
++ p_Manip->reassmParams.timeoutThresholdForReassmProcess =
++ reassmManipParams.timeoutThresholdForReassmProcess;
++ p_Manip->reassmParams.dataMemId = reassmManipParams.dataMemId;
++ p_Manip->reassmParams.dataLiodnOffset = reassmManipParams.dataLiodnOffset;
++#if (DPAA_VERSION == 10)
++ p_Manip->reassmParams.sgBpid = reassmManipParams.sgBpid;
++#endif /* (DPAA_VERSION == 10) */
++#if (DPAA_VERSION >= 11)
++ if (reassmManipParams.nonConsistentSpFqid != 0)
++ {
++ p_Manip->reassmParams.ip.nonConsistentSpFqid =
++ reassmManipParams.nonConsistentSpFqid;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ /* Creates and initializes the IP Reassembly common parameter table */
++ CreateReassCommonTable(p_Manip);
++
++ /* Creation of IPv4 reassembly manipulation */
++ if ((p_Manip->reassmParams.hdr == HEADER_TYPE_IPv6)
++ || (p_Manip->reassmParams.hdr == HEADER_TYPE_IPv4))
++ {
++ res = SetIpv4ReassmManip(p_Manip);
++ if (res != E_OK)
++ return res;
++ }
++
++ /* Creation of IPv6 reassembly manipulation */
++ if (p_Manip->reassmParams.hdr == HEADER_TYPE_IPv6)
++ {
++ res = SetIpv6ReassmManip(p_Manip);
++ if (res != E_OK)
++ return res;
++ }
++
++ return E_OK;
++}
++
++static void setIpReassmSchemeParams(t_FmPcd* p_FmPcd,
++ t_FmPcdKgSchemeParams *p_Scheme,
++ t_Handle h_CcTree, bool ipv4,
++ uint8_t groupId)
++{
++ uint32_t j;
++ uint8_t res;
++
++ /* Configures scheme's network environment parameters */
++ p_Scheme->netEnvParams.numOfDistinctionUnits = 2;
++ if (ipv4)
++ res = FmPcdNetEnvGetUnitId(
++ p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv),
++ HEADER_TYPE_IPv4, FALSE, 0);
++ else
++ res = FmPcdNetEnvGetUnitId(
++ p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv),
++ HEADER_TYPE_IPv6, FALSE, 0);
++ ASSERT_COND(res != FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
++ p_Scheme->netEnvParams.unitIds[0] = res;
++
++ res = FmPcdNetEnvGetUnitId(
++ p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv),
++ HEADER_TYPE_USER_DEFINED_SHIM2, FALSE, 0);
++ ASSERT_COND(res != FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
++ p_Scheme->netEnvParams.unitIds[1] = res;
++
++ /* Configures scheme's next engine parameters*/
++ p_Scheme->nextEngine = e_FM_PCD_CC;
++ p_Scheme->kgNextEngineParams.cc.h_CcTree = h_CcTree;
++ p_Scheme->kgNextEngineParams.cc.grpId = groupId;
++ p_Scheme->useHash = TRUE;
++
++ /* Configures scheme's key*/
++ if (ipv4 == TRUE)
++ {
++ p_Scheme->keyExtractAndHashParams.numOfUsedExtracts = 4;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].type =
++ e_FM_PCD_EXTRACT_BY_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.type =
++ e_FM_PCD_EXTRACT_FULL_FIELD;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.hdr =
++ HEADER_TYPE_IPv4;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.extractByHdrType.fullField.ipv4 =
++ NET_HEADER_FIELD_IPv4_DST_IP;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].type =
++ e_FM_PCD_EXTRACT_BY_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.type =
++ e_FM_PCD_EXTRACT_FULL_FIELD;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.hdr =
++ HEADER_TYPE_IPv4;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.extractByHdrType.fullField.ipv4 =
++ NET_HEADER_FIELD_IPv4_SRC_IP;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].type =
++ e_FM_PCD_EXTRACT_BY_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.type =
++ e_FM_PCD_EXTRACT_FULL_FIELD;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.hdr =
++ HEADER_TYPE_IPv4;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.extractByHdrType.fullField.ipv4 =
++ NET_HEADER_FIELD_IPv4_PROTO;
++ p_Scheme->keyExtractAndHashParams.extractArray[3].type =
++ e_FM_PCD_EXTRACT_BY_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.hdr =
++ HEADER_TYPE_IPv4;
++ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.type =
++ e_FM_PCD_EXTRACT_FROM_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.ignoreProtocolValidation =
++ FALSE;
++ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.extractByHdrType.fromHdr.size =
++ 2;
++ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.extractByHdrType.fromHdr.offset =
++ 4;
++ }
++ else /* IPv6 */
++ {
++ p_Scheme->keyExtractAndHashParams.numOfUsedExtracts = 3;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].type =
++ e_FM_PCD_EXTRACT_BY_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.type =
++ e_FM_PCD_EXTRACT_FULL_FIELD;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.hdr =
++ HEADER_TYPE_IPv6;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.extractByHdrType.fullField.ipv6 =
++ NET_HEADER_FIELD_IPv6_DST_IP;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].type =
++ e_FM_PCD_EXTRACT_BY_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.type =
++ e_FM_PCD_EXTRACT_FULL_FIELD;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.hdr =
++ HEADER_TYPE_IPv6;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.extractByHdrType.fullField.ipv6 =
++ NET_HEADER_FIELD_IPv6_SRC_IP;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].type =
++ e_FM_PCD_EXTRACT_BY_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.hdr =
++ HEADER_TYPE_USER_DEFINED_SHIM2;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.type =
++ e_FM_PCD_EXTRACT_FROM_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.extractByHdrType.fromHdr.size =
++ 4;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.extractByHdrType.fromHdr.offset =
++ 4;
++ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.ignoreProtocolValidation =
++ TRUE;
++ }
++
++ p_Scheme->keyExtractAndHashParams.privateDflt0 = 0x01020304;
++ p_Scheme->keyExtractAndHashParams.privateDflt1 = 0x11121314;
++ p_Scheme->keyExtractAndHashParams.numOfUsedDflts =
++ FM_PCD_KG_NUM_OF_DEFAULT_GROUPS;
++ for (j = 0; j < FM_PCD_KG_NUM_OF_DEFAULT_GROUPS; j++)
++ {
++ p_Scheme->keyExtractAndHashParams.dflts[j].type =
++ (e_FmPcdKgKnownFieldsDfltTypes)j; /* all types */
++ p_Scheme->keyExtractAndHashParams.dflts[j].dfltSelect =
++ e_FM_PCD_KG_DFLT_GBL_0;
++ }
++}
++
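++/* Reads the reassembly statistics directly from the MURAM-resident tables:
++ * the shared counters come from the common table, the per-protocol counters
++ * from the IPv4/IPv6 tables (specificHdrStatistics[0] is IPv4,
++ * specificHdrStatistics[1] is IPv6). */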
++static t_Error IpReassemblyStats(t_FmPcdManip *p_Manip,
++ t_FmPcdManipReassemIpStats *p_Stats)
++{
++ ASSERT_COND(p_Manip);
++ ASSERT_COND(p_Stats);
++ ASSERT_COND(p_Manip->reassmParams.p_ReassCommonTbl);
++
++ p_Stats->timeout =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalTimeOutCounter);
++ p_Stats->rfdPoolBusy =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalRfdPoolBusyCounter);
++ p_Stats->internalBufferBusy =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalInternalBufferBusy);
++ p_Stats->externalBufferBusy =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalExternalBufferBusy);
++ p_Stats->sgFragments =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalSgFragmentCounter);
++ p_Stats->dmaSemaphoreDepletion =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalDmaSemaphoreDepletionCounter);
++#if (DPAA_VERSION >= 11)
++ p_Stats->nonConsistentSp =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalNCSPCounter);
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (p_Manip->reassmParams.ip.p_Ipv4ReassTbl)
++ {
++ p_Stats->specificHdrStatistics[0].successfullyReassembled =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalSuccessfullyReasmFramesCounter);
++ p_Stats->specificHdrStatistics[0].validFragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalValidFragmentCounter);
++ p_Stats->specificHdrStatistics[0].processedFragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalProcessedFragCounter);
++ p_Stats->specificHdrStatistics[0].malformedFragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalMalformdFragCounter);
++ p_Stats->specificHdrStatistics[0].autoLearnBusy =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalSetBusyCounter);
++ p_Stats->specificHdrStatistics[0].discardedFragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalDiscardedFragsCounter);
++ p_Stats->specificHdrStatistics[0].moreThan16Fragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalMoreThan16FramesCounter);
++ }
++ if (p_Manip->reassmParams.ip.p_Ipv6ReassTbl)
++ {
++ p_Stats->specificHdrStatistics[1].successfullyReassembled =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalSuccessfullyReasmFramesCounter);
++ p_Stats->specificHdrStatistics[1].validFragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalValidFragmentCounter);
++ p_Stats->specificHdrStatistics[1].processedFragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalProcessedFragCounter);
++ p_Stats->specificHdrStatistics[1].malformedFragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalMalformdFragCounter);
++ p_Stats->specificHdrStatistics[1].autoLearnBusy =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalSetBusyCounter);
++ p_Stats->specificHdrStatistics[1].discardedFragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalDiscardedFragsCounter);
++ p_Stats->specificHdrStatistics[1].moreThan16Fragments =
++ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalMoreThan16FramesCounter);
++ }
++ return E_OK;
++}
++
++static t_Error IpFragmentationStats(t_FmPcdManip *p_Manip,
++ t_FmPcdManipFragIpStats *p_Stats)
++{
++ t_AdOfTypeContLookup *p_Ad;
++
++ ASSERT_COND(p_Manip);
++ ASSERT_COND(p_Stats);
++ ASSERT_COND(p_Manip->h_Ad);
++ ASSERT_COND(p_Manip->fragParams.p_Frag);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++
++ p_Stats->totalFrames = GET_UINT32(p_Ad->gmask);
++ p_Stats->fragmentedFrames = GET_UINT32(p_Manip->fragParams.p_Frag->ccAdBase)
++ & 0x00ffffff;
++ p_Stats->generatedFragments =
++ GET_UINT32(p_Manip->fragParams.p_Frag->matchTblPtr);
++
++ return E_OK;
++}
++
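++/* Builds the IP fragmentation Action Descriptor in MURAM. The AD words set
++ * below are: pcAndOffsets (operation code plus, when enabled, the
++ * scatter/gather BPid), ccAdBase (descriptor type and the don't-fragment
++ * action) and gmask (MURAM offset of the shared fragment-ID counter, plus
++ * the scratch BPid on DPAA 1.0). */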
++static t_Error IpFragmentation(t_FmPcdManipFragIpParams *p_ManipParams,
++ t_FmPcdManip *p_Manip)
++{
++ uint32_t pcAndOffsetsReg = 0, ccAdBaseReg = 0, gmaskReg = 0;
++ t_FmPcd *p_FmPcd;
++#if (DPAA_VERSION == 10)
++ t_Error err = E_OK;
++#endif /* (DPAA_VERSION == 10) */
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_ManipParams->sizeForFragmentation != 0xFFFF,
++ E_INVALID_VALUE);
++
++ p_FmPcd = p_Manip->h_FmPcd;
++ /* Allocation of fragmentation Action Descriptor */
++ p_Manip->fragParams.p_Frag = (t_AdOfTypeContLookup *)FM_MURAM_AllocMem(
++ p_FmPcd->h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_Manip->fragParams.p_Frag)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM alloc for Fragmentation table descriptor"));
++ MemSet8(p_Manip->fragParams.p_Frag, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* Prepare the third Ad register (pcAndOffsets)- OperationCode */
++ pcAndOffsetsReg = (uint32_t)HMAN_OC_IP_FRAGMENTATION;
++
++ /* Prepare the first Ad register (ccAdBase) - Don't frag action and Action descriptor type*/
++ ccAdBaseReg = FM_PCD_AD_CONT_LOOKUP_TYPE;
++ ccAdBaseReg |= (p_ManipParams->dontFragAction
++ << FM_PCD_MANIP_IP_FRAG_DF_SHIFT);
++
++
++ /* Set Scatter/Gather BPid */
++ if (p_ManipParams->sgBpidEn)
++ {
++ ccAdBaseReg |= FM_PCD_MANIP_IP_FRAG_SG_BDID_EN;
++ pcAndOffsetsReg |= ((p_ManipParams->sgBpid
++ << FM_PCD_MANIP_IP_FRAG_SG_BDID_SHIFT)
++ & FM_PCD_MANIP_IP_FRAG_SG_BDID_MASK);
++ }
++
++ /* Prepare the gmask Ad register - scratch buffer pool id and pointer to the shared fragment ID */
++ gmaskReg = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_FmPcd->ipv6FrameIdAddr))
++ - p_FmPcd->physicalMuramBase);
++#if (DPAA_VERSION == 10)
++ gmaskReg |= p_ManipParams->scratchBpid << FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID;
++#else
++ gmaskReg |= (0xFF) << FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID;
++#endif /* (DPAA_VERSION == 10) */
++
++ /* Set all Ad registers */
++ WRITE_UINT32(p_Manip->fragParams.p_Frag->pcAndOffsets, pcAndOffsetsReg);
++ WRITE_UINT32(p_Manip->fragParams.p_Frag->ccAdBase, ccAdBaseReg);
++ WRITE_UINT32(p_Manip->fragParams.p_Frag->gmask, gmaskReg);
++
++ /* Saves user's fragmentation manipulation parameters */
++ p_Manip->frag = TRUE;
++ p_Manip->sizeForFragmentation = p_ManipParams->sizeForFragmentation;
++
++#if (DPAA_VERSION == 10)
++ p_Manip->fragParams.scratchBpid = p_ManipParams->scratchBpid;
++
++ /* scratch buffer pool initialization */
++ if ((err = FmPcdFragHcScratchPoolFill((t_Handle)p_FmPcd, p_ManipParams->scratchBpid)) != E_OK)
++ {
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->fragParams.p_Frag);
++ p_Manip->fragParams.p_Frag = NULL;
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++#endif /* (DPAA_VERSION == 10) */
++
++ return E_OK;
++}
++
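++/* Builds the IP manipulation AD that fronts fragmentation: when
++ * fragmentation is configured, ccAdBase is pointed at the fragmentation AD
++ * allocated by IpFragmentation() and the MTU is encoded into pcAndOffsets;
++ * otherwise the "no fragmentation" opcode is used. */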
++static t_Error IPManip(t_FmPcdManip *p_Manip)
++{
++ t_Error err = E_OK;
++ t_FmPcd *p_FmPcd;
++ t_AdOfTypeContLookup *p_Ad;
++ uint32_t tmpReg32 = 0, tmpRegNia = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
++ p_FmPcd = p_Manip->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++
++ tmpReg32 = FM_PCD_MANIP_IP_NO_FRAGMENTATION;
++ if (p_Manip->frag == TRUE)
++ {
++ tmpRegNia = (uint32_t)(XX_VirtToPhys(p_Manip->fragParams.p_Frag)
++ - (p_FmPcd->physicalMuramBase));
++ tmpReg32 = (uint32_t)p_Manip->sizeForFragmentation
++ << FM_PCD_MANIP_IP_MTU_SHIFT;
++ }
++
++ tmpRegNia |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ tmpReg32 |= HMAN_OC_IP_MANIP;
++
++#if (DPAA_VERSION >= 11)
++ tmpRegNia |= FM_PCD_MANIP_IP_CNIA;
++#endif /* (DPAA_VERSION >= 11) */
++
++ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
++ WRITE_UINT32(p_Ad->ccAdBase, tmpRegNia);
++ WRITE_UINT32(p_Ad->gmask, 0);
++ /* Total frame counter - MUST be initialized to zero.*/
++
++ return err;
++}
++
++static t_Error UpdateInitIpFrag(t_Handle h_FmPcd, t_Handle h_PcdParams,
++ t_Handle h_FmPort, t_FmPcdManip *p_Manip,
++ t_Handle h_Ad, bool validate)
++{
++ t_FmPortGetSetCcParams fmPortGetSetCcParams;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Manip->opcode == HMAN_OC_IP_FRAGMENTATION),
++ E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
++
++ UNUSED(h_FmPcd);
++ UNUSED(h_Ad);
++ UNUSED(h_PcdParams);
++ UNUSED(validate);
++ UNUSED(p_Manip);
++
++ fmPortGetSetCcParams.setCcParams.type = 0;
++ fmPortGetSetCcParams.getCcParams.type = MANIP_EXTRA_SPACE;
++ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (!fmPortGetSetCcParams.getCcParams.internalBufferOffset)
++ DBG(WARNING, ("manipExtraSpace must be larger than '0'"));
++
++ return E_OK;
++}
++
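++/* Builds the IPSec special-offload AD. The arwAddr/arwSize pair describes
++ * the ARW (anti-replay window) state kept in MURAM: the window size must be
++ * a multiple of 16 bits, and the size field encoded below is derived from
++ * log2 of the next power of two of (arwSize + 32). A next-manip, if present,
++ * is chained through matchTblPtr with the NADEN bit set in pcAndOffsets. */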
++static t_Error IPSecManip(t_FmPcdManipParams *p_ManipParams,
++ t_FmPcdManip *p_Manip)
++{
++ t_AdOfTypeContLookup *p_Ad;
++ t_FmPcdManipSpecialOffloadIPSecParams *p_IPSecParams;
++ t_Error err = E_OK;
++ uint32_t tmpReg32 = 0;
++ uint32_t power;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_ManipParams, E_INVALID_HANDLE);
++
++ p_IPSecParams = &p_ManipParams->u.specialOffload.u.ipsec;
++
++ SANITY_CHECK_RETURN_ERROR(
++ !p_IPSecParams->variableIpHdrLen || p_IPSecParams->decryption,
++ E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR(
++ !p_IPSecParams->variableIpVersion || !p_IPSecParams->decryption,
++ E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR(
++ !p_IPSecParams->variableIpVersion || p_IPSecParams->outerIPHdrLen,
++ E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR(
++ !p_IPSecParams->arwSize || p_IPSecParams->arwAddr,
++ E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR(
++ !p_IPSecParams->arwSize || p_IPSecParams->decryption,
++ E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR((p_IPSecParams->arwSize % 16) == 0, E_INVALID_VALUE);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++
++ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ tmpReg32 |= (p_IPSecParams->decryption) ? FM_PCD_MANIP_IPSEC_DEC : 0;
++ tmpReg32 |= (p_IPSecParams->ecnCopy) ? FM_PCD_MANIP_IPSEC_ECN_EN : 0;
++ tmpReg32 |= (p_IPSecParams->dscpCopy) ? FM_PCD_MANIP_IPSEC_DSCP_EN : 0;
++ tmpReg32 |=
++ (p_IPSecParams->variableIpHdrLen) ? FM_PCD_MANIP_IPSEC_VIPL_EN : 0;
++ tmpReg32 |=
++ (p_IPSecParams->variableIpVersion) ? FM_PCD_MANIP_IPSEC_VIPV_EN : 0;
++ if (p_IPSecParams->arwSize)
++ tmpReg32 |= (uint32_t)((XX_VirtToPhys(UINT_TO_PTR(p_IPSecParams->arwAddr))-FM_MM_MURAM)
++ & (FM_MURAM_SIZE-1));
++ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
++
++ tmpReg32 = 0;
++ if (p_IPSecParams->arwSize) {
++ NEXT_POWER_OF_2((p_IPSecParams->arwSize + 32), power);
++ LOG2(power, power);
++ tmpReg32 = (p_IPSecParams->arwSize | (power - 5)) << FM_PCD_MANIP_IPSEC_ARW_SIZE_SHIFT;
++ }
++
++ if (p_ManipParams->h_NextManip)
++ tmpReg32 |=
++ (uint32_t)(XX_VirtToPhys(((t_FmPcdManip *)p_ManipParams->h_NextManip)->h_Ad)-
++ (((t_FmPcd *)p_Manip->h_FmPcd)->physicalMuramBase)) >> 4;
++ WRITE_UINT32(p_Ad->matchTblPtr, tmpReg32);
++
++ tmpReg32 = HMAN_OC_IPSEC_MANIP;
++ tmpReg32 |= p_IPSecParams->outerIPHdrLen
++ << FM_PCD_MANIP_IPSEC_IP_HDR_LEN_SHIFT;
++ if (p_ManipParams->h_NextManip)
++ tmpReg32 |= FM_PCD_MANIP_IPSEC_NADEN;
++ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
++
++ return err;
++}
++
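++/* Unlike the IP flavour, the CAPWAP reassembly AD is not carved out of
++ * MURAM: it is allocated with XX_MallocSmart() from the data memory
++ * partition selected by dataMemId, then filled in by
++ * FillReassmManipParams(). */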
++static t_Error SetCapwapReassmManip(t_FmPcdManip *p_Manip)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
++
++ /* Allocation of the CAPWAP Action Descriptor */
++ p_Manip->reassmParams.capwap.h_Ad = (t_Handle)XX_MallocSmart(
++ FM_PCD_CC_AD_ENTRY_SIZE, p_Manip->reassmParams.dataMemId,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_Manip->reassmParams.capwap.h_Ad)
++ {
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("Allocation of CAPWAP table descriptor"));
++ }
++
++ memset(p_Manip->reassmParams.capwap.h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* Fill the reassembly manipulation parameters in the Reassembly Action Descriptor */
++ return FillReassmManipParams(p_Manip, HEADER_TYPE_CAPWAP);
++}
++
++static void setCapwapReassmSchemeParams(t_FmPcd* p_FmPcd,
++ t_FmPcdKgSchemeParams *p_Scheme,
++ t_Handle h_CcTree, uint8_t groupId)
++{
++ uint8_t res;
++
++ /* Configures scheme's network environment parameters */
++ p_Scheme->netEnvParams.numOfDistinctionUnits = 1;
++ res = FmPcdNetEnvGetUnitId(
++ p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv),
++ HEADER_TYPE_USER_DEFINED_SHIM2, FALSE, 0);
++ ASSERT_COND(res != FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
++ p_Scheme->netEnvParams.unitIds[0] = res;
++
++ /* Configures scheme's next engine parameters*/
++ p_Scheme->nextEngine = e_FM_PCD_CC;
++ p_Scheme->kgNextEngineParams.cc.h_CcTree = h_CcTree;
++ p_Scheme->kgNextEngineParams.cc.grpId = groupId;
++ p_Scheme->useHash = TRUE;
++
++ /* Configures scheme's key*/
++ p_Scheme->keyExtractAndHashParams.numOfUsedExtracts = 2;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].type =
++ e_FM_PCD_EXTRACT_NON_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractNonHdr.src =
++ e_FM_PCD_EXTRACT_FROM_PARSE_RESULT;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractNonHdr.action =
++ e_FM_PCD_ACTION_NONE;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractNonHdr.offset = 20;
++ p_Scheme->keyExtractAndHashParams.extractArray[0].extractNonHdr.size = 4;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].type =
++ e_FM_PCD_EXTRACT_NON_HDR;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractNonHdr.src =
++ e_FM_PCD_EXTRACT_FROM_DFLT_VALUE;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractNonHdr.action =
++ e_FM_PCD_ACTION_NONE;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractNonHdr.offset = 0;
++ p_Scheme->keyExtractAndHashParams.extractArray[1].extractNonHdr.size = 1;
++
++ p_Scheme->keyExtractAndHashParams.privateDflt0 = 0x0;
++ p_Scheme->keyExtractAndHashParams.privateDflt1 = 0x0;
++ p_Scheme->keyExtractAndHashParams.numOfUsedDflts = 1;
++ p_Scheme->keyExtractAndHashParams.dflts[0].type = e_FM_PCD_KG_GENERIC_NOT_FROM_DATA;
++ p_Scheme->keyExtractAndHashParams.dflts[0].dfltSelect = e_FM_PCD_KG_DFLT_PRIVATE_0;
++}
++
++#if (DPAA_VERSION >= 11)
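++/* CAPWAP reassembly statistics mirror the IP ones, with one difference: the
++ * common table's NCSP counter slot is reused here to report frames that
++ * exceeded the maximum reassembled frame length. */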
++static t_Error CapwapReassemblyStats(t_FmPcdManip *p_Manip,
++ t_FmPcdManipReassemCapwapStats *p_Stats)
++{
++ ASSERT_COND(p_Manip);
++ ASSERT_COND(p_Stats);
++ ASSERT_COND(p_Manip->reassmParams.p_ReassCommonTbl);
++
++ p_Stats->timeout =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalTimeOutCounter);
++ p_Stats->rfdPoolBusy =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalRfdPoolBusyCounter);
++ p_Stats->internalBufferBusy =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalInternalBufferBusy);
++ p_Stats->externalBufferBusy =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalExternalBufferBusy);
++ p_Stats->sgFragments =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalSgFragmentCounter);
++ p_Stats->dmaSemaphoreDepletion =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalDmaSemaphoreDepletionCounter);
++ p_Stats->exceedMaxReassemblyFrameLen =
++ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalNCSPCounter);
++
++ p_Stats->successfullyReassembled =
++ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalSuccessfullyReasmFramesCounter);
++ p_Stats->validFragments =
++ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalValidFragmentCounter);
++ p_Stats->processedFragments =
++ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalProcessedFragCounter);
++ p_Stats->malformedFragments =
++ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalMalformdFragCounter);
++ p_Stats->autoLearnBusy =
++ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalSetBusyCounter);
++ p_Stats->discardedFragments =
++ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalDiscardedFragsCounter);
++ p_Stats->moreThan16Fragments =
++ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalMoreThan16FramesCounter);
++
++ return E_OK;
++}
++
++static t_Error CapwapFragmentationStats(t_FmPcdManip *p_Manip,
++ t_FmPcdManipFragCapwapStats *p_Stats)
++{
++ t_AdOfTypeContLookup *p_Ad;
++
++ ASSERT_COND(p_Manip);
++ ASSERT_COND(p_Stats);
++ ASSERT_COND(p_Manip->h_Ad);
++ ASSERT_COND(p_Manip->fragParams.p_Frag);
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++
++ p_Stats->totalFrames = GET_UINT32(p_Ad->gmask);
++
++ return E_OK;
++}
++
++static t_Error CapwapReassembly(t_FmPcdManipReassemParams *p_ManipReassmParams,
++ t_FmPcdManip *p_Manip)
++{
++ uint32_t maxSetNumber = 10000;
++ t_FmPcdManipReassemCapwapParams reassmManipParams =
++ p_ManipReassmParams->u.capwapReassem;
++ t_Error res;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(((t_FmPcd *)p_Manip->h_FmPcd)->h_Hc,
++ E_INVALID_HANDLE);
++
++ /* Validate the user's parameters; the timeout threshold is given in microseconds. */
++ if ((reassmManipParams.timeoutThresholdForReassmProcess < 1000)
++ || (reassmManipParams.timeoutThresholdForReassmProcess > 8000000))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("timeoutThresholdForReassmProcess should be 1msec - 8sec"));
++ /* It is recommended that the total number of entries in this table (number of sets * number of ways)
++ be twice the number of frames that are expected to be reassembled simultaneously. */
++ if (reassmManipParams.maxNumFramesInProcess
++ > (reassmManipParams.maxNumFramesInProcess * maxSetNumber / 2))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("maxNumFramesInProcess has to be less than (maximun set number * number of ways / 2)"));
++
++ /* Saves user's reassembly manipulation parameters */
++ p_Manip->reassmParams.capwap.relativeSchemeId =
++ reassmManipParams.relativeSchemeId;
++ p_Manip->reassmParams.capwap.numOfFramesPerHashEntry =
++ reassmManipParams.numOfFramesPerHashEntry;
++ p_Manip->reassmParams.capwap.maxRessembledsSize =
++ reassmManipParams.maxReassembledFrameLength;
++ p_Manip->reassmParams.maxNumFramesInProcess =
++ reassmManipParams.maxNumFramesInProcess;
++ p_Manip->reassmParams.timeOutMode = reassmManipParams.timeOutMode;
++ p_Manip->reassmParams.fqidForTimeOutFrames =
++ reassmManipParams.fqidForTimeOutFrames;
++ p_Manip->reassmParams.timeoutThresholdForReassmProcess =
++ reassmManipParams.timeoutThresholdForReassmProcess;
++ p_Manip->reassmParams.dataMemId = reassmManipParams.dataMemId;
++ p_Manip->reassmParams.dataLiodnOffset = reassmManipParams.dataLiodnOffset;
++
++ /* Creates and initializes the Reassembly common parameter table */
++ CreateReassCommonTable(p_Manip);
++
++ res = SetCapwapReassmManip(p_Manip);
++ if (res != E_OK)
++ return res;
++
++ return E_OK;
++}
++
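++/* CAPWAP fragmentation does the work of both IpFragmentation() and
++ * IPManip(): it allocates the fragmentation AD in MURAM and, in the same
++ * call, rewrites the manip's own AD as a "frag check" descriptor
++ * (HMAN_OC_CAPWAP_FRAG_CHECK) that tests the frame against
++ * sizeForFragmentation and chains to the fragmentation AD. */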
++static t_Error CapwapFragmentation(t_FmPcdManipFragCapwapParams *p_ManipParams,
++ t_FmPcdManip *p_Manip)
++{
++ t_FmPcd *p_FmPcd;
++ t_AdOfTypeContLookup *p_Ad;
++ uint32_t pcAndOffsetsReg = 0, ccAdBaseReg = 0, gmaskReg = 0;
++ uint32_t tmpReg32 = 0, tmpRegNia = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_ManipParams->sizeForFragmentation != 0xFFFF,
++ E_INVALID_VALUE);
++ p_FmPcd = p_Manip->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ /* Allocation of fragmentation Action Descriptor */
++ p_Manip->fragParams.p_Frag = (t_AdOfTypeContLookup *)FM_MURAM_AllocMem(
++ p_FmPcd->h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_Manip->fragParams.p_Frag)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("MURAM alloc for Fragmentation table descriptor"));
++ MemSet8(p_Manip->fragParams.p_Frag, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* Prepare the third Ad register (pcAndOffsets)- OperationCode */
++ pcAndOffsetsReg = (uint32_t)HMAN_OC_CAPWAP_FRAGMENTATION;
++
++ /* Prepare the first Ad register (ccAdBase) - Don't frag action and Action descriptor type*/
++ ccAdBaseReg = FM_PCD_AD_CONT_LOOKUP_TYPE;
++ ccAdBaseReg |=
++ (p_ManipParams->compressModeEn) ? FM_PCD_MANIP_CAPWAP_FRAG_COMPRESS_EN :
++ 0;
++
++ /* Set Scatter/Gather BPid */
++ if (p_ManipParams->sgBpidEn)
++ {
++ ccAdBaseReg |= FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_EN;
++ pcAndOffsetsReg |= ((p_ManipParams->sgBpid
++ << FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_SHIFT)
++ & FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_MASK);
++ }
++
++ /* Prepare the gmask Ad register - scratch buffer pool id and pointer to the shared fragment ID */
++ gmaskReg = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_FmPcd->capwapFrameIdAddr))
++ - p_FmPcd->physicalMuramBase);
++ gmaskReg |= (0xFF) << FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID;
++
++ /* Set all Ad registers */
++ WRITE_UINT32(p_Manip->fragParams.p_Frag->pcAndOffsets, pcAndOffsetsReg);
++ WRITE_UINT32(p_Manip->fragParams.p_Frag->ccAdBase, ccAdBaseReg);
++ WRITE_UINT32(p_Manip->fragParams.p_Frag->gmask, gmaskReg);
++
++ /* Saves user's fragmentation manipulation parameters */
++ p_Manip->frag = TRUE;
++ p_Manip->sizeForFragmentation = p_ManipParams->sizeForFragmentation;
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++
++ tmpRegNia = (uint32_t)(XX_VirtToPhys(p_Manip->fragParams.p_Frag)
++ - (p_FmPcd->physicalMuramBase));
++ tmpReg32 = (uint32_t)p_Manip->sizeForFragmentation
++ << FM_PCD_MANIP_CAPWAP_FRAG_CHECK_MTU_SHIFT;
++
++ tmpRegNia |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ tmpReg32 |= HMAN_OC_CAPWAP_FRAG_CHECK;
++
++ tmpRegNia |= FM_PCD_MANIP_CAPWAP_FRAG_CHECK_CNIA;
++
++ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
++ WRITE_UINT32(p_Ad->ccAdBase, tmpRegNia);
++ WRITE_UINT32(p_Ad->gmask, 0);
++ /* Total frame counter - MUST be initialized to zero.*/
++
++ return E_OK;
++}
++
++static t_Error UpdateInitCapwapFrag(t_Handle h_FmPcd, t_Handle h_PcdParams,
++ t_Handle h_FmPort, t_FmPcdManip *p_Manip,
++ t_Handle h_Ad, bool validate)
++{
++ t_FmPortGetSetCcParams fmPortGetSetCcParams;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION),
++ E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
++
++ UNUSED(h_FmPcd);
++ UNUSED(h_Ad);
++ UNUSED(h_PcdParams);
++ UNUSED(validate);
++ UNUSED(p_Manip);
++
++ fmPortGetSetCcParams.setCcParams.type = 0;
++ fmPortGetSetCcParams.getCcParams.type = MANIP_EXTRA_SPACE;
++ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (!fmPortGetSetCcParams.getCcParams.internalBufferOffset)
++ DBG(WARNING, ("manipExtraSpace must be larger than '0'"));
++
++ return E_OK;
++}
++
++static t_Error CapwapManip(t_FmPcdManipParams *p_ManipParams,
++ t_FmPcdManip *p_Manip)
++{
++ t_AdOfTypeContLookup *p_Ad;
++ t_FmPcdManipSpecialOffloadCapwapParams *p_Params;
++ t_Error err = E_OK;
++ uint32_t tmpReg32 = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_ManipParams, E_INVALID_HANDLE);
++
++ p_Params = &p_ManipParams->u.specialOffload.u.capwap;
++
++ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
++ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
++ tmpReg32 |= (p_Params->dtls) ? FM_PCD_MANIP_CAPWAP_DTLS : 0;
++ /* TODO - add 'qosSrc' */
++ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
++
++ tmpReg32 = HMAN_OC_CAPWAP_MANIP;
++ if (p_ManipParams->h_NextManip)
++ {
++ WRITE_UINT32(
++ p_Ad->matchTblPtr,
++ (uint32_t)(XX_VirtToPhys(((t_FmPcdManip *)p_ManipParams->h_NextManip)->h_Ad)- (((t_FmPcd *)p_Manip->h_FmPcd)->physicalMuramBase)) >> 4);
++
++ tmpReg32 |= FM_PCD_MANIP_CAPWAP_NADEN;
++ }
++
++ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
++
++ return err;
++}
++#endif /* (DPAA_VERSION >= 11) */
++
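++/* Common allocator for manipulation/statistics nodes: validates the
++ * parameters and, for non-reassembly nodes, allocates the Action Descriptor
++ * either in MURAM (when muramAllocate is set) or in host memory. Reassembly
++ * nodes skip the allocation here because their ADs are created later, per
++ * protocol. */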
++static t_Handle ManipOrStatsSetNode(t_Handle h_FmPcd, t_Handle *p_Params,
++ bool stats)
++{
++ t_FmPcdManip *p_Manip;
++ t_Error err;
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++
++ p_Manip = (t_FmPcdManip*)XX_Malloc(sizeof(t_FmPcdManip));
++ if (!p_Manip)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
++ return NULL;
++ }
++ memset(p_Manip, 0, sizeof(t_FmPcdManip));
++
++ p_Manip->type = ((t_FmPcdManipParams *)p_Params)->type;
++ memcpy((uint8_t*)&p_Manip->manipParams, p_Params,
++ sizeof(p_Manip->manipParams));
++
++ if (!stats)
++ err = CheckManipParamsAndSetType(p_Manip,
++ (t_FmPcdManipParams *)p_Params);
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ else
++ err = CheckStatsParamsAndSetType(p_Manip, (t_FmPcdStatsParams *)p_Params);
++#else /* not (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ else
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Statistics node!"));
++ XX_Free(p_Manip);
++ return NULL;
++ }
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Invalid header manipulation type"));
++ XX_Free(p_Manip);
++ return NULL;
++ }
++
++ if ((p_Manip->opcode != HMAN_OC_IP_REASSEMBLY) && (p_Manip->opcode != HMAN_OC_CAPWAP_REASSEMBLY))
++ {
++ /* In case of reassembly manipulation, the reassembly action descriptor will
++ be defined later on */
++ if (p_Manip->muramAllocate)
++ {
++ p_Manip->h_Ad = (t_Handle)FM_MURAM_AllocMem(
++ p_FmPcd->h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_Manip->h_Ad)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Manipulation action descriptor"));
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++
++ MemSet8(p_Manip->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++ }
++ else
++ {
++ p_Manip->h_Ad = (t_Handle)XX_Malloc(
++ FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t));
++ if (!p_Manip->h_Ad)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of Manipulation action descriptor"));
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++
++ memset(p_Manip->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t));
++ }
++ }
++
++ p_Manip->h_FmPcd = h_FmPcd;
++
++ return p_Manip;
++}
++
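++/* Walks all CC nodes registered on the modified manip and, for every key
++ * whose next-engine entry points at it, records the address of that entry's
++ * AD (or of its statistics AD) in h_NodesLst, so that each of them can later
++ * be patched with the replacement HMTD (see BuildHmtd() below). */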
++static void UpdateAdPtrOfNodesWhichPointsOnCrntMdfManip(
++ t_FmPcdManip *p_CrntMdfManip, t_List *h_NodesLst)
++{
++ t_CcNodeInformation *p_CcNodeInformation;
++ t_FmPcdCcNode *p_NodePtrOnCurrentMdfManip = NULL;
++ t_List *p_Pos;
++ int i = 0;
++ t_Handle p_AdTablePtOnCrntCurrentMdfNode/*, p_AdTableNewModified*/;
++ t_CcNodeInformation ccNodeInfo;
++
++ LIST_FOR_EACH(p_Pos, &p_CrntMdfManip->nodesLst)
++ {
++ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
++ p_NodePtrOnCurrentMdfManip =
++ (t_FmPcdCcNode *)p_CcNodeInformation->h_CcNode;
++
++ ASSERT_COND(p_NodePtrOnCurrentMdfManip);
++
++ /* Search the previous node for the exact index that points to this currently modified node, in order to get its AD */
++ for (i = 0; i < p_NodePtrOnCurrentMdfManip->numOfKeys + 1; i++)
++ {
++ if (p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].nextEngineParams.nextEngine
++ == e_FM_PCD_CC)
++ {
++ if (p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].nextEngineParams.h_Manip
++ == (t_Handle)p_CrntMdfManip)
++ {
++ if (p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].p_StatsObj)
++ p_AdTablePtOnCrntCurrentMdfNode =
++ p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].p_StatsObj->h_StatsAd;
++ else
++ p_AdTablePtOnCrntCurrentMdfNode =
++ PTR_MOVE(p_NodePtrOnCurrentMdfManip->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE);
++
++ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
++ ccNodeInfo.h_CcNode = p_AdTablePtOnCrntCurrentMdfNode;
++ EnqueueNodeInfoToRelevantLst(h_NodesLst, &ccNodeInfo, NULL);
++ }
++ }
++ }
++
++ ASSERT_COND(i != p_NodePtrOnCurrentMdfManip->numOfKeys);
++ }
++}
++
++static void BuildHmtd(uint8_t *p_Dest, uint8_t *p_Src, uint8_t *p_Hmcd,
++ t_FmPcd *p_FmPcd)
++{
++ t_Error err;
++
++ /* Copy the HMTD */
++ MemCpy8(p_Dest, (uint8_t*)p_Src, 16);
++ /* Replace the HMCT table pointer */
++ WRITE_UINT32(
++ ((t_Hmtd *)p_Dest)->hmcdBasePtr,
++ (uint32_t)(XX_VirtToPhys(p_Hmcd) - ((t_FmPcd*)p_FmPcd)->physicalMuramBase));
++ /* Call Host Command to replace HMTD by a new HMTD */
++ err = FmHcPcdCcDoDynamicChange(
++ p_FmPcd->h_Hc,
++ (uint32_t)(XX_VirtToPhys(p_Src) - p_FmPcd->physicalMuramBase),
++ (uint32_t)(XX_VirtToPhys(p_Dest) - p_FmPcd->physicalMuramBase));
++ if (err)
++ REPORT_ERROR(MINOR, err, ("Failed in dynamic manip change, continued to the rest of the owners."));
++}
++
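++/* Init-time dispatcher: invoked while a port is being initialized to
++ * complete whatever part of the manip setup depends on port parameters
++ * (the reassembly/fragmentation updates above). Opcodes with nothing to do
++ * at this stage simply return E_OK. */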
++static t_Error FmPcdManipInitUpdate(t_Handle h_FmPcd, t_Handle h_PcdParams,
++ t_Handle h_FmPort, t_Handle h_Manip,
++ t_Handle h_Ad, bool validate, int level,
++ t_Handle h_FmTree)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_Manip, E_INVALID_HANDLE);
++
++ UNUSED(level);
++ UNUSED(h_FmTree);
++
++ switch (p_Manip->opcode)
++ {
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
++ err = UpdateInitMvIntFrameHeaderFromFrameToBufferPrefix(h_FmPort,
++ p_Manip,
++ h_Ad,
++ validate);
++ break;
++ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
++ if (!p_Manip->h_Frag)
++ break;
++ case (HMAN_OC_CAPWAP_FRAGMENTATION):
++ err = UpdateInitCapwapFragmentation(h_FmPort, p_Manip, h_Ad, validate, h_FmTree);
++ break;
++ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
++ if (p_Manip->h_Frag)
++ err = UpdateInitCapwapReasm(h_FmPcd, h_FmPort, p_Manip, h_Ad, validate);
++ break;
++ case (HMAN_OC_CAPWAP_INDEXED_STATS):
++ err = UpdateIndxStats(h_FmPcd, h_FmPort, p_Manip);
++ break;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ case (HMAN_OC_IP_REASSEMBLY):
++ err = UpdateInitReasm(h_FmPcd, h_PcdParams, h_FmPort, p_Manip, h_Ad,
++ validate);
++ break;
++ case (HMAN_OC_IP_FRAGMENTATION):
++ err = UpdateInitIpFrag(h_FmPcd, h_PcdParams, h_FmPort, p_Manip,
++ h_Ad, validate);
++ break;
++#if (DPAA_VERSION >= 11)
++ case (HMAN_OC_CAPWAP_FRAGMENTATION):
++ err = UpdateInitCapwapFrag(h_FmPcd, h_PcdParams, h_FmPort, p_Manip,
++ h_Ad, validate);
++ break;
++ case (HMAN_OC_CAPWAP_REASSEMBLY):
++ err = UpdateInitReasm(h_FmPcd, h_PcdParams, h_FmPort, p_Manip, h_Ad,
++ validate);
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ return E_OK;
++ }
++
++ return err;
++}
++
++static t_Error FmPcdManipModifyUpdate(t_Handle h_Manip, t_Handle h_Ad,
++ bool validate, int level,
++ t_Handle h_FmTree)
++{
++
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++ t_Error err = E_OK;
++
++ UNUSED(level);
++
++ switch (p_Manip->opcode)
++ {
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("modifying a node with this type of manipulation is not supported"));
++ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
++
++ if (p_Manip->h_Frag)
++ {
++ if (!(p_Manip->shadowUpdateParams & NUM_OF_TASKS)
++ && !(p_Manip->shadowUpdateParams & OFFSET_OF_DATA)
++ && !(p_Manip->shadowUpdateParams & OFFSET_OF_PR))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("modifying a node with this type of manipulation requires the manipulation to be updated beforehand in the SetPcd routine"));
++ }
++ break;
++ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
++ if (p_Manip->h_Frag)
++ err = UpdateModifyCapwapFragmenation(p_Manip, h_Ad, validate, h_FmTree);
++ break;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ default:
++ return E_OK;
++ }
++
++ return err;
++}
++
++/*****************************************************************************/
++/* Inter-module API routines */
++/*****************************************************************************/
++
++t_Error FmPcdManipUpdate(t_Handle h_FmPcd, t_Handle h_PcdParams,
++ t_Handle h_FmPort, t_Handle h_Manip, t_Handle h_Ad,
++ bool validate, int level, t_Handle h_FmTree,
++ bool modify)
++{
++ t_Error err;
++
++ if (!modify)
++ err = FmPcdManipInitUpdate(h_FmPcd, h_PcdParams, h_FmPort, h_Manip,
++ h_Ad, validate, level, h_FmTree);
++ else
++ err = FmPcdManipModifyUpdate(h_Manip, h_Ad, validate, level, h_FmTree);
++
++ return err;
++}
++
++void FmPcdManipUpdateOwner(t_Handle h_Manip, bool add)
++{
++
++ uint32_t intFlags;
++
++ intFlags = XX_LockIntrSpinlock(((t_FmPcdManip *)h_Manip)->h_Spinlock);
++ if (add)
++ ((t_FmPcdManip *)h_Manip)->owner++;
++ else
++ {
++ ASSERT_COND(((t_FmPcdManip *)h_Manip)->owner);
++ ((t_FmPcdManip *)h_Manip)->owner--;
++ }
++ XX_UnlockIntrSpinlock(((t_FmPcdManip *)h_Manip)->h_Spinlock, intFlags);
++}
++
++t_List *FmPcdManipGetNodeLstPointedOnThisManip(t_Handle h_Manip)
++{
++ ASSERT_COND(h_Manip);
++ return &((t_FmPcdManip *)h_Manip)->nodesLst;
++}
++
++t_List *FmPcdManipGetSpinlock(t_Handle h_Manip)
++{
++ ASSERT_COND(h_Manip);
++ return ((t_FmPcdManip *)h_Manip)->h_Spinlock;
++}
++
++t_Error FmPcdManipCheckParamsForCcNextEngine(
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,
++ uint32_t *requiredAction)
++{
++ t_FmPcdManip *p_Manip;
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ t_Error err = E_OK;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))*/
++ bool pointFromCc = TRUE;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams->h_Manip,
++ E_NULL_POINTER);
++
++ p_Manip = (t_FmPcdManip *)(p_FmPcdCcNextEngineParams->h_Manip);
++ *requiredAction = 0;
++
++ while (p_Manip)
++ {
++ switch (p_Manip->opcode)
++ {
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ case (HMAN_OC_CAPWAP_INDEXED_STATS):
++ if (p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_DONE)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this type of header manipulation has to be nextEngine e_FM_PCD_DONE"));
++ if (p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid)
++ p_Manip->cnia = TRUE;
++ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
++ *requiredAction = UPDATE_NIA_ENQ_WITHOUT_DMA;
++ case (HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR):
++ p_Manip->ownerTmp++;
++ break;
++ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
++ if ((p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_DONE)
++ && !p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("For this type of header manipulation, nextEngine has to be e_FM_PCD_DONE with fqidForCtrlFlow FALSE"));
++ p_Manip->ownerTmp++;
++ break;
++ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
++ if ((p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_CC)
++ && (FmPcdCcGetParseCode(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode)
++ != CC_PC_GENERIC_IC_HASH_INDEXED))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this type of header manipulation next engine has to be CC and action = e_FM_PCD_ACTION_INDEXED_LOOKUP"));
++ err = UpdateManipIc(p_FmPcdCcNextEngineParams->h_Manip,
++ FmPcdCcGetOffset(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode));
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ *requiredAction = UPDATE_NIA_ENQ_WITHOUT_DMA;
++ break;
++ #endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ case (HMAN_OC_IP_FRAGMENTATION):
++ case (HMAN_OC_IP_REASSEMBLY):
++#if (DPAA_VERSION >= 11)
++ case (HMAN_OC_CAPWAP_REASSEMBLY):
++ case (HMAN_OC_CAPWAP_FRAGMENTATION):
++#endif /* (DPAA_VERSION >= 11) */
++ if (p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_DONE)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("For this type of header manipulation, nextEngine has to be e_FM_PCD_DONE"));
++ p_Manip->ownerTmp++;
++ break;
++ case (HMAN_OC_IPSEC_MANIP):
++#if (DPAA_VERSION >= 11)
++ case (HMAN_OC_CAPWAP_MANIP):
++#endif /* (DPAA_VERSION >= 11) */
++ p_Manip->ownerTmp++;
++ break;
++ case (HMAN_OC):
++ if ((p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_CC)
++ && MANIP_IS_CASCADED(p_Manip))
++ RETURN_ERROR(
++ MINOR,
++ E_INVALID_STATE,
++ ("Can't have a cascaded manipulation when the Next Engine is CC"));
++ if (!MANIP_IS_FIRST(p_Manip) && pointFromCc)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("h_Manip is already used and may not be shared (no sharing of non-head manip nodes)"));
++ break;
++ default:
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++ ("invalid type of header manipulation for this state"));
++ }
++ p_Manip = p_Manip->h_NextManip;
++ pointFromCc = FALSE;
++ }
++ return E_OK;
++}
++
++
++t_Error FmPcdManipCheckParamsWithCcNodeParams(t_Handle h_Manip,
++ t_Handle h_FmPcdCcNode)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcNode, E_INVALID_HANDLE);
++
++ switch (p_Manip->opcode)
++ {
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ case (HMAN_OC_CAPWAP_INDEXED_STATS):
++ if (p_Manip->ownerTmp != FmPcdCcGetNumOfKeys(h_FmPcdCcNode))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("A statistics-flowId manipulation of this type has to be pointed to by all numOfKeys entries"));
++ break;
++ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
++ if (p_Manip->h_Frag)
++ {
++ if (p_Manip->ownerTmp != FmPcdCcGetNumOfKeys(h_FmPcdCcNode))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("A remove-DTLS-if-exists manipulation has to be pointed to by all numOfKeys entries"));
++ err = UpdateManipIc(h_Manip, FmPcdCcGetOffset(h_FmPcdCcNode));
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ break;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ default:
++ break;
++ }
++
++ return err;
++}
++
++void FmPcdManipUpdateAdResultForCc(
++ t_Handle h_Manip, t_FmPcdCcNextEngineParams *p_CcNextEngineParams,
++ t_Handle p_Ad, t_Handle *p_AdNewPtr)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++
++ /* This routine creates a Manip AD and can return in "p_AdNewPtr"
++ * either the new descriptor or NULL if it writes the Manip AD into p_AD (into the match table) */
++
++ ASSERT_COND(p_Manip);
++ ASSERT_COND(p_CcNextEngineParams);
++ ASSERT_COND(p_Ad);
++ ASSERT_COND(p_AdNewPtr);
++
++ FmPcdManipUpdateOwner(h_Manip, TRUE);
++
++ /* According to "type", either build & initialize a new AD (p_AdNew) or initialize
++ * p_Ad ( the AD in the match table) and set p_AdNew = NULL. */
++ switch (p_Manip->opcode)
++ {
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ case (HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR):
++ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
++ case (HMAN_OC_CAPWAP_INDEXED_STATS):
++ *p_AdNewPtr = p_Manip->h_Ad;
++ break;
++ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
++ case (HMAN_OC_CAPWAP_FRAGMENTATION):
++ WRITE_UINT32(((t_AdOfTypeResult *)p_Ad)->fqid,
++ ((t_AdOfTypeResult *)(p_Manip->h_Ad))->fqid);
++ WRITE_UINT32(((t_AdOfTypeResult *)p_Ad)->plcrProfile,
++ ((t_AdOfTypeResult *)(p_Manip->h_Ad))->plcrProfile);
++ WRITE_UINT32(((t_AdOfTypeResult *)p_Ad)->nia,
++ ((t_AdOfTypeResult *)(p_Manip->h_Ad))->nia);
++ *p_AdNewPtr = NULL;
++ break;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ case (HMAN_OC_IPSEC_MANIP):
++#if (DPAA_VERSION >= 11)
++ case (HMAN_OC_CAPWAP_MANIP):
++#endif /* (DPAA_VERSION >= 11) */
++ *p_AdNewPtr = p_Manip->h_Ad;
++ break;
++ case (HMAN_OC_IP_FRAGMENTATION):
++#if (DPAA_VERSION >= 11)
++ case (HMAN_OC_CAPWAP_FRAGMENTATION):
++#endif /* (DPAA_VERSION >= 11) */
++ if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_DONE)
++ && (!p_CcNextEngineParams->params.enqueueParams.overrideFqid))
++ {
++ memcpy((uint8_t *)p_Ad, (uint8_t *)p_Manip->h_Ad,
++ sizeof(t_AdOfTypeContLookup));
++#if (DPAA_VERSION >= 11)
++ WRITE_UINT32(
++ ((t_AdOfTypeContLookup *)p_Ad)->ccAdBase,
++ GET_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase) & ~FM_PCD_MANIP_IP_CNIA);
++#endif /* (DPAA_VERSION >= 11) */
++ *p_AdNewPtr = NULL;
++ }
++ else
++ *p_AdNewPtr = p_Manip->h_Ad;
++ break;
++ case (HMAN_OC_IP_REASSEMBLY):
++ if (FmPcdManipIpReassmIsIpv6Hdr(p_Manip))
++ {
++ if (!p_Manip->reassmParams.ip.ipv6Assigned)
++ {
++ *p_AdNewPtr = p_Manip->reassmParams.ip.h_Ipv6Ad;
++ p_Manip->reassmParams.ip.ipv6Assigned = TRUE;
++ FmPcdManipUpdateOwner(h_Manip, FALSE);
++ }
++ else
++ {
++ *p_AdNewPtr = p_Manip->reassmParams.ip.h_Ipv4Ad;
++ p_Manip->reassmParams.ip.ipv6Assigned = FALSE;
++ }
++ }
++ else
++ *p_AdNewPtr = p_Manip->reassmParams.ip.h_Ipv4Ad;
++ memcpy((uint8_t *)p_Ad, (uint8_t *)*p_AdNewPtr,
++ sizeof(t_AdOfTypeContLookup));
++ *p_AdNewPtr = NULL;
++ break;
++#if (DPAA_VERSION >= 11)
++ case (HMAN_OC_CAPWAP_REASSEMBLY):
++ *p_AdNewPtr = p_Manip->reassmParams.capwap.h_Ad;
++ memcpy((uint8_t *)p_Ad, (uint8_t *)*p_AdNewPtr,
++ sizeof(t_AdOfTypeContLookup));
++ *p_AdNewPtr = NULL;
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++ case (HMAN_OC):
++ /* Allocate and initialize HMTD */
++ *p_AdNewPtr = p_Manip->h_Ad;
++ break;
++ default:
++ break;
++ }
++}
++
++void FmPcdManipUpdateAdContLookupForCc(t_Handle h_Manip, t_Handle p_Ad,
++ t_Handle *p_AdNewPtr,
++ uint32_t adTableOffset)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++
++ /* This routine creates a Manip AD and can return in "p_AdNewPtr"
++ * either the new descriptor or NULL if it writes the Manip AD into p_AD (into the match table) */
++ ASSERT_COND(p_Manip);
++
++ FmPcdManipUpdateOwner(h_Manip, TRUE);
++
++ switch (p_Manip->opcode)
++ {
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
++ WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase,
++ ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->ccAdBase);
++ WRITE_UINT32(
++ ((t_AdOfTypeContLookup *)p_Ad)->matchTblPtr,
++ ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->matchTblPtr);
++ WRITE_UINT32(
++ ((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets,
++ ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->pcAndOffsets);
++ WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->gmask,
++ ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->gmask);
++ WRITE_UINT32(
++ ((t_AdOfTypeContLookup *)p_Ad)->ccAdBase,
++ (GET_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase) | adTableOffset));
++ *p_AdNewPtr = NULL;
++ break;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ case (HMAN_OC):
++ /* Initialize HMTD within the match table*/
++ MemSet8(p_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++ /* copy the existing HMTD */
++ memcpy((uint8_t*)p_Ad, p_Manip->h_Ad, sizeof(t_Hmtd));
++ /* update NADEN to be "1"*/
++ WRITE_UINT16(
++ ((t_Hmtd *)p_Ad)->cfg,
++ (uint16_t)(GET_UINT16(((t_Hmtd *)p_Ad)->cfg) | HMTD_CFG_NEXT_AD_EN));
++ /* update next action descriptor */
++ WRITE_UINT16(((t_Hmtd *)p_Ad)->nextAdIdx,
++ (uint16_t)(adTableOffset >> 4));
++ /* mark that Manip's HMTD is not used */
++ *p_AdNewPtr = NULL;
++ break;
++
++ default:
++ break;
++ }
++}
++
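++/* Creates - or reuses, when the relative scheme id already resolves to a
++ * live scheme - one KeyGen scheme per IP version for the reassembly
++ * distribution; the scheme contents are filled in by
++ * setIpReassmSchemeParams() above. */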
++t_Error FmPcdManipBuildIpReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv,
++ t_Handle h_CcTree, t_Handle h_Manip,
++ bool isIpv4, uint8_t groupId)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++ t_FmPcdKgSchemeParams *p_SchemeParams = NULL;
++ t_Handle h_Scheme;
++
++ ASSERT_COND(p_FmPcd);
++ ASSERT_COND(h_NetEnv);
++ ASSERT_COND(p_Manip);
++
++ /* scheme was already built; no need to also check the IPv6 handle */
++ if (p_Manip->reassmParams.ip.h_Ipv4Scheme)
++ return E_OK;
++
++ if (isIpv4) {
++ h_Scheme = FmPcdKgGetSchemeHandle(p_FmPcd, p_Manip->reassmParams.ip.relativeSchemeId[0]);
++ if (h_Scheme) {
++ /* scheme was found */
++ p_Manip->reassmParams.ip.h_Ipv4Scheme = h_Scheme;
++ return E_OK;
++ }
++ } else {
++ h_Scheme = FmPcdKgGetSchemeHandle(p_FmPcd, p_Manip->reassmParams.ip.relativeSchemeId[1]);
++ if (h_Scheme) {
++ /* scheme was found */
++ p_Manip->reassmParams.ip.h_Ipv6Scheme = h_Scheme;
++ return E_OK;
++ }
++ }
++
++ p_SchemeParams = XX_Malloc(sizeof(t_FmPcdKgSchemeParams));
++ if (!p_SchemeParams)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("Memory allocation failed for scheme"));
++
++ /* Configures the IPv4 or IPv6 scheme*/
++ memset(p_SchemeParams, 0, sizeof(t_FmPcdKgSchemeParams));
++ p_SchemeParams->netEnvParams.h_NetEnv = h_NetEnv;
++ p_SchemeParams->id.relativeSchemeId = (uint8_t)(
++ (isIpv4 == TRUE) ? p_Manip->reassmParams.ip.relativeSchemeId[0] :
++ p_Manip->reassmParams.ip.relativeSchemeId[1]);
++ p_SchemeParams->schemeCounter.update = TRUE;
++#if (DPAA_VERSION >= 11)
++ p_SchemeParams->alwaysDirect = TRUE;
++ p_SchemeParams->bypassFqidGeneration = TRUE;
++#else
++ p_SchemeParams->keyExtractAndHashParams.hashDistributionNumOfFqids = 1;
++ p_SchemeParams->baseFqid = 0xFFFFFF; /*TODO- baseFqid*/
++#endif /* (DPAA_VERSION >= 11) */
++
++ setIpReassmSchemeParams(p_FmPcd, p_SchemeParams, h_CcTree, isIpv4, groupId);
++
++ /* Sets the new scheme */
++ if (isIpv4)
++ p_Manip->reassmParams.ip.h_Ipv4Scheme = FM_PCD_KgSchemeSet(
++ p_FmPcd, p_SchemeParams);
++ else
++ p_Manip->reassmParams.ip.h_Ipv6Scheme = FM_PCD_KgSchemeSet(
++ p_FmPcd, p_SchemeParams);
++
++ XX_Free(p_SchemeParams);
++
++ return E_OK;
++}
++
++t_Error FmPcdManipDeleteIpReassmSchemes(t_Handle h_Manip)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++
++ ASSERT_COND(p_Manip);
++
++ if ((p_Manip->reassmParams.ip.h_Ipv4Scheme) &&
++ !FmPcdKgIsSchemeHasOwners(p_Manip->reassmParams.ip.h_Ipv4Scheme))
++ FM_PCD_KgSchemeDelete(p_Manip->reassmParams.ip.h_Ipv4Scheme);
++
++ if ((p_Manip->reassmParams.ip.h_Ipv6Scheme) &&
++ !FmPcdKgIsSchemeHasOwners(p_Manip->reassmParams.ip.h_Ipv6Scheme))
++ FM_PCD_KgSchemeDelete(p_Manip->reassmParams.ip.h_Ipv6Scheme);
++
++ return E_OK;
++}
++
++bool FmPcdManipIpReassmIsIpv6Hdr(t_Handle h_Manip)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++
++ ASSERT_COND(p_Manip);
++
++ return (p_Manip->reassmParams.hdr == HEADER_TYPE_IPv6);
++}
++
++t_Error FmPcdManipBuildCapwapReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv,
++ t_Handle h_CcTree, t_Handle h_Manip,
++ uint8_t groupId)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++ t_FmPcdKgSchemeParams *p_SchemeParams = NULL;
++
++ ASSERT_COND(p_FmPcd);
++ ASSERT_COND(h_NetEnv);
++ ASSERT_COND(p_Manip);
++
++ /* scheme was already built */
++ if (p_Manip->reassmParams.capwap.h_Scheme)
++ return E_OK;
++
++ p_SchemeParams = XX_Malloc(sizeof(t_FmPcdKgSchemeParams));
++ if (!p_SchemeParams)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY,
++ ("Memory allocation failed for scheme"));
++
++ memset(p_SchemeParams, 0, sizeof(t_FmPcdKgSchemeParams));
++ p_SchemeParams->netEnvParams.h_NetEnv = h_NetEnv;
++ p_SchemeParams->id.relativeSchemeId =
++ (uint8_t)p_Manip->reassmParams.capwap.relativeSchemeId;
++ p_SchemeParams->schemeCounter.update = TRUE;
++ p_SchemeParams->bypassFqidGeneration = TRUE;
++
++ setCapwapReassmSchemeParams(p_FmPcd, p_SchemeParams, h_CcTree, groupId);
++
++ p_Manip->reassmParams.capwap.h_Scheme = FM_PCD_KgSchemeSet(p_FmPcd,
++ p_SchemeParams);
++
++ XX_Free(p_SchemeParams);
++
++ return E_OK;
++}
++
++t_Error FmPcdManipDeleteCapwapReassmSchemes(t_Handle h_Manip)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++
++ ASSERT_COND(p_Manip);
++
++ if (p_Manip->reassmParams.capwap.h_Scheme)
++ FM_PCD_KgSchemeDelete(p_Manip->reassmParams.capwap.h_Scheme);
++
++ return E_OK;
++}
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++t_Handle FmPcdManipApplSpecificBuild(void)
++{
++ t_FmPcdManip *p_Manip;
++
++ p_Manip = (t_FmPcdManip*)XX_Malloc(sizeof(t_FmPcdManip));
++ if (!p_Manip)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
++ return NULL;
++ }
++ memset(p_Manip, 0, sizeof(t_FmPcdManip));
++
++ p_Manip->opcode = HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX;
++ p_Manip->muramAllocate = FALSE;
++
++ p_Manip->h_Ad = (t_Handle)XX_Malloc(FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t));
++ if (!p_Manip->h_Ad)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of Manipulation action descriptor"));
++ XX_Free(p_Manip);
++ return NULL;
++ }
++
++ memset(p_Manip->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t));
++
++ /* treatFdStatusFieldsAsErrors = TRUE hardcoded - the assumption is that it always comes after CAAM */
++ /* Application specific = type of flowId index, move internal frame header from data to IC,
++ SEC errors check */
++ if (MvIntFrameHeaderFromFrameToBufferPrefix(p_Manip, TRUE)!= E_OK)
++ {
++ XX_Free(p_Manip->h_Ad);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++ return p_Manip;
++}
++
++bool FmPcdManipIsCapwapApplSpecific(t_Handle h_Manip)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
++ ASSERT_COND(h_Manip);
++
++ return (bool)((p_Manip->opcode == HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST) ? TRUE : FALSE);
++}
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++/*********************** End of inter-module routines ************************/
++
++/****************************************/
++/* API Init unit functions */
++/****************************************/
++
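++/* Entry point for creating a manipulation node. A minimal usage sketch for
++ * IPv6 reassembly (illustrative values only; field names assume the
++ * t_FmPcdManipParams layout from the PCD API header):
++ *
++ *     t_FmPcdManipParams params;
++ *     memset(&params, 0, sizeof(params));
++ *     params.type = e_FM_PCD_MANIP_REASSEM;
++ *     params.u.reassem.hdr = HEADER_TYPE_IPv6;
++ *     h_Manip = FM_PCD_ManipNodeSet(h_FmPcd, &params);
++ *
++ * NULL is returned on any failure. */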
++t_Handle FM_PCD_ManipNodeSet(t_Handle h_FmPcd,
++ t_FmPcdManipParams *p_ManipParams)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_FmPcdManip *p_Manip;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_ManipParams, E_INVALID_HANDLE, NULL);
++
++ p_Manip = ManipOrStatsSetNode(h_FmPcd, (t_Handle)p_ManipParams, FALSE);
++ if (!p_Manip)
++ return NULL;
++
++ if (((p_Manip->opcode == HMAN_OC_IP_REASSEMBLY)
++ || (p_Manip->opcode == HMAN_OC_IP_FRAGMENTATION)
++ || (p_Manip->opcode == HMAN_OC)
++ || (p_Manip->opcode == HMAN_OC_IPSEC_MANIP)
++#if (DPAA_VERSION >= 11)
++ || (p_Manip->opcode == HMAN_OC_CAPWAP_MANIP)
++ || (p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION)
++ || (p_Manip->opcode == HMAN_OC_CAPWAP_REASSEMBLY)
++#endif /* (DPAA_VERSION >= 11) */
++ ) && (!FmPcdIsAdvancedOffloadSupported(p_FmPcd)))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Advanced-offload must be enabled"));
++ XX_Free(p_Manip);
++ return NULL;
++ }
++ p_Manip->h_Spinlock = XX_InitSpinlock();
++ if (!p_Manip->h_Spinlock)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE"));
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++ INIT_LIST(&p_Manip->nodesLst);
++
++ switch (p_Manip->opcode)
++ {
++ case (HMAN_OC_IP_REASSEMBLY):
++ /* IpReassembly */
++ err = IpReassembly(&p_ManipParams->u.reassem, p_Manip);
++ break;
++ case (HMAN_OC_IP_FRAGMENTATION):
++ /* IpFragmentation */
++ err = IpFragmentation(&p_ManipParams->u.frag.u.ipFrag, p_Manip);
++ if (err)
++ break;
++ err = IPManip(p_Manip);
++ break;
++ case (HMAN_OC_IPSEC_MANIP):
++ err = IPSecManip(p_ManipParams, p_Manip);
++ break;
++#if (DPAA_VERSION >= 11)
++ case (HMAN_OC_CAPWAP_REASSEMBLY):
++ /* CapwapReassembly */
++ err = CapwapReassembly(&p_ManipParams->u.reassem, p_Manip);
++ break;
++ case (HMAN_OC_CAPWAP_FRAGMENTATION):
++ /* CapwapFragmentation */
++ err = CapwapFragmentation(&p_ManipParams->u.frag.u.capwapFrag,
++ p_Manip);
++ break;
++ case (HMAN_OC_CAPWAP_MANIP):
++ err = CapwapManip(p_ManipParams, p_Manip);
++ break;
++#endif /* (DPAA_VERSION >= 11) */
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ case (HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR):
++ /* HmanType1 */
++ err = RmvHdrTillSpecLocNOrInsrtIntFrmHdr(&p_ManipParams->u.hdr.rmvParams, p_Manip);
++ break;
++ case (HMAN_OC_CAPWAP_FRAGMENTATION):
++ err = CapwapFragmentation(&p_ManipParams->fragOrReasmParams.u.capwapFragParams,
++ p_Manip,
++ p_FmPcd,
++ p_ManipParams->fragOrReasmParams.sgBpid);
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE"));
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++ if (p_Manip->insrt)
++ p_Manip->opcode = HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER;
++ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
++ /* HmanType2 + if user asked only for fragmentation still need to allocate HmanType2 */
++ err = InsrtHdrByTempl(&p_ManipParams->u.hdr.insrtParams, p_Manip, p_FmPcd);
++ break;
++ case (HMAN_OC_CAPWAP_REASSEMBLY):
++ err = CapwapReassembly(&p_ManipParams->fragOrReasmParams.u.capwapReasmParams,
++ p_Manip,
++ p_FmPcd,
++ p_ManipParams->fragOrReasmParams.sgBpid);
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE"));
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++ if (p_Manip->rmv)
++ p_Manip->opcode = HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST;
++ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
++ /*CAPWAP decapsulation + if user asked only for reassembly still need to allocate CAPWAP decapsulation*/
++ err = CapwapRmvDtlsHdr(p_FmPcd, p_Manip);
++ break;
++ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
++ /*Application Specific type 1*/
++ err = MvIntFrameHeaderFromFrameToBufferPrefix(p_Manip, TRUE);
++ break;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ case (HMAN_OC):
++ /* New Manip */
++ err = CreateManipActionNew(p_Manip, p_ManipParams);
++ break;
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE"));
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++
++ if (p_ManipParams->h_NextManip)
++ {
++ /* in the check routine we've verified that h_NextManip has no owners
++ * and that only supported types are allowed. */
++ p_Manip->h_NextManip = p_ManipParams->h_NextManip;
++ /* save a "prev" pointer in h_NextManip */
++ MANIP_SET_PREV(p_Manip->h_NextManip, p_Manip);
++ FmPcdManipUpdateOwner(p_Manip->h_NextManip, TRUE);
++ }
++
++ return p_Manip;
++}
++
++t_Error FM_PCD_ManipNodeReplace(t_Handle h_Manip,
++ t_FmPcdManipParams *p_ManipParams)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip, *p_FirstManip;
++ t_FmPcd *p_FmPcd = (t_FmPcd *)(p_Manip->h_FmPcd);
++ t_Error err;
++ uint8_t *p_WholeHmct = NULL, *p_ShadowHmct = NULL, *p_Hmtd = NULL;
++ t_List lstOfNodeshichPointsOnCrntMdfManip, *p_Pos;
++ t_CcNodeInformation *p_CcNodeInfo;
++ SANITY_CHECK_RETURN_ERROR(h_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_ManipParams, E_INVALID_HANDLE);
++
++ INIT_LIST(&lstOfNodeshichPointsOnCrntMdfManip);
++
++ if ((p_ManipParams->type != e_FM_PCD_MANIP_HDR)
++ || (p_Manip->type != e_FM_PCD_MANIP_HDR))
++ RETURN_ERROR(
++ MINOR,
++ E_NOT_SUPPORTED,
++ ("FM_PCD_ManipNodeReplace Functionality supported only for Header Manipulation."));
++
++ ASSERT_COND(p_Manip->opcode == HMAN_OC);
++ ASSERT_COND(p_Manip->manipParams.h_NextManip == p_Manip->h_NextManip);
++ memcpy((uint8_t*)&p_Manip->manipParams, p_ManipParams,
++ sizeof(p_Manip->manipParams));
++ p_Manip->manipParams.h_NextManip = p_Manip->h_NextManip;
++
++ /* The replacement of the HdrManip depends on the node type.*/
++ /*
++ * (1) If this is an independent node, all its owners should be updated.
++ *
++ * (2) If it is the head of a cascaded chain (it does not have a "prev" but
++ * it has a "next" and it has a "cascaded" indication), the next
++ * node remains unchanged, and the behavior is as in (1).
++ *
++ * (3) If it is not the head but part of a cascaded chain, it can also be
++ * replaced as a regular node with just one owner.
++ *
++ * (4) If it is a part of a chain implemented as a unified table, the
++ * whole table is replaced and the owners of the head node must be updated.
++ *
++ */
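++ /* Illustrative sketch (not part of the original flow): how a caller might
++ * build the cascaded chain referred to in cases (2)-(4), assuming the
++ * public FM_PCD_ManipNodeSet() entry point; hTail/hHead are hypothetical:
++ *
++ * tailParams.h_NextManip = NULL;
++ * hTail = FM_PCD_ManipNodeSet(h_FmPcd, &tailParams);
++ * headParams.h_NextManip = hTail; // marks the chain as cascaded
++ * hHead = FM_PCD_ManipNodeSet(h_FmPcd, &headParams);
++ *
++ * Replacing hHead then matches case (2); replacing hTail matches case (3). */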
++ /* lock shadow */
++ if (!p_FmPcd->p_CcShadow)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));
++
++ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
++ return ERROR_CODE(E_BUSY);
++
++ /* this routine creates a new manip action in the CC Shadow. */
++ err = CreateManipActionShadow(p_Manip, p_ManipParams);
++ if (err)
++ {
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ /* If the owners list is empty (this is NOT the "owners" counter, but the list
++ * of pointers from CC), only the HMTD is replaced and no lock is required.
++ * Otherwise, lock the whole PCD.
++ * In case (4), MANIP_IS_UNIFIED_NON_FIRST(p_Manip) - use the head node instead. */
++ if (!FmPcdLockTryLockAll(p_FmPcd))
++ {
++ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ return ERROR_CODE(E_BUSY);
++ }
++
++ p_ShadowHmct = (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow, 16);
++
++ p_FirstManip = (t_FmPcdManip*)GetManipInfo(p_Manip,
++ e_MANIP_HANDLER_TABLE_OWNER);
++ ASSERT_COND(p_FirstManip);
++
++ if (!LIST_IsEmpty(&p_FirstManip->nodesLst))
++ UpdateAdPtrOfNodesWhichPointsOnCrntMdfManip(
++ p_FirstManip, &lstOfNodeshichPointsOnCrntMdfManip);
++
++ p_Hmtd = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMTD);
++ ASSERT_COND(p_Hmtd);
++ BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_Hmtd, p_ShadowHmct,
++ ((t_FmPcd*)(p_Manip->h_FmPcd)));
++
++ LIST_FOR_EACH(p_Pos, &lstOfNodeshichPointsOnCrntMdfManip)
++ {
++ p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
++ BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_CcNodeInfo->h_CcNode,
++ p_ShadowHmct, ((t_FmPcd*)(p_Manip->h_FmPcd)));
++ }
++
++ p_WholeHmct = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMCT);
++ ASSERT_COND(p_WholeHmct);
++
++ /* re-build the HMCT in the original location */
++ err = CreateManipActionBackToOrig(p_Manip, p_ManipParams);
++ if (err)
++ {
++ FmPcdLockUnlockAll(p_FmPcd);
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ p_Hmtd = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMTD);
++ ASSERT_COND(p_Hmtd);
++ BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_Hmtd, p_WholeHmct,
++ ((t_FmPcd*)p_Manip->h_FmPcd));
++
++ /* If LIST > 0, create a list of p_Ad's that point to the HMCT. Join also
++ * t_HMTD to this list. For each p_Hmct (from list + fixed):
++ * call Host Command to replace the HMTD by a new one. */
++ LIST_FOR_EACH(p_Pos, &lstOfNodeshichPointsOnCrntMdfManip)
++ {
++ p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
++ BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_CcNodeInfo->h_CcNode,
++ p_WholeHmct, ((t_FmPcd*)(p_Manip->h_FmPcd)));
++ }
++
++
++ ReleaseLst(&lstOfNodeshichPointsOnCrntMdfManip);
++
++ FmPcdLockUnlockAll(p_FmPcd);
++
++ /* unlock shadow */
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++
++ return E_OK;
++}
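++
++/* Usage sketch (illustrative only): replacing the contents of an existing
++ * header-manipulation node in place, assuming h_HdrManip was previously
++ * created with type e_FM_PCD_MANIP_HDR and newParams is an updated
++ * t_FmPcdManipParams of the same type:
++ *
++ * err = FM_PCD_ManipNodeReplace(h_HdrManip, &newParams);
++ * if (err != E_OK)
++ * ; // the node keeps its previous HMCT; on E_BUSY the caller may retry
++ */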
++
++t_Error FM_PCD_ManipNodeDelete(t_Handle h_ManipNode)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_ManipNode;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
++
++ if (p_Manip->owner)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("This manipulation node cannot be removed while it is still in use; unbind it first"));
++
++ if (p_Manip->h_NextManip)
++ {
++ MANIP_SET_PREV(p_Manip->h_NextManip, NULL);
++ FmPcdManipUpdateOwner(p_Manip->h_NextManip, FALSE);
++ }
++
++ if (p_Manip->p_Hmct
++ && (MANIP_IS_UNIFIED_FIRST(p_Manip) || !MANIP_IS_UNIFIED(p_Manip)))
++ FM_MURAM_FreeMem(((t_FmPcd *)p_Manip->h_FmPcd)->h_FmMuram,
++ p_Manip->p_Hmct);
++
++ if (p_Manip->h_Spinlock)
++ {
++ XX_FreeSpinlock(p_Manip->h_Spinlock);
++ p_Manip->h_Spinlock = NULL;
++ }
++
++ ReleaseManipHandler(p_Manip, p_Manip->h_FmPcd);
++
++ XX_Free(h_ManipNode);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_ManipGetStatistics(t_Handle h_ManipNode,
++ t_FmPcdManipStats *p_FmPcdManipStats)
++{
++ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_ManipNode;
++
++ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdManipStats, E_NULL_POINTER);
++
++ switch (p_Manip->opcode)
++ {
++ case (HMAN_OC_IP_REASSEMBLY):
++ return IpReassemblyStats(p_Manip,
++ &p_FmPcdManipStats->u.reassem.u.ipReassem);
++ case (HMAN_OC_IP_FRAGMENTATION):
++ return IpFragmentationStats(p_Manip,
++ &p_FmPcdManipStats->u.frag.u.ipFrag);
++#if (DPAA_VERSION >= 11)
++ case (HMAN_OC_CAPWAP_REASSEMBLY):
++ return CapwapReassemblyStats(
++ p_Manip, &p_FmPcdManipStats->u.reassem.u.capwapReassem);
++ case (HMAN_OC_CAPWAP_FRAGMENTATION):
++ return CapwapFragmentationStats(
++ p_Manip, &p_FmPcdManipStats->u.frag.u.capwapFrag);
++#endif /* (DPAA_VERSION >= 11) */
++ default:
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++ ("no statistics to this type of manip"));
++ }
++
++ return E_OK;
++}
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++t_Handle FM_PCD_StatisticsSetNode(t_Handle h_FmPcd, t_FmPcdStatsParams *p_StatsParams)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_FmPcdManip *p_Manip;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE,NULL);
++ SANITY_CHECK_RETURN_VALUE(p_StatsParams,E_INVALID_HANDLE,NULL);
++
++ p_Manip = ManipOrStatsSetNode(h_FmPcd, (t_Handle)p_StatsParams, TRUE);
++ if (!p_Manip)
++ return NULL;
++
++ switch (p_Manip->opcode)
++ {
++ case (HMAN_OC_CAPWAP_INDEXED_STATS):
++ /* Indexed statistics */
++ err = IndxStats(p_StatsParams, p_Manip, p_FmPcd);
++ break;
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED Statistics type"));
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ ReleaseManipHandler(p_Manip, p_FmPcd);
++ XX_Free(p_Manip);
++ return NULL;
++ }
++
++ return p_Manip;
++}
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.h
+new file mode 100644
+index 00000000..853bb834
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.h
+@@ -0,0 +1,555 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_manip.h
++
++ @Description FM PCD manip...
++*//***************************************************************************/
++#ifndef __FM_MANIP_H
++#define __FM_MANIP_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++
++#include "fm_cc.h"
++
++
++/***********************************************************************/
++/* Header manipulations defines */
++/***********************************************************************/
++
++#define NUM_OF_SCRATCH_POOL_BUFFERS 1000 /*TODO - Change it!!*/
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++#define HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR 0x2e
++#define HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER 0x31
++#define HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX 0x2f
++#define HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST 0x30
++#define HMAN_OC_CAPWAP_REASSEMBLY 0x11 /* dummy */
++#define HMAN_OC_CAPWAP_INDEXED_STATS 0x32 /* dummy */
++#define HMAN_OC_CAPWAP_FRAGMENTATION 0x33
++#else
++#define HMAN_OC_CAPWAP_MANIP 0x2F
++#define HMAN_OC_CAPWAP_FRAG_CHECK 0x2E
++#define HMAN_OC_CAPWAP_FRAGMENTATION 0x33
++#define HMAN_OC_CAPWAP_REASSEMBLY 0x30
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++#define HMAN_OC_IP_MANIP 0x34
++#define HMAN_OC_IP_FRAGMENTATION 0x74
++#define HMAN_OC_IP_REASSEMBLY 0xB4
++#define HMAN_OC_IPSEC_MANIP 0xF4
++#define HMAN_OC 0x35
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++#define HMAN_RMV_HDR 0x80000000
++#define HMAN_INSRT_INT_FRM_HDR 0x40000000
++
++#define UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP 6
++#define UDP_CHECKSUM_FIELD_SIZE 2
++#define UDP_LENGTH_FIELD_OFFSET_FROM_UDP 4
++
++#define IPv4_DSCECN_FIELD_OFFSET_FROM_IP 1
++#define IPv4_TOTALLENGTH_FIELD_OFFSET_FROM_IP 2
++#define IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP 10
++#define VLAN_TAG_FIELD_OFFSET_FROM_ETH 12
++#define IPv4_ID_FIELD_OFFSET_FROM_IP 4
++
++#define IPv6_PAYLOAD_LENGTH_OFFSET_FROM_IP 4
++#define IPv6_NEXT_HEADER_OFFSET_FROM_IP 6
++
++#define FM_PCD_MANIP_CAPWAP_REASM_TABLE_SIZE 0x80
++#define FM_PCD_MANIP_CAPWAP_REASM_TABLE_ALIGN 8
++#define FM_PCD_MANIP_CAPWAP_REASM_RFD_SIZE 32
++#define FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE 4
++#define FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_ENTRY_SIZE 8
++
++
++#define FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_BETWEEN_FRAMES 0x40000000
++#define FM_PCD_MANIP_CAPWAP_REASM_HALT_ON_DUPLICATE_FRAG 0x10000000
++#define FM_PCD_MANIP_CAPWAP_REASM_AUTOMATIC_LEARNIN_HASH_8_WAYS 0x08000000
++#define FM_PCD_MANIP_CAPWAP_REASM_PR_COPY 0x00800000
++
++#define FM_PCD_MANIP_CAPWAP_FRAG_COMPR_OPTION_FIELD_EN 0x80000000
++
++#define FM_PCD_MANIP_INDEXED_STATS_ENTRY_SIZE 4
++#define FM_PCD_MANIP_INDEXED_STATS_CNIA 0x20000000
++#define FM_PCD_MANIP_INDEXED_STATS_DPD 0x10000000
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++
++#if (DPAA_VERSION >= 11)
++#define FM_PCD_MANIP_CAPWAP_DTLS 0x00040000
++#define FM_PCD_MANIP_CAPWAP_NADEN 0x20000000
++
++#define FM_PCD_MANIP_CAPWAP_FRAG_CHECK_MTU_SHIFT 16
++#define FM_PCD_MANIP_CAPWAP_FRAG_CHECK_NO_FRAGMENTATION 0xFFFF0000
++#define FM_PCD_MANIP_CAPWAP_FRAG_CHECK_CNIA 0x20000000
++
++#define FM_PCD_MANIP_CAPWAP_FRAG_COMPRESS_EN 0x04000000
++#define FM_PCD_MANIP_CAPWAP_FRAG_SCRATCH_BPID 24
++#define FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_EN 0x08000000
++#define FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_MASK 0xFF000000
++#define FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_SHIFT 24
++#endif /* (DPAA_VERSION >= 11) */
++
++#define FM_PCD_MANIP_REASM_TABLE_SIZE 0x40
++#define FM_PCD_MANIP_REASM_TABLE_ALIGN 8
++
++#define FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_SIZE 64
++#define FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_ALIGN 8
++#define FM_PCD_MANIP_REASM_TIME_OUT_BETWEEN_FRAMES 0x80000000
++#define FM_PCD_MANIP_REASM_COUPLING_ENABLE 0x40000000
++#define FM_PCD_MANIP_REASM_COUPLING_MASK 0xFF000000
++#define FM_PCD_MANIP_REASM_COUPLING_SHIFT 24
++#define FM_PCD_MANIP_REASM_LIODN_MASK 0x0000003F
++#define FM_PCD_MANIP_REASM_LIODN_SHIFT 56
++#define FM_PCD_MANIP_REASM_ELIODN_MASK 0x000003c0
++#define FM_PCD_MANIP_REASM_ELIODN_SHIFT 38
++#define FM_PCD_MANIP_REASM_COMMON_INT_BUFFER_IDX_MASK 0x000000FF
++#define FM_PCD_MANIP_REASM_COMMON_INT_BUFFER_IDX_SHIFT 24
++#define FM_PCD_MANIP_REASM_TIMEOUT_THREAD_THRESH 1024
++
++#define FM_PCD_MANIP_IP_MTU_SHIFT 16
++#define FM_PCD_MANIP_IP_NO_FRAGMENTATION 0xFFFF0000
++#define FM_PCD_MANIP_IP_CNIA 0x20000000
++
++#define FM_PCD_MANIP_IP_FRAG_DF_SHIFT 28
++#define FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID 24
++#define FM_PCD_MANIP_IP_FRAG_SG_BDID_EN 0x08000000
++#define FM_PCD_MANIP_IP_FRAG_SG_BDID_MASK 0xFF000000
++#define FM_PCD_MANIP_IP_FRAG_SG_BDID_SHIFT 24
++
++#define FM_PCD_MANIP_IPSEC_DEC 0x10000000
++#define FM_PCD_MANIP_IPSEC_VIPV_EN 0x08000000
++#define FM_PCD_MANIP_IPSEC_ECN_EN 0x04000000
++#define FM_PCD_MANIP_IPSEC_DSCP_EN 0x02000000
++#define FM_PCD_MANIP_IPSEC_VIPL_EN 0x01000000
++#define FM_PCD_MANIP_IPSEC_NADEN 0x20000000
++
++#define FM_PCD_MANIP_IPSEC_IP_HDR_LEN_MASK 0x00FF0000
++#define FM_PCD_MANIP_IPSEC_IP_HDR_LEN_SHIFT 16
++
++#define FM_PCD_MANIP_IPSEC_ARW_SIZE_MASK 0xFFFF0000
++#define FM_PCD_MANIP_IPSEC_ARW_SIZE_SHIFT 16
++
++#define e_FM_MANIP_IP_INDX 1
++
++#define HMCD_OPCODE_GENERIC_RMV 0x01
++#define HMCD_OPCODE_GENERIC_INSRT 0x02
++#define HMCD_OPCODE_GENERIC_REPLACE 0x05
++#define HMCD_OPCODE_L2_RMV 0x08
++#define HMCD_OPCODE_L2_INSRT 0x09
++#define HMCD_OPCODE_VLAN_PRI_UPDATE 0x0B
++#define HMCD_OPCODE_IPV4_UPDATE 0x0C
++#define HMCD_OPCODE_IPV6_UPDATE 0x10
++#define HMCD_OPCODE_TCP_UDP_UPDATE 0x0E
++#define HMCD_OPCODE_TCP_UDP_CHECKSUM 0x14
++#define HMCD_OPCODE_REPLACE_IP 0x12
++#define HMCD_OPCODE_RMV_TILL 0x15
++#define HMCD_OPCODE_UDP_INSRT 0x16
++#define HMCD_OPCODE_IP_INSRT 0x17
++#define HMCD_OPCODE_CAPWAP_RMV 0x18
++#define HMCD_OPCODE_CAPWAP_INSRT 0x18
++#define HMCD_OPCODE_GEN_FIELD_REPLACE 0x19
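++/* Note: HMCD_OPCODE_CAPWAP_RMV and HMCD_OPCODE_CAPWAP_INSRT share opcode 0x18;
++ * the HMCD_CAPWAP_INSRT bit in the command word appears to select insertion. */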
++
++#define HMCD_LAST 0x00800000
++
++#define HMCD_DSCP_VALUES 64
++
++#define HMCD_BASIC_SIZE 4
++#define HMCD_PTR_SIZE 4
++#define HMCD_PARAM_SIZE 4
++#define HMCD_IPV4_ADDR_SIZE 4
++#define HMCD_IPV6_ADDR_SIZE 0x10
++#define HMCD_L4_HDR_SIZE 8
++
++#define HMCD_CAPWAP_INSRT 0x00010000
++#define HMCD_INSRT_UDP_LITE 0x00010000
++#define HMCD_IP_ID_MASK 0x0000FFFF
++#define HMCD_IP_SIZE_MASK 0x0000FF00
++#define HMCD_IP_SIZE_SHIFT 8
++#define HMCD_IP_LAST_PID_MASK 0x000000FF
++#define HMCD_IP_OR_QOS 0x00010000
++#define HMCD_IP_L4_CS_CALC 0x00040000
++#define HMCD_IP_DF_MODE 0x00400000
++
++
++#define HMCD_OC_SHIFT 24
++
++#define HMCD_RMV_OFFSET_SHIFT 0
++#define HMCD_RMV_SIZE_SHIFT 8
++
++#define HMCD_INSRT_OFFSET_SHIFT 0
++#define HMCD_INSRT_SIZE_SHIFT 8
++
++#define HMTD_CFG_TYPE 0x4000
++#define HMTD_CFG_EXT_HMCT 0x0080
++#define HMTD_CFG_PRS_AFTER_HM 0x0040
++#define HMTD_CFG_NEXT_AD_EN 0x0020
++
++#define HMCD_RMV_L2_ETHERNET 0
++#define HMCD_RMV_L2_STACKED_QTAGS 1
++#define HMCD_RMV_L2_ETHERNET_AND_MPLS 2
++#define HMCD_RMV_L2_MPLS 3
++#define HMCD_RMV_L2_PPPOE 4
++
++#define HMCD_INSRT_L2_MPLS 0
++#define HMCD_INSRT_N_UPDATE_L2_MPLS 1
++#define HMCD_INSRT_L2_PPPOE 2
++#define HMCD_INSRT_L2_SIZE_SHIFT 24
++
++#define HMCD_L2_MODE_SHIFT 16
++
++#define HMCD_VLAN_PRI_REP_MODE_SHIFT 16
++#define HMCD_VLAN_PRI_UPDATE 0
++#define HMCD_VLAN_PRI_UPDATE_DSCP_TO_VPRI 1
++
++#define HMCD_IPV4_UPDATE_TTL 0x00000001
++#define HMCD_IPV4_UPDATE_TOS 0x00000002
++#define HMCD_IPV4_UPDATE_DST 0x00000020
++#define HMCD_IPV4_UPDATE_SRC 0x00000040
++#define HMCD_IPV4_UPDATE_ID 0x00000080
++#define HMCD_IPV4_UPDATE_TOS_SHIFT 8
++
++#define HMCD_IPV6_UPDATE_HL 0x00000001
++#define HMCD_IPV6_UPDATE_TC 0x00000002
++#define HMCD_IPV6_UPDATE_DST 0x00000040
++#define HMCD_IPV6_UPDATE_SRC 0x00000080
++#define HMCD_IPV6_UPDATE_TC_SHIFT 8
++
++#define HMCD_TCP_UDP_UPDATE_DST 0x00004000
++#define HMCD_TCP_UDP_UPDATE_SRC 0x00008000
++#define HMCD_TCP_UDP_UPDATE_SRC_SHIFT 16
++
++#define HMCD_IP_REPLACE_REPLACE_IPV4 0x00000000
++#define HMCD_IP_REPLACE_REPLACE_IPV6 0x00010000
++#define HMCD_IP_REPLACE_TTL_HL 0x00200000
++#define HMCD_IP_REPLACE_ID 0x00400000
++
++#define HMCD_IP_REPLACE_L3HDRSIZE_SHIFT 24
++
++#define HMCD_GEN_FIELD_SIZE_SHIFT 16
++#define HMCD_GEN_FIELD_SRC_OFF_SHIFT 8
++#define HMCD_GEN_FIELD_DST_OFF_SHIFT 0
++#define HMCD_GEN_FIELD_MASK_EN 0x00400000
++
++#define HMCD_GEN_FIELD_MASK_OFF_SHIFT 16
++#define HMCD_GEN_FIELD_MASK_SHIFT 24
++
++#define DSCP_TO_VLAN_TABLE_SIZE 32
++
++#define MANIP_GET_HMCT_SIZE(h_Manip) (((t_FmPcdManip *)h_Manip)->tableSize)
++#define MANIP_GET_DATA_SIZE(h_Manip) (((t_FmPcdManip *)h_Manip)->dataSize)
++
++#define MANIP_GET_HMCT_PTR(h_Manip) (((t_FmPcdManip *)h_Manip)->p_Hmct)
++#define MANIP_GET_DATA_PTR(h_Manip) (((t_FmPcdManip *)h_Manip)->p_Data)
++
++#define MANIP_SET_HMCT_PTR(h_Manip, h_NewPtr) (((t_FmPcdManip *)h_Manip)->p_Hmct = h_NewPtr)
++#define MANIP_SET_DATA_PTR(h_Manip, h_NewPtr) (((t_FmPcdManip *)h_Manip)->p_Data = h_NewPtr)
++
++#define MANIP_GET_HMTD_PTR(h_Manip) (((t_FmPcdManip *)h_Manip)->h_Ad)
++#define MANIP_DONT_REPARSE(h_Manip) (((t_FmPcdManip *)h_Manip)->dontParseAfterManip)
++#define MANIP_SET_PREV(h_Manip, h_Prev) (((t_FmPcdManip *)h_Manip)->h_PrevManip = h_Prev)
++#define MANIP_GET_OWNERS(h_Manip) (((t_FmPcdManip *)h_Manip)->owner)
++#define MANIP_GET_TYPE(h_Manip) (((t_FmPcdManip *)h_Manip)->type)
++#define MANIP_SET_UNIFIED_TBL_PTR_INDICATION(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedTablePtr = TRUE)
++#define MANIP_GET_MURAM(h_Manip) (((t_FmPcd *)((t_FmPcdManip *)h_Manip)->h_FmPcd)->h_FmMuram)
++#define MANIP_FREE_HMTD(h_Manip) \
++ {if (((t_FmPcdManip *)h_Manip)->muramAllocate) \
++ FM_MURAM_FreeMem(((t_FmPcd *)((t_FmPcdManip *)h_Manip)->h_FmPcd)->h_FmMuram, ((t_FmPcdManip *)h_Manip)->h_Ad);\
++ else \
++ XX_Free(((t_FmPcdManip *)h_Manip)->h_Ad); \
++ ((t_FmPcdManip *)h_Manip)->h_Ad = NULL; \
++ }
++/* position regarding Manip SW structure */
++#define MANIP_IS_FIRST(h_Manip) (!(((t_FmPcdManip *)h_Manip)->h_PrevManip))
++#define MANIP_IS_CASCADED(h_Manip) (((t_FmPcdManip *)h_Manip)->cascaded)
++#define MANIP_IS_UNIFIED(h_Manip) (!(((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_NONE))
++#define MANIP_IS_UNIFIED_NON_FIRST(h_Manip) ((((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_MID) || \
++ (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_LAST))
++#define MANIP_IS_UNIFIED_NON_LAST(h_Manip) ((((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_FIRST) ||\
++ (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_MID))
++#define MANIP_IS_UNIFIED_FIRST(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_FIRST)
++#define MANIP_IS_UNIFIED_LAST(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_LAST)
++
++#define MANIP_UPDATE_UNIFIED_POSITION(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedPosition = \
++ (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_NONE)? \
++ e_MANIP_UNIFIED_LAST : e_MANIP_UNIFIED_MID)
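++/* Worked example of the transition above: a node starts as e_MANIP_UNIFIED_NONE;
++ * the first update makes it e_MANIP_UNIFIED_LAST (the tail of a unified table),
++ * and any further update demotes it to e_MANIP_UNIFIED_MID. */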
++
++typedef enum e_ManipUnifiedPosition {
++ e_MANIP_UNIFIED_NONE = 0,
++ e_MANIP_UNIFIED_FIRST,
++ e_MANIP_UNIFIED_MID,
++ e_MANIP_UNIFIED_LAST
++} e_ManipUnifiedPosition;
++
++typedef enum e_ManipInfo {
++ e_MANIP_HMTD,
++ e_MANIP_HMCT,
++ e_MANIP_HANDLER_TABLE_OWNER
++}e_ManipInfo;
++/***********************************************************************/
++/* Memory map */
++/***********************************************************************/
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++typedef struct t_CapwapReasmPram {
++ volatile uint32_t mode;
++ volatile uint32_t autoLearnHashTblPtr;
++ volatile uint32_t intStatsTblPtr;
++ volatile uint32_t reasmFrmDescPoolTblPtr;
++ volatile uint32_t reasmFrmDescIndexPoolTblPtr;
++ volatile uint32_t timeOutTblPtr;
++ volatile uint32_t bufferPoolIdAndRisc1SetIndexes;
++ volatile uint32_t risc23SetIndexes;
++ volatile uint32_t risc4SetIndexesAndExtendedStatsTblPtr;
++ volatile uint32_t extendedStatsTblPtr;
++ volatile uint32_t expirationDelay;
++ volatile uint32_t totalProcessedFragCounter;
++ volatile uint32_t totalUnsuccessfulReasmFramesCounter;
++ volatile uint32_t totalDuplicatedFragCounter;
++ volatile uint32_t totalMalformdFragCounter;
++ volatile uint32_t totalTimeOutCounter;
++ volatile uint32_t totalSetBusyCounter;
++ volatile uint32_t totalRfdPoolBusyCounter;
++ volatile uint32_t totalDiscardedFragsCounter;
++ volatile uint32_t totalMoreThan16FramesCounter;
++ volatile uint32_t internalBufferBusy;
++ volatile uint32_t externalBufferBusy;
++ volatile uint32_t reserved1[4];
++} t_CapwapReasmPram;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++
++typedef _Packed struct t_ReassTbl {
++ volatile uint16_t waysNumAndSetSize;
++ volatile uint16_t autoLearnHashKeyMask;
++ volatile uint32_t reassCommonPrmTblPtr;
++ volatile uint32_t liodnAlAndAutoLearnHashTblPtrHi;
++ volatile uint32_t autoLearnHashTblPtrLow;
++ volatile uint32_t liodnSlAndAutoLearnSetLockTblPtrHi;
++ volatile uint32_t autoLearnSetLockTblPtrLow;
++ volatile uint16_t minFragSize; /* Not relevant for CAPWAP*/
++ volatile uint16_t maxReassemblySize; /* Only relevant for CAPWAP*/
++ volatile uint32_t totalSuccessfullyReasmFramesCounter;
++ volatile uint32_t totalValidFragmentCounter;
++ volatile uint32_t totalProcessedFragCounter;
++ volatile uint32_t totalMalformdFragCounter;
++ volatile uint32_t totalSetBusyCounter;
++ volatile uint32_t totalDiscardedFragsCounter;
++ volatile uint32_t totalMoreThan16FramesCounter;
++ volatile uint32_t reserved2[2];
++} _PackedType t_ReassTbl;
++
++typedef struct t_ReassCommonTbl {
++ volatile uint32_t timeoutModeAndFqid;
++ volatile uint32_t reassFrmDescIndexPoolTblPtr;
++ volatile uint32_t liodnAndReassFrmDescPoolPtrHi;
++ volatile uint32_t reassFrmDescPoolPtrLow;
++ volatile uint32_t timeOutTblPtr;
++ volatile uint32_t expirationDelay;
++ volatile uint32_t internalBufferManagement;
++ volatile uint32_t reserved2;
++ volatile uint32_t totalTimeOutCounter;
++ volatile uint32_t totalRfdPoolBusyCounter;
++ volatile uint32_t totalInternalBufferBusy;
++ volatile uint32_t totalExternalBufferBusy;
++ volatile uint32_t totalSgFragmentCounter;
++ volatile uint32_t totalDmaSemaphoreDepletionCounter;
++ volatile uint32_t totalNCSPCounter;
++ volatile uint32_t discardMask;
++} t_ReassCommonTbl;
++
++typedef _Packed struct t_Hmtd {
++ volatile uint16_t cfg;
++ volatile uint8_t eliodnOffset;
++ volatile uint8_t extHmcdBasePtrHi;
++ volatile uint32_t hmcdBasePtr;
++ volatile uint16_t nextAdIdx;
++ volatile uint8_t res1;
++ volatile uint8_t opCode;
++ volatile uint32_t res2;
++} _PackedType t_Hmtd;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++/***********************************************************************/
++/* Driver's internal structures */
++/***********************************************************************/
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++typedef struct
++{
++ t_Handle p_AutoLearnHashTbl;
++ t_Handle p_ReassmFrmDescrPoolTbl;
++ t_Handle p_ReassmFrmDescrIndxPoolTbl;
++ t_Handle p_TimeOutTbl;
++ uint16_t maxNumFramesInProcess;
++ uint8_t numOfTasks;
++ //uint8_t poolId;
++ uint8_t prOffset;
++ uint16_t dataOffset;
++ uint8_t sgBpid;
++ uint8_t hwPortId;
++ uint32_t fqidForTimeOutFrames;
++ uint32_t timeoutRoutineRequestTime;
++ uint32_t bitFor1Micro;
++} t_CapwapFragParams;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++
++typedef struct
++{
++ t_AdOfTypeContLookup *p_Frag;
++#if (DPAA_VERSION == 10)
++ uint8_t scratchBpid;
++#endif /* (DPAA_VERSION == 10) */
++} t_FragParams;
++
++typedef struct t_ReassmParams
++{
++ e_NetHeaderType hdr; /* Header selection */
++ t_ReassCommonTbl *p_ReassCommonTbl;
++ uintptr_t reassFrmDescrIndxPoolTblAddr;
++ uintptr_t reassFrmDescrPoolTblAddr;
++ uintptr_t timeOutTblAddr;
++ uintptr_t internalBufferPoolManagementIndexAddr;
++ uintptr_t internalBufferPoolAddr;
++ uint32_t maxNumFramesInProcess;
++ uint8_t sgBpid;
++ uint8_t dataMemId;
++ uint16_t dataLiodnOffset;
++ uint32_t fqidForTimeOutFrames;
++ e_FmPcdManipReassemTimeOutMode timeOutMode;
++ uint32_t timeoutThresholdForReassmProcess;
++ union {
++ struct {
++ t_Handle h_Ipv4Ad;
++ t_Handle h_Ipv6Ad;
++ bool ipv6Assigned;
++ t_ReassTbl *p_Ipv4ReassTbl;
++ t_ReassTbl *p_Ipv6ReassTbl;
++ uintptr_t ipv4AutoLearnHashTblAddr;
++ uintptr_t ipv6AutoLearnHashTblAddr;
++ uintptr_t ipv4AutoLearnSetLockTblAddr;
++ uintptr_t ipv6AutoLearnSetLockTblAddr;
++ uint16_t minFragSize[2];
++ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry[2];
++ uint8_t relativeSchemeId[2];
++ t_Handle h_Ipv4Scheme;
++ t_Handle h_Ipv6Scheme;
++ uint32_t nonConsistentSpFqid;
++ } ip;
++ struct {
++ t_Handle h_Ad;
++ t_ReassTbl *p_ReassTbl;
++ uintptr_t autoLearnHashTblAddr;
++ uintptr_t autoLearnSetLockTblAddr;
++ uint16_t maxRessembledsSize;
++ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry;
++ uint8_t relativeSchemeId;
++ t_Handle h_Scheme;
++ } capwap;
++ };
++} t_ReassmParams;
++
++typedef struct{
++ e_FmPcdManipType type;
++ t_FmPcdManipParams manipParams;
++ bool muramAllocate;
++ t_Handle h_Ad;
++ uint32_t opcode;
++ bool rmv;
++ bool insrt;
++ t_Handle h_NextManip;
++ t_Handle h_PrevManip;
++ e_FmPcdManipType nextManipType;
++ /* HdrManip parameters*/
++ uint8_t *p_Hmct;
++ uint8_t *p_Data;
++ bool dontParseAfterManip;
++ bool fieldUpdate;
++ bool custom;
++ uint16_t tableSize;
++ uint8_t dataSize;
++ bool cascaded;
++ e_ManipUnifiedPosition unifiedPosition;
++ /* end HdrManip */
++ uint8_t *p_Template;
++ uint16_t owner;
++ uint32_t updateParams;
++ uint32_t shadowUpdateParams;
++ bool frag;
++ bool reassm;
++ uint16_t sizeForFragmentation;
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ t_Handle h_Frag;
++ t_CapwapFragParams capwapFragParams;
++#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
++ union {
++ t_ReassmParams reassmParams;
++ t_FragParams fragParams;
++ };
++ uint8_t icOffset;
++ uint16_t ownerTmp;
++ bool cnia;
++ t_Handle p_StatsTbl;
++ t_Handle h_FmPcd;
++ t_List nodesLst;
++ t_Handle h_Spinlock;
++} t_FmPcdManip;
++
++typedef struct t_FmPcdCcSavedManipParams
++{
++ union
++ {
++ struct
++ {
++ uint16_t dataOffset;
++ //uint8_t poolId;
++ }capwapParams;
++ struct
++ {
++ uint16_t dataOffset;
++ uint8_t poolId;
++ }ipParams;
++ };
++
++} t_FmPcdCcSavedManipParams;
++
++
++#endif /* __FM_MANIP_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c
+new file mode 100644
+index 00000000..91f70a1a
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c
+@@ -0,0 +1,2095 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_pcd.c
++
++ @Description FM PCD ...
++*//***************************************************************************/
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "xx_ext.h"
++#include "sprint_ext.h"
++#include "debug_ext.h"
++#include "net_ext.h"
++#include "fm_ext.h"
++#include "fm_pcd_ext.h"
++
++#include "fm_common.h"
++#include "fm_pcd.h"
++#include "fm_pcd_ipc.h"
++#include "fm_hc.h"
++#include "fm_muram_ext.h"
++
++
++/****************************************/
++/* static functions */
++/****************************************/
++
++static t_Error CheckFmPcdParameters(t_FmPcd *p_FmPcd)
++{
++ if (!p_FmPcd->h_Fm)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("h_Fm has to be initialized"));
++
++ if (p_FmPcd->guestId == NCSW_MASTER_ID)
++ {
++ if (p_FmPcd->p_FmPcdKg && !p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Keygen is configured but its registers are not mapped"));
++
++ if (p_FmPcd->p_FmPcdPlcr && !p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Policer is configured but its registers are not mapped"));
++
++ if (!p_FmPcd->f_Exception)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_FmPcdExceptions has to be initialized"));
++
++ if ((!p_FmPcd->f_FmPcdIndexedException) && (p_FmPcd->p_FmPcdPlcr || p_FmPcd->p_FmPcdKg))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_FmPcdIndexedException has to be initialized"));
++
++ if (p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit > PRS_MAX_CYCLE_LIMIT)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("prsMaxParseCycleLimit has to be less than 8191"));
++ }
++
++ return E_OK;
++}
++
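++/* blockingFlag + IpcMsgCompletionCB implement a blocking IPC send: FM_PCD_Init()
++ * sets the flag, issues XX_IpcSendMessage(), and busy-waits until this
++ * completion callback clears it, signalling that the reply buffer is valid. */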
++static volatile bool blockingFlag = FALSE;
++static void IpcMsgCompletionCB(t_Handle h_FmPcd,
++ uint8_t *p_Msg,
++ uint8_t *p_Reply,
++ uint32_t replyLength,
++ t_Error status)
++{
++ UNUSED(h_FmPcd);UNUSED(p_Msg);UNUSED(p_Reply);UNUSED(replyLength);UNUSED(status);
++ blockingFlag = FALSE;
++}
++
++static t_Error IpcMsgHandlerCB(t_Handle h_FmPcd,
++ uint8_t *p_Msg,
++ uint32_t msgLength,
++ uint8_t *p_Reply,
++ uint32_t *p_ReplyLength)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_Error err = E_OK;
++ t_FmPcdIpcMsg *p_IpcMsg = (t_FmPcdIpcMsg*)p_Msg;
++ t_FmPcdIpcReply *p_IpcReply = (t_FmPcdIpcReply*)p_Reply;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((msgLength >= sizeof(uint32_t)), E_INVALID_VALUE);
++
++#ifdef DISABLE_SANITY_CHECKS
++ UNUSED(msgLength);
++#endif /* DISABLE_SANITY_CHECKS */
++
++ ASSERT_COND(p_Msg);
++
++ memset(p_IpcReply, 0, (sizeof(uint8_t) * FM_PCD_MAX_REPLY_SIZE));
++ *p_ReplyLength = 0;
++
++ switch (p_IpcMsg->msgId)
++ {
++ case (FM_PCD_MASTER_IS_ALIVE):
++ *(uint8_t*)(p_IpcReply->replyBody) = 1;
++ p_IpcReply->error = E_OK;
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ break;
++ case (FM_PCD_MASTER_IS_ENABLED):
++ /* count partitions registrations */
++ if (p_FmPcd->enabled)
++ p_FmPcd->numOfEnabledGuestPartitionsPcds++;
++ *(uint8_t*)(p_IpcReply->replyBody) = (uint8_t)p_FmPcd->enabled;
++ p_IpcReply->error = E_OK;
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ break;
++ case (FM_PCD_GUEST_DISABLE):
++ if (p_FmPcd->numOfEnabledGuestPartitionsPcds)
++ {
++ p_FmPcd->numOfEnabledGuestPartitionsPcds--;
++ p_IpcReply->error = E_OK;
++ }
++ else
++ {
++ REPORT_ERROR(MINOR, E_INVALID_STATE,("Trying to disable an unregistered partition"));
++ p_IpcReply->error = E_INVALID_STATE;
++ }
++ *p_ReplyLength = sizeof(uint32_t);
++ break;
++ case (FM_PCD_GET_COUNTER):
++ {
++ e_FmPcdCounters inCounter;
++ uint32_t outCounter;
++
++ memcpy((uint8_t*)&inCounter, p_IpcMsg->msgBody, sizeof(uint32_t));
++ outCounter = FM_PCD_GetCounter(h_FmPcd, inCounter);
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&outCounter, sizeof(uint32_t));
++ p_IpcReply->error = E_OK;
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ break;
++ }
++ case (FM_PCD_ALLOC_KG_SCHEMES):
++ {
++ t_FmPcdIpcKgSchemesParams ipcSchemesParams;
++
++ memcpy((uint8_t*)&ipcSchemesParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgSchemesParams));
++ err = FmPcdKgAllocSchemes(h_FmPcd,
++ ipcSchemesParams.numOfSchemes,
++ ipcSchemesParams.guestId,
++ p_IpcReply->replyBody);
++ p_IpcReply->error = err;
++ *p_ReplyLength = sizeof(uint32_t) + ipcSchemesParams.numOfSchemes*sizeof(uint8_t);
++ break;
++ }
++ case (FM_PCD_FREE_KG_SCHEMES):
++ {
++ t_FmPcdIpcKgSchemesParams ipcSchemesParams;
++
++ memcpy((uint8_t*)&ipcSchemesParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgSchemesParams));
++ err = FmPcdKgFreeSchemes(h_FmPcd,
++ ipcSchemesParams.numOfSchemes,
++ ipcSchemesParams.guestId,
++ ipcSchemesParams.schemesIds);
++ p_IpcReply->error = err;
++ *p_ReplyLength = sizeof(uint32_t);
++ break;
++ }
++ case (FM_PCD_ALLOC_KG_CLSPLAN):
++ {
++ t_FmPcdIpcKgClsPlanParams ipcKgClsPlanParams;
++
++ memcpy((uint8_t*)&ipcKgClsPlanParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgClsPlanParams));
++ err = KgAllocClsPlanEntries(h_FmPcd,
++ ipcKgClsPlanParams.numOfClsPlanEntries,
++ ipcKgClsPlanParams.guestId,
++ p_IpcReply->replyBody);
++ p_IpcReply->error = err;
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ break;
++ }
++ case (FM_PCD_FREE_KG_CLSPLAN):
++ {
++ t_FmPcdIpcKgClsPlanParams ipcKgClsPlanParams;
++
++ memcpy((uint8_t*)&ipcKgClsPlanParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgClsPlanParams));
++ KgFreeClsPlanEntries(h_FmPcd,
++ ipcKgClsPlanParams.numOfClsPlanEntries,
++ ipcKgClsPlanParams.guestId,
++ ipcKgClsPlanParams.clsPlanBase);
++ *p_ReplyLength = sizeof(uint32_t);
++ break;
++ }
++ case (FM_PCD_ALLOC_PROFILES):
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ uint16_t base;
++ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
++ base = PlcrAllocProfilesForPartition(h_FmPcd,
++ ipcAllocParams.base,
++ ipcAllocParams.num,
++ ipcAllocParams.guestId);
++ memcpy(p_IpcReply->replyBody, (uint16_t*)&base, sizeof(uint16_t));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint16_t);
++ break;
++ }
++ case (FM_PCD_FREE_PROFILES):
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
++ PlcrFreeProfilesForPartition(h_FmPcd,
++ ipcAllocParams.base,
++ ipcAllocParams.num,
++ ipcAllocParams.guestId);
++ break;
++ }
++ case (FM_PCD_SET_PORT_PROFILES):
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
++ PlcrSetPortProfiles(h_FmPcd,
++ ipcAllocParams.guestId,
++ ipcAllocParams.num,
++ ipcAllocParams.base);
++ break;
++ }
++ case (FM_PCD_CLEAR_PORT_PROFILES):
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
++ PlcrClearPortProfiles(h_FmPcd,
++ ipcAllocParams.guestId);
++ break;
++ }
++ case (FM_PCD_GET_SW_PRS_OFFSET):
++ {
++ t_FmPcdIpcSwPrsLable ipcSwPrsLable;
++ uint32_t swPrsOffset;
++
++ memcpy((uint8_t*)&ipcSwPrsLable, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcSwPrsLable));
++ swPrsOffset =
++ FmPcdGetSwPrsOffset(h_FmPcd,
++ (e_NetHeaderType)ipcSwPrsLable.enumHdr,
++ ipcSwPrsLable.indexPerHdr);
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&swPrsOffset, sizeof(uint32_t));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ break;
++ }
++ case (FM_PCD_PRS_INC_PORT_STATS):
++ {
++ t_FmPcdIpcPrsIncludePort ipcPrsIncludePort;
++
++ memcpy((uint8_t*)&ipcPrsIncludePort, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcPrsIncludePort));
++ PrsIncludePortInStatistics(h_FmPcd,
++ ipcPrsIncludePort.hardwarePortId,
++ ipcPrsIncludePort.include);
++ break;
++ }
++ default:
++ *p_ReplyLength = 0;
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("IPC command not found"));
++ }
++ return E_OK;
++}
++
++static uint32_t NetEnvLock(t_Handle h_NetEnv)
++{
++ ASSERT_COND(h_NetEnv);
++ return XX_LockIntrSpinlock(((t_FmPcdNetEnv*)h_NetEnv)->h_Spinlock);
++}
++
++static void NetEnvUnlock(t_Handle h_NetEnv, uint32_t intFlags)
++{
++ ASSERT_COND(h_NetEnv);
++ XX_UnlockIntrSpinlock(((t_FmPcdNetEnv*)h_NetEnv)->h_Spinlock, intFlags);
++}
++
++static void EnqueueLockToFreeLst(t_FmPcd *p_FmPcd, t_FmPcdLock *p_Lock)
++{
++ uint32_t intFlags;
++
++ intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock);
++ LIST_AddToTail(&p_Lock->node, &p_FmPcd->freeLocksLst);
++ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
++}
++
++static t_FmPcdLock * DequeueLockFromFreeLst(t_FmPcd *p_FmPcd)
++{
++ t_FmPcdLock *p_Lock = NULL;
++ uint32_t intFlags;
++
++ intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock);
++ if (!LIST_IsEmpty(&p_FmPcd->freeLocksLst))
++ {
++ p_Lock = FM_PCD_LOCK_OBJ(p_FmPcd->freeLocksLst.p_Next);
++ LIST_DelAndInit(&p_Lock->node);
++ }
++ if (p_FmPcd->h_Spinlock)
++ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
++
++ return p_Lock;
++}
++
++static void EnqueueLockToAcquiredLst(t_FmPcd *p_FmPcd, t_FmPcdLock *p_Lock)
++{
++ uint32_t intFlags;
++
++ intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock);
++ LIST_AddToTail(&p_Lock->node, &p_FmPcd->acquiredLocksLst);
++ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
++}
++
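++/* Grows the free-lock pool by a fixed batch of 10 objects; called on demand
++ * from FmPcdAcquireLock() when the pool runs dry. */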
++static t_Error FillFreeLocksLst(t_FmPcd *p_FmPcd)
++{
++ t_FmPcdLock *p_Lock;
++ int i;
++
++ for (i=0; i<10; i++)
++ {
++ p_Lock = (t_FmPcdLock *)XX_Malloc(sizeof(t_FmPcdLock));
++ if (!p_Lock)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("FM-PCD lock obj!"));
++ memset(p_Lock, 0, sizeof(t_FmPcdLock));
++ INIT_LIST(&p_Lock->node);
++ p_Lock->h_Spinlock = XX_InitSpinlock();
++ if (!p_Lock->h_Spinlock)
++ {
++ XX_Free(p_Lock);
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("FM-PCD spinlock obj!"));
++ }
++ EnqueueLockToFreeLst(p_FmPcd, p_Lock);
++ }
++
++ return E_OK;
++}
++
++static void ReleaseFreeLocksLst(t_FmPcd *p_FmPcd)
++{
++ t_FmPcdLock *p_Lock;
++
++ p_Lock = DequeueLockFromFreeLst(p_FmPcd);
++ while (p_Lock)
++ {
++ XX_FreeSpinlock(p_Lock->h_Spinlock);
++ XX_Free(p_Lock);
++ p_Lock = DequeueLockFromFreeLst(p_FmPcd);
++ }
++}
++
++
++
++/*****************************************************************************/
++/* Inter-module API routines */
++/*****************************************************************************/
++
++void FmPcdSetClsPlanGrpId(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint8_t clsPlanGrpId)
++{
++ ASSERT_COND(p_FmPcd);
++ p_FmPcd->netEnvs[netEnvId].clsPlanGrpId = clsPlanGrpId;
++}
++
++t_Error PcdGetClsPlanGrpParams(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_GrpParams)
++{
++ uint8_t netEnvId = p_GrpParams->netEnvId;
++ int i, k, j;
++
++ ASSERT_COND(p_FmPcd);
++ if (p_FmPcd->netEnvs[netEnvId].clsPlanGrpId != ILLEGAL_CLS_PLAN)
++ {
++ p_GrpParams->grpExists = TRUE;
++ p_GrpParams->clsPlanGrpId = p_FmPcd->netEnvs[netEnvId].clsPlanGrpId;
++ return E_OK;
++ }
++
++ for (i=0; ((i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)); i++)
++ {
++ for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); k++)
++ {
++ /* if an option exists, add it to the opts list */
++ if (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt)
++ {
++ /* check if this option already exists, add if it doesn't */
++ for (j = 0;j<p_GrpParams->numOfOptions;j++)
++ {
++ if (p_GrpParams->options[j] == p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt)
++ break;
++ }
++ p_GrpParams->optVectors[j] |= p_FmPcd->netEnvs[netEnvId].unitsVectors[i];
++ if (j == p_GrpParams->numOfOptions)
++ {
++ p_GrpParams->options[p_GrpParams->numOfOptions] = p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt;
++ p_GrpParams->numOfOptions++;
++ }
++ }
++ }
++ }
++
++ if (p_GrpParams->numOfOptions == 0)
++ {
++ if (p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId != ILLEGAL_CLS_PLAN)
++ {
++ p_GrpParams->grpExists = TRUE;
++ p_GrpParams->clsPlanGrpId = p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId;
++ }
++ }
++
++ return E_OK;
++
++}
++
++t_Error PcdGetVectorForOpt(t_FmPcd *p_FmPcd, uint8_t netEnvId, protocolOpt_t opt, uint32_t *p_Vector)
++{
++ uint8_t j,k;
++
++ *p_Vector = 0;
++
++ ASSERT_COND(p_FmPcd);
++ for (j=0; ((j < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[0].hdr != HEADER_TYPE_NONE)); j++)
++ {
++ for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[k].hdr != HEADER_TYPE_NONE)); k++)
++ {
++ if (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[k].opt == opt)
++ *p_Vector |= p_FmPcd->netEnvs[netEnvId].unitsVectors[j];
++ }
++ }
++
++ if (!*p_Vector)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Requested option was not defined for this Network Environment Characteristics module"));
++ else
++ return E_OK;
++}
++
++t_Error PcdGetUnitsVector(t_FmPcd *p_FmPcd, t_NetEnvParams *p_Params)
++{
++ int i;
++
++ ASSERT_COND(p_FmPcd);
++ ASSERT_COND(p_Params->netEnvId < FM_MAX_NUM_OF_PORTS);
++
++ p_Params->vector = 0;
++ for (i=0; i<p_Params->numOfDistinctionUnits ;i++)
++ {
++ if (p_FmPcd->netEnvs[p_Params->netEnvId].units[p_Params->unitIds[i]].hdrs[0].hdr == HEADER_TYPE_NONE)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Requested unit was not defined for this Network Environment Characteristics module"));
++ ASSERT_COND(p_FmPcd->netEnvs[p_Params->netEnvId].unitsVectors[p_Params->unitIds[i]]);
++ p_Params->vector |= p_FmPcd->netEnvs[p_Params->netEnvId].unitsVectors[p_Params->unitIds[i]];
++ }
++
++ return E_OK;
++}
++
++bool PcdNetEnvIsUnitWithoutOpts(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint32_t unitVector)
++{
++ int i=0, k;
++
++ ASSERT_COND(p_FmPcd);
++ /* check whether a given unit may be used by non-clsPlan users. */
++ /* first, recognize the unit by its vector */
++ while (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)
++ {
++ if (p_FmPcd->netEnvs[netEnvId].unitsVectors[i] == unitVector)
++ {
++ for (k=0;
++ ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE));
++ k++)
++ /* check that no option exists */
++ if ((protocolOpt_t)p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt)
++ return FALSE;
++ break;
++ }
++ i++;
++ }
++ /* assert that a unit was found to match the vector */
++ ASSERT_COND(p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE);
++
++ return TRUE;
++}
++
++bool FmPcdNetEnvIsHdrExist(t_Handle h_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ int i, k;
++
++ ASSERT_COND(p_FmPcd);
++
++ for (i=0; ((i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)); i++)
++ {
++ for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); k++)
++ if (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr == hdr)
++ return TRUE;
++ }
++ for (i=0; ((i < FM_PCD_MAX_NUM_OF_ALIAS_HDRS) &&
++ (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE)); i++)
++ {
++ if (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr)
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++uint8_t FmPcdNetEnvGetUnitId(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr, bool interchangeable, protocolOpt_t opt)
++{
++ uint8_t i, k;
++
++ ASSERT_COND(p_FmPcd);
++
++ if (interchangeable)
++ {
++ for (i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
++ {
++ for (k=0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++)
++ {
++ if ((p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr == hdr) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt == opt))
++
++ return i;
++ }
++ }
++ }
++ else
++ {
++ for (i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
++ if ((p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr == hdr) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].opt == opt) &&
++ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[1].hdr == HEADER_TYPE_NONE))
++ return i;
++
++ for (i=0; (i < FM_PCD_MAX_NUM_OF_ALIAS_HDRS) &&
++ (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE); i++)
++ if ((p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr) &&
++ (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].opt == opt))
++ return p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].aliasHdr;
++ }
++
++ return FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS;
++}
++
++t_Error FmPcdUnregisterReassmPort(t_Handle h_FmPcd, t_Handle h_ReasmCommonPramTbl)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmPcdCcReassmTimeoutParams ccReassmTimeoutParams = {0};
++ uint8_t result;
++ t_Error err = E_OK;
++
++ ASSERT_COND(p_FmPcd);
++ ASSERT_COND(h_ReasmCommonPramTbl);
++
++ ccReassmTimeoutParams.iprcpt = (uint32_t)(XX_VirtToPhys(h_ReasmCommonPramTbl) - p_FmPcd->physicalMuramBase);
++ ccReassmTimeoutParams.activate = FALSE; /*Disable Timeout Task*/
++
++ if ((err = FmHcPcdCcTimeoutReassm(p_FmPcd->h_Hc, &ccReassmTimeoutParams, &result)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ switch (result)
++ {
++ case (0):
++ return E_OK;
++ case (1):
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, (""));
++ case (2):
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, (""));
++ case (3):
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("Disable Timeout Task with invalid IPRCPT"));
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++ }
++
++ return E_OK;
++}
++
++e_NetHeaderType FmPcdGetAliasHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr)
++{
++ int i;
++
++ ASSERT_COND(p_FmPcd);
++ ASSERT_COND(netEnvId < FM_MAX_NUM_OF_PORTS);
++
++ for (i=0; (i < FM_PCD_MAX_NUM_OF_ALIAS_HDRS)
++ && (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE); i++)
++ {
++ if (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr)
++ return p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].aliasHdr;
++ }
++
++ return HEADER_TYPE_NONE;
++}
++
++void FmPcdPortRegister(t_Handle h_FmPcd, t_Handle h_FmPort, uint8_t hardwarePortId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint16_t swPortIndex = 0;
++
++ ASSERT_COND(h_FmPcd);
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
++ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].h_FmPort = h_FmPort;
++}
++
++uint32_t FmPcdGetLcv(t_Handle h_FmPcd, uint32_t netEnvId, uint8_t hdrNum)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(h_FmPcd);
++ return p_FmPcd->netEnvs[netEnvId].lcvs[hdrNum];
++}
++
++uint32_t FmPcdGetMacsecLcv(t_Handle h_FmPcd, uint32_t netEnvId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(h_FmPcd);
++ return p_FmPcd->netEnvs[netEnvId].macsecVector;
++}
++
++uint8_t FmPcdGetNetEnvId(t_Handle h_NetEnv)
++{
++ return ((t_FmPcdNetEnv*)h_NetEnv)->netEnvId;
++}
++
++void FmPcdIncNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId)
++{
++ uint32_t intFlags;
++
++ ASSERT_COND(h_FmPcd);
++
++ intFlags = NetEnvLock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId]);
++ ((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners++;
++ NetEnvUnlock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId], intFlags);
++}
++
++void FmPcdDecNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId)
++{
++ uint32_t intFlags;
++
++ ASSERT_COND(h_FmPcd);
++ ASSERT_COND(((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners);
++
++ intFlags = NetEnvLock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId]);
++ ((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners--;
++ NetEnvUnlock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId], intFlags);
++}
++
++uint32_t FmPcdLock(t_Handle h_FmPcd)
++{
++ ASSERT_COND(h_FmPcd);
++ return XX_LockIntrSpinlock(((t_FmPcd*)h_FmPcd)->h_Spinlock);
++}
++
++void FmPcdUnlock(t_Handle h_FmPcd, uint32_t intFlags)
++{
++ ASSERT_COND(h_FmPcd);
++ XX_UnlockIntrSpinlock(((t_FmPcd*)h_FmPcd)->h_Spinlock, intFlags);
++}
++
++t_FmPcdLock * FmPcdAcquireLock(t_Handle h_FmPcd)
++{
++ t_FmPcdLock *p_Lock;
++ ASSERT_COND(h_FmPcd);
++ p_Lock = DequeueLockFromFreeLst((t_FmPcd*)h_FmPcd);
++ if (!p_Lock)
++ {
++ FillFreeLocksLst(h_FmPcd);
++ p_Lock = DequeueLockFromFreeLst((t_FmPcd*)h_FmPcd);
++ }
++
++ if (p_Lock)
++ EnqueueLockToAcquiredLst((t_FmPcd*)h_FmPcd, p_Lock);
++ return p_Lock;
++}
++
++void FmPcdReleaseLock(t_Handle h_FmPcd, t_FmPcdLock *p_Lock)
++{
++ uint32_t intFlags;
++ ASSERT_COND(h_FmPcd);
++ intFlags = FmPcdLock(h_FmPcd);
++ LIST_DelAndInit(&p_Lock->node);
++ FmPcdUnlock(h_FmPcd, intFlags);
++ EnqueueLockToFreeLst((t_FmPcd*)h_FmPcd, p_Lock);
++}
++
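++/* All-or-nothing acquisition: try-locks every lock on the acquired-locks list;
++ * on the first failure it rolls back the locks taken so far and returns FALSE,
++ * so the caller can back off and retry (callers typically return E_BUSY). */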
++bool FmPcdLockTryLockAll(t_Handle h_FmPcd)
++{
++ uint32_t intFlags;
++ t_List *p_Pos, *p_SavedPos=NULL;
++
++ ASSERT_COND(h_FmPcd);
++ intFlags = FmPcdLock(h_FmPcd);
++ LIST_FOR_EACH(p_Pos, &((t_FmPcd*)h_FmPcd)->acquiredLocksLst)
++ {
++ t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Pos);
++ if (!FmPcdLockTryLock(p_Lock))
++ {
++ p_SavedPos = p_Pos;
++ break;
++ }
++ }
++ if (p_SavedPos)
++ {
++ LIST_FOR_EACH(p_Pos, &((t_FmPcd*)h_FmPcd)->acquiredLocksLst)
++ {
++ t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Pos);
++ if (p_Pos == p_SavedPos)
++ break;
++ FmPcdLockUnlock(p_Lock);
++ }
++ }
++ FmPcdUnlock(h_FmPcd, intFlags);
++
++ CORE_MemoryBarrier();
++
++ if (p_SavedPos)
++ return FALSE;
++
++ return TRUE;
++}
++
++void FmPcdLockUnlockAll(t_Handle h_FmPcd)
++{
++ uint32_t intFlags;
++ t_List *p_Pos;
++
++ ASSERT_COND(h_FmPcd);
++ intFlags = FmPcdLock(h_FmPcd);
++ LIST_FOR_EACH(p_Pos, &((t_FmPcd*)h_FmPcd)->acquiredLocksLst)
++ {
++ t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Pos);
++ p_Lock->flag = FALSE;
++ }
++ FmPcdUnlock(h_FmPcd, intFlags);
++
++ CORE_MemoryBarrier();
++}
++
++t_Error FmPcdHcSync(t_Handle h_FmPcd)
++{
++ ASSERT_COND(h_FmPcd);
++ ASSERT_COND(((t_FmPcd*)h_FmPcd)->h_Hc);
++
++ return FmHcPcdSync(((t_FmPcd*)h_FmPcd)->h_Hc);
++}
++
++t_Handle FmPcdGetHcHandle(t_Handle h_FmPcd)
++{
++ ASSERT_COND(h_FmPcd);
++ return ((t_FmPcd*)h_FmPcd)->h_Hc;
++}
++
++bool FmPcdIsAdvancedOffloadSupported(t_Handle h_FmPcd)
++{
++ ASSERT_COND(h_FmPcd);
++ return ((t_FmPcd*)h_FmPcd)->advancedOffloadSupport;
++}
++/*********************** End of inter-module routines ************************/
++
++
++/****************************************/
++/* API Init unit functions */
++/****************************************/
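++
++/* Typical life cycle (illustrative sketch, not mandated by this file):
++ *
++ * h_Pcd = FM_PCD_Config(&pcdParams); // allocate and stage parameters
++ * // ... optional advanced-configuration calls before init ...
++ * err = FM_PCD_Init(h_Pcd); // commit the configuration
++ * // ... runtime ...
++ * err = FM_PCD_Free(h_Pcd); // tear everything down
++ */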
++
++t_Handle FM_PCD_Config(t_FmPcdParams *p_FmPcdParams)
++{
++ t_FmPcd *p_FmPcd = NULL;
++ t_FmPhysAddr physicalMuramBase;
++ uint8_t i;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPcdParams, E_INVALID_HANDLE,NULL);
++
++ p_FmPcd = (t_FmPcd *) XX_Malloc(sizeof(t_FmPcd));
++ if (!p_FmPcd)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD"));
++ return NULL;
++ }
++ memset(p_FmPcd, 0, sizeof(t_FmPcd));
++
++ p_FmPcd->p_FmPcdDriverParam = (t_FmPcdDriverParam *) XX_Malloc(sizeof(t_FmPcdDriverParam));
++ if (!p_FmPcd->p_FmPcdDriverParam)
++ {
++ XX_Free(p_FmPcd);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Driver Param"));
++ return NULL;
++ }
++ memset(p_FmPcd->p_FmPcdDriverParam, 0, sizeof(t_FmPcdDriverParam));
++
++ p_FmPcd->h_Fm = p_FmPcdParams->h_Fm;
++ p_FmPcd->guestId = FmGetGuestId(p_FmPcd->h_Fm);
++ p_FmPcd->h_FmMuram = FmGetMuramHandle(p_FmPcd->h_Fm);
++ if (p_FmPcd->h_FmMuram)
++ {
++ FmGetPhysicalMuramBase(p_FmPcdParams->h_Fm, &physicalMuramBase);
++ p_FmPcd->physicalMuramBase = (uint64_t)((uint64_t)(&physicalMuramBase)->low | ((uint64_t)(&physicalMuramBase)->high << 32));
++ }
++
++ for (i = 0; i<FM_MAX_NUM_OF_PORTS; i++)
++ p_FmPcd->netEnvs[i].clsPlanGrpId = ILLEGAL_CLS_PLAN;
++
++ if (p_FmPcdParams->useHostCommand)
++ {
++ t_FmHcParams hcParams;
++
++ memset(&hcParams, 0, sizeof(hcParams));
++ hcParams.h_Fm = p_FmPcd->h_Fm;
++ hcParams.h_FmPcd = (t_Handle)p_FmPcd;
++ memcpy((uint8_t*)&hcParams.params, (uint8_t*)&p_FmPcdParams->hc, sizeof(t_FmPcdHcParams));
++ p_FmPcd->h_Hc = FmHcConfigAndInit(&hcParams);
++ if (!p_FmPcd->h_Hc)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD HC"));
++ FM_PCD_Free(p_FmPcd);
++ return NULL;
++ }
++ }
++ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("No Host Command defined for a guest partition."));
++
++ if (p_FmPcdParams->kgSupport)
++ {
++ p_FmPcd->p_FmPcdKg = (t_FmPcdKg *)KgConfig(p_FmPcd, p_FmPcdParams);
++ if (!p_FmPcd->p_FmPcdKg)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Keygen"));
++ FM_PCD_Free(p_FmPcd);
++ return NULL;
++ }
++ }
++
++ if (p_FmPcdParams->plcrSupport)
++ {
++ p_FmPcd->p_FmPcdPlcr = (t_FmPcdPlcr *)PlcrConfig(p_FmPcd, p_FmPcdParams);
++ if (!p_FmPcd->p_FmPcdPlcr)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Policer"));
++ FM_PCD_Free(p_FmPcd);
++ return NULL;
++ }
++ }
++
++ if (p_FmPcdParams->prsSupport)
++ {
++ p_FmPcd->p_FmPcdPrs = (t_FmPcdPrs *)PrsConfig(p_FmPcd, p_FmPcdParams);
++ if (!p_FmPcd->p_FmPcdPrs)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Parser"));
++ FM_PCD_Free(p_FmPcd);
++ return NULL;
++ }
++ }
++
++ p_FmPcd->h_Spinlock = XX_InitSpinlock();
++ if (!p_FmPcd->h_Spinlock)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD spinlock"));
++ FM_PCD_Free(p_FmPcd);
++ return NULL;
++ }
++ INIT_LIST(&p_FmPcd->freeLocksLst);
++ INIT_LIST(&p_FmPcd->acquiredLocksLst);
++
++ p_FmPcd->numOfEnabledGuestPartitionsPcds = 0;
++
++ p_FmPcd->f_Exception = p_FmPcdParams->f_Exception;
++ p_FmPcd->f_FmPcdIndexedException = p_FmPcdParams->f_ExceptionId;
++ p_FmPcd->h_App = p_FmPcdParams->h_App;
++
++ p_FmPcd->p_CcShadow = NULL;
++ p_FmPcd->ccShadowSize = 0;
++ p_FmPcd->ccShadowAlign = 0;
++
++ p_FmPcd->h_ShadowSpinlock = XX_InitSpinlock();
++ if (!p_FmPcd->h_ShadowSpinlock)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD shadow spinlock"));
++ FM_PCD_Free(p_FmPcd);
++ return NULL;
++ }
++
++ return p_FmPcd;
++}
++
++t_Error FM_PCD_Init(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_Error err = E_OK;
++ t_FmPcdIpcMsg msg;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
++
++ FM_GetRevision(p_FmPcd->h_Fm, &p_FmPcd->fmRevInfo);
++
++ if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ {
++ memset(p_FmPcd->fmPcdIpcHandlerModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE);
++ if (Sprint (p_FmPcd->fmPcdIpcHandlerModuleName, "FM_PCD_%d_%d", FmGetId(p_FmPcd->h_Fm), NCSW_MASTER_ID) != 10)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++ memset(p_FmPcd->fmPcdModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE);
++ if (Sprint (p_FmPcd->fmPcdModuleName, "FM_PCD_%d_%d",FmGetId(p_FmPcd->h_Fm), p_FmPcd->guestId) != (p_FmPcd->guestId<10 ? 10:11))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++
++ p_FmPcd->h_IpcSession = XX_IpcInitSession(p_FmPcd->fmPcdIpcHandlerModuleName, p_FmPcd->fmPcdModuleName);
++ if (p_FmPcd->h_IpcSession)
++ {
++ t_FmPcdIpcReply reply;
++ uint32_t replyLength;
++ uint8_t isMasterAlive = 0;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_PCD_MASTER_IS_ALIVE;
++ msg.msgBody[0] = p_FmPcd->guestId;
++ blockingFlag = TRUE;
++
++ do
++ {
++ replyLength = sizeof(uint32_t) + sizeof(isMasterAlive);
++ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(p_FmPcd->guestId),
++ (uint8_t*)&reply,
++ &replyLength,
++ IpcMsgCompletionCB,
++ h_FmPcd)) != E_OK)
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ while (blockingFlag) ;
++ if (replyLength != (sizeof(uint32_t) + sizeof(isMasterAlive)))
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ isMasterAlive = *(uint8_t*)(reply.replyBody);
++ } while (!isMasterAlive);
++ }
++ }
++
++ CHECK_INIT_PARAMETERS(p_FmPcd, CheckFmPcdParameters);
++
++ if (p_FmPcd->p_FmPcdKg)
++ {
++ err = KgInit(p_FmPcd);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if (p_FmPcd->p_FmPcdPlcr)
++ {
++ err = PlcrInit(p_FmPcd);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if (p_FmPcd->p_FmPcdPrs)
++ {
++ err = PrsInit(p_FmPcd);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if (p_FmPcd->guestId == NCSW_MASTER_ID)
++ {
++ /* register to inter-core messaging mechanism */
++ memset(p_FmPcd->fmPcdModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE);
++ if (Sprint (p_FmPcd->fmPcdModuleName, "FM_PCD_%d_%d",FmGetId(p_FmPcd->h_Fm),NCSW_MASTER_ID) != 10)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++ err = XX_IpcRegisterMsgHandler(p_FmPcd->fmPcdModuleName, IpcMsgHandlerCB, p_FmPcd, FM_PCD_MAX_REPLY_SIZE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ /* IPv6 Frame-Id used for fragmentation */
++ p_FmPcd->ipv6FrameIdAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, 4, 4));
++ if (!p_FmPcd->ipv6FrameIdAddr)
++ {
++ FM_PCD_Free(p_FmPcd);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for IPv6 Frame-Id"));
++ }
++ IOMemSet32(UINT_TO_PTR(p_FmPcd->ipv6FrameIdAddr), 0, 4);
++
++ /* CAPWAP Frame-Id used for fragmentation */
++ p_FmPcd->capwapFrameIdAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, 2, 4));
++ if (!p_FmPcd->capwapFrameIdAddr)
++ {
++ FM_PCD_Free(p_FmPcd);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CAPWAP Frame-Id"));
++ }
++ IOMemSet32(UINT_TO_PTR(p_FmPcd->capwapFrameIdAddr), 0, 2);
++
++ XX_Free(p_FmPcd->p_FmPcdDriverParam);
++ p_FmPcd->p_FmPcdDriverParam = NULL;
++
++ FmRegisterPcd(p_FmPcd->h_Fm, p_FmPcd);
++
++ return E_OK;
++}
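++
++/*
++ * Usage sketch (illustrative only - never compiled): a minimal bring-up
++ * sequence for this API, assuming the driver's FM_PCD_Config() entry
++ * point whose tail appears above. The h_Fm handle and the choice of
++ * sub-modules (KeyGen, parser) are assumptions of the example.
++ */
++#if 0
++static t_Handle ExamplePcdBringUp(t_Handle h_Fm)
++{
++    t_FmPcdParams pcdParams;
++    t_Handle h_FmPcd;
++
++    memset(&pcdParams, 0, sizeof(pcdParams));
++    pcdParams.h_Fm = h_Fm;        /* FM handle, obtained beforehand */
++    pcdParams.kgSupport = TRUE;   /* request a KeyGen sub-module */
++    pcdParams.prsSupport = TRUE;  /* request a parser sub-module */
++
++    h_FmPcd = FM_PCD_Config(&pcdParams);
++    if (!h_FmPcd)
++        return NULL;
++
++    if (FM_PCD_Init(h_FmPcd) != E_OK)
++    {
++        FM_PCD_Free(h_FmPcd);
++        return NULL;
++    }
++    return h_FmPcd;
++}
++#endif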
++
++t_Error FM_PCD_Free(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd =(t_FmPcd *)h_FmPcd;
++ t_Error err = E_OK;
++
++ if (p_FmPcd->ipv6FrameIdAddr)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, UINT_TO_PTR(p_FmPcd->ipv6FrameIdAddr));
++
++ if (p_FmPcd->capwapFrameIdAddr)
++ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, UINT_TO_PTR(p_FmPcd->capwapFrameIdAddr));
++
++ if (p_FmPcd->enabled)
++ FM_PCD_Disable(p_FmPcd);
++
++ if (p_FmPcd->p_FmPcdDriverParam)
++ {
++ XX_Free(p_FmPcd->p_FmPcdDriverParam);
++ p_FmPcd->p_FmPcdDriverParam = NULL;
++ }
++
++ if (p_FmPcd->p_FmPcdKg)
++ {
++ if ((err = KgFree(p_FmPcd)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ XX_Free(p_FmPcd->p_FmPcdKg);
++ p_FmPcd->p_FmPcdKg = NULL;
++ }
++
++ if (p_FmPcd->p_FmPcdPlcr)
++ {
++ PlcrFree(p_FmPcd);
++ XX_Free(p_FmPcd->p_FmPcdPlcr);
++ p_FmPcd->p_FmPcdPlcr = NULL;
++ }
++
++ if (p_FmPcd->p_FmPcdPrs)
++ {
++ if (p_FmPcd->guestId == NCSW_MASTER_ID)
++ PrsFree(p_FmPcd);
++ XX_Free(p_FmPcd->p_FmPcdPrs);
++ p_FmPcd->p_FmPcdPrs = NULL;
++ }
++
++ if (p_FmPcd->h_Hc)
++ {
++ FmHcFree(p_FmPcd->h_Hc);
++ p_FmPcd->h_Hc = NULL;
++ }
++
++ XX_IpcUnregisterMsgHandler(p_FmPcd->fmPcdModuleName);
++
++ FmUnregisterPcd(p_FmPcd->h_Fm);
++
++ ReleaseFreeLocksLst(p_FmPcd);
++
++ if (p_FmPcd->h_Spinlock)
++ XX_FreeSpinlock(p_FmPcd->h_Spinlock);
++
++ if (p_FmPcd->h_ShadowSpinlock)
++ XX_FreeSpinlock(p_FmPcd->h_ShadowSpinlock);
++
++ XX_Free(p_FmPcd);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_ConfigException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigException - guest mode!"));
++
++ GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_FmPcd->exceptions |= bitMask;
++ else
++ p_FmPcd->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ return E_OK;
++}
++
++t_Error FM_PCD_ConfigHcFramesDataMemory(t_Handle h_FmPcd, uint8_t memId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
++
++ return FmHcSetFramesDataMemory(p_FmPcd->h_Hc, memId);
++}
++
++t_Error FM_PCD_Enable(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++
++ if (p_FmPcd->enabled)
++ return E_OK;
++
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ p_FmPcd->h_IpcSession)
++ {
++ uint8_t enabled;
++ t_FmPcdIpcMsg msg;
++ t_FmPcdIpcReply reply;
++ uint32_t replyLength;
++
++ memset(&reply, 0, sizeof(reply));
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_PCD_MASTER_IS_ENABLED;
++ replyLength = sizeof(uint32_t) + sizeof(enabled);
++ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t) + sizeof(enabled))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ p_FmPcd->enabled = (bool)!!(*(uint8_t*)(reply.replyBody));
++ if (!p_FmPcd->enabled)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-PCD master should be enabled first!"));
++
++ return E_OK;
++ }
++ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("running in guest-mode without IPC!"));
++
++ if (p_FmPcd->p_FmPcdKg)
++ KgEnable(p_FmPcd);
++
++ if (p_FmPcd->p_FmPcdPlcr)
++ PlcrEnable(p_FmPcd);
++
++ if (p_FmPcd->p_FmPcdPrs)
++ PrsEnable(p_FmPcd);
++
++ p_FmPcd->enabled = TRUE;
++
++ return E_OK;
++}
++
++t_Error FM_PCD_Disable(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++
++ if (!p_FmPcd->enabled)
++ return E_OK;
++
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ p_FmPcd->h_IpcSession)
++ {
++ t_FmPcdIpcMsg msg;
++ t_FmPcdIpcReply reply;
++ uint32_t replyLength;
++
++ memset(&reply, 0, sizeof(reply));
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_PCD_GUEST_DISABLE;
++ replyLength = sizeof(uint32_t);
++ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ if (reply.error == E_OK)
++ p_FmPcd->enabled = FALSE;
++
++ return (t_Error)(reply.error);
++ }
++ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("running in guest-mode without IPC!"));
++
++ if (p_FmPcd->numOfEnabledGuestPartitionsPcds != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++                     ("Trying to disable a master partition PCD while "
++                      "guest partitions are still enabled!"));
++
++ if (p_FmPcd->p_FmPcdKg)
++ KgDisable(p_FmPcd);
++
++ if (p_FmPcd->p_FmPcdPlcr)
++ PlcrDisable(p_FmPcd);
++
++ if (p_FmPcd->p_FmPcdPrs)
++ PrsDisable(p_FmPcd);
++
++ p_FmPcd->enabled = FALSE;
++
++ return E_OK;
++}
++
++t_Handle FM_PCD_NetEnvCharacteristicsSet(t_Handle h_FmPcd, t_FmPcdNetEnvParams *p_NetEnvParams)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t intFlags, specialUnits = 0;
++ uint8_t bitId = 0;
++ uint8_t i, j, k;
++ uint8_t netEnvCurrId;
++ uint8_t ipsecAhUnit = 0,ipsecEspUnit = 0;
++ bool ipsecAhExists = FALSE, ipsecEspExists = FALSE, shim1Selected = FALSE;
++ uint8_t hdrNum;
++ t_FmPcdNetEnvParams *p_ModifiedNetEnvParams;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_STATE, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_NetEnvParams, E_NULL_POINTER, NULL);
++
++ intFlags = FmPcdLock(p_FmPcd);
++
++ /* find a new netEnv */
++ for (i = 0; i < FM_MAX_NUM_OF_PORTS; i++)
++ if (!p_FmPcd->netEnvs[i].used)
++ break;
++
++    if (i == FM_MAX_NUM_OF_PORTS)
++    {
++        REPORT_ERROR(MAJOR, E_FULL, ("No more than %d netEnvs allowed.", FM_MAX_NUM_OF_PORTS));
++ FmPcdUnlock(p_FmPcd, intFlags);
++ return NULL;
++ }
++
++ p_FmPcd->netEnvs[i].used = TRUE;
++ FmPcdUnlock(p_FmPcd, intFlags);
++
++    /* As no one holds a handle to this netEnv yet, there is no need
++       to protect it with spinlocks */
++
++ p_ModifiedNetEnvParams = (t_FmPcdNetEnvParams *)XX_Malloc(sizeof(t_FmPcdNetEnvParams));
++ if (!p_ModifiedNetEnvParams)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FmPcdNetEnvParams"));
++ return NULL;
++ }
++
++ memcpy(p_ModifiedNetEnvParams, p_NetEnvParams, sizeof(t_FmPcdNetEnvParams));
++ p_NetEnvParams = p_ModifiedNetEnvParams;
++
++ netEnvCurrId = (uint8_t)i;
++
++ /* clear from previous use */
++ memset(&p_FmPcd->netEnvs[netEnvCurrId].units, 0, FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS * sizeof(t_FmPcdIntDistinctionUnit));
++ memset(&p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs, 0, FM_PCD_MAX_NUM_OF_ALIAS_HDRS * sizeof(t_FmPcdNetEnvAliases));
++ memcpy(&p_FmPcd->netEnvs[netEnvCurrId].units, p_NetEnvParams->units, p_NetEnvParams->numOfDistinctionUnits*sizeof(t_FmPcdIntDistinctionUnit));
++
++ p_FmPcd->netEnvs[netEnvCurrId].netEnvId = netEnvCurrId;
++ p_FmPcd->netEnvs[netEnvCurrId].h_FmPcd = p_FmPcd;
++
++ p_FmPcd->netEnvs[netEnvCurrId].clsPlanGrpId = ILLEGAL_CLS_PLAN;
++
++ /* check that header with opt is not interchanged with the same header */
++ for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
++ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
++ {
++ for (k = 0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
++ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++)
++ {
++ /* if an option exists, check that other headers are not the same header
++ without option */
++ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt)
++ {
++ for (j = 0; (j < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
++ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].hdr != HEADER_TYPE_NONE); j++)
++ {
++ if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].hdr == p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr) &&
++ !p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].opt)
++ {
++ REPORT_ERROR(MINOR, E_FULL,
++ ("Illegal unit - header with opt may not be interchangeable with the same header without opt"));
++ XX_Free(p_ModifiedNetEnvParams);
++ return NULL;
++ }
++ }
++ }
++ }
++ }
++
++ /* Specific headers checking */
++ for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
++ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
++ {
++ for (k = 0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
++ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++)
++ {
++            /* Some header pairs may not be defined in different units, as the parser
++            cannot distinguish between them: */
++            /* HEADER_TYPE_IPSEC_AH and HEADER_TYPE_IPSEC_ESP, for instance, */
++            /* may not reside in two separate units, as checked below */
++ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPSEC_AH)
++ {
++ if (ipsecEspExists && (ipsecEspUnit != i))
++ {
++ REPORT_ERROR(MINOR, E_INVALID_STATE, ("HEADER_TYPE_IPSEC_AH and HEADER_TYPE_IPSEC_ESP may not be defined in separate units"));
++ XX_Free(p_ModifiedNetEnvParams);
++ return NULL;
++ }
++ else
++ {
++ ipsecAhUnit = i;
++ ipsecAhExists = TRUE;
++ }
++ }
++ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPSEC_ESP)
++ {
++ if (ipsecAhExists && (ipsecAhUnit != i))
++ {
++ REPORT_ERROR(MINOR, E_INVALID_STATE, ("HEADER_TYPE_IPSEC_AH and HEADER_TYPE_IPSEC_ESP may not be defined in separate units"));
++ XX_Free(p_ModifiedNetEnvParams);
++ return NULL;
++ }
++ else
++ {
++ ipsecEspUnit = i;
++ ipsecEspExists = TRUE;
++ }
++ }
++ /* ENCAP_ESP */
++ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_UDP_ENCAP_ESP)
++ {
++ /* IPSec UDP encapsulation is currently set to use SHIM1 */
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_UDP_ENCAP_ESP;
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM1;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM1;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
++ }
++#if (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ /* UDP_LITE */
++ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_UDP_LITE)
++ {
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_UDP_LITE;
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_UDP;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_UDP;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
++ }
++#endif /* (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++
++ /* IP FRAG */
++ if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPv4) &&
++ (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt == IPV4_FRAG_1))
++ {
++                /* If IPv4+Frag, we need to set 2 units - SHIM2 and IPv4. We first set SHIM2, and then check if
++                 * IPv4 exists. If so, we don't need to set an extra unit.
++                 * We consider as "having IPv4" any IPv4 without interchangeable headers
++                 * but including any options. */
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_IPv4;
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].opt = IPV4_FRAG_1;
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM2;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM2;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
++
++ /* check if IPv4 header exists by itself */
++ if (FmPcdNetEnvGetUnitId(p_FmPcd, netEnvCurrId, HEADER_TYPE_IPv4, FALSE, 0) == FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
++ {
++ p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits].hdrs[0].hdr = HEADER_TYPE_IPv4;
++ p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits++].hdrs[0].opt = 0;
++ }
++ }
++ if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPv6) &&
++ (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt == IPV6_FRAG_1))
++ {
++                /* If IPv6+Frag, we need to set 2 units - SHIM2 and IPv6. We first set SHIM2, and then check if
++                 * IPv6 exists. If so, we don't need to set an extra unit.
++                 * We consider as "having IPv6" any IPv6 without interchangeable headers
++                 * but including any options. */
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_IPv6;
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].opt = IPV6_FRAG_1;
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM2;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM2;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
++
++ /* check if IPv6 header exists by itself */
++ if (FmPcdNetEnvGetUnitId(p_FmPcd, netEnvCurrId, HEADER_TYPE_IPv6, FALSE, 0) == FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
++ {
++ p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits].hdrs[0].hdr = HEADER_TYPE_IPv6;
++ p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits++].hdrs[0].opt = 0;
++ }
++ }
++#if (DPAA_VERSION >= 11)
++ /* CAPWAP FRAG */
++ if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_CAPWAP) &&
++ (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt == CAPWAP_FRAG_1))
++ {
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_CAPWAP;
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].opt = CAPWAP_FRAG_1;
++ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM2;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM2;
++ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++ }
++ }
++
++    /* if private header (shim), check that no other headers are specified */
++ for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
++ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
++ {
++ if (IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr))
++ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[1].hdr != HEADER_TYPE_NONE)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("SHIM header may not be interchanged with other headers"));
++ XX_Free(p_ModifiedNetEnvParams);
++ return NULL;
++ }
++ }
++
++ for (i = 0; i < p_NetEnvParams->numOfDistinctionUnits; i++)
++ {
++ if (IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr))
++ switch (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)
++ {
++ case (HEADER_TYPE_USER_DEFINED_SHIM1):
++ if (shim1Selected)
++ {
++                REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("SHIM header cannot be selected with UDP_ENCAP_ESP"));
++ XX_Free(p_ModifiedNetEnvParams);
++ return NULL;
++ }
++ shim1Selected = TRUE;
++ p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = 0x00000001;
++ break;
++ case (HEADER_TYPE_USER_DEFINED_SHIM2):
++ p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = 0x00000002;
++ break;
++ default:
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Requested SHIM not supported"));
++ }
++ else
++ {
++ p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = (uint32_t)(0x80000000 >> bitId++);
++
++ if (IS_SPECIAL_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr))
++ p_FmPcd->netEnvs[netEnvCurrId].macsecVector = p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i];
++ }
++ }
++
++    /* define a set of hardware parser LCVs according to the defined netEnv */
++
++ /* set an array of LCV's for each header in the netEnv */
++ for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
++ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
++ {
++ /* private headers have no LCV in the hard parser */
++ if (!IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr))
++ {
++ for (k = 0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
++ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++)
++ {
++ hdrNum = GetPrsHdrNum(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr);
++ if ((hdrNum == ILLEGAL_HDR_NUM) || (hdrNum == NO_HDR_NUM))
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG);
++ XX_Free(p_ModifiedNetEnvParams);
++ return NULL;
++ }
++ p_FmPcd->netEnvs[netEnvCurrId].lcvs[hdrNum] |= p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i];
++ }
++ }
++ }
++ XX_Free(p_ModifiedNetEnvParams);
++
++ p_FmPcd->netEnvs[netEnvCurrId].h_Spinlock = XX_InitSpinlock();
++ if (!p_FmPcd->netEnvs[netEnvCurrId].h_Spinlock)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd NetEnv spinlock"));
++ return NULL;
++ }
++ return &p_FmPcd->netEnvs[netEnvCurrId];
++}
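++
++/*
++ * Usage sketch (illustrative only - never compiled): defining a network
++ * environment with two distinction units. The particular headers chosen
++ * here are assumptions of the example.
++ */
++#if 0
++static t_Handle ExampleSetNetEnv(t_Handle h_FmPcd)
++{
++    t_FmPcdNetEnvParams netEnvParams;
++
++    memset(&netEnvParams, 0, sizeof(netEnvParams));
++    netEnvParams.numOfDistinctionUnits = 2;
++    netEnvParams.units[0].hdrs[0].hdr = HEADER_TYPE_IPv4;
++    netEnvParams.units[1].hdrs[0].hdr = HEADER_TYPE_UDP;
++
++    /* returns a netEnv handle on success, NULL on failure */
++    return FM_PCD_NetEnvCharacteristicsSet(h_FmPcd, &netEnvParams);
++}
++#endif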
++
++t_Error FM_PCD_NetEnvCharacteristicsDelete(t_Handle h_NetEnv)
++{
++ t_FmPcdNetEnv *p_NetEnv = (t_FmPcdNetEnv*)h_NetEnv;
++ t_FmPcd *p_FmPcd = p_NetEnv->h_FmPcd;
++ uint32_t intFlags;
++ uint8_t netEnvId = p_NetEnv->netEnvId;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
++
++ /* check that no port is bound to this netEnv */
++ if (p_FmPcd->netEnvs[netEnvId].owners)
++ {
++ RETURN_ERROR(MINOR, E_INVALID_STATE,
++                     ("Trying to delete a netEnv that has ports/schemes/trees/clsPlanGrps bound to it"));
++ }
++
++ intFlags = FmPcdLock(p_FmPcd);
++
++ p_FmPcd->netEnvs[netEnvId].used = FALSE;
++ p_FmPcd->netEnvs[netEnvId].clsPlanGrpId = ILLEGAL_CLS_PLAN;
++
++ memset(p_FmPcd->netEnvs[netEnvId].units, 0, sizeof(t_FmPcdIntDistinctionUnit)*FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
++ memset(p_FmPcd->netEnvs[netEnvId].unitsVectors, 0, sizeof(uint32_t)*FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
++ memset(p_FmPcd->netEnvs[netEnvId].lcvs, 0, sizeof(uint32_t)*FM_PCD_PRS_NUM_OF_HDRS);
++
++ if (p_FmPcd->netEnvs[netEnvId].h_Spinlock)
++ XX_FreeSpinlock(p_FmPcd->netEnvs[netEnvId].h_Spinlock);
++
++ FmPcdUnlock(p_FmPcd, intFlags);
++ return E_OK;
++}
++
++void FM_PCD_HcTxConf(t_Handle h_FmPcd, t_DpaaFD *p_Fd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ SANITY_CHECK_RETURN(h_FmPcd, E_INVALID_STATE);
++
++ FmHcTxConf(p_FmPcd->h_Hc, p_Fd);
++}
++
++t_Error FM_PCD_SetAdvancedOffloadSupport(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmCtrlCodeRevisionInfo revInfo;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->enabled, E_INVALID_STATE);
++
++ if ((err = FM_GetFmanCtrlCodeRevision(p_FmPcd->h_Fm, &revInfo)) != E_OK)
++ {
++ DBG(WARNING, ("FM in guest-mode without IPC, can't validate firmware revision."));
++ revInfo.packageRev = IP_OFFLOAD_PACKAGE_NUMBER;
++ }
++ if (!IS_OFFLOAD_PACKAGE(revInfo.packageRev))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Fman ctrl code package"));
++
++ if (!p_FmPcd->h_Hc)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("HC must be initialized in this mode"));
++
++ p_FmPcd->advancedOffloadSupport = TRUE;
++
++ return E_OK;
++}
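++
++/*
++ * Usage sketch (illustrative only - never compiled): advanced offload must
++ * be requested after FM_PCD_Init() but before FM_PCD_Enable(), since the
++ * sanity checks above reject an already-enabled PCD.
++ */
++#if 0
++static t_Error ExampleEnableWithOffload(t_Handle h_FmPcd)
++{
++    t_Error err;
++
++    err = FM_PCD_SetAdvancedOffloadSupport(h_FmPcd);
++    if (err != E_OK)
++        return err;
++
++    return FM_PCD_Enable(h_FmPcd);
++}
++#endif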
++
++uint32_t FM_PCD_GetCounter(t_Handle h_FmPcd, e_FmPcdCounters counter)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t outCounter = 0;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0);
++
++ switch (counter)
++ {
++ case (e_FM_PCD_KG_COUNTERS_TOTAL):
++ if (!p_FmPcd->p_FmPcdKg)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("KeyGen is not activated"));
++ return 0;
++ }
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ !p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs &&
++ !p_FmPcd->h_IpcSession)
++ {
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++                ("running in guest-mode with neither IPC nor a mapped register!"));
++ return 0;
++ }
++ break;
++
++ case (e_FM_PCD_PLCR_COUNTERS_YELLOW):
++ case (e_FM_PCD_PLCR_COUNTERS_RED):
++ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED):
++ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW):
++ case (e_FM_PCD_PLCR_COUNTERS_TOTAL):
++ case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH):
++ if (!p_FmPcd->p_FmPcdPlcr)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Policer is not activated"));
++ return 0;
++ }
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ !p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs &&
++ !p_FmPcd->h_IpcSession)
++ {
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++                ("running in \"guest-mode\" with neither IPC nor a mapped register!"));
++ return 0;
++ }
++
++ /* check that counters are enabled */
++ if (p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs &&
++ !(GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr) & FM_PCD_PLCR_GCR_STEN))
++ {
++ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled"));
++ return 0;
++ }
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs ||
++ ((p_FmPcd->guestId != NCSW_MASTER_ID) && p_FmPcd->h_IpcSession));
++ break;
++
++ case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH):
++ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED):
++ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED):
++ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED):
++ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED):
++ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR):
++ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR):
++ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR):
++ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR):
++ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES):
++ if (!p_FmPcd->p_FmPcdPrs)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Parser is not activated"));
++ return 0;
++ }
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ !p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs &&
++ !p_FmPcd->h_IpcSession)
++ {
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++                ("running in guest-mode with neither IPC nor a mapped register!"));
++ return 0;
++ }
++ break;
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported type of counter"));
++ return 0;
++ }
++
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ p_FmPcd->h_IpcSession)
++ {
++ t_FmPcdIpcMsg msg;
++ t_FmPcdIpcReply reply;
++ uint32_t replyLength;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_PCD_GET_COUNTER;
++ memcpy(msg.msgBody, (uint8_t *)&counter, sizeof(uint32_t));
++ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) +sizeof(uint32_t),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t) + sizeof(uint32_t))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++
++ memcpy((uint8_t*)&outCounter, reply.replyBody, sizeof(uint32_t));
++ return outCounter;
++ }
++
++ switch (counter)
++ {
++ /* Parser statistics */
++ case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pds);
++ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rrs);
++ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rrs);
++ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rrs);
++ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srrs);
++ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rres);
++ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rres);
++ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rres);
++ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srres);
++ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spcs);
++ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spscs);
++ case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_hxscs);
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrcs);
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrscs);
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwcs);
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwscs);
++ case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES):
++ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_fcscs);
++ case (e_FM_PCD_KG_COUNTERS_TOTAL):
++ return GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_tpc);
++
++ /* Policer statistics */
++ case (e_FM_PCD_PLCR_COUNTERS_YELLOW):
++ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ypcnt);
++ case (e_FM_PCD_PLCR_COUNTERS_RED):
++ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rpcnt);
++ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED):
++ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rrpcnt);
++ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW):
++ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rypcnt);
++ case (e_FM_PCD_PLCR_COUNTERS_TOTAL):
++ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_tpcnt);
++ case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH):
++ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_flmcnt);
++ }
++ return 0;
++}
++
++t_Error FM_PCD_SetException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t bitMask = 0, tmpReg;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
++
++ if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetException - guest mode!"));
++
++ GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception);
++
++ if (bitMask)
++ {
++ if (enable)
++ p_FmPcd->exceptions |= bitMask;
++ else
++ p_FmPcd->exceptions &= ~bitMask;
++
++ switch (exception)
++ {
++ case (e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC):
++ case (e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW):
++ if (!p_FmPcd->p_FmPcdKg)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - keygen is not working"));
++ break;
++ case (e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC):
++ case (e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR):
++ case (e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE):
++ case (e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE):
++ if (!p_FmPcd->p_FmPcdPlcr)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - policer is not working"));
++ break;
++ case (e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC):
++ case (e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC):
++ if (!p_FmPcd->p_FmPcdPrs)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - parser is not working"));
++ break;
++ }
++
++ switch (exception)
++ {
++ case (e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC):
++ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer);
++ if (enable)
++ tmpReg |= FM_EX_KG_DOUBLE_ECC;
++ else
++ tmpReg &= ~FM_EX_KG_DOUBLE_ECC;
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer, tmpReg);
++ break;
++ case (e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW):
++ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer);
++ if (enable)
++ tmpReg |= FM_EX_KG_KEYSIZE_OVERFLOW;
++ else
++ tmpReg &= ~FM_EX_KG_KEYSIZE_OVERFLOW;
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer, tmpReg);
++ break;
++ case (e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC):
++ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_perer);
++ if (enable)
++ tmpReg |= FM_PCD_PRS_DOUBLE_ECC;
++ else
++ tmpReg &= ~FM_PCD_PRS_DOUBLE_ECC;
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_perer, tmpReg);
++ break;
++ case (e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC):
++ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pever);
++ if (enable)
++ tmpReg |= FM_PCD_PRS_SINGLE_ECC;
++ else
++ tmpReg &= ~FM_PCD_PRS_SINGLE_ECC;
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pever, tmpReg);
++ break;
++ case (e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC):
++ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier);
++ if (enable)
++ tmpReg |= FM_PCD_PLCR_DOUBLE_ECC;
++ else
++ tmpReg &= ~FM_PCD_PLCR_DOUBLE_ECC;
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier, tmpReg);
++ break;
++ case (e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR):
++ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier);
++ if (enable)
++ tmpReg |= FM_PCD_PLCR_INIT_ENTRY_ERROR;
++ else
++ tmpReg &= ~FM_PCD_PLCR_INIT_ENTRY_ERROR;
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier, tmpReg);
++ break;
++ case (e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE):
++ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier);
++ if (enable)
++ tmpReg |= FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE;
++ else
++ tmpReg &= ~FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE;
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier, tmpReg);
++ break;
++ case (e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE):
++ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier);
++ if (enable)
++ tmpReg |= FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE;
++ else
++ tmpReg &= ~FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE;
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier, tmpReg);
++ break;
++ }
++        /* For ECC exceptions the driver automatically enables the ECC mechanism
++           if it is disabled; it may disable it again later, depending on the driver's status */
++        if (enable && ((exception == e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC) ||
++                       (exception == e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC) ||
++                       (exception == e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC) ||
++                       (exception == e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC)))
++            FmEnableRamsEcc(p_FmPcd->h_Fm);
++        if (!enable && ((exception == e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC) ||
++                        (exception == e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC) ||
++                        (exception == e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC) ||
++                        (exception == e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC)))
++            FmDisableRamsEcc(p_FmPcd->h_Fm);
++ }
++
++ return E_OK;
++}
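++
++/*
++ * Usage sketch (illustrative only - never compiled): unmasking the KeyGen
++ * double-ECC error interrupt. Only a master partition may do this; guest
++ * partitions are rejected by the check above.
++ */
++#if 0
++static t_Error ExampleUnmaskKgDoubleEcc(t_Handle h_FmPcd)
++{
++    return FM_PCD_SetException(h_FmPcd, e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC, TRUE);
++}
++#endif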
++
++t_Error FM_PCD_ForceIntr (t_Handle h_FmPcd, e_FmPcdExceptions exception)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
++
++ if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ForceIntr - guest mode!"));
++
++ switch (exception)
++ {
++ case (e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC):
++ case (e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW):
++ if (!p_FmPcd->p_FmPcdKg)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - keygen is not working"));
++ break;
++ case (e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC):
++ case (e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR):
++ case (e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE):
++ case (e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE):
++ if (!p_FmPcd->p_FmPcdPlcr)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - policer is not working"));
++ break;
++ case (e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC):
++ case (e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC):
++ if (!p_FmPcd->p_FmPcdPrs)
++                RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - parser is not working"));
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid interrupt requested"));
++ }
++ switch (exception)
++ {
++ case e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC:
++ if (!(p_FmPcd->exceptions & FM_PCD_EX_PRS_DOUBLE_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ break;
++ case e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC:
++ if (!(p_FmPcd->exceptions & FM_PCD_EX_PRS_SINGLE_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ break;
++ case e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC:
++ if (!(p_FmPcd->exceptions & FM_EX_KG_DOUBLE_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_feer, FM_EX_KG_DOUBLE_ECC);
++ break;
++ case e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW:
++ if (!(p_FmPcd->exceptions & FM_EX_KG_KEYSIZE_OVERFLOW))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_feer, FM_EX_KG_KEYSIZE_OVERFLOW);
++ break;
++ case e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC:
++ if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_DOUBLE_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, FM_PCD_PLCR_DOUBLE_ECC);
++ break;
++ case e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR:
++ if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_INIT_ENTRY_ERROR))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, FM_PCD_PLCR_INIT_ENTRY_ERROR);
++ break;
++ case e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE:
++ if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE);
++ break;
++ case e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE:
++ if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE);
++ break;
++ }
++
++ return E_OK;
++}
++
++
++t_Error FM_PCD_ModifyCounter(t_Handle h_FmPcd, e_FmPcdCounters counter, uint32_t value)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
++
++ if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ModifyCounter - guest mode!"));
++
++ switch (counter)
++ {
++ case (e_FM_PCD_KG_COUNTERS_TOTAL):
++ if (!p_FmPcd->p_FmPcdKg)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid counters - KeyGen is not working"));
++ break;
++ case (e_FM_PCD_PLCR_COUNTERS_YELLOW):
++ case (e_FM_PCD_PLCR_COUNTERS_RED):
++ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED):
++ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW):
++ case (e_FM_PCD_PLCR_COUNTERS_TOTAL):
++ case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH):
++ if (!p_FmPcd->p_FmPcdPlcr)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid counters - Policer is not working"));
++ if (!(GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr) & FM_PCD_PLCR_GCR_STEN))
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled"));
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH):
++ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED):
++ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED):
++ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED):
++ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED):
++ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR):
++ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR):
++ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR):
++ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR):
++ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES):
++ case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES):
++ if (!p_FmPcd->p_FmPcdPrs)
++                RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid counters - Parser is not working"));
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported type of counter"));
++ }
++ switch (counter)
++ {
++ case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pds, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rrs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rrs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rrs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srrs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rres, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rres, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rres, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srres, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spcs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spscs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_hxscs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrcs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrscs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwcs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwscs, value);
++ break;
++ case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_fcscs, value);
++ break;
++ case (e_FM_PCD_KG_COUNTERS_TOTAL):
++ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_tpc,value);
++ break;
++
++        /* Policer counters */
++ case (e_FM_PCD_PLCR_COUNTERS_YELLOW):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ypcnt, value);
++ break;
++ case (e_FM_PCD_PLCR_COUNTERS_RED):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rpcnt, value);
++ break;
++ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rrpcnt, value);
++ break;
++ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rypcnt, value);
++ break;
++ case (e_FM_PCD_PLCR_COUNTERS_TOTAL):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_tpcnt, value);
++ break;
++ case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH):
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_flmcnt, value);
++ break;
++ }
++
++ return E_OK;
++}
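++
++/*
++ * Usage sketch (illustrative only - never compiled): reading a statistics
++ * counter and then resetting it to zero, combining FM_PCD_GetCounter()
++ * with FM_PCD_ModifyCounter() above.
++ */
++#if 0
++static uint32_t ExampleReadAndClearDispatchCounter(t_Handle h_FmPcd)
++{
++    uint32_t count;
++
++    count = FM_PCD_GetCounter(h_FmPcd, e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH);
++    if (FM_PCD_ModifyCounter(h_FmPcd, e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH, 0) != E_OK)
++        REPORT_ERROR(MINOR, E_INVALID_STATE, ("counter reset failed"));
++    return count;
++}
++#endif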
++
++t_Handle FM_PCD_GetHcPort(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ return FmHcGetPort(p_FmPcd->h_Hc);
++}
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h
+new file mode 100644
+index 00000000..27ec9c5b
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h
+@@ -0,0 +1,543 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_pcd.h
++
++ @Description FM PCD ...
++*//***************************************************************************/
++#ifndef __FM_PCD_H
++#define __FM_PCD_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++#include "fm_pcd_ext.h"
++#include "fm_common.h"
++#include "fsl_fman_prs.h"
++#include "fsl_fman_kg.h"
++
++#define __ERR_MODULE__ MODULE_FM_PCD
++
++
++/****************************/
++/* Defaults */
++/****************************/
++#define DEFAULT_plcrAutoRefresh FALSE
++#define DEFAULT_fmPcdKgErrorExceptions (FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW)
++#define DEFAULT_fmPcdPlcrErrorExceptions (FM_PCD_EX_PLCR_DOUBLE_ECC | FM_PCD_EX_PLCR_INIT_ENTRY_ERROR)
++#define DEFAULT_fmPcdPlcrExceptions 0
++#define DEFAULT_fmPcdPrsErrorExceptions (FM_PCD_EX_PRS_DOUBLE_ECC)
++
++#define DEFAULT_fmPcdPrsExceptions FM_PCD_EX_PRS_SINGLE_ECC
++#define DEFAULT_numOfUsedProfilesPerWindow 16
++#define DEFAULT_numOfSharedPlcrProfiles 4
++
++/****************************/
++/* Network defines */
++/****************************/
++#define UDP_HEADER_SIZE 8
++
++#define ESP_SPI_OFFSET 0
++#define ESP_SPI_SIZE 4
++#define ESP_SEQ_NUM_OFFSET ESP_SPI_SIZE
++#define ESP_SEQ_NUM_SIZE 4
++
++/****************************/
++/* General defines */
++/****************************/
++#define ILLEGAL_CLS_PLAN 0xff
++#define ILLEGAL_NETENV 0xff
++
++#define FM_PCD_MAX_NUM_OF_ALIAS_HDRS 3
++
++/****************************/
++/* Error defines */
++/****************************/
++
++#define FM_PCD_EX_PLCR_DOUBLE_ECC 0x20000000
++#define FM_PCD_EX_PLCR_INIT_ENTRY_ERROR 0x10000000
++#define FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE 0x08000000
++#define FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE 0x04000000
++
++#define GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception) \
++switch (exception){ \
++ case e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC: \
++ bitMask = FM_EX_KG_DOUBLE_ECC; break; \
++ case e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC: \
++ bitMask = FM_PCD_EX_PLCR_DOUBLE_ECC; break; \
++ case e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW: \
++ bitMask = FM_EX_KG_KEYSIZE_OVERFLOW; break; \
++ case e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR: \
++ bitMask = FM_PCD_EX_PLCR_INIT_ENTRY_ERROR; break; \
++ case e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE: \
++ bitMask = FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE; break; \
++ case e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE: \
++ bitMask = FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE; break; \
++ case e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC: \
++ bitMask = FM_PCD_EX_PRS_DOUBLE_ECC; break; \
++ case e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC: \
++ bitMask = FM_PCD_EX_PRS_SINGLE_ECC; break; \
++ default: bitMask = 0;break;}
++
++/***********************************************************************/
++/* Policer defines */
++/***********************************************************************/
++#define FM_PCD_PLCR_GCR_STEN 0x40000000
++#define FM_PCD_PLCR_DOUBLE_ECC 0x80000000
++#define FM_PCD_PLCR_INIT_ENTRY_ERROR 0x40000000
++#define FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE 0x80000000
++#define FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE 0x40000000
++
++/***********************************************************************/
++/* Memory map */
++/***********************************************************************/
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++
++typedef struct {
++/* General Configuration and Status Registers */
++ volatile uint32_t fmpl_gcr; /* 0x000 FMPL_GCR - FM Policer General Configuration */
++ volatile uint32_t fmpl_gsr; /* 0x004 FMPL_GSR - FM Policer Global Status Register */
++ volatile uint32_t fmpl_evr; /* 0x008 FMPL_EVR - FM Policer Event Register */
++ volatile uint32_t fmpl_ier; /* 0x00C FMPL_IER - FM Policer Interrupt Enable Register */
++ volatile uint32_t fmpl_ifr; /* 0x010 FMPL_IFR - FM Policer Interrupt Force Register */
++ volatile uint32_t fmpl_eevr; /* 0x014 FMPL_EEVR - FM Policer Error Event Register */
++ volatile uint32_t fmpl_eier; /* 0x018 FMPL_EIER - FM Policer Error Interrupt Enable Register */
++ volatile uint32_t fmpl_eifr; /* 0x01C FMPL_EIFR - FM Policer Error Interrupt Force Register */
++/* Global Statistic Counters */
++ volatile uint32_t fmpl_rpcnt; /* 0x020 FMPL_RPC - FM Policer RED Packets Counter */
++ volatile uint32_t fmpl_ypcnt; /* 0x024 FMPL_YPC - FM Policer YELLOW Packets Counter */
++ volatile uint32_t fmpl_rrpcnt; /* 0x028 FMPL_RRPC - FM Policer Recolored RED Packet Counter */
++ volatile uint32_t fmpl_rypcnt; /* 0x02C FMPL_RYPC - FM Policer Recolored YELLOW Packet Counter */
++ volatile uint32_t fmpl_tpcnt; /* 0x030 FMPL_TPC - FM Policer Total Packet Counter */
++ volatile uint32_t fmpl_flmcnt; /* 0x034 FMPL_FLMC - FM Policer Frame Length Mismatch Counter */
++ volatile uint32_t fmpl_res0[21]; /* 0x038 - 0x08B Reserved */
++/* Profile RAM Access Registers */
++ volatile uint32_t fmpl_par; /* 0x08C FMPL_PAR - FM Policer Profile Action Register*/
++ t_FmPcdPlcrProfileRegs profileRegs;
++/* Error Capture Registers */
++ volatile uint32_t fmpl_serc; /* 0x100 FMPL_SERC - FM Policer Soft Error Capture */
++ volatile uint32_t fmpl_upcr; /* 0x104 FMPL_UPCR - FM Policer Uninitialized Profile Capture Register */
++ volatile uint32_t fmpl_res2; /* 0x108 Reserved */
++/* Debug Registers */
++ volatile uint32_t fmpl_res3[61]; /* 0x10C-0x200 Reserved Debug*/
++/* Profile Selection Mapping Registers Per Port-ID (n=1-11, 16) */
++ volatile uint32_t fmpl_dpmr; /* 0x200 FMPL_DPMR - FM Policer Default Mapping Register */
++    volatile uint32_t fmpl_pmr[63];     /* 0x204-0x2FF FMPL_PMR1 - FMPL_PMR63 - FM Policer Profile Mapping Registers
++                                                   (for port-ID 1-11, only for supported Port-ID registers) */
++} t_FmPcdPlcrRegs;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++/***********************************************************************/
++/* Driver's internal structures */
++/***********************************************************************/
++
++typedef struct {
++ bool known;
++ uint8_t id;
++} t_FmPcdKgSchemesExtractsEntry;
++
++typedef struct {
++ t_FmPcdKgSchemesExtractsEntry extractsArray[FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY];
++} t_FmPcdKgSchemesExtracts;
++
++typedef struct {
++ t_Handle h_Manip;
++ bool keepRes;
++ e_FmPcdEngine nextEngine;
++ uint8_t parseCode;
++} t_FmPcdInfoForManip;
++
++/**************************************************************************//**
++ @Description A structure of parameters to communicate
++ between the port and PCD regarding the KG scheme.
++*//***************************************************************************/
++typedef struct {
++ uint8_t netEnvId; /* in */
++ uint8_t numOfDistinctionUnits; /* in */
++ uint8_t unitIds[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /* in */
++ uint32_t vector; /* out */
++} t_NetEnvParams;
++
++typedef struct {
++ bool allocated;
++ uint8_t ownerId; /* guestId for KG in multi-partition only.
++ portId for PLCR in any environment */
++} t_FmPcdAllocMng;
++
++typedef struct {
++ volatile bool lock;
++ bool used;
++ uint8_t owners;
++ uint8_t netEnvId;
++ uint8_t guestId;
++ uint8_t baseEntry;
++ uint16_t sizeOfGrp;
++ protocolOpt_t optArray[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)];
++} t_FmPcdKgClsPlanGrp;
++
++typedef struct {
++ t_Handle h_FmPcd;
++ uint8_t schemeId;
++ t_FmPcdLock *p_Lock;
++ bool valid;
++ uint8_t netEnvId;
++ uint8_t owners;
++ uint32_t matchVector;
++ uint32_t ccUnits;
++ bool nextRelativePlcrProfile;
++ uint16_t relativeProfileId;
++ uint16_t numOfProfiles;
++ t_FmPcdKgKeyOrder orderedArray;
++ e_FmPcdEngine nextEngine;
++ e_FmPcdDoneAction doneAction;
++ bool requiredActionFlag;
++ uint32_t requiredAction;
++ bool extractedOrs;
++ uint8_t bitOffsetInPlcrProfile;
++ bool directPlcr;
++#if (DPAA_VERSION >= 11)
++ bool vspe;
++#endif
++} t_FmPcdKgScheme;
++
++typedef union {
++ struct fman_kg_scheme_regs schemeRegs;
++ struct fman_kg_pe_regs portRegs;
++ struct fman_kg_cp_regs clsPlanRegs;
++} u_FmPcdKgIndirectAccessRegs;
++
++typedef struct {
++ struct fman_kg_regs *p_FmPcdKgRegs;
++ uint32_t schemeExceptionsBitMask;
++ uint8_t numOfSchemes;
++ t_Handle h_HwSpinlock;
++ uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES];
++ t_FmPcdKgScheme schemes[FM_PCD_KG_NUM_OF_SCHEMES];
++ t_FmPcdKgClsPlanGrp clsPlanGrps[FM_MAX_NUM_OF_PORTS];
++ uint8_t emptyClsPlanGrpId;
++ t_FmPcdAllocMng schemesMng[FM_PCD_KG_NUM_OF_SCHEMES]; /* only for MASTER ! */
++ t_FmPcdAllocMng clsPlanBlocksMng[FM_PCD_MAX_NUM_OF_CLS_PLANS/CLS_PLAN_NUM_PER_GRP];
++ u_FmPcdKgIndirectAccessRegs *p_IndirectAccessRegs;
++} t_FmPcdKg;
++
++typedef struct {
++ uint16_t profilesBase;
++ uint16_t numOfProfiles;
++ t_Handle h_FmPort;
++} t_FmPcdPlcrMapParam;
++
++typedef struct {
++ uint16_t absoluteProfileId;
++ t_Handle h_FmPcd;
++ bool valid;
++ t_FmPcdLock *p_Lock;
++ t_FmPcdAllocMng profilesMng;
++ bool requiredActionFlag;
++ uint32_t requiredAction;
++ e_FmPcdEngine nextEngineOnGreen; /**< Green next engine type */
++ u_FmPcdPlcrNextEngineParams paramsOnGreen; /**< Green next engine params */
++
++ e_FmPcdEngine nextEngineOnYellow; /**< Yellow next engine type */
++ u_FmPcdPlcrNextEngineParams paramsOnYellow; /**< Yellow next engine params */
++
++ e_FmPcdEngine nextEngineOnRed; /**< Red next engine type */
++ u_FmPcdPlcrNextEngineParams paramsOnRed; /**< Red next engine params */
++} t_FmPcdPlcrProfile;
++
++typedef struct {
++ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs;
++ uint16_t partPlcrProfilesBase;
++ uint16_t partNumOfPlcrProfiles;
++ t_FmPcdPlcrProfile profiles[FM_PCD_PLCR_NUM_ENTRIES];
++ uint16_t numOfSharedProfiles;
++ uint16_t sharedProfilesIds[FM_PCD_PLCR_NUM_ENTRIES];
++ t_FmPcdPlcrMapParam portsMapping[FM_MAX_NUM_OF_PORTS];
++ t_Handle h_HwSpinlock;
++ t_Handle h_SwSpinlock;
++} t_FmPcdPlcr;
++
++typedef struct {
++ uint32_t *p_SwPrsCode;
++ uint32_t *p_CurrSwPrs;
++ uint8_t currLabel;
++ struct fman_prs_regs *p_FmPcdPrsRegs;
++ t_FmPcdPrsLabelParams labelsTable[FM_PCD_PRS_NUM_OF_LABELS];
++ uint32_t fmPcdPrsPortIdStatistics;
++} t_FmPcdPrs;
++
++typedef struct {
++ struct {
++ e_NetHeaderType hdr;
++ protocolOpt_t opt; /* only one option !! */
++ } hdrs[FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS];
++} t_FmPcdIntDistinctionUnit;
++
++typedef struct {
++ e_NetHeaderType hdr;
++ protocolOpt_t opt; /* only one option !! */
++ e_NetHeaderType aliasHdr;
++} t_FmPcdNetEnvAliases;
++
++typedef struct {
++ uint8_t netEnvId;
++ t_Handle h_FmPcd;
++ t_Handle h_Spinlock;
++ bool used;
++ uint8_t owners;
++ uint8_t clsPlanGrpId;
++ t_FmPcdIntDistinctionUnit units[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
++ uint32_t unitsVectors[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
++ uint32_t lcvs[FM_PCD_PRS_NUM_OF_HDRS];
++ uint32_t macsecVector;
++ t_FmPcdNetEnvAliases aliasHdrs[FM_PCD_MAX_NUM_OF_ALIAS_HDRS];
++} t_FmPcdNetEnv;
++
++typedef struct {
++ struct fman_prs_cfg dfltCfg;
++ bool plcrAutoRefresh;
++ uint16_t prsMaxParseCycleLimit;
++} t_FmPcdDriverParam;
++
++typedef struct {
++ t_Handle h_Fm;
++ t_Handle h_FmMuram;
++ t_FmRevisionInfo fmRevInfo;
++
++ uint64_t physicalMuramBase;
++
++ t_Handle h_Spinlock;
++ t_List freeLocksLst;
++ t_List acquiredLocksLst;
++
++ t_Handle h_IpcSession; /* relevant for guest only */
++ bool enabled;
++ uint8_t guestId; /**< Guest Partition Id */
++ uint8_t numOfEnabledGuestPartitionsPcds;
++ char fmPcdModuleName[MODULE_NAME_SIZE];
++ char fmPcdIpcHandlerModuleName[MODULE_NAME_SIZE]; /* relevant for guest only - this is the master's name */
++ t_FmPcdNetEnv netEnvs[FM_MAX_NUM_OF_PORTS];
++ t_FmPcdKg *p_FmPcdKg;
++ t_FmPcdPlcr *p_FmPcdPlcr;
++ t_FmPcdPrs *p_FmPcdPrs;
++
++ void *p_CcShadow; /**< CC MURAM shadow */
++ uint32_t ccShadowSize;
++ uint32_t ccShadowAlign;
++ volatile bool shadowLock;
++ t_Handle h_ShadowSpinlock;
++
++ t_Handle h_Hc;
++
++ uint32_t exceptions;
++ t_FmPcdExceptionCallback *f_Exception;
++ t_FmPcdIdExceptionCallback *f_FmPcdIndexedException;
++ t_Handle h_App;
++ uintptr_t ipv6FrameIdAddr;
++ uintptr_t capwapFrameIdAddr;
++ bool advancedOffloadSupport;
++
++ t_FmPcdDriverParam *p_FmPcdDriverParam;
++} t_FmPcd;
++
++#if (DPAA_VERSION >= 11)
++typedef uint8_t t_FmPcdFrmReplicUpdateType;
++#define FRM_REPLIC_UPDATE_COUNTER 0x01
++#define FRM_REPLIC_UPDATE_INFO 0x02
++#endif /* (DPAA_VERSION >= 11) */
++/***********************************************************************/
++/* PCD internal routines */
++/***********************************************************************/
++
++t_Error PcdGetVectorForOpt(t_FmPcd *p_FmPcd, uint8_t netEnvId, protocolOpt_t opt, uint32_t *p_Vector);
++t_Error PcdGetUnitsVector(t_FmPcd *p_FmPcd, t_NetEnvParams *p_Params);
++bool PcdNetEnvIsUnitWithoutOpts(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint32_t unitVector);
++t_Error PcdGetClsPlanGrpParams(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_GrpParams);
++void FmPcdSetClsPlanGrpId(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint8_t clsPlanGrpId);
++e_NetHeaderType FmPcdGetAliasHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr);
++uint8_t FmPcdNetEnvGetUnitIdForSingleHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr);
++uint8_t FmPcdNetEnvGetUnitId(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr, bool interchangeable, protocolOpt_t opt);
++
++t_Error FmPcdManipBuildIpReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv, t_Handle h_CcTree, t_Handle h_Manip, bool isIpv4, uint8_t groupId);
++t_Error FmPcdManipDeleteIpReassmSchemes(t_Handle h_Manip);
++t_Error FmPcdManipBuildCapwapReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv, t_Handle h_CcTree, t_Handle h_Manip, uint8_t groupId);
++t_Error FmPcdManipDeleteCapwapReassmSchemes(t_Handle h_Manip);
++bool FmPcdManipIpReassmIsIpv6Hdr(t_Handle h_Manip);
++
++t_Handle KgConfig( t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams);
++t_Error KgInit(t_FmPcd *p_FmPcd);
++t_Error KgFree(t_FmPcd *p_FmPcd);
++void KgSetClsPlan(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanSet *p_Set);
++bool KgIsSchemeAlwaysDirect(t_Handle h_FmPcd, uint8_t schemeId);
++void KgEnable(t_FmPcd *p_FmPcd);
++void KgDisable(t_FmPcd *p_FmPcd);
++t_Error KgAllocClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t *p_First);
++void KgFreeClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t base);
++
++/* only for MULTI partition */
++t_Error FmPcdKgAllocSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds);
++t_Error FmPcdKgFreeSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds);
++/* only for SINGLE partition */
++t_Error KgBindPortToSchemes(t_Handle h_FmPcd , uint8_t hardwarePortId, uint32_t spReg);
++
++t_FmPcdLock *FmPcdAcquireLock(t_Handle h_FmPcd);
++void FmPcdReleaseLock(t_Handle h_FmPcd, t_FmPcdLock *p_Lock);
++
++t_Handle PlcrConfig(t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams);
++t_Error PlcrInit(t_FmPcd *p_FmPcd);
++t_Error PlcrFree(t_FmPcd *p_FmPcd);
++void PlcrEnable(t_FmPcd *p_FmPcd);
++void PlcrDisable(t_FmPcd *p_FmPcd);
++uint16_t PlcrAllocProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId);
++void PlcrFreeProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId);
++t_Error PlcrSetPortProfiles(t_FmPcd *p_FmPcd,
++ uint8_t hardwarePortId,
++ uint16_t numOfProfiles,
++ uint16_t base);
++t_Error PlcrClearPortProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId);
++
++t_Handle PrsConfig(t_FmPcd *p_FmPcd,t_FmPcdParams *p_FmPcdParams);
++t_Error PrsInit(t_FmPcd *p_FmPcd);
++void PrsEnable(t_FmPcd *p_FmPcd);
++void PrsDisable(t_FmPcd *p_FmPcd);
++void PrsFree(t_FmPcd *p_FmPcd );
++t_Error PrsIncludePortInStatistics(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, bool include);
++
++t_Error FmPcdCcGetGrpParams(t_Handle treeId, uint8_t grpId, uint32_t *p_GrpBits, uint8_t *p_GrpBase);
++uint8_t FmPcdCcGetOffset(t_Handle h_CcNode);
++uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode);
++uint16_t FmPcdCcGetNumOfKeys(t_Handle h_CcNode);
++t_Error ValidateNextEngineParams(t_Handle h_FmPcd, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, e_FmPcdCcStatsMode supportedStatsMode);
++
++void FmPcdManipUpdateOwner(t_Handle h_Manip, bool add);
++t_Error FmPcdManipCheckParamsForCcNextEngine(t_FmPcdCcNextEngineParams *p_InfoForManip, uint32_t *requiredAction);
++void FmPcdManipUpdateAdResultForCc(t_Handle h_Manip,
++ t_FmPcdCcNextEngineParams *p_CcNextEngineParams,
++ t_Handle p_Ad,
++ t_Handle *p_AdNewPtr);
++void FmPcdManipUpdateAdContLookupForCc(t_Handle h_Manip, t_Handle p_Ad, t_Handle *p_AdNew, uint32_t adTableOffset);
++void FmPcdManipUpdateOwner(t_Handle h_Manip, bool add);
++t_Error FmPcdManipCheckParamsWithCcNodeParams(t_Handle h_Manip, t_Handle h_FmPcdCcNode);
++#ifdef FM_CAPWAP_SUPPORT
++t_Handle FmPcdManipApplSpecificBuild(void);
++bool FmPcdManipIsCapwapApplSpecific(t_Handle h_Manip);
++#endif /* FM_CAPWAP_SUPPORT */
++#if (DPAA_VERSION >= 11)
++void * FrmReplicGroupGetSourceTableDescriptor(t_Handle h_ReplicGroup);
++void FrmReplicGroupUpdateOwner(t_Handle h_ReplicGroup, bool add);
++void FrmReplicGroupUpdateAd(t_Handle h_ReplicGroup, void *p_Ad, t_Handle *h_AdNew);
++
++void FmPcdCcGetAdTablesThatPointOnReplicGroup(t_Handle h_Node,
++ t_Handle h_ReplicGroup,
++ t_List *p_AdTables,
++ uint32_t *p_NumOfAdTables);
++#endif /* (DPAA_VERSION >= 11) */
++
++void EnqueueNodeInfoToRelevantLst(t_List *p_List, t_CcNodeInformation *p_CcInfo, t_Handle h_Spinlock);
++void DequeueNodeInfoFromRelevantLst(t_List *p_List, t_Handle h_Info, t_Handle h_Spinlock);
++t_CcNodeInformation* FindNodeInfoInReleventLst(t_List *p_List, t_Handle h_Info, t_Handle h_Spinlock);
++t_List *FmPcdManipGetSpinlock(t_Handle h_Manip);
++t_List *FmPcdManipGetNodeLstPointedOnThisManip(t_Handle h_Manip);
++
++typedef struct
++{
++ t_Handle h_StatsAd;
++ t_Handle h_StatsCounters;
++#if (DPAA_VERSION >= 11)
++ t_Handle h_StatsFLRs;
++#endif /* (DPAA_VERSION >= 11) */
++} t_FmPcdCcStatsParams;
++
++void NextStepAd(t_Handle h_Ad,
++ t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,
++ t_FmPcd *p_FmPcd);
++void ReleaseLst(t_List *p_List);
++
++static __inline__ t_Handle FmPcdGetMuramHandle(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ ASSERT_COND(p_FmPcd);
++ return p_FmPcd->h_FmMuram;
++}
++
++static __inline__ uint64_t FmPcdGetMuramPhysBase(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ ASSERT_COND(p_FmPcd);
++ return p_FmPcd->physicalMuramBase;
++}
++
++static __inline__ uint32_t FmPcdLockSpinlock(t_FmPcdLock *p_Lock)
++{
++ ASSERT_COND(p_Lock);
++ return XX_LockIntrSpinlock(p_Lock->h_Spinlock);
++}
++
++static __inline__ void FmPcdUnlockSpinlock(t_FmPcdLock *p_Lock, uint32_t flags)
++{
++ ASSERT_COND(p_Lock);
++ XX_UnlockIntrSpinlock(p_Lock->h_Spinlock, flags);
++}
++
++static __inline__ bool FmPcdLockTryLock(t_FmPcdLock *p_Lock)
++{
++ uint32_t intFlags;
++
++ ASSERT_COND(p_Lock);
++ intFlags = XX_LockIntrSpinlock(p_Lock->h_Spinlock);
++ if (p_Lock->flag)
++ {
++ XX_UnlockIntrSpinlock(p_Lock->h_Spinlock, intFlags);
++ return FALSE;
++ }
++ p_Lock->flag = TRUE;
++ XX_UnlockIntrSpinlock(p_Lock->h_Spinlock, intFlags);
++ return TRUE;
++}
++
++static __inline__ void FmPcdLockUnlock(t_FmPcdLock *p_Lock)
++{
++ ASSERT_COND(p_Lock);
++ p_Lock->flag = FALSE;
++}
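++
++/* Illustrative usage sketch (not part of the driver): the intended try-lock
++ * pattern around a PCD resource, assuming 'p_Lock' was obtained earlier via
++ * FmPcdAcquireLock(); callers retry or bail out at a higher level:
++ *
++ *     if (FmPcdLockTryLock(p_Lock))
++ *     {
++ *         ... critical section: modify the shared PCD resource ...
++ *         FmPcdLockUnlock(p_Lock);
++ *     }
++ *     else
++ *         ... resource busy - retry later or fail gracefully ...
++ */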
++
++
++#endif /* __FM_PCD_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h
+new file mode 100644
+index 00000000..325d3e33
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h
+@@ -0,0 +1,280 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File fm_pcd_ipc.h
++
++ @Description FM PCD Inter-Partition prototypes, structures and definitions.
++*//***************************************************************************/
++#ifndef __FM_PCD_IPC_H
++#define __FM_PCD_IPC_H
++
++#include "std_ext.h"
++
++
++/**************************************************************************//**
++ @Group FM_grp Frame Manager API
++
++ @Description FM API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++/**************************************************************************//**
++ @Description Structure for getting a sw parser address according to a label
++ Fields commented 'IN' are passed by the port module to be used
++ by the FM module.
++ Fields commented 'OUT' will be filled by FM before returning to port.
++*//***************************************************************************/
++typedef _Packed struct t_FmPcdIpcSwPrsLable
++{
++ uint32_t enumHdr; /**< IN. The existence of this header will invoke
++ the sw parser code. */
++    uint8_t     indexPerHdr;    /**< IN. Normally 0; if more than one sw parser
++                                     attachment exists for the same header, use this
++                                     index to distinguish between them. */
++} _PackedType t_FmPcdIpcSwPrsLable;
++
++/**************************************************************************//**
++ @Description Structure for port-PCD communication.
++ Fields commented 'IN' are passed by the port module to be used
++ by the FM module.
++ Fields commented 'OUT' will be filled by FM before returning to port.
++ Some fields are optional (depending on configuration) and
++ will be analized by the port and FM modules accordingly.
++                will be analyzed by the port and FM modules accordingly.
++
++typedef struct t_FmPcdIpcKgSchemesParams
++{
++ uint8_t guestId;
++ uint8_t numOfSchemes;
++ uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES];
++} _PackedType t_FmPcdIpcKgSchemesParams;
++
++typedef struct t_FmPcdIpcKgClsPlanParams
++{
++ uint8_t guestId;
++ uint16_t numOfClsPlanEntries;
++ uint8_t clsPlanBase;
++} _PackedType t_FmPcdIpcKgClsPlanParams;
++
++typedef _Packed struct t_FmPcdIpcPrsIncludePort
++{
++ uint8_t hardwarePortId;
++ bool include;
++} _PackedType t_FmPcdIpcPrsIncludePort;
++
++
++#define FM_PCD_MAX_REPLY_SIZE 16
++#define FM_PCD_MAX_MSG_SIZE 36
++#define FM_PCD_MAX_REPLY_BODY_SIZE 36
++
++typedef _Packed struct {
++ uint32_t msgId;
++ uint8_t msgBody[FM_PCD_MAX_MSG_SIZE];
++} _PackedType t_FmPcdIpcMsg;
++
++typedef _Packed struct t_FmPcdIpcReply {
++ uint32_t error;
++ uint8_t replyBody[FM_PCD_MAX_REPLY_BODY_SIZE];
++} _PackedType t_FmPcdIpcReply;
++
++typedef _Packed struct t_FmIpcResourceAllocParams {
++ uint8_t guestId;
++ uint16_t base;
++ uint16_t num;
++}_PackedType t_FmIpcResourceAllocParams;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
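++
++/* Illustrative sketch (not part of the header): how a guest partition composes
++ * an IPC request from the types above, mirroring the driver's own usage in
++ * fm_plcr.c; 'h_Session' stands for an established IPC session handle and
++ * 'reply'/'replyLength' for caller-provided reply storage:
++ *
++ *     t_FmIpcResourceAllocParams allocParams;
++ *     t_FmPcdIpcMsg              msg;
++ *     memset(&msg, 0, sizeof(msg));
++ *     memset(&allocParams, 0, sizeof(allocParams));
++ *     allocParams.guestId = guestId;
++ *     allocParams.base    = base;
++ *     allocParams.num     = num;
++ *     msg.msgId = FM_PCD_ALLOC_PROFILES;
++ *     memcpy(msg.msgBody, &allocParams, sizeof(allocParams));
++ *     XX_IpcSendMessage(h_Session, (uint8_t*)&msg,
++ *                       sizeof(msg.msgId) + sizeof(allocParams),
++ *                       (uint8_t*)&reply, &replyLength, NULL, NULL);
++ */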
++
++
++
++/**************************************************************************//**
++ @Function FM_PCD_ALLOC_KG_SCHEMES
++
++ @Description Used by FM PCD front-end in order to allocate KG resources
++
++ @Param[in/out] t_FmPcdIpcKgSchemesParams Pointer
++*//***************************************************************************/
++#define FM_PCD_ALLOC_KG_SCHEMES 3
++
++/**************************************************************************//**
++ @Function FM_PCD_FREE_KG_SCHEMES
++
++ @Description Used by FM PCD front-end in order to Free KG resources
++
++ @Param[in/out] t_FmPcdIpcKgSchemesParams Pointer
++*//***************************************************************************/
++#define FM_PCD_FREE_KG_SCHEMES 4
++
++/**************************************************************************//**
++ @Function FM_PCD_ALLOC_PROFILES
++
++ @Description Used by FM PCD front-end in order to allocate Policer profiles
++
++ @Param[in/out] t_FmIpcResourceAllocParams Pointer
++*//***************************************************************************/
++#define FM_PCD_ALLOC_PROFILES 5
++
++/**************************************************************************//**
++ @Function FM_PCD_FREE_PROFILES
++
++ @Description Used by FM PCD front-end in order to Free Policer profiles
++
++ @Param[in/out] t_FmIpcResourceAllocParams Pointer
++*//***************************************************************************/
++#define FM_PCD_FREE_PROFILES 6
++
++/**************************************************************************//**
++ @Function FM_PCD_SET_PORT_PROFILES
++
++ @Description   Used by FM PCD front-end in order to set the Policer-profiles
++                window for a specific port
++
++ @Param[in/out] t_FmIpcResourceAllocParams Pointer
++*//***************************************************************************/
++#define FM_PCD_SET_PORT_PROFILES 7
++
++/**************************************************************************//**
++ @Function FM_PCD_CLEAR_PORT_PROFILES
++
++ @Description   Used by FM PCD front-end in order to clear the Policer-profiles
++                window of a specific port
++
++ @Param[in/out] t_FmIpcResourceAllocParams Pointer
++*//***************************************************************************/
++#define FM_PCD_CLEAR_PORT_PROFILES 8
++
++/**************************************************************************//**
++ @Function FM_PCD_GET_PHYS_MURAM_BASE
++
++ @Description Used by FM PCD front-end in order to get MURAM base address
++
++ @Param[in/out] t_FmPcdIcPhysAddr Pointer
++*//***************************************************************************/
++#define FM_PCD_GET_PHYS_MURAM_BASE 9
++
++/**************************************************************************//**
++ @Function FM_PCD_GET_SW_PRS_OFFSET
++
++ @Description Used by FM front-end to get the SW parser offset of the start of
++ code relevant to a given label.
++
++ @Param[in/out] t_FmPcdIpcSwPrsLable Pointer
++*//***************************************************************************/
++#define FM_PCD_GET_SW_PRS_OFFSET 10
++
++/**************************************************************************//**
++ @Function FM_PCD_MASTER_IS_ENABLED
++
++ @Description Used by FM front-end in order to verify
++ PCD enablement.
++
++ @Param[in] bool Pointer
++*//***************************************************************************/
++#define FM_PCD_MASTER_IS_ENABLED 15
++
++/**************************************************************************//**
++ @Function FM_PCD_GUEST_DISABLE
++
++ @Description Used by FM front-end to inform back-end when
++ front-end PCD is disabled
++
++ @Param[in] None
++*//***************************************************************************/
++#define FM_PCD_GUEST_DISABLE 16
++
++/**************************************************************************//**
++ @Function FM_PCD_FREE_KG_CLSPLAN
++
++ @Description Used by FM PCD front-end in order to Free KG classification plan entries
++
++ @Param[in/out] t_FmPcdIpcKgClsPlanParams Pointer
++*//***************************************************************************/
++#define FM_PCD_FREE_KG_CLSPLAN 22
++
++/**************************************************************************//**
++ @Function FM_PCD_ALLOC_KG_CLSPLAN
++
++ @Description Used by FM PCD front-end in order to allocate KG classification plan entries
++
++ @Param[in/out] t_FmPcdIpcKgClsPlanParams Pointer
++*//***************************************************************************/
++#define FM_PCD_ALLOC_KG_CLSPLAN 23
++
++/**************************************************************************//**
++ @Function FM_PCD_MASTER_IS_ALIVE
++
++ @Description Used by FM front-end to check that back-end exists
++
++ @Param[in] None
++*//***************************************************************************/
++#define FM_PCD_MASTER_IS_ALIVE 24
++
++/**************************************************************************//**
++ @Function FM_PCD_GET_COUNTER
++
++ @Description Used by FM front-end to read PCD counters
++
++ @Param[in/out] t_FmPcdIpcGetCounter Pointer
++*//***************************************************************************/
++#define FM_PCD_GET_COUNTER 25
++
++/**************************************************************************//**
++ @Function FM_PCD_PRS_INC_PORT_STATS
++
++ @Description Used by FM front-end to set/clear statistics for port
++
++ @Param[in/out] t_FmPcdIpcPrsIncludePort Pointer
++*//***************************************************************************/
++#define FM_PCD_PRS_INC_PORT_STATS 26
++
++#if (DPAA_VERSION >= 11)
++/* TODO - doc */
++#define FM_PCD_ALLOC_SP 27
++#endif /* (DPAA_VERSION >= 11) */
++
++
++/** @} */ /* end of FM_PCD_IPC_grp group */
++/** @} */ /* end of FM_grp group */
++
++
++#endif /* __FM_PCD_IPC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.c
+new file mode 100644
+index 00000000..e3753305
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.c
+@@ -0,0 +1,1847 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_plcr.c
++
++ @Description FM PCD POLICER...
++*//***************************************************************************/
++#include <linux/math64.h>
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "debug_ext.h"
++#include "net_ext.h"
++#include "fm_ext.h"
++
++#include "fm_common.h"
++#include "fm_pcd.h"
++#include "fm_hc.h"
++#include "fm_pcd_ipc.h"
++#include "fm_plcr.h"
++
++
++/****************************************/
++/* static functions */
++/****************************************/
++
++static uint32_t PlcrProfileLock(t_Handle h_Profile)
++{
++ ASSERT_COND(h_Profile);
++ return FmPcdLockSpinlock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock);
++}
++
++static void PlcrProfileUnlock(t_Handle h_Profile, uint32_t intFlags)
++{
++ ASSERT_COND(h_Profile);
++ FmPcdUnlockSpinlock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock, intFlags);
++}
++
++static bool PlcrProfileFlagTryLock(t_Handle h_Profile)
++{
++ ASSERT_COND(h_Profile);
++ return FmPcdLockTryLock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock);
++}
++
++static void PlcrProfileFlagUnlock(t_Handle h_Profile)
++{
++ ASSERT_COND(h_Profile);
++ FmPcdLockUnlock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock);
++}
++
++static uint32_t PlcrHwLock(t_Handle h_FmPcdPlcr)
++{
++ ASSERT_COND(h_FmPcdPlcr);
++ return XX_LockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_HwSpinlock);
++}
++
++static void PlcrHwUnlock(t_Handle h_FmPcdPlcr, uint32_t intFlags)
++{
++ ASSERT_COND(h_FmPcdPlcr);
++ XX_UnlockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_HwSpinlock, intFlags);
++}
++
++static uint32_t PlcrSwLock(t_Handle h_FmPcdPlcr)
++{
++ ASSERT_COND(h_FmPcdPlcr);
++ return XX_LockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_SwSpinlock);
++}
++
++static void PlcrSwUnlock(t_Handle h_FmPcdPlcr, uint32_t intFlags)
++{
++ ASSERT_COND(h_FmPcdPlcr);
++ XX_UnlockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_SwSpinlock, intFlags);
++}
++
++static bool IsProfileShared(t_Handle h_FmPcd, uint16_t absoluteProfileId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint16_t i;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, FALSE);
++
++ for (i=0;i<p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles;i++)
++ if (p_FmPcd->p_FmPcdPlcr->sharedProfilesIds[i] == absoluteProfileId)
++ return TRUE;
++ return FALSE;
++}
++
++static t_Error SetProfileNia(t_FmPcd *p_FmPcd, e_FmPcdEngine nextEngine, u_FmPcdPlcrNextEngineParams *p_NextEngineParams, uint32_t *nextAction)
++{
++ uint32_t nia;
++ uint16_t absoluteProfileId;
++ uint8_t relativeSchemeId, physicalSchemeId;
++
++ nia = FM_PCD_PLCR_NIA_VALID;
++
++ switch (nextEngine)
++ {
++ case e_FM_PCD_DONE :
++ switch (p_NextEngineParams->action)
++ {
++ case e_FM_PCD_DROP_FRAME :
++ nia |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd);
++ break;
++ case e_FM_PCD_ENQ_FRAME:
++ nia |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ break;
++ case e_FM_PCD_KG:
++ physicalSchemeId = FmPcdKgGetSchemeId(p_NextEngineParams->h_DirectScheme);
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId);
++ if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
++ if (!FmPcdKgIsSchemeValidSw(p_NextEngineParams->h_DirectScheme))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid direct scheme."));
++ if (!KgIsSchemeAlwaysDirect(p_FmPcd, relativeSchemeId))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Policer Profile may point only to a scheme that is always direct."));
++ nia |= NIA_ENG_KG | NIA_KG_DIRECT | physicalSchemeId;
++ break;
++ case e_FM_PCD_PLCR:
++ absoluteProfileId = ((t_FmPcdPlcrProfile *)p_NextEngineParams->h_Profile)->absoluteProfileId;
++ if (!IsProfileShared(p_FmPcd, absoluteProfileId))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next profile must be a shared profile"));
++ if (!FmPcdPlcrIsProfileValid(p_FmPcd, absoluteProfileId))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid profile "));
++ nia |= NIA_ENG_PLCR | NIA_PLCR_ABSOLUTE | absoluteProfileId;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ *nextAction = nia;
++
++ return E_OK;
++}
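++
++/* Note (illustrative summary of the switch above): the composed NIA word is
++ * always FM_PCD_PLCR_NIA_VALID OR'ed with an engine selector and an
++ * engine-specific argument, e.g. NIA_ENG_PLCR | NIA_PLCR_ABSOLUTE |
++ * absoluteProfileId when chaining to a shared Policer profile. */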
++
++static uint32_t CalcFPP(uint32_t fpp)
++{
++ if (fpp > 15)
++ return 15 - (0x1f - fpp);
++ else
++ return 16 + fpp;
++}
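++
++/* Worked example (illustrative only): CalcFPP() maps the internal fppShift to
++ * the register encoding 0-15 -> 16-31 and 16-31 -> 0-15, so fppShift = 0
++ * yields 16, fppShift = 15 yields 31, and fppShift = 20 yields
++ * 15 - (0x1f - 20) = 4. */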
++
++static void GetInfoRateReg(e_FmPcdPlcrRateMode rateMode,
++ uint32_t rate,
++ uint64_t tsuInTenthNano,
++ uint32_t fppShift,
++ uint64_t *p_Integer,
++ uint64_t *p_Fraction)
++{
++ uint64_t tmp, div;
++
++ if (rateMode == e_FM_PCD_PLCR_BYTE_MODE)
++ {
++ /* now we calculate the initial integer for the bigger rate */
++ /* from Kbps to Bytes/TSU */
++ tmp = (uint64_t)rate;
++ tmp *= 1000; /* kb --> b */
++ tmp *= tsuInTenthNano; /* bps --> bpTsu(in 10nano) */
++
++ div = 1000000000; /* nano */
++ div *= 10; /* 10 nano */
++ div *= 8; /* bit to byte */
++ }
++ else
++ {
++        /* now we calculate the initial integer for the bigger rate */
++        /* from packets/sec to packets/TSU */
++ tmp = (uint64_t)rate;
++ tmp *= tsuInTenthNano; /* bps --> bpTsu(in 10nano) */
++
++ div = 1000000000; /* nano */
++ div *= 10; /* 10 nano */
++ }
++ *p_Integer = div64_u64(tmp<<fppShift, div);
++
++    /* For calculating the fraction, we recalculate cir and deduct the integer.
++     * For precision, we multiply by 2^16; we do not divide back, since we write
++     * this value as a fraction - see spec.
++     */
++ *p_Fraction = div64_u64(((tmp<<fppShift)<<16) - ((*p_Integer<<16)*div), div);
++}
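++
++/* Worked example (illustrative, byte mode): for rate = 1,000,000 Kbps (1 Gbps),
++ * tsuInTenthNano = 39 (a 3.9ns timestamp unit) and fppShift = 0:
++ *     tmp = 10^6 * 1000 * 39 = 3.9*10^10,   div = 10^9 * 10 * 8 = 8*10^10,
++ * so *p_Integer = 0 and *p_Fraction ~= 0.4875 * 2^16 - i.e. 1 Gbps moves
++ * 0.4875 bytes per 3.9ns timestamp unit. */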
++
++/* .......... */
++
++static void CalcRates(uint32_t bitFor1Micro,
++ t_FmPcdPlcrNonPassthroughAlgParams *p_NonPassthroughAlgParam,
++ uint32_t *cir,
++ uint32_t *cbs,
++ uint32_t *pir_eir,
++ uint32_t *pbs_ebs,
++ uint32_t *fpp)
++{
++ uint64_t integer, fraction;
++ uint32_t temp, tsuInTenthNanos;
++ uint8_t fppShift=0;
++
++    /* We want the TSU to count in 10-nano units for better precision; normally the TSU is 3.9 nano, so here we get 39 */
++ tsuInTenthNanos = (uint32_t)(1000*10/(1 << bitFor1Micro));
++
++ /* we choose the faster rate to calibrate fpp */
++ /* The meaning of this step:
++ * when fppShift is 0 it means all TS bits are treated as integer and TSU is the TS LSB count.
++ * In this configuration we calculate the integer and fraction that represent the higher infoRate
++ * When this is done, we can tell where we have "spare" unused bits and optimize the division of TS
++ * into "integer" and "fraction" where the logic is - as many bits as possible for integer at
++ * high rate, as many bits as possible for fraction at low rate.
++ */
++ if (p_NonPassthroughAlgParam->committedInfoRate > p_NonPassthroughAlgParam->peakOrExcessInfoRate)
++ GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->committedInfoRate, tsuInTenthNanos, 0, &integer, &fraction);
++ else
++ GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->peakOrExcessInfoRate, tsuInTenthNanos, 0, &integer, &fraction);
++
++ /* we shift integer, as in cir/pir it is represented by the MSB 16 bits, and
++ * the LSB bits are for the fraction */
++ temp = (uint32_t)((integer<<16) & 0x00000000FFFFFFFF);
++    /* temp is affected by the rate. For low rates it may be as low as 0, and then we'll
++ * take max FP = 31.
++ * For high rates it will never exceed the 32 bit reg (after the 16 shift), as it is
++ * limited by the 10G physical port.
++ */
++ if (temp != 0)
++ {
++ /* In this case, the largest rate integer is non 0, if it does not occupy all (high) 16
++ * bits of the PIR_EIR we can use this fact and enlarge it to occupy all 16 bits.
++ * The logic is to have as many bits for integer in the higher rates, but if we have "0"s
++         * in the integer part of the cir/pir register, then these bits are wasted. So we want
++         * to use these bits for the fraction; in this way the fraction gets the number
++         * of "0" bits and the integer gets the rest.
++ * In other words: For each bit we shift it in PIR_EIR, we move the FP in the TS
++ * one bit to the left - preserving the relationship and achieving more bits
++ * for integer in the TS.
++ */
++
++        /* count zeroes left of the highest used bit (in order to shift the value such that
++ * unused bits may be used for fraction).
++ */
++ while ((temp & 0x80000000) == 0)
++ {
++ temp = temp << 1;
++ fppShift++;
++ }
++ if (fppShift > 15)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_SELECTION, ("timeStampPeriod to Information rate ratio is too small"));
++ return;
++ }
++ }
++ else
++ {
++        temp = (uint32_t)fraction; /* fraction will always be smaller than 2^16 */
++ if (!temp)
++ /* integer and fraction are 0, we set FP to its max val */
++ fppShift = 31;
++ else
++ {
++ /* integer was 0 but fraction is not. FP is 16 for the fraction,
++ * + all left zeroes of the fraction. */
++ fppShift=16;
++            /* count zeroes left of the highest used bit (in order to shift the value such that
++ * unused bits may be used for fraction).
++ */
++ while ((temp & 0x8000) == 0)
++ {
++ temp = temp << 1;
++ fppShift++;
++ }
++ }
++ }
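++    /* Illustrative example: if integer == 0 and fraction == 0x0400, the loop
++     * above shifts 5 times until bit 15 is set, giving fppShift = 16 + 5 = 21. */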
++
++    /* This means that the FM TS register will now be used so that 'fppShift' bits
++     * are for fraction and the rest for integer. */
++ /* now we re-calculate cir and pir_eir with the calculated FP */
++ GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->committedInfoRate, tsuInTenthNanos, fppShift, &integer, &fraction);
++ *cir = (uint32_t)(integer << 16 | (fraction & 0xFFFF));
++ GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->peakOrExcessInfoRate, tsuInTenthNanos, fppShift, &integer, &fraction);
++ *pir_eir = (uint32_t)(integer << 16 | (fraction & 0xFFFF));
++
++ *cbs = p_NonPassthroughAlgParam->committedBurstSize;
++ *pbs_ebs = p_NonPassthroughAlgParam->peakOrExcessBurstSize;
++
++ /* convert FP as it should be written to reg.
++ * 0-15 --> 16-31
++ * 16-31 --> 0-15
++ */
++ *fpp = CalcFPP(fppShift);
++}
++
++static void WritePar(t_FmPcd *p_FmPcd, uint32_t par)
++{
++ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++
++ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
++ WRITE_UINT32(p_FmPcdPlcrRegs->fmpl_par, par);
++
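++    /* Busy-wait until the hardware clears the GO bit, i.e. the Policer has
++     * completed the requested profile action */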
++ while (GET_UINT32(p_FmPcdPlcrRegs->fmpl_par) & FM_PCD_PLCR_PAR_GO) ;
++}
++
++static t_Error BuildProfileRegs(t_FmPcd *p_FmPcd,
++ t_FmPcdPlcrProfileParams *p_ProfileParams,
++ t_FmPcdPlcrProfileRegs *p_PlcrRegs)
++{
++ t_Error err = E_OK;
++ uint32_t pemode, gnia, ynia, rnia, bitFor1Micro;
++
++ ASSERT_COND(p_FmPcd);
++
++ bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm);
++ if (bitFor1Micro == 0)
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Timestamp scale"));
++
++/* Set G, Y, R Nia */
++ err = SetProfileNia(p_FmPcd, p_ProfileParams->nextEngineOnGreen, &(p_ProfileParams->paramsOnGreen), &gnia);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ err = SetProfileNia(p_FmPcd, p_ProfileParams->nextEngineOnYellow, &(p_ProfileParams->paramsOnYellow), &ynia);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ err = SetProfileNia(p_FmPcd, p_ProfileParams->nextEngineOnRed, &(p_ProfileParams->paramsOnRed), &rnia);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++/* Mode fmpl_pemode */
++ pemode = FM_PCD_PLCR_PEMODE_PI;
++
++ switch (p_ProfileParams->algSelection)
++ {
++ case e_FM_PCD_PLCR_PASS_THROUGH:
++ p_PlcrRegs->fmpl_pecir = 0;
++ p_PlcrRegs->fmpl_pecbs = 0;
++ p_PlcrRegs->fmpl_pepepir_eir = 0;
++ p_PlcrRegs->fmpl_pepbs_ebs = 0;
++ p_PlcrRegs->fmpl_pelts = 0;
++ p_PlcrRegs->fmpl_pects = 0;
++ p_PlcrRegs->fmpl_pepts_ets = 0;
++ pemode &= ~FM_PCD_PLCR_PEMODE_ALG_MASK;
++ switch (p_ProfileParams->colorMode)
++ {
++ case e_FM_PCD_PLCR_COLOR_BLIND:
++ pemode |= FM_PCD_PLCR_PEMODE_CBLND;
++ switch (p_ProfileParams->color.dfltColor)
++ {
++ case e_FM_PCD_PLCR_GREEN:
++ pemode &= ~FM_PCD_PLCR_PEMODE_DEFC_MASK;
++ break;
++ case e_FM_PCD_PLCR_YELLOW:
++ pemode |= FM_PCD_PLCR_PEMODE_DEFC_Y;
++ break;
++ case e_FM_PCD_PLCR_RED:
++ pemode |= FM_PCD_PLCR_PEMODE_DEFC_R;
++ break;
++ case e_FM_PCD_PLCR_OVERRIDE:
++ pemode |= FM_PCD_PLCR_PEMODE_DEFC_OVERRIDE;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ break;
++ case e_FM_PCD_PLCR_COLOR_AWARE:
++ pemode &= ~FM_PCD_PLCR_PEMODE_CBLND;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ break;
++
++ case e_FM_PCD_PLCR_RFC_2698:
++ /* Select algorithm MODE[ALG] = "01" */
++ pemode |= FM_PCD_PLCR_PEMODE_ALG_RFC2698;
++ if (p_ProfileParams->nonPassthroughAlgParams.committedInfoRate > p_ProfileParams->nonPassthroughAlgParams.peakOrExcessInfoRate)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("in RFC2698 Peak rate must be equal or larger than committedInfoRate."));
++ goto cont_rfc;
++ case e_FM_PCD_PLCR_RFC_4115:
++ /* Select algorithm MODE[ALG] = "10" */
++ pemode |= FM_PCD_PLCR_PEMODE_ALG_RFC4115;
++cont_rfc:
++ /* Select Color-Blind / Color-Aware operation (MODE[CBLND]) */
++ switch (p_ProfileParams->colorMode)
++ {
++ case e_FM_PCD_PLCR_COLOR_BLIND:
++ pemode |= FM_PCD_PLCR_PEMODE_CBLND;
++ break;
++ case e_FM_PCD_PLCR_COLOR_AWARE:
++ pemode &= ~FM_PCD_PLCR_PEMODE_CBLND;
++                /* In color-aware mode, select the override color interpretation (MODE[OVCLR]) */
++ switch (p_ProfileParams->color.override)
++ {
++ case e_FM_PCD_PLCR_GREEN:
++ pemode &= ~FM_PCD_PLCR_PEMODE_OVCLR_MASK;
++ break;
++ case e_FM_PCD_PLCR_YELLOW:
++ pemode |= FM_PCD_PLCR_PEMODE_OVCLR_Y;
++ break;
++ case e_FM_PCD_PLCR_RED:
++ pemode |= FM_PCD_PLCR_PEMODE_OVCLR_R;
++ break;
++ case e_FM_PCD_PLCR_OVERRIDE:
++ pemode |= FM_PCD_PLCR_PEMODE_OVCLR_G_NC;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ /* Select Measurement Unit Mode to BYTE or PACKET (MODE[PKT]) */
++ switch (p_ProfileParams->nonPassthroughAlgParams.rateMode)
++ {
++ case e_FM_PCD_PLCR_BYTE_MODE :
++ pemode &= ~FM_PCD_PLCR_PEMODE_PKT;
++ switch (p_ProfileParams->nonPassthroughAlgParams.byteModeParams.frameLengthSelection)
++ {
++ case e_FM_PCD_PLCR_L2_FRM_LEN:
++ pemode |= FM_PCD_PLCR_PEMODE_FLS_L2;
++ break;
++ case e_FM_PCD_PLCR_L3_FRM_LEN:
++ pemode |= FM_PCD_PLCR_PEMODE_FLS_L3;
++ break;
++ case e_FM_PCD_PLCR_L4_FRM_LEN:
++ pemode |= FM_PCD_PLCR_PEMODE_FLS_L4;
++ break;
++ case e_FM_PCD_PLCR_FULL_FRM_LEN:
++ pemode |= FM_PCD_PLCR_PEMODE_FLS_FULL;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ switch (p_ProfileParams->nonPassthroughAlgParams.byteModeParams.rollBackFrameSelection)
++ {
++ case e_FM_PCD_PLCR_ROLLBACK_L2_FRM_LEN:
++ pemode &= ~FM_PCD_PLCR_PEMODE_RBFLS;
++ break;
++ case e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN:
++ pemode |= FM_PCD_PLCR_PEMODE_RBFLS;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ break;
++ case e_FM_PCD_PLCR_PACKET_MODE :
++ pemode |= FM_PCD_PLCR_PEMODE_PKT;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ /* Select timeStamp floating point position (MODE[FPP]) to fit the actual traffic rates. For PACKET
++ mode with low traffic rates move the fixed point to the left to increase fraction accuracy. For BYTE
++ mode with high traffic rates move the fixed point to the right to increase integer accuracy. */
++
++ /* Configure Traffic Parameters*/
++ {
++ uint32_t cir=0, cbs=0, pir_eir=0, pbs_ebs=0, fpp=0;
++
++ CalcRates(bitFor1Micro, &p_ProfileParams->nonPassthroughAlgParams, &cir, &cbs, &pir_eir, &pbs_ebs, &fpp);
++
++ /* Set Committed Information Rate (CIR) */
++ p_PlcrRegs->fmpl_pecir = cir;
++ /* Set Committed Burst Size (CBS). */
++ p_PlcrRegs->fmpl_pecbs = cbs;
++ /* Set Peak Information Rate (PIR_EIR used as PIR) */
++ p_PlcrRegs->fmpl_pepepir_eir = pir_eir;
++ /* Set Peak Burst Size (PBS_EBS used as PBS) */
++ p_PlcrRegs->fmpl_pepbs_ebs = pbs_ebs;
++
++            /* Initialize the Metering Buckets to be full (write them with 0xFFFFFFFF). */
++ /* Peak Rate Token Bucket Size (PTS_ETS used as PTS) */
++ p_PlcrRegs->fmpl_pepts_ets = 0xFFFFFFFF;
++ /* Committed Rate Token Bucket Size (CTS) */
++ p_PlcrRegs->fmpl_pects = 0xFFFFFFFF;
++
++ /* Set the FPP based on calculation */
++ pemode |= (fpp << FM_PCD_PLCR_PEMODE_FPP_SHIFT);
++ }
++ break; /* FM_PCD_PLCR_PEMODE_ALG_RFC2698 , FM_PCD_PLCR_PEMODE_ALG_RFC4115 */
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ p_PlcrRegs->fmpl_pemode = pemode;
++
++ p_PlcrRegs->fmpl_pegnia = gnia;
++ p_PlcrRegs->fmpl_peynia = ynia;
++ p_PlcrRegs->fmpl_pernia = rnia;
++
++ /* Zero Counters */
++ p_PlcrRegs->fmpl_pegpc = 0;
++ p_PlcrRegs->fmpl_peypc = 0;
++ p_PlcrRegs->fmpl_perpc = 0;
++ p_PlcrRegs->fmpl_perypc = 0;
++ p_PlcrRegs->fmpl_perrpc = 0;
++
++ return E_OK;
++}
++
++static t_Error AllocSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds)
++{
++ uint32_t profilesFound;
++ uint16_t i, k=0;
++ uint32_t intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ if (!numOfProfiles)
++ return E_OK;
++
++ if (numOfProfiles>FM_PCD_PLCR_NUM_ENTRIES)
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numProfiles is too big."));
++
++ intFlags = PlcrSwLock(p_FmPcd->p_FmPcdPlcr);
++ /* Find numOfProfiles free profiles (may be spread) */
++ profilesFound = 0;
++ for (i=0;i<FM_PCD_PLCR_NUM_ENTRIES; i++)
++ if (!p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated)
++ {
++ profilesFound++;
++ profilesIds[k] = i;
++ k++;
++ if (profilesFound == numOfProfiles)
++ break;
++ }
++
++ if (profilesFound != numOfProfiles)
++ {
++ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,NO_MSG);
++ }
++
++ for (i = 0;i<k;i++)
++ {
++ p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated = TRUE;
++ p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.ownerId = 0;
++ }
++ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++
++ return E_OK;
++}
++
++static void FreeSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds)
++{
++ uint16_t i;
++
++ SANITY_CHECK_RETURN(p_FmPcd, E_INVALID_HANDLE);
++
++ ASSERT_COND(numOfProfiles);
++
++ for (i=0; i < numOfProfiles; i++)
++ {
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated);
++ p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated = FALSE;
++ p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.ownerId = p_FmPcd->guestId;
++ }
++}
++
++static void UpdateRequiredActionFlag(t_Handle h_FmPcd, uint16_t absoluteProfileId, bool set)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ /* this routine is protected by calling routine */
++
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
++
++ if (set)
++ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredActionFlag = TRUE;
++ else
++ {
++ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction = 0;
++ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredActionFlag = FALSE;
++ }
++}
++
++/*********************************************/
++/*............Policer Exception..............*/
++/*********************************************/
++static void EventsCB(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint32_t event, mask, force;
++
++ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
++ event = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_evr);
++ mask = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier);
++
++ event &= mask;
++
++ /* clear the forced events */
++ force = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr);
++ if (force & event)
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, force & ~event);
++
++
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_evr, event);
++
++ if (event & FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE)
++ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE);
++ if (event & FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE)
++ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE);
++}
++
++/* ..... */
++
++static void ErrorExceptionsCB(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint32_t event, force, captureReg, mask;
++
++ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
++ event = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eevr);
++ mask = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier);
++
++ event &= mask;
++
++ /* clear the forced events */
++ force = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr);
++ if (force & event)
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, force & ~event);
++
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eevr, event);
++
++ if (event & FM_PCD_PLCR_DOUBLE_ECC)
++ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC);
++ if (event & FM_PCD_PLCR_INIT_ENTRY_ERROR)
++ {
++ captureReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_upcr);
++ /*ASSERT_COND(captureReg & PLCR_ERR_UNINIT_CAP);
++ p_UnInitCapt->profileNum = (uint8_t)(captureReg & PLCR_ERR_UNINIT_NUM_MASK);
++ p_UnInitCapt->portId = (uint8_t)((captureReg & PLCR_ERR_UNINIT_PID_MASK) >>PLCR_ERR_UNINIT_PID_SHIFT) ;
++ p_UnInitCapt->absolute = (bool)(captureReg & PLCR_ERR_UNINIT_ABSOLUTE_MASK);*/
++ p_FmPcd->f_FmPcdIndexedException(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR,(uint16_t)(captureReg & PLCR_ERR_UNINIT_NUM_MASK));
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_upcr, PLCR_ERR_UNINIT_CAP);
++ }
++}
++
++
++/*****************************************************************************/
++/* Inter-module API routines */
++/*****************************************************************************/
++
++t_Handle PlcrConfig(t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams)
++{
++ t_FmPcdPlcr *p_FmPcdPlcr;
++ uint16_t i=0;
++
++ UNUSED(p_FmPcd);
++ UNUSED(p_FmPcdParams);
++
++ p_FmPcdPlcr = (t_FmPcdPlcr *) XX_Malloc(sizeof(t_FmPcdPlcr));
++ if (!p_FmPcdPlcr)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer structure allocation FAILED"));
++ return NULL;
++ }
++ memset(p_FmPcdPlcr, 0, sizeof(t_FmPcdPlcr));
++ if (p_FmPcd->guestId == NCSW_MASTER_ID)
++ {
++ p_FmPcdPlcr->p_FmPcdPlcrRegs = (t_FmPcdPlcrRegs *)UINT_TO_PTR(FmGetPcdPlcrBaseAddr(p_FmPcdParams->h_Fm));
++ p_FmPcd->p_FmPcdDriverParam->plcrAutoRefresh = DEFAULT_plcrAutoRefresh;
++ p_FmPcd->exceptions |= (DEFAULT_fmPcdPlcrExceptions | DEFAULT_fmPcdPlcrErrorExceptions);
++ }
++
++ p_FmPcdPlcr->numOfSharedProfiles = DEFAULT_numOfSharedPlcrProfiles;
++
++ p_FmPcdPlcr->partPlcrProfilesBase = p_FmPcdParams->partPlcrProfilesBase;
++ p_FmPcdPlcr->partNumOfPlcrProfiles = p_FmPcdParams->partNumOfPlcrProfiles;
++    /* For backward compatibility: if no policer profiles were specified, automatically set to the max */
++ if ((p_FmPcd->guestId == NCSW_MASTER_ID) &&
++ (p_FmPcdPlcr->partNumOfPlcrProfiles == 0))
++ p_FmPcdPlcr->partNumOfPlcrProfiles = FM_PCD_PLCR_NUM_ENTRIES;
++
++ for (i=0; i<FM_PCD_PLCR_NUM_ENTRIES; i++)
++ p_FmPcdPlcr->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE;
++
++ return p_FmPcdPlcr;
++}
++
++t_Error PlcrInit(t_FmPcd *p_FmPcd)
++{
++ t_FmPcdDriverParam *p_Param = p_FmPcd->p_FmPcdDriverParam;
++ t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr;
++ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++ t_Error err = E_OK;
++ uint32_t tmpReg32 = 0;
++ uint16_t base;
++
++ if ((p_FmPcdPlcr->partPlcrProfilesBase + p_FmPcdPlcr->partNumOfPlcrProfiles) > FM_PCD_PLCR_NUM_ENTRIES)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("partPlcrProfilesBase+partNumOfPlcrProfiles out of range!!!"));
++
++ p_FmPcdPlcr->h_HwSpinlock = XX_InitSpinlock();
++ if (!p_FmPcdPlcr->h_HwSpinlock)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer HW spinlock"));
++
++ p_FmPcdPlcr->h_SwSpinlock = XX_InitSpinlock();
++ if (!p_FmPcdPlcr->h_SwSpinlock)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer SW spinlock"));
++
++ base = PlcrAllocProfilesForPartition(p_FmPcd,
++ p_FmPcdPlcr->partPlcrProfilesBase,
++ p_FmPcdPlcr->partNumOfPlcrProfiles,
++ p_FmPcd->guestId);
++ if (base == (uint16_t)ILLEGAL_BASE)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++
++ if (p_FmPcdPlcr->numOfSharedProfiles)
++ {
++ err = AllocSharedProfiles(p_FmPcd,
++ p_FmPcdPlcr->numOfSharedProfiles,
++ p_FmPcdPlcr->sharedProfilesIds);
++ if (err)
++ RETURN_ERROR(MAJOR, err,NO_MSG);
++ }
++
++ if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ return E_OK;
++
++ /**********************FMPL_GCR******************/
++ tmpReg32 = 0;
++ tmpReg32 |= FM_PCD_PLCR_GCR_STEN;
++ if (p_Param->plcrAutoRefresh)
++ tmpReg32 |= FM_PCD_PLCR_GCR_DAR;
++ tmpReg32 |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
++
++ WRITE_UINT32(p_Regs->fmpl_gcr, tmpReg32);
++ /**********************FMPL_GCR******************/
++
++ /**********************FMPL_EEVR******************/
++ WRITE_UINT32(p_Regs->fmpl_eevr, (FM_PCD_PLCR_DOUBLE_ECC | FM_PCD_PLCR_INIT_ENTRY_ERROR));
++ /**********************FMPL_EEVR******************/
++ /**********************FMPL_EIER******************/
++ tmpReg32 = 0;
++ if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_DOUBLE_ECC)
++ {
++ FmEnableRamsEcc(p_FmPcd->h_Fm);
++ tmpReg32 |= FM_PCD_PLCR_DOUBLE_ECC;
++ }
++ if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_INIT_ENTRY_ERROR)
++ tmpReg32 |= FM_PCD_PLCR_INIT_ENTRY_ERROR;
++ WRITE_UINT32(p_Regs->fmpl_eier, tmpReg32);
++ /**********************FMPL_EIER******************/
++
++ /**********************FMPL_EVR******************/
++ WRITE_UINT32(p_Regs->fmpl_evr, (FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE | FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE));
++ /**********************FMPL_EVR******************/
++ /**********************FMPL_IER******************/
++ tmpReg32 = 0;
++ if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE)
++ tmpReg32 |= FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE;
++ if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE)
++ tmpReg32 |= FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE;
++ WRITE_UINT32(p_Regs->fmpl_ier, tmpReg32);
++ /**********************FMPL_IER******************/
++
++ /* register even if no interrupts enabled, to allow future enablement */
++ FmRegisterIntr(p_FmPcd->h_Fm,
++ e_FM_MOD_PLCR,
++ 0,
++ e_FM_INTR_TYPE_ERR,
++ ErrorExceptionsCB,
++ p_FmPcd);
++ FmRegisterIntr(p_FmPcd->h_Fm,
++ e_FM_MOD_PLCR,
++ 0,
++ e_FM_INTR_TYPE_NORMAL,
++ EventsCB,
++ p_FmPcd);
++
++    /* driver initializes one DFLT profile at the last entry */
++ /**********************FMPL_DPMR******************/
++ tmpReg32 = 0;
++ WRITE_UINT32(p_Regs->fmpl_dpmr, tmpReg32);
++ p_FmPcd->p_FmPcdPlcr->profiles[0].profilesMng.allocated = TRUE;
++
++ return E_OK;
++}
++
++t_Error PlcrFree(t_FmPcd *p_FmPcd)
++{
++ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_ERR);
++ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_NORMAL);
++
++ if (p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles)
++ FreeSharedProfiles(p_FmPcd,
++ p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles,
++ p_FmPcd->p_FmPcdPlcr->sharedProfilesIds);
++
++ if (p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles)
++ PlcrFreeProfilesForPartition(p_FmPcd,
++ p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase,
++ p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles,
++ p_FmPcd->guestId);
++
++ if (p_FmPcd->p_FmPcdPlcr->h_SwSpinlock)
++ XX_FreeSpinlock(p_FmPcd->p_FmPcdPlcr->h_SwSpinlock);
++
++ if (p_FmPcd->p_FmPcdPlcr->h_HwSpinlock)
++ XX_FreeSpinlock(p_FmPcd->p_FmPcdPlcr->h_HwSpinlock);
++
++ return E_OK;
++}
++
++void PlcrEnable(t_FmPcd *p_FmPcd)
++{
++ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++
++ WRITE_UINT32(p_Regs->fmpl_gcr, GET_UINT32(p_Regs->fmpl_gcr) | FM_PCD_PLCR_GCR_EN);
++}
++
++void PlcrDisable(t_FmPcd *p_FmPcd)
++{
++ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++
++ WRITE_UINT32(p_Regs->fmpl_gcr, GET_UINT32(p_Regs->fmpl_gcr) & ~FM_PCD_PLCR_GCR_EN);
++}
++
++uint16_t PlcrAllocProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId)
++{
++ uint32_t intFlags;
++ uint16_t profilesFound = 0;
++ int i = 0;
++
++ ASSERT_COND(p_FmPcd);
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr);
++
++ if (!numOfProfiles)
++ return 0;
++
++ if ((numOfProfiles > FM_PCD_PLCR_NUM_ENTRIES) ||
++ (base + numOfProfiles > FM_PCD_PLCR_NUM_ENTRIES))
++ return (uint16_t)ILLEGAL_BASE;
++
++ if (p_FmPcd->h_IpcSession)
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ t_FmPcdIpcMsg msg;
++ t_FmPcdIpcReply reply;
++ t_Error err;
++ uint32_t replyLength;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
++ ipcAllocParams.guestId = p_FmPcd->guestId;
++ ipcAllocParams.num = p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles;
++ ipcAllocParams.base = p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase;
++ msg.msgId = FM_PCD_ALLOC_PROFILES;
++ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
++ replyLength = sizeof(uint32_t) + sizeof(uint16_t);
++ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if ((err != E_OK) ||
++ (replyLength != (sizeof(uint32_t) + sizeof(uint16_t))))
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return (uint16_t)ILLEGAL_BASE;
++ }
++ else
++ memcpy((uint8_t*)&p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase, reply.replyBody, sizeof(uint16_t));
++ if (p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase == (uint16_t)ILLEGAL_BASE)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return (uint16_t)ILLEGAL_BASE;
++ }
++ }
++ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ {
++ DBG(WARNING, ("FM Guest mode, without IPC - can't validate Policer-profiles range!"));
++ return (uint16_t)ILLEGAL_BASE;
++ }
++
++ intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock);
++ for (i=base; i<(base+numOfProfiles); i++)
++ if (p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId == (uint8_t)ILLEGAL_BASE)
++ profilesFound++;
++ else
++ break;
++
++ if (profilesFound == numOfProfiles)
++ for (i=base; i<(base+numOfProfiles); i++)
++ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = guestId;
++ else
++ {
++ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
++ return (uint16_t)ILLEGAL_BASE;
++ }
++ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
++
++ return base;
++}
++
++void PlcrFreeProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId)
++{
++ int i = 0;
++
++ ASSERT_COND(p_FmPcd);
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr);
++
++ if (p_FmPcd->h_IpcSession)
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ t_FmPcdIpcMsg msg;
++ t_Error err;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
++ ipcAllocParams.guestId = p_FmPcd->guestId;
++ ipcAllocParams.num = p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles;
++ ipcAllocParams.base = p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase;
++ msg.msgId = FM_PCD_FREE_PROFILES;
++ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
++ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return;
++ }
++ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ {
++ DBG(WARNING, ("FM Guest mode, without IPC - can't validate Policer-profiles range!"));
++ return;
++ }
++
++ for (i=base; i<(base+numOfProfiles); i++)
++ {
++ if (p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId == guestId)
++ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE;
++ else
++            DBG(WARNING, ("Request for freeing a Policer-profile window which wasn't allocated to this partition"));
++ }
++}
++
++t_Error PlcrSetPortProfiles(t_FmPcd *p_FmPcd,
++ uint8_t hardwarePortId,
++ uint16_t numOfProfiles,
++ uint16_t base)
++{
++ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++ uint32_t log2Num, tmpReg32;
++
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ !p_Regs &&
++ p_FmPcd->h_IpcSession)
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ t_FmPcdIpcMsg msg;
++ t_Error err;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
++ ipcAllocParams.guestId = hardwarePortId;
++ ipcAllocParams.num = numOfProfiles;
++ ipcAllocParams.base = base;
++ msg.msgId = FM_PCD_SET_PORT_PROFILES;
++ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
++ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ return E_OK;
++ }
++ else if (!p_Regs)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++
++ if (GET_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1]) & FM_PCD_PLCR_PMR_V)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("The requesting port has already an allocated profiles window."));
++
++ /**********************FMPL_PMRx******************/
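++    /* Illustrative encoding: base = 64 with numOfProfiles = 8 gives log2Num = 3,
++     * so the PMR below is written as 64 | (3 << 16) | FM_PCD_PLCR_PMR_V. */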
++ LOG2((uint64_t)numOfProfiles, log2Num);
++ tmpReg32 = base;
++ tmpReg32 |= log2Num << 16;
++ tmpReg32 |= FM_PCD_PLCR_PMR_V;
++ WRITE_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1], tmpReg32);
++
++ return E_OK;
++}
++
++t_Error PlcrClearPortProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId)
++{
++ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ !p_Regs &&
++ p_FmPcd->h_IpcSession)
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ t_FmPcdIpcMsg msg;
++ t_Error err;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
++ ipcAllocParams.guestId = hardwarePortId;
++ msg.msgId = FM_PCD_CLEAR_PORT_PROFILES;
++ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
++ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ return E_OK;
++ }
++ else if (!p_Regs)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++ WRITE_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1], 0);
++
++ return E_OK;
++}
++
++t_Error FmPcdPlcrAllocProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_Error err = E_OK;
++ uint32_t profilesFound;
++ uint32_t intFlags;
++ uint16_t i, first, swPortIndex = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ if (!numOfProfiles)
++ return E_OK;
++
++ ASSERT_COND(hardwarePortId);
++
++    if (numOfProfiles > FM_PCD_PLCR_NUM_ENTRIES)
++        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numOfProfiles is too big."));
++
++    if (!POWER_OF_2(numOfProfiles))
++        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must be a power of 2."));
++
++ first = 0;
++ profilesFound = 0;
++ intFlags = PlcrSwLock(p_FmPcd->p_FmPcdPlcr);
++
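++    /* First-fit scan for a free window: count consecutive unallocated
++     * entries, and when an allocated entry is hit restart at the next
++     * window-sized boundary ('first' + numOfProfiles). Since 'first'
++     * starts at 0 and numOfProfiles is a power of 2, every candidate base
++     * stays size-aligned, as the FMPL_PMR base/log2 encoding appears to
++     * require. */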
++ for (i=0; i<FM_PCD_PLCR_NUM_ENTRIES; )
++ {
++ if (!p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated)
++ {
++ profilesFound++;
++ i++;
++ if (profilesFound == numOfProfiles)
++ break;
++ }
++ else
++ {
++ profilesFound = 0;
++            /* restart the search at the next window-aligned index */
++ i = first = (uint16_t)(first + numOfProfiles);
++ }
++ }
++
++ if (profilesFound == numOfProfiles)
++ {
++ for (i=first; i<first + numOfProfiles; i++)
++ {
++ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated = TRUE;
++ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = hardwarePortId;
++ }
++ }
++ else
++ {
++ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++        RETURN_ERROR(MINOR, E_FULL, ("No free profiles window of the requested size."));
++ }
++ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++
++ err = PlcrSetPortProfiles(p_FmPcd, hardwarePortId, numOfProfiles, first);
++ if (err)
++ {
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
++
++ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles = numOfProfiles;
++ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase = first;
++
++ return E_OK;
++}
++
++t_Error FmPcdPlcrFreeProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_Error err = E_OK;
++ uint32_t intFlags;
++ uint16_t i, swPortIndex = 0;
++
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
++
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
++
++ err = PlcrClearPortProfiles(p_FmPcd, hardwarePortId);
++ if (err)
++        RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ intFlags = PlcrSwLock(p_FmPcd->p_FmPcdPlcr);
++ for (i=p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase;
++ i<(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase +
++ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles);
++ i++)
++ {
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId == hardwarePortId);
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated);
++
++ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated = FALSE;
++ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = p_FmPcd->guestId;
++ }
++ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++
++ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles = 0;
++ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase = 0;
++
++ return E_OK;
++}
++
++t_Error FmPcdPlcrCcGetSetParams(t_Handle h_FmPcd, uint16_t profileIndx ,uint32_t requiredAction)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr;
++ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs = p_FmPcdPlcr->p_FmPcdPlcrRegs;
++ uint32_t tmpReg32, intFlags;
++ t_Error err;
++
++ /* Calling function locked all PCD modules, so no need to lock here */
++
++ if (profileIndx >= FM_PCD_PLCR_NUM_ENTRIES)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Policer profile out of range"));
++
++ if (!FmPcdPlcrIsProfileValid(p_FmPcd, profileIndx))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Policer profile is not valid"));
++
++ /*intFlags = PlcrProfileLock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx]);*/
++
++ if (p_FmPcd->h_Hc)
++ {
++ err = FmHcPcdPlcrCcGetSetParams(p_FmPcd->h_Hc, profileIndx, requiredAction);
++
++ UpdateRequiredActionFlag(p_FmPcd, profileIndx, TRUE);
++ FmPcdPlcrUpdateRequiredAction(p_FmPcd, profileIndx, requiredAction);
++
++ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
++ return err;
++ }
++
++    /* Lock the HW: once we have read the profile registers we don't want
++     * another access to change them. (Alternatively, they could be copied
++     * to a temporary location and the lock released earlier.) */
++
++ intFlags = PlcrHwLock(p_FmPcdPlcr);
++ WritePar(p_FmPcd, FmPcdPlcrBuildReadPlcrActionReg(profileIndx));
++
++ if (!p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].requiredActionFlag ||
++ !(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].requiredAction & requiredAction))
++ {
++ if (requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
++ {
++ if ((p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnGreen!= e_FM_PCD_DONE) ||
++ (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnYellow!= e_FM_PCD_DONE) ||
++ (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnRed!= e_FM_PCD_DONE))
++ {
++ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
++ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
++                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("In this case the next engine must be e_FM_PCD_DONE"));
++ }
++
++ if (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnGreen.action == e_FM_PCD_ENQ_FRAME)
++ {
++ tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia);
++ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
++ {
++ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
++ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
++ }
++ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
++ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia, tmpReg32);
++ tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx);
++ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEGNIA;
++ WritePar(p_FmPcd, tmpReg32);
++ }
++
++ if (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnYellow.action == e_FM_PCD_ENQ_FRAME)
++ {
++ tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia);
++ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
++ {
++ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
++ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
++ }
++ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
++ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia, tmpReg32);
++ tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx);
++ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEYNIA;
++ WritePar(p_FmPcd, tmpReg32);
++ }
++
++ if (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnRed.action == e_FM_PCD_ENQ_FRAME)
++ {
++ tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia);
++ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
++ {
++ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
++ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
++ }
++ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
++ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia, tmpReg32);
++ tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx);
++ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PERNIA;
++ WritePar(p_FmPcd, tmpReg32);
++
++ }
++ }
++ }
++ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
++
++ UpdateRequiredActionFlag(p_FmPcd, profileIndx, TRUE);
++ FmPcdPlcrUpdateRequiredAction(p_FmPcd, profileIndx, requiredAction);
++
++ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
++
++ return E_OK;
++}
++
++uint32_t FmPcdPlcrGetRequiredActionFlag(t_Handle h_FmPcd, uint16_t absoluteProfileId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
++
++ return p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredActionFlag;
++}
++
++uint32_t FmPcdPlcrGetRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
++
++ return p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction;
++}
++
++bool FmPcdPlcrIsProfileValid(t_Handle h_FmPcd, uint16_t absoluteProfileId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr;
++
++ ASSERT_COND(absoluteProfileId < FM_PCD_PLCR_NUM_ENTRIES);
++
++ return p_FmPcdPlcr->profiles[absoluteProfileId].valid;
++}
++
++void FmPcdPlcrValidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t intFlags;
++
++ ASSERT_COND(!p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
++
++ intFlags = PlcrProfileLock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId]);
++ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid = TRUE;
++ PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId], intFlags);
++}
++
++void FmPcdPlcrInvalidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t intFlags;
++
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
++
++ intFlags = PlcrProfileLock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId]);
++ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid = FALSE;
++ PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId], intFlags);
++}
++
++uint16_t FmPcdPlcrProfileGetAbsoluteId(t_Handle h_Profile)
++{
++ return ((t_FmPcdPlcrProfile*)h_Profile)->absoluteProfileId;
++}
++
++t_Error FmPcdPlcrGetAbsoluteIdByProfileParams(t_Handle h_FmPcd,
++ e_FmPcdProfileTypeSelection profileType,
++ t_Handle h_FmPort,
++ uint16_t relativeProfile,
++ uint16_t *p_AbsoluteId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr;
++ uint8_t i;
++
++ switch (profileType)
++ {
++ case e_FM_PCD_PLCR_PORT_PRIVATE:
++ /* get port PCD id from port handle */
++ for (i=0;i<FM_MAX_NUM_OF_PORTS;i++)
++ if (p_FmPcd->p_FmPcdPlcr->portsMapping[i].h_FmPort == h_FmPort)
++ break;
++ if (i == FM_MAX_NUM_OF_PORTS)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE , ("Invalid port handle."));
++
++ if (!p_FmPcd->p_FmPcdPlcr->portsMapping[i].numOfProfiles)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Port has no allocated profiles"));
++ if (relativeProfile >= p_FmPcd->p_FmPcdPlcr->portsMapping[i].numOfProfiles)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Profile id is out of range"));
++ *p_AbsoluteId = (uint16_t)(p_FmPcd->p_FmPcdPlcr->portsMapping[i].profilesBase + relativeProfile);
++ break;
++ case e_FM_PCD_PLCR_SHARED:
++ if (relativeProfile >= p_FmPcdPlcr->numOfSharedProfiles)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Profile id is out of range"));
++ *p_AbsoluteId = (uint16_t)(p_FmPcdPlcr->sharedProfilesIds[relativeProfile]);
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Invalid policer profile type"));
++ }
++
++ return E_OK;
++}
++
++uint16_t FmPcdPlcrGetPortProfilesBase(t_Handle h_FmPcd, uint8_t hardwarePortId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint16_t swPortIndex = 0;
++
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
++
++ return p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase;
++}
++
++uint16_t FmPcdPlcrGetPortNumOfProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint16_t swPortIndex = 0;
++
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
++
++    return p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles;
++}
++
++uint32_t FmPcdPlcrBuildWritePlcrActionReg(uint16_t absoluteProfileId)
++{
++ return (uint32_t)(FM_PCD_PLCR_PAR_GO |
++ ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT));
++}
++
++uint32_t FmPcdPlcrBuildWritePlcrActionRegs(uint16_t absoluteProfileId)
++{
++ return (uint32_t)(FM_PCD_PLCR_PAR_GO |
++ ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT) |
++ FM_PCD_PLCR_PAR_PWSEL_MASK);
++}
++
++bool FmPcdPlcrHwProfileIsValid(uint32_t profileModeReg)
++{
++ if (profileModeReg & FM_PCD_PLCR_PEMODE_PI)
++ return TRUE;
++ else
++ return FALSE;
++}
++
++uint32_t FmPcdPlcrBuildReadPlcrActionReg(uint16_t absoluteProfileId)
++{
++ return (uint32_t)(FM_PCD_PLCR_PAR_GO |
++ FM_PCD_PLCR_PAR_R |
++ ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT) |
++ FM_PCD_PLCR_PAR_PWSEL_MASK);
++}
++
++uint32_t FmPcdPlcrBuildCounterProfileReg(e_FmPcdPlcrProfileCounters counter)
++{
++ switch (counter)
++ {
++ case (e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER):
++ return FM_PCD_PLCR_PAR_PWSEL_PEGPC;
++ case (e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER):
++ return FM_PCD_PLCR_PAR_PWSEL_PEYPC;
++ case (e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER) :
++ return FM_PCD_PLCR_PAR_PWSEL_PERPC;
++ case (e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER) :
++ return FM_PCD_PLCR_PAR_PWSEL_PERYPC;
++ case (e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER) :
++ return FM_PCD_PLCR_PAR_PWSEL_PERRPC;
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ return 0;
++ }
++}
++
++uint32_t FmPcdPlcrBuildNiaProfileReg(bool green, bool yellow, bool red)
++{
++ uint32_t tmpReg32 = 0;
++
++ if (green)
++ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEGNIA;
++ if (yellow)
++ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEYNIA;
++ if (red)
++ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PERNIA;
++
++ return tmpReg32;
++}
++
++void FmPcdPlcrUpdateRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId, uint32_t requiredAction)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ /* this routine is protected by calling routine */
++
++ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
++
++ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction |= requiredAction;
++}
++
++/*********************** End of inter-module routines ************************/
++
++
++/**************************************************/
++/*............Policer API.........................*/
++/**************************************************/
++
++t_Error FM_PCD_ConfigPlcrAutoRefreshMode(t_Handle h_FmPcd, bool enable)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE);
++
++ if (!FmIsMaster(p_FmPcd->h_Fm))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigPlcrAutoRefreshMode - guest mode!"));
++
++ p_FmPcd->p_FmPcdDriverParam->plcrAutoRefresh = enable;
++
++ return E_OK;
++}
++
++t_Error FM_PCD_ConfigPlcrNumOfSharedProfiles(t_Handle h_FmPcd, uint16_t numOfSharedPlcrProfiles)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE);
++
++ p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles = numOfSharedPlcrProfiles;
++
++ return E_OK;
++}
++
++t_Error FM_PCD_SetPlcrStatistics(t_Handle h_FmPcd, bool enable)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t tmpReg32;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE);
++
++ if (!FmIsMaster(p_FmPcd->h_Fm))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetPlcrStatistics - guest mode!"));
++
++ tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr);
++ if (enable)
++ tmpReg32 |= FM_PCD_PLCR_GCR_STEN;
++ else
++ tmpReg32 &= ~FM_PCD_PLCR_GCR_STEN;
++
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr, tmpReg32);
++ return E_OK;
++}
++
++t_Handle FM_PCD_PlcrProfileSet(t_Handle h_FmPcd,
++ t_FmPcdPlcrProfileParams *p_ProfileParams)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs;
++ t_FmPcdPlcrProfileRegs plcrProfileReg;
++ uint32_t intFlags;
++ uint16_t absoluteProfileId;
++ t_Error err = E_OK;
++ uint32_t tmpReg32;
++ t_FmPcdPlcrProfile *p_Profile;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
++
++ if (p_ProfileParams->modify)
++ {
++ p_Profile = (t_FmPcdPlcrProfile *)p_ProfileParams->id.h_Profile;
++ p_FmPcd = p_Profile->h_FmPcd;
++ absoluteProfileId = p_Profile->absoluteProfileId;
++ if (absoluteProfileId >= FM_PCD_PLCR_NUM_ENTRIES)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too Big "));
++ return NULL;
++ }
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE, NULL);
++
++ /* Try lock profile using flag */
++ if (!PlcrProfileFlagTryLock(p_Profile))
++ {
++ DBG(TRACE, ("Profile Try Lock - BUSY"));
++ /* Signal to caller BUSY condition */
++ p_ProfileParams->id.h_Profile = NULL;
++ return NULL;
++ }
++ }
++ else
++ {
++ p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE, NULL);
++
++ /* SMP: needs to be protected only if another core now changes the windows */
++ err = FmPcdPlcrGetAbsoluteIdByProfileParams(h_FmPcd,
++ p_ProfileParams->id.newParams.profileType,
++ p_ProfileParams->id.newParams.h_FmPort,
++ p_ProfileParams->id.newParams.relativeProfileId,
++ &absoluteProfileId);
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return NULL;
++ }
++
++ if (absoluteProfileId >= FM_PCD_PLCR_NUM_ENTRIES)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too Big "));
++ return NULL;
++ }
++
++ if (FmPcdPlcrIsProfileValid(p_FmPcd, absoluteProfileId))
++ {
++ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Policer Profile is already used"));
++ return NULL;
++ }
++
++ /* initialize profile struct */
++ p_Profile = &p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId];
++
++ p_Profile->h_FmPcd = p_FmPcd;
++ p_Profile->absoluteProfileId = absoluteProfileId;
++
++ p_Profile->p_Lock = FmPcdAcquireLock(p_FmPcd);
++ if (!p_Profile->p_Lock)
++ REPORT_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM Policer Profile lock obj!"));
++ }
++
++ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, NULL);
++
++ p_Profile->nextEngineOnGreen = p_ProfileParams->nextEngineOnGreen;
++ memcpy(&p_Profile->paramsOnGreen, &(p_ProfileParams->paramsOnGreen), sizeof(u_FmPcdPlcrNextEngineParams));
++
++ p_Profile->nextEngineOnYellow = p_ProfileParams->nextEngineOnYellow;
++ memcpy(&p_Profile->paramsOnYellow, &(p_ProfileParams->paramsOnYellow), sizeof(u_FmPcdPlcrNextEngineParams));
++
++ p_Profile->nextEngineOnRed = p_ProfileParams->nextEngineOnRed;
++ memcpy(&p_Profile->paramsOnRed, &(p_ProfileParams->paramsOnRed), sizeof(u_FmPcdPlcrNextEngineParams));
++
++ memset(&plcrProfileReg, 0, sizeof(t_FmPcdPlcrProfileRegs));
++
++ /* build the policer profile registers */
++ err = BuildProfileRegs(h_FmPcd, p_ProfileParams, &plcrProfileReg);
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ if (p_ProfileParams->modify)
++ /* unlock */
++ PlcrProfileFlagUnlock(p_Profile);
++ if (!p_ProfileParams->modify &&
++ p_Profile->p_Lock)
++ /* release allocated Profile lock */
++ FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock);
++ return NULL;
++ }
++
++ if (p_FmPcd->h_Hc)
++ {
++ err = FmHcPcdPlcrSetProfile(p_FmPcd->h_Hc, (t_Handle)p_Profile, &plcrProfileReg);
++ if (p_ProfileParams->modify)
++ PlcrProfileFlagUnlock(p_Profile);
++ if (err)
++ {
++ /* release the allocated scheme lock */
++ if (!p_ProfileParams->modify &&
++ p_Profile->p_Lock)
++ FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock);
++
++ return NULL;
++ }
++ if (!p_ProfileParams->modify)
++ FmPcdPlcrValidateProfileSw(p_FmPcd,absoluteProfileId);
++ return (t_Handle)p_Profile;
++ }
++
++ p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++ SANITY_CHECK_RETURN_VALUE(p_FmPcdPlcrRegs, E_INVALID_HANDLE, NULL);
++
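++    /* Direct-register path (no host-command session): stage the complete
++     * profile image in the shadow profile registers, then commit it to
++     * entry 'absoluteProfileId' with a single FMPL_PAR write that has all
++     * word-select bits set (FmPcdPlcrBuildWritePlcrActionRegs()). */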
++ intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pemode, plcrProfileReg.fmpl_pemode);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia, plcrProfileReg.fmpl_pegnia);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia, plcrProfileReg.fmpl_peynia);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia, plcrProfileReg.fmpl_pernia);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pecir, plcrProfileReg.fmpl_pecir);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pecbs, plcrProfileReg.fmpl_pecbs);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepepir_eir, plcrProfileReg.fmpl_pepepir_eir);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepbs_ebs, plcrProfileReg.fmpl_pepbs_ebs);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pelts, plcrProfileReg.fmpl_pelts);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pects, plcrProfileReg.fmpl_pects);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepts_ets, plcrProfileReg.fmpl_pepts_ets);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc, plcrProfileReg.fmpl_pegpc);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc, plcrProfileReg.fmpl_peypc);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc, plcrProfileReg.fmpl_perpc);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc, plcrProfileReg.fmpl_perypc);
++    WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc, plcrProfileReg.fmpl_perrpc);
++
++ tmpReg32 = FmPcdPlcrBuildWritePlcrActionRegs(absoluteProfileId);
++ WritePar(p_FmPcd, tmpReg32);
++
++ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++
++ if (!p_ProfileParams->modify)
++ FmPcdPlcrValidateProfileSw(p_FmPcd,absoluteProfileId);
++ else
++ PlcrProfileFlagUnlock(p_Profile);
++
++ return (t_Handle)p_Profile;
++}
++
++t_Error FM_PCD_PlcrProfileDelete(t_Handle h_Profile)
++{
++ t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile;
++ t_FmPcd *p_FmPcd;
++ uint16_t profileIndx;
++ uint32_t tmpReg32, intFlags;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Profile, E_INVALID_HANDLE);
++ p_FmPcd = p_Profile->h_FmPcd;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++
++ profileIndx = p_Profile->absoluteProfileId;
++
++ UpdateRequiredActionFlag(p_FmPcd, profileIndx, FALSE);
++
++ FmPcdPlcrInvalidateProfileSw(p_FmPcd,profileIndx);
++
++ if (p_FmPcd->h_Hc)
++ {
++ err = FmHcPcdPlcrDeleteProfile(p_FmPcd->h_Hc, h_Profile);
++ if (p_Profile->p_Lock)
++ /* release allocated Profile lock */
++ FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock);
++
++ return err;
++ }
++
++ intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr);
++ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->profileRegs.fmpl_pemode, ~FM_PCD_PLCR_PEMODE_PI);
++
++ tmpReg32 = FmPcdPlcrBuildWritePlcrActionRegs(profileIndx);
++ WritePar(p_FmPcd, tmpReg32);
++ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++
++ if (p_Profile->p_Lock)
++ /* release allocated Profile lock */
++ FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock);
++
++ /* we do not memset profile as all its fields are being re-initialized at "set",
++ * plus its allocation information is still valid. */
++ return E_OK;
++}
++
++/***************************************************/
++/*............Policer Profile Counter..............*/
++/***************************************************/
++uint32_t FM_PCD_PlcrProfileGetCounter(t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter)
++{
++ t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile;
++ t_FmPcd *p_FmPcd;
++ uint16_t profileIndx;
++ uint32_t intFlags, counterVal = 0;
++ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs;
++
++    SANITY_CHECK_RETURN_VALUE(p_Profile, E_INVALID_HANDLE, 0);
++    p_FmPcd = p_Profile->h_FmPcd;
++    SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, 0);
++
++ if (p_FmPcd->h_Hc)
++ return FmHcPcdPlcrGetProfileCounter(p_FmPcd->h_Hc, h_Profile, counter);
++
++ p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++ SANITY_CHECK_RETURN_VALUE(p_FmPcdPlcrRegs, E_INVALID_HANDLE, 0);
++
++ profileIndx = p_Profile->absoluteProfileId;
++
++ if (profileIndx >= FM_PCD_PLCR_NUM_ENTRIES)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too Big "));
++ return 0;
++ }
++ intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr);
++ WritePar(p_FmPcd, FmPcdPlcrBuildReadPlcrActionReg(profileIndx));
++
++ switch (counter)
++ {
++ case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER:
++ counterVal = (GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc));
++ break;
++ case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER:
++ counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc);
++ break;
++ case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER:
++ counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc);
++ break;
++ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER:
++ counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc);
++ break;
++ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER:
++ counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc);
++ break;
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ break;
++ }
++ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++
++ return counterVal;
++}
++
++t_Error FM_PCD_PlcrProfileSetCounter(t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value)
++{
++ t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile;
++ t_FmPcd *p_FmPcd;
++ uint16_t profileIndx;
++ uint32_t tmpReg32, intFlags;
++ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs;
++
++ SANITY_CHECK_RETURN_ERROR(p_Profile, E_INVALID_HANDLE);
++
++ p_FmPcd = p_Profile->h_FmPcd;
++ profileIndx = p_Profile->absoluteProfileId;
++
++ if (p_FmPcd->h_Hc)
++ return FmHcPcdPlcrSetProfileCounter(p_FmPcd->h_Hc, h_Profile, counter, value);
++
++ p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++ SANITY_CHECK_RETURN_ERROR(p_FmPcdPlcrRegs, E_INVALID_HANDLE);
++
++ intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr);
++ switch (counter)
++ {
++ case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER:
++ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc, value);
++ break;
++ case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER:
++ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc, value);
++ break;
++ case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER:
++ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc, value);
++ break;
++ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER:
++            WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc, value);
++            break;
++        case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER:
++            WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc, value);
++ break;
++ default:
++ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++    /* Commit the counter atomically by writing FMPL_PAR with GO=1, R=0
++     * (write), PSI=0, PNUM = profile number, and PWSEL selecting only the
++     * word of the counter just updated. */
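++    /* Illustration: for profileIndx 5 and the green packet counter the
++     * composed value is FM_PCD_PLCR_PAR_GO |
++     * (5 << FM_PCD_PLCR_PAR_PNUM_SHIFT) | FM_PCD_PLCR_PAR_PWSEL_PEGPC =
++     * 0x80000000 | 0x00050000 | 0x00000010 = 0x80050010. */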
++ tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx);
++ tmpReg32 |= FmPcdPlcrBuildCounterProfileReg(counter);
++ WritePar(p_FmPcd, tmpReg32);
++ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
++
++ return E_OK;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.h
+new file mode 100644
+index 00000000..2bb8b969
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.h
+@@ -0,0 +1,165 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_plcr.h
++
++ @Description FM Policer private header
++*//***************************************************************************/
++#ifndef __FM_PLCR_H
++#define __FM_PLCR_H
++
++#include "std_ext.h"
++
++
++/***********************************************************************/
++/* Policer defines */
++/***********************************************************************/
++
++#define FM_PCD_PLCR_PAR_GO 0x80000000
++#define FM_PCD_PLCR_PAR_PWSEL_MASK 0x0000FFFF
++#define FM_PCD_PLCR_PAR_R 0x40000000
++
++/* shifts */
++#define FM_PCD_PLCR_PAR_PNUM_SHIFT 16
++
++/* masks */
++#define FM_PCD_PLCR_PEMODE_PI 0x80000000
++#define FM_PCD_PLCR_PEMODE_CBLND 0x40000000
++#define FM_PCD_PLCR_PEMODE_ALG_MASK 0x30000000
++#define FM_PCD_PLCR_PEMODE_ALG_RFC2698 0x10000000
++#define FM_PCD_PLCR_PEMODE_ALG_RFC4115 0x20000000
++#define FM_PCD_PLCR_PEMODE_DEFC_MASK 0x0C000000
++#define FM_PCD_PLCR_PEMODE_DEFC_Y 0x04000000
++#define FM_PCD_PLCR_PEMODE_DEFC_R 0x08000000
++#define FM_PCD_PLCR_PEMODE_DEFC_OVERRIDE 0x0C000000
++#define FM_PCD_PLCR_PEMODE_OVCLR_MASK 0x03000000
++#define FM_PCD_PLCR_PEMODE_OVCLR_Y 0x01000000
++#define FM_PCD_PLCR_PEMODE_OVCLR_R 0x02000000
++#define FM_PCD_PLCR_PEMODE_OVCLR_G_NC 0x03000000
++#define FM_PCD_PLCR_PEMODE_PKT 0x00800000
++#define FM_PCD_PLCR_PEMODE_FPP_MASK 0x001F0000
++#define FM_PCD_PLCR_PEMODE_FPP_SHIFT 16
++#define FM_PCD_PLCR_PEMODE_FLS_MASK 0x0000F000
++#define FM_PCD_PLCR_PEMODE_FLS_L2 0x00003000
++#define FM_PCD_PLCR_PEMODE_FLS_L3 0x0000B000
++#define FM_PCD_PLCR_PEMODE_FLS_L4 0x0000E000
++#define FM_PCD_PLCR_PEMODE_FLS_FULL 0x0000F000
++#define FM_PCD_PLCR_PEMODE_RBFLS 0x00000800
++#define FM_PCD_PLCR_PEMODE_TRA 0x00000004
++#define FM_PCD_PLCR_PEMODE_TRB 0x00000002
++#define FM_PCD_PLCR_PEMODE_TRC 0x00000001
++#define FM_PCD_PLCR_DOUBLE_ECC 0x80000000
++#define FM_PCD_PLCR_INIT_ENTRY_ERROR 0x40000000
++#define FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE 0x80000000
++#define FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE 0x40000000
++
++#define FM_PCD_PLCR_NIA_VALID 0x80000000
++
++#define FM_PCD_PLCR_GCR_EN 0x80000000
++#define FM_PCD_PLCR_GCR_STEN 0x40000000
++#define FM_PCD_PLCR_GCR_DAR 0x20000000
++#define FM_PCD_PLCR_GCR_DEFNIA 0x00FFFFFF
++#define FM_PCD_PLCR_NIA_ABS 0x00000100
++
++#define FM_PCD_PLCR_GSR_BSY 0x80000000
++#define FM_PCD_PLCR_GSR_DQS 0x60000000
++#define FM_PCD_PLCR_GSR_RPB 0x20000000
++#define FM_PCD_PLCR_GSR_FQS 0x0C000000
++#define FM_PCD_PLCR_GSR_LPALG 0x0000C000
++#define FM_PCD_PLCR_GSR_LPCA 0x00003000
++#define FM_PCD_PLCR_GSR_LPNUM 0x000000FF
++
++#define FM_PCD_PLCR_EVR_PSIC 0x80000000
++#define FM_PCD_PLCR_EVR_AAC 0x40000000
++
++#define FM_PCD_PLCR_PAR_PSI 0x20000000
++#define FM_PCD_PLCR_PAR_PNUM 0x00FF0000
++/* PWSEL selective select options */
++#define FM_PCD_PLCR_PAR_PWSEL_PEMODE 0x00008000 /* 0 */
++#define FM_PCD_PLCR_PAR_PWSEL_PEGNIA 0x00004000 /* 1 */
++#define FM_PCD_PLCR_PAR_PWSEL_PEYNIA 0x00002000 /* 2 */
++#define FM_PCD_PLCR_PAR_PWSEL_PERNIA 0x00001000 /* 3 */
++#define FM_PCD_PLCR_PAR_PWSEL_PECIR 0x00000800 /* 4 */
++#define FM_PCD_PLCR_PAR_PWSEL_PECBS 0x00000400 /* 5 */
++#define FM_PCD_PLCR_PAR_PWSEL_PEPIR_EIR 0x00000200 /* 6 */
++#define FM_PCD_PLCR_PAR_PWSEL_PEPBS_EBS 0x00000100 /* 7 */
++#define FM_PCD_PLCR_PAR_PWSEL_PELTS 0x00000080 /* 8 */
++#define FM_PCD_PLCR_PAR_PWSEL_PECTS 0x00000040 /* 9 */
++#define FM_PCD_PLCR_PAR_PWSEL_PEPTS_ETS 0x00000020 /* 10 */
++#define FM_PCD_PLCR_PAR_PWSEL_PEGPC 0x00000010 /* 11 */
++#define FM_PCD_PLCR_PAR_PWSEL_PEYPC 0x00000008 /* 12 */
++#define FM_PCD_PLCR_PAR_PWSEL_PERPC 0x00000004 /* 13 */
++#define FM_PCD_PLCR_PAR_PWSEL_PERYPC 0x00000002 /* 14 */
++#define FM_PCD_PLCR_PAR_PWSEL_PERRPC 0x00000001 /* 15 */
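++/* A whole-profile read or write selects all of the above words at once via
++ * FM_PCD_PLCR_PAR_PWSEL_MASK (0x0000FFFF); a single-counter update sets just
++ * one bit, e.g. FM_PCD_PLCR_PAR_PWSEL_PEGPC for the green packet counter. */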
++
++#define FM_PCD_PLCR_PAR_PMR_BRN_1TO1 0x0000 /* - Full bit replacement. {PBNUM[0:N-1]
++ 1-> 2^N specific locations. */
++#define FM_PCD_PLCR_PAR_PMR_BRN_2TO2 0x1 /* - {PBNUM[0:N-2],PNUM[N-1]}.
++ 2-> 2^(N-1) base locations. */
++#define FM_PCD_PLCR_PAR_PMR_BRN_4TO4 0x2 /* - {PBNUM[0:N-3],PNUM[N-2:N-1]}.
++ 4-> 2^(N-2) base locations. */
++#define FM_PCD_PLCR_PAR_PMR_BRN_8TO8 0x3 /* - {PBNUM[0:N-4],PNUM[N-3:N-1]}.
++ 8->2^(N-3) base locations. */
++#define FM_PCD_PLCR_PAR_PMR_BRN_16TO16 0x4 /* - {PBNUM[0:N-5],PNUM[N-4:N-1]}.
++ 16-> 2^(N-4) base locations. */
++#define FM_PCD_PLCR_PAR_PMR_BRN_32TO32 0x5 /* {PBNUM[0:N-6],PNUM[N-5:N-1]}.
++ 32-> 2^(N-5) base locations. */
++#define FM_PCD_PLCR_PAR_PMR_BRN_64TO64 0x6 /* {PBNUM[0:N-7],PNUM[N-6:N-1]}.
++ 64-> 2^(N-6) base locations. */
++#define FM_PCD_PLCR_PAR_PMR_BRN_128TO128 0x7 /* {PBNUM[0:N-8],PNUM[N-7:N-1]}.
++ 128-> 2^(N-7) base locations. */
++#define FM_PCD_PLCR_PAR_PMR_BRN_256TO256 0x8 /* - No bit replacement for N=8. {PNUM[N-8:N-1]}.
++ When N=8 this option maps all 256 profiles by the DISPATCH bus into one group. */
++
++#define FM_PCD_PLCR_PMR_V 0x80000000
++#define PLCR_ERR_ECC_CAP 0x80000000
++#define PLCR_ERR_ECC_TYPE_DOUBLE 0x40000000
++#define PLCR_ERR_ECC_PNUM_MASK 0x00000FF0
++#define PLCR_ERR_ECC_OFFSET_MASK 0x0000000F
++
++#define PLCR_ERR_UNINIT_CAP 0x80000000
++#define PLCR_ERR_UNINIT_NUM_MASK 0x000000FF
++#define PLCR_ERR_UNINIT_PID_MASK 0x003f0000
++#define PLCR_ERR_UNINIT_ABSOLUTE_MASK 0x00008000
++
++/* shifts */
++#define PLCR_ERR_ECC_PNUM_SHIFT 4
++#define PLCR_ERR_UNINIT_PID_SHIFT 16
++
++#define FM_PCD_PLCR_PMR_BRN_SHIFT 16
++
++#define PLCR_PORT_WINDOW_SIZE(hardwarePortId)
++
++
++#endif /* __FM_PLCR_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c
+new file mode 100644
+index 00000000..ff4f0a2f
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c
+@@ -0,0 +1,423 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File          fm_prs.c
++
++ @Description   FM Parser ...
++*//***************************************************************************/
++#include <linux/math64.h>
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "debug_ext.h"
++#include "net_ext.h"
++
++#include "fm_common.h"
++#include "fm_pcd.h"
++#include "fm_pcd_ipc.h"
++#include "fm_prs.h"
++#include "fsl_fman_prs.h"
++
++
++static void PcdPrsErrorException(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint32_t event, ev_mask;
++ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
++
++ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
++ ev_mask = fman_prs_get_err_ev_mask(PrsRegs);
++
++ event = fman_prs_get_err_event(PrsRegs, ev_mask);
++
++ fman_prs_ack_err_event(PrsRegs, event);
++
++ DBG(TRACE, ("parser error - 0x%08x\n",event));
++
++    if (event & FM_PCD_PRS_DOUBLE_ECC)
++ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC);
++}
++
++static void PcdPrsException(t_Handle h_FmPcd)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ uint32_t event, ev_mask;
++ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
++
++ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
++ ev_mask = fman_prs_get_expt_ev_mask(PrsRegs);
++ event = fman_prs_get_expt_event(PrsRegs, ev_mask);
++
++ ASSERT_COND(event & FM_PCD_PRS_SINGLE_ECC);
++
++ DBG(TRACE, ("parser event - 0x%08x\n",event));
++
++ fman_prs_ack_expt_event(PrsRegs, event);
++
++ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC);
++}
++
++t_Handle PrsConfig(t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams)
++{
++ t_FmPcdPrs *p_FmPcdPrs;
++ uintptr_t baseAddr;
++
++ UNUSED(p_FmPcd);
++ UNUSED(p_FmPcdParams);
++
++ p_FmPcdPrs = (t_FmPcdPrs *) XX_Malloc(sizeof(t_FmPcdPrs));
++ if (!p_FmPcdPrs)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Parser structure allocation FAILED"));
++ return NULL;
++ }
++ memset(p_FmPcdPrs, 0, sizeof(t_FmPcdPrs));
++ fman_prs_defconfig(&p_FmPcd->p_FmPcdDriverParam->dfltCfg);
++
++ if (p_FmPcd->guestId == NCSW_MASTER_ID)
++ {
++ baseAddr = FmGetPcdPrsBaseAddr(p_FmPcdParams->h_Fm);
++ p_FmPcdPrs->p_SwPrsCode = (uint32_t *)UINT_TO_PTR(baseAddr);
++ p_FmPcdPrs->p_FmPcdPrsRegs = (struct fman_prs_regs *)UINT_TO_PTR(baseAddr + PRS_REGS_OFFSET);
++ }
++
++ p_FmPcdPrs->fmPcdPrsPortIdStatistics = p_FmPcd->p_FmPcdDriverParam->dfltCfg.port_id_stat;
++ p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit = p_FmPcd->p_FmPcdDriverParam->dfltCfg.max_prs_cyc_lim;
++ p_FmPcd->exceptions |= p_FmPcd->p_FmPcdDriverParam->dfltCfg.prs_exceptions;
++
++ return p_FmPcdPrs;
++}
++
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ static uint8_t swPrsPatch[] = SW_PRS_UDP_LITE_PATCH;
++#else
++ static uint8_t swPrsPatch[] = SW_PRS_OFFLOAD_PATCH;
++#endif /* (DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT) */
++
++t_Error PrsInit(t_FmPcd *p_FmPcd)
++{
++ t_FmPcdDriverParam *p_Param = p_FmPcd->p_FmPcdDriverParam;
++ uint32_t *p_TmpCode;
++ uint32_t *p_LoadTarget = (uint32_t *)PTR_MOVE(p_FmPcd->p_FmPcdPrs->p_SwPrsCode,
++ FM_PCD_SW_PRS_SIZE-FM_PCD_PRS_SW_PATCHES_SIZE);
++ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
++ uint32_t i;
++
++ ASSERT_COND(sizeof(swPrsPatch) <= (FM_PCD_PRS_SW_PATCHES_SIZE-FM_PCD_PRS_SW_TAIL_SIZE));
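++    /* The patch image occupies the start of the last
++     * FM_PCD_PRS_SW_PATCHES_SIZE bytes of the SW parser instruction area
++     * (see p_LoadTarget above); the final FM_PCD_PRS_SW_TAIL_SIZE bytes
++     * stay clear, which the assertion above guarantees. */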
++
++ /* nothing to do in guest-partition */
++ if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ return E_OK;
++
++ p_TmpCode = (uint32_t *)XX_MallocSmart(ROUND_UP(sizeof(swPrsPatch),4), 0, sizeof(uint32_t));
++ if (!p_TmpCode)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Tmp Sw-Parser code allocation FAILED"));
++ memset((uint8_t *)p_TmpCode, 0, ROUND_UP(sizeof(swPrsPatch),4));
++ memcpy((uint8_t *)p_TmpCode, (uint8_t *)swPrsPatch, sizeof(swPrsPatch));
++
++ fman_prs_init(PrsRegs, &p_Param->dfltCfg);
++
++ /* register even if no interrupts enabled, to allow future enablement */
++ FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR, PcdPrsErrorException, p_FmPcd);
++
++ /* register even if no interrupts enabled, to allow future enablement */
++ FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL, PcdPrsException, p_FmPcd);
++
++    if (p_FmPcd->exceptions & FM_PCD_EX_PRS_SINGLE_ECC)
++        FmEnableRamsEcc(p_FmPcd->h_Fm);
++
++    if (p_FmPcd->exceptions & FM_PCD_EX_PRS_DOUBLE_ECC)
++ FmEnableRamsEcc(p_FmPcd->h_Fm);
++
++ /* load sw parser Ip-Frag patch */
++ for (i=0; i<DIV_CEIL(sizeof(swPrsPatch), 4); i++)
++ WRITE_UINT32(p_LoadTarget[i], GET_UINT32(p_TmpCode[i]));
++
++ XX_FreeSmart(p_TmpCode);
++
++ return E_OK;
++}
++
++void PrsFree(t_FmPcd *p_FmPcd)
++{
++ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
++ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR);
++ /* register even if no interrupts enabled, to allow future enablement */
++ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL);
++}
++
++void PrsEnable(t_FmPcd *p_FmPcd)
++{
++ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
++
++ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
++ fman_prs_enable(PrsRegs);
++}
++
++void PrsDisable(t_FmPcd *p_FmPcd)
++{
++ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
++
++ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
++ fman_prs_disable(PrsRegs);
++}
++
++int PrsIsEnabled(t_FmPcd *p_FmPcd)
++{
++ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
++
++ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
++ return fman_prs_is_enabled(PrsRegs);
++}
++
++t_Error PrsIncludePortInStatistics(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, bool include)
++{
++ struct fman_prs_regs *PrsRegs;
++ uint32_t bitMask = 0;
++ uint8_t prsPortId;
++
++    SANITY_CHECK_RETURN_ERROR((hardwarePortId >= 1 && hardwarePortId <= 16), E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE);
++
++ PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
++
++ GET_FM_PCD_PRS_PORT_ID(prsPortId, hardwarePortId);
++ GET_FM_PCD_INDEX_FLAG(bitMask, prsPortId);
++
++ if (include)
++ p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics |= bitMask;
++ else
++ p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics &= ~bitMask;
++
++ fman_prs_set_stst_port_msk(PrsRegs,
++ p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics);
++
++ return E_OK;
++}
++
++t_Error FmPcdPrsIncludePortInStatistics(t_Handle h_FmPcd, uint8_t hardwarePortId, bool include)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_Error err;
++
++    SANITY_CHECK_RETURN_ERROR((hardwarePortId >= 1 && hardwarePortId <= 16), E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE);
++
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ p_FmPcd->h_IpcSession)
++ {
++ t_FmPcdIpcPrsIncludePort prsIncludePortParams;
++ t_FmPcdIpcMsg msg;
++
++ prsIncludePortParams.hardwarePortId = hardwarePortId;
++ prsIncludePortParams.include = include;
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_PCD_PRS_INC_PORT_STATS;
++ memcpy(msg.msgBody, &prsIncludePortParams, sizeof(prsIncludePortParams));
++ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++                                sizeof(msg.msgId) + sizeof(prsIncludePortParams),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ return E_OK;
++ }
++ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("running in guest-mode without IPC!"));
++
++ return PrsIncludePortInStatistics(p_FmPcd, hardwarePortId, include);
++}
++
++uint32_t FmPcdGetSwPrsOffset(t_Handle h_FmPcd, e_NetHeaderType hdr, uint8_t indexPerHdr)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
++ t_FmPcdPrsLabelParams *p_Label;
++ int i;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE, 0);
++
++ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
++ p_FmPcd->h_IpcSession)
++ {
++ t_Error err = E_OK;
++ t_FmPcdIpcSwPrsLable labelParams;
++ t_FmPcdIpcMsg msg;
++ uint32_t prsOffset = 0;
++ t_FmPcdIpcReply reply;
++ uint32_t replyLength;
++
++ memset(&reply, 0, sizeof(reply));
++ memset(&msg, 0, sizeof(msg));
++ labelParams.enumHdr = (uint32_t)hdr;
++ labelParams.indexPerHdr = indexPerHdr;
++ msg.msgId = FM_PCD_GET_SW_PRS_OFFSET;
++ memcpy(msg.msgBody, &labelParams, sizeof(labelParams));
++ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
++ (uint8_t*)&msg,
++                                sizeof(msg.msgId) + sizeof(labelParams),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++        if (err != E_OK)
++        {
++            REPORT_ERROR(MAJOR, err, NO_MSG);
++            return (uint32_t)ILLEGAL_BASE;
++        }
++        if (replyLength != sizeof(uint32_t) + sizeof(uint32_t))
++        {
++            REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++            return (uint32_t)ILLEGAL_BASE;
++        }
++
++        memcpy((uint8_t*)&prsOffset, reply.replyBody, sizeof(uint32_t));
++        return prsOffset;
++    }
++    else if (p_FmPcd->guestId != NCSW_MASTER_ID)
++    {
++        REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++                     ("running in guest-mode without IPC!"));
++        return (uint32_t)ILLEGAL_BASE;
++    }
++
++ ASSERT_COND(p_FmPcd->p_FmPcdPrs->currLabel < FM_PCD_PRS_NUM_OF_LABELS);
++
++ for (i=0; i<p_FmPcd->p_FmPcdPrs->currLabel; i++)
++ {
++ p_Label = &p_FmPcd->p_FmPcdPrs->labelsTable[i];
++
++ if ((hdr == p_Label->hdr) && (indexPerHdr == p_Label->indexPerHdr))
++ return p_Label->instructionOffset;
++ }
++
++    REPORT_ERROR(MAJOR, E_NOT_FOUND, ("SW parser attachment not found"));
++ return (uint32_t)ILLEGAL_BASE;
++}
++
++void FM_PCD_SetPrsStatistics(t_Handle h_FmPcd, bool enable)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ struct fman_prs_regs *PrsRegs;
++
++ SANITY_CHECK_RETURN(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE);
++
++ PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
++
++    if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetPrsStatistics - guest mode!"));
++ return;
++ }
++
++ fman_prs_set_stst(PrsRegs, enable);
++}
++
++t_Error FM_PCD_PrsLoadSw(t_Handle h_FmPcd, t_FmPcdPrsSwParams *p_SwPrs)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++ uint32_t *p_LoadTarget;
++ uint32_t *p_TmpCode;
++ int i;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_SwPrs, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->enabled, E_INVALID_HANDLE);
++
++ if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM in guest-mode!"));
++
++ if (!p_SwPrs->override)
++ {
++        if (p_FmPcd->p_FmPcdPrs->p_CurrSwPrs > p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4)
++            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("SW parser base must lie above the currently loaded code"));
++ }
++ else
++ p_FmPcd->p_FmPcdPrs->currLabel = 0;
++
++ if (p_SwPrs->size > FM_PCD_SW_PRS_SIZE - FM_PCD_PRS_SW_TAIL_SIZE - p_SwPrs->base*2)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_SwPrs->size may not be larger than MAX_SW_PRS_CODE_SIZE"));
++
++ if (p_FmPcd->p_FmPcdPrs->currLabel + p_SwPrs->numOfLabels > FM_PCD_PRS_NUM_OF_LABELS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceeded number of labels allowed "));
++
++ p_TmpCode = (uint32_t *)XX_MallocSmart(ROUND_UP(p_SwPrs->size,4), 0, sizeof(uint32_t));
++ if (!p_TmpCode)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Tmp Sw-Parser code allocation FAILED"));
++ memset((uint8_t *)p_TmpCode, 0, ROUND_UP(p_SwPrs->size,4));
++ memcpy((uint8_t *)p_TmpCode, p_SwPrs->p_Code, p_SwPrs->size);
++
++ /* save sw parser labels */
++ memcpy(&p_FmPcd->p_FmPcdPrs->labelsTable[p_FmPcd->p_FmPcdPrs->currLabel],
++ p_SwPrs->labelsTable,
++ p_SwPrs->numOfLabels*sizeof(t_FmPcdPrsLabelParams));
++ p_FmPcd->p_FmPcdPrs->currLabel += p_SwPrs->numOfLabels;
++
++ /* load sw parser code */
++ p_LoadTarget = p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4;
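++    /* p_SwPrs->base is presumably expressed in 2-byte units (note the
++     * base*2 byte conversion used throughout this function); dividing by
++     * 4 then indexes the 32-bit instruction words. */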
++
++    for (i=0; i<DIV_CEIL(p_SwPrs->size, 4); i++)
++ WRITE_UINT32(p_LoadTarget[i], GET_UINT32(p_TmpCode[i]));
++
++ p_FmPcd->p_FmPcdPrs->p_CurrSwPrs =
++ p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4 + ROUND_UP(p_SwPrs->size,4);
++
++ /* copy data parameters */
++    for (i=0; i<FM_PCD_PRS_NUM_OF_HDRS; i++)
++ WRITE_UINT32(*(p_FmPcd->p_FmPcdPrs->p_SwPrsCode+PRS_SW_DATA/4+i), p_SwPrs->swPrsDataParams[i]);
++
++ /* Clear last 4 bytes */
++ WRITE_UINT32(*(p_FmPcd->p_FmPcdPrs->p_SwPrsCode+(PRS_SW_DATA-FM_PCD_PRS_SW_TAIL_SIZE)/4), 0);
++
++ XX_FreeSmart(p_TmpCode);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_ConfigPrsMaxCycleLimit(t_Handle h_FmPcd,uint16_t value)
++{
++ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
++
++    if (p_FmPcd->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigPrsMaxCycleLimit - guest mode!"));
++
++ p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit = value;
++
++ return E_OK;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h
+new file mode 100644
+index 00000000..056f225e
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h
+@@ -0,0 +1,316 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_prs.h
++
++ @Description FM Parser private header
++ *//***************************************************************************/
++#ifndef __FM_PRS_H
++#define __FM_PRS_H
++
++#include "std_ext.h"
++
++/***********************************************************************/
++/* SW parser IP_FRAG patch */
++/***********************************************************************/
++
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++#define SW_PRS_UDP_LITE_PATCH \
++{\
++ 0x31,0x52,0x00,0xDA,0xFC,0x00,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x50,0x2C,0x40,0x00,0x31,0x92,0x50,0x2C, \
++ 0x00,0x88,0x18,0x2F,0x00,0x01,0x1B,0xFE,0x18,0x71, \
++ 0x02,0x1F,0x00,0x08,0x00,0x83,0x02,0x1F,0x00,0x20, \
++ 0x28,0x1B,0x00,0x05,0x29,0x1F,0x30,0xD0,0x60,0x4F, \
++ 0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F,0x00,0x52, \
++ 0x00,0x01,0x07,0x01,0x60,0x3B,0x00,0x00,0x30,0xD0, \
++ 0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \
++ 0x40,0x4C,0x00,0x00,0x02,0x8F,0x00,0x00,0x30,0xF2, \
++ 0x00,0x06,0x18,0x5D,0x00,0x00,0x9F,0xFF,0x30,0xF2, \
++ 0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0,0x00,0x52, \
++ 0x00,0x08,0x28,0x1A,0x60,0x37,0x00,0x00,0x30,0xF2, \
++ 0x18,0x5D,0x06,0x00,0x29,0x1E,0x30,0xF2,0x2F,0x0E, \
++ 0x30,0x72,0x00,0x00,0x9B,0x8F,0x00,0x06,0x2F,0x0E, \
++ 0x32,0xF1,0x32,0xB0,0x00,0x4F,0x00,0x57,0x00,0x28, \
++ 0x00,0x00,0x97,0x9E,0x00,0x4E,0x30,0x72,0x00,0x06, \
++ 0x2F,0x0E,0x32,0xC1,0x32,0xF0,0x00,0x4A,0x00,0x80, \
++ 0x00,0x02,0x00,0x00,0x97,0x9E,0x40,0x7E,0x00,0x08, \
++ 0x08,0x16,0x00,0x54,0x00,0x01,0x1B,0xFE,0x00,0x00, \
++ 0x9F,0x9E,0x40,0xB3,0x00,0x00,0x02,0x1F,0x00,0x08, \
++ 0x28,0x1B,0x30,0x73,0x29,0x1F,0x30,0xD0,0x60,0x9F, \
++ 0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F,0x00,0x52, \
++ 0x00,0x01,0x07,0x01,0x60,0x8B,0x00,0x00,0x30,0xD0, \
++ 0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \
++ 0x40,0x9C,0x00,0x00,0x02,0x8F,0x00,0x00,0x30,0xF2, \
++ 0x00,0x06,0x18,0xAD,0x00,0x00,0x9F,0xFF,0x30,0xF2, \
++ 0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0,0x00,0x52, \
++ 0x00,0x08,0x28,0x1A,0x60,0x87,0x00,0x00,0x30,0xF2, \
++ 0x18,0xAD,0x06,0x00,0x29,0x1E,0x30,0xF2,0x50,0xB3, \
++ 0xFF,0xFF,0x18,0xB8,0x08,0x16,0x00,0x54,0x00,0x01, \
++ 0x1B,0xFE,0x18,0xC5,0x32,0xF1,0x28,0x5D,0x32,0xF1, \
++ 0x00,0x55,0x00,0x08,0x28,0x5F,0x00,0x00,0x8F,0x9F, \
++ 0x29,0x33,0x08,0x16,0x00,0x49,0x00,0x01,0x1B,0xFF, \
++ 0x00,0x01,0x1B,0xFF \
++}
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++
++#if (DPAA_VERSION == 10)
++/* Version: 106.1.9 */
++#define SW_PRS_OFFLOAD_PATCH \
++{ \
++ 0x31,0x52,0x00,0xDA,0x0A,0x00,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x43,0x0A,0x00,0x00,0x00,0x01,0x1B,0xFE, \
++ 0x00,0x00,0x99,0x00,0x53,0x13,0x00,0x00,0x00,0x00, \
++ 0x9F,0x98,0x53,0x13,0x00,0x00,0x1B,0x23,0x33,0xF1, \
++ 0x00,0xF9,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \
++ 0x28,0x7F,0x00,0x03,0x00,0x02,0x00,0x00,0x00,0x01, \
++ 0x32,0xC1,0x32,0xF0,0x00,0x4A,0x00,0x80,0x1F,0xFF, \
++ 0x00,0x01,0x1B,0xFE,0x31,0x52,0x00,0xDA,0x06,0x00, \
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x43,0x2F,0x00,0x00, \
++ 0x00,0x01,0x1B,0xFE,0x31,0x52,0x00,0xDA,0x00,0x40, \
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x95,0x00,0x00, \
++ 0x00,0x00,0x9B,0x8F,0x2F,0x0F,0x32,0xC1,0x00,0x55, \
++ 0x00,0x28,0x28,0x43,0x30,0x7E,0x43,0x45,0x00,0x00, \
++ 0x30,0x7E,0x43,0x45,0x00,0x3C,0x1B,0x5D,0x32,0x11, \
++ 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x83,0x8F, \
++ 0x2F,0x0F,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \
++ 0x00,0x55,0x00,0x01,0x00,0x81,0x32,0x11,0x00,0x00, \
++ 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \
++ 0x28,0x43,0x06,0x00,0x1B,0x3E,0x30,0x7E,0x53,0x79, \
++ 0x00,0x2B,0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x81, \
++ 0x00,0x00,0x87,0x8F,0x28,0x23,0x06,0x00,0x32,0x11, \
++ 0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01,0x00,0x81, \
++ 0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01, \
++ 0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00,0x00,0x01, \
++ 0x1B,0xFE,0x00,0x00,0x9B,0x8E,0x53,0x90,0x00,0x00, \
++ 0x06,0x29,0x00,0x00,0x83,0x8F,0x28,0x23,0x06,0x00, \
++ 0x06,0x29,0x32,0xC1,0x00,0x55,0x00,0x28,0x00,0x00, \
++ 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \
++ 0x28,0x43,0x06,0x00,0x00,0x01,0x1B,0xFE,0x32,0xC1, \
++ 0x00,0x55,0x00,0x28,0x28,0x43,0x1B,0xCF,0x00,0x00, \
++ 0x9B,0x8F,0x2F,0x0F,0x32,0xC1,0x00,0x55,0x00,0x28, \
++ 0x28,0x43,0x30,0x7E,0x43,0xBF,0x00,0x2C,0x32,0x11, \
++ 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x87,0x8F, \
++ 0x28,0x23,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \
++ 0x00,0x81,0x00,0x00,0x83,0x8F,0x2F,0x0F,0x06,0x00, \
++ 0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01, \
++ 0x00,0x81,0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50, \
++ 0x00,0x01,0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00, \
++ 0x1B,0x9C,0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00, \
++ 0x00,0x00,0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02, \
++ 0x00,0x00,0x00,0x01,0x32,0xC1,0x32,0xF0,0x00,0x4A, \
++ 0x00,0x80,0x1F,0xFF,0x00,0x01,0x1B,0xFE, \
++}
++
++#else
++#define SW_PRS_OFFLOAD_PATCH \
++{ \
++ 0x31,0x52,0x00,0xDA,0x0E,0x4F,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x51,0x16,0x08,0x4B,0x31,0x53,0x00,0xFB, \
++ 0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x29,0x2B, \
++ 0x33,0xF1,0x00,0xFB,0x00,0xDF,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x28,0x7F,0x31,0x52,0x00,0xDA,0x0A,0x00, \
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x41,0x20,0x00,0x00, \
++ 0x00,0x01,0x1B,0xFE,0x00,0x00,0x99,0x00,0x51,0x29, \
++ 0x00,0x00,0x00,0x00,0x9F,0x98,0x51,0x29,0x00,0x00, \
++ 0x19,0x44,0x09,0x5F,0x00,0x20,0x00,0x00,0x09,0x4F, \
++ 0x00,0x20,0x00,0x00,0x34,0xB7,0x00,0xF9,0x00,0x00, \
++ 0x01,0x00,0x00,0x00,0x00,0x00,0x2B,0x97,0x31,0xB3, \
++ 0x29,0x8F,0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00, \
++ 0x00,0x00,0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02, \
++ 0x00,0x00,0x00,0x01,0x1B,0xFE,0x00,0x01,0x1B,0xFE, \
++ 0x31,0x52,0x00,0xDA,0xFC,0x00,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x51,0x52,0x40,0x00,0x31,0x92,0x51,0x52, \
++ 0x00,0x88,0x19,0x55,0x08,0x05,0x00,0x00,0x19,0x99, \
++ 0x02,0x1F,0x00,0x08,0x00,0x83,0x02,0x1F,0x00,0x20, \
++ 0x28,0x1B,0x00,0x05,0x29,0x1F,0x30,0xD0,0x61,0x75, \
++ 0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F,0x00,0x52, \
++ 0x00,0x01,0x07,0x01,0x61,0x61,0x00,0x00,0x30,0xD0, \
++ 0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \
++ 0x41,0x72,0x00,0x00,0x02,0x8F,0x00,0x00,0x30,0xF2, \
++ 0x00,0x06,0x19,0x83,0x00,0x00,0x9F,0xFF,0x30,0xF2, \
++ 0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0,0x00,0x52, \
++ 0x00,0x08,0x28,0x1A,0x61,0x5D,0x00,0x00,0x30,0xF2, \
++ 0x19,0x83,0x06,0x00,0x29,0x1E,0x30,0xF2,0x29,0x0E, \
++ 0x30,0x72,0x00,0x00,0x9B,0x8F,0x00,0x06,0x29,0x0E, \
++ 0x32,0xF1,0x32,0xB0,0x00,0x4F,0x00,0x57,0x00,0x28, \
++ 0x00,0x00,0x97,0x9E,0x00,0x4E,0x30,0x72,0x00,0x06, \
++ 0x29,0x0E,0x08,0x05,0x00,0x01,0x31,0x52,0x00,0xDA, \
++ 0x0E,0x4F,0x00,0x00,0x00,0x00,0x00,0x00,0x51,0xAF, \
++ 0x04,0x4B,0x31,0x53,0x00,0xFB,0xFF,0xF0,0x00,0x00, \
++ 0x00,0x00,0x00,0x00,0x29,0x2B,0x33,0xF1,0x00,0xFB, \
++ 0x00,0xDF,0x00,0x00,0x00,0x00,0x00,0x00,0x28,0x7F, \
++ 0x31,0x52,0x00,0xDA,0x06,0x00,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x41,0xB9,0x00,0x00,0x00,0x01,0x1B,0xFE, \
++ 0x31,0x52,0x00,0xDA,0x00,0x40,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x42,0x06,0x00,0x00,0x00,0x00,0x9B,0x8F, \
++ 0x28,0x01,0x32,0xC1,0x00,0x55,0x00,0x28,0x28,0x43, \
++ 0x30,0x00,0x41,0xEB,0x00,0x2C,0x32,0x11,0x32,0xC0, \
++ 0x00,0x4F,0x00,0x81,0x00,0x00,0x87,0x8F,0x28,0x23, \
++ 0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x81, \
++ 0x00,0x00,0x83,0x8F,0x28,0x01,0x06,0x00,0x32,0x11, \
++ 0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01,0x00,0x81, \
++ 0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01, \
++ 0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00,0x19,0xC8, \
++ 0x09,0x5F,0x00,0x20,0x00,0x00,0x09,0x4F,0x00,0x20, \
++ 0x00,0x00,0x34,0xB7,0x00,0xF9,0x00,0x00,0x01,0x00, \
++ 0x00,0x00,0x00,0x00,0x2B,0x97,0x31,0xB3,0x29,0x8F, \
++ 0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02,0x00,0x00, \
++ 0x00,0x01,0x1B,0xFE,0x30,0x50,0x52,0x0B,0x00,0x00, \
++ 0x00,0x01,0x1B,0xFE,0x32,0xF1,0x32,0xC0,0x00,0x4F, \
++ 0x00,0x81,0x00,0x02,0x00,0x00,0x97,0x9E,0x42,0x18, \
++ 0x00,0x08,0x08,0x16,0x00,0x54,0x00,0x01,0x1B,0xFE, \
++ 0x00,0x00,0x9F,0x9E,0x42,0x4D,0x00,0x00,0x02,0x1F, \
++ 0x00,0x08,0x28,0x1B,0x30,0x73,0x29,0x1F,0x30,0xD0, \
++ 0x62,0x39,0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F, \
++ 0x00,0x52,0x00,0x01,0x07,0x01,0x62,0x25,0x00,0x00, \
++ 0x30,0xD0,0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x42,0x36,0x00,0x00,0x02,0x8F,0x00,0x00, \
++ 0x30,0xF2,0x00,0x06,0x1A,0x47,0x00,0x00,0x9F,0xFF, \
++ 0x30,0xF2,0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0, \
++ 0x00,0x52,0x00,0x08,0x28,0x1A,0x62,0x21,0x00,0x00, \
++ 0x30,0xF2,0x1A,0x47,0x06,0x00,0x29,0x1E,0x30,0xF2, \
++ 0x52,0x4D,0xFF,0xFF,0x1A,0x52,0x08,0x16,0x00,0x54, \
++ 0x00,0x01,0x1B,0xFE,0x1A,0x5F,0x32,0xF1,0x28,0x5D, \
++ 0x32,0xF1,0x00,0x55,0x00,0x08,0x28,0x5F,0x00,0x00, \
++ 0x8F,0x9F,0x29,0x33,0x08,0x16,0x00,0x49,0x00,0x01, \
++ 0x1B,0xFF,0x00,0x01,0x1B,0xFF,0x31,0x52,0x00,0xDA, \
++ 0xFC,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x6D, \
++ 0x40,0x00,0x31,0x92,0x52,0x6D,0x00,0x88,0x1A,0x70, \
++ 0x08,0x05,0x00,0x00,0x1A,0xB4,0x02,0x1F,0x00,0x08, \
++ 0x00,0x83,0x02,0x1F,0x00,0x20,0x28,0x1B,0x00,0x05, \
++ 0x29,0x1F,0x30,0xD0,0x62,0x90,0x00,0x07,0x00,0x05, \
++ 0x00,0x00,0xC3,0x8F,0x00,0x52,0x00,0x01,0x07,0x01, \
++ 0x62,0x7C,0x00,0x00,0x30,0xD0,0x00,0xDA,0x00,0x01, \
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x42,0x8D,0x00,0x00, \
++ 0x02,0x8F,0x00,0x00,0x30,0xF2,0x00,0x06,0x1A,0x9E, \
++ 0x00,0x00,0x9F,0xFF,0x30,0xF2,0x00,0x06,0x29,0x1E, \
++ 0x07,0x08,0x30,0xD0,0x00,0x52,0x00,0x08,0x28,0x1A, \
++ 0x62,0x78,0x00,0x00,0x30,0xF2,0x1A,0x9E,0x06,0x00, \
++ 0x29,0x1E,0x30,0xF2,0x29,0x0E,0x30,0x72,0x00,0x00, \
++ 0x9B,0x8F,0x00,0x06,0x29,0x0E,0x32,0xF1,0x32,0xB0, \
++ 0x00,0x4F,0x00,0x57,0x00,0x28,0x00,0x00,0x97,0x9E, \
++ 0x00,0x4E,0x30,0x72,0x00,0x06,0x29,0x0E,0x08,0x05, \
++ 0x00,0x01,0x31,0x52,0x00,0xDA,0x0E,0x4F,0x00,0x00, \
++ 0x00,0x00,0x00,0x00,0x52,0xCA,0x04,0x4B,0x31,0x53, \
++ 0x00,0xFB,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00, \
++ 0x29,0x2B,0x33,0xF1,0x00,0xFB,0x00,0xDF,0x00,0x00, \
++ 0x00,0x00,0x00,0x00,0x28,0x7F,0x31,0x52,0x00,0xDA, \
++ 0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x42,0xD4, \
++ 0x00,0x00,0x00,0x01,0x1B,0xFE,0x31,0x52,0x00,0xDA, \
++ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x37, \
++ 0x00,0x00,0x00,0x00,0x9B,0x8F,0x28,0x01,0x32,0xC1, \
++ 0x00,0x55,0x00,0x28,0x28,0x43,0x30,0x00,0x42,0xEA, \
++ 0x00,0x00,0x30,0x00,0x42,0xEA,0x00,0x3C,0x1B,0x02, \
++ 0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00, \
++ 0x83,0x8F,0x28,0x01,0x06,0x00,0x32,0x11,0x32,0xC0, \
++ 0x00,0x4F,0x00,0x55,0x00,0x01,0x00,0x81,0x32,0x11, \
++ 0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04, \
++ 0x00,0x4D,0x28,0x43,0x06,0x00,0x1A,0xE3,0x30,0x00, \
++ 0x43,0x20,0x00,0x2B,0x00,0x00,0x9B,0x8E,0x43,0x0E, \
++ 0x00,0x00,0x32,0xC1,0x00,0x55,0x00,0x28,0x28,0x43, \
++ 0x1B,0x1F,0x06,0x29,0x00,0x00,0x83,0x8F,0x28,0x23, \
++ 0x06,0x00,0x06,0x29,0x32,0xC1,0x00,0x55,0x00,0x28, \
++ 0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04, \
++ 0x00,0x4D,0x28,0x43,0x06,0x00,0x1B,0x37,0x32,0x11, \
++ 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x87,0x8F, \
++ 0x28,0x23,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \
++ 0x00,0x55,0x00,0x01,0x00,0x81,0x32,0x11,0x00,0x00, \
++ 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \
++ 0x28,0x43,0x06,0x00,0x30,0x50,0x53,0x3C,0x00,0x00, \
++ 0x00,0x01,0x1B,0xFE,0x32,0xF1,0x32,0xC0,0x00,0x4F, \
++ 0x00,0x81,0x00,0x02,0x00,0x00,0x97,0x9E,0x43,0x49, \
++ 0x00,0x08,0x08,0x16,0x00,0x54,0x00,0x01,0x1B,0xFE, \
++ 0x00,0x00,0x9F,0x9E,0x43,0x7E,0x00,0x00,0x02,0x1F, \
++ 0x00,0x08,0x28,0x1B,0x30,0x73,0x29,0x1F,0x30,0xD0, \
++ 0x63,0x6A,0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F, \
++ 0x00,0x52,0x00,0x01,0x07,0x01,0x63,0x56,0x00,0x00, \
++ 0x30,0xD0,0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x43,0x67,0x00,0x00,0x02,0x8F,0x00,0x00, \
++ 0x30,0xF2,0x00,0x06,0x1B,0x78,0x00,0x00,0x9F,0xFF, \
++ 0x30,0xF2,0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0, \
++ 0x00,0x52,0x00,0x08,0x28,0x1A,0x63,0x52,0x00,0x00, \
++ 0x30,0xF2,0x1B,0x78,0x06,0x00,0x29,0x1E,0x30,0xF2, \
++ 0x53,0x7E,0xFF,0xFF,0x1B,0x83,0x08,0x16,0x00,0x54, \
++ 0x00,0x01,0x1B,0xFE,0x1B,0x90,0x32,0xF1,0x28,0x5D, \
++ 0x32,0xF1,0x00,0x55,0x00,0x08,0x28,0x5F,0x00,0x00, \
++ 0x8F,0x9F,0x29,0x33,0x08,0x16,0x00,0x49,0x00,0x01, \
++ 0x1B,0xFF,0x00,0x01,0x1B,0xFF,0x08,0x07,0x00,0x02, \
++ 0x00,0x00,0x8D,0x80,0x53,0x9C,0x00,0x01,0x30,0x71, \
++ 0x00,0x55,0x00,0x01,0x28,0x0F,0x00,0x00,0x8D,0x00, \
++ 0x53,0xA4,0x00,0x01,0x30,0x71,0x00,0x55,0x00,0x01, \
++ 0x28,0x0F,0x00,0x00,0x83,0x8E,0x53,0xB9,0x00,0x00, \
++ 0x00,0x00,0x86,0x08,0x30,0x71,0x00,0x7B,0x03,0xB9, \
++ 0x33,0xB4,0x00,0xDA,0xFF,0xFF,0x00,0x0F,0x00,0x00, \
++ 0x00,0x00,0x00,0x00,0x86,0x09,0x01,0x03,0x00,0x7D, \
++ 0x03,0xB9,0x1B,0xC8,0x33,0xD1,0x00,0xF9,0x00,0x10, \
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x28,0x7B,0x09,0x5F, \
++ 0x00,0x1A,0x00,0x00,0x09,0x4F,0x00,0x1A,0x00,0x00, \
++ 0x00,0x01,0x1B,0xFF,0x00,0x00,0x8C,0x00,0x53,0xF0, \
++ 0x00,0x01,0x34,0xF5,0x00,0xFB,0xFF,0xFF,0x00,0x7F, \
++ 0x00,0x00,0x00,0x00,0x2A,0x9F,0x00,0x00,0x93,0x8F, \
++ 0x28,0x49,0x00,0x00,0x97,0x8F,0x28,0x4B,0x34,0x61, \
++ 0x28,0x4D,0x34,0x71,0x28,0x4F,0x34,0xB7,0x00,0xF9, \
++ 0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x2B,0x97, \
++ 0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00,0x00,0x00, \
++ 0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02,0x00,0x00, \
++ 0x00,0x01,0x1B,0xFF,0x00,0x01,0x1B,0xFF, \
++}
++#endif /* (DPAA_VERSION == 10) */
++
++/****************************/
++/* Parser defines */
++/****************************/
++#define FM_PCD_PRS_SW_TAIL_SIZE 4 /**< Number of bytes that must be cleared at
++ the end of the SW parser area */
++
++/* masks */
++#define PRS_ERR_CAP 0x80000000
++#define PRS_ERR_TYPE_DOUBLE 0x40000000
++#define PRS_ERR_SINGLE_ECC_CNT_MASK 0x00FF0000
++#define PRS_ERR_ADDR_MASK 0x000001FF
++
++/* others */
++#define PRS_MAX_CYCLE_LIMIT 8191
++#define PRS_SW_DATA 0x00000800
++#define PRS_REGS_OFFSET 0x00000840
++
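++/* Helper macros: derive the parser port id from a hardware port id (its low
++   nibble) and build the matching one-hot index flag for that parser port */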
++#define GET_FM_PCD_PRS_PORT_ID(prsPortId,hardwarePortId) \
++ prsPortId = (uint8_t)(hardwarePortId & 0x0f)
++
++#define GET_FM_PCD_INDEX_FLAG(bitMask, prsPortId) \
++ bitMask = 0x80000000>>prsPortId
++
++#endif /* __FM_PRS_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.c
+new file mode 100644
+index 00000000..ee82f730
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.c
+@@ -0,0 +1,984 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_replic.c
++
++ @Description FM frame replicator
++*//***************************************************************************/
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "debug_ext.h"
++#include "fm_pcd_ext.h"
++#include "fm_muram_ext.h"
++#include "fm_common.h"
++#include "fm_hc.h"
++#include "fm_replic.h"
++#include "fm_cc.h"
++#include "list_ext.h"
++
++
++/****************************************/
++/* static functions */
++/****************************************/
++static uint8_t GetMemberPosition(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ uint32_t memberIndex,
++ bool isAddOperation)
++{
++ uint8_t memberPosition;
++ uint32_t lastMemberIndex;
++
++ ASSERT_COND(p_ReplicGroup);
++
++    /* The last member index differs between the add and the remove
++       operations:
++       - remove: it is exactly the index of the current last member;
++       - add: it is the index of the current last member + 1, e.g. if the
++         group has 4 members the actual last member has index 3 (indices
++         start from 0), so to append a new member as the last one we use
++         memberIndex = 4 rather than 3.
++     */
++ if (isAddOperation)
++ lastMemberIndex = p_ReplicGroup->numOfEntries;
++ else
++ lastMemberIndex = p_ReplicGroup->numOfEntries-1;
++
++ /* last */
++ if (memberIndex == lastMemberIndex)
++ memberPosition = FRM_REPLIC_LAST_MEMBER_INDEX;
++ else
++ {
++ /* first */
++ if (memberIndex == 0)
++ memberPosition = FRM_REPLIC_FIRST_MEMBER_INDEX;
++ else
++ {
++ /* middle */
++ ASSERT_COND(memberIndex < lastMemberIndex);
++ memberPosition = FRM_REPLIC_MIDDLE_MEMBER_INDEX;
++ }
++ }
++ return memberPosition;
++}
++
++static t_Error MemberCheckParams(t_Handle h_FmPcd,
++ t_FmPcdCcNextEngineParams *p_MemberParams)
++{
++ t_Error err;
++
++ if ((p_MemberParams->nextEngine != e_FM_PCD_DONE) &&
++ (p_MemberParams->nextEngine != e_FM_PCD_KG) &&
++ (p_MemberParams->nextEngine != e_FM_PCD_PLCR))
++        RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Next engine of a member should be Done, KeyGen or Policer"));
++
++ /* check the regular parameters of the next engine */
++ err = ValidateNextEngineParams(h_FmPcd, p_MemberParams, e_FM_PCD_CC_STATS_MODE_NONE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, ("member next engine parameters"));
++
++ return E_OK;
++}
++
++static t_Error CheckParams(t_Handle h_FmPcd,
++ t_FmPcdFrmReplicGroupParams *p_ReplicGroupParam)
++{
++ int i;
++ t_Error err;
++
++ /* check that max num of entries is at least 2 */
++ if (!IN_RANGE(2, p_ReplicGroupParam->maxNumOfEntries, FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES))
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("maxNumOfEntries in the frame replicator parameters should be 2-%d",FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES));
++
++ /* check that number of entries is greater than zero */
++ if (!p_ReplicGroupParam->numOfEntries)
++        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries in the frame replicator group should be greater than zero"));
++
++ /* check that max num of entries is equal or greater than number of entries */
++ if (p_ReplicGroupParam->maxNumOfEntries < p_ReplicGroupParam->numOfEntries)
++        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("maxNumOfEntries should be greater than or equal to numOfEntries"));
++
++ for (i=0; i<p_ReplicGroupParam->numOfEntries; i++)
++ {
++ err = MemberCheckParams(h_FmPcd, &p_ReplicGroupParam->nextEngineParams[i]);
++ if (err)
++ RETURN_ERROR(MAJOR, err, ("member check parameters"));
++ }
++ return E_OK;
++}
++
++static t_FmPcdFrmReplicMember *GetAvailableMember(t_FmPcdFrmReplicGroup *p_ReplicGroup)
++{
++ t_FmPcdFrmReplicMember *p_ReplicMember = NULL;
++ t_List *p_Next;
++
++ if (!LIST_IsEmpty(&p_ReplicGroup->availableMembersList))
++ {
++ p_Next = LIST_FIRST(&p_ReplicGroup->availableMembersList);
++ p_ReplicMember = LIST_OBJECT(p_Next, t_FmPcdFrmReplicMember, node);
++ ASSERT_COND(p_ReplicMember);
++ LIST_DelAndInit(p_Next);
++ }
++ return p_ReplicMember;
++}
++
++static void PutAvailableMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ t_FmPcdFrmReplicMember *p_ReplicMember)
++{
++ LIST_AddToTail(&p_ReplicMember->node, &p_ReplicGroup->availableMembersList);
++}
++
++static void AddMemberToList(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ t_FmPcdFrmReplicMember *p_CurrentMember,
++ t_List *p_ListHead)
++{
++ LIST_Add(&p_CurrentMember->node, p_ListHead);
++
++ p_ReplicGroup->numOfEntries++;
++}
++
++static void RemoveMemberFromList(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ t_FmPcdFrmReplicMember *p_CurrentMember)
++{
++ ASSERT_COND(p_ReplicGroup->numOfEntries);
++ LIST_DelAndInit(&p_CurrentMember->node);
++ p_ReplicGroup->numOfEntries--;
++}
++
++static void LinkSourceToMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ t_AdOfTypeContLookup *p_SourceTd,
++ t_FmPcdFrmReplicMember *p_ReplicMember)
++{
++ t_FmPcd *p_FmPcd;
++
++ ASSERT_COND(p_SourceTd);
++ ASSERT_COND(p_ReplicMember);
++ ASSERT_COND(p_ReplicGroup);
++ ASSERT_COND(p_ReplicGroup->h_FmPcd);
++
++ /* Link the first member in the group to the source TD */
++ p_FmPcd = p_ReplicGroup->h_FmPcd;
++
++ WRITE_UINT32(p_SourceTd->matchTblPtr,
++ (uint32_t)(XX_VirtToPhys(p_ReplicMember->p_MemberAd) -
++ p_FmPcd->physicalMuramBase));
++}
++
++static void LinkMemberToMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ t_FmPcdFrmReplicMember *p_CurrentMember,
++ t_FmPcdFrmReplicMember *p_NextMember)
++{
++ t_AdOfTypeResult *p_CurrReplicAd = (t_AdOfTypeResult*)p_CurrentMember->p_MemberAd;
++ t_AdOfTypeResult *p_NextReplicAd = NULL;
++ t_FmPcd *p_FmPcd;
++ uint32_t offset = 0;
++
++    /* Check whether the next member exists; NULL means this is the last member */
++ if (p_NextMember)
++ {
++ p_NextReplicAd = (t_AdOfTypeResult*)p_NextMember->p_MemberAd;
++ p_FmPcd = p_ReplicGroup->h_FmPcd;
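++        /* ADs are 16-byte aligned in MURAM: drop the alignment bits of the
++           next AD's MURAM offset and move the result into the next-member
++           field of the current AD */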
++        offset = (XX_VirtToPhys(p_NextReplicAd) - (p_FmPcd->physicalMuramBase));
++        offset = ((offset >> NEXT_FRM_REPLIC_ADDR_SHIFT) << NEXT_FRM_REPLIC_MEMBER_INDEX_SHIFT);
++ }
++
++ /* link the current AD to point to the AD of the next member */
++ WRITE_UINT32(p_CurrReplicAd->res, offset);
++}
++
++static t_Error ModifyDescriptor(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ void *p_OldDescriptor,
++ void *p_NewDescriptor)
++{
++ t_Handle h_Hc;
++ t_Error err;
++ t_FmPcd *p_FmPcd;
++
++ ASSERT_COND(p_ReplicGroup);
++ ASSERT_COND(p_ReplicGroup->h_FmPcd);
++ ASSERT_COND(p_OldDescriptor);
++ ASSERT_COND(p_NewDescriptor);
++
++ p_FmPcd = p_ReplicGroup->h_FmPcd;
++ h_Hc = FmPcdGetHcHandle(p_FmPcd);
++ if (!h_Hc)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("Host command"));
++
++ err = FmHcPcdCcDoDynamicChange(h_Hc,
++ (uint32_t)(XX_VirtToPhys(p_OldDescriptor) - p_FmPcd->physicalMuramBase),
++ (uint32_t)(XX_VirtToPhys(p_NewDescriptor) - p_FmPcd->physicalMuramBase));
++ if (err)
++ RETURN_ERROR(MAJOR, err, ("Dynamic change host command"));
++
++ return E_OK;
++}
++
++static void FillReplicAdOfTypeResult(void *p_ReplicAd, bool last)
++{
++ t_AdOfTypeResult *p_CurrReplicAd = (t_AdOfTypeResult*)p_ReplicAd;
++ uint32_t tmp;
++
++ tmp = GET_UINT32(p_CurrReplicAd->plcrProfile);
++ if (last)
++        /* clear the NL bit in case it's the last member in the group */
++        WRITE_UINT32(p_CurrReplicAd->plcrProfile, (tmp & ~FRM_REPLIC_NL_BIT));
++    else
++        /* set the NL bit in case it's not the last member in the group */
++        WRITE_UINT32(p_CurrReplicAd->plcrProfile, (tmp | FRM_REPLIC_NL_BIT));
++
++ /* set FR bit in the action descriptor */
++ tmp = GET_UINT32(p_CurrReplicAd->nia);
++ WRITE_UINT32(p_CurrReplicAd->nia,
++ (tmp | FRM_REPLIC_FR_BIT | FM_PCD_AD_RESULT_EXTENDED_MODE ));
++}
++
++static void BuildSourceTd(void *p_Ad)
++{
++ t_AdOfTypeContLookup *p_SourceTd;
++
++ ASSERT_COND(p_Ad);
++
++ p_SourceTd = (t_AdOfTypeContLookup *)p_Ad;
++
++ IOMemSet32((uint8_t*)p_SourceTd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* initialize the source table descriptor */
++ WRITE_UINT32(p_SourceTd->ccAdBase, FM_PCD_AD_CONT_LOOKUP_TYPE);
++ WRITE_UINT32(p_SourceTd->pcAndOffsets, FRM_REPLIC_SOURCE_TD_OPCODE);
++}
++
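++/* Build the updated descriptor in the shadow MURAM area and then swap it in
++   with a host command, so the FMan controller never observes a partially
++   written descriptor */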
++static t_Error BuildShadowAndModifyDescriptor(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ t_FmPcdFrmReplicMember *p_NextMember,
++ t_FmPcdFrmReplicMember *p_CurrentMember,
++ bool sourceDescriptor,
++ bool last)
++{
++ t_FmPcd *p_FmPcd;
++ t_FmPcdFrmReplicMember shadowMember;
++ t_Error err;
++
++ ASSERT_COND(p_ReplicGroup);
++ ASSERT_COND(p_ReplicGroup->h_FmPcd);
++
++ p_FmPcd = p_ReplicGroup->h_FmPcd;
++ ASSERT_COND(p_FmPcd->p_CcShadow);
++
++ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
++ return ERROR_CODE(E_BUSY);
++
++ if (sourceDescriptor)
++ {
++ BuildSourceTd(p_FmPcd->p_CcShadow);
++ LinkSourceToMember(p_ReplicGroup, p_FmPcd->p_CcShadow, p_NextMember);
++
++ /* Modify the source table descriptor according to the prepared shadow descriptor */
++ err = ModifyDescriptor(p_ReplicGroup,
++ p_ReplicGroup->p_SourceTd,
++ p_FmPcd->p_CcShadow/* new prepared source td */);
++
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ if (err)
++ RETURN_ERROR(MAJOR, err, ("Modify source Descriptor in BuildShadowAndModifyDescriptor"));
++
++ }
++ else
++ {
++ IO2IOCpy32(p_FmPcd->p_CcShadow,
++ p_CurrentMember->p_MemberAd,
++ FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* update the last bit in the shadow ad */
++ FillReplicAdOfTypeResult(p_FmPcd->p_CcShadow, last);
++
++ shadowMember.p_MemberAd = p_FmPcd->p_CcShadow;
++
++ /* update the next FR member index */
++ LinkMemberToMember(p_ReplicGroup, &shadowMember, p_NextMember);
++
++ /* Modify the next member according to the prepared shadow descriptor */
++ err = ModifyDescriptor(p_ReplicGroup,
++ p_CurrentMember->p_MemberAd,
++ p_FmPcd->p_CcShadow);
++
++ RELEASE_LOCK(p_FmPcd->shadowLock);
++ if (err)
++ RETURN_ERROR(MAJOR, err, ("Modify Descriptor in BuildShadowAndModifyDescriptor"));
++ }
++
++ return E_OK;
++}
++
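++/* Linear walk of the members list; returns the member at the given index,
++   or NULL if the index is beyond the end of the list */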
++static t_FmPcdFrmReplicMember* GetMemberByIndex(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ uint16_t memberIndex)
++{
++ int i=0;
++ t_List *p_Pos;
++ t_FmPcdFrmReplicMember *p_Member = NULL;
++
++ LIST_FOR_EACH(p_Pos, &p_ReplicGroup->membersList)
++ {
++ if (i == memberIndex)
++ {
++ p_Member = LIST_OBJECT(p_Pos, t_FmPcdFrmReplicMember, node);
++ return p_Member;
++ }
++ i++;
++ }
++ return p_Member;
++}
++
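++/* Pre-allocate a member structure and its MURAM AD and park it on the
++   available-members list, so no MURAM allocation is needed at run-time */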
++static t_Error AllocMember(t_FmPcdFrmReplicGroup *p_ReplicGroup)
++{
++ t_FmPcdFrmReplicMember *p_CurrentMember;
++ t_Handle h_Muram;
++
++ ASSERT_COND(p_ReplicGroup);
++
++ h_Muram = FmPcdGetMuramHandle(p_ReplicGroup->h_FmPcd);
++ ASSERT_COND(h_Muram);
++
++ /* Initialize an internal structure of a member to add to the available members list */
++ p_CurrentMember = (t_FmPcdFrmReplicMember *)XX_Malloc(sizeof(t_FmPcdFrmReplicMember));
++ if (!p_CurrentMember)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Frame replicator member"));
++
++    memset(p_CurrentMember, 0, sizeof(t_FmPcdFrmReplicMember));
++
++ /* Allocate the member AD */
++ p_CurrentMember->p_MemberAd =
++ (t_AdOfTypeResult*)FM_MURAM_AllocMem(h_Muram,
++ FM_PCD_CC_AD_ENTRY_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_CurrentMember->p_MemberAd)
++ {
++ XX_Free(p_CurrentMember);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("member AD table"));
++ }
++ IOMemSet32((uint8_t*)p_CurrentMember->p_MemberAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* Add the new member to the available members list */
++ LIST_AddToTail(&p_CurrentMember->node, &(p_ReplicGroup->availableMembersList));
++
++ return E_OK;
++}
++
++static t_FmPcdFrmReplicMember* InitMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ t_FmPcdCcNextEngineParams *p_MemberParams,
++ bool last)
++{
++ t_FmPcdFrmReplicMember *p_CurrentMember = NULL;
++
++ ASSERT_COND(p_ReplicGroup);
++
++ /* Get an available member from the internal members list */
++ p_CurrentMember = GetAvailableMember(p_ReplicGroup);
++ if (!p_CurrentMember)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_FOUND, ("Available member"));
++ return NULL;
++ }
++ p_CurrentMember->h_Manip = NULL;
++
++ /* clear the Ad of the new member */
++ IOMemSet32((uint8_t*)p_CurrentMember->p_MemberAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ INIT_LIST(&p_CurrentMember->node);
++
++ /* Initialize the Ad of the member */
++ NextStepAd(p_CurrentMember->p_MemberAd,
++ NULL,
++ p_MemberParams,
++ p_ReplicGroup->h_FmPcd);
++
++ /* save Manip handle (for free needs) */
++ if (p_MemberParams->h_Manip)
++ p_CurrentMember->h_Manip = p_MemberParams->h_Manip;
++
++ /* Initialize the relevant frame replicator fields in the AD */
++ FillReplicAdOfTypeResult(p_CurrentMember->p_MemberAd, last);
++
++ return p_CurrentMember;
++}
++
++static void FreeMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ t_FmPcdFrmReplicMember *p_Member)
++{
++    /* Note: the member AD cannot be freed here; the member is only returned
++       to the available members list, so just zero the AD */
++
++ /* zero the AD */
++ IOMemSet32(p_Member->p_MemberAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
++
++ /* return the member to the available members list */
++ PutAvailableMember(p_ReplicGroup, p_Member);
++}
++
++static t_Error RemoveMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
++ uint16_t memberIndex)
++{
++ t_FmPcd *p_FmPcd = NULL;
++ t_FmPcdFrmReplicMember *p_CurrentMember = NULL, *p_PreviousMember = NULL, *p_NextMember = NULL;
++ t_Error err;
++ uint8_t memberPosition;
++
++ p_FmPcd = p_ReplicGroup->h_FmPcd;
++ ASSERT_COND(p_FmPcd);
++ UNUSED(p_FmPcd);
++
++ p_CurrentMember = GetMemberByIndex(p_ReplicGroup, memberIndex);
++ ASSERT_COND(p_CurrentMember);
++
++ /* determine the member position in the group */
++ memberPosition = GetMemberPosition(p_ReplicGroup,
++ memberIndex,
++ FALSE/*remove operation*/);
++
++ switch (memberPosition)
++ {
++ case FRM_REPLIC_FIRST_MEMBER_INDEX:
++ p_NextMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex+1));
++ ASSERT_COND(p_NextMember);
++
++ /* update the source td itself by using a host command */
++ err = BuildShadowAndModifyDescriptor(p_ReplicGroup,
++ p_NextMember,
++ NULL,
++ TRUE/*sourceDescriptor*/,
++ FALSE/*last*/);
++ break;
++
++ case FRM_REPLIC_MIDDLE_MEMBER_INDEX:
++ p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
++ ASSERT_COND(p_PreviousMember);
++
++ p_NextMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex+1));
++ ASSERT_COND(p_NextMember);
++
++ err = BuildShadowAndModifyDescriptor(p_ReplicGroup,
++ p_NextMember,
++ p_PreviousMember,
++ FALSE/*sourceDescriptor*/,
++ FALSE/*last*/);
++
++ break;
++
++ case FRM_REPLIC_LAST_MEMBER_INDEX:
++ p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
++ ASSERT_COND(p_PreviousMember);
++
++ err = BuildShadowAndModifyDescriptor(p_ReplicGroup,
++ NULL,
++ p_PreviousMember,
++ FALSE/*sourceDescriptor*/,
++ TRUE/*last*/);
++ break;
++
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("member position in remove member"));
++ }
++
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (p_CurrentMember->h_Manip)
++ {
++ FmPcdManipUpdateOwner(p_CurrentMember->h_Manip, FALSE);
++ p_CurrentMember->h_Manip = NULL;
++ }
++
++ /* remove the member from the driver internal members list */
++ RemoveMemberFromList(p_ReplicGroup, p_CurrentMember);
++
++ /* return the member to the available members list */
++ FreeMember(p_ReplicGroup, p_CurrentMember);
++
++ return E_OK;
++}
++
++static void DeleteGroup(t_FmPcdFrmReplicGroup *p_ReplicGroup)
++{
++ int i, j;
++ t_Handle h_Muram;
++ t_FmPcdFrmReplicMember *p_Member, *p_CurrentMember;
++
++ if (p_ReplicGroup)
++ {
++ ASSERT_COND(p_ReplicGroup->h_FmPcd);
++ h_Muram = FmPcdGetMuramHandle(p_ReplicGroup->h_FmPcd);
++ ASSERT_COND(h_Muram);
++
++ /* free the source table descriptor */
++ if (p_ReplicGroup->p_SourceTd)
++ {
++ FM_MURAM_FreeMem(h_Muram, p_ReplicGroup->p_SourceTd);
++ p_ReplicGroup->p_SourceTd = NULL;
++ }
++
++ /* Remove all members from the members linked list (hw and sw) and
++ return the members to the available members list */
++ if (p_ReplicGroup->numOfEntries)
++ {
++ j = p_ReplicGroup->numOfEntries-1;
++
++            /* remove the members manually because this group has no
++               owners */
++ for (i=j; i>=0; i--)
++ {
++ p_CurrentMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)i/*memberIndex*/);
++ ASSERT_COND(p_CurrentMember);
++
++ if (p_CurrentMember->h_Manip)
++ {
++ FmPcdManipUpdateOwner(p_CurrentMember->h_Manip, FALSE);
++ p_CurrentMember->h_Manip = NULL;
++ }
++
++ /* remove the member from the internal driver members list */
++ RemoveMemberFromList(p_ReplicGroup, p_CurrentMember);
++
++ /* return the member to the available members list */
++ FreeMember(p_ReplicGroup, p_CurrentMember);
++ }
++ }
++
++ /* Free members AD */
++ for (i=0; i<p_ReplicGroup->maxNumOfEntries; i++)
++ {
++ p_Member = GetAvailableMember(p_ReplicGroup);
++ ASSERT_COND(p_Member);
++ if (p_Member->p_MemberAd)
++ {
++ FM_MURAM_FreeMem(h_Muram, p_Member->p_MemberAd);
++ p_Member->p_MemberAd = NULL;
++ }
++ XX_Free(p_Member);
++ }
++
++ /* release the group lock */
++ if (p_ReplicGroup->p_Lock)
++ FmPcdReleaseLock(p_ReplicGroup->h_FmPcd, p_ReplicGroup->p_Lock);
++
++ /* free the replicator group */
++ XX_Free(p_ReplicGroup);
++ }
++}
++
++
++/*****************************************************************************/
++/* Inter-module API routines */
++/*****************************************************************************/
++
++/* NOTE: the inter-module routines rely on the CC module to take the needed locks when calling them */
++void * FrmReplicGroupGetSourceTableDescriptor(t_Handle h_ReplicGroup)
++{
++ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
++ ASSERT_COND(p_ReplicGroup);
++
++ return (p_ReplicGroup->p_SourceTd);
++}
++
++void FrmReplicGroupUpdateAd(t_Handle h_ReplicGroup,
++ void *p_Ad,
++ t_Handle *h_AdNew)
++{
++ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
++ t_AdOfTypeResult *p_AdResult = (t_AdOfTypeResult*)p_Ad;
++ t_FmPcd *p_FmPcd;
++
++ ASSERT_COND(p_ReplicGroup);
++ p_FmPcd = p_ReplicGroup->h_FmPcd;
++
++ /* build a bypass ad */
++ WRITE_UINT32(p_AdResult->fqid, FM_PCD_AD_BYPASS_TYPE |
++ (uint32_t)((XX_VirtToPhys(p_ReplicGroup->p_SourceTd)) - p_FmPcd->physicalMuramBase));
++
++ *h_AdNew = NULL;
++}
++
++void FrmReplicGroupUpdateOwner(t_Handle h_ReplicGroup,
++ bool add)
++{
++ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
++ ASSERT_COND(p_ReplicGroup);
++
++ /* update the group owner counter */
++ if (add)
++ p_ReplicGroup->owners++;
++ else
++ {
++ ASSERT_COND(p_ReplicGroup->owners);
++ p_ReplicGroup->owners--;
++ }
++}
++
++t_Error FrmReplicGroupTryLock(t_Handle h_ReplicGroup)
++{
++ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
++
++ ASSERT_COND(h_ReplicGroup);
++
++ if (FmPcdLockTryLock(p_ReplicGroup->p_Lock))
++ return E_OK;
++
++ return ERROR_CODE(E_BUSY);
++}
++
++void FrmReplicGroupUnlock(t_Handle h_ReplicGroup)
++{
++ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
++
++ ASSERT_COND(h_ReplicGroup);
++
++ FmPcdLockUnlock(p_ReplicGroup->p_Lock);
++}
++/*********************** End of inter-module routines ************************/
++
++
++/****************************************/
++/* API Init unit functions */
++/****************************************/
++t_Handle FM_PCD_FrmReplicSetGroup(t_Handle h_FmPcd,
++ t_FmPcdFrmReplicGroupParams *p_ReplicGroupParam)
++{
++ t_FmPcdFrmReplicGroup *p_ReplicGroup;
++ t_FmPcdFrmReplicMember *p_CurrentMember, *p_NextMember = NULL;
++ int i;
++ t_Error err;
++ bool last = FALSE;
++ t_Handle h_Muram;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_ReplicGroupParam, E_INVALID_HANDLE, NULL);
++
++ if (!FmPcdIsAdvancedOffloadSupported(h_FmPcd))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Advanced-offload must be enabled"));
++ return NULL;
++ }
++
++ err = CheckParams(h_FmPcd, p_ReplicGroupParam);
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, err, (NO_MSG));
++ return NULL;
++ }
++
++ p_ReplicGroup = (t_FmPcdFrmReplicGroup*)XX_Malloc(sizeof(t_FmPcdFrmReplicGroup));
++ if (!p_ReplicGroup)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
++ return NULL;
++ }
++ memset(p_ReplicGroup, 0, sizeof(t_FmPcdFrmReplicGroup));
++
++ /* initialize lists for internal driver use */
++ INIT_LIST(&p_ReplicGroup->availableMembersList);
++ INIT_LIST(&p_ReplicGroup->membersList);
++
++ p_ReplicGroup->h_FmPcd = h_FmPcd;
++
++ h_Muram = FmPcdGetMuramHandle(p_ReplicGroup->h_FmPcd);
++ ASSERT_COND(h_Muram);
++
++ /* initialize the group lock */
++ p_ReplicGroup->p_Lock = FmPcdAcquireLock(p_ReplicGroup->h_FmPcd);
++ if (!p_ReplicGroup->p_Lock)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Replic group lock"));
++ DeleteGroup(p_ReplicGroup);
++ return NULL;
++ }
++
++ /* Allocate the frame replicator source table descriptor */
++ p_ReplicGroup->p_SourceTd =
++ (t_Handle)FM_MURAM_AllocMem(h_Muram,
++ FM_PCD_CC_AD_ENTRY_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (!p_ReplicGroup->p_SourceTd)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("frame replicator source table descriptor"));
++ DeleteGroup(p_ReplicGroup);
++ return NULL;
++ }
++
++ /* update the shadow size - required for the host commands */
++ err = FmPcdUpdateCcShadow(p_ReplicGroup->h_FmPcd,
++ FM_PCD_CC_AD_ENTRY_SIZE,
++ FM_PCD_CC_AD_TABLE_ALIGN);
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, err, ("Update CC shadow"));
++ DeleteGroup(p_ReplicGroup);
++ return NULL;
++ }
++
++ p_ReplicGroup->maxNumOfEntries = p_ReplicGroupParam->maxNumOfEntries;
++
++    /* Allocate the maximal number of member ADs and statistics ADs for the
++       group; this prevents MURAM allocation at run-time */
++ for (i=0; i<p_ReplicGroup->maxNumOfEntries; i++)
++ {
++ err = AllocMember(p_ReplicGroup);
++ if (err)
++ {
++ REPORT_ERROR(MAJOR, err, ("allocate a new member"));
++ DeleteGroup(p_ReplicGroup);
++ return NULL;
++ }
++ }
++
++ /* Initialize the members linked lists:
++ (hw - the one that is used by the FMan controller and
++ sw - the one that is managed by the driver internally) */
++ for (i=(p_ReplicGroupParam->numOfEntries-1); i>=0; i--)
++ {
++ /* check if this is the last member in the group */
++ if (i == (p_ReplicGroupParam->numOfEntries-1))
++ last = TRUE;
++ else
++ last = FALSE;
++
++ /* Initialize a new member */
++ p_CurrentMember = InitMember(p_ReplicGroup,
++ &(p_ReplicGroupParam->nextEngineParams[i]),
++ last);
++ if (!p_CurrentMember)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("No available member"));
++ DeleteGroup(p_ReplicGroup);
++ return NULL;
++ }
++
++ /* Build the members group - link two consecutive members in the hw linked list */
++ LinkMemberToMember(p_ReplicGroup, p_CurrentMember, p_NextMember);
++
++        /* update the driver internal members list to match the hw members linked list */
++ AddMemberToList(p_ReplicGroup, p_CurrentMember, &p_ReplicGroup->membersList);
++
++ p_NextMember = p_CurrentMember;
++ }
++
++ /* initialize the source table descriptor */
++ BuildSourceTd(p_ReplicGroup->p_SourceTd);
++
++ /* link the source table descriptor to point to the first member in the group */
++ LinkSourceToMember(p_ReplicGroup, p_ReplicGroup->p_SourceTd, p_NextMember);
++
++ return p_ReplicGroup;
++}
++
++t_Error FM_PCD_FrmReplicDeleteGroup(t_Handle h_ReplicGroup)
++{
++ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
++
++ SANITY_CHECK_RETURN_ERROR(p_ReplicGroup, E_INVALID_HANDLE);
++
++ if (p_ReplicGroup->owners)
++ RETURN_ERROR(MAJOR,
++ E_INVALID_STATE,
++ ("the group has owners and can't be deleted"));
++
++ DeleteGroup(p_ReplicGroup);
++
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* API Run-time Frame replicator Control unit functions */
++/*****************************************************************************/
++t_Error FM_PCD_FrmReplicAddMember(t_Handle h_ReplicGroup,
++ uint16_t memberIndex,
++ t_FmPcdCcNextEngineParams *p_MemberParams)
++{
++ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup*) h_ReplicGroup;
++ t_FmPcdFrmReplicMember *p_NewMember, *p_CurrentMember = NULL, *p_PreviousMember = NULL;
++ t_Error err;
++ uint8_t memberPosition;
++
++ SANITY_CHECK_RETURN_ERROR(p_ReplicGroup, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_MemberParams, E_INVALID_HANDLE);
++
++ /* group lock */
++ err = FrmReplicGroupTryLock(p_ReplicGroup);
++ if (GET_ERROR_TYPE(err) == E_BUSY)
++ return ERROR_CODE(E_BUSY);
++
++ if (memberIndex > p_ReplicGroup->numOfEntries)
++ {
++ /* unlock */
++ FrmReplicGroupUnlock(p_ReplicGroup);
++        RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
++                     ("memberIndex is greater than the number of members in the list"));
++ }
++
++ if (memberIndex >= p_ReplicGroup->maxNumOfEntries)
++ {
++ /* unlock */
++ FrmReplicGroupUnlock(p_ReplicGroup);
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("memberIndex is greater than the allowed number of members in the group"));
++ }
++
++ if ((p_ReplicGroup->numOfEntries + 1) > FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES)
++ {
++ /* unlock */
++ FrmReplicGroupUnlock(p_ReplicGroup);
++        RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++                     ("numOfEntries with new entry cannot be larger than %d\n",
++                      FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES));
++ }
++
++ err = MemberCheckParams(p_ReplicGroup->h_FmPcd, p_MemberParams);
++ if (err)
++ {
++ /* unlock */
++ FrmReplicGroupUnlock(p_ReplicGroup);
++ RETURN_ERROR(MAJOR, err, ("member check parameters in add operation"));
++ }
++ /* determine the member position in the group */
++ memberPosition = GetMemberPosition(p_ReplicGroup,
++ memberIndex,
++ TRUE/* add operation */);
++
++ /* Initialize a new member */
++ p_NewMember = InitMember(p_ReplicGroup,
++ p_MemberParams,
++ (memberPosition == FRM_REPLIC_LAST_MEMBER_INDEX ? TRUE : FALSE));
++ if (!p_NewMember)
++ {
++ /* unlock */
++ FrmReplicGroupUnlock(p_ReplicGroup);
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("No available member"));
++ }
++
++ switch (memberPosition)
++ {
++ case FRM_REPLIC_FIRST_MEMBER_INDEX:
++ p_CurrentMember = GetMemberByIndex(p_ReplicGroup, memberIndex);
++ ASSERT_COND(p_CurrentMember);
++
++ LinkMemberToMember(p_ReplicGroup, p_NewMember, p_CurrentMember);
++
++ /* update the internal group source TD */
++ LinkSourceToMember(p_ReplicGroup,
++ p_ReplicGroup->p_SourceTd,
++ p_NewMember);
++
++ /* add member to the internal sw member list */
++ AddMemberToList(p_ReplicGroup,
++ p_NewMember,
++ &p_ReplicGroup->membersList);
++ break;
++
++ case FRM_REPLIC_MIDDLE_MEMBER_INDEX:
++ p_CurrentMember = GetMemberByIndex(p_ReplicGroup, memberIndex);
++ ASSERT_COND(p_CurrentMember);
++
++ p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
++ ASSERT_COND(p_PreviousMember);
++
++ LinkMemberToMember(p_ReplicGroup, p_NewMember, p_CurrentMember);
++ LinkMemberToMember(p_ReplicGroup, p_PreviousMember, p_NewMember);
++
++ AddMemberToList(p_ReplicGroup, p_NewMember, &p_PreviousMember->node);
++ break;
++
++ case FRM_REPLIC_LAST_MEMBER_INDEX:
++ p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
++ ASSERT_COND(p_PreviousMember);
++
++ LinkMemberToMember(p_ReplicGroup, p_PreviousMember, p_NewMember);
++ FillReplicAdOfTypeResult(p_PreviousMember->p_MemberAd, FALSE/*last*/);
++
++ /* add the new member to the internal sw member list */
++ AddMemberToList(p_ReplicGroup, p_NewMember, &p_PreviousMember->node);
++ break;
++
++ default:
++ /* unlock */
++ FrmReplicGroupUnlock(p_ReplicGroup);
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("member position in add member"));
++
++ }
++
++ /* unlock */
++ FrmReplicGroupUnlock(p_ReplicGroup);
++
++ return E_OK;
++}
++
++t_Error FM_PCD_FrmReplicRemoveMember(t_Handle h_ReplicGroup,
++ uint16_t memberIndex)
++{
++ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup*) h_ReplicGroup;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_ReplicGroup, E_INVALID_HANDLE);
++
++ /* lock */
++ err = FrmReplicGroupTryLock(p_ReplicGroup);
++ if (GET_ERROR_TYPE(err) == E_BUSY)
++ return ERROR_CODE(E_BUSY);
++
++    if (memberIndex >= p_ReplicGroup->numOfEntries)
++    {
++        FrmReplicGroupUnlock(p_ReplicGroup);
++        RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("member index to remove"));
++    }
++
++    /* Design decision: a group must contain at least one member;
++       removing the last member of the group is not allowed */
++    if (p_ReplicGroup->numOfEntries == 1)
++    {
++        FrmReplicGroupUnlock(p_ReplicGroup);
++        RETURN_ERROR(MAJOR, E_CONFLICT, ("Can't remove the last member. At least one member should be related to a group."));
++    }
++
++ err = RemoveMember(p_ReplicGroup, memberIndex);
++
++ /* unlock */
++ FrmReplicGroupUnlock(p_ReplicGroup);
++
++ switch (GET_ERROR_TYPE(err))
++ {
++ case E_OK:
++ return E_OK;
++
++ case E_BUSY:
++ DBG(TRACE, ("E_BUSY error"));
++ return ERROR_CODE(E_BUSY);
++
++ default:
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++}
++
++/*********************** End of API routines ************************/
++
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.h
+new file mode 100644
+index 00000000..0e8e8bc0
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.h
+@@ -0,0 +1,101 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_replic.h
++
++ @Description FM frame replicator
++*//***************************************************************************/
++#ifndef __FM_REPLIC_H
++#define __FM_REPLIC_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++
++
++#define FRM_REPLIC_SOURCE_TD_OPCODE 0x75
++#define NEXT_FRM_REPLIC_ADDR_SHIFT 4
++#define NEXT_FRM_REPLIC_MEMBER_INDEX_SHIFT 16
++#define FRM_REPLIC_FR_BIT 0x08000000
++#define FRM_REPLIC_NL_BIT 0x10000000
++#define FRM_REPLIC_INVALID_MEMBER_INDEX 0xffff
++#define FRM_REPLIC_FIRST_MEMBER_INDEX 0
++
++#define FRM_REPLIC_MIDDLE_MEMBER_INDEX 1
++#define FRM_REPLIC_LAST_MEMBER_INDEX 2
++
++#define SOURCE_TD_ITSELF_OPTION 0x01
++#define SOURCE_TD_COPY_OPTION 0x02
++#define SOURCE_TD_ITSELF_AND_COPY_OPTION   (SOURCE_TD_ITSELF_OPTION | SOURCE_TD_COPY_OPTION)
++#define SOURCE_TD_NONE 0x04
++
++/*typedef enum e_SourceTdOption
++{
++ e_SOURCE_TD_NONE = 0,
++ e_SOURCE_TD_ITSELF_OPTION = 1,
++ e_SOURCE_TD_COPY_OPTION = 2,
++ e_SOURCE_TD_ITSELF_AND_COPY_OPTION = e_SOURCE_TD_ITSELF_OPTION | e_SOURCE_TD_COPY_OPTION
++} e_SourceTdOption;
++*/
++
++typedef struct
++{
++ volatile uint32_t type;
++ volatile uint32_t frGroupPointer;
++ volatile uint32_t operationCode;
++ volatile uint32_t reserved;
++} t_FrmReplicGroupSourceAd;
++
++typedef struct t_FmPcdFrmReplicMember
++{
++ void *p_MemberAd; /**< pointer to the member AD */
++    void *p_StatisticsAd; /**< pointer to the statistics AD of the member */
++ t_Handle h_Manip; /**< manip handle - need for free routines */
++ t_List node;
++} t_FmPcdFrmReplicMember;
++
++typedef struct t_FmPcdFrmReplicGroup
++{
++ t_Handle h_FmPcd;
++
++ uint8_t maxNumOfEntries;/**< maximal number of members in the group */
++ uint8_t numOfEntries; /**< actual number of members in the group */
++ uint16_t owners; /**< how many keys share this frame replicator group */
++ void *p_SourceTd; /**< pointer to the frame replicator source table descriptor */
++ t_List membersList; /**< the members list - should reflect the order of the members as in the hw linked list*/
++ t_List availableMembersList;/**< list of all the available members in the group */
++ t_FmPcdLock *p_Lock;
++} t_FmPcdFrmReplicGroup;
++
++
++#endif /* __FM_REPLIC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_kg.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_kg.c
+new file mode 100644
+index 00000000..49b86e8e
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_kg.c
+@@ -0,0 +1,888 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "fsl_fman_kg.h"
++
++/****************************************/
++/* static functions */
++/****************************************/
++
++
++static uint32_t build_ar_bind_scheme(uint8_t hwport_id, bool write)
++{
++ uint32_t rw;
++
++ rw = write ? (uint32_t)FM_KG_KGAR_WRITE : (uint32_t)FM_KG_KGAR_READ;
++
++ return (uint32_t)(FM_KG_KGAR_GO |
++ rw |
++ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
++ hwport_id |
++ FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP);
++}
++
++static void clear_pe_all_scheme(struct fman_kg_regs *regs, uint8_t hwport_id)
++{
++ uint32_t ar;
++
++ fman_kg_write_sp(regs, 0xffffffff, 0);
++
++ ar = build_ar_bind_scheme(hwport_id, TRUE);
++ fman_kg_write_ar_wait(regs, ar);
++}
++
++static uint32_t build_ar_bind_cls_plan(uint8_t hwport_id, bool write)
++{
++ uint32_t rw;
++
++ rw = write ? (uint32_t)FM_KG_KGAR_WRITE : (uint32_t)FM_KG_KGAR_READ;
++
++ return (uint32_t)(FM_KG_KGAR_GO |
++ rw |
++ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
++ hwport_id |
++ FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP);
++}
++
++static void clear_pe_all_cls_plan(struct fman_kg_regs *regs, uint8_t hwport_id)
++{
++ uint32_t ar;
++
++ fman_kg_write_cpp(regs, 0);
++
++ ar = build_ar_bind_cls_plan(hwport_id, TRUE);
++ fman_kg_write_ar_wait(regs, ar);
++}
++
++static uint8_t get_gen_ht_code(enum fman_kg_gen_extract_src src,
++ bool no_validation,
++ uint8_t *offset)
++{
++ int code;
++
++ switch (src) {
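++	/* Header-type codes in the 0x70 range select the "no validation"
++	 * variant of the extraction source, i.e. the field is extracted
++	 * without requiring the parser to have validated that header */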
++ case E_FMAN_KG_GEN_EXTRACT_ETH:
++ code = no_validation ? 0x73 : 0x3;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_ETYPE:
++ code = no_validation ? 0x77 : 0x7;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_SNAP:
++ code = no_validation ? 0x74 : 0x4;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_1:
++ code = no_validation ? 0x75 : 0x5;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_N:
++ code = no_validation ? 0x76 : 0x6;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_PPPoE:
++ code = no_validation ? 0x78 : 0x8;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_MPLS_1:
++ code = no_validation ? 0x79 : 0x9;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_MPLS_2:
++ code = no_validation ? FM_KG_SCH_GEN_HT_INVALID : 0x19;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_MPLS_3:
++ code = no_validation ? FM_KG_SCH_GEN_HT_INVALID : 0x29;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_MPLS_N:
++ code = no_validation ? 0x7a : 0xa;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_IPv4_1:
++ code = no_validation ? 0x7b : 0xb;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_IPv6_1:
++ code = no_validation ? 0x7b : 0x1b;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_IPv4_2:
++ code = no_validation ? 0x7c : 0xc;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_IPv6_2:
++ code = no_validation ? 0x7c : 0x1c;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_MINENCAP:
++ code = no_validation ? 0x7c : 0x2c;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_IP_PID:
++ code = no_validation ? 0x72 : 0x2;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_GRE:
++ code = no_validation ? 0x7d : 0xd;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_TCP:
++ code = no_validation ? 0x7e : 0xe;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_UDP:
++ code = no_validation ? 0x7e : 0x1e;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_SCTP:
++ code = no_validation ? 0x7e : 0x3e;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_DCCP:
++ code = no_validation ? 0x7e : 0x4e;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_IPSEC_AH:
++ code = no_validation ? 0x7e : 0x2e;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_IPSEC_ESP:
++ code = no_validation ? 0x7e : 0x6e;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_SHIM_1:
++ code = 0x70;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_SHIM_2:
++ code = 0x71;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_FROM_DFLT:
++ code = 0x10;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_FROM_FRAME_START:
++ code = 0x40;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_FROM_PARSE_RESULT:
++ code = 0x20;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_FROM_END_OF_PARSE:
++ code = 0x7f;
++ break;
++
++ case E_FMAN_KG_GEN_EXTRACT_FROM_FQID:
++ code = 0x20;
++ *offset += 0x20;
++ break;
++
++ default:
++ code = FM_KG_SCH_GEN_HT_INVALID;
++ }
++
++ return (uint8_t)code;
++}
++
++static uint32_t build_ar_scheme(uint8_t scheme,
++ uint8_t hwport_id,
++ bool update_counter,
++ bool write)
++{
++ uint32_t rw;
++
++ rw = (uint32_t)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ);
++
++ return (uint32_t)(FM_KG_KGAR_GO |
++ rw |
++ FM_KG_KGAR_SEL_SCHEME_ENTRY |
++ hwport_id |
++ ((uint32_t)scheme << FM_KG_KGAR_NUM_SHIFT) |
++ (update_counter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT : 0));
++}
++
++static uint32_t build_ar_cls_plan(uint8_t grp,
++ uint8_t entries_mask,
++ uint8_t hwport_id,
++ bool write)
++{
++ uint32_t rw;
++
++ rw = (uint32_t)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ);
++
++ return (uint32_t)(FM_KG_KGAR_GO |
++ rw |
++ FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
++ hwport_id |
++ ((uint32_t)grp << FM_KG_KGAR_NUM_SHIFT) |
++ ((uint32_t)entries_mask << FM_KG_KGAR_WSEL_SHIFT));
++}
++
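++/* Issue a KeyGen action-register command and busy-wait until the hardware
++ * clears the GO bit; returns -EINVAL if the access reports an error */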
++int fman_kg_write_ar_wait(struct fman_kg_regs *regs, uint32_t fmkg_ar)
++{
++ iowrite32be(fmkg_ar, &regs->fmkg_ar);
++ /* Wait for GO to be idle and read error */
++ while ((fmkg_ar = ioread32be(&regs->fmkg_ar)) & FM_KG_KGAR_GO) ;
++ if (fmkg_ar & FM_PCD_KG_KGAR_ERR)
++ return -EINVAL;
++ return 0;
++}
++
++void fman_kg_write_sp(struct fman_kg_regs *regs, uint32_t sp, bool add)
++{
++ struct fman_kg_pe_regs *kgpe_regs;
++ uint32_t tmp;
++
++ kgpe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
++ tmp = ioread32be(&kgpe_regs->fmkg_pe_sp);
++
++ if (add)
++ tmp |= sp;
++ else /* clear */
++ tmp &= ~sp;
++
++ iowrite32be(tmp, &kgpe_regs->fmkg_pe_sp);
++}
++
++void fman_kg_write_cpp(struct fman_kg_regs *regs, uint32_t cpp)
++{
++ struct fman_kg_pe_regs *kgpe_regs;
++
++ kgpe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
++
++ iowrite32be(cpp, &kgpe_regs->fmkg_pe_cpp);
++}
++
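++/* Read and acknowledge the pending KeyGen events and per-scheme event bits,
++ * masked by their enable registers; forced (test) events are removed from
++ * the force register once they have been observed */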
++void fman_kg_get_event(struct fman_kg_regs *regs,
++ uint32_t *event,
++ uint32_t *scheme_idx)
++{
++ uint32_t mask, force;
++
++ *event = ioread32be(&regs->fmkg_eer);
++ mask = ioread32be(&regs->fmkg_eeer);
++ *scheme_idx = ioread32be(&regs->fmkg_seer);
++ *scheme_idx &= ioread32be(&regs->fmkg_seeer);
++
++ *event &= mask;
++
++ /* clear the forced events */
++ force = ioread32be(&regs->fmkg_feer);
++ if (force & *event)
++		iowrite32be(force & ~*event, &regs->fmkg_feer);
++
++ iowrite32be(*event, &regs->fmkg_eer);
++ iowrite32be(*scheme_idx, &regs->fmkg_seer);
++}
++
++
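++/* One-time KeyGen initialization: configure exception events, clear the
++ * data offset and default value registers, program the default NIA and
++ * unbind every port from all schemes and classification plans */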
++void fman_kg_init(struct fman_kg_regs *regs,
++ uint32_t exceptions,
++ uint32_t dflt_nia)
++{
++ uint32_t tmp;
++ int i;
++
++ iowrite32be(FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW,
++ &regs->fmkg_eer);
++
++ tmp = 0;
++ if (exceptions & FM_EX_KG_DOUBLE_ECC)
++ tmp |= FM_EX_KG_DOUBLE_ECC;
++
++ if (exceptions & FM_EX_KG_KEYSIZE_OVERFLOW)
++ tmp |= FM_EX_KG_KEYSIZE_OVERFLOW;
++
++ iowrite32be(tmp, &regs->fmkg_eeer);
++ iowrite32be(0, &regs->fmkg_fdor);
++ iowrite32be(0, &regs->fmkg_gdv0r);
++ iowrite32be(0, &regs->fmkg_gdv1r);
++ iowrite32be(dflt_nia, &regs->fmkg_gcr);
++
++ /* Clear binding between ports to schemes and classification plans
++ * so that all ports are not bound to any scheme/classification plan */
++ for (i = 0; i < FMAN_MAX_NUM_OF_HW_PORTS; i++) {
++ clear_pe_all_scheme(regs, (uint8_t)i);
++ clear_pe_all_cls_plan(regs, (uint8_t)i);
++ }
++}
++
++void fman_kg_enable_scheme_interrupts(struct fman_kg_regs *regs)
++{
++	/* clear pending scheme events and enable all scheme interrupts */
++ iowrite32be(0xFFFFFFFF, &regs->fmkg_seer);
++ iowrite32be(0xFFFFFFFF, &regs->fmkg_seeer);
++}
++
++void fman_kg_enable(struct fman_kg_regs *regs)
++{
++ iowrite32be(ioread32be(&regs->fmkg_gcr) | FM_KG_KGGCR_EN,
++ &regs->fmkg_gcr);
++}
++
++void fman_kg_disable(struct fman_kg_regs *regs)
++{
++ iowrite32be(ioread32be(&regs->fmkg_gcr) & ~FM_KG_KGGCR_EN,
++ &regs->fmkg_gcr);
++}
++
++void fman_kg_set_data_after_prs(struct fman_kg_regs *regs, uint8_t offset)
++{
++ iowrite32be(offset, &regs->fmkg_fdor);
++}
++
++void fman_kg_set_dflt_val(struct fman_kg_regs *regs,
++ uint8_t def_id,
++ uint32_t val)
++{
++	if (def_id == 0)
++ iowrite32be(val, &regs->fmkg_gdv0r);
++ else
++ iowrite32be(val, &regs->fmkg_gdv1r);
++}
++
++
++void fman_kg_set_exception(struct fman_kg_regs *regs,
++ uint32_t exception,
++ bool enable)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->fmkg_eeer);
++
++ if (enable) {
++ tmp |= exception;
++ } else {
++ tmp &= ~exception;
++ }
++
++ iowrite32be(tmp, &regs->fmkg_eeer);
++}
++
++void fman_kg_get_exception(struct fman_kg_regs *regs,
++ uint32_t *events,
++ uint32_t *scheme_ids,
++ bool clear)
++{
++ uint32_t mask;
++
++ *events = ioread32be(&regs->fmkg_eer);
++ mask = ioread32be(&regs->fmkg_eeer);
++ *events &= mask;
++
++ *scheme_ids = 0;
++
++ if (*events & FM_EX_KG_KEYSIZE_OVERFLOW) {
++ *scheme_ids = ioread32be(&regs->fmkg_seer);
++ mask = ioread32be(&regs->fmkg_seeer);
++ *scheme_ids &= mask;
++ }
++
++ if (clear) {
++ iowrite32be(*scheme_ids, &regs->fmkg_seer);
++ iowrite32be(*events, &regs->fmkg_eer);
++ }
++}
++
++void fman_kg_get_capture(struct fman_kg_regs *regs,
++ struct fman_kg_ex_ecc_attr *ecc_attr,
++ bool clear)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->fmkg_serc);
++
++ if (tmp & KG_FMKG_SERC_CAP) {
++ /* Captured data is valid */
++ ecc_attr->valid = TRUE;
++ ecc_attr->double_ecc =
++ (bool)((tmp & KG_FMKG_SERC_CET) ? TRUE : FALSE);
++ ecc_attr->single_ecc_count =
++ (uint8_t)((tmp & KG_FMKG_SERC_CNT_MSK) >>
++ KG_FMKG_SERC_CNT_SHIFT);
++ ecc_attr->addr = (uint16_t)(tmp & KG_FMKG_SERC_ADDR_MSK);
++
++ if (clear)
++ iowrite32be(KG_FMKG_SERC_CAP, &regs->fmkg_serc);
++ } else {
++ /* No ECC error is captured */
++ ecc_attr->valid = FALSE;
++ }
++}
++
++int fman_kg_build_scheme(struct fman_kg_scheme_params *params,
++ struct fman_kg_scheme_regs *scheme_regs)
++{
++ struct fman_kg_extract_params *extract_params;
++ struct fman_kg_gen_extract_params *gen_params;
++ uint32_t tmp_reg, i, select, mask, fqb;
++ uint8_t offset, shift, ht;
++
++ /* Zero out all registers so no need to care about unused ones */
++ memset(scheme_regs, 0, sizeof(struct fman_kg_scheme_regs));
++
++ /* Mode register */
++ tmp_reg = fm_kg_build_nia(params->next_engine,
++ params->next_engine_action);
++ if (tmp_reg == KG_NIA_INVALID) {
++ return -EINVAL;
++ }
++
++ if (params->next_engine == E_FMAN_PCD_PLCR) {
++ tmp_reg |= FMAN_KG_SCH_MODE_NIA_PLCR;
++ }
++ else if (params->next_engine == E_FMAN_PCD_CC) {
++ tmp_reg |= (uint32_t)params->cc_params.base_offset <<
++ FMAN_KG_SCH_MODE_CCOBASE_SHIFT;
++ }
++
++ tmp_reg |= FMAN_KG_SCH_MODE_EN;
++ scheme_regs->kgse_mode = tmp_reg;
++
++ /* Match vector */
++ scheme_regs->kgse_mv = params->match_vector;
++
++ extract_params = &params->extract_params;
++
++ /* Scheme default values registers */
++ scheme_regs->kgse_dv0 = extract_params->def_scheme_0;
++ scheme_regs->kgse_dv1 = extract_params->def_scheme_1;
++
++ /* Extract Known Fields Command register */
++ scheme_regs->kgse_ekfc = extract_params->known_fields;
++
++ /* Entry Extract Known Default Value register */
++ tmp_reg = 0;
++ tmp_reg |= extract_params->known_fields_def.mac_addr <<
++ FMAN_KG_SCH_DEF_MAC_ADDR_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.vlan_tci <<
++ FMAN_KG_SCH_DEF_VLAN_TCI_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.etype <<
++ FMAN_KG_SCH_DEF_ETYPE_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.ppp_sid <<
++ FMAN_KG_SCH_DEF_PPP_SID_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.ppp_pid <<
++ FMAN_KG_SCH_DEF_PPP_PID_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.mpls <<
++ FMAN_KG_SCH_DEF_MPLS_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.ip_addr <<
++ FMAN_KG_SCH_DEF_IP_ADDR_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.ptype <<
++ FMAN_KG_SCH_DEF_PTYPE_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.ip_tos_tc <<
++ FMAN_KG_SCH_DEF_IP_TOS_TC_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.ipv6_fl <<
++ FMAN_KG_SCH_DEF_IPv6_FL_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.ipsec_spi <<
++ FMAN_KG_SCH_DEF_IPSEC_SPI_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.l4_port <<
++ FMAN_KG_SCH_DEF_L4_PORT_SHIFT;
++ tmp_reg |= extract_params->known_fields_def.tcp_flg <<
++ FMAN_KG_SCH_DEF_TCP_FLG_SHIFT;
++
++ scheme_regs->kgse_ekdv = tmp_reg;
++
++ /* Generic extract registers */
++ if (extract_params->gen_extract_num > FM_KG_NUM_OF_GENERIC_REGS) {
++ return -EINVAL;
++ }
++
++ for (i = 0; i < extract_params->gen_extract_num; i++) {
++ gen_params = extract_params->gen_extract + i;
++
++ tmp_reg = FMAN_KG_SCH_GEN_VALID;
++ tmp_reg |= (uint32_t)gen_params->def_val <<
++ FMAN_KG_SCH_GEN_DEF_SHIFT;
++
++ if (gen_params->type == E_FMAN_KG_HASH_EXTRACT) {
++ if ((gen_params->extract > FMAN_KG_SCH_GEN_SIZE_MAX) ||
++ (gen_params->extract == 0)) {
++ return -EINVAL;
++ }
++ } else {
++ tmp_reg |= FMAN_KG_SCH_GEN_OR;
++ }
++
++ tmp_reg |= (uint32_t)gen_params->extract <<
++ FMAN_KG_SCH_GEN_SIZE_SHIFT;
++ tmp_reg |= (uint32_t)gen_params->mask <<
++ FMAN_KG_SCH_GEN_MASK_SHIFT;
++
++ offset = gen_params->offset;
++ ht = get_gen_ht_code(gen_params->src,
++ gen_params->no_validation,
++ &offset);
++ tmp_reg |= (uint32_t)ht << FMAN_KG_SCH_GEN_HT_SHIFT;
++ tmp_reg |= offset;
++
++ scheme_regs->kgse_gec[i] = tmp_reg;
++ }
++
++ /* Masks registers */
++ if (extract_params->masks_num > FM_KG_EXTRACT_MASKS_NUM) {
++ return -EINVAL;
++ }
++
++ select = 0;
++ mask = 0;
++ fqb = 0;
++ for (i = 0; i < extract_params->masks_num; i++) {
++ /* MCSx fields */
++ KG_GET_MASK_SEL_SHIFT(shift, i);
++ if (extract_params->masks[i].is_known) {
++ /* Mask known field */
++ select |= extract_params->masks[i].field_or_gen_idx <<
++ shift;
++ } else {
++ /* Mask generic extract */
++ select |= (extract_params->masks[i].field_or_gen_idx +
++ FM_KG_MASK_SEL_GEN_BASE) << shift;
++ }
++
++ /* MOx fields - spread between se_bmch and se_fqb registers */
++ KG_GET_MASK_OFFSET_SHIFT(shift, i);
++ if (i < 2) {
++ select |= (uint32_t)extract_params->masks[i].offset <<
++ shift;
++ } else {
++ fqb |= (uint32_t)extract_params->masks[i].offset <<
++ shift;
++ }
++
++ /* BMx fields */
++ KG_GET_MASK_SHIFT(shift, i);
++ mask |= (uint32_t)extract_params->masks[i].mask << shift;
++ }
++
++ /* Finish with the rest of the BMx fields -
++ * don't mask bits for unused masks by setting the
++ * corresponding BMx field = 0xFF */
++ for (i = extract_params->masks_num; i < FM_KG_EXTRACT_MASKS_NUM; i++) {
++ KG_GET_MASK_SHIFT(shift, i);
++ mask |= 0xFF << shift;
++ }
++
++ scheme_regs->kgse_bmch = select;
++ scheme_regs->kgse_bmcl = mask;
++
++ /* Finish with the FQB register initialization.
++ * Check that the fqid is a 24-bit value. */
++ if (params->base_fqid & ~0x00FFFFFF) {
++ return -EINVAL;
++ }
++
++ fqb |= params->base_fqid;
++ scheme_regs->kgse_fqb = fqb;
++
++ /* Hash Configuration register */
++ tmp_reg = 0;
++ if (params->hash_params.use_hash) {
++ /* Check that the hash mask is a 24-bit value */
++ if (params->hash_params.mask & ~0x00FFFFFF) {
++ return -EINVAL;
++ }
++
++ /* The hash function produces a 64-bit value; 24 bits of it
++ * are used to generate the fq_id and policer profile.
++ * Thus, the maximal shift is 40 bits, leaving 24 bits out of 64.
++ */
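++ /* e.g. shift_r = 40 selects hash bits 63:40 as the 24-bit result (64 - 24 = 40). */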
++ if (params->hash_params.shift_r > FMAN_KG_SCH_HASH_HSHIFT_MAX) {
++ return -EINVAL;
++ }
++
++ tmp_reg |= params->hash_params.mask;
++ tmp_reg |= (uint32_t)params->hash_params.shift_r <<
++ FMAN_KG_SCH_HASH_HSHIFT_SHIFT;
++
++ if (params->hash_params.sym) {
++ tmp_reg |= FMAN_KG_SCH_HASH_SYM;
++ }
++
++ }
++
++ if (params->bypass_fqid_gen) {
++ tmp_reg |= FMAN_KG_SCH_HASH_NO_FQID_GEN;
++ }
++
++ scheme_regs->kgse_hc = tmp_reg;
++
++ /* Policer Profile register */
++ if (params->policer_params.bypass_pp_gen) {
++ tmp_reg = 0;
++ } else {
++ /* The lower 8 bits of the 24 bits extracted from the hash result
++ * are used for policer profile generation.
++ * That leaves a maximum shift value of 23. */
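++ /* e.g. shift = 16 moves hash bits 23:16 of the extracted field into the 8-bit profile value. */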
++ if (params->policer_params.shift > FMAN_KG_SCH_PP_SHIFT_MAX) {
++ return -EINVAL;
++ }
++
++ tmp_reg = params->policer_params.base;
++ tmp_reg |= ((uint32_t)params->policer_params.shift <<
++ FMAN_KG_SCH_PP_SH_SHIFT) &
++ FMAN_KG_SCH_PP_SH_MASK;
++ tmp_reg |= ((uint32_t)params->policer_params.shift <<
++ FMAN_KG_SCH_PP_SL_SHIFT) &
++ FMAN_KG_SCH_PP_SL_MASK;
++ tmp_reg |= (uint32_t)params->policer_params.mask <<
++ FMAN_KG_SCH_PP_MASK_SHIFT;
++ }
++
++ scheme_regs->kgse_ppc = tmp_reg;
++
++ /* Coarse Classification Bit Select register */
++ if (params->next_engine == E_FMAN_PCD_CC) {
++ scheme_regs->kgse_ccbs = params->cc_params.qlcv_bits_sel;
++ }
++
++ /* Packets Counter register */
++ if (params->update_counter) {
++ scheme_regs->kgse_spc = params->counter_value;
++ }
++
++ return 0;
++}
++
++int fman_kg_write_scheme(struct fman_kg_regs *regs,
++ uint8_t scheme_id,
++ uint8_t hwport_id,
++ struct fman_kg_scheme_regs *scheme_regs,
++ bool update_counter)
++{
++ struct fman_kg_scheme_regs *kgse_regs;
++ uint32_t tmp_reg;
++ int err, i;
++
++ /* Write indirect scheme registers */
++ kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);
++
++ iowrite32be(scheme_regs->kgse_mode, &kgse_regs->kgse_mode);
++ iowrite32be(scheme_regs->kgse_ekfc, &kgse_regs->kgse_ekfc);
++ iowrite32be(scheme_regs->kgse_ekdv, &kgse_regs->kgse_ekdv);
++ iowrite32be(scheme_regs->kgse_bmch, &kgse_regs->kgse_bmch);
++ iowrite32be(scheme_regs->kgse_bmcl, &kgse_regs->kgse_bmcl);
++ iowrite32be(scheme_regs->kgse_fqb, &kgse_regs->kgse_fqb);
++ iowrite32be(scheme_regs->kgse_hc, &kgse_regs->kgse_hc);
++ iowrite32be(scheme_regs->kgse_ppc, &kgse_regs->kgse_ppc);
++ iowrite32be(scheme_regs->kgse_spc, &kgse_regs->kgse_spc);
++ iowrite32be(scheme_regs->kgse_dv0, &kgse_regs->kgse_dv0);
++ iowrite32be(scheme_regs->kgse_dv1, &kgse_regs->kgse_dv1);
++ iowrite32be(scheme_regs->kgse_ccbs, &kgse_regs->kgse_ccbs);
++ iowrite32be(scheme_regs->kgse_mv, &kgse_regs->kgse_mv);
++
++ for (i = 0 ; i < FM_KG_NUM_OF_GENERIC_REGS ; i++)
++ iowrite32be(scheme_regs->kgse_gec[i], &kgse_regs->kgse_gec[i]);
++
++ /* Write AR (Action register) */
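++ /* The last build_ar_scheme() argument selects the access type: TRUE requests a write, FALSE a read (compare fman_kg_get_scheme_counter()). */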
++ tmp_reg = build_ar_scheme(scheme_id, hwport_id, update_counter, TRUE);
++ err = fman_kg_write_ar_wait(regs, tmp_reg);
++ return err;
++}
++
++int fman_kg_delete_scheme(struct fman_kg_regs *regs,
++ uint8_t scheme_id,
++ uint8_t hwport_id)
++{
++ struct fman_kg_scheme_regs *kgse_regs;
++ uint32_t tmp_reg;
++ int err, i;
++
++ kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);
++
++ /* Clear all registers including enable bit in mode register */
++ for (i = 0; i < (sizeof(struct fman_kg_scheme_regs)) / 4; ++i) {
++ iowrite32be(0, ((uint32_t *)kgse_regs + i));
++ }
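++ /* Writing zero to kgse_mode clears FMAN_KG_SCH_MODE_EN, so the AR write-back below disables the scheme. */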
++
++ /* Write AR (Action register) */
++ tmp_reg = build_ar_scheme(scheme_id, hwport_id, FALSE, TRUE);
++ err = fman_kg_write_ar_wait(regs, tmp_reg);
++ return err;
++}
++
++int fman_kg_get_scheme_counter(struct fman_kg_regs *regs,
++ uint8_t scheme_id,
++ uint8_t hwport_id,
++ uint32_t *counter)
++{
++ struct fman_kg_scheme_regs *kgse_regs;
++ uint32_t tmp_reg;
++ int err;
++
++ kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);
++
++ tmp_reg = build_ar_scheme(scheme_id, hwport_id, TRUE, FALSE);
++ err = fman_kg_write_ar_wait(regs, tmp_reg);
++
++ if (err != 0)
++ return err;
++
++ *counter = ioread32be(&kgse_regs->kgse_spc);
++
++ return 0;
++}
++
++int fman_kg_set_scheme_counter(struct fman_kg_regs *regs,
++ uint8_t scheme_id,
++ uint8_t hwport_id,
++ uint32_t counter)
++{
++ struct fman_kg_scheme_regs *kgse_regs;
++ uint32_t tmp_reg;
++ int err;
++
++ kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);
++
++ tmp_reg = build_ar_scheme(scheme_id, hwport_id, TRUE, FALSE);
++
++ err = fman_kg_write_ar_wait(regs, tmp_reg);
++ if (err != 0)
++ return err;
++
++ /* The keygen indirect access memory now contains all of the scheme's
++ * registers. Change only the counter value. */
++ iowrite32be(counter, &kgse_regs->kgse_spc);
++
++ /* Write back scheme registers */
++ tmp_reg = build_ar_scheme(scheme_id, hwport_id, TRUE, TRUE);
++ err = fman_kg_write_ar_wait(regs, tmp_reg);
++
++ return err;
++}
++
++uint32_t fman_kg_get_schemes_total_counter(struct fman_kg_regs *regs)
++{
++ return ioread32be(&regs->fmkg_tpc);
++}
++
++int fman_kg_build_cls_plan(struct fman_kg_cls_plan_params *params,
++ struct fman_kg_cp_regs *cls_plan_regs)
++{
++ uint8_t entries_set, entry_bit;
++ int i;
++
++ /* Zero out all of the group's registers */
++ memset(cls_plan_regs, 0, sizeof(struct fman_kg_cp_regs));
++
++ /* Go over all classification entries in params->entries_mask and
++ * configure the corresponding cpe register */
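++ /* The mask is scanned MSB-first; e.g. entries_mask = 0xA0 configures kgcpe[0] and kgcpe[2]. */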
++ entries_set = params->entries_mask;
++ for (i = 0; entries_set; i++) {
++ entry_bit = (uint8_t)(0x80 >> i);
++ if ((entry_bit & entries_set) == 0)
++ continue;
++ entries_set ^= entry_bit;
++ cls_plan_regs->kgcpe[i] = params->mask_vector[i];
++ }
++
++ return 0;
++}
++
++int fman_kg_write_cls_plan(struct fman_kg_regs *regs,
++ uint8_t grp_id,
++ uint8_t entries_mask,
++ uint8_t hwport_id,
++ struct fman_kg_cp_regs *cls_plan_regs)
++{
++ struct fman_kg_cp_regs *kgcpe_regs;
++ uint32_t tmp_reg;
++ int i, err;
++
++ /* Check group index is valid and the group isn't empty */
++ if (grp_id >= FM_KG_CLS_PLAN_GRPS_NUM)
++ return -EINVAL;
++
++ /* Write indirect classification plan registers */
++ kgcpe_regs = (struct fman_kg_cp_regs *)&(regs->fmkg_indirect[0]);
++
++ for (i = 0; i < FM_KG_NUM_CLS_PLAN_ENTR; i++) {
++ iowrite32be(cls_plan_regs->kgcpe[i], &kgcpe_regs->kgcpe[i]);
++ }
++
++ tmp_reg = build_ar_cls_plan(grp_id, entries_mask, hwport_id, TRUE);
++ err = fman_kg_write_ar_wait(regs, tmp_reg);
++ return err;
++}
++
++int fman_kg_write_bind_schemes(struct fman_kg_regs *regs,
++ uint8_t hwport_id,
++ uint32_t schemes)
++{
++ struct fman_kg_pe_regs *kg_pe_regs;
++ uint32_t tmp_reg;
++ int err;
++
++ kg_pe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
++
++ iowrite32be(schemes, &kg_pe_regs->fmkg_pe_sp);
++
++ tmp_reg = build_ar_bind_scheme(hwport_id, TRUE);
++ err = fman_kg_write_ar_wait(regs, tmp_reg);
++ return err;
++}
++
++int fman_kg_build_bind_cls_plans(uint8_t grp_base,
++ uint8_t grp_mask,
++ uint32_t *bind_cls_plans)
++{
++ /* Check that grp_base and grp_mask are 5-bit values */
++ if ((grp_base & ~0x0000001F) || (grp_mask & ~0x0000001F))
++ return -EINVAL;
++
++ *bind_cls_plans = (uint32_t) ((grp_mask << FMAN_KG_PE_CPP_MASK_SHIFT) | grp_base);
++ return 0;
++}
++
++
++int fman_kg_write_bind_cls_plans(struct fman_kg_regs *regs,
++ uint8_t hwport_id,
++ uint32_t bind_cls_plans)
++{
++ struct fman_kg_pe_regs *kg_pe_regs;
++ uint32_t tmp_reg;
++ int err;
++
++ kg_pe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
++
++ iowrite32be(bind_cls_plans, &kg_pe_regs->fmkg_pe_cpp);
++
++ tmp_reg = build_ar_bind_cls_plan(hwport_id, TRUE);
++ err = fman_kg_write_ar_wait(regs, tmp_reg);
++ return err;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_prs.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_prs.c
+new file mode 100644
+index 00000000..108779db
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_prs.c
+@@ -0,0 +1,129 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "fsl_fman_prs.h"
++
++uint32_t fman_prs_get_err_event(struct fman_prs_regs *regs, uint32_t ev_mask)
++{
++ return ioread32be(&regs->fmpr_perr) & ev_mask;
++}
++
++uint32_t fman_prs_get_err_ev_mask(struct fman_prs_regs *regs)
++{
++ return ioread32be(&regs->fmpr_perer);
++}
++
++void fman_prs_ack_err_event(struct fman_prs_regs *regs, uint32_t event)
++{
++ iowrite32be(event, &regs->fmpr_perr);
++}
++
++uint32_t fman_prs_get_expt_event(struct fman_prs_regs *regs, uint32_t ev_mask)
++{
++ return ioread32be(&regs->fmpr_pevr) & ev_mask;
++}
++
++uint32_t fman_prs_get_expt_ev_mask(struct fman_prs_regs *regs)
++{
++ return ioread32be(&regs->fmpr_pever);
++}
++
++void fman_prs_ack_expt_event(struct fman_prs_regs *regs, uint32_t event)
++{
++ iowrite32be(event, &regs->fmpr_pevr);
++}
++
++void fman_prs_defconfig(struct fman_prs_cfg *cfg)
++{
++ cfg->port_id_stat = 0;
++ cfg->max_prs_cyc_lim = DEFAULT_MAX_PRS_CYC_LIM;
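++ /* 0x03000000 presumably corresponds to the two parser ECC exception bits (single and double ECC). */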
++ cfg->prs_exceptions = 0x03000000;
++}
++
++int fman_prs_init(struct fman_prs_regs *regs, struct fman_prs_cfg *cfg)
++{
++ uint32_t tmp;
++
++ iowrite32be(cfg->max_prs_cyc_lim, &regs->fmpr_rpclim);
++ iowrite32be((FM_PCD_PRS_SINGLE_ECC | FM_PCD_PRS_PORT_IDLE_STS),
++ &regs->fmpr_pevr);
++
++ if (cfg->prs_exceptions & FM_PCD_EX_PRS_SINGLE_ECC)
++ iowrite32be(FM_PCD_PRS_SINGLE_ECC, &regs->fmpr_pever);
++ else
++ iowrite32be(0, &regs->fmpr_pever);
++
++ iowrite32be(FM_PCD_PRS_DOUBLE_ECC, &regs->fmpr_perr);
++
++ tmp = 0;
++ if (cfg->prs_exceptions & FM_PCD_EX_PRS_DOUBLE_ECC)
++ tmp |= FM_PCD_PRS_DOUBLE_ECC;
++ iowrite32be(tmp, &regs->fmpr_perer);
++
++ iowrite32be(cfg->port_id_stat, &regs->fmpr_ppsc);
++
++ return 0;
++}
++
++void fman_prs_enable(struct fman_prs_regs *regs)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->fmpr_rpimac) | FM_PCD_PRS_RPIMAC_EN;
++ iowrite32be(tmp, &regs->fmpr_rpimac);
++}
++
++void fman_prs_disable(struct fman_prs_regs *regs)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&regs->fmpr_rpimac) & ~FM_PCD_PRS_RPIMAC_EN;
++ iowrite32be(tmp, &regs->fmpr_rpimac);
++}
++
++int fman_prs_is_enabled(struct fman_prs_regs *regs)
++{
++ return ioread32be(&regs->fmpr_rpimac) & FM_PCD_PRS_RPIMAC_EN;
++}
++
++void fman_prs_set_stst_port_msk(struct fman_prs_regs *regs, uint32_t pid_msk)
++{
++ iowrite32be(pid_msk, &regs->fmpr_ppsc);
++}
++
++void fman_prs_set_stst(struct fman_prs_regs *regs, bool enable)
++{
++ if (enable)
++ iowrite32be(FM_PCD_PRS_PPSC_ALL_PORTS, &regs->fmpr_ppsc);
++ else
++ iowrite32be(0, &regs->fmpr_ppsc);
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/Makefile b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/Makefile
+new file mode 100644
+index 00000000..7d928e0a
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/Makefile
+@@ -0,0 +1,15 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
++
++ccflags-y += -I$(NCSW_FM_INC)
++
++obj-y += fsl-ncsw-Pcd.o
++
++fsl-ncsw-Pcd-objs := fm_port.o fm_port_im.o fman_port.o
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.c
+new file mode 100644
+index 00000000..ec6e0ed5
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.c
+@@ -0,0 +1,6436 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_port.c
++
++ @Description FM driver routines implementation.
++ *//***************************************************************************/
++#include "error_ext.h"
++#include "std_ext.h"
++#include "string_ext.h"
++#include "sprint_ext.h"
++#include "debug_ext.h"
++#include "fm_muram_ext.h"
++
++#include "fman_common.h"
++#include "fm_port.h"
++#include "fm_port_dsar.h"
++#include "common/general.h"
++
++/****************************************/
++/* static functions */
++/****************************************/
++static t_Error FmPortConfigAutoResForDeepSleepSupport1(t_FmPort *p_FmPort);
++
++static t_Error CheckInitParameters(t_FmPort *p_FmPort)
++{
++ t_FmPortDriverParam *p_Params = p_FmPort->p_FmPortDriverParam;
++ struct fman_port_cfg *p_DfltConfig = &p_Params->dfltCfg;
++ t_Error ans = E_OK;
++ uint32_t unusedMask;
++
++ if (p_FmPort->imEn)
++ {
++ if (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ if (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
++ > 2)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("fifoDeqPipelineDepth for IM 10G can't be larger than 2"));
++
++ if ((ans = FmPortImCheckInitParameters(p_FmPort)) != E_OK)
++ return ERROR_CODE(ans);
++ }
++ else
++ {
++ /****************************************/
++ /* Rx only */
++ /****************************************/
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ /* external buffer pools */
++ if (!p_Params->extBufPools.numOfPoolsUsed)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("extBufPools.numOfPoolsUsed=0. At least one buffer pool must be defined"));
++
++ if (FmSpCheckBufPoolsParams(&p_Params->extBufPools,
++ p_Params->p_BackupBmPools,
++ &p_Params->bufPoolDepletion) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++
++ /* Check that the part of the IC that needs copying is small enough to fit in the start margin */
++ if (p_Params->intContext.size
++ && (p_Params->intContext.size
++ + p_Params->intContext.extBufOffset
++ > p_Params->bufMargins.startMargins))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("intContext.size is larger than start margins"));
++
++ if ((p_Params->liodnOffset != (uint16_t)DPAA_LIODN_DONT_OVERRIDE)
++ && (p_Params->liodnOffset & ~FM_LIODN_OFFSET_MASK))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("liodnOffset is larger than %d", FM_LIODN_OFFSET_MASK+1));
++
++#ifdef FM_NO_BACKUP_POOLS
++ if ((p_FmPort->fmRevInfo.majorRev != 4) && (p_FmPort->fmRevInfo.majorRev < 6))
++ if (p_FmPort->p_FmPortDriverParam->p_BackupBmPools)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("BackupBmPools"));
++#endif /* FM_NO_BACKUP_POOLS */
++ }
++
++ /****************************************/
++ /* Non Rx ports */
++ /****************************************/
++ else
++ {
++ if (p_Params->deqSubPortal >= FM_MAX_NUM_OF_SUB_PORTALS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ (" deqSubPortal has to be in the range of 0 - %d", FM_MAX_NUM_OF_SUB_PORTALS));
++
++ /* to protect HW internal-context from overwrite */
++ if ((p_Params->intContext.size)
++ && (p_Params->intContext.intContextOffset
++ < MIN_TX_INT_OFFSET))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("non-Rx intContext.intContextOffset can't be smaller than %d", MIN_TX_INT_OFFSET));
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
++ /* in O/H DEFAULT_notSupported indicates that it is not supported and should not be checked */
++ || (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
++ != DEFAULT_notSupported))
++ {
++ /* Check that it is non-zero and not larger than 8 */
++ if ((!p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth)
++ || (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
++ > MAX_FIFO_PIPELINE_DEPTH))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("fifoDeqPipelineDepth can't be larger than %d", MAX_FIFO_PIPELINE_DEPTH));
++ }
++ }
++
++ /****************************************/
++ /* Rx Or Offline Parsing */
++ /****************************************/
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ {
++ if (!p_Params->dfltFqid)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("dfltFqid must be between 1 and 2^24-1"));
++#if defined(FM_CAPWAP_SUPPORT) && defined(FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004)
++ if (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.manipExtraSpace % 16)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufferPrefixContent.manipExtraSpace has to be devidable by 16"));
++#endif /* defined(FM_CAPWAP_SUPPORT) && ... */
++ }
++
++ /****************************************/
++ /* All ports */
++ /****************************************/
++ /* common BMI registers values */
++ /* Check that the queue id fits in 24 bits and is not 0 */
++ if ((p_Params->errFqid & ~0x00FFFFFF) || !p_Params->errFqid)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("errFqid must be between 1 and 2^24-1"));
++ if (p_Params->dfltFqid & ~0x00FFFFFF)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("dfltFqid must be between 1 and 2^24-1"));
++ }
++
++ /****************************************/
++ /* Rx only */
++ /****************************************/
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ if (p_DfltConfig->rx_pri_elevation % BMI_FIFO_UNITS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("rxFifoPriElevationLevel has to be divisible by %d", BMI_FIFO_UNITS));
++ if ((p_DfltConfig->rx_pri_elevation < BMI_FIFO_UNITS)
++ || (p_DfltConfig->rx_pri_elevation > MAX_PORT_FIFO_SIZE))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("rxFifoPriElevationLevel has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
++ if (p_DfltConfig->rx_fifo_thr % BMI_FIFO_UNITS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("rxFifoThreshold has to be divisible by %d", BMI_FIFO_UNITS));
++ if ((p_DfltConfig->rx_fifo_thr < BMI_FIFO_UNITS)
++ || (p_DfltConfig->rx_fifo_thr > MAX_PORT_FIFO_SIZE))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("rxFifoThreshold has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
++
++ /* Check that it is not larger than 16 */
++ if (p_DfltConfig->rx_cut_end_bytes > FRAME_END_DATA_SIZE)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("cutBytesFromEnd can't be larger than %d", FRAME_END_DATA_SIZE));
++
++ if (FmSpCheckBufMargins(&p_Params->bufMargins) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++
++ /* extra FIFO size (allowed only for Rx ports) */
++ if (p_Params->setSizeOfFifo
++ && (p_FmPort->fifoBufs.extra % BMI_FIFO_UNITS))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("fifoBufs.extra has to be divisible by %d", BMI_FIFO_UNITS));
++
++ if (p_Params->bufPoolDepletion.poolsGrpModeEnable
++ && !p_Params->bufPoolDepletion.numOfPools)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("bufPoolDepletion.numOfPools can not be 0 when poolsGrpModeEnable=TRUE"));
++#ifdef FM_CSI_CFED_LIMIT
++ if (p_FmPort->fmRevInfo.majorRev == 4)
++ {
++ /* Check that it is not larger than 16 */
++ if (p_DfltConfig->rx_cut_end_bytes + p_DfltConfig->checksum_bytes_ignore > FRAME_END_DATA_SIZE)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("cheksumLastBytesIgnore + cutBytesFromEnd can't be larger than %d", FRAME_END_DATA_SIZE));
++ }
++#endif /* FM_CSI_CFED_LIMIT */
++ }
++
++ /****************************************/
++ /* Non Rx ports */
++ /****************************************/
++ /* extra FIFO size (allowed only for Rx ports) */
++ else
++ if (p_FmPort->fifoBufs.extra)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ (" No fifoBufs.extra for non Rx ports"));
++
++ /****************************************/
++ /* Tx only */
++ /****************************************/
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G))
++ {
++ if (p_DfltConfig->tx_fifo_min_level % BMI_FIFO_UNITS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("txFifoMinFillLevel has to be divisible by %d", BMI_FIFO_UNITS));
++ if (p_DfltConfig->tx_fifo_min_level > (MAX_PORT_FIFO_SIZE - 256))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("txFifoMinFillLevel has to be in the range of 0 - %d", (MAX_PORT_FIFO_SIZE - 256)));
++ if (p_DfltConfig->tx_fifo_low_comf_level % BMI_FIFO_UNITS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("txFifoLowComfLevel has to be divisible by %d", BMI_FIFO_UNITS));
++ if ((p_DfltConfig->tx_fifo_low_comf_level < BMI_FIFO_UNITS)
++ || (p_DfltConfig->tx_fifo_low_comf_level > MAX_PORT_FIFO_SIZE))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("txFifoLowComfLevel has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
++
++ if (p_FmPort->portType == e_FM_PORT_TYPE_TX)
++ if (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
++ > 2)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("fifoDeqPipelineDepth for 1G can't be larger than 2"));
++ }
++
++ /****************************************/
++ /* Non Tx Ports */
++ /****************************************/
++ /* If discard override was selected, no frames may be discarded. */
++ else
++ if (p_DfltConfig->discard_override && p_Params->errorsToDiscard)
++ RETURN_ERROR(
++ MAJOR,
++ E_CONFLICT,
++ ("errorsToDiscard is not empty, but frmDiscardOverride selected (all discarded frames to be enqueued to error queue)."));
++
++ /****************************************/
++ /* Rx and Offline parsing */
++ /****************************************/
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ {
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ unusedMask = BMI_STATUS_OP_MASK_UNUSED;
++ else
++ unusedMask = BMI_STATUS_RX_MASK_UNUSED;
++
++ /* Check that no common bits with BMI_STATUS_MASK_UNUSED */
++ if (p_Params->errorsToDiscard & unusedMask)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
++ ("errorsToDiscard contains undefined bits"));
++ }
++
++ /****************************************/
++ /* Offline Ports */
++ /****************************************/
++#ifdef FM_OP_OPEN_DMA_MIN_LIMIT
++ if ((p_FmPort->fmRevInfo.majorRev >= 6)
++ && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ && p_Params->setNumOfOpenDmas
++ && (p_FmPort->openDmas.num < MIN_NUM_OF_OP_DMAS))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For Offline port, openDmas.num can't be smaller than %d", MIN_NUM_OF_OP_DMAS));
++#endif /* FM_OP_OPEN_DMA_MIN_LIMIT */
++
++ /****************************************/
++ /* Offline & HC Ports */
++ /****************************************/
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
++ {
++#ifndef FM_FRAME_END_PARAMS_FOR_OP
++ if ((p_FmPort->fmRevInfo.majorRev < 6) &&
++ (p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore != DEFAULT_notSupported))
++ /* this indicates that the user called a config routine for a mode that is not supported in this integration */
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("cheksumLastBytesIgnore is available for Rx & Tx ports only"));
++#endif /* !FM_FRAME_END_PARAMS_FOR_OP */
++
++#ifndef FM_DEQ_PIPELINE_PARAMS_FOR_OP
++ if ((!((p_FmPort->fmRevInfo.majorRev == 4) ||
++ (p_FmPort->fmRevInfo.majorRev >= 6))) &&
++ (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth != DEFAULT_notSupported))
++ /* this indicates that the user called a config routine for a mode that is not supported in this integration */
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("fifoDeqPipelineDepth is available for Tx ports only"));
++#endif /* !FM_DEQ_PIPELINE_PARAMS_FOR_OP */
++ }
++
++ /****************************************/
++ /* All ports */
++ /****************************************/
++ /* Check that it is not larger than 16 */
++ if ((p_Params->cheksumLastBytesIgnore > FRAME_END_DATA_SIZE)
++ && ((p_Params->cheksumLastBytesIgnore != DEFAULT_notSupported)))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("cheksumLastBytesIgnore can't be larger than %d", FRAME_END_DATA_SIZE));
++
++ if (FmSpCheckIntContextParams(&p_Params->intContext) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++
++ /* common BMI registers values */
++ if (p_Params->setNumOfTasks
++ && ((!p_FmPort->tasks.num)
++ || (p_FmPort->tasks.num > MAX_NUM_OF_TASKS)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("tasks.num can't be larger than %d", MAX_NUM_OF_TASKS));
++ if (p_Params->setNumOfTasks
++ && (p_FmPort->tasks.extra > MAX_NUM_OF_EXTRA_TASKS))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("tasks.extra can't be larger than %d", MAX_NUM_OF_EXTRA_TASKS));
++ if (p_Params->setNumOfOpenDmas
++ && ((!p_FmPort->openDmas.num)
++ || (p_FmPort->openDmas.num > MAX_NUM_OF_DMAS)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("openDmas.num can't be larger than %d", MAX_NUM_OF_DMAS));
++ if (p_Params->setNumOfOpenDmas
++ && (p_FmPort->openDmas.extra > MAX_NUM_OF_EXTRA_DMAS))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("openDmas.extra can't be larger than %d", MAX_NUM_OF_EXTRA_DMAS));
++ if (p_Params->setSizeOfFifo
++ && (!p_FmPort->fifoBufs.num
++ || (p_FmPort->fifoBufs.num > MAX_PORT_FIFO_SIZE)))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("fifoBufs.num has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
++ if (p_Params->setSizeOfFifo && (p_FmPort->fifoBufs.num % BMI_FIFO_UNITS))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("fifoBufs.num has to be divisible by %d", BMI_FIFO_UNITS));
++
++#ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
++ if (p_FmPort->fmRevInfo.majorRev == 4)
++ if (p_FmPort->p_FmPortDriverParam->deqPrefetchOption != DEFAULT_notSupported)
++ /* this indicates that the user called a config routine for a mode that is not supported in this integration */
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("deqPrefetchOption"));
++#endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
++
++ return E_OK;
++}
++
++static t_Error VerifySizeOfFifo(t_FmPort *p_FmPort)
++{
++ uint32_t minFifoSizeRequired = 0, optFifoSizeForB2B = 0;
++
++ /*************************/
++ /* TX PORTS */
++ /*************************/
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G))
++ {
++ minFifoSizeRequired =
++ (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS)
++ + (3 * BMI_FIFO_UNITS));
++ if (!p_FmPort->imEn)
++ minFifoSizeRequired +=
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
++ * BMI_FIFO_UNITS;
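++ /* e.g. assuming BMI_FIFO_UNITS is 256 and maxFrameLength is 1518: ROUND_UP yields 1536, so the base requirement is 1536 + 3 * 256 = 2304 bytes. */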
++
++ optFifoSizeForB2B = minFifoSizeRequired;
++
++ /* Add some margin for back-to-back capability to improve performance;
++ this allows the hardware to pipeline a new frame DMA while the previous
++ frame has not yet been transmitted. */
++ if (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
++ optFifoSizeForB2B += 3 * BMI_FIFO_UNITS;
++ else
++ optFifoSizeForB2B += 2 * BMI_FIFO_UNITS;
++ }
++
++ /*************************/
++ /* RX IM PORTS */
++ /*************************/
++ else
++ if (((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ && p_FmPort->imEn)
++ {
++ optFifoSizeForB2B =
++ minFifoSizeRequired =
++ (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS)
++ + (4 * BMI_FIFO_UNITS));
++ }
++
++ /*************************/
++ /* RX non-IM PORTS */
++ /*************************/
++ else
++ if (((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ && !p_FmPort->imEn)
++ {
++ if (p_FmPort->fmRevInfo.majorRev == 4)
++ {
++ if (p_FmPort->rxPoolsParams.numOfPools == 1)
++ minFifoSizeRequired = 8 * BMI_FIFO_UNITS;
++ else
++ minFifoSizeRequired =
++ (uint32_t)(ROUND_UP(p_FmPort->rxPoolsParams.secondLargestBufSize, BMI_FIFO_UNITS)
++ + (7 * BMI_FIFO_UNITS));
++ }
++ else
++ {
++#if (DPAA_VERSION >= 11)
++ minFifoSizeRequired =
++ (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS)
++ + (5 * BMI_FIFO_UNITS));
++ /* 4 according to spec + 1 for FOF>0 */
++#else
++ minFifoSizeRequired = (uint32_t)
++ (ROUND_UP(MIN(p_FmPort->maxFrameLength, p_FmPort->rxPoolsParams.largestBufSize), BMI_FIFO_UNITS)
++ + (7*BMI_FIFO_UNITS));
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++ optFifoSizeForB2B = minFifoSizeRequired;
++
++ /* Add some margin for back-to-back capability to improve performance;
++ this allows the hardware to pipeline a new frame DMA while the previous
++ frame has not yet been transmitted. */
++ if (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ optFifoSizeForB2B += 8 * BMI_FIFO_UNITS;
++ else
++ optFifoSizeForB2B += 3 * BMI_FIFO_UNITS;
++ }
++
++ /* For O/H ports, check fifo size and update if necessary */
++ else
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
++ {
++#if (DPAA_VERSION >= 11)
++ optFifoSizeForB2B =
++ minFifoSizeRequired =
++ (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS)
++ + ((p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
++ + 5) * BMI_FIFO_UNITS));
++ /* 4 according to spec + 1 for FOF>0 */
++#else
++ optFifoSizeForB2B = minFifoSizeRequired = (uint32_t)((p_FmPort->tasks.num + 2) * BMI_FIFO_UNITS);
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++ ASSERT_COND(minFifoSizeRequired > 0);
++ ASSERT_COND(optFifoSizeForB2B >= minFifoSizeRequired);
++
++ /* Verify the size */
++ if (p_FmPort->fifoBufs.num < minFifoSizeRequired)
++ DBG(INFO,
++ ("FIFO size is %d and should be enlarged to %d bytes",p_FmPort->fifoBufs.num, minFifoSizeRequired));
++ else if (p_FmPort->fifoBufs.num < optFifoSizeForB2B)
++ DBG(INFO,
++ ("For back-to-back frames processing, FIFO size is %d and needs to enlarge to %d bytes", p_FmPort->fifoBufs.num, optFifoSizeForB2B));
++
++ return E_OK;
++}
++
++static void FmPortDriverParamFree(t_FmPort *p_FmPort)
++{
++ if (p_FmPort->p_FmPortDriverParam)
++ {
++ XX_Free(p_FmPort->p_FmPortDriverParam);
++ p_FmPort->p_FmPortDriverParam = NULL;
++ }
++}
++
++static t_Error SetExtBufferPools(t_FmPort *p_FmPort)
++{
++ t_FmExtPools *p_ExtBufPools = &p_FmPort->p_FmPortDriverParam->extBufPools;
++ t_FmBufPoolDepletion *p_BufPoolDepletion =
++ &p_FmPort->p_FmPortDriverParam->bufPoolDepletion;
++ uint8_t orderedArray[FM_PORT_MAX_NUM_OF_EXT_POOLS];
++ uint16_t sizesArray[BM_MAX_NUM_OF_POOLS];
++ int i = 0, j = 0, err;
++ struct fman_port_bpools bpools;
++
++ memset(&orderedArray, 0, sizeof(uint8_t) * FM_PORT_MAX_NUM_OF_EXT_POOLS);
++ memset(&sizesArray, 0, sizeof(uint16_t) * BM_MAX_NUM_OF_POOLS);
++ memcpy(&p_FmPort->extBufPools, p_ExtBufPools, sizeof(t_FmExtPools));
++
++ FmSpSetBufPoolsInAscOrderOfBufSizes(p_ExtBufPools, orderedArray,
++ sizesArray);
++
++ /* Prepare flibs bpools structure */
++ memset(&bpools, 0, sizeof(struct fman_port_bpools));
++ bpools.count = p_ExtBufPools->numOfPoolsUsed;
++ bpools.counters_enable = TRUE;
++ for (i = 0; i < p_ExtBufPools->numOfPoolsUsed; i++)
++ {
++ bpools.bpool[i].bpid = orderedArray[i];
++ bpools.bpool[i].size = sizesArray[orderedArray[i]];
++ /* functionality available only for some derivatives (limited by config) */
++ if (p_FmPort->p_FmPortDriverParam->p_BackupBmPools)
++ for (j = 0;
++ j
++ < p_FmPort->p_FmPortDriverParam->p_BackupBmPools->numOfBackupPools;
++ j++)
++ if (orderedArray[i]
++ == p_FmPort->p_FmPortDriverParam->p_BackupBmPools->poolIds[j])
++ {
++ bpools.bpool[i].is_backup = TRUE;
++ break;
++ }
++ }
++
++ /* save pools parameters for later use */
++ p_FmPort->rxPoolsParams.numOfPools = p_ExtBufPools->numOfPoolsUsed;
++ p_FmPort->rxPoolsParams.largestBufSize =
++ sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed - 1]];
++ p_FmPort->rxPoolsParams.secondLargestBufSize =
++ sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed - 2]];
++
++ /* FMBM_RMPD reg. - pool depletion */
++ if (p_BufPoolDepletion->poolsGrpModeEnable)
++ {
++ bpools.grp_bp_depleted_num = p_BufPoolDepletion->numOfPools;
++ for (i = 0; i < BM_MAX_NUM_OF_POOLS; i++)
++ {
++ if (p_BufPoolDepletion->poolsToConsider[i])
++ {
++ for (j = 0; j < p_ExtBufPools->numOfPoolsUsed; j++)
++ {
++ if (i == orderedArray[j])
++ {
++ bpools.bpool[j].grp_bp_depleted = TRUE;
++ break;
++ }
++ }
++ }
++ }
++ }
++
++ if (p_BufPoolDepletion->singlePoolModeEnable)
++ {
++ for (i = 0; i < BM_MAX_NUM_OF_POOLS; i++)
++ {
++ if (p_BufPoolDepletion->poolsToConsiderForSingleMode[i])
++ {
++ for (j = 0; j < p_ExtBufPools->numOfPoolsUsed; j++)
++ {
++ if (i == orderedArray[j])
++ {
++ bpools.bpool[j].single_bp_depleted = TRUE;
++ break;
++ }
++ }
++ }
++ }
++ }
++
++#if (DPAA_VERSION >= 11)
++ /* fill QbbPEV */
++ if (p_BufPoolDepletion->poolsGrpModeEnable
++ || p_BufPoolDepletion->singlePoolModeEnable)
++ {
++ for (i = 0; i < FM_MAX_NUM_OF_PFC_PRIORITIES; i++)
++ {
++ if (p_BufPoolDepletion->pfcPrioritiesEn[i] == TRUE)
++ {
++ bpools.bpool[i].pfc_priorities_en = TRUE;
++ }
++ }
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ /* Issue flibs function */
++ err = fman_port_set_bpools(&p_FmPort->port, &bpools);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_bpools"));
++
++ if (p_FmPort->p_FmPortDriverParam->p_BackupBmPools)
++ XX_Free(p_FmPort->p_FmPortDriverParam->p_BackupBmPools);
++
++ return E_OK;
++}
++
++static t_Error ClearPerfCnts(t_FmPort *p_FmPort)
++{
++ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL, 0);
++ FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL, 0);
++ FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL, 0);
++ FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL, 0);
++ return E_OK;
++}
++
++static t_Error InitLowLevelDriver(t_FmPort *p_FmPort)
++{
++ t_FmPortDriverParam *p_DriverParams = p_FmPort->p_FmPortDriverParam;
++ struct fman_port_params portParams;
++ uint32_t tmpVal;
++ t_Error err;
++
++ /* Set up flibs parameters and issue init function */
++
++ memset(&portParams, 0, sizeof(struct fman_port_params));
++ portParams.discard_mask = p_DriverParams->errorsToDiscard;
++ portParams.dflt_fqid = p_DriverParams->dfltFqid;
++ portParams.err_fqid = p_DriverParams->errFqid;
++ portParams.deq_sp = p_DriverParams->deqSubPortal;
++ portParams.dont_release_buf = p_DriverParams->dontReleaseBuf;
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ portParams.err_mask = (RX_ERRS_TO_ENQ & ~portParams.discard_mask);
++ if (!p_FmPort->imEn)
++ {
++ if (p_DriverParams->forwardReuseIntContext)
++ p_DriverParams->dfltCfg.rx_fd_bits =
++ (uint8_t)(BMI_PORT_RFNE_FRWD_RPD >> 24);
++ }
++ break;
++
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ portParams.err_mask = (OP_ERRS_TO_ENQ & ~portParams.discard_mask);
++ break;
++
++ default:
++ break;
++ }
++
++ tmpVal =
++ (uint32_t)(
++ (p_FmPort->internalBufferOffset % OFFSET_UNITS) ? (p_FmPort->internalBufferOffset
++ / OFFSET_UNITS + 1) :
++ (p_FmPort->internalBufferOffset / OFFSET_UNITS));
++ p_FmPort->internalBufferOffset = (uint8_t)(tmpVal * OFFSET_UNITS);
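++ /* i.e. internalBufferOffset has been rounded up to the nearest multiple of OFFSET_UNITS. */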
++ p_DriverParams->dfltCfg.int_buf_start_margin =
++ p_FmPort->internalBufferOffset;
++
++ p_DriverParams->dfltCfg.ext_buf_start_margin =
++ p_DriverParams->bufMargins.startMargins;
++ p_DriverParams->dfltCfg.ext_buf_end_margin =
++ p_DriverParams->bufMargins.endMargins;
++
++ p_DriverParams->dfltCfg.ic_ext_offset =
++ p_DriverParams->intContext.extBufOffset;
++ p_DriverParams->dfltCfg.ic_int_offset =
++ p_DriverParams->intContext.intContextOffset;
++ p_DriverParams->dfltCfg.ic_size = p_DriverParams->intContext.size;
++
++ p_DriverParams->dfltCfg.stats_counters_enable = TRUE;
++ p_DriverParams->dfltCfg.perf_counters_enable = TRUE;
++ p_DriverParams->dfltCfg.queue_counters_enable = TRUE;
++
++ p_DriverParams->dfltCfg.perf_cnt_params.task_val =
++ (uint8_t)p_FmPort->tasks.num;
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING ||
++ p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
++ p_DriverParams->dfltCfg.perf_cnt_params.queue_val = 0;
++ else
++ p_DriverParams->dfltCfg.perf_cnt_params.queue_val = 1;
++ p_DriverParams->dfltCfg.perf_cnt_params.dma_val =
++ (uint8_t)p_FmPort->openDmas.num;
++ p_DriverParams->dfltCfg.perf_cnt_params.fifo_val = p_FmPort->fifoBufs.num;
++
++ if (0
++ != fman_port_init(&p_FmPort->port, &p_DriverParams->dfltCfg,
++ &portParams))
++ RETURN_ERROR(MAJOR, E_NO_DEVICE, ("fman_port_init"));
++
++ if (p_FmPort->imEn && ((err = FmPortImInit(p_FmPort)) != E_OK))
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ else
++ {
++ /* from QMIInit */
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ {
++ if (p_DriverParams->deqPrefetchOption == e_FM_PORT_DEQ_NO_PREFETCH)
++ FmSetPortPreFetchConfiguration(p_FmPort->h_Fm, p_FmPort->portId,
++ FALSE);
++ else
++ FmSetPortPreFetchConfiguration(p_FmPort->h_Fm, p_FmPort->portId,
++ TRUE);
++ }
++ }
++ /* The code below is a trick so the FM will neither release the buffer
++ to the BM nor try to enqueue the frame to the QM */
++ if (((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX)) && (!p_FmPort->imEn))
++ {
++ if (!p_DriverParams->dfltFqid && p_DriverParams->dontReleaseBuf)
++ {
++ /* override fmbm_tcfqid 0 with a false non-0 value. This will force FM to
++ * act according to tfene. Otherwise, if fmbm_tcfqid is 0 the FM will release
++ * buffers to BM regardless of fmbm_tfene
++ */
++ WRITE_UINT32(p_FmPort->port.bmi_regs->tx.fmbm_tcfqid, 0xFFFFFF);
++ WRITE_UINT32(p_FmPort->port.bmi_regs->tx.fmbm_tfene,
++ NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE);
++ }
++ }
++
++ return E_OK;
++}
++
++static bool CheckRxBmiCounter(t_FmPort *p_FmPort, e_FmPortCounters counter)
++{
++ UNUSED(p_FmPort);
++
++ switch (counter)
++ {
++ case (e_FM_PORT_COUNTERS_CYCLE):
++ case (e_FM_PORT_COUNTERS_TASK_UTIL):
++ case (e_FM_PORT_COUNTERS_QUEUE_UTIL):
++ case (e_FM_PORT_COUNTERS_DMA_UTIL):
++ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
++ case (e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION):
++ case (e_FM_PORT_COUNTERS_FRAME):
++ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
++ case (e_FM_PORT_COUNTERS_RX_BAD_FRAME):
++ case (e_FM_PORT_COUNTERS_RX_LARGE_FRAME):
++ case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME):
++ case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR):
++ case (e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD):
++ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
++ case (e_FM_PORT_COUNTERS_PREPARE_TO_ENQUEUE_COUNTER):
++ return TRUE;
++ default:
++ return FALSE;
++ }
++}
++
++static bool CheckTxBmiCounter(t_FmPort *p_FmPort, e_FmPortCounters counter)
++{
++ UNUSED(p_FmPort);
++
++ switch (counter)
++ {
++ case (e_FM_PORT_COUNTERS_CYCLE):
++ case (e_FM_PORT_COUNTERS_TASK_UTIL):
++ case (e_FM_PORT_COUNTERS_QUEUE_UTIL):
++ case (e_FM_PORT_COUNTERS_DMA_UTIL):
++ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
++ case (e_FM_PORT_COUNTERS_FRAME):
++ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
++ case (e_FM_PORT_COUNTERS_LENGTH_ERR):
++ case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT):
++ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
++ return TRUE;
++ default:
++ return FALSE;
++ }
++}
++
++static bool CheckOhBmiCounter(t_FmPort *p_FmPort, e_FmPortCounters counter)
++{
++ switch (counter)
++ {
++ case (e_FM_PORT_COUNTERS_CYCLE):
++ case (e_FM_PORT_COUNTERS_TASK_UTIL):
++ case (e_FM_PORT_COUNTERS_DMA_UTIL):
++ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
++ case (e_FM_PORT_COUNTERS_FRAME):
++ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
++ case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR):
++ case (e_FM_PORT_COUNTERS_WRED_DISCARD):
++ case (e_FM_PORT_COUNTERS_LENGTH_ERR):
++ case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT):
++ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
++ return TRUE;
++ case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME):
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
++ return FALSE;
++ else
++ return TRUE;
++ default:
++ return FALSE;
++ }
++}
++
++static t_Error BmiPortCheckAndGetCounterType(
++ t_FmPort *p_FmPort, e_FmPortCounters counter,
++ enum fman_port_stats_counters *p_StatsType,
++ enum fman_port_perf_counters *p_PerfType, bool *p_IsStats)
++{
++ volatile uint32_t *p_Reg;
++ bool isValid;
++
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ p_Reg = &p_FmPort->port.bmi_regs->rx.fmbm_rstc;
++ isValid = CheckRxBmiCounter(p_FmPort, counter);
++ break;
++ case (e_FM_PORT_TYPE_TX_10G):
++ case (e_FM_PORT_TYPE_TX):
++ p_Reg = &p_FmPort->port.bmi_regs->tx.fmbm_tstc;
++ isValid = CheckTxBmiCounter(p_FmPort, counter);
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
++ p_Reg = &p_FmPort->port.bmi_regs->oh.fmbm_ostc;
++ isValid = CheckOhBmiCounter(p_FmPort, counter);
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported port type"));
++ }
++
++ if (!isValid)
++ RETURN_ERROR(MINOR, E_INVALID_STATE,
++ ("Requested counter is not available for this port type"));
++
++ /* check that counters are enabled */
++ switch (counter)
++ {
++ case (e_FM_PORT_COUNTERS_CYCLE):
++ case (e_FM_PORT_COUNTERS_TASK_UTIL):
++ case (e_FM_PORT_COUNTERS_QUEUE_UTIL):
++ case (e_FM_PORT_COUNTERS_DMA_UTIL):
++ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
++ case (e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION):
++ /* performance counters - may be read when disabled */
++ *p_IsStats = FALSE;
++ break;
++ case (e_FM_PORT_COUNTERS_FRAME):
++ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
++ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
++ case (e_FM_PORT_COUNTERS_RX_BAD_FRAME):
++ case (e_FM_PORT_COUNTERS_RX_LARGE_FRAME):
++ case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME):
++ case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR):
++ case (e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD):
++ case (e_FM_PORT_COUNTERS_LENGTH_ERR):
++ case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT):
++ case (e_FM_PORT_COUNTERS_WRED_DISCARD):
++ *p_IsStats = TRUE;
++ if (!(GET_UINT32(*p_Reg) & BMI_COUNTERS_EN))
++ RETURN_ERROR(MINOR, E_INVALID_STATE,
++ ("Requested counter was not enabled"));
++ break;
++ default:
++ break;
++ }
++
++ /* Set counter */
++ switch (counter)
++ {
++ case (e_FM_PORT_COUNTERS_CYCLE):
++ *p_PerfType = E_FMAN_PORT_PERF_CNT_CYCLE;
++ break;
++ case (e_FM_PORT_COUNTERS_TASK_UTIL):
++ *p_PerfType = E_FMAN_PORT_PERF_CNT_TASK_UTIL;
++ break;
++ case (e_FM_PORT_COUNTERS_QUEUE_UTIL):
++ *p_PerfType = E_FMAN_PORT_PERF_CNT_QUEUE_UTIL;
++ break;
++ case (e_FM_PORT_COUNTERS_DMA_UTIL):
++ *p_PerfType = E_FMAN_PORT_PERF_CNT_DMA_UTIL;
++ break;
++ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
++ *p_PerfType = E_FMAN_PORT_PERF_CNT_FIFO_UTIL;
++ break;
++ case (e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION):
++ *p_PerfType = E_FMAN_PORT_PERF_CNT_RX_PAUSE;
++ break;
++ case (e_FM_PORT_COUNTERS_FRAME):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_FRAME;
++ break;
++ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_DISCARD;
++ break;
++ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_DEALLOC_BUF;
++ break;
++ case (e_FM_PORT_COUNTERS_RX_BAD_FRAME):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_RX_BAD_FRAME;
++ break;
++ case (e_FM_PORT_COUNTERS_RX_LARGE_FRAME):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_RX_LARGE_FRAME;
++ break;
++ case (e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_RX_OUT_OF_BUF;
++ break;
++ case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_FILTERED_FRAME;
++ break;
++ case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_DMA_ERR;
++ break;
++ case (e_FM_PORT_COUNTERS_WRED_DISCARD):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_WRED_DISCARD;
++ break;
++ case (e_FM_PORT_COUNTERS_LENGTH_ERR):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_LEN_ERR;
++ break;
++ case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT):
++ *p_StatsType = E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT;
++ break;
++ default:
++ break;
++ }
++
++ return E_OK;
++}
++
++static t_Error AdditionalPrsParams(t_FmPort *p_FmPort,
++ t_FmPcdPrsAdditionalHdrParams *p_HdrParams,
++ uint32_t *p_SoftSeqAttachReg)
++{
++ uint8_t hdrNum, Ipv4HdrNum;
++ u_FmPcdHdrPrsOpts *p_prsOpts;
++ uint32_t tmpReg = *p_SoftSeqAttachReg, tmpPrsOffset;
++
++ if (IS_PRIVATE_HEADER(p_HdrParams->hdr)
++ || IS_SPECIAL_HEADER(p_HdrParams->hdr))
++ RETURN_ERROR(
++ MAJOR, E_NOT_SUPPORTED,
++ ("No additional parameters for private or special headers."));
++
++ if (p_HdrParams->errDisable)
++ tmpReg |= PRS_HDR_ERROR_DIS;
++
++ /* Set parser options */
++ if (p_HdrParams->usePrsOpts)
++ {
++ p_prsOpts = &p_HdrParams->prsOpts;
++ switch (p_HdrParams->hdr)
++ {
++ case (HEADER_TYPE_MPLS):
++ if (p_prsOpts->mplsPrsOptions.labelInterpretationEnable)
++ tmpReg |= PRS_HDR_MPLS_LBL_INTER_EN;
++ hdrNum = GetPrsHdrNum(p_prsOpts->mplsPrsOptions.nextParse);
++ if (hdrNum == ILLEGAL_HDR_NUM)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++ Ipv4HdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
++ if (hdrNum < Ipv4HdrNum)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("Header must be equal or higher than IPv4"));
++ tmpReg |= ((uint32_t)hdrNum * PRS_HDR_ENTRY_SIZE)
++ << PRS_HDR_MPLS_NEXT_HDR_SHIFT;
++ break;
++ case (HEADER_TYPE_PPPoE):
++ if (p_prsOpts->pppoePrsOptions.enableMTUCheck)
++ tmpReg |= PRS_HDR_PPPOE_MTU_CHECK_EN;
++ break;
++ case (HEADER_TYPE_IPv6):
++ if (p_prsOpts->ipv6PrsOptions.routingHdrEnable)
++ tmpReg |= PRS_HDR_IPV6_ROUTE_HDR_EN;
++ break;
++ case (HEADER_TYPE_TCP):
++ if (p_prsOpts->tcpPrsOptions.padIgnoreChecksum)
++ tmpReg |= PRS_HDR_TCP_PAD_REMOVAL;
++ else
++ tmpReg &= ~PRS_HDR_TCP_PAD_REMOVAL;
++ break;
++ case (HEADER_TYPE_UDP):
++ if (p_prsOpts->udpPrsOptions.padIgnoreChecksum)
++ tmpReg |= PRS_HDR_UDP_PAD_REMOVAL;
++ else
++ tmpReg &= ~PRS_HDR_UDP_PAD_REMOVAL;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid header"));
++ }
++ }
++
++ /* Set software parsing (the address is divided by 2 since the parser uses 2-byte accesses). */
++ if (p_HdrParams->swPrsEnable)
++ {
++ tmpPrsOffset = FmPcdGetSwPrsOffset(p_FmPort->h_FmPcd, p_HdrParams->hdr,
++ p_HdrParams->indexPerHdr);
++ if (tmpPrsOffset == ILLEGAL_BASE)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++ tmpReg |= (PRS_HDR_SW_PRS_EN | tmpPrsOffset);
++ }
++ *p_SoftSeqAttachReg = tmpReg;
++
++ return E_OK;
++}
++
++static uint32_t GetPortSchemeBindParams(
++ t_Handle h_FmPort, t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ uint32_t walking1Mask = 0x80000000, tmp;
++ uint8_t idx = 0;
++
++ p_SchemeBind->netEnvId = p_FmPort->netEnvId;
++ p_SchemeBind->hardwarePortId = p_FmPort->hardwarePortId;
++ p_SchemeBind->useClsPlan = p_FmPort->useClsPlan;
++ p_SchemeBind->numOfSchemes = 0;
++ tmp = p_FmPort->schemesPerPortVector;
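++ /* The vector is MSB-first: bit 31 corresponds to scheme id 0. */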
++ if (tmp)
++ {
++ while (tmp)
++ {
++ if (tmp & walking1Mask)
++ {
++ p_SchemeBind->schemesIds[p_SchemeBind->numOfSchemes] = idx;
++ p_SchemeBind->numOfSchemes++;
++ tmp &= ~walking1Mask;
++ }
++ walking1Mask >>= 1;
++ idx++;
++ }
++ }
++
++ return tmp;
++}
++
++static void FmPortCheckNApplyMacsec(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ volatile uint32_t *p_BmiCfgReg = NULL;
++ uint32_t macsecEn = BMI_PORT_CFG_EN_MACSEC;
++ uint32_t lcv, walking1Mask = 0x80000000;
++ uint8_t cnt = 0;
++
++ ASSERT_COND(p_FmPort);
++ ASSERT_COND(p_FmPort->h_FmPcd);
++ ASSERT_COND(!p_FmPort->p_FmPortDriverParam);
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ return;
++
++ p_BmiCfgReg = &p_FmPort->port.bmi_regs->rx.fmbm_rcfg;
++ /* get LCV for MACSEC */
++ if ((lcv = FmPcdGetMacsecLcv(p_FmPort->h_FmPcd, p_FmPort->netEnvId))
++ != 0)
++ {
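++ /* Count leading zero bits of the LCV to find the index of its first set bit. */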
++ while (!(lcv & walking1Mask))
++ {
++ cnt++;
++ walking1Mask >>= 1;
++ }
++
++ macsecEn |= (uint32_t)cnt << BMI_PORT_CFG_MS_SEL_SHIFT;
++ WRITE_UINT32(*p_BmiCfgReg, GET_UINT32(*p_BmiCfgReg) | macsecEn);
++ }
++}
++
++static t_Error SetPcd(t_FmPort *p_FmPort, t_FmPortPcdParams *p_PcdParams)
++{
++ t_Error err = E_OK;
++ uint32_t tmpReg;
++ volatile uint32_t *p_BmiNia = NULL;
++ volatile uint32_t *p_BmiPrsNia = NULL;
++ volatile uint32_t *p_BmiPrsStartOffset = NULL;
++ volatile uint32_t *p_BmiInitPrsResult = NULL;
++ volatile uint32_t *p_BmiCcBase = NULL;
++ uint16_t hdrNum, L3HdrNum, greHdrNum;
++ int i;
++ bool isEmptyClsPlanGrp;
++ uint32_t tmpHxs[FM_PCD_PRS_NUM_OF_HDRS];
++ uint16_t absoluteProfileId;
++ uint8_t physicalSchemeId;
++ uint32_t ccTreePhysOffset;
++ t_FmPcdKgInterModuleBindPortToSchemes schemeBind;
++ uint32_t initialSwPrs = 0;
++
++ ASSERT_COND(p_FmPort);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if (p_FmPort->imEn)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for non-independant mode ports only"));
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++
++ p_FmPort->netEnvId = FmPcdGetNetEnvId(p_PcdParams->h_NetEnv);
++
++ p_FmPort->pcdEngines = 0;
++
++ /* initialize p_FmPort->pcdEngines field in port's structure */
++ switch (p_PcdParams->pcdSupport)
++ {
++ case (e_FM_PORT_PCD_SUPPORT_NONE):
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("No PCD configuration required if e_FM_PORT_PCD_SUPPORT_NONE selected"));
++ case (e_FM_PORT_PCD_SUPPORT_PRS_ONLY):
++ p_FmPort->pcdEngines |= FM_PCD_PRS;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PLCR_ONLY):
++ p_FmPort->pcdEngines |= FM_PCD_PLCR;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR):
++ p_FmPort->pcdEngines |= FM_PCD_PRS;
++ p_FmPort->pcdEngines |= FM_PCD_PLCR;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG):
++ p_FmPort->pcdEngines |= FM_PCD_PRS;
++ p_FmPort->pcdEngines |= FM_PCD_KG;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC):
++ p_FmPort->pcdEngines |= FM_PCD_PRS;
++ p_FmPort->pcdEngines |= FM_PCD_CC;
++ p_FmPort->pcdEngines |= FM_PCD_KG;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR):
++ p_FmPort->pcdEngines |= FM_PCD_PRS;
++ p_FmPort->pcdEngines |= FM_PCD_KG;
++ p_FmPort->pcdEngines |= FM_PCD_CC;
++ p_FmPort->pcdEngines |= FM_PCD_PLCR;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC):
++ p_FmPort->pcdEngines |= FM_PCD_PRS;
++ p_FmPort->pcdEngines |= FM_PCD_CC;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR):
++ p_FmPort->pcdEngines |= FM_PCD_PRS;
++ p_FmPort->pcdEngines |= FM_PCD_CC;
++ p_FmPort->pcdEngines |= FM_PCD_PLCR;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR):
++ p_FmPort->pcdEngines |= FM_PCD_PRS;
++ p_FmPort->pcdEngines |= FM_PCD_KG;
++ p_FmPort->pcdEngines |= FM_PCD_PLCR;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_CC_ONLY):
++ p_FmPort->pcdEngines |= FM_PCD_CC;
++ break;
++#ifdef FM_CAPWAP_SUPPORT
++ case (e_FM_PORT_PCD_SUPPORT_CC_AND_KG):
++ p_FmPort->pcdEngines |= FM_PCD_CC;
++ p_FmPort->pcdEngines |= FM_PCD_KG;
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR):
++ p_FmPort->pcdEngines |= FM_PCD_CC;
++ p_FmPort->pcdEngines |= FM_PCD_KG;
++ p_FmPort->pcdEngines |= FM_PCD_PLCR;
++ break;
++#endif /* FM_CAPWAP_SUPPORT */
++
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid pcdSupport"));
++ }
++
++ if ((p_FmPort->pcdEngines & FM_PCD_PRS)
++ && (p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams
++ > FM_PCD_PRS_NUM_OF_HDRS))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("Port parser numOfHdrsWithAdditionalParams may not exceed %d", FM_PCD_PRS_NUM_OF_HDRS));
++
++ /* check that parameters exist for each and only each defined engine */
++ if ((!!(p_FmPort->pcdEngines & FM_PCD_PRS) != !!p_PcdParams->p_PrsParams)
++ || (!!(p_FmPort->pcdEngines & FM_PCD_KG)
++ != !!p_PcdParams->p_KgParams)
++ || (!!(p_FmPort->pcdEngines & FM_PCD_CC)
++ != !!p_PcdParams->p_CcParams))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("PCD initialization structure is not consistent with pcdSupport"));
++
++ /* get PCD registers pointers */
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
++ p_BmiPrsNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfpne;
++ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->rx.fmbm_rpso;
++ p_BmiInitPrsResult = &p_FmPort->port.bmi_regs->rx.fmbm_rprai[0];
++ p_BmiCcBase = &p_FmPort->port.bmi_regs->rx.fmbm_rccb;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
++ p_BmiPrsNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofpne;
++ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->oh.fmbm_opso;
++ p_BmiInitPrsResult = &p_FmPort->port.bmi_regs->oh.fmbm_oprai[0];
++ p_BmiCcBase = &p_FmPort->port.bmi_regs->oh.fmbm_occb;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
++ }
++
++ /* set PCD port parameter */
++ if (p_FmPort->pcdEngines & FM_PCD_CC)
++ {
++ err = FmPcdCcBindTree(p_FmPort->h_FmPcd, p_PcdParams,
++ p_PcdParams->p_CcParams->h_CcTree,
++ &ccTreePhysOffset, p_FmPort);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ WRITE_UINT32(*p_BmiCcBase, ccTreePhysOffset);
++ p_FmPort->ccTreeId = p_PcdParams->p_CcParams->h_CcTree;
++ }
++
++ if (p_FmPort->pcdEngines & FM_PCD_KG)
++ {
++ if (p_PcdParams->p_KgParams->numOfSchemes == 0)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("For ports using Keygen, at least one scheme must be bound. "));
++
++ err = FmPcdKgSetOrBindToClsPlanGrp(p_FmPort->h_FmPcd,
++ p_FmPort->hardwarePortId,
++ p_FmPort->netEnvId,
++ p_FmPort->optArray,
++ &p_FmPort->clsPlanGrpId,
++ &isEmptyClsPlanGrp);
++ if (err)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("FmPcdKgSetOrBindToClsPlanGrp failed. "));
++
++ p_FmPort->useClsPlan = !isEmptyClsPlanGrp;
++
++ schemeBind.netEnvId = p_FmPort->netEnvId;
++ schemeBind.hardwarePortId = p_FmPort->hardwarePortId;
++ schemeBind.numOfSchemes = p_PcdParams->p_KgParams->numOfSchemes;
++ schemeBind.useClsPlan = p_FmPort->useClsPlan;
++
++ /* for each scheme */
++ for (i = 0; i < p_PcdParams->p_KgParams->numOfSchemes; i++)
++ {
++ ASSERT_COND(p_PcdParams->p_KgParams->h_Schemes[i]);
++ physicalSchemeId = FmPcdKgGetSchemeId(
++ p_PcdParams->p_KgParams->h_Schemes[i]);
++ schemeBind.schemesIds[i] = physicalSchemeId;
++ /* build vector */
++ p_FmPort->schemesPerPortVector |= 1
++ << (31 - (uint32_t)physicalSchemeId);
++#if (DPAA_VERSION >= 11)
++ /* Because VSPE is defined per port, the whole PCD path must follow this
++ requirement: if VSPE is not set on the port, it cannot be set on any of
++ the port's relevant schemes */
++ if (!p_FmPort->vspe
++ && FmPcdKgGetVspe((p_PcdParams->p_KgParams->h_Schemes[i])))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("VSPE is not at port level"));
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++ err = FmPcdKgBindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ /***************************/
++ /* configure NIA after BMI */
++ /***************************/
++ /* rfne may contain FDCS bits, so first we read them. */
++ p_FmPort->savedBmiNia = GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK;
++
++ /* If policer is used directly after BMI or PRS */
++ if ((p_FmPort->pcdEngines & FM_PCD_PLCR)
++ && ((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PLCR_ONLY)
++ || (p_PcdParams->pcdSupport
++ == e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR)))
++ {
++ if (!p_PcdParams->p_PlcrParams->h_Profile)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Profile should be initialized"));
++
++ absoluteProfileId = (uint16_t)FmPcdPlcrProfileGetAbsoluteId(
++ p_PcdParams->p_PlcrParams->h_Profile);
++
++ if (!FmPcdPlcrIsProfileValid(p_FmPort->h_FmPcd, absoluteProfileId))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Private port profile not valid."));
++
++ tmpReg = (uint32_t)(absoluteProfileId | NIA_PLCR_ABSOLUTE);
++
++ if (p_FmPort->pcdEngines & FM_PCD_PRS) /* e_FM_PCD_SUPPORT_PRS_AND_PLCR */
++ /* update BMI HPNIA */
++ WRITE_UINT32(*p_BmiPrsNia, (uint32_t)(NIA_ENG_PLCR | tmpReg));
++ else
++ /* e_FM_PCD_SUPPORT_PLCR_ONLY */
++ /* update BMI NIA */
++ p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_PLCR);
++ }
++
++ /* if CC is used directly after BMI */
++ if ((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_ONLY)
++#ifdef FM_CAPWAP_SUPPORT
++ || (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_AND_KG)
++ || (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR)
++#endif /* FM_CAPWAP_SUPPORT */
++ )
++ {
++ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_OPERATION,
++ ("e_FM_PORT_PCD_SUPPORT_CC_xx available for offline parsing ports only"));
++ p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC);
++ /* check that prs start offset == RIM[FOF] */
++ }
++
++ if (p_FmPort->pcdEngines & FM_PCD_PRS)
++ {
++ ASSERT_COND(p_PcdParams->p_PrsParams);
++#if (DPAA_VERSION >= 11)
++ if (p_PcdParams->p_PrsParams->firstPrsHdr == HEADER_TYPE_CAPWAP)
++ hdrNum = OFFLOAD_SW_PATCH_CAPWAP_LABEL;
++ else
++ {
++#endif /* (DPAA_VERSION >= 11) */
++ /* if PRS is used it is always first */
++ hdrNum = GetPrsHdrNum(p_PcdParams->p_PrsParams->firstPrsHdr);
++ if (hdrNum == ILLEGAL_HDR_NUM)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unsupported header."));
++#if (DPAA_VERSION >= 11)
++ }
++#endif /* (DPAA_VERSION >= 11) */
++ p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_PRS | (uint32_t)(hdrNum));
++ /* set after parser NIA */
++ tmpReg = 0;
++ switch (p_PcdParams->pcdSupport)
++ {
++ case (e_FM_PORT_PCD_SUPPORT_PRS_ONLY):
++ WRITE_UINT32(*p_BmiPrsNia,
++ GET_NIA_BMI_AC_ENQ_FRAME(p_FmPort->h_FmPcd));
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC):
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR):
++ tmpReg = NIA_KG_CC_EN;
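++ /* fall through - the plain KG cases below complete the configuration */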
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG):
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR):
++ if (p_PcdParams->p_KgParams->directScheme)
++ {
++ physicalSchemeId = FmPcdKgGetSchemeId(
++ p_PcdParams->p_KgParams->h_DirectScheme);
++ /* check that this scheme was bound to this port */
++ for (i = 0; i < p_PcdParams->p_KgParams->numOfSchemes; i++)
++ if (p_PcdParams->p_KgParams->h_DirectScheme
++ == p_PcdParams->p_KgParams->h_Schemes[i])
++ break;
++ if (i == p_PcdParams->p_KgParams->numOfSchemes)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("Direct scheme is not one of the port selected schemes."));
++ tmpReg |= (uint32_t)(NIA_KG_DIRECT | physicalSchemeId);
++ }
++ WRITE_UINT32(*p_BmiPrsNia, NIA_ENG_KG | tmpReg);
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC):
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR):
++ WRITE_UINT32(*p_BmiPrsNia,
++ (uint32_t)(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC));
++ break;
++ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR):
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid PCD support"));
++ }
++
++ /* set start parsing offset */
++ WRITE_UINT32(*p_BmiPrsStartOffset,
++ p_PcdParams->p_PrsParams->parsingOffset);
++
++ /************************************/
++ /* Parser port parameters */
++ /************************************/
++ /* stop before configuring */
++ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, PRS_CAC_STOP);
++ /* wait for parser to be in idle state */
++ while (GET_UINT32(p_FmPort->p_FmPortPrsRegs->pcac) & PRS_CAC_ACTIVE)
++ ;
++
++ /* set soft seq attachment register */
++ memset(tmpHxs, 0, FM_PCD_PRS_NUM_OF_HDRS * sizeof(uint32_t));
++
++ /* set protocol options */
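++ /* optArray is zero-terminated; each option encodes its (index + 1) into the matching header's HXS field */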
++ for (i = 0; p_FmPort->optArray[i]; i++)
++ switch (p_FmPort->optArray[i])
++ {
++ case (ETH_BROADCAST):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_ETH);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_ETH_BC_SHIFT;
++ break;
++ case (ETH_MULTICAST):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_ETH);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_ETH_MC_SHIFT;
++ break;
++ case (VLAN_STACKED):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_VLAN);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_VLAN_STACKED_SHIFT;
++ break;
++ case (MPLS_STACKED):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_MPLS);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_MPLS_STACKED_SHIFT;
++ break;
++ case (IPV4_BROADCAST_1):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_1_BC_SHIFT;
++ break;
++ case (IPV4_MULTICAST_1):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_1_MC_SHIFT;
++ break;
++ case (IPV4_UNICAST_2):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_2_UC_SHIFT;
++ break;
++ case (IPV4_MULTICAST_BROADCAST_2):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_2_MC_BC_SHIFT;
++ break;
++ case (IPV6_MULTICAST_1):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV6_1_MC_SHIFT;
++ break;
++ case (IPV6_UNICAST_2):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV6_2_UC_SHIFT;
++ break;
++ case (IPV6_MULTICAST_2):
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
++ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV6_2_MC_SHIFT;
++ break;
++ }
++
++ if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId,
++ HEADER_TYPE_UDP_ENCAP_ESP))
++ {
++ if (p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams == FM_PCD_PRS_NUM_OF_HDRS)
++ RETURN_ERROR(
++ MINOR, E_INVALID_VALUE,
++ ("If HEADER_TYPE_UDP_ENCAP_ESP is used, numOfHdrsWithAdditionalParams may be up to FM_PCD_PRS_NUM_OF_HDRS - 1"));
++
++ p_PcdParams->p_PrsParams->additionalParams[p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams].hdr =
++ HEADER_TYPE_UDP;
++ p_PcdParams->p_PrsParams->additionalParams[p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams].swPrsEnable =
++ TRUE;
++ p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams++;
++ }
++
++ /* set MPLS default next header - HW reset workaround */
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_MPLS);
++ tmpHxs[hdrNum] |= PRS_HDR_MPLS_LBL_INTER_EN;
++ L3HdrNum = GetPrsHdrNum(HEADER_TYPE_USER_DEFINED_L3);
++ tmpHxs[hdrNum] |= (uint32_t)L3HdrNum << PRS_HDR_MPLS_NEXT_HDR_SHIFT;
++
++ /* for GRE, disable errors */
++ greHdrNum = GetPrsHdrNum(HEADER_TYPE_GRE);
++ tmpHxs[greHdrNum] |= PRS_HDR_ERROR_DIS;
++
++ /* For UDP remove PAD from L4 checksum calculation */
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_UDP);
++ tmpHxs[hdrNum] |= PRS_HDR_UDP_PAD_REMOVAL;
++ /* For TCP remove PAD from L4 checksum calculation */
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_TCP);
++ tmpHxs[hdrNum] |= PRS_HDR_TCP_PAD_REMOVAL;
++
++ /* config additional params for specific headers */
++ for (i = 0; i < p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams;
++ i++)
++ {
++ /* case for using sw parser as the initial NIA address, before
++ * HW parsing
++ */
++ if ((p_PcdParams->p_PrsParams->additionalParams[i].hdr == HEADER_TYPE_NONE) &&
++ p_PcdParams->p_PrsParams->additionalParams[i].swPrsEnable)
++ {
++ initialSwPrs = FmPcdGetSwPrsOffset(p_FmPort->h_FmPcd, HEADER_TYPE_NONE,
++ p_PcdParams->p_PrsParams->additionalParams[i].indexPerHdr);
++ if (initialSwPrs == ILLEGAL_BASE)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++
++ /* clear parser first HXS */
++ p_FmPort->savedBmiNia &= ~BMI_RFNE_HXS_MASK; /* 0x000000FF */
++ /* rewrite with soft parser start */
++ p_FmPort->savedBmiNia |= initialSwPrs;
++ continue;
++ }
++
++ hdrNum =
++ GetPrsHdrNum(p_PcdParams->p_PrsParams->additionalParams[i].hdr);
++ if (hdrNum == ILLEGAL_HDR_NUM)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++ if (hdrNum == NO_HDR_NUM)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("Private headers may not use additional parameters"));
++
++ err = AdditionalPrsParams(
++ p_FmPort, &p_PcdParams->p_PrsParams->additionalParams[i],
++ &tmpHxs[hdrNum]);
++ if (err)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++ }
++
++ /* Check if ip-reassembly port - need to link sw-parser code */
++ if (p_FmPort->h_IpReassemblyManip)
++ {
++ /* link to sw parser code for IP Frag - only if no other code is applied. */
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
++ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
++ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv4_IPR_LABEL);
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
++ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
++ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPR_LABEL);
++ } else {
++ if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId, HEADER_TYPE_UDP_LITE))
++ {
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
++ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
++ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPF_LABEL);
++ } else if ((FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)
++ && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)))
++ {
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
++ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
++ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPF_LABEL);
++ }
++ }
++
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId,
++ HEADER_TYPE_UDP_LITE))
++ {
++ /* link to sw parser code for udp lite - only if no other code is applied. */
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
++ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
++ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | UDP_LITE_SW_PATCH_LABEL);
++ }
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++ for (i = 0; i < FM_PCD_PRS_NUM_OF_HDRS; i++)
++ {
++ /* For all header set LCV as taken from netEnv*/
++ WRITE_UINT32(
++ p_FmPort->p_FmPortPrsRegs->hdrs[i].lcv,
++ FmPcdGetLcv(p_FmPort->h_FmPcd, p_FmPort->netEnvId, (uint8_t)i));
++ /* set HXS register according to default+Additional params+protocol options */
++ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->hdrs[i].softSeqAttach,
++ tmpHxs[i]);
++ }
++
++ /* set tpid. */
++ tmpReg = PRS_TPID_DFLT;
++ if (p_PcdParams->p_PrsParams->setVlanTpid1)
++ {
++ tmpReg &= PRS_TPID2_MASK;
++ tmpReg |= (uint32_t)p_PcdParams->p_PrsParams->vlanTpid1
++ << PRS_PCTPID_SHIFT;
++ }
++ if (p_PcdParams->p_PrsParams->setVlanTpid2)
++ {
++ tmpReg &= PRS_TPID1_MASK;
++ tmpReg |= (uint32_t)p_PcdParams->p_PrsParams->vlanTpid2;
++ }
++ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pctpid, tmpReg);
++
++ /* enable parser */
++ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, 0);
++
++ if (p_PcdParams->p_PrsParams->prsResultPrivateInfo)
++ p_FmPort->privateInfo =
++ p_PcdParams->p_PrsParams->prsResultPrivateInfo;
++
++ } /* end parser */
++ else {
++ if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)
++ && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ {
++ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
++ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->hdrs[hdrNum].softSeqAttach,
++ (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPF_LABEL));
++ }
++
++ WRITE_UINT32(*p_BmiPrsStartOffset, 0);
++
++ p_FmPort->privateInfo = 0;
++ }
++
++ FmPortCheckNApplyMacsec(p_FmPort);
++
++ WRITE_UINT32(
++ *p_BmiPrsStartOffset,
++ GET_UINT32(*p_BmiPrsStartOffset) + p_FmPort->internalBufferOffset);
++
++ /* set initial parser result - used for all engines */
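++ /* word 0 carries privateInfo in the PORTID field; remaining words get the HIGH/LOW defaults */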
++ for (i = 0; i < FM_PORT_PRS_RESULT_NUM_OF_WORDS; i++)
++ {
++ if (!i)
++ WRITE_UINT32(
++ *(p_BmiInitPrsResult),
++ (uint32_t)(((uint32_t)p_FmPort->privateInfo << BMI_PR_PORTID_SHIFT) | BMI_PRS_RESULT_HIGH));
++ else
++ {
++ if (i < FM_PORT_PRS_RESULT_NUM_OF_WORDS / 2)
++ WRITE_UINT32(*(p_BmiInitPrsResult+i), BMI_PRS_RESULT_HIGH);
++ else
++ WRITE_UINT32(*(p_BmiInitPrsResult+i), BMI_PRS_RESULT_LOW);
++ }
++ }
++
++ return E_OK;
++}
++
++static t_Error DeletePcd(t_FmPort *p_FmPort)
++{
++ t_Error err = E_OK;
++ volatile uint32_t *p_BmiNia = NULL;
++ volatile uint32_t *p_BmiPrsStartOffset = NULL;
++
++ ASSERT_COND(p_FmPort);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if (p_FmPort->imEn)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for non-independant mode ports only"));
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++
++ if (!p_FmPort->pcdEngines)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("called for non PCD port"));
++
++ /* get PCD registers pointers */
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
++ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->rx.fmbm_rpso;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
++ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->oh.fmbm_opso;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
++ }
++
++ if ((GET_UINT32(*p_BmiNia) & GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME())
++ != GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME())
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("port has to be detached previousely"));
++
++ WRITE_UINT32(*p_BmiPrsStartOffset, 0);
++
++ /* "cut" PCD out of the port's flow - go to BMI */
++ /* WRITE_UINT32(*p_BmiNia, (p_FmPort->savedBmiNia & BMI_RFNE_FDCS_MASK) | (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)); */
++
++ if (p_FmPort->pcdEngines & FM_PCD_PRS)
++ {
++ /* stop parser */
++ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, PRS_CAC_STOP);
++ /* wait for parser to be in idle state */
++ while (GET_UINT32(p_FmPort->p_FmPortPrsRegs->pcac) & PRS_CAC_ACTIVE)
++ ;
++ }
++
++ if (p_FmPort->pcdEngines & FM_PCD_KG)
++ {
++ t_FmPcdKgInterModuleBindPortToSchemes schemeBind;
++
++ /* unbind all schemes */
++ p_FmPort->schemesPerPortVector = GetPortSchemeBindParams(p_FmPort,
++ &schemeBind);
++
++ err = FmPcdKgUnbindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ err = FmPcdKgDeleteOrUnbindPortToClsPlanGrp(p_FmPort->h_FmPcd,
++ p_FmPort->hardwarePortId,
++ p_FmPort->clsPlanGrpId);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ p_FmPort->useClsPlan = FALSE;
++ }
++
++ if (p_FmPort->pcdEngines & FM_PCD_CC)
++ {
++ /* unbind - we need to get the treeId too */
++ err = FmPcdCcUnbindTree(p_FmPort->h_FmPcd, p_FmPort->ccTreeId);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ p_FmPort->pcdEngines = 0;
++
++ return E_OK;
++}
++
++static t_Error AttachPCD(t_FmPort *p_FmPort)
++{
++ volatile uint32_t *p_BmiNia = NULL;
++
++ ASSERT_COND(p_FmPort);
++
++ /* get PCD registers pointers */
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
++ else
++ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
++
++ /* check that current NIA is BMI to BMI */
++ if ((GET_UINT32(*p_BmiNia) & ~BMI_RFNE_FDCS_MASK)
++ != GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME())
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("may be called only for ports in BMI-to-BMI state."));
++
++ if (p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY)
++ if (FmSetNumOfRiscsPerPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId, 1,
++ p_FmPort->orFmanCtrl) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ if (p_FmPort->requiredAction & UPDATE_NIA_CMNE)
++ {
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ocmne,
++ p_FmPort->savedBmiCmne);
++ else
++ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rcmne,
++ p_FmPort->savedBmiCmne);
++ }
++
++ if (p_FmPort->requiredAction & UPDATE_NIA_PNEN)
++ WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnen,
++ p_FmPort->savedQmiPnen);
++
++ if (p_FmPort->requiredAction & UPDATE_NIA_FENE)
++ {
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofene,
++ p_FmPort->savedBmiFene);
++ else
++ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfene,
++ p_FmPort->savedBmiFene);
++ }
++
++ if (p_FmPort->requiredAction & UPDATE_NIA_FPNE)
++ {
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofpne,
++ p_FmPort->savedBmiFpne);
++ else
++ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfpne,
++ p_FmPort->savedBmiFpne);
++ }
++
++ if (p_FmPort->requiredAction & UPDATE_OFP_DPTE)
++ {
++ ASSERT_COND(p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING);
++
++ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofp,
++ p_FmPort->savedBmiOfp);
++ }
++
++ WRITE_UINT32(*p_BmiNia, p_FmPort->savedBmiNia);
++
++ if (p_FmPort->requiredAction & UPDATE_NIA_PNDN)
++ {
++ p_FmPort->origNonRxQmiRegsPndn =
++ GET_UINT32(p_FmPort->port.qmi_regs->fmqm_pndn);
++ WRITE_UINT32(p_FmPort->port.qmi_regs->fmqm_pndn,
++ p_FmPort->savedNonRxQmiRegsPndn);
++ }
++
++ return E_OK;
++}
++
++static t_Error DetachPCD(t_FmPort *p_FmPort)
++{
++ volatile uint32_t *p_BmiNia = NULL;
++
++ ASSERT_COND(p_FmPort);
++
++ /* get PCD registers pointers */
++ if (p_FmPort->requiredAction & UPDATE_NIA_PNDN)
++ WRITE_UINT32(p_FmPort->port.qmi_regs->fmqm_pndn,
++ p_FmPort->origNonRxQmiRegsPndn);
++
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
++ else
++ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
++
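++ /* restore the BMI-to-BMI enqueue NIA while preserving the saved FDCS bits */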
++ WRITE_UINT32(
++ *p_BmiNia,
++ (p_FmPort->savedBmiNia & BMI_RFNE_FDCS_MASK) | GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME());
++
++ if (FmPcdGetHcHandle(p_FmPort->h_FmPcd))
++ FmPcdHcSync(p_FmPort->h_FmPcd);
++
++ if (p_FmPort->requiredAction & UPDATE_NIA_FENE)
++ {
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofene,
++ NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR);
++ else
++ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfene,
++ NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR);
++ }
++
++ if (p_FmPort->requiredAction & UPDATE_NIA_PNEN)
++ WRITE_UINT32(p_FmPort->port.qmi_regs->fmqm_pnen,
++ NIA_ENG_BMI | NIA_BMI_AC_RELEASE);
++
++ if (p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY)
++ if (FmSetNumOfRiscsPerPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId, 2,
++ p_FmPort->orFmanCtrl) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ p_FmPort->requiredAction = 0;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++/* Inter-module API routines */
++/*****************************************************************************/
++void FmPortSetMacsecCmd(t_Handle h_FmPort, uint8_t dfltSci)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ volatile uint32_t *p_BmiCfgReg = NULL;
++ uint32_t tmpReg;
++
++ SANITY_CHECK_RETURN(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN(p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_TX))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_OPERATION, ("The routine is relevant for Tx ports only"));
++ return;
++ }
++
++ p_BmiCfgReg = &p_FmPort->port.bmi_regs->tx.fmbm_tfca;
++ tmpReg = GET_UINT32(*p_BmiCfgReg) & ~BMI_CMD_ATTR_MACCMD_MASK;
++ tmpReg |= BMI_CMD_ATTR_MACCMD_SECURED;
++ tmpReg |= (((uint32_t)dfltSci << BMI_CMD_ATTR_MACCMD_SC_SHIFT)
++ & BMI_CMD_ATTR_MACCMD_SC_MASK);
++
++ WRITE_UINT32(*p_BmiCfgReg, tmpReg);
++}
++
++uint8_t FmPortGetNetEnvId(t_Handle h_FmPort)
++{
++ return ((t_FmPort*)h_FmPort)->netEnvId;
++}
++
++uint8_t FmPortGetHardwarePortId(t_Handle h_FmPort)
++{
++ return ((t_FmPort*)h_FmPort)->hardwarePortId;
++}
++
++uint32_t FmPortGetPcdEngines(t_Handle h_FmPort)
++{
++ return ((t_FmPort*)h_FmPort)->pcdEngines;
++}
++
++#if (DPAA_VERSION >= 11)
++t_Error FmPortSetGprFunc(t_Handle h_FmPort, e_FmPortGprFuncType gprFunc,
++ void **p_Value)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ uint32_t muramPageOffset;
++
++ ASSERT_COND(p_FmPort);
++ ASSERT_COND(p_Value);
++
++ if (p_FmPort->gprFunc != e_FM_PORT_GPR_EMPTY)
++ {
++ if (p_FmPort->gprFunc != gprFunc)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("gpr was assigned with different func"));
++ }
++ else
++ {
++ switch (gprFunc)
++ {
++ case (e_FM_PORT_GPR_MURAM_PAGE):
++ p_FmPort->p_ParamsPage = FM_MURAM_AllocMem(p_FmPort->h_FmMuram,
++ 256, 8);
++ if (!p_FmPort->p_ParamsPage)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for page"));
++
++ IOMemSet32(p_FmPort->p_ParamsPage, 0, 256);
++ muramPageOffset =
++ (uint32_t)(XX_VirtToPhys(p_FmPort->p_ParamsPage)
++ - p_FmPort->fmMuramPhysBaseAddr);
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ WRITE_UINT32(
++ p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr,
++ muramPageOffset);
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ WRITE_UINT32(
++ p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ogpr,
++ muramPageOffset);
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Invalid port type"));
++ }
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++ p_FmPort->gprFunc = gprFunc;
++ }
++
++ switch (p_FmPort->gprFunc)
++ {
++ case (e_FM_PORT_GPR_MURAM_PAGE):
++ *p_Value = p_FmPort->p_ParamsPage;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ return E_OK;
++}
++#endif /* (DPAA_VERSION >= 11) */
++
++t_Error FmPortGetSetCcParams(t_Handle h_FmPort,
++ t_FmPortGetSetCcParams *p_CcParams)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ int tmpInt;
++ volatile uint32_t *p_BmiPrsStartOffset = NULL;
++
++ /* This function is called from CC to pass and receive port parameters between CC and PORT */
++
++ if ((p_CcParams->getCcParams.type & OFFSET_OF_PR)
++ && (p_FmPort->bufferOffsets.prsResultOffset != ILLEGAL_BASE))
++ {
++ p_CcParams->getCcParams.prOffset =
++ (uint8_t)p_FmPort->bufferOffsets.prsResultOffset;
++ p_CcParams->getCcParams.type &= ~OFFSET_OF_PR;
++ }
++ if (p_CcParams->getCcParams.type & HW_PORT_ID)
++ {
++ p_CcParams->getCcParams.hardwarePortId =
++ (uint8_t)p_FmPort->hardwarePortId;
++ p_CcParams->getCcParams.type &= ~HW_PORT_ID;
++ }
++ if ((p_CcParams->getCcParams.type & OFFSET_OF_DATA)
++ && (p_FmPort->bufferOffsets.dataOffset != ILLEGAL_BASE))
++ {
++ p_CcParams->getCcParams.dataOffset =
++ (uint16_t)p_FmPort->bufferOffsets.dataOffset;
++ p_CcParams->getCcParams.type &= ~OFFSET_OF_DATA;
++ }
++ if (p_CcParams->getCcParams.type & NUM_OF_TASKS)
++ {
++ p_CcParams->getCcParams.numOfTasks = (uint8_t)p_FmPort->tasks.num;
++ p_CcParams->getCcParams.type &= ~NUM_OF_TASKS;
++ }
++ if (p_CcParams->getCcParams.type & NUM_OF_EXTRA_TASKS)
++ {
++ p_CcParams->getCcParams.numOfExtraTasks =
++ (uint8_t)p_FmPort->tasks.extra;
++ p_CcParams->getCcParams.type &= ~NUM_OF_EXTRA_TASKS;
++ }
++ if (p_CcParams->getCcParams.type & FM_REV)
++ {
++ p_CcParams->getCcParams.revInfo.majorRev = p_FmPort->fmRevInfo.majorRev;
++ p_CcParams->getCcParams.revInfo.minorRev = p_FmPort->fmRevInfo.minorRev;
++ p_CcParams->getCcParams.type &= ~FM_REV;
++ }
++ if (p_CcParams->getCcParams.type & DISCARD_MASK)
++ {
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ p_CcParams->getCcParams.discardMask =
++ GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm);
++ else
++ p_CcParams->getCcParams.discardMask =
++ GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm);
++ p_CcParams->getCcParams.type &= ~DISCARD_MASK;
++ }
++ if (p_CcParams->getCcParams.type & MANIP_EXTRA_SPACE)
++ {
++ p_CcParams->getCcParams.internalBufferOffset =
++ p_FmPort->internalBufferOffset;
++ p_CcParams->getCcParams.type &= ~MANIP_EXTRA_SPACE;
++ }
++ if (p_CcParams->getCcParams.type & GET_NIA_FPNE)
++ {
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ p_CcParams->getCcParams.nia =
++ GET_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofpne);
++ else
++ p_CcParams->getCcParams.nia =
++ GET_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfpne);
++ p_CcParams->getCcParams.type &= ~GET_NIA_FPNE;
++ }
++ if (p_CcParams->getCcParams.type & GET_NIA_PNDN)
++ {
++ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
++ p_CcParams->getCcParams.nia =
++ GET_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn);
++ p_CcParams->getCcParams.type &= ~GET_NIA_PNDN;
++ }
++
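++ /* Set parameters: each UPDATE_* action is latched once in requiredAction; a repeated request must carry the same value */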
++ if ((p_CcParams->setCcParams.type & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY)
++ && !(p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY))
++ {
++ p_FmPort->requiredAction |= UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY;
++ p_FmPort->orFmanCtrl = p_CcParams->setCcParams.orFmanCtrl;
++ }
++
++ if ((p_CcParams->setCcParams.type & UPDATE_NIA_PNEN)
++ && !(p_FmPort->requiredAction & UPDATE_NIA_PNEN))
++ {
++ p_FmPort->savedQmiPnen = p_CcParams->setCcParams.nia;
++ p_FmPort->requiredAction |= UPDATE_NIA_PNEN;
++ }
++ else
++ if (p_CcParams->setCcParams.type & UPDATE_NIA_PNEN)
++ {
++ if (p_FmPort->savedQmiPnen != p_CcParams->setCcParams.nia)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("PNEN was defined previously different"));
++ }
++
++ if ((p_CcParams->setCcParams.type & UPDATE_NIA_PNDN)
++ && !(p_FmPort->requiredAction & UPDATE_NIA_PNDN))
++ {
++ p_FmPort->savedNonRxQmiRegsPndn = p_CcParams->setCcParams.nia;
++ p_FmPort->requiredAction |= UPDATE_NIA_PNDN;
++ }
++ else
++ if (p_CcParams->setCcParams.type & UPDATE_NIA_PNDN)
++ {
++ if (p_FmPort->savedNonRxQmiRegsPndn != p_CcParams->setCcParams.nia)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("PNDN was defined previously different"));
++ }
++
++ if ((p_CcParams->setCcParams.type & UPDATE_NIA_FENE)
++ && (p_CcParams->setCcParams.overwrite
++ || !(p_FmPort->requiredAction & UPDATE_NIA_FENE)))
++ {
++ p_FmPort->savedBmiFene = p_CcParams->setCcParams.nia;
++ p_FmPort->requiredAction |= UPDATE_NIA_FENE;
++ }
++ else
++ if (p_CcParams->setCcParams.type & UPDATE_NIA_FENE)
++ {
++ if (p_FmPort->savedBmiFene != p_CcParams->setCcParams.nia)
++ RETURN_ERROR( MAJOR, E_INVALID_STATE,
++ ("xFENE was defined previously different"));
++ }
++
++ if ((p_CcParams->setCcParams.type & UPDATE_NIA_FPNE)
++ && !(p_FmPort->requiredAction & UPDATE_NIA_FPNE))
++ {
++ p_FmPort->savedBmiFpne = p_CcParams->setCcParams.nia;
++ p_FmPort->requiredAction |= UPDATE_NIA_FPNE;
++ }
++ else
++ if (p_CcParams->setCcParams.type & UPDATE_NIA_FPNE)
++ {
++ if (p_FmPort->savedBmiFpne != p_CcParams->setCcParams.nia)
++ RETURN_ERROR( MAJOR, E_INVALID_STATE,
++ ("xFPNE was defined previously different"));
++ }
++
++ if ((p_CcParams->setCcParams.type & UPDATE_NIA_CMNE)
++ && !(p_FmPort->requiredAction & UPDATE_NIA_CMNE))
++ {
++ p_FmPort->savedBmiCmne = p_CcParams->setCcParams.nia;
++ p_FmPort->requiredAction |= UPDATE_NIA_CMNE;
++ }
++ else
++ if (p_CcParams->setCcParams.type & UPDATE_NIA_CMNE)
++ {
++ if (p_FmPort->savedBmiCmne != p_CcParams->setCcParams.nia)
++ RETURN_ERROR( MAJOR, E_INVALID_STATE,
++ ("xCMNE was defined previously different"));
++ }
++
++ if ((p_CcParams->setCcParams.type & UPDATE_PSO)
++ && !(p_FmPort->requiredAction & UPDATE_PSO))
++ {
++ /* get PCD registers pointers */
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->rx.fmbm_rpso;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->oh.fmbm_opso;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
++ }
++
++ /* set start parsing offset */
++ tmpInt = (int)GET_UINT32(*p_BmiPrsStartOffset)
++ + p_CcParams->setCcParams.psoSize;
++ if (tmpInt > 0)
++ WRITE_UINT32(*p_BmiPrsStartOffset, (uint32_t)tmpInt);
++
++ p_FmPort->requiredAction |= UPDATE_PSO;
++ p_FmPort->savedPrsStartOffset = p_CcParams->setCcParams.psoSize;
++ }
++ else
++ if (p_CcParams->setCcParams.type & UPDATE_PSO)
++ {
++ if (p_FmPort->savedPrsStartOffset
++ != p_CcParams->setCcParams.psoSize)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("parser start offset was defoned previousley different"));
++ }
++
++ if ((p_CcParams->setCcParams.type & UPDATE_OFP_DPTE)
++ && !(p_FmPort->requiredAction & UPDATE_OFP_DPTE))
++ {
++ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
++ p_FmPort->savedBmiOfp = GET_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofp);
++ p_FmPort->savedBmiOfp &= ~BMI_FIFO_PIPELINE_DEPTH_MASK;
++ p_FmPort->savedBmiOfp |= p_CcParams->setCcParams.ofpDpde
++ << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
++ p_FmPort->requiredAction |= UPDATE_OFP_DPTE;
++ }
++
++ return E_OK;
++}
++/*********************** End of inter-module routines ************************/
++
++/****************************************/
++/* API Init unit functions */
++/****************************************/
++
++t_Handle FM_PORT_Config(t_FmPortParams *p_FmPortParams)
++{
++ t_FmPort *p_FmPort;
++ uintptr_t baseAddr = p_FmPortParams->baseAddr;
++ uint32_t tmpReg;
++
++ /* Allocate FM structure */
++ p_FmPort = (t_FmPort *)XX_Malloc(sizeof(t_FmPort));
++ if (!p_FmPort)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Port driver structure"));
++ return NULL;
++ }
++ memset(p_FmPort, 0, sizeof(t_FmPort));
++
++ /* Allocate the FM driver's parameters structure */
++ p_FmPort->p_FmPortDriverParam = (t_FmPortDriverParam *)XX_Malloc(
++ sizeof(t_FmPortDriverParam));
++ if (!p_FmPort->p_FmPortDriverParam)
++ {
++ XX_Free(p_FmPort);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Port driver parameters"));
++ return NULL;
++ }
++ memset(p_FmPort->p_FmPortDriverParam, 0, sizeof(t_FmPortDriverParam));
++
++ /* Initialize FM port parameters which will be kept by the driver */
++ p_FmPort->portType = p_FmPortParams->portType;
++ p_FmPort->portId = p_FmPortParams->portId;
++ p_FmPort->pcdEngines = FM_PCD_NONE;
++ p_FmPort->f_Exception = p_FmPortParams->f_Exception;
++ p_FmPort->h_App = p_FmPortParams->h_App;
++ p_FmPort->h_Fm = p_FmPortParams->h_Fm;
++
++ /* get FM revision */
++ FM_GetRevision(p_FmPort->h_Fm, &p_FmPort->fmRevInfo);
++
++ /* calculate global portId number */
++ p_FmPort->hardwarePortId = SwPortIdToHwPortId(p_FmPort->portType,
++ p_FmPortParams->portId,
++ p_FmPort->fmRevInfo.majorRev,
++ p_FmPort->fmRevInfo.minorRev);
++
++ if (p_FmPort->fmRevInfo.majorRev >= 6)
++ {
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
++ && (p_FmPortParams->portId != FM_OH_PORT_ID))
++ DBG(WARNING,
++ ("Port ID %d is recommended for HC port. Overwriting HW defaults to be suitable for HC.",
++ FM_OH_PORT_ID));
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ && (p_FmPortParams->portId == FM_OH_PORT_ID))
++ DBG(WARNING, ("Use non-zero portId for OP port due to insufficient resources on portId 0."));
++ }
++
++ /* Set up FM port parameters for initialization phase only */
++
++ /* First, fill in flibs struct */
++ fman_port_defconfig(&p_FmPort->p_FmPortDriverParam->dfltCfg,
++ (enum fman_port_type)p_FmPort->portType);
++ /* Overwrite some integration specific parameters */
++ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_pri_elevation =
++ DEFAULT_PORT_rxFifoPriElevationLevel;
++ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_fifo_thr =
++ DEFAULT_PORT_rxFifoThreshold;
++
++#if defined(FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675) || defined(FM_ERROR_VSP_NO_MATCH_SW006)
++ p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006675 = TRUE;
++#else
++ p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006675 = FALSE;
++#endif
++ if ((p_FmPort->fmRevInfo.majorRev == 6)
++ && (p_FmPort->fmRevInfo.minorRev == 0))
++ p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006320 = TRUE;
++ else
++ p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006320 = FALSE;
++
++ /* Excessive Threshold register - exists for pre-FMv3 chips only */
++ if (p_FmPort->fmRevInfo.majorRev < 6)
++ {
++#ifdef FM_NO_RESTRICT_ON_ACCESS_RSRC
++ p_FmPort->p_FmPortDriverParam->dfltCfg.excessive_threshold_register =
++ TRUE;
++#endif
++ p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_rebm_has_sgd = FALSE;
++ p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_tfne_has_features = FALSE;
++ }
++ else
++ {
++ p_FmPort->p_FmPortDriverParam->dfltCfg.excessive_threshold_register =
++ FALSE;
++ p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_rebm_has_sgd = TRUE;
++ p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_tfne_has_features = TRUE;
++ }
++ if (p_FmPort->fmRevInfo.majorRev == 4)
++ p_FmPort->p_FmPortDriverParam->dfltCfg.qmi_deq_options_support = FALSE;
++ else
++ p_FmPort->p_FmPortDriverParam->dfltCfg.qmi_deq_options_support = TRUE;
++
++ /* Continue with other parameters */
++ p_FmPort->p_FmPortDriverParam->baseAddr = baseAddr;
++ /* set memory map pointers */
++ p_FmPort->p_FmPortQmiRegs =
++ (t_FmPortQmiRegs *)UINT_TO_PTR(baseAddr + QMI_PORT_REGS_OFFSET);
++ p_FmPort->p_FmPortBmiRegs =
++ (u_FmPortBmiRegs *)UINT_TO_PTR(baseAddr + BMI_PORT_REGS_OFFSET);
++ p_FmPort->p_FmPortPrsRegs =
++ (t_FmPortPrsRegs *)UINT_TO_PTR(baseAddr + PRS_PORT_REGS_OFFSET);
++
++ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.privDataSize =
++ DEFAULT_PORT_bufferPrefixContent_privDataSize;
++ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult =
++ DEFAULT_PORT_bufferPrefixContent_passPrsResult;
++ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passTimeStamp =
++ DEFAULT_PORT_bufferPrefixContent_passTimeStamp;
++ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passAllOtherPCDInfo =
++ DEFAULT_PORT_bufferPrefixContent_passTimeStamp;
++ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign =
++ DEFAULT_PORT_bufferPrefixContent_dataAlign;
++ /* p_FmPort->p_FmPortDriverParam->dmaSwapData = (e_FmDmaSwapOption)DEFAULT_PORT_dmaSwapData;
++ p_FmPort->p_FmPortDriverParam->dmaIntContextCacheAttr = (e_FmDmaCacheOption)DEFAULT_PORT_dmaIntContextCacheAttr;
++ p_FmPort->p_FmPortDriverParam->dmaHeaderCacheAttr = (e_FmDmaCacheOption)DEFAULT_PORT_dmaHeaderCacheAttr;
++ p_FmPort->p_FmPortDriverParam->dmaScatterGatherCacheAttr = (e_FmDmaCacheOption)DEFAULT_PORT_dmaScatterGatherCacheAttr;
++ p_FmPort->p_FmPortDriverParam->dmaWriteOptimize = DEFAULT_PORT_dmaWriteOptimize;
++ */
++ p_FmPort->p_FmPortDriverParam->liodnBase = p_FmPortParams->liodnBase;
++ p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore =
++ DEFAULT_PORT_cheksumLastBytesIgnore;
++
++ p_FmPort->maxFrameLength = DEFAULT_PORT_maxFrameLength;
++ /* resource distribution. */
++ p_FmPort->fifoBufs.num = DEFAULT_PORT_numOfFifoBufs(p_FmPort->portType)
++ * BMI_FIFO_UNITS;
++ p_FmPort->fifoBufs.extra = DEFAULT_PORT_extraNumOfFifoBufs
++ * BMI_FIFO_UNITS;
++ p_FmPort->openDmas.num = DEFAULT_PORT_numOfOpenDmas(p_FmPort->portType);
++ p_FmPort->openDmas.extra =
++ DEFAULT_PORT_extraNumOfOpenDmas(p_FmPort->portType);
++ p_FmPort->tasks.num = DEFAULT_PORT_numOfTasks(p_FmPort->portType);
++ p_FmPort->tasks.extra = DEFAULT_PORT_extraNumOfTasks(p_FmPort->portType);
++
++
++#ifdef FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
++ if ((p_FmPort->fmRevInfo.majorRev == 6)
++ && (p_FmPort->fmRevInfo.minorRev == 0)
++ && ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX)))
++ {
++ p_FmPort->openDmas.num = 16;
++ p_FmPort->openDmas.extra = 0;
++ }
++#endif /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 */
++
++ /* Port type specific initialization: */
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX):
++ case (e_FM_PORT_TYPE_RX_10G):
++ /* Initialize FM port parameters for initialization phase only */
++ p_FmPort->p_FmPortDriverParam->cutBytesFromEnd =
++ DEFAULT_PORT_cutBytesFromEnd;
++ p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = FALSE;
++ p_FmPort->p_FmPortDriverParam->frmDiscardOverride =
++ DEFAULT_PORT_frmDiscardOverride;
++
++ tmpReg =
++ GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfp);
++ p_FmPort->p_FmPortDriverParam->rxFifoPriElevationLevel =
++ (((tmpReg & BMI_RX_FIFO_PRI_ELEVATION_MASK)
++ >> BMI_RX_FIFO_PRI_ELEVATION_SHIFT) + 1)
++ * BMI_FIFO_UNITS;
++ p_FmPort->p_FmPortDriverParam->rxFifoThreshold = (((tmpReg
++ & BMI_RX_FIFO_THRESHOLD_MASK)
++ >> BMI_RX_FIFO_THRESHOLD_SHIFT) + 1) * BMI_FIFO_UNITS;
++
++ p_FmPort->p_FmPortDriverParam->bufMargins.endMargins =
++ DEFAULT_PORT_BufMargins_endMargins;
++ p_FmPort->p_FmPortDriverParam->errorsToDiscard =
++ DEFAULT_PORT_errorsToDiscard;
++ p_FmPort->p_FmPortDriverParam->forwardReuseIntContext =
++ DEFAULT_PORT_forwardIntContextReuse;
++#if (DPAA_VERSION >= 11)
++ p_FmPort->p_FmPortDriverParam->noScatherGather =
++ DEFAULT_PORT_noScatherGather;
++#endif /* (DPAA_VERSION >= 11) */
++ break;
++
++ case (e_FM_PORT_TYPE_TX):
++ p_FmPort->p_FmPortDriverParam->dontReleaseBuf = FALSE;
++#ifdef FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127
++ tmpReg = 0x00001013;
++ WRITE_UINT32( p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfp,
++ tmpReg);
++#endif /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 */
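++ /* fall through - the remaining Tx defaults are common to 1G and 10G ports */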
++ case (e_FM_PORT_TYPE_TX_10G):
++ tmpReg =
++ GET_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfp);
++ p_FmPort->p_FmPortDriverParam->txFifoMinFillLevel = ((tmpReg
++ & BMI_TX_FIFO_MIN_FILL_MASK)
++ >> BMI_TX_FIFO_MIN_FILL_SHIFT) * BMI_FIFO_UNITS;
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
++ (uint8_t)(((tmpReg & BMI_FIFO_PIPELINE_DEPTH_MASK)
++ >> BMI_FIFO_PIPELINE_DEPTH_SHIFT) + 1);
++ p_FmPort->p_FmPortDriverParam->txFifoLowComfLevel = (((tmpReg
++ & BMI_TX_LOW_COMF_MASK) >> BMI_TX_LOW_COMF_SHIFT) + 1)
++ * BMI_FIFO_UNITS;
++
++ p_FmPort->p_FmPortDriverParam->deqType = DEFAULT_PORT_deqType;
++ p_FmPort->p_FmPortDriverParam->deqPrefetchOption =
++ DEFAULT_PORT_deqPrefetchOption;
++ p_FmPort->p_FmPortDriverParam->deqHighPriority =
++ (bool)((p_FmPort->portType == e_FM_PORT_TYPE_TX) ? DEFAULT_PORT_deqHighPriority_1G :
++ DEFAULT_PORT_deqHighPriority_10G);
++ p_FmPort->p_FmPortDriverParam->deqByteCnt =
++ (uint16_t)(
++ (p_FmPort->portType == e_FM_PORT_TYPE_TX) ? DEFAULT_PORT_deqByteCnt_1G :
++ DEFAULT_PORT_deqByteCnt_10G);
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ p_FmPort->p_FmPortDriverParam->errorsToDiscard =
++ DEFAULT_PORT_errorsToDiscard;
++#if (DPAA_VERSION >= 11)
++ p_FmPort->p_FmPortDriverParam->noScatherGather =
++ DEFAULT_PORT_noScatherGather;
++#endif /* (DPAA_VERSION >= 11) */
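++ /* fall through - dequeue defaults below are shared with host-command ports */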
++ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
++ p_FmPort->p_FmPortDriverParam->deqPrefetchOption =
++ DEFAULT_PORT_deqPrefetchOption_HC;
++ p_FmPort->p_FmPortDriverParam->deqHighPriority =
++ DEFAULT_PORT_deqHighPriority_1G;
++ p_FmPort->p_FmPortDriverParam->deqType = DEFAULT_PORT_deqType;
++ p_FmPort->p_FmPortDriverParam->deqByteCnt =
++ DEFAULT_PORT_deqByteCnt_1G;
++
++ tmpReg =
++ GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofp);
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
++ (uint8_t)(((tmpReg & BMI_FIFO_PIPELINE_DEPTH_MASK)
++ >> BMI_FIFO_PIPELINE_DEPTH_SHIFT) + 1);
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
++ && (p_FmPortParams->portId != FM_OH_PORT_ID))
++ {
++ /* Overwrite HC defaults */
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
++ DEFAULT_PORT_fifoDeqPipelineDepth_OH;
++ }
++
++#ifndef FM_FRAME_END_PARAMS_FOR_OP
++ if (p_FmPort->fmRevInfo.majorRev < 6)
++ p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore = DEFAULT_notSupported;
++#endif /* !FM_FRAME_END_PARAMS_FOR_OP */
++
++#ifndef FM_DEQ_PIPELINE_PARAMS_FOR_OP
++ if (!((p_FmPort->fmRevInfo.majorRev == 4) ||
++ (p_FmPort->fmRevInfo.majorRev >= 6)))
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth = DEFAULT_notSupported;
++#endif /* !FM_DEQ_PIPELINE_PARAMS_FOR_OP */
++ break;
++
++ default:
++ XX_Free(p_FmPort->p_FmPortDriverParam);
++ XX_Free(p_FmPort);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
++ return NULL;
++ }
++#ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
++ if (p_FmPort->fmRevInfo.majorRev == 4)
++ p_FmPort->p_FmPortDriverParam->deqPrefetchOption = (e_FmPortDeqPrefetchOption)DEFAULT_notSupported;
++#endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
++
++ p_FmPort->imEn = p_FmPortParams->independentModeEnable;
++
++ if (p_FmPort->imEn)
++ {
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G))
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
++ DEFAULT_PORT_fifoDeqPipelineDepth_IM;
++ FmPortConfigIM(p_FmPort, p_FmPortParams);
++ }
++ else
++ {
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX):
++ case (e_FM_PORT_TYPE_RX_10G):
++ /* Initialize FM port parameters for initialization phase only */
++ memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools,
++ &p_FmPortParams->specificParams.rxParams.extBufPools,
++ sizeof(t_FmExtPools));
++ p_FmPort->p_FmPortDriverParam->errFqid =
++ p_FmPortParams->specificParams.rxParams.errFqid;
++ p_FmPort->p_FmPortDriverParam->dfltFqid =
++ p_FmPortParams->specificParams.rxParams.dfltFqid;
++ p_FmPort->p_FmPortDriverParam->liodnOffset =
++ p_FmPortParams->specificParams.rxParams.liodnOffset;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ case (e_FM_PORT_TYPE_TX):
++ case (e_FM_PORT_TYPE_TX_10G):
++ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
++ p_FmPort->p_FmPortDriverParam->errFqid =
++ p_FmPortParams->specificParams.nonRxParams.errFqid;
++ p_FmPort->p_FmPortDriverParam->deqSubPortal =
++ (uint8_t)(p_FmPortParams->specificParams.nonRxParams.qmChannel
++ & QMI_DEQ_CFG_SUBPORTAL_MASK);
++ p_FmPort->p_FmPortDriverParam->dfltFqid =
++ p_FmPortParams->specificParams.nonRxParams.dfltFqid;
++ break;
++ default:
++ XX_Free(p_FmPort->p_FmPortDriverParam);
++ XX_Free(p_FmPort);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
++ return NULL;
++ }
++ }
++
++ memset(p_FmPort->name, 0, (sizeof(char)) * MODULE_NAME_SIZE);
++ if (Sprint(
++ p_FmPort->name,
++ "FM-%d-port-%s-%d",
++ FmGetId(p_FmPort->h_Fm),
++ ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING
++ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) ? "OH" :
++ (p_FmPort->portType == e_FM_PORT_TYPE_RX ? "1g-RX" :
++ (p_FmPort->portType == e_FM_PORT_TYPE_TX ? "1g-TX" :
++ (p_FmPort->portType
++ == e_FM_PORT_TYPE_RX_10G ? "10g-RX" :
++ "10g-TX")))),
++ p_FmPort->portId) == 0)
++ {
++ XX_Free(p_FmPort->p_FmPortDriverParam);
++ XX_Free(p_FmPort);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++ return NULL;
++ }
++
++ p_FmPort->h_Spinlock = XX_InitSpinlock();
++ if (!p_FmPort->h_Spinlock)
++ {
++ XX_Free(p_FmPort->p_FmPortDriverParam);
++ XX_Free(p_FmPort);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++ return NULL;
++ }
++
++ return p_FmPort;
++}
++
++t_FmPort *rx_port = 0;
++t_FmPort *tx_port = 0;
++
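++/* Typical bring-up order, as implied by the driver-parameter lifetime
++ * (a sketch, not part of the original sources; variable names are
++ * illustrative):
++ * h_Port = FM_PORT_Config(&portParams); - allocate, fill defaults
++ * FM_PORT_ConfigNumOfTasks(h_Port, &tasks); - optional advanced overrides
++ * FM_PORT_Init(h_Port); - commit settings to hardware
++ * Advanced-config calls are valid only between Config and Init, since
++ * Init frees p_FmPortDriverParam.
++ */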
++/**************************************************************************//**
++ @Function FM_PORT_Init
++
++ @Description Initializes the FM Port module
++
++ @Param[in] h_FmPort - FM Port module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++ *//***************************************************************************/
++t_Error FM_PORT_Init(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_FmPortDriverParam *p_DriverParams;
++ t_Error errCode;
++ t_FmInterModulePortInitParams fmParams;
++ t_FmRevisionInfo revInfo;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ errCode = FmSpBuildBufferStructure(
++ &p_FmPort->p_FmPortDriverParam->intContext,
++ &p_FmPort->p_FmPortDriverParam->bufferPrefixContent,
++ &p_FmPort->p_FmPortDriverParam->bufMargins,
++ &p_FmPort->bufferOffsets, &p_FmPort->internalBufferOffset);
++ if (errCode != E_OK)
++ RETURN_ERROR(MAJOR, errCode, NO_MSG);
++#ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
++ if ((p_FmPort->p_FmPortDriverParam->bcbWorkaround) &&
++ (p_FmPort->portType == e_FM_PORT_TYPE_RX))
++ {
++ p_FmPort->p_FmPortDriverParam->errorsToDiscard |= FM_PORT_FRM_ERR_PHYSICAL;
++ if (!p_FmPort->fifoBufs.num)
++ p_FmPort->fifoBufs.num = DEFAULT_PORT_numOfFifoBufs(p_FmPort->portType)*BMI_FIFO_UNITS;
++ p_FmPort->fifoBufs.num += 4*KILOBYTE;
++ }
++#endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */
++
++ CHECK_INIT_PARAMETERS(p_FmPort, CheckInitParameters);
++
++ p_DriverParams = p_FmPort->p_FmPortDriverParam;
++
++ /* Set up flibs port structure */
++ memset(&p_FmPort->port, 0, sizeof(struct fman_port));
++ p_FmPort->port.type = (enum fman_port_type)p_FmPort->portType;
++ FM_GetRevision(p_FmPort->h_Fm, &revInfo);
++ p_FmPort->port.fm_rev_maj = revInfo.majorRev;
++ p_FmPort->port.fm_rev_min = revInfo.minorRev;
++ p_FmPort->port.bmi_regs =
++ (union fman_port_bmi_regs *)UINT_TO_PTR(p_DriverParams->baseAddr + BMI_PORT_REGS_OFFSET);
++ p_FmPort->port.qmi_regs =
++ (struct fman_port_qmi_regs *)UINT_TO_PTR(p_DriverParams->baseAddr + QMI_PORT_REGS_OFFSET);
++ p_FmPort->port.ext_pools_num = (uint8_t)((revInfo.majorRev == 4) ? 4 : 8);
++ p_FmPort->port.im_en = p_FmPort->imEn;
++ p_FmPort->p_FmPortPrsRegs =
++ (t_FmPortPrsRegs *)UINT_TO_PTR(p_DriverParams->baseAddr + PRS_PORT_REGS_OFFSET);
++
++ if (((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) && !p_FmPort->imEn)
++ {
++ /* Call the external Buffer routine which also checks fifo
++ size and updates it if necessary */
++ /* define external buffer pools and pool depletion*/
++ errCode = SetExtBufferPools(p_FmPort);
++ if (errCode)
++ RETURN_ERROR(MAJOR, errCode, NO_MSG);
++ /* check if the largest external buffer pool is large enough */
++ if (p_DriverParams->bufMargins.startMargins + MIN_EXT_BUF_SIZE
++ + p_DriverParams->bufMargins.endMargins
++ > p_FmPort->rxPoolsParams.largestBufSize)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("bufMargins.startMargins (%d) + minimum buf size (64) + bufMargins.endMargins (%d) is larger than maximum external buffer size (%d)", p_DriverParams->bufMargins.startMargins, p_DriverParams->bufMargins.endMargins, p_FmPort->rxPoolsParams.largestBufSize));
++ }
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ {
++ {
++#ifdef FM_NO_OP_OBSERVED_POOLS
++ t_FmRevisionInfo revInfo;
++
++ FM_GetRevision(p_FmPort->h_Fm, &revInfo);
++ if ((revInfo.majorRev == 4) && (p_DriverParams->enBufPoolDepletion))
++#endif /* FM_NO_OP_OBSERVED_POOLS */
++ {
++ /* define external buffer pools */
++ errCode = SetExtBufferPools(p_FmPort);
++ if (errCode)
++ RETURN_ERROR(MAJOR, errCode, NO_MSG);
++ }
++ }
++ }
++
++ /************************************************************/
++ /* Call FM module routine for communicating parameters */
++ /************************************************************/
++ memset(&fmParams, 0, sizeof(fmParams));
++ fmParams.hardwarePortId = p_FmPort->hardwarePortId;
++ fmParams.portType = (e_FmPortType)p_FmPort->portType;
++ fmParams.numOfTasks = (uint8_t)p_FmPort->tasks.num;
++ fmParams.numOfExtraTasks = (uint8_t)p_FmPort->tasks.extra;
++ fmParams.numOfOpenDmas = (uint8_t)p_FmPort->openDmas.num;
++ fmParams.numOfExtraOpenDmas = (uint8_t)p_FmPort->openDmas.extra;
++
++ if (p_FmPort->fifoBufs.num)
++ {
++ errCode = VerifySizeOfFifo(p_FmPort);
++ if (errCode != E_OK)
++ RETURN_ERROR(MAJOR, errCode, NO_MSG);
++ }
++ fmParams.sizeOfFifo = p_FmPort->fifoBufs.num;
++ fmParams.extraSizeOfFifo = p_FmPort->fifoBufs.extra;
++ fmParams.independentMode = p_FmPort->imEn;
++ fmParams.liodnOffset = p_DriverParams->liodnOffset;
++ fmParams.liodnBase = p_DriverParams->liodnBase;
++ fmParams.deqPipelineDepth =
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth;
++ fmParams.maxFrameLength = p_FmPort->maxFrameLength;
++#ifndef FM_DEQ_PIPELINE_PARAMS_FOR_OP
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) ||
++ (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
++ {
++ if (!((p_FmPort->fmRevInfo.majorRev == 4) ||
++ (p_FmPort->fmRevInfo.majorRev >= 6)))
++ /* HC ports do not have fifoDeqPipelineDepth, but it is needed only
++ * for deq threshold calculation.
++ */
++ fmParams.deqPipelineDepth = 2;
++ }
++#endif /* !FM_DEQ_PIPELINE_PARAMS_FOR_OP */
++
++ errCode = FmGetSetPortParams(p_FmPort->h_Fm, &fmParams);
++ if (errCode)
++ RETURN_ERROR(MAJOR, errCode, NO_MSG);
++
++ /* get params for use in init */
++ p_FmPort->fmMuramPhysBaseAddr =
++ (uint64_t)((uint64_t)(fmParams.fmMuramPhysBaseAddr.low)
++ | ((uint64_t)(fmParams.fmMuramPhysBaseAddr.high) << 32));
++ p_FmPort->h_FmMuram = FmGetMuramHandle(p_FmPort->h_Fm);
++
++ errCode = InitLowLevelDriver(p_FmPort);
++ if (errCode != E_OK)
++ RETURN_ERROR(MAJOR, errCode, NO_MSG);
++
++ FmPortDriverParamFree(p_FmPort);
++
++#if (DPAA_VERSION >= 11)
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ {
++ t_FmPcdCtrlParamsPage *p_ParamsPage;
++
++ FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
++ (void**)&p_ParamsPage);
++ ASSERT_COND(p_ParamsPage);
++
++ WRITE_UINT32(p_ParamsPage->misc, FM_CTL_PARAMS_PAGE_ALWAYS_ON);
++#ifdef FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ {
++ WRITE_UINT32(
++ p_ParamsPage->misc,
++ (GET_UINT32(p_ParamsPage->misc) | FM_CTL_PARAMS_PAGE_OP_FIX_EN));
++ WRITE_UINT32(
++ p_ParamsPage->discardMask,
++ GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm));
++ }
++#endif /* FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675 */
++#ifdef FM_ERROR_VSP_NO_MATCH_SW006
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ WRITE_UINT32(
++ p_ParamsPage->errorsDiscardMask,
++ (GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm) | GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsem)));
++ else
++ WRITE_UINT32(
++ p_ParamsPage->errorsDiscardMask,
++ (GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm) | GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsem)));
++#endif /* FM_ERROR_VSP_NO_MATCH_SW006 */
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (p_FmPort->deepSleepVars.autoResMaxSizes)
++ FmPortConfigAutoResForDeepSleepSupport1(p_FmPort);
++ return E_OK;
++}
++
++/**************************************************************************//**
++ @Function FM_PORT_Free
++
++ @Description Frees all resources that were assigned to FM module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmPort - FM module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++ *//***************************************************************************/
++t_Error FM_PORT_Free(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_FmInterModulePortFreeParams fmParams;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++
++ if (p_FmPort->pcdEngines)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("Trying to free a port with PCD. FM_PORT_DeletePCD must be called first."));
++
++ if (p_FmPort->enabled)
++ {
++ if (FM_PORT_Disable(p_FmPort) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM_PORT_Disable FAILED"));
++ }
++
++ if (p_FmPort->imEn)
++ FmPortImFree(p_FmPort);
++
++ FmPortDriverParamFree(p_FmPort);
++
++ memset(&fmParams, 0, sizeof(fmParams));
++ fmParams.hardwarePortId = p_FmPort->hardwarePortId;
++ fmParams.portType = (e_FmPortType)p_FmPort->portType;
++ fmParams.deqPipelineDepth =
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth;
++
++ FmFreePortParams(p_FmPort->h_Fm, &fmParams);
++
++#if (DPAA_VERSION >= 11)
++ if (FmVSPFreeForPort(p_FmPort->h_Fm, p_FmPort->portType, p_FmPort->portId)
++ != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("VSP free of port FAILED"));
++
++ if (p_FmPort->p_ParamsPage)
++ FM_MURAM_FreeMem(p_FmPort->h_FmMuram, p_FmPort->p_ParamsPage);
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (p_FmPort->h_Spinlock)
++ XX_FreeSpinlock(p_FmPort->h_Spinlock);
++
++ XX_Free(p_FmPort);
++
++ return E_OK;
++}
++
++/*************************************************/
++/* API Advanced Init unit functions */
++/*************************************************/
++
++t_Error FM_PORT_ConfigNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_OpenDmas)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->p_FmPortDriverParam->setNumOfOpenDmas = TRUE;
++ memcpy(&p_FmPort->openDmas, p_OpenDmas, sizeof(t_FmPortRsrc));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ memcpy(&p_FmPort->tasks, p_NumOfTasks, sizeof(t_FmPortRsrc));
++ p_FmPort->p_FmPortDriverParam->setNumOfTasks = TRUE;
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->p_FmPortDriverParam->setSizeOfFifo = TRUE;
++ memcpy(&p_FmPort->fifoBufs, p_SizeOfFifo, sizeof(t_FmPortRsrc));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDeqHighPriority(t_Handle h_FmPort, bool highPri)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("not available for Rx ports"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.deq_high_pri = highPri;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDeqType(t_Handle h_FmPort, e_FmPortDeqType deqType)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("not available for Rx ports"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.deq_type =
++ (enum fman_port_deq_type)deqType;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDeqPrefetchOption(
++ t_Handle h_FmPort, e_FmPortDeqPrefetchOption deqPrefetchOption)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("not available for Rx ports"));
++ p_FmPort->p_FmPortDriverParam->dfltCfg.deq_prefetch_opt =
++ (enum fman_port_deq_prefetch)deqPrefetchOption;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigBackupPools(t_Handle h_FmPort,
++ t_FmBackupBmPools *p_BackupBmPools)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Rx ports only"));
++
++ p_FmPort->p_FmPortDriverParam->p_BackupBmPools =
++ (t_FmBackupBmPools *)XX_Malloc(sizeof(t_FmBackupBmPools));
++ if (!p_FmPort->p_FmPortDriverParam->p_BackupBmPools)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_BackupBmPools allocation failed"));
++ memcpy(p_FmPort->p_FmPortDriverParam->p_BackupBmPools, p_BackupBmPools,
++ sizeof(t_FmBackupBmPools));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDeqByteCnt(t_Handle h_FmPort, uint16_t deqByteCnt)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("not available for Rx ports"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.deq_byte_cnt = deqByteCnt;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigBufferPrefixContent(
++ t_Handle h_FmPort, t_FmBufferPrefixContent *p_FmBufferPrefixContent)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ memcpy(&p_FmPort->p_FmPortDriverParam->bufferPrefixContent,
++ p_FmBufferPrefixContent, sizeof(t_FmBufferPrefixContent));
++    /* if dataAlign was not initialized by the user, fall back to the driver's default */
++ if (!p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign)
++ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign =
++ DEFAULT_PORT_bufferPrefixContent_dataAlign;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigCheksumLastBytesIgnore(t_Handle h_FmPort,
++ uint8_t checksumLastBytesIgnore)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.checksum_bytes_ignore =
++ checksumLastBytesIgnore;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigCutBytesFromEnd(t_Handle h_FmPort,
++ uint8_t cutBytesFromEnd)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Rx ports only"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_cut_end_bytes = cutBytesFromEnd;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigPoolDepletion(t_Handle h_FmPort,
++ t_FmBufPoolDepletion *p_BufPoolDepletion)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Rx ports only"));
++
++ p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = TRUE;
++ memcpy(&p_FmPort->p_FmPortDriverParam->bufPoolDepletion, p_BufPoolDepletion,
++ sizeof(t_FmBufPoolDepletion));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigObservedPoolDepletion(
++ t_Handle h_FmPort,
++ t_FmPortObservedBufPoolDepletion *p_FmPortObservedBufPoolDepletion)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for OP ports only"));
++
++ p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = TRUE;
++ memcpy(&p_FmPort->p_FmPortDriverParam->bufPoolDepletion,
++ &p_FmPortObservedBufPoolDepletion->poolDepletionParams,
++ sizeof(t_FmBufPoolDepletion));
++ memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools,
++ &p_FmPortObservedBufPoolDepletion->poolsParams,
++ sizeof(t_FmExtPools));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigExtBufPools(t_Handle h_FmPort, t_FmExtPools *p_FmExtPools)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for OP ports only"));
++
++ memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools, p_FmExtPools,
++ sizeof(t_FmExtPools));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDontReleaseTxBufToBM(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_TX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Tx ports only"));
++
++ p_FmPort->p_FmPortDriverParam->dontReleaseBuf = TRUE;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDfltColor(t_Handle h_FmPort, e_FmPortColor color)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ p_FmPort->p_FmPortDriverParam->dfltCfg.color = (enum fman_port_color)color;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigSyncReq(t_Handle h_FmPort, bool syncReq)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("Not available for Tx ports"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.sync_req = syncReq;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigFrmDiscardOverride(t_Handle h_FmPort, bool override)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("Not available for Tx ports"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.discard_override = override;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigErrorsToDiscard(t_Handle h_FmPort,
++ fmPortFrameErrSelect_t errs)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++
++ p_FmPort->p_FmPortDriverParam->errorsToDiscard = errs;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDmaSwapData(t_Handle h_FmPort, e_FmDmaSwapOption swapData)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_swap_data =
++ (enum fman_port_dma_swap)swapData;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDmaIcCacheAttr(t_Handle h_FmPort,
++ e_FmDmaCacheOption intContextCacheAttr)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_ic_stash_on =
++ (bool)(intContextCacheAttr == e_FM_DMA_STASH);
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDmaHdrAttr(t_Handle h_FmPort,
++ e_FmDmaCacheOption headerCacheAttr)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_header_stash_on =
++ (bool)(headerCacheAttr == e_FM_DMA_STASH);
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDmaScatterGatherAttr(
++ t_Handle h_FmPort, e_FmDmaCacheOption scatterGatherCacheAttr)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_sg_stash_on =
++ (bool)(scatterGatherCacheAttr == e_FM_DMA_STASH);
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigDmaWriteOptimize(t_Handle h_FmPort, bool optimize)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("Not available for Tx ports"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_write_optimize = optimize;
++
++ return E_OK;
++}
++
++#if (DPAA_VERSION >= 11)
++t_Error FM_PORT_ConfigNoScatherGather(t_Handle h_FmPort, bool noScatherGather)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->p_FmPortDriverParam->noScatherGather = noScatherGather;
++
++ return E_OK;
++}
++#endif /* (DPAA_VERSION >= 11) */
++
++t_Error FM_PORT_ConfigForwardReuseIntContext(t_Handle h_FmPort,
++ bool forwardReuse)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Rx ports only"));
++
++ p_FmPort->p_FmPortDriverParam->forwardReuseIntContext = forwardReuse;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigMaxFrameLength(t_Handle h_FmPort, uint16_t length)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->maxFrameLength = length;
++
++ return E_OK;
++}
++
++#ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
++t_Error FM_PORT_ConfigBCBWorkaround(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->p_FmPortDriverParam->bcbWorkaround = TRUE;
++
++ return E_OK;
++}
++#endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */
++
++/****************************************************/
++/* Hidden-DEBUG Only API */
++/****************************************************/
++
++t_Error FM_PORT_ConfigTxFifoMinFillLevel(t_Handle h_FmPort,
++ uint32_t minFillLevel)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_TX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Tx ports only"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_min_level = minFillLevel;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigFifoDeqPipelineDepth(t_Handle h_FmPort,
++ uint8_t deqPipelineDepth)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("Not available for Rx ports"));
++
++ if (p_FmPort->imEn)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("Not available for IM ports!"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
++ deqPipelineDepth;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigTxFifoLowComfLevel(t_Handle h_FmPort,
++ uint32_t fifoLowComfLevel)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_TX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Tx ports only"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_low_comf_level =
++ fifoLowComfLevel;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigRxFifoThreshold(t_Handle h_FmPort, uint32_t fifoThreshold)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Rx ports only"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_fifo_thr = fifoThreshold;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigRxFifoPriElevationLevel(t_Handle h_FmPort,
++ uint32_t priElevationLevel)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Rx ports only"));
++
++ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_pri_elevation = priElevationLevel;
++
++ return E_OK;
++}
++/****************************************************/
++/* API Run-time Control unit functions */
++/****************************************************/
++
++t_Error FM_PORT_SetNumOfOpenDmas(t_Handle h_FmPort,
++ t_FmPortRsrc *p_NumOfOpenDmas)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if ((!p_NumOfOpenDmas->num) || (p_NumOfOpenDmas->num > MAX_NUM_OF_DMAS))
++ RETURN_ERROR( MAJOR, E_INVALID_VALUE,
++ ("openDmas-num can't be larger than %d", MAX_NUM_OF_DMAS));
++ if (p_NumOfOpenDmas->extra > MAX_NUM_OF_EXTRA_DMAS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("openDmas-extra can't be larger than %d", MAX_NUM_OF_EXTRA_DMAS));
++ err = FmSetNumOfOpenDmas(p_FmPort->h_Fm, p_FmPort->hardwarePortId,
++ (uint8_t*)&p_NumOfOpenDmas->num,
++ (uint8_t*)&p_NumOfOpenDmas->extra, FALSE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ memcpy(&p_FmPort->openDmas, p_NumOfOpenDmas, sizeof(t_FmPortRsrc));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_SetNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ /* only driver uses host command port, so ASSERT rather than RETURN_ERROR */
++ ASSERT_COND(p_FmPort->portType != e_FM_PORT_TYPE_OH_HOST_COMMAND);
++
++ if ((!p_NumOfTasks->num) || (p_NumOfTasks->num > MAX_NUM_OF_TASKS))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("NumOfTasks-num can't be larger than %d", MAX_NUM_OF_TASKS));
++ if (p_NumOfTasks->extra > MAX_NUM_OF_EXTRA_TASKS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("NumOfTasks-extra can't be larger than %d", MAX_NUM_OF_EXTRA_TASKS));
++
++ err = FmSetNumOfTasks(p_FmPort->h_Fm, p_FmPort->hardwarePortId,
++ (uint8_t*)&p_NumOfTasks->num,
++ (uint8_t*)&p_NumOfTasks->extra, FALSE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ /* update driver's struct */
++ memcpy(&p_FmPort->tasks, p_NumOfTasks, sizeof(t_FmPortRsrc));
++ return E_OK;
++}
++
++t_Error FM_PORT_SetSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if (!p_SizeOfFifo->num || (p_SizeOfFifo->num > MAX_PORT_FIFO_SIZE))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("SizeOfFifo-num has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
++ if (p_SizeOfFifo->num % BMI_FIFO_UNITS)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("SizeOfFifo-num has to be divisible by %d", BMI_FIFO_UNITS));
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ /* extra FIFO size (allowed only to Rx ports) */
++ if (p_SizeOfFifo->extra % BMI_FIFO_UNITS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("SizeOfFifo-extra has to be divisible by %d", BMI_FIFO_UNITS));
++ }
++ else
++ if (p_SizeOfFifo->extra)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++            ("No SizeOfFifo-extra for non-Rx ports"));
++
++ memcpy(&p_FmPort->fifoBufs, p_SizeOfFifo, sizeof(t_FmPortRsrc));
++
++ /* we do not change user's parameter */
++ err = VerifySizeOfFifo(p_FmPort);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ err = FmSetSizeOfFifo(p_FmPort->h_Fm, p_FmPort->hardwarePortId,
++ &p_SizeOfFifo->num, &p_SizeOfFifo->extra, FALSE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return E_OK;
++}
++
++uint32_t FM_PORT_GetBufferDataOffset(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
++ 0);
++
++ return p_FmPort->bufferOffsets.dataOffset;
++}
++
++uint8_t * FM_PORT_GetBufferICInfo(t_Handle h_FmPort, char *p_Data)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
++ NULL);
++
++ if (p_FmPort->bufferOffsets.pcdInfoOffset == ILLEGAL_BASE)
++ return NULL;
++
++ return (uint8_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.pcdInfoOffset);
++}
++
++t_FmPrsResult * FM_PORT_GetBufferPrsResult(t_Handle h_FmPort, char *p_Data)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
++ NULL);
++
++ if (p_FmPort->bufferOffsets.prsResultOffset == ILLEGAL_BASE)
++ return NULL;
++
++ return (t_FmPrsResult *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.prsResultOffset);
++}
++
++uint64_t * FM_PORT_GetBufferTimeStamp(t_Handle h_FmPort, char *p_Data)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
++ NULL);
++
++ if (p_FmPort->bufferOffsets.timeStampOffset == ILLEGAL_BASE)
++ return NULL;
++
++ return (uint64_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.timeStampOffset);
++}
++
++uint8_t * FM_PORT_GetBufferHashResult(t_Handle h_FmPort, char *p_Data)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
++ NULL);
++
++ if (p_FmPort->bufferOffsets.hashResultOffset == ILLEGAL_BASE)
++ return NULL;
++
++ return (uint8_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.hashResultOffset);
++}
++
++t_Error FM_PORT_Disable(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if (p_FmPort->imEn)
++ FmPortImDisable(p_FmPort);
++
++ err = fman_port_disable(&p_FmPort->port);
++ if (err == -EBUSY)
++ {
++ DBG(WARNING, ("%s: BMI or QMI is Busy. Port forced down",
++ p_FmPort->name));
++ }
++ else
++ if (err != 0)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_disable"));
++ }
++
++ p_FmPort->enabled = FALSE;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_Enable(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++    /* FM_PORT_Free uses this flag to decide whether to
++       disable the port, so set it to TRUE before enabling
++       anything. That way, if part of the enable sequence
++       fails, whatever was already enabled can still be
++       disabled during Free. For example, if BMI enable
++       succeeded but QMI failed, BMI still needs to be
++       disabled by Free. */
++ p_FmPort->enabled = TRUE;
++
++ if (p_FmPort->imEn)
++ FmPortImEnable(p_FmPort);
++
++ err = fman_port_enable(&p_FmPort->port);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_enable"));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_SetRateLimit(t_Handle h_FmPort, t_FmPortRateLimit *p_RateLimit)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ uint8_t factor, countUnitBit;
++ uint16_t baseGran;
++ struct fman_port_rate_limiter params;
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_TX_10G):
++ case (e_FM_PORT_TYPE_TX):
++ baseGran = BMI_RATE_LIMIT_GRAN_TX;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ baseGran = BMI_RATE_LIMIT_GRAN_OP;
++ break;
++ default:
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Tx and Offline parsing ports only"));
++ }
++
++    countUnitBit = (uint8_t)FmGetTimeStampScale(p_FmPort->h_Fm); /* timestamp scale, in nanosecond units */
++ /* normally, we use 1 usec as the reference count */
++ factor = 1;
++ /* if ratelimit is too small for a 1usec factor, multiply the factor */
++ while (p_RateLimit->rateLimit < baseGran / factor)
++ {
++ if (countUnitBit == 31)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Rate limit is too small"));
++
++ countUnitBit++;
++ factor <<= 1;
++ }
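++    /* Illustrative values only: if baseGran were 1000 and the requested
++       rate 300, the loop would double factor to 2 and then 4 (coarsening
++       the time unit twice) until 300 >= 1000/4 holds. */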
++    /* if the rate limit is too large for a 1 usec factor, it also exceeds the max rate */
++ if (p_RateLimit->rateLimit
++ > ((uint32_t)baseGran * (1 << 10) * (uint32_t)factor))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Rate limit is too large"));
++
++ if (!p_RateLimit->maxBurstSize
++ || (p_RateLimit->maxBurstSize > BMI_RATE_LIMIT_MAX_BURST_SIZE))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("maxBurstSize must be between 1K and %dk", BMI_RATE_LIMIT_MAX_BURST_SIZE));
++
++ params.count_1micro_bit = (uint8_t)FmGetTimeStampScale(p_FmPort->h_Fm);
++ params.high_burst_size_gran = FALSE;
++ params.burst_size = p_RateLimit->maxBurstSize;
++ params.rate = p_RateLimit->rateLimit;
++ params.rate_factor = E_FMAN_PORT_RATE_DOWN_NONE;
++
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ {
++#ifndef FM_NO_ADVANCED_RATE_LIMITER
++
++ if ((p_FmPort->fmRevInfo.majorRev == 4)
++ || (p_FmPort->fmRevInfo.majorRev >= 6))
++ {
++ params.high_burst_size_gran = TRUE;
++ }
++ else
++#endif /* ! FM_NO_ADVANCED_RATE_LIMITER */
++ {
++ if (p_RateLimit->rateLimitDivider
++ != e_FM_PORT_DUAL_RATE_LIMITER_NONE)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++ ("FM_PORT_ConfigDualRateLimitScaleDown"));
++
++            if (p_RateLimit->maxBurstSize % 1000)
++            {
++                p_RateLimit->maxBurstSize =
++                        (uint16_t)((p_RateLimit->maxBurstSize / 1000) + 1);
++                DBG(WARNING, ("rateLimit.maxBurstSize rounded up to %d",
++                              p_RateLimit->maxBurstSize * 1000));
++            }
++ else
++ p_RateLimit->maxBurstSize = (uint16_t)(p_RateLimit->maxBurstSize
++ / 1000);
++ }
++ params.rate_factor =
++ (enum fman_port_rate_limiter_scale_down)p_RateLimit->rateLimitDivider;
++ params.burst_size = p_RateLimit->maxBurstSize;
++ }
++
++ err = fman_port_set_rate_limiter(&p_FmPort->port, &params);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_rate_limiter"));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_DeleteRateLimit(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Tx and Offline parsing ports only"));
++
++ err = fman_port_delete_rate_limiter(&p_FmPort->port);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_rate_limiter"));
++ return E_OK;
++}
++
++t_Error FM_PORT_SetPfcPrioritiesMappingToQmanWQ(t_Handle h_FmPort, uint8_t prio,
++ uint8_t wq)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ uint32_t tmpReg;
++ uint32_t wqTmpReg;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_TX_10G))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("PFC mapping is available for Tx ports only"));
++
++ if (prio > 7)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
++ ("PFC priority (%d) is out of range (0-7)", prio));
++ if (wq > 7)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
++ ("WQ (%d) is out of range (0-7)", wq));
++
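++    /* fmbm_tpfcm[0] packs eight 4-bit WQ fields, one per PFC priority,
++       with priority 0 in the most-significant nibble - hence the
++       (7 - prio) * 4 bit offset below */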
++ tmpReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tpfcm[0]);
++ tmpReg &= ~(0xf << ((7 - prio) * 4));
++ wqTmpReg = ((uint32_t)wq << ((7 - prio) * 4));
++ tmpReg |= wqTmpReg;
++
++ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tpfcm[0],
++ tmpReg);
++
++ return E_OK;
++}
++
++t_Error FM_PORT_SetFrameQueueCounters(t_Handle h_FmPort, bool enable)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ fman_port_set_queue_cnt_mode(&p_FmPort->port, enable);
++
++ return E_OK;
++}
++
++t_Error FM_PORT_SetPerformanceCounters(t_Handle h_FmPort, bool enable)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ err = fman_port_set_perf_cnt_mode(&p_FmPort->port, enable);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_perf_cnt_mode"));
++ return E_OK;
++}
++
++t_Error FM_PORT_SetPerformanceCountersParams(
++ t_Handle h_FmPort, t_FmPortPerformanceCnt *p_FmPortPerformanceCnt)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ struct fman_port_perf_cnt_params params;
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++
++ /* check parameters */
++ if (!p_FmPortPerformanceCnt->taskCompVal
++ || (p_FmPortPerformanceCnt->taskCompVal > p_FmPort->tasks.num))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("taskCompVal (%d) has to be in the range of 1 - %d (current value)!", p_FmPortPerformanceCnt->taskCompVal, p_FmPort->tasks.num));
++ if (!p_FmPortPerformanceCnt->dmaCompVal
++ || (p_FmPortPerformanceCnt->dmaCompVal > p_FmPort->openDmas.num))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("dmaCompVal (%d) has to be in the range of 1 - %d (current value)!", p_FmPortPerformanceCnt->dmaCompVal, p_FmPort->openDmas.num));
++ if (!p_FmPortPerformanceCnt->fifoCompVal
++ || (p_FmPortPerformanceCnt->fifoCompVal > p_FmPort->fifoBufs.num))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("fifoCompVal (%d) has to be in the range of 256 - %d (current value)!", p_FmPortPerformanceCnt->fifoCompVal, p_FmPort->fifoBufs.num));
++ if (p_FmPortPerformanceCnt->fifoCompVal % BMI_FIFO_UNITS)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("fifoCompVal (%d) has to be divisible by %d", p_FmPortPerformanceCnt->fifoCompVal, BMI_FIFO_UNITS));
++
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ if (!p_FmPortPerformanceCnt->queueCompVal
++ || (p_FmPortPerformanceCnt->queueCompVal
++ > MAX_PERFORMANCE_RX_QUEUE_COMP))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("performanceCnt.queueCompVal for Rx has to be in the range of 1 - %d", MAX_PERFORMANCE_RX_QUEUE_COMP));
++ break;
++ case (e_FM_PORT_TYPE_TX_10G):
++ case (e_FM_PORT_TYPE_TX):
++ if (!p_FmPortPerformanceCnt->queueCompVal
++ || (p_FmPortPerformanceCnt->queueCompVal
++ > MAX_PERFORMANCE_TX_QUEUE_COMP))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("performanceCnt.queueCompVal for Tx has to be in the range of 1 - %d", MAX_PERFORMANCE_TX_QUEUE_COMP));
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
++ if (p_FmPortPerformanceCnt->queueCompVal)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("performanceCnt.queueCompVal is not relevant for H/O ports."));
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
++ }
++
++ params.task_val = p_FmPortPerformanceCnt->taskCompVal;
++ params.queue_val = p_FmPortPerformanceCnt->queueCompVal;
++ params.dma_val = p_FmPortPerformanceCnt->dmaCompVal;
++ params.fifo_val = p_FmPortPerformanceCnt->fifoCompVal;
++
++ err = fman_port_set_perf_cnt_params(&p_FmPort->port, &params);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_perf_cnt_params"));
++
++ return E_OK;
++}
++
++t_Error FM_PORT_AnalyzePerformanceParams(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_FmPortPerformanceCnt currParams, savedParams;
++ t_Error err;
++ bool underTest, failed = FALSE;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++
++    XX_Print("Analyzing performance parameters for port (type %d, id %d)\n",
++ p_FmPort->portType, p_FmPort->portId);
++
++ currParams.taskCompVal = (uint8_t)p_FmPort->tasks.num;
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
++ currParams.queueCompVal = 0;
++ else
++ currParams.queueCompVal = 1;
++ currParams.dmaCompVal = (uint8_t)p_FmPort->openDmas.num;
++ currParams.fifoCompVal = p_FmPort->fifoBufs.num;
++
++ FM_PORT_SetPerformanceCounters(p_FmPort, FALSE);
++ ClearPerfCnts(p_FmPort);
++ if ((err = FM_PORT_SetPerformanceCountersParams(p_FmPort, &currParams))
++ != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ FM_PORT_SetPerformanceCounters(p_FmPort, TRUE);
++ XX_UDelay(1000000);
++ FM_PORT_SetPerformanceCounters(p_FmPort, FALSE);
++ if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL))
++ {
++ XX_Print(
++ "Max num of defined port tasks (%d) utilized - Please enlarge\n",
++ p_FmPort->tasks.num);
++ failed = TRUE;
++ }
++ if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL))
++ {
++ XX_Print(
++ "Max num of defined port openDmas (%d) utilized - Please enlarge\n",
++ p_FmPort->openDmas.num);
++ failed = TRUE;
++ }
++ if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL))
++ {
++ XX_Print(
++ "Max size of defined port fifo (%d) utilized - Please enlarge\n",
++ p_FmPort->fifoBufs.num);
++ failed = TRUE;
++ }
++ if (failed)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ memset(&savedParams, 0, sizeof(savedParams));
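++    /* Lower each comparison value one step per one-second probe until its
++       utilization counter fires; the value recorded below is the firing
++       value plus two steps, i.e. one step of headroom above the last
++       quiet setting. */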
++ while (TRUE)
++ {
++ underTest = FALSE;
++ if ((currParams.taskCompVal != 1) && !savedParams.taskCompVal)
++ {
++ currParams.taskCompVal--;
++ underTest = TRUE;
++ }
++ if ((currParams.dmaCompVal != 1) && !savedParams.dmaCompVal)
++ {
++ currParams.dmaCompVal--;
++ underTest = TRUE;
++ }
++ if ((currParams.fifoCompVal != BMI_FIFO_UNITS)
++ && !savedParams.fifoCompVal)
++ {
++ currParams.fifoCompVal -= BMI_FIFO_UNITS;
++ underTest = TRUE;
++ }
++ if (!underTest)
++ break;
++
++ ClearPerfCnts(p_FmPort);
++ if ((err = FM_PORT_SetPerformanceCountersParams(p_FmPort, &currParams))
++ != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ FM_PORT_SetPerformanceCounters(p_FmPort, TRUE);
++ XX_UDelay(1000000);
++ FM_PORT_SetPerformanceCounters(p_FmPort, FALSE);
++
++ if (!savedParams.taskCompVal
++ && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL))
++ savedParams.taskCompVal = (uint8_t)(currParams.taskCompVal + 2);
++ if (!savedParams.dmaCompVal
++ && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL))
++ savedParams.dmaCompVal = (uint8_t)(currParams.dmaCompVal + 2);
++ if (!savedParams.fifoCompVal
++ && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL))
++ savedParams.fifoCompVal = currParams.fifoCompVal
++ + (2 * BMI_FIFO_UNITS);
++ }
++
++ XX_Print("best vals: tasks %d, dmas %d, fifos %d\n",
++ savedParams.taskCompVal, savedParams.dmaCompVal,
++ savedParams.fifoCompVal);
++ return E_OK;
++}
++
++t_Error FM_PORT_SetStatisticsCounters(t_Handle h_FmPort, bool enable)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ err = fman_port_set_stats_cnt_mode(&p_FmPort->port, enable);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_stats_cnt_mode"));
++ return E_OK;
++}
++
++t_Error FM_PORT_SetErrorsRoute(t_Handle h_FmPort, fmPortFrameErrSelect_t errs)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ volatile uint32_t *p_ErrDiscard = NULL;
++ int err;
++
++ UNUSED(p_ErrDiscard);
++ err = fman_port_set_err_mask(&p_FmPort->port, (uint32_t)errs);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_err_mask"));
++
++#ifdef FM_ERROR_VSP_NO_MATCH_SW006
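++    /* on FMan rev 6 and up, also mirror the routed errors into the
++       controller params page discard mask */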
++ if (p_FmPort->fmRevInfo.majorRev >= 6)
++ {
++ t_FmPcdCtrlParamsPage *p_ParamsPage;
++
++ FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
++ (void**)&p_ParamsPage);
++ ASSERT_COND(p_ParamsPage);
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ p_ErrDiscard =
++ &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ p_ErrDiscard =
++ &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm;
++ break;
++ default:
++ RETURN_ERROR(
++ MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++ }
++ WRITE_UINT32(p_ParamsPage->errorsDiscardMask,
++ GET_UINT32(*p_ErrDiscard) | errs);
++ }
++#endif /* FM_ERROR_VSP_NO_MATCH_SW006 */
++
++ return E_OK;
++}
++
++t_Error FM_PORT_SetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId,
++ bool enable)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++    SANITY_CHECK_RETURN_ERROR(poolId < BM_MAX_NUM_OF_POOLS, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Rx ports only"));
++
++ err = fman_port_set_bpool_cnt_mode(&p_FmPort->port, poolId, enable);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_bpool_cnt_mode"));
++ return E_OK;
++}
++
++t_Error FM_PORT_GetBmiCounters(t_Handle h_FmPort, t_FmPortBmiStats *p_BmiStats)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)){
++ p_BmiStats->cntCycle =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_CYCLE);
++ /* fmbm_rccn */
++ p_BmiStats->cntTaskUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL);
++ /* fmbm_rtuc */
++ p_BmiStats->cntQueueUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL);
++ /* fmbm_rrquc */
++ p_BmiStats->cntDmaUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL);
++ /* fmbm_rduc */
++ p_BmiStats->cntFifoUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL);
++ /* fmbm_rfuc */
++ p_BmiStats->cntRxPauseActivation =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION);
++ /* fmbm_rpac */
++ p_BmiStats->cntFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME);
++ /* fmbm_rfrc */
++ p_BmiStats->cntDiscardFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DISCARD_FRAME);
++ /* fmbm_rfdc */
++ p_BmiStats->cntDeallocBuf =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DEALLOC_BUF);
++ /* fmbm_rbdc */
++ p_BmiStats->cntRxBadFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_BAD_FRAME);
++ /* fmbm_rfbc */
++ p_BmiStats->cntRxLargeFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_LARGE_FRAME);
++ /* fmbm_rlfc */
++ p_BmiStats->cntRxFilterFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_FILTER_FRAME);
++ /* fmbm_rffc */
++ p_BmiStats->cntRxListDmaErr =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR);
++ /* fmbm_rfldec */
++ p_BmiStats->cntRxOutOfBuffersDiscard =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD);
++ /* fmbm_rodc */
++ p_BmiStats->cntWredDiscard = 0;
++ p_BmiStats->cntLengthErr = 0;
++ p_BmiStats->cntUnsupportedFormat = 0;
++ }
++ else if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)){
++ p_BmiStats->cntCycle =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_CYCLE);
++ /* fmbm_tccn */
++ p_BmiStats->cntTaskUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL);
++ /* fmbm_ttuc */
++ p_BmiStats->cntQueueUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL);
++ /* fmbm_ttcquc */
++ p_BmiStats->cntDmaUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL);
++ /* fmbm_tduc */
++ p_BmiStats->cntFifoUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL);
++ /* fmbm_tfuc */
++ p_BmiStats->cntRxPauseActivation = 0;
++ p_BmiStats->cntFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME);
++ /* fmbm_tfrc */
++ p_BmiStats->cntDiscardFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DISCARD_FRAME);
++ /* fmbm_tfdc */
++ p_BmiStats->cntDeallocBuf =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DEALLOC_BUF);
++ /* fmbm_tbdc */
++ p_BmiStats->cntRxBadFrame = 0;
++ p_BmiStats->cntRxLargeFrame = 0;
++ p_BmiStats->cntRxFilterFrame = 0;
++ p_BmiStats->cntRxListDmaErr = 0;
++ p_BmiStats->cntRxOutOfBuffersDiscard = 0;
++ p_BmiStats->cntWredDiscard = 0;
++ p_BmiStats->cntLengthErr =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_LENGTH_ERR);
++ /* fmbm_tfledc */
++ p_BmiStats->cntUnsupportedFormat =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT);
++ /* fmbm_tfufdc */
++ }
++ else if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) {
++ p_BmiStats->cntCycle =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_CYCLE);
++ /* fmbm_occn */
++ p_BmiStats->cntTaskUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL);
++ /* fmbm_otuc */
++ p_BmiStats->cntQueueUtil = 0;
++ p_BmiStats->cntDmaUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL);
++ /* fmbm_oduc */
++ p_BmiStats->cntFifoUtil =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL);
++ /* fmbm_ofuc*/
++ p_BmiStats->cntRxPauseActivation = 0;
++ p_BmiStats->cntFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME);
++ /* fmbm_ofrc */
++ p_BmiStats->cntDiscardFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DISCARD_FRAME);
++ /* fmbm_ofdc */
++ p_BmiStats->cntDeallocBuf =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DEALLOC_BUF);
++ /* fmbm_obdc*/
++ p_BmiStats->cntRxBadFrame = 0;
++ p_BmiStats->cntRxLargeFrame = 0;
++ p_BmiStats->cntRxFilterFrame =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_FILTER_FRAME);
++ /* fmbm_offc */
++ p_BmiStats->cntRxListDmaErr =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR);
++ /* fmbm_ofldec */
++ p_BmiStats->cntRxOutOfBuffersDiscard =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD);
++ /* fmbm_rodc */
++ p_BmiStats->cntWredDiscard =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_WRED_DISCARD);
++ /* fmbm_ofwdc */
++ p_BmiStats->cntLengthErr =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_LENGTH_ERR);
++ /* fmbm_ofledc */
++ p_BmiStats->cntUnsupportedFormat =
++ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT);
++ /* fmbm_ofufdc */
++ }
++ return E_OK;
++}
++
++uint32_t FM_PORT_GetCounter(t_Handle h_FmPort, e_FmPortCounters counter)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ bool bmiCounter = FALSE;
++ enum fman_port_stats_counters statsType;
++ enum fman_port_perf_counters perfType;
++ enum fman_port_qmi_counters queueType;
++ bool isStats;
++ t_Error errCode;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0);
++    SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, 0);
++
++ switch (counter)
++ {
++ case (e_FM_PORT_COUNTERS_DEQ_TOTAL):
++ case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
++ case (e_FM_PORT_COUNTERS_DEQ_CONFIRM):
++ /* check that counter is available for the port type */
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ REPORT_ERROR(MINOR, E_INVALID_STATE,
++ ("Requested counter is not available for Rx ports"));
++ return 0;
++ }
++ bmiCounter = FALSE;
++ break;
++ case (e_FM_PORT_COUNTERS_ENQ_TOTAL):
++ bmiCounter = FALSE;
++ break;
++        default: /* BMI counters (or error - will be checked in the BMI routine) */
++ bmiCounter = TRUE;
++ break;
++ }
++
++ if (bmiCounter)
++ {
++ errCode = BmiPortCheckAndGetCounterType(p_FmPort, counter, &statsType,
++ &perfType, &isStats);
++ if (errCode != E_OK)
++ {
++ REPORT_ERROR(MINOR, errCode, NO_MSG);
++ return 0;
++ }
++ if (isStats)
++ return fman_port_get_stats_counter(&p_FmPort->port, statsType);
++ else
++ return fman_port_get_perf_counter(&p_FmPort->port, perfType);
++ }
++ else /* QMI counter */
++ {
++ /* check that counters are enabled */
++        if (!(GET_UINT32(p_FmPort->port.qmi_regs->fmqm_pnc)
++                & QMI_PORT_CFG_EN_COUNTERS))
++        {
++ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled"));
++ return 0;
++ }
++
++ /* Set counter */
++ switch (counter)
++ {
++ case (e_FM_PORT_COUNTERS_ENQ_TOTAL):
++ queueType = E_FMAN_PORT_ENQ_TOTAL;
++ break;
++ case (e_FM_PORT_COUNTERS_DEQ_TOTAL):
++ queueType = E_FMAN_PORT_DEQ_TOTAL;
++ break;
++ case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
++ queueType = E_FMAN_PORT_DEQ_FROM_DFLT;
++ break;
++ case (e_FM_PORT_COUNTERS_DEQ_CONFIRM):
++ queueType = E_FMAN_PORT_DEQ_CONFIRM;
++ break;
++ default:
++ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available"));
++ return 0;
++ }
++
++ return fman_port_get_qmi_counter(&p_FmPort->port, queueType);
++ }
++
++ return 0;
++}
++
++t_Error FM_PORT_ModifyCounter(t_Handle h_FmPort, e_FmPortCounters counter,
++ uint32_t value)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ bool bmiCounter = FALSE;
++ enum fman_port_stats_counters statsType;
++ enum fman_port_perf_counters perfType;
++ enum fman_port_qmi_counters queueType;
++ bool isStats;
++ t_Error errCode;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ switch (counter)
++ {
++ case (e_FM_PORT_COUNTERS_DEQ_TOTAL):
++ case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
++ case (e_FM_PORT_COUNTERS_DEQ_CONFIRM):
++ /* check that counter is available for the port type */
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
++ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ RETURN_ERROR(
++ MINOR, E_INVALID_STATE,
++ ("Requested counter is not available for Rx ports"));
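++            /* fall through - DEQ counters are QMI counters, handled like ENQ_TOTAL */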
++ case (e_FM_PORT_COUNTERS_ENQ_TOTAL):
++ bmiCounter = FALSE;
++ break;
++        default: /* BMI counters (or error - will be checked in the BMI routine) */
++ bmiCounter = TRUE;
++ break;
++ }
++
++ if (bmiCounter)
++ {
++ errCode = BmiPortCheckAndGetCounterType(p_FmPort, counter, &statsType,
++ &perfType, &isStats);
++ if (errCode != E_OK)
++ {
++ RETURN_ERROR(MINOR, errCode, NO_MSG);
++ }
++ if (isStats)
++ fman_port_set_stats_counter(&p_FmPort->port, statsType, value);
++ else
++ fman_port_set_perf_counter(&p_FmPort->port, perfType, value);
++ }
++ else /* QMI counter */
++ {
++ /* check that counters are enabled */
++ if (!(GET_UINT32(p_FmPort->port.qmi_regs->fmqm_pnc)
++ & QMI_PORT_CFG_EN_COUNTERS))
++ {
++ RETURN_ERROR(MINOR, E_INVALID_STATE,
++ ("Requested counter was not enabled"));
++ }
++
++ /* Set counter */
++ switch (counter)
++ {
++ case (e_FM_PORT_COUNTERS_ENQ_TOTAL):
++ queueType = E_FMAN_PORT_ENQ_TOTAL;
++ break;
++ case (e_FM_PORT_COUNTERS_DEQ_TOTAL):
++ queueType = E_FMAN_PORT_DEQ_TOTAL;
++ break;
++ case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
++ queueType = E_FMAN_PORT_DEQ_FROM_DFLT;
++ break;
++ case (e_FM_PORT_COUNTERS_DEQ_CONFIRM):
++ queueType = E_FMAN_PORT_DEQ_CONFIRM;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Requested counter is not available"));
++ }
++
++ fman_port_set_qmi_counter(&p_FmPort->port, queueType, value);
++ }
++
++ return E_OK;
++}
++
++uint32_t FM_PORT_GetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0);
++    SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, 0);
++
++    if ((p_FmPort->portType != e_FM_PORT_TYPE_RX)
++            && (p_FmPort->portType != e_FM_PORT_TYPE_RX_10G))
++ {
++ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for non-Rx ports"));
++ return 0;
++ }
++ return fman_port_get_bpool_counter(&p_FmPort->port, poolId);
++}
++
++t_Error FM_PORT_ModifyAllocBufCounter(t_Handle h_FmPort, uint8_t poolId,
++ uint32_t value)
++{
++ t_FmPort *p_FmPort = (t_FmPort *)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++    if ((p_FmPort->portType != e_FM_PORT_TYPE_RX)
++            && (p_FmPort->portType != e_FM_PORT_TYPE_RX_10G))
++ RETURN_ERROR( MINOR, E_INVALID_STATE,
++ ("Requested counter is not available for non-Rx ports"));
++
++ fman_port_set_bpool_counter(&p_FmPort->port, poolId, value);
++ return E_OK;
++}
++
++bool FM_PORT_IsStalled(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err;
++ bool isStalled;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, FALSE);
++ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
++ FALSE);
++
++ err = FmIsPortStalled(p_FmPort->h_Fm, p_FmPort->hardwarePortId, &isStalled);
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
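++        /* on query failure, conservatively report the port as stalled */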
++ return TRUE;
++ }
++ return isStalled;
++}
++
++t_Error FM_PORT_ReleaseStalled(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ return FmResumeStalledPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId);
++}
++
++t_Error FM_PORT_SetRxL4ChecksumVerify(t_Handle h_FmPort, bool l4Checksum)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for Rx ports only"));
++
++    err = fman_port_modify_rx_fd_bits(
++            &p_FmPort->port, (uint8_t)(BMI_PORT_RFNE_FRWD_DCL4C >> 24),
++            l4Checksum);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_modify_rx_fd_bits"));
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++/* API Run-time PCD Control unit functions */
++/*****************************************************************************/
++
++#if (DPAA_VERSION >= 11)
++t_Error FM_PORT_VSPAlloc(t_Handle h_FmPort, t_FmPortVSPAllocParams *p_VSPParams)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err = E_OK;
++ volatile uint32_t *p_BmiStorageProfileId = NULL, *p_BmiVspe = NULL;
++ uint32_t tmpReg = 0, tmp = 0;
++ uint16_t hwStoragePrflId;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->h_Fm, E_INVALID_HANDLE);
++    /* for numOfProfiles == 0, don't call this function */
++    SANITY_CHECK_RETURN_ERROR(p_VSPParams->numOfProfiles, E_INVALID_VALUE);
++    /* dfltRelativeId should be within the range of numOfProfiles */
++    SANITY_CHECK_RETURN_ERROR(
++            p_VSPParams->dfltRelativeId < p_VSPParams->numOfProfiles,
++            E_INVALID_VALUE);
++    /* p_FmPort should be an Rx or offline parsing port */
++    SANITY_CHECK_RETURN_ERROR(
++            ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)),
++            E_INVALID_VALUE);
++    /* the port should be disabled */
++    SANITY_CHECK_RETURN_ERROR(!p_FmPort->enabled, E_INVALID_STATE);
++    /* if called for an Rx port, the matching Tx port must be passed
++       (initialized) too, and it must be disabled */
++    SANITY_CHECK_RETURN_ERROR(
++            ((p_VSPParams->h_FmTxPort && !((t_FmPort *)(p_VSPParams->h_FmTxPort))->enabled) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)),
++            E_INVALID_VALUE);
++    /* must be called before SetPCD - this port should have no PCD */
++    SANITY_CHECK_RETURN_ERROR(!p_FmPort->pcdEngines, E_INVALID_STATE);
++
++    /* allocate a window of VSPs for this port */
++ err = FmVSPAllocForPort(p_FmPort->h_Fm, p_FmPort->portType,
++ p_FmPort->portId, p_VSPParams->numOfProfiles);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++    /* get the absolute VSP ID for the default relative ID */
++ err = FmVSPGetAbsoluteProfileId(p_FmPort->h_Fm, p_FmPort->portType,
++ p_FmPort->portId,
++ p_VSPParams->dfltRelativeId,
++ &hwStoragePrflId);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++    /* fill the relevant registers for p_FmPort and, when p_FmPort is an Rx port, for the paired Tx port */
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
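++            /* For Rx ports the absolute profile ID is programmed into the
++               paired Tx port's registers first; the Rx-side registers are
++               then given the port-relative default ID below. */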
++ p_BmiStorageProfileId =
++ &(((t_FmPort *)(p_VSPParams->h_FmTxPort))->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfqid);
++ p_BmiVspe =
++ &(((t_FmPort *)(p_VSPParams->h_FmTxPort))->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfne);
++
++ tmpReg = GET_UINT32(*p_BmiStorageProfileId) & ~BMI_SP_ID_MASK;
++ tmpReg |= (uint32_t)hwStoragePrflId << BMI_SP_ID_SHIFT;
++ WRITE_UINT32(*p_BmiStorageProfileId, tmpReg);
++
++ tmpReg = GET_UINT32(*p_BmiVspe);
++ WRITE_UINT32(*p_BmiVspe, tmpReg | BMI_SP_EN);
++
++ p_BmiStorageProfileId =
++ &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfqid;
++ p_BmiVspe = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rpp;
++ hwStoragePrflId = p_VSPParams->dfltRelativeId;
++ break;
++
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ tmpReg = NIA_ENG_BMI | NIA_BMI_AC_FETCH_ALL_FRAME;
++ WRITE_UINT32( p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn,
++ tmpReg);
++
++ p_BmiStorageProfileId =
++ &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofqid;
++ p_BmiVspe = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_opp;
++ tmp |= BMI_EBD_EN;
++ break;
++
++ default:
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++ }
++
++ p_FmPort->vspe = TRUE;
++ p_FmPort->dfltRelativeId = p_VSPParams->dfltRelativeId;
++
++ tmpReg = GET_UINT32(*p_BmiStorageProfileId) & ~BMI_SP_ID_MASK;
++ tmpReg |= (uint32_t)hwStoragePrflId << BMI_SP_ID_SHIFT;
++ WRITE_UINT32(*p_BmiStorageProfileId, tmpReg);
++
++ tmpReg = GET_UINT32(*p_BmiVspe);
++ WRITE_UINT32(*p_BmiVspe, tmpReg | BMI_SP_EN | tmp);
++ return E_OK;
++}
++#endif /* (DPAA_VERSION >= 11) */
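++
++/*
++ * Illustrative call sequence for FM_PORT_VSPAlloc() above (a sketch, not
++ * taken from the original sources; the port handles are hypothetical).
++ * The sanity checks require a disabled Rx/OP port, a disabled Tx partner
++ * for Rx ports, and no PCD bound yet:
++ *
++ *     t_FmPortVSPAllocParams vspParams;
++ *
++ *     memset(&vspParams, 0, sizeof(vspParams));
++ *     vspParams.numOfProfiles = 4;
++ *     vspParams.dfltRelativeId = 0;        // must be < numOfProfiles
++ *     vspParams.h_FmTxPort = h_TxPort;     // required for Rx ports
++ *     err = FM_PORT_VSPAlloc(h_RxPort, &vspParams);
++ *     // only afterwards: FM_PORT_SetPCD(), then FM_PORT_Enable()
++ */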
++
++t_Error FM_PORT_PcdPlcrAllocProfiles(t_Handle h_FmPort, uint16_t numOfProfiles)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err = E_OK;
++
++ p_FmPort->h_FmPcd = FmGetPcdHandle(p_FmPort->h_Fm);
++ ASSERT_COND(p_FmPort->h_FmPcd);
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ if (numOfProfiles)
++ {
++ err = FmPcdPlcrAllocProfiles(p_FmPort->h_FmPcd,
++ p_FmPort->hardwarePortId, numOfProfiles);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ /* set the port handle within the PCD policer, even if no profiles defined */
++ FmPcdPortRegister(p_FmPort->h_FmPcd, h_FmPort, p_FmPort->hardwarePortId);
++
++ RELEASE_LOCK(p_FmPort->lock);
++
++ return E_OK;
++}
++
++t_Error FM_PORT_PcdPlcrFreeProfiles(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err = E_OK;
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FmPcdPlcrFreeProfiles(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId);
++
++ RELEASE_LOCK(p_FmPort->lock);
++
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ return E_OK;
++}
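++
++/*
++ * Note: the TRY_LOCK/RELEASE_LOCK pairing in the two profile routines above
++ * is the driver's standard non-blocking locking pattern; a caller that gets
++ * ERROR_CODE(E_BUSY) back is expected to retry later rather than block.
++ */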
++
++t_Error FM_PORT_PcdKgModifyInitialScheme(t_Handle h_FmPort,
++ t_FmPcdKgSchemeSelect *p_FmPcdKgScheme)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ volatile uint32_t *p_BmiHpnia = NULL;
++ uint32_t tmpReg;
++ uint8_t relativeSchemeId;
++ uint8_t physicalSchemeId;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG,
++ E_INVALID_STATE);
++
++ tmpReg = (uint32_t)((p_FmPort->pcdEngines & FM_PCD_CC) ? NIA_KG_CC_EN : 0);
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ p_BmiHpnia = &p_FmPort->port.bmi_regs->rx.fmbm_rfpne;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ p_BmiHpnia = &p_FmPort->port.bmi_regs->oh.fmbm_ofpne;
++ break;
++ default:
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++ }
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ /* if we want to change to direct scheme, we need to check that this scheme is valid */
++ if (p_FmPcdKgScheme->direct)
++ {
++ physicalSchemeId = FmPcdKgGetSchemeId(p_FmPcdKgScheme->h_DirectScheme);
++ /* check that this scheme is bound to this port */
++ if (!(p_FmPort->schemesPerPortVector
++ & (uint32_t)(1 << (31 - (uint32_t)physicalSchemeId))))
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(
++ MAJOR, E_INVALID_STATE,
++ ("called with a scheme that is not bound to this port"));
++ }
++
++ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPort->h_FmPcd,
++ physicalSchemeId);
++ if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
++ ("called with invalid Scheme "));
++ }
++
++ if (!FmPcdKgIsSchemeValidSw(p_FmPcdKgScheme->h_DirectScheme))
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("called with uninitialized Scheme "));
++ }
++
++ WRITE_UINT32(
++ *p_BmiHpnia,
++ NIA_ENG_KG | tmpReg | NIA_KG_DIRECT | (uint32_t)physicalSchemeId);
++ }
++ else
++ /* change to indirect scheme */
++ WRITE_UINT32(*p_BmiHpnia, NIA_ENG_KG | tmpReg);
++ RELEASE_LOCK(p_FmPort->lock);
++
++ return E_OK;
++}
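++
++/*
++ * Worked example for the scheme-binding check above (illustrative): a
++ * physical scheme id of 3 maps to bit (31 - 3) = 28, i.e. mask 0x10000000,
++ * so schemesPerPortVector keeps scheme 0 in the most significant bit and
++ * scheme 31 in the least significant bit.
++ */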
++
++t_Error FM_PORT_PcdPlcrModifyInitialProfile(t_Handle h_FmPort,
++ t_Handle h_Profile)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ volatile uint32_t *p_BmiNia;
++ volatile uint32_t *p_BmiHpnia;
++ uint32_t tmpReg;
++ uint16_t absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_PLCR,
++ E_INVALID_STATE);
++
++ /* check relevance of this routine - only when policer is used
++ directly after BMI or Parser */
++ if ((p_FmPort->pcdEngines & FM_PCD_KG)
++ || (p_FmPort->pcdEngines & FM_PCD_CC))
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("relevant only when PCD support mode is e_FM_PCD_SUPPORT_PLCR_ONLY or e_FM_PCD_SUPPORT_PRS_AND_PLCR"));
++
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
++ p_BmiHpnia = &p_FmPort->port.bmi_regs->rx.fmbm_rfpne;
++ tmpReg = GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
++ p_BmiHpnia = &p_FmPort->port.bmi_regs->oh.fmbm_ofpne;
++ tmpReg = 0;
++ break;
++ default:
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++ }
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ if (!FmPcdPlcrIsProfileValid(p_FmPort->h_FmPcd, absoluteProfileId))
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Invalid profile"));
++ }
++
++ tmpReg |= (uint32_t)(NIA_ENG_PLCR | NIA_PLCR_ABSOLUTE | absoluteProfileId);
++
++ if (p_FmPort->pcdEngines & FM_PCD_PRS) /* e_FM_PCD_SUPPORT_PRS_AND_PLCR */
++ {
++ /* update BMI HPNIA */
++ WRITE_UINT32(*p_BmiHpnia, tmpReg);
++ }
++ else /* e_FM_PCD_SUPPORT_PLCR_ONLY */
++ {
++ /* rfne may contain FDCS bits, so first we read them. */
++ tmpReg |= (GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK);
++ /* update BMI NIA */
++ WRITE_UINT32(*p_BmiNia, tmpReg);
++    }
++    RELEASE_LOCK(p_FmPort->lock);
++
++ return E_OK;
++}
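++
++/*
++ * Note on the NIA value composed above: NIA_ENG_PLCR selects the policer
++ * engine, NIA_PLCR_ABSOLUTE marks the profile id as absolute rather than
++ * port-relative, and the low bits carry the profile number; for Rx ports
++ * the FDCS bits already present in fmbm_rfne are read back and preserved.
++ */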
++
++t_Error FM_PORT_PcdCcModifyTree(t_Handle h_FmPort, t_Handle h_CcTree)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err = E_OK;
++ volatile uint32_t *p_BmiCcBase = NULL;
++ volatile uint32_t *p_BmiNia = NULL;
++ uint32_t ccTreePhysOffset;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(h_CcTree, E_INVALID_HANDLE);
++
++ if (p_FmPort->imEn)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for non-independent mode ports only"));
++
++ /* get PCD registers pointers */
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
++ break;
++ default:
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++ }
++
++ /* check that current NIA is BMI to BMI */
++ if ((GET_UINT32(*p_BmiNia) & ~BMI_RFNE_FDCS_MASK)
++ != GET_NIA_BMI_AC_ENQ_FRAME(p_FmPort->h_FmPcd))
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("may be called only for ports in BMI-to-BMI state."));
++
++ if (p_FmPort->pcdEngines & FM_PCD_CC)
++ {
++ if (p_FmPort->h_IpReassemblyManip)
++ {
++ err = FmPcdCcTreeAddIPR(p_FmPort->h_FmPcd, h_CcTree, NULL,
++ p_FmPort->h_IpReassemblyManip, FALSE);
++ if (err != E_OK)
++ {
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++ else
++ if (p_FmPort->h_CapwapReassemblyManip)
++ {
++ err = FmPcdCcTreeAddCPR(p_FmPort->h_FmPcd, h_CcTree, NULL,
++ p_FmPort->h_CapwapReassemblyManip,
++ FALSE);
++ if (err != E_OK)
++ {
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++ switch (p_FmPort->portType)
++ {
++ case (e_FM_PORT_TYPE_RX_10G):
++ case (e_FM_PORT_TYPE_RX):
++ p_BmiCcBase = &p_FmPort->port.bmi_regs->rx.fmbm_rccb;
++ break;
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ p_BmiCcBase = &p_FmPort->port.bmi_regs->oh.fmbm_occb;
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
++ }
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++ err = FmPcdCcBindTree(p_FmPort->h_FmPcd, NULL, h_CcTree,
++ &ccTreePhysOffset, h_FmPort);
++ if (err)
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++        }
++        WRITE_UINT32(*p_BmiCcBase, ccTreePhysOffset);
++
++ p_FmPort->ccTreeId = h_CcTree;
++ RELEASE_LOCK(p_FmPort->lock);
++ }
++ else
++ RETURN_ERROR( MAJOR, E_INVALID_STATE,
++ ("Coarse Classification not defined for this port."));
++
++ return E_OK;
++}
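++
++/*
++ * Illustrative usage of FM_PORT_PcdCcModifyTree() above (a sketch; the
++ * handles are hypothetical). It may only be called while the port NIA is
++ * still in the BMI-to-BMI state and CC was enabled via FM_PORT_SetPCD():
++ *
++ *     err = FM_PORT_PcdCcModifyTree(h_RxPort, h_NewCcTree);
++ *     if (err == ERROR_CODE(E_BUSY))
++ *         ; // port lock contended - retry later
++ */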
++
++t_Error FM_PORT_AttachPCD(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if (p_FmPort->imEn)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for non-independent mode ports only"));
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ if (p_FmPort->h_ReassemblyTree)
++ p_FmPort->pcdEngines |= FM_PCD_CC;
++
++ err = AttachPCD(h_FmPort);
++ RELEASE_LOCK(p_FmPort->lock);
++
++ return err;
++}
++
++t_Error FM_PORT_DetachPCD(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if (p_FmPort->imEn)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for non-independent mode ports only"));
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = DetachPCD(h_FmPort);
++ if (err != E_OK)
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if (p_FmPort->h_ReassemblyTree)
++ p_FmPort->pcdEngines &= ~FM_PCD_CC;
++ RELEASE_LOCK(p_FmPort->lock);
++
++ return E_OK;
++}
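++
++/*
++ * Typical pairing of the two functions above (illustrative sketch; h_Port
++ * is hypothetical): FM_PORT_DetachPCD() parks the port back in BMI-to-BMI
++ * mode so PCD resources can be reworked, and FM_PORT_AttachPCD() re-applies
++ * them:
++ *
++ *     err = FM_PORT_DetachPCD(h_Port);
++ *     // ... modify schemes/profiles/trees here ...
++ *     err = FM_PORT_AttachPCD(h_Port);
++ */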
++
++t_Error FM_PORT_SetPCD(t_Handle h_FmPort, t_FmPortPcdParams *p_PcdParam)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err = E_OK;
++ t_FmPortPcdParams modifiedPcdParams, *p_PcdParams;
++ t_FmPcdCcTreeParams *p_FmPcdCcTreeParams;
++ t_FmPortPcdCcParams fmPortPcdCcParams;
++ t_FmPortGetSetCcParams fmPortGetSetCcParams;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_PcdParam, E_NULL_POINTER);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if (p_FmPort->imEn)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for non-independent mode ports only"));
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ p_FmPort->h_FmPcd = FmGetPcdHandle(p_FmPort->h_Fm);
++ ASSERT_COND(p_FmPort->h_FmPcd);
++
++ if (p_PcdParam->p_CcParams && !p_PcdParam->p_CcParams->h_CcTree)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE,
++ ("Tree handle must be given if CC is required"));
++
++ memcpy(&modifiedPcdParams, p_PcdParam, sizeof(t_FmPortPcdParams));
++ p_PcdParams = &modifiedPcdParams;
++ if ((p_PcdParams->h_IpReassemblyManip)
++#if (DPAA_VERSION >= 11)
++ || (p_PcdParams->h_CapwapReassemblyManip)
++#endif /* (DPAA_VERSION >= 11) */
++ )
++ {
++ if ((p_PcdParams->pcdSupport != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG)
++ && (p_PcdParams->pcdSupport
++ != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC)
++ && (p_PcdParams->pcdSupport
++ != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR)
++ && (p_PcdParams->pcdSupport
++ != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR))
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR( MAJOR, E_INVALID_STATE,
++ ("pcdSupport must have KG for supporting Reassembly"));
++ }
++ p_FmPort->h_IpReassemblyManip = p_PcdParams->h_IpReassemblyManip;
++#if (DPAA_VERSION >= 11)
++ if ((p_PcdParams->h_IpReassemblyManip)
++ && (p_PcdParams->h_CapwapReassemblyManip))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("Either IP-R or CAPWAP-R is allowed"));
++ if ((p_PcdParams->h_CapwapReassemblyManip)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("CAPWAP-R is allowed only on offline-port"));
++ if (p_PcdParams->h_CapwapReassemblyManip)
++ p_FmPort->h_CapwapReassemblyManip =
++ p_PcdParams->h_CapwapReassemblyManip;
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (!p_PcdParams->p_CcParams)
++ {
++ if (!((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PRS_AND_KG)
++ || (p_PcdParams->pcdSupport
++ == e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR)))
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_STATE,
++ ("PCD initialization structure is not consistent with pcdSupport"));
++ }
++
++ /* No user-tree, need to build internal tree */
++ p_FmPcdCcTreeParams = (t_FmPcdCcTreeParams*)XX_Malloc(
++ sizeof(t_FmPcdCcTreeParams));
++ if (!p_FmPcdCcTreeParams)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_FmPcdCcTreeParams"));
++ memset(p_FmPcdCcTreeParams, 0, sizeof(t_FmPcdCcTreeParams));
++ p_FmPcdCcTreeParams->h_NetEnv = p_PcdParams->h_NetEnv;
++ p_FmPort->h_ReassemblyTree = FM_PCD_CcRootBuild(
++ p_FmPort->h_FmPcd, p_FmPcdCcTreeParams);
++
++ if (!p_FmPort->h_ReassemblyTree)
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ XX_Free(p_FmPcdCcTreeParams);
++ RETURN_ERROR( MAJOR, E_INVALID_HANDLE,
++ ("FM_PCD_CcBuildTree for Reassembly failed"));
++ }
++ if (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PRS_AND_KG)
++ p_PcdParams->pcdSupport =
++ e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC;
++ else
++ p_PcdParams->pcdSupport =
++ e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR;
++
++ memset(&fmPortPcdCcParams, 0, sizeof(t_FmPortPcdCcParams));
++ fmPortPcdCcParams.h_CcTree = p_FmPort->h_ReassemblyTree;
++ p_PcdParams->p_CcParams = &fmPortPcdCcParams;
++ XX_Free(p_FmPcdCcTreeParams);
++ }
++
++ if (p_FmPort->h_IpReassemblyManip)
++ err = FmPcdCcTreeAddIPR(p_FmPort->h_FmPcd,
++ p_PcdParams->p_CcParams->h_CcTree,
++ p_PcdParams->h_NetEnv,
++ p_FmPort->h_IpReassemblyManip, TRUE);
++#if (DPAA_VERSION >= 11)
++ else
++ if (p_FmPort->h_CapwapReassemblyManip)
++ err = FmPcdCcTreeAddCPR(p_FmPort->h_FmPcd,
++ p_PcdParams->p_CcParams->h_CcTree,
++ p_PcdParams->h_NetEnv,
++ p_FmPort->h_CapwapReassemblyManip,
++ TRUE);
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (err != E_OK)
++ {
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++            }
++            RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++
++ if (!FmPcdLockTryLockAll(p_FmPort->h_FmPcd))
++ {
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++        }
++        RELEASE_LOCK(p_FmPort->lock);
++ DBG(TRACE, ("Try LockAll - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = SetPcd(h_FmPort, p_PcdParams);
++ if (err)
++ {
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++ }
++ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if ((p_FmPort->pcdEngines & FM_PCD_PRS)
++ && (p_PcdParams->p_PrsParams->includeInPrsStatistics))
++ {
++ err = FmPcdPrsIncludePortInStatistics(p_FmPort->h_FmPcd,
++ p_FmPort->hardwarePortId, TRUE);
++ if (err)
++ {
++ DeletePcd(p_FmPort);
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++ }
++ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ p_FmPort->includeInPrsStatistics = TRUE;
++ }
++
++ FmPcdIncNetEnvOwners(p_FmPort->h_FmPcd, p_FmPort->netEnvId);
++
++ if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd))
++ {
++ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
++
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ {
++#ifdef FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004
++ if ((p_FmPort->fmRevInfo.majorRev < 6) &&
++ (p_FmPort->pcdEngines & FM_PCD_KG))
++ {
++ int i;
++ for (i = 0; i<p_PcdParams->p_KgParams->numOfSchemes; i++)
++ /* The following function must be locked */
++ FmPcdKgCcGetSetParams(p_FmPort->h_FmPcd,
++ p_PcdParams->p_KgParams->h_Schemes[i],
++ UPDATE_KG_NIA_CC_WA,
++ 0);
++ }
++#endif /* FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004 */
++
++#if (DPAA_VERSION >= 11)
++ {
++ t_FmPcdCtrlParamsPage *p_ParamsPage;
++
++ FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
++ (void**)&p_ParamsPage);
++ ASSERT_COND(p_ParamsPage);
++ WRITE_UINT32(p_ParamsPage->postBmiFetchNia,
++ p_FmPort->savedBmiNia);
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ /* Set post-bmi-fetch nia */
++ p_FmPort->savedBmiNia &= BMI_RFNE_FDCS_MASK;
++ p_FmPort->savedBmiNia |= (NIA_FM_CTL_AC_POST_BMI_FETCH
++ | NIA_ENG_FM_CTL);
++
++ /* Set pre-bmi-fetch nia */
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNDN;
++#if (DPAA_VERSION >= 11)
++ fmPortGetSetCcParams.setCcParams.nia =
++ (NIA_FM_CTL_AC_PRE_BMI_FETCH_FULL_FRAME | NIA_ENG_FM_CTL);
++#else
++ fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_PRE_BMI_FETCH_HEADER | NIA_ENG_FM_CTL);
++#endif /* (DPAA_VERSION >= 11) */
++ if ((err = FmPortGetSetCcParams(p_FmPort, &fmPortGetSetCcParams))
++ != E_OK)
++ {
++ DeletePcd(p_FmPort);
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++ }
++ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++
++ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
++
++ /* Set pop-to-next-step nia */
++#if (DPAA_VERSION == 10)
++ if (p_FmPort->fmRevInfo.majorRev < 6)
++ {
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN;
++ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
++ }
++ else
++ {
++#endif /* (DPAA_VERSION == 10) */
++ fmPortGetSetCcParams.getCcParams.type = GET_NIA_FPNE;
++#if (DPAA_VERSION == 10)
++ }
++#endif /* (DPAA_VERSION == 10) */
++ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
++ != E_OK)
++ {
++ DeletePcd(p_FmPort);
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++            }
++            RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ /* Set post-bmi-prepare-to-enq nia */
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_FENE;
++ fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_POST_BMI_ENQ
++ | NIA_ENG_FM_CTL);
++ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
++ != E_OK)
++ {
++ DeletePcd(p_FmPort);
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++            }
++            RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if ((p_FmPort->h_IpReassemblyManip)
++ || (p_FmPort->h_CapwapReassemblyManip))
++ {
++#if (DPAA_VERSION == 10)
++ if (p_FmPort->fmRevInfo.majorRev < 6)
++ {
++ /* Overwrite post-bmi-prepare-to-enq nia */
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_FENE;
++ fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_POST_BMI_ENQ_ORR | NIA_ENG_FM_CTL | NIA_ORDER_RESTOR);
++ fmPortGetSetCcParams.setCcParams.overwrite = TRUE;
++ }
++ else
++ {
++#endif /* (DPAA_VERSION == 10) */
++ /* Set the ORR bit (for order-restoration) */
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_FPNE;
++ fmPortGetSetCcParams.setCcParams.nia =
++ fmPortGetSetCcParams.getCcParams.nia | NIA_ORDER_RESTOR;
++#if (DPAA_VERSION == 10)
++ }
++#endif /* (DPAA_VERSION == 10) */
++ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
++ != E_OK)
++ {
++ DeletePcd(p_FmPort);
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++                }
++                RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ }
++ }
++ else
++ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
++
++#if (DPAA_VERSION >= 11)
++ {
++ t_FmPcdCtrlParamsPage *p_ParamsPage;
++
++ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
++
++ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_CMNE;
++ if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd))
++ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP
++ | NIA_ENG_FM_CTL;
++ else
++ fmPortGetSetCcParams.setCcParams.nia =
++ NIA_FM_CTL_AC_NO_IPACC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
++ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
++ != E_OK)
++ {
++ DeletePcd(p_FmPort);
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++            }
++            RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
++ (void**)&p_ParamsPage);
++ ASSERT_COND(p_ParamsPage);
++
++ if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd))
++ WRITE_UINT32(
++ p_ParamsPage->misc,
++ GET_UINT32(p_ParamsPage->misc) | FM_CTL_PARAMS_PAGE_OFFLOAD_SUPPORT_EN);
++
++ if ((p_FmPort->h_IpReassemblyManip)
++ || (p_FmPort->h_CapwapReassemblyManip))
++ {
++ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ WRITE_UINT32(
++ p_ParamsPage->discardMask,
++ GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm));
++ else
++ WRITE_UINT32(
++ p_ParamsPage->discardMask,
++ GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm));
++ }
++#ifdef FM_ERROR_VSP_NO_MATCH_SW006
++ if (p_FmPort->vspe)
++ WRITE_UINT32(
++ p_ParamsPage->misc,
++ GET_UINT32(p_ParamsPage->misc) | (p_FmPort->dfltRelativeId & FM_CTL_PARAMS_PAGE_ERROR_VSP_MASK));
++#endif /* FM_ERROR_VSP_NO_MATCH_SW006 */
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ err = AttachPCD(h_FmPort);
++ if (err)
++ {
++ DeletePcd(p_FmPort);
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ p_FmPort->h_ReassemblyTree = NULL;
++        }
++        RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ RELEASE_LOCK(p_FmPort->lock);
++
++ return err;
++}
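++
++/*
++ * Minimal caller sketch for FM_PORT_SetPCD() above (illustrative only; the
++ * net-env and KG handles are hypothetical). It mirrors the checks in the
++ * function: the port must still be PCD-less, and a CC tree handle is
++ * mandatory whenever p_CcParams is supplied:
++ *
++ *     t_FmPortPcdParams pcdParams;
++ *
++ *     memset(&pcdParams, 0, sizeof(pcdParams));
++ *     pcdParams.h_NetEnv = h_NetEnv;
++ *     pcdParams.pcdSupport = e_FM_PORT_PCD_SUPPORT_PRS_AND_KG;
++ *     pcdParams.p_KgParams = &kgParams;    // schemes created beforehand
++ *     err = FM_PORT_SetPCD(h_RxPort, &pcdParams);
++ */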
++
++t_Error FM_PORT_DeletePCD(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++
++ if (p_FmPort->imEn)
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
++ ("available for non-independant mode ports only"));
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
++ ("available for Rx and offline parsing ports only"));
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = DetachPCD(h_FmPort);
++ if (err)
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ FmPcdDecNetEnvOwners(p_FmPort->h_FmPcd, p_FmPort->netEnvId);
++
++ /* we do it anyway, instead of checking if included */
++ if ((p_FmPort->pcdEngines & FM_PCD_PRS) && p_FmPort->includeInPrsStatistics)
++ {
++ FmPcdPrsIncludePortInStatistics(p_FmPort->h_FmPcd,
++ p_FmPort->hardwarePortId, FALSE);
++ p_FmPort->includeInPrsStatistics = FALSE;
++ }
++
++ if (!FmPcdLockTryLockAll(p_FmPort->h_FmPcd))
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ DBG(TRACE, ("Try LockAll - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = DeletePcd(h_FmPort);
++ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
++ if (err)
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ if (p_FmPort->h_ReassemblyTree)
++ {
++ err = FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
++ if (err)
++ {
++ RELEASE_LOCK(p_FmPort->lock);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ p_FmPort->h_ReassemblyTree = NULL;
++    }
++    RELEASE_LOCK(p_FmPort->lock);
++
++ return err;
++}
++
++t_Error FM_PORT_PcdKgBindSchemes(t_Handle h_FmPort,
++ t_FmPcdPortSchemesParams *p_PortScheme)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_FmPcdKgInterModuleBindPortToSchemes schemeBind;
++ t_Error err = E_OK;
++ uint32_t tmpScmVec = 0;
++ int i;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG,
++ E_INVALID_STATE);
++
++ schemeBind.netEnvId = p_FmPort->netEnvId;
++ schemeBind.hardwarePortId = p_FmPort->hardwarePortId;
++ schemeBind.numOfSchemes = p_PortScheme->numOfSchemes;
++ schemeBind.useClsPlan = p_FmPort->useClsPlan;
++ for (i = 0; i < schemeBind.numOfSchemes; i++)
++ {
++ schemeBind.schemesIds[i] = FmPcdKgGetSchemeId(
++ p_PortScheme->h_Schemes[i]);
++ /* build vector */
++ tmpScmVec |= 1 << (31 - (uint32_t)schemeBind.schemesIds[i]);
++ }
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FmPcdKgBindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind);
++ if (err == E_OK)
++ p_FmPort->schemesPerPortVector |= tmpScmVec;
++
++#ifdef FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004
++ if ((FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)) &&
++ (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
++ (p_FmPort->fmRevInfo.majorRev < 6))
++ {
++ for (i=0; i<p_PortScheme->numOfSchemes; i++)
++ FmPcdKgCcGetSetParams(p_FmPort->h_FmPcd, p_PortScheme->h_Schemes[i], UPDATE_KG_NIA_CC_WA, 0);
++ }
++#endif /* FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004 */
++
++ RELEASE_LOCK(p_FmPort->lock);
++
++ return err;
++}
++
++t_Error FM_PORT_PcdKgUnbindSchemes(t_Handle h_FmPort,
++ t_FmPcdPortSchemesParams *p_PortScheme)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_FmPcdKgInterModuleBindPortToSchemes schemeBind;
++ t_Error err = E_OK;
++ uint32_t tmpScmVec = 0;
++ int i;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG,
++ E_INVALID_STATE);
++
++ schemeBind.netEnvId = p_FmPort->netEnvId;
++ schemeBind.hardwarePortId = p_FmPort->hardwarePortId;
++ schemeBind.numOfSchemes = p_PortScheme->numOfSchemes;
++ for (i = 0; i < schemeBind.numOfSchemes; i++)
++ {
++ schemeBind.schemesIds[i] = FmPcdKgGetSchemeId(
++ p_PortScheme->h_Schemes[i]);
++ /* build vector */
++ tmpScmVec |= 1 << (31 - (uint32_t)schemeBind.schemesIds[i]);
++ }
++
++ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
++ {
++ DBG(TRACE, ("FM Port Try Lock - BUSY"));
++ return ERROR_CODE(E_BUSY);
++ }
++
++ err = FmPcdKgUnbindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind);
++ if (err == E_OK)
++ p_FmPort->schemesPerPortVector &= ~tmpScmVec;
++ RELEASE_LOCK(p_FmPort->lock);
++
++ return err;
++}
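++
++/*
++ * Illustrative bind/unbind call for the two functions above (a sketch; the
++ * scheme handles are hypothetical):
++ *
++ *     t_FmPcdPortSchemesParams ps;
++ *
++ *     memset(&ps, 0, sizeof(ps));
++ *     ps.numOfSchemes = 2;
++ *     ps.h_Schemes[0] = h_SchemeIpv4;
++ *     ps.h_Schemes[1] = h_SchemeIpv6;
++ *     err = FM_PORT_PcdKgBindSchemes(h_Port, &ps);
++ *     // unbinding later takes the same structure:
++ *     err = FM_PORT_PcdKgUnbindSchemes(h_Port, &ps);
++ */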
++
++t_Error FM_PORT_AddCongestionGrps(t_Handle h_FmPort,
++ t_FmPortCongestionGrps *p_CongestionGrps)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ uint8_t priorityTmpArray[FM_PORT_NUM_OF_CONGESTION_GRPS];
++ uint8_t mod, index;
++ uint32_t i, grpsMap[FMAN_PORT_CG_MAP_NUM];
++ int err;
++#if (DPAA_VERSION >= 11)
++ int j;
++#endif /* (DPAA_VERSION >= 11) */
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++
++    /* Unnecessary check of the indexes for now; it will probably be needed
++       in the future when more CGs become available:
++    for (i=0; i<p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
++        if (p_CongestionGrps->congestionGrpsToConsider[i] >= FM_PORT_NUM_OF_CONGESTION_GRPS)
++            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("CG id!"));
++    */
++
++#ifdef FM_NO_OP_OBSERVED_CGS
++ if ((p_FmPort->fmRevInfo.majorRev != 4) &&
++ (p_FmPort->fmRevInfo.majorRev < 6))
++ {
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) &&
++ (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx ports only"));
++ }
++ else
++#endif /* FM_NO_OP_OBSERVED_CGS */
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++ ("Available for Rx & OP ports only"));
++
++ /* Prepare groups map array */
++ memset(grpsMap, 0, FMAN_PORT_CG_MAP_NUM * sizeof(uint32_t));
++ for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
++ {
++ index = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] / 32);
++ mod = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] % 32);
++ if (p_FmPort->fmRevInfo.majorRev != 4)
++ grpsMap[7 - index] |= (uint32_t)(1 << mod);
++ else
++ grpsMap[0] |= (uint32_t)(1 << mod);
++ }
++
++ memset(&priorityTmpArray, 0,
++ FM_PORT_NUM_OF_CONGESTION_GRPS * sizeof(uint8_t));
++
++ for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
++ {
++#if (DPAA_VERSION >= 11)
++ for (j = 0; j < FM_MAX_NUM_OF_PFC_PRIORITIES; j++)
++ if (p_CongestionGrps->pfcPrioritiesEn[i][j])
++ priorityTmpArray[p_CongestionGrps->congestionGrpsToConsider[i]] |=
++ (0x01 << (FM_MAX_NUM_OF_PFC_PRIORITIES - j - 1));
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++#if (DPAA_VERSION >= 11)
++ for (i = 0; i < FM_PORT_NUM_OF_CONGESTION_GRPS; i++)
++ {
++ err = FmSetCongestionGroupPFCpriority(p_FmPort->h_Fm, i,
++ priorityTmpArray[i]);
++ if (err)
++ return err;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ err = fman_port_add_congestion_grps(&p_FmPort->port, grpsMap);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_add_congestion_grps"));
++
++ return E_OK;
++}
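++
++/*
++ * Worked example for the CG map layout above (illustrative): congestion
++ * group id 40 gives index = 40/32 = 1 and mod = 40%32 = 8, so on FMan
++ * revisions other than major rev 4 it sets bit 8 of grpsMap[7 - 1], i.e.
++ * grpsMap[6]; rev 4 devices collapse all groups into grpsMap[0].
++ */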
++
++t_Error FM_PORT_RemoveCongestionGrps(t_Handle h_FmPort,
++ t_FmPortCongestionGrps *p_CongestionGrps)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ uint8_t mod, index;
++ uint32_t i, grpsMap[FMAN_PORT_CG_MAP_NUM];
++ int err;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++
++ {
++#ifdef FM_NO_OP_OBSERVED_CGS
++ t_FmRevisionInfo revInfo;
++
++ FM_GetRevision(p_FmPort->h_Fm, &revInfo);
++ if (revInfo.majorRev != 4)
++ {
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) &&
++ (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx ports only"));
++ }
++ else
++#endif /* FM_NO_OP_OBSERVED_CGS */
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
++ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++ ("Available for Rx & OP ports only"));
++ }
++
++ /* Prepare groups map array */
++ memset(grpsMap, 0, FMAN_PORT_CG_MAP_NUM * sizeof(uint32_t));
++ for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
++ {
++ index = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] / 32);
++ mod = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] % 32);
++ if (p_FmPort->fmRevInfo.majorRev != 4)
++ grpsMap[7 - index] |= (uint32_t)(1 << mod);
++ else
++ grpsMap[0] |= (uint32_t)(1 << mod);
++ }
++
++#if (DPAA_VERSION >= 11)
++ for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
++ {
++ t_Error err = FmSetCongestionGroupPFCpriority(
++ p_FmPort->h_Fm, p_CongestionGrps->congestionGrpsToConsider[i],
++ 0);
++ if (err)
++ return err;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ err = fman_port_remove_congestion_grps(&p_FmPort->port, grpsMap);
++ if (err != 0)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("fman_port_remove_congestion_grps"));
++ return E_OK;
++}
++
++#if (DPAA_VERSION >= 11)
++t_Error FM_PORT_GetIPv4OptionsCount(t_Handle h_FmPort,
++ uint32_t *p_Ipv4OptionsCount)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(
++ (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING),
++ E_INVALID_VALUE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_ParamsPage, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_Ipv4OptionsCount, E_NULL_POINTER);
++
++ *p_Ipv4OptionsCount = GET_UINT32(p_FmPort->p_ParamsPage->ipfOptionsCounter);
++
++ return E_OK;
++}
++#endif /* (DPAA_VERSION >= 11) */
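++
++/*
++ * Illustrative read of the counter above (offline parsing ports only, and
++ * only once the params page exists; h_OhPort is hypothetical):
++ *
++ *     uint32_t opts = 0;
++ *
++ *     if (FM_PORT_GetIPv4OptionsCount(h_OhPort, &opts) == E_OK)
++ *         ; // "opts" now holds the IPF options counter read from MURAM
++ */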
++
++t_Error FM_PORT_ConfigDsarSupport(t_Handle h_FmPortRx,
++ t_FmPortDsarTablesSizes *params)
++{
++ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
++    p_FmPort->deepSleepVars.autoResMaxSizes = XX_Malloc(
++            sizeof(struct t_FmPortDsarTablesSizes));
++    if (!p_FmPort->deepSleepVars.autoResMaxSizes)
++        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("DSAR max table sizes"));
++    memcpy(p_FmPort->deepSleepVars.autoResMaxSizes, params,
++           sizeof(struct t_FmPortDsarTablesSizes));
++ return E_OK;
++}
++
++static t_Error FmPortConfigAutoResForDeepSleepSupport1(t_FmPort *p_FmPort)
++{
++ uint32_t *param_page;
++ t_FmPortDsarTablesSizes *params = p_FmPort->deepSleepVars.autoResMaxSizes;
++ t_ArCommonDesc *ArCommonDescPtr;
++ uint32_t size = sizeof(t_ArCommonDesc);
++ // ARP
++    // should this block be guarded by if (params->maxNumOfArpEntries)?
++ size = ROUND_UP(size,4);
++ size += sizeof(t_DsarArpDescriptor);
++ size += sizeof(t_DsarArpBindingEntry) * params->maxNumOfArpEntries;
++ size += sizeof(t_DsarArpStatistics);
++ //ICMPV4
++ size = ROUND_UP(size,4);
++ size += sizeof(t_DsarIcmpV4Descriptor);
++ size += sizeof(t_DsarIcmpV4BindingEntry) * params->maxNumOfEchoIpv4Entries;
++ size += sizeof(t_DsarIcmpV4Statistics);
++ //ICMPV6
++ size = ROUND_UP(size,4);
++ size += sizeof(t_DsarIcmpV6Descriptor);
++ size += sizeof(t_DsarIcmpV6BindingEntry) * params->maxNumOfEchoIpv6Entries;
++ size += sizeof(t_DsarIcmpV6Statistics);
++ //ND
++ size = ROUND_UP(size,4);
++ size += sizeof(t_DsarNdDescriptor);
++ size += sizeof(t_DsarIcmpV6BindingEntry) * params->maxNumOfNdpEntries;
++ size += sizeof(t_DsarIcmpV6Statistics);
++ //SNMP
++ size = ROUND_UP(size,4);
++ size += sizeof(t_DsarSnmpDescriptor);
++ size += sizeof(t_DsarSnmpIpv4AddrTblEntry)
++ * params->maxNumOfSnmpIPV4Entries;
++ size += sizeof(t_DsarSnmpIpv6AddrTblEntry)
++ * params->maxNumOfSnmpIPV6Entries;
++ size += sizeof(t_OidsTblEntry) * params->maxNumOfSnmpOidEntries;
++ size += params->maxNumOfSnmpOidChar;
++ size += sizeof(t_DsarIcmpV6Statistics);
++ //filters
++ size = ROUND_UP(size,4);
++ size += params->maxNumOfIpProtFiltering;
++ size = ROUND_UP(size,4);
++ size += params->maxNumOfUdpPortFiltering * sizeof(t_PortTblEntry);
++ size = ROUND_UP(size,4);
++ size += params->maxNumOfTcpPortFiltering * sizeof(t_PortTblEntry);
++
++ // add here for more protocols
++
++ // statistics
++ size = ROUND_UP(size,4);
++ size += sizeof(t_ArStatistics);
++
++    ArCommonDescPtr = FM_MURAM_AllocMem(p_FmPort->h_FmMuram, size, 0x10);
++    if (!ArCommonDescPtr)
++        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM for the DSAR common descriptor"));
++
++ param_page =
++ XX_PhysToVirt(
++ p_FmPort->fmMuramPhysBaseAddr
++ + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr));
++ WRITE_UINT32(
++ *param_page,
++ (uint32_t)(XX_VirtToPhys(ArCommonDescPtr) - p_FmPort->fmMuramPhysBaseAddr));
++ return E_OK;
++}
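++
++/*
++ * Note: the routine above reserves the worst-case MURAM area for all DSAR
++ * protocols up front and publishes the area's MURAM-relative offset through
++ * the port's general-purpose register page (fmbm_rgpr); FM_PORT_EnterDsar()
++ * later reads it back through the same register.
++ */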
++
++t_FmPortDsarTablesSizes* FM_PORT_GetDsarTablesMaxSizes(t_Handle h_FmPortRx)
++{
++ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
++ return p_FmPort->deepSleepVars.autoResMaxSizes;
++}
++
++struct arOffsets
++{
++ uint32_t arp;
++ uint32_t nd;
++ uint32_t icmpv4;
++ uint32_t icmpv6;
++ uint32_t snmp;
++ uint32_t stats;
++ uint32_t filtIp;
++ uint32_t filtUdp;
++ uint32_t filtTcp;
++};
++
++static uint32_t AR_ComputeOffsets(struct arOffsets* of,
++ struct t_FmPortDsarParams *params,
++ t_FmPort *p_FmPort)
++{
++ uint32_t size = sizeof(t_ArCommonDesc);
++ // ARP
++ if (params->p_AutoResArpInfo)
++ {
++ size = ROUND_UP(size,4);
++ of->arp = size;
++ size += sizeof(t_DsarArpDescriptor);
++ size += sizeof(t_DsarArpBindingEntry)
++ * params->p_AutoResArpInfo->tableSize;
++ size += sizeof(t_DsarArpStatistics);
++ }
++ // ICMPV4
++ if (params->p_AutoResEchoIpv4Info)
++ {
++ size = ROUND_UP(size,4);
++ of->icmpv4 = size;
++ size += sizeof(t_DsarIcmpV4Descriptor);
++ size += sizeof(t_DsarIcmpV4BindingEntry)
++ * params->p_AutoResEchoIpv4Info->tableSize;
++ size += sizeof(t_DsarIcmpV4Statistics);
++ }
++ // ICMPV6
++ if (params->p_AutoResEchoIpv6Info)
++ {
++ size = ROUND_UP(size,4);
++ of->icmpv6 = size;
++ size += sizeof(t_DsarIcmpV6Descriptor);
++ size += sizeof(t_DsarIcmpV6BindingEntry)
++ * params->p_AutoResEchoIpv6Info->tableSize;
++ size += sizeof(t_DsarIcmpV6Statistics);
++ }
++ // ND
++ if (params->p_AutoResNdpInfo)
++ {
++ size = ROUND_UP(size,4);
++ of->nd = size;
++ size += sizeof(t_DsarNdDescriptor);
++ size += sizeof(t_DsarIcmpV6BindingEntry)
++ * (params->p_AutoResNdpInfo->tableSizeAssigned
++ + params->p_AutoResNdpInfo->tableSizeTmp);
++ size += sizeof(t_DsarIcmpV6Statistics);
++ }
++ // SNMP
++ if (params->p_AutoResSnmpInfo)
++ {
++ size = ROUND_UP(size,4);
++ of->snmp = size;
++ size += sizeof(t_DsarSnmpDescriptor);
++ size += sizeof(t_DsarSnmpIpv4AddrTblEntry)
++ * params->p_AutoResSnmpInfo->numOfIpv4Addresses;
++ size += sizeof(t_DsarSnmpIpv6AddrTblEntry)
++ * params->p_AutoResSnmpInfo->numOfIpv6Addresses;
++ size += sizeof(t_OidsTblEntry) * params->p_AutoResSnmpInfo->oidsTblSize;
++ size += p_FmPort->deepSleepVars.autoResMaxSizes->maxNumOfSnmpOidChar;
++ size += sizeof(t_DsarIcmpV6Statistics);
++ }
++ //filters
++ size = ROUND_UP(size,4);
++ if (params->p_AutoResFilteringInfo)
++ {
++ of->filtIp = size;
++ size += params->p_AutoResFilteringInfo->ipProtTableSize;
++ size = ROUND_UP(size,4);
++ of->filtUdp = size;
++ size += params->p_AutoResFilteringInfo->udpPortsTableSize
++ * sizeof(t_PortTblEntry);
++ size = ROUND_UP(size,4);
++ of->filtTcp = size;
++ size += params->p_AutoResFilteringInfo->tcpPortsTableSize
++ * sizeof(t_PortTblEntry);
++ }
++ // add here for more protocols
++ // statistics
++ size = ROUND_UP(size,4);
++ of->stats = size;
++ size += sizeof(t_ArStatistics);
++ return size;
++}
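++
++/*
++ * Note on AR_ComputeOffsets() above: it walks the same protocol order as
++ * FmPortConfigAutoResForDeepSleepSupport1() (ARP, ICMPv4, ICMPv6, ND, SNMP,
++ * filters, statistics), 4-byte aligning each section, but additionally
++ * records the start offset of every enabled block in "of" so that
++ * FM_PORT_EnterDsar() can place each descriptor inside the single MURAM
++ * allocation.
++ */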
++
++uint32_t* ARDesc;
++void PrsEnable(t_Handle p_FmPcd);
++void PrsDisable(t_Handle p_FmPcd);
++int PrsIsEnabled(t_Handle p_FmPcd);
++t_Handle FM_PCD_GetHcPort(t_Handle h_FmPcd);
++
++static t_Error DsarCheckParams(t_FmPortDsarParams *params,
++ t_FmPortDsarTablesSizes *sizes)
++{
++ bool macInit = FALSE;
++ uint8_t mac[6];
++ int i = 0;
++
++ // check table sizes
++ if (params->p_AutoResArpInfo
++ && sizes->maxNumOfArpEntries < params->p_AutoResArpInfo->tableSize)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("DSAR: Arp table size exceeds the configured maximum size."));
++ if (params->p_AutoResEchoIpv4Info
++ && sizes->maxNumOfEchoIpv4Entries
++ < params->p_AutoResEchoIpv4Info->tableSize)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("DSAR: EchoIpv4 table size exceeds the configured maximum size."));
++ if (params->p_AutoResNdpInfo
++ && sizes->maxNumOfNdpEntries
++ < params->p_AutoResNdpInfo->tableSizeAssigned
++ + params->p_AutoResNdpInfo->tableSizeTmp)
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("DSAR: NDP table size exceeds the configured maximum size."));
++ if (params->p_AutoResEchoIpv6Info
++ && sizes->maxNumOfEchoIpv6Entries
++ < params->p_AutoResEchoIpv6Info->tableSize)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("DSAR: EchoIpv6 table size exceeds the configured maximum size."));
++ if (params->p_AutoResSnmpInfo
++ && sizes->maxNumOfSnmpOidEntries
++ < params->p_AutoResSnmpInfo->oidsTblSize)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("DSAR: Snmp Oid table size exceeds the configured maximum size."));
++ if (params->p_AutoResSnmpInfo
++ && sizes->maxNumOfSnmpIPV4Entries
++ < params->p_AutoResSnmpInfo->numOfIpv4Addresses)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("DSAR: Snmp ipv4 table size exceeds the configured maximum size."));
++ if (params->p_AutoResSnmpInfo
++ && sizes->maxNumOfSnmpIPV6Entries
++ < params->p_AutoResSnmpInfo->numOfIpv6Addresses)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("DSAR: Snmp ipv6 table size exceeds the configured maximum size."));
++ if (params->p_AutoResFilteringInfo)
++ {
++ if (sizes->maxNumOfIpProtFiltering
++ < params->p_AutoResFilteringInfo->ipProtTableSize)
++ RETURN_ERROR(
++ MAJOR,
++ E_INVALID_VALUE,
++ ("DSAR: ip filter table size exceeds the configured maximum size."));
++        if (sizes->maxNumOfUdpPortFiltering
++                < params->p_AutoResFilteringInfo->udpPortsTableSize)
++            RETURN_ERROR(
++                    MAJOR,
++                    E_INVALID_VALUE,
++                    ("DSAR: udp filter table size exceeds the configured maximum size."));
++        if (sizes->maxNumOfTcpPortFiltering
++                < params->p_AutoResFilteringInfo->tcpPortsTableSize)
++            RETURN_ERROR(
++                    MAJOR,
++                    E_INVALID_VALUE,
++                    ("DSAR: tcp filter table size exceeds the configured maximum size."));
++ }
++ /* check only 1 MAC address is configured (this is what ucode currently supports) */
++ if (params->p_AutoResArpInfo && params->p_AutoResArpInfo->tableSize)
++ {
++ memcpy(mac, params->p_AutoResArpInfo->p_AutoResTable[0].mac, 6);
++ i = 1;
++ macInit = TRUE;
++
++ for (; i < params->p_AutoResArpInfo->tableSize; i++)
++ if (memcmp(mac, params->p_AutoResArpInfo->p_AutoResTable[i].mac, 6))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("DSAR: Only 1 mac address is currently supported."));
++ }
++ if (params->p_AutoResEchoIpv4Info
++ && params->p_AutoResEchoIpv4Info->tableSize)
++ {
++ i = 0;
++ if (!macInit)
++ {
++ memcpy(mac, params->p_AutoResEchoIpv4Info->p_AutoResTable[0].mac,
++ 6);
++ i = 1;
++ macInit = TRUE;
++ }
++ for (; i < params->p_AutoResEchoIpv4Info->tableSize; i++)
++ if (memcmp(mac,
++ params->p_AutoResEchoIpv4Info->p_AutoResTable[i].mac, 6))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("DSAR: Only 1 mac address is currently supported."));
++ }
++ if (params->p_AutoResEchoIpv6Info
++ && params->p_AutoResEchoIpv6Info->tableSize)
++ {
++ i = 0;
++ if (!macInit)
++ {
++ memcpy(mac, params->p_AutoResEchoIpv6Info->p_AutoResTable[0].mac,
++ 6);
++ i = 1;
++ macInit = TRUE;
++ }
++ for (; i < params->p_AutoResEchoIpv6Info->tableSize; i++)
++ if (memcmp(mac,
++ params->p_AutoResEchoIpv6Info->p_AutoResTable[i].mac, 6))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("DSAR: Only 1 mac address is currently supported."));
++ }
++ if (params->p_AutoResNdpInfo && params->p_AutoResNdpInfo->tableSizeAssigned)
++ {
++ i = 0;
++ if (!macInit)
++ {
++ memcpy(mac, params->p_AutoResNdpInfo->p_AutoResTableAssigned[0].mac,
++ 6);
++ i = 1;
++ macInit = TRUE;
++ }
++ for (; i < params->p_AutoResNdpInfo->tableSizeAssigned; i++)
++ if (memcmp(mac,
++ params->p_AutoResNdpInfo->p_AutoResTableAssigned[i].mac,
++ 6))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("DSAR: Only 1 mac address is currently supported."));
++ }
++ if (params->p_AutoResNdpInfo && params->p_AutoResNdpInfo->tableSizeTmp)
++ {
++ i = 0;
++ if (!macInit)
++ {
++ memcpy(mac, params->p_AutoResNdpInfo->p_AutoResTableTmp[0].mac, 6);
++ i = 1;
++ }
++ for (; i < params->p_AutoResNdpInfo->tableSizeTmp; i++)
++ if (memcmp(mac, params->p_AutoResNdpInfo->p_AutoResTableTmp[i].mac,
++ 6))
++ RETURN_ERROR(
++ MAJOR, E_INVALID_VALUE,
++ ("DSAR: Only 1 mac address is currently supported."));
++ }
++ return E_OK;
++}
++
++static int GetBERLen(uint8_t* buf)
++{
++ if (*buf & 0x80)
++ {
++ if ((*buf & 0x7F) == 1)
++ return buf[1];
++ else
++            return (buf[1] << 8) | buf[2]; // assuming the length field is at most 2 bytes; avoids an unaligned, endianness-dependent load
++ }
++ else
++ return buf[0];
++}
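++
++/*
++ * Worked example for GetBERLen() above and the TOTAL_BER_LEN macro below:
++ * a short-form ASN.1 BER length byte 0x0D decodes to 13 (total 15 with the
++ * tag and length bytes), while the long form 0x81 0x85 decodes to 133
++ * (total 136, since the extra length byte adds one to the header).
++ */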
++#define TOTAL_BER_LEN(len) (((len) < 128) ? (len) + 2 : (len) + 3)
++
++#define SCFG_FMCLKDPSLPCR_ADDR 0xFFE0FC00C
++#define SCFG_FMCLKDPSLPCR_DS_VAL 0x08402000
++#define SCFG_FMCLKDPSLPCR_NORMAL_VAL 0x00402000
++static int fm_soc_suspend(void)
++{
++ uint32_t *fmclk, tmp32;
++ fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4);
++    tmp32 = GET_UINT32(*fmclk);    /* snapshot the current value */
++    WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_DS_VAL);
++    tmp32 = GET_UINT32(*fmclk);    /* read back, likely to flush the posted write */
++ iounmap(fmclk);
++ return 0;
++}
++
++void fm_clk_down(void)
++{
++ uint32_t *fmclk, tmp32;
++ fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4);
++    tmp32 = GET_UINT32(*fmclk);    /* snapshot the current value */
++    WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_DS_VAL | 0x40000000);
++    tmp32 = GET_UINT32(*fmclk);    /* read back, likely to flush the posted write */
++ iounmap(fmclk);
++}
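++
++/*
++ * Note on the two helpers above: both program the SCFG FM clock deep-sleep
++ * control register. fm_soc_suspend() writes the plain deep-sleep value,
++ * while fm_clk_down() additionally sets bit 0x40000000; the read-backs
++ * around each write appear intended to flush the posted write before the
++ * mapping is unmapped.
++ */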
++
++t_Error FM_PORT_EnterDsar(t_Handle h_FmPortRx, t_FmPortDsarParams *params)
++{
++ int i, j;
++ t_Error err;
++ uint32_t nia;
++ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
++ t_FmPort *p_FmPortTx = (t_FmPort *)params->h_FmPortTx;
++ t_DsarArpDescriptor *ArpDescriptor;
++ t_DsarIcmpV4Descriptor* ICMPV4Descriptor;
++ t_DsarIcmpV6Descriptor* ICMPV6Descriptor;
++ t_DsarNdDescriptor* NDDescriptor;
++
++ uint64_t fmMuramVirtBaseAddr = (uint64_t)PTR_TO_UINT(XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr));
++ uint32_t *param_page = XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr));
++ t_ArCommonDesc *ArCommonDescPtr = (t_ArCommonDesc*)(XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(*param_page)));
++ struct arOffsets* of;
++ uint8_t tmp = 0;
++ t_FmGetSetParams fmGetSetParams;
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP;
++ fmGetSetParams.setParams.sleep = 1;
++
++ err = DsarCheckParams(params, p_FmPort->deepSleepVars.autoResMaxSizes);
++ if (err != E_OK)
++ return err;
++
++    p_FmPort->deepSleepVars.autoResOffsets = XX_Malloc(sizeof(struct arOffsets));
++    if (!p_FmPort->deepSleepVars.autoResOffsets)
++        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("DSAR offsets"));
++ of = (struct arOffsets *)p_FmPort->deepSleepVars.autoResOffsets;
++ IOMemSet32(ArCommonDescPtr, 0, AR_ComputeOffsets(of, params, p_FmPort));
++
++ // common
++ WRITE_UINT8(ArCommonDescPtr->arTxPort, p_FmPortTx->hardwarePortId);
++ nia = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne); // bmi nia
++ if ((nia & 0x007C0000) == 0x00440000) // bmi nia is parser
++ WRITE_UINT32(ArCommonDescPtr->activeHPNIA, GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne));
++ else
++ WRITE_UINT32(ArCommonDescPtr->activeHPNIA, nia);
++ WRITE_UINT16(ArCommonDescPtr->snmpPort, 161);
++
++ // ARP
++ if (params->p_AutoResArpInfo)
++ {
++ t_DsarArpBindingEntry* arp_bindings;
++ ArpDescriptor = (t_DsarArpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->arp);
++ WRITE_UINT32(ArCommonDescPtr->p_ArpDescriptor, PTR_TO_UINT(ArpDescriptor) - fmMuramVirtBaseAddr);
++ arp_bindings = (t_DsarArpBindingEntry*)(PTR_TO_UINT(ArpDescriptor) + sizeof(t_DsarArpDescriptor));
++ if (params->p_AutoResArpInfo->enableConflictDetection)
++ WRITE_UINT16(ArpDescriptor->control, 1);
++ else
++ WRITE_UINT16(ArpDescriptor->control, 0);
++ if (params->p_AutoResArpInfo->tableSize)
++ {
++ t_FmPortDsarArpEntry* arp_entry = params->p_AutoResArpInfo->p_AutoResTable;
++ WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&arp_entry[0].mac[0]);
++ WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&arp_entry[0].mac[2]);
++ WRITE_UINT16(ArpDescriptor->numOfBindings, params->p_AutoResArpInfo->tableSize);
++
++ for (i = 0; i < params->p_AutoResArpInfo->tableSize; i++)
++ {
++ WRITE_UINT32(arp_bindings[i].ipv4Addr, arp_entry[i].ipAddress);
++ if (arp_entry[i].isVlan)
++ WRITE_UINT16(arp_bindings[i].vlanId, arp_entry[i].vid & 0xFFF);
++ }
++ WRITE_UINT32(ArpDescriptor->p_Bindings, PTR_TO_UINT(arp_bindings) - fmMuramVirtBaseAddr);
++ }
++ WRITE_UINT32(ArpDescriptor->p_Statistics, PTR_TO_UINT(arp_bindings) +
++ sizeof(t_DsarArpBindingEntry) * params->p_AutoResArpInfo->tableSize - fmMuramVirtBaseAddr);
++ }
++
++ // ICMPV4
++ if (params->p_AutoResEchoIpv4Info)
++ {
++ t_DsarIcmpV4BindingEntry* icmpv4_bindings;
++ ICMPV4Descriptor = (t_DsarIcmpV4Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv4);
++ WRITE_UINT32(ArCommonDescPtr->p_IcmpV4Descriptor, PTR_TO_UINT(ICMPV4Descriptor) - fmMuramVirtBaseAddr);
++ icmpv4_bindings = (t_DsarIcmpV4BindingEntry*)(PTR_TO_UINT(ICMPV4Descriptor) + sizeof(t_DsarIcmpV4Descriptor));
++ WRITE_UINT16(ICMPV4Descriptor->control, 0);
++ if (params->p_AutoResEchoIpv4Info->tableSize)
++ {
++ t_FmPortDsarArpEntry* arp_entry = params->p_AutoResEchoIpv4Info->p_AutoResTable;
++ WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&arp_entry[0].mac[0]);
++ WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&arp_entry[0].mac[2]);
++ WRITE_UINT16(ICMPV4Descriptor->numOfBindings, params->p_AutoResEchoIpv4Info->tableSize);
++
++ for (i = 0; i < params->p_AutoResEchoIpv4Info->tableSize; i++)
++ {
++ WRITE_UINT32(icmpv4_bindings[i].ipv4Addr, arp_entry[i].ipAddress);
++ if (arp_entry[i].isVlan)
++ WRITE_UINT16(icmpv4_bindings[i].vlanId, arp_entry[i].vid & 0xFFF);
++ }
++ WRITE_UINT32(ICMPV4Descriptor->p_Bindings, PTR_TO_UINT(icmpv4_bindings) - fmMuramVirtBaseAddr);
++ }
++ WRITE_UINT32(ICMPV4Descriptor->p_Statistics, PTR_TO_UINT(icmpv4_bindings) +
++ sizeof(t_DsarIcmpV4BindingEntry) * params->p_AutoResEchoIpv4Info->tableSize - fmMuramVirtBaseAddr);
++ }
++
++ // ICMPV6
++ if (params->p_AutoResEchoIpv6Info)
++ {
++ t_DsarIcmpV6BindingEntry* icmpv6_bindings;
++ ICMPV6Descriptor = (t_DsarIcmpV6Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv6);
++ WRITE_UINT32(ArCommonDescPtr->p_IcmpV6Descriptor, PTR_TO_UINT(ICMPV6Descriptor) - fmMuramVirtBaseAddr);
++ icmpv6_bindings = (t_DsarIcmpV6BindingEntry*)(PTR_TO_UINT(ICMPV6Descriptor) + sizeof(t_DsarIcmpV6Descriptor));
++ WRITE_UINT16(ICMPV6Descriptor->control, 0);
++ if (params->p_AutoResEchoIpv6Info->tableSize)
++ {
++ t_FmPortDsarNdpEntry* ndp_entry = params->p_AutoResEchoIpv6Info->p_AutoResTable;
++ WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&ndp_entry[0].mac[0]);
++ WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&ndp_entry[0].mac[2]);
++ WRITE_UINT16(ICMPV6Descriptor->numOfBindings, params->p_AutoResEchoIpv6Info->tableSize);
++
++ for (i = 0; i < params->p_AutoResEchoIpv6Info->tableSize; i++)
++ {
++ for (j = 0; j < 4; j++)
++ WRITE_UINT32(icmpv6_bindings[i].ipv6Addr[j], ndp_entry[i].ipAddress[j]);
++ if (ndp_entry[i].isVlan)
++ WRITE_UINT16(*(uint16_t*)&icmpv6_bindings[i].ipv6Addr[4], ndp_entry[i].vid & 0xFFF); // writing vlan
++ }
++ WRITE_UINT32(ICMPV6Descriptor->p_Bindings, PTR_TO_UINT(icmpv6_bindings) - fmMuramVirtBaseAddr);
++ }
++ WRITE_UINT32(ICMPV6Descriptor->p_Statistics, PTR_TO_UINT(icmpv6_bindings) +
++ sizeof(t_DsarIcmpV6BindingEntry) * params->p_AutoResEchoIpv6Info->tableSize - fmMuramVirtBaseAddr);
++ }
++
++ // ND
++ if (params->p_AutoResNdpInfo)
++ {
++ t_DsarIcmpV6BindingEntry* icmpv6_bindings;
++ NDDescriptor = (t_DsarNdDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->nd);
++ WRITE_UINT32(ArCommonDescPtr->p_NdDescriptor, PTR_TO_UINT(NDDescriptor) - fmMuramVirtBaseAddr);
++ icmpv6_bindings = (t_DsarIcmpV6BindingEntry*)(PTR_TO_UINT(NDDescriptor) + sizeof(t_DsarNdDescriptor));
++ if (params->p_AutoResNdpInfo->enableConflictDetection)
++ WRITE_UINT16(NDDescriptor->control, 1);
++ else
++ WRITE_UINT16(NDDescriptor->control, 0);
++ if (params->p_AutoResNdpInfo->tableSizeAssigned + params->p_AutoResNdpInfo->tableSizeTmp)
++ {
++ t_FmPortDsarNdpEntry* ndp_entry = params->p_AutoResNdpInfo->p_AutoResTableAssigned;
++ WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&ndp_entry[0].mac[0]);
++ WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&ndp_entry[0].mac[2]);
++ WRITE_UINT16(NDDescriptor->numOfBindings, params->p_AutoResNdpInfo->tableSizeAssigned
++ + params->p_AutoResNdpInfo->tableSizeTmp);
++
++ for (i = 0; i < params->p_AutoResNdpInfo->tableSizeAssigned; i++)
++ {
++ for (j = 0; j < 4; j++)
++ WRITE_UINT32(icmpv6_bindings[i].ipv6Addr[j], ndp_entry[i].ipAddress[j]);
++ if (ndp_entry[i].isVlan)
++ WRITE_UINT16(*(uint16_t*)&icmpv6_bindings[i].ipv6Addr[4], ndp_entry[i].vid & 0xFFF); // writing vlan
++ }
++ ndp_entry = params->p_AutoResNdpInfo->p_AutoResTableTmp;
++ for (i = 0; i < params->p_AutoResNdpInfo->tableSizeTmp; i++)
++ {
++ for (j = 0; j < 4; j++)
++ WRITE_UINT32(icmpv6_bindings[i + params->p_AutoResNdpInfo->tableSizeAssigned].ipv6Addr[j], ndp_entry[i].ipAddress[j]);
++ if (ndp_entry[i].isVlan)
++ WRITE_UINT16(*(uint16_t*)&icmpv6_bindings[i + params->p_AutoResNdpInfo->tableSizeAssigned].ipv6Addr[4], ndp_entry[i].vid & 0xFFF); // writing vlan
++ }
++ WRITE_UINT32(NDDescriptor->p_Bindings, PTR_TO_UINT(icmpv6_bindings) - fmMuramVirtBaseAddr);
++ }
++ WRITE_UINT32(NDDescriptor->p_Statistics, PTR_TO_UINT(icmpv6_bindings) + sizeof(t_DsarIcmpV6BindingEntry)
++ * (params->p_AutoResNdpInfo->tableSizeAssigned + params->p_AutoResNdpInfo->tableSizeTmp)
++ - fmMuramVirtBaseAddr);
++ WRITE_UINT32(NDDescriptor->solicitedAddr, 0xFFFFFFFF);
++ }
++
++ // SNMP
++ if (params->p_AutoResSnmpInfo)
++ {
++ t_FmPortDsarSnmpInfo *snmpSrc = params->p_AutoResSnmpInfo;
++ t_DsarSnmpIpv4AddrTblEntry* snmpIpv4Addr;
++ t_DsarSnmpIpv6AddrTblEntry* snmpIpv6Addr;
++ t_OidsTblEntry* snmpOid;
++ uint8_t *charPointer;
++ int len;
++ t_DsarSnmpDescriptor* SnmpDescriptor = (t_DsarSnmpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->snmp);
++ WRITE_UINT32(ArCommonDescPtr->p_SnmpDescriptor, PTR_TO_UINT(SnmpDescriptor) - fmMuramVirtBaseAddr);
++ WRITE_UINT16(SnmpDescriptor->control, snmpSrc->control);
++ WRITE_UINT16(SnmpDescriptor->maxSnmpMsgLength, snmpSrc->maxSnmpMsgLength);
++ snmpIpv4Addr = (t_DsarSnmpIpv4AddrTblEntry*)(PTR_TO_UINT(SnmpDescriptor) + sizeof(t_DsarSnmpDescriptor));
++ if (snmpSrc->numOfIpv4Addresses)
++ {
++ t_FmPortDsarSnmpIpv4AddrTblEntry* snmpIpv4AddrSrc = snmpSrc->p_Ipv4AddrTbl;
++ WRITE_UINT16(SnmpDescriptor->numOfIpv4Addresses, snmpSrc->numOfIpv4Addresses);
++ for (i = 0; i < snmpSrc->numOfIpv4Addresses; i++)
++ {
++ WRITE_UINT32(snmpIpv4Addr[i].ipv4Addr, snmpIpv4AddrSrc[i].ipv4Addr);
++ if (snmpIpv4AddrSrc[i].isVlan)
++ WRITE_UINT16(snmpIpv4Addr[i].vlanId, snmpIpv4AddrSrc[i].vid & 0xFFF);
++ }
++ WRITE_UINT32(SnmpDescriptor->p_Ipv4AddrTbl, PTR_TO_UINT(snmpIpv4Addr) - fmMuramVirtBaseAddr);
++ }
++ snmpIpv6Addr = (t_DsarSnmpIpv6AddrTblEntry*)(PTR_TO_UINT(snmpIpv4Addr)
++ + sizeof(t_DsarSnmpIpv4AddrTblEntry) * snmpSrc->numOfIpv4Addresses);
++ if (snmpSrc->numOfIpv6Addresses)
++ {
++ t_FmPortDsarSnmpIpv6AddrTblEntry* snmpIpv6AddrSrc = snmpSrc->p_Ipv6AddrTbl;
++ WRITE_UINT16(SnmpDescriptor->numOfIpv6Addresses, snmpSrc->numOfIpv6Addresses);
++ for (i = 0; i < snmpSrc->numOfIpv6Addresses; i++)
++ {
++ for (j = 0; j < 4; j++)
++ WRITE_UINT32(snmpIpv6Addr[i].ipv6Addr[j], snmpIpv6AddrSrc[i].ipv6Addr[j]);
++ if (snmpIpv6AddrSrc[i].isVlan)
++ WRITE_UINT16(snmpIpv6Addr[i].vlanId, snmpIpv6AddrSrc[i].vid & 0xFFF);
++ }
++ WRITE_UINT32(SnmpDescriptor->p_Ipv6AddrTbl, PTR_TO_UINT(snmpIpv6Addr) - fmMuramVirtBaseAddr);
++ }
++ snmpOid = (t_OidsTblEntry*)(PTR_TO_UINT(snmpIpv6Addr)
++ + sizeof(t_DsarSnmpIpv6AddrTblEntry) * snmpSrc->numOfIpv6Addresses);
++ charPointer = (uint8_t*)(PTR_TO_UINT(snmpOid)
++ + sizeof(t_OidsTblEntry) * snmpSrc->oidsTblSize);
++ len = TOTAL_BER_LEN(GetBERLen(&snmpSrc->p_RdOnlyCommunityStr[1]));
++ Mem2IOCpy32(charPointer, snmpSrc->p_RdOnlyCommunityStr, len);
++ WRITE_UINT32(SnmpDescriptor->p_RdOnlyCommunityStr, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
++ charPointer += len;
++ len = TOTAL_BER_LEN(GetBERLen(&snmpSrc->p_RdWrCommunityStr[1]));
++ Mem2IOCpy32(charPointer, snmpSrc->p_RdWrCommunityStr, len);
++ WRITE_UINT32(SnmpDescriptor->p_RdWrCommunityStr, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
++ charPointer += len;
++ WRITE_UINT32(SnmpDescriptor->oidsTblSize, snmpSrc->oidsTblSize);
++ WRITE_UINT32(SnmpDescriptor->p_OidsTbl, PTR_TO_UINT(snmpOid) - fmMuramVirtBaseAddr);
++ for (i = 0; i < snmpSrc->oidsTblSize; i++)
++ {
++ WRITE_UINT16(snmpOid->oidSize, snmpSrc->p_OidsTbl[i].oidSize);
++ WRITE_UINT16(snmpOid->resSize, snmpSrc->p_OidsTbl[i].resSize);
++ Mem2IOCpy32(charPointer, snmpSrc->p_OidsTbl[i].oidVal, snmpSrc->p_OidsTbl[i].oidSize);
++ WRITE_UINT32(snmpOid->p_Oid, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
++ charPointer += snmpSrc->p_OidsTbl[i].oidSize;
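++ // Results of up to 4 bytes are stored inline in resValOrPtr; larger results are copied to MURAM and referenced by offset.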
++ if (snmpSrc->p_OidsTbl[i].resSize <= 4)
++ WRITE_UINT32(snmpOid->resValOrPtr, *snmpSrc->p_OidsTbl[i].resVal);
++ else
++ {
++ Mem2IOCpy32(charPointer, snmpSrc->p_OidsTbl[i].resVal, snmpSrc->p_OidsTbl[i].resSize);
++ WRITE_UINT32(snmpOid->resValOrPtr, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
++ charPointer += snmpSrc->p_OidsTbl[i].resSize;
++ }
++ snmpOid++;
++ }
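++ // Align the running MURAM cursor to a 4-byte boundary before placing the statistics area.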
++ charPointer = UINT_TO_PTR(ROUND_UP(PTR_TO_UINT(charPointer),4));
++ WRITE_UINT32(SnmpDescriptor->p_Statistics, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
++ }
++
++ // filtering
++ if (params->p_AutoResFilteringInfo)
++ {
++ if (params->p_AutoResFilteringInfo->ipProtPassOnHit)
++ tmp |= IP_PROT_TBL_PASS_MASK;
++ if (params->p_AutoResFilteringInfo->udpPortPassOnHit)
++ tmp |= UDP_PORT_TBL_PASS_MASK;
++ if (params->p_AutoResFilteringInfo->tcpPortPassOnHit)
++ tmp |= TCP_PORT_TBL_PASS_MASK;
++ WRITE_UINT8(ArCommonDescPtr->filterControl, tmp);
++ WRITE_UINT16(ArCommonDescPtr->tcpControlPass, params->p_AutoResFilteringInfo->tcpFlagsMask);
++
++ // ip filtering
++ if (params->p_AutoResFilteringInfo->ipProtTableSize)
++ {
++ uint8_t* ip_tbl = (uint8_t*)(PTR_TO_UINT(ArCommonDescPtr) + of->filtIp);
++ WRITE_UINT8(ArCommonDescPtr->ipProtocolTblSize, params->p_AutoResFilteringInfo->ipProtTableSize);
++ for (i = 0; i < params->p_AutoResFilteringInfo->ipProtTableSize; i++)
++ WRITE_UINT8(ip_tbl[i], params->p_AutoResFilteringInfo->p_IpProtTablePtr[i]);
++ WRITE_UINT32(ArCommonDescPtr->p_IpProtocolFiltTbl, PTR_TO_UINT(ip_tbl) - fmMuramVirtBaseAddr);
++ }
++
++ // udp filtering
++ if (params->p_AutoResFilteringInfo->udpPortsTableSize)
++ {
++ t_PortTblEntry* udp_tbl = (t_PortTblEntry*)(PTR_TO_UINT(ArCommonDescPtr) + of->filtUdp);
++ WRITE_UINT8(ArCommonDescPtr->udpPortTblSize, params->p_AutoResFilteringInfo->udpPortsTableSize);
++ for (i = 0; i < params->p_AutoResFilteringInfo->udpPortsTableSize; i++)
++ {
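++ // Each entry packs the source port in the high 16 bits and the destination port in the low 16 bits; the mask word uses the same layout.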
++ WRITE_UINT32(udp_tbl[i].Ports,
++ (params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].srcPort << 16) +
++ params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].dstPort);
++ WRITE_UINT32(udp_tbl[i].PortsMask,
++ (params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].srcPortMask << 16) +
++ params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].dstPortMask);
++ }
++ WRITE_UINT32(ArCommonDescPtr->p_UdpPortFiltTbl, PTR_TO_UINT(udp_tbl) - fmMuramVirtBaseAddr);
++ }
++
++ // tcp filtering
++ if (params->p_AutoResFilteringInfo->tcpPortsTableSize)
++ {
++ t_PortTblEntry* tcp_tbl = (t_PortTblEntry*)(PTR_TO_UINT(ArCommonDescPtr) + of->filtTcp);
++ WRITE_UINT8(ArCommonDescPtr->tcpPortTblSize, params->p_AutoResFilteringInfo->tcpPortsTableSize);
++ for (i = 0; i < params->p_AutoResFilteringInfo->tcpPortsTableSize; i++)
++ {
++ WRITE_UINT32(tcp_tbl[i].Ports,
++ (params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].srcPort << 16) +
++ params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].dstPort);
++ WRITE_UINT32(tcp_tbl[i].PortsMask,
++ (params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].srcPortMask << 16) +
++ params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].dstPortMask);
++ }
++ WRITE_UINT32(ArCommonDescPtr->p_TcpPortFiltTbl, PTR_TO_UINT(tcp_tbl) - fmMuramVirtBaseAddr);
++ }
++ }
++ // common stats
++ WRITE_UINT32(ArCommonDescPtr->p_ArStats, PTR_TO_UINT(ArCommonDescPtr) + of->stats - fmMuramVirtBaseAddr);
++
++ // get into Deep Sleep sequence:
++
++ // Ensure that the FMan does not enter the idle state. This is done by
++ // programming FMDPSLPCR[FM_STOP] to one.
++ fm_soc_suspend();
++
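++ // Record the physical address of the common descriptor; FM_PORT_Dsar_DumpRegs() maps it back for dumping.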
++ ARDesc = UINT_TO_PTR(XX_VirtToPhys(ArCommonDescPtr));
++ return E_OK;
++
++}
++
++void FM_ChangeClock(t_Handle h_Fm, int hardwarePortId);
++t_Error FM_PORT_EnterDsarFinal(t_Handle h_DsarRxPort, t_Handle h_DsarTxPort)
++{
++ t_FmGetSetParams fmGetSetParams;
++ t_FmPort *p_FmPort = (t_FmPort *)h_DsarRxPort;
++ t_FmPort *p_FmPortTx = (t_FmPort *)h_DsarTxPort;
++ t_Handle *h_FmPcd = FmGetPcd(p_FmPort->h_Fm);
++ t_FmPort *p_FmPortHc = FM_PCD_GetHcPort(h_FmPcd);
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.setParams.type = UPDATE_FM_CLD;
++ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
++
++ /* Issue graceful stop to HC port */
++ FM_PORT_Disable(p_FmPortHc);
++
++ // config tx port
++ p_FmPort->deepSleepVars.fmbm_tcfg = GET_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg);
++ WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg, GET_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg) | BMI_PORT_CFG_IM | BMI_PORT_CFG_EN);
++ // ????
++ p_FmPort->deepSleepVars.fmbm_tcmne = GET_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcmne);
++ WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcmne, 0xE);
++ // Stage 7: echo
++ p_FmPort->deepSleepVars.fmbm_rfpne = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne);
++ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne, 0x2E);
++ if (!PrsIsEnabled(h_FmPcd))
++ {
++ p_FmPort->deepSleepVars.dsarEnabledParser = TRUE;
++ PrsEnable(h_FmPcd);
++ }
++ else
++ p_FmPort->deepSleepVars.dsarEnabledParser = FALSE;
++
++ p_FmPort->deepSleepVars.fmbm_rfne = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne);
++ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne, 0x440000);
++
++ // save rcfg for restoring: accumulate mode is changed by ucode
++ p_FmPort->deepSleepVars.fmbm_rcfg = GET_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rcfg);
++ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rcfg, p_FmPort->deepSleepVars.fmbm_rcfg | BMI_PORT_CFG_AM);
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP;
++ fmGetSetParams.setParams.sleep = 1;
++ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
++
++ // ***** issue external request sync command
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.setParams.type = UPDATE_FPM_EXTC;
++ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
++ // read back FMFP_EXTC to check whether the sync request is still pending
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.getParams.type = GET_FMFP_EXTC;
++ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
++ if (fmGetSetParams.getParams.fmfp_extc != 0)
++ {
++ // clear
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.setParams.type = UPDATE_FPM_EXTC_CLEAR;
++ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
++ }
++
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.getParams.type = GET_FMFP_EXTC | GET_FM_NPI;
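++ // Poll until the external request sync completes (FMFP_EXTC clears) or the FMan reports activity (FM_NPI set).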
++ do
++ {
++ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
++ } while (fmGetSetParams.getParams.fmfp_extc != 0 && fmGetSetParams.getParams.fm_npi == 0);
++ if (fmGetSetParams.getParams.fm_npi != 0)
++ XX_Print("FM: Sync did not finish\n");
++
++ // check that all activity has stopped
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.getParams.type = GET_FMQM_GS | GET_FM_NPI;
++ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
++ while (fmGetSetParams.getParams.fmqm_gs & 0xF0000000)
++ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
++ if (fmGetSetParams.getParams.fmqm_gs == 0 && fmGetSetParams.getParams.fm_npi == 0)
++ XX_Print("FM: Sleeping\n");
++// FM_ChangeClock(p_FmPort->h_Fm, p_FmPort->hardwarePortId);
++
++ return E_OK;
++}
++
++EXPORT_SYMBOL(FM_PORT_EnterDsarFinal);
++
++void FM_PORT_Dsar_DumpRegs(void)
++{
++ uint32_t* hh = XX_PhysToVirt(PTR_TO_UINT(ARDesc));
++ DUMP_MEMORY(hh, 0x220);
++}
++
++void FM_PORT_ExitDsar(t_Handle h_FmPortRx, t_Handle h_FmPortTx)
++{
++ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
++ t_FmPort *p_FmPortTx = (t_FmPort *)h_FmPortTx;
++ t_Handle *h_FmPcd = FmGetPcd(p_FmPort->h_Fm);
++ t_FmPort *p_FmPortHc = FM_PCD_GetHcPort(h_FmPcd);
++ t_FmGetSetParams fmGetSetParams;
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP;
++ fmGetSetParams.setParams.sleep = 0;
++ if (p_FmPort->deepSleepVars.autoResOffsets)
++ {
++ XX_Free(p_FmPort->deepSleepVars.autoResOffsets);
++ p_FmPort->deepSleepVars.autoResOffsets = 0;
++ }
++
++ if (p_FmPort->deepSleepVars.dsarEnabledParser)
++ PrsDisable(FmGetPcd(p_FmPort->h_Fm));
++ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne, p_FmPort->deepSleepVars.fmbm_rfpne);
++ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne, p_FmPort->deepSleepVars.fmbm_rfne);
++ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rcfg, p_FmPort->deepSleepVars.fmbm_rcfg);
++ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
++ WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcmne, p_FmPort->deepSleepVars.fmbm_tcmne);
++ WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg, p_FmPort->deepSleepVars.fmbm_tcfg);
++ FM_PORT_Enable(p_FmPortHc);
++}
++
++bool FM_PORT_IsInDsar(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort *)h_FmPort;
++ return PTR_TO_UINT(p_FmPort->deepSleepVars.autoResOffsets);
++}
++
++t_Error FM_PORT_GetDsarStats(t_Handle h_FmPortRx, t_FmPortDsarStats *stats)
++{
++ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
++ struct arOffsets *of = (struct arOffsets*)p_FmPort->deepSleepVars.autoResOffsets;
++ uint8_t* fmMuramVirtBaseAddr = XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr);
++ uint32_t *param_page = XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr));
++ t_ArCommonDesc *ArCommonDescPtr = (t_ArCommonDesc*)(XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(*param_page)));
++ t_DsarArpDescriptor *ArpDescriptor = (t_DsarArpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->arp);
++ t_DsarArpStatistics* arp_stats = (t_DsarArpStatistics*)(PTR_TO_UINT(ArpDescriptor->p_Statistics) + fmMuramVirtBaseAddr);
++ t_DsarIcmpV4Descriptor* ICMPV4Descriptor = (t_DsarIcmpV4Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv4);
++ t_DsarIcmpV4Statistics* icmpv4_stats = (t_DsarIcmpV4Statistics*)(PTR_TO_UINT(ICMPV4Descriptor->p_Statistics) + fmMuramVirtBaseAddr);
++ t_DsarNdDescriptor* NDDescriptor = (t_DsarNdDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->nd);
++ t_NdStatistics* nd_stats = (t_NdStatistics*)(PTR_TO_UINT(NDDescriptor->p_Statistics) + fmMuramVirtBaseAddr);
++ t_DsarIcmpV6Descriptor* ICMPV6Descriptor = (t_DsarIcmpV6Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv6);
++ t_DsarIcmpV6Statistics* icmpv6_stats = (t_DsarIcmpV6Statistics*)(PTR_TO_UINT(ICMPV6Descriptor->p_Statistics) + fmMuramVirtBaseAddr);
++ t_DsarSnmpDescriptor* SnmpDescriptor = (t_DsarSnmpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->snmp);
++ t_DsarSnmpStatistics* snmp_stats = (t_DsarSnmpStatistics*)(PTR_TO_UINT(SnmpDescriptor->p_Statistics) + fmMuramVirtBaseAddr);
++ stats->arpArCnt = arp_stats->arCnt;
++ stats->echoIcmpv4ArCnt = icmpv4_stats->arCnt;
++ stats->ndpArCnt = nd_stats->arCnt;
++ stats->echoIcmpv6ArCnt = icmpv6_stats->arCnt;
++ stats->snmpGetCnt = snmp_stats->snmpGetReqCnt;
++ stats->snmpGetNextCnt = snmp_stats->snmpGetNextReqCnt;
++ return E_OK;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.h
+new file mode 100644
+index 00000000..85986f55
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.h
+@@ -0,0 +1,999 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_port.h
++
++ @Description FM Port internal structures and definitions.
++*//***************************************************************************/
++#ifndef __FM_PORT_H
++#define __FM_PORT_H
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "fm_port_ext.h"
++
++#include "fm_common.h"
++#include "fm_sp_common.h"
++#include "fsl_fman_sp.h"
++#include "fm_port_ext.h"
++#include "fsl_fman_port.h"
++
++#define __ERR_MODULE__ MODULE_FM_PORT
++
++
++#define MIN_EXT_BUF_SIZE 64
++#define DATA_ALIGNMENT 64
++#define MAX_LIODN_OFFSET 64
++#define MAX_PORT_FIFO_SIZE MIN(BMI_MAX_FIFO_SIZE, 1024*BMI_FIFO_UNITS)
++
++/**************************************************************************//**
++ @Description Memory Map defines
++*//***************************************************************************/
++#define BMI_PORT_REGS_OFFSET 0
++#define QMI_PORT_REGS_OFFSET 0x400
++#define PRS_PORT_REGS_OFFSET 0x800
++
++/**************************************************************************//**
++ @Description defaults
++*//***************************************************************************/
++#define DEFAULT_PORT_deqHighPriority_1G FALSE
++#define DEFAULT_PORT_deqHighPriority_10G TRUE
++#define DEFAULT_PORT_deqType e_FM_PORT_DEQ_TYPE1
++#define DEFAULT_PORT_deqPrefetchOption e_FM_PORT_DEQ_FULL_PREFETCH
++#define DEFAULT_PORT_deqPrefetchOption_HC e_FM_PORT_DEQ_NO_PREFETCH
++#define DEFAULT_PORT_deqByteCnt_10G 0x1400
++#define DEFAULT_PORT_deqByteCnt_1G 0x400
++#define DEFAULT_PORT_bufferPrefixContent_privDataSize DEFAULT_FM_SP_bufferPrefixContent_privDataSize
++#define DEFAULT_PORT_bufferPrefixContent_passPrsResult DEFAULT_FM_SP_bufferPrefixContent_passPrsResult
++#define DEFAULT_PORT_bufferPrefixContent_passTimeStamp DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp
++#define DEFAULT_PORT_bufferPrefixContent_allOtherPCDInfo DEFAULT_FM_SP_bufferPrefixContent_allOtherPCDInfo
++#define DEFAULT_PORT_bufferPrefixContent_dataAlign DEFAULT_FM_SP_bufferPrefixContent_dataAlign
++#define DEFAULT_PORT_cheksumLastBytesIgnore 0
++#define DEFAULT_PORT_cutBytesFromEnd 4
++#define DEFAULT_PORT_fifoDeqPipelineDepth_IM 2
++
++#define DEFAULT_PORT_frmDiscardOverride FALSE
++
++#define DEFAULT_PORT_dmaSwapData (e_FmDmaSwapOption)DEFAULT_FMAN_SP_DMA_SWAP_DATA
++#define DEFAULT_PORT_dmaIntContextCacheAttr (e_FmDmaCacheOption)DEFAULT_FMAN_SP_DMA_INT_CONTEXT_CACHE_ATTR
++#define DEFAULT_PORT_dmaHeaderCacheAttr (e_FmDmaCacheOption)DEFAULT_FMAN_SP_DMA_HEADER_CACHE_ATTR
++#define DEFAULT_PORT_dmaScatterGatherCacheAttr (e_FmDmaCacheOption)DEFAULT_FMAN_SP_DMA_SCATTER_GATHER_CACHE_ATTR
++#define DEFAULT_PORT_dmaWriteOptimize DEFAULT_FMAN_SP_DMA_WRITE_OPTIMIZE
++
++#define DEFAULT_PORT_noScatherGather DEFAULT_FMAN_SP_NO_SCATTER_GATHER
++#define DEFAULT_PORT_forwardIntContextReuse FALSE
++#define DEFAULT_PORT_BufMargins_startMargins 32
++#define DEFAULT_PORT_BufMargins_endMargins 0
++#define DEFAULT_PORT_syncReq TRUE
++#define DEFAULT_PORT_syncReqForHc FALSE
++#define DEFAULT_PORT_color e_FM_PORT_COLOR_GREEN
++#define DEFAULT_PORT_errorsToDiscard FM_PORT_FRM_ERR_CLS_DISCARD
++/* #define DEFAULT_PORT_dualRateLimitScaleDown e_FM_PORT_DUAL_RATE_LIMITER_NONE */
++/* #define DEFAULT_PORT_rateLimitBurstSizeHighGranularity FALSE */
++#define DEFAULT_PORT_exception IM_EV_BSY
++#define DEFAULT_PORT_maxFrameLength 9600
++
++#define DEFAULT_notSupported 0xff
++
++#if (DPAA_VERSION < 11)
++#define DEFAULT_PORT_rxFifoPriElevationLevel MAX_PORT_FIFO_SIZE
++#define DEFAULT_PORT_rxFifoThreshold (MAX_PORT_FIFO_SIZE*3/4)
++
++#define DEFAULT_PORT_txFifoMinFillLevel 0
++#define DEFAULT_PORT_txFifoLowComfLevel (5*KILOBYTE)
++#define DEFAULT_PORT_fifoDeqPipelineDepth_1G 1
++#define DEFAULT_PORT_fifoDeqPipelineDepth_10G 4
++
++#define DEFAULT_PORT_fifoDeqPipelineDepth_OH 2
++
++/* Host command port MUST NOT be changed to more than 1 !!! */
++#define DEFAULT_PORT_numOfTasks(type) \
++ (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \
++ ((type) == e_FM_PORT_TYPE_TX_10G)) ? 16 : \
++ ((((type) == e_FM_PORT_TYPE_RX) || \
++ ((type) == e_FM_PORT_TYPE_TX) || \
++ ((type) == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) ? 3 : 1))
++
++#define DEFAULT_PORT_extraNumOfTasks(type) \
++ (uint32_t)(((type) == e_FM_PORT_TYPE_RX_10G) ? 8 : \
++ (((type) == e_FM_PORT_TYPE_RX) ? 2 : 0))
++
++#define DEFAULT_PORT_numOfOpenDmas(type) \
++ (uint32_t)((((type) == e_FM_PORT_TYPE_TX_10G) || \
++ ((type) == e_FM_PORT_TYPE_RX_10G)) ? 8 : 1 )
++
++#define DEFAULT_PORT_extraNumOfOpenDmas(type) \
++ (uint32_t)(((type) == e_FM_PORT_TYPE_RX_10G) ? 8 : \
++ (((type) == e_FM_PORT_TYPE_RX) ? 1 : 0))
++
++#define DEFAULT_PORT_numOfFifoBufs(type) \
++ (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \
++ ((type) == e_FM_PORT_TYPE_TX_10G)) ? 48 : \
++ ((type) == e_FM_PORT_TYPE_RX) ? 45 : \
++ ((type) == e_FM_PORT_TYPE_TX) ? 44 : 8)
++
++#define DEFAULT_PORT_extraNumOfFifoBufs 0
++
++#else /* (DPAA_VERSION < 11) */
++/* Defaults are registers' reset values */
++#define DEFAULT_PORT_rxFifoPriElevationLevel MAX_PORT_FIFO_SIZE
++#define DEFAULT_PORT_rxFifoThreshold MAX_PORT_FIFO_SIZE
++
++#define DEFAULT_PORT_txFifoMinFillLevel 0
++#define DEFAULT_PORT_txFifoLowComfLevel (5 * KILOBYTE)
++#define DEFAULT_PORT_fifoDeqPipelineDepth_1G 2
++#define DEFAULT_PORT_fifoDeqPipelineDepth_10G 4
++
++#define DEFAULT_PORT_fifoDeqPipelineDepth_OH 2
++
++#define DEFAULT_PORT_numOfTasks(type) \
++ (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \
++ ((type) == e_FM_PORT_TYPE_TX_10G)) ? 14 : \
++ (((type) == e_FM_PORT_TYPE_RX) || \
++ ((type) == e_FM_PORT_TYPE_TX)) ? 4 : \
++ ((type) == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) ? 6 : 1)
++
++#define DEFAULT_PORT_extraNumOfTasks(type) 0
++
++#define DEFAULT_PORT_numOfOpenDmas(type) \
++ (uint32_t)(((type) == e_FM_PORT_TYPE_RX_10G) ? 8 : \
++ ((type) == e_FM_PORT_TYPE_TX_10G) ? 12 : \
++ ((type) == e_FM_PORT_TYPE_RX) ? 2 : \
++ ((type) == e_FM_PORT_TYPE_TX) ? 3 : \
++ ((type) == e_FM_PORT_TYPE_OH_HOST_COMMAND) ? 2 : 4)
++
++#define DEFAULT_PORT_extraNumOfOpenDmas(type) 0
++
++#define DEFAULT_PORT_numOfFifoBufs(type) \
++ (uint32_t) (((type) == e_FM_PORT_TYPE_RX_10G) ? 96 : \
++ ((type) == e_FM_PORT_TYPE_TX_10G) ? 64 : \
++ ((type) == e_FM_PORT_TYPE_OH_HOST_COMMAND) ? 10 : 50)
++
++#define DEFAULT_PORT_extraNumOfFifoBufs 0
++
++#endif /* (DPAA_VERSION < 11) */
++
++#define DEFAULT_PORT_txBdRingLength 16
++#define DEFAULT_PORT_rxBdRingLength 128
++#define DEFAULT_PORT_ImfwExtStructsMemId 0
++#define DEFAULT_PORT_ImfwExtStructsMemAttr MEMORY_ATTR_CACHEABLE
++
++#define FM_PORT_CG_REG_NUM(_cgId) (((FM_PORT_NUM_OF_CONGESTION_GRPS/32)-1)-_cgId/32)
++
++/**************************************************************************//**
++ @Collection PCD Engines
++*//***************************************************************************/
++typedef uint32_t fmPcdEngines_t; /**< options as defined below: */
++
++#define FM_PCD_NONE 0 /**< No PCD Engine indicated */
++#define FM_PCD_PRS 0x80000000 /**< Parser indicated */
++#define FM_PCD_KG 0x40000000 /**< Keygen indicated */
++#define FM_PCD_CC 0x20000000 /**< Coarse classification indicated */
++#define FM_PCD_PLCR 0x10000000 /**< Policer indicated */
++#define FM_PCD_MANIP 0x08000000 /**< Manipulation indicated */
++/* @} */
++
++#define FM_PORT_MAX_NUM_OF_EXT_POOLS_ALL_INTEGRATIONS 8
++#define FM_PORT_MAX_NUM_OF_CONGESTION_GRPS_ALL_INTEGRATIONS 256
++#define FM_PORT_CG_REG_NUM(_cgId) (((FM_PORT_NUM_OF_CONGESTION_GRPS/32)-1)-_cgId/32)
++
++#define FM_OH_PORT_ID 0
++
++/***********************************************************************/
++/* SW parser OFFLOAD labels (offsets) */
++/***********************************************************************/
++#if (DPAA_VERSION == 10)
++#define OFFLOAD_SW_PATCH_IPv4_IPR_LABEL 0x300
++#define OFFLOAD_SW_PATCH_IPv6_IPR_LABEL 0x325
++#define OFFLOAD_SW_PATCH_IPv6_IPF_LABEL 0x325
++#else
++#define OFFLOAD_SW_PATCH_IPv4_IPR_LABEL 0x100
++/* Will be used for:
++ * 1. identify fragments
++ * 2. udp-lite
++ */
++#define OFFLOAD_SW_PATCH_IPv6_IPR_LABEL 0x146
++/* Will be used for:
++ * 1. identify the fragmentable area
++ * 2. udp-lite
++ */
++#define OFFLOAD_SW_PATCH_IPv6_IPF_LABEL 0x261
++#define OFFLOAD_SW_PATCH_CAPWAP_LABEL 0x38d
++#endif /* (DPAA_VERSION == 10) */
++
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++#define UDP_LITE_SW_PATCH_LABEL 0x2E0
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++
++
++/**************************************************************************//**
++ @Description Memory Mapped Registers
++*//***************************************************************************/
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++typedef struct
++{
++ volatile uint32_t fmbm_rcfg; /**< Rx Configuration */
++ volatile uint32_t fmbm_rst; /**< Rx Status */
++ volatile uint32_t fmbm_rda; /**< Rx DMA attributes*/
++ volatile uint32_t fmbm_rfp; /**< Rx FIFO Parameters*/
++ volatile uint32_t fmbm_rfed; /**< Rx Frame End Data*/
++ volatile uint32_t fmbm_ricp; /**< Rx Internal Context Parameters*/
++ volatile uint32_t fmbm_rim; /**< Rx Internal Buffer Margins*/
++ volatile uint32_t fmbm_rebm; /**< Rx External Buffer Margins*/
++ volatile uint32_t fmbm_rfne; /**< Rx Frame Next Engine*/
++ volatile uint32_t fmbm_rfca; /**< Rx Frame Command Attributes.*/
++ volatile uint32_t fmbm_rfpne; /**< Rx Frame Parser Next Engine*/
++ volatile uint32_t fmbm_rpso; /**< Rx Parse Start Offset*/
++ volatile uint32_t fmbm_rpp; /**< Rx Policer Profile */
++ volatile uint32_t fmbm_rccb; /**< Rx Coarse Classification Base */
++ volatile uint32_t fmbm_reth; /**< Rx Excessive Threshold */
++ volatile uint32_t reserved1[0x01];/**< (0x03C) */
++ volatile uint32_t fmbm_rprai[FM_PORT_PRS_RESULT_NUM_OF_WORDS];
++ /**< Rx Parse Results Array Initialization*/
++ volatile uint32_t fmbm_rfqid; /**< Rx Frame Queue ID*/
++ volatile uint32_t fmbm_refqid; /**< Rx Error Frame Queue ID*/
++ volatile uint32_t fmbm_rfsdm; /**< Rx Frame Status Discard Mask*/
++ volatile uint32_t fmbm_rfsem; /**< Rx Frame Status Error Mask*/
++ volatile uint32_t fmbm_rfene; /**< Rx Frame Enqueue Next Engine */
++ volatile uint32_t reserved2[0x02];/**< (0x074-0x078) */
++ volatile uint32_t fmbm_rcmne; /**< Rx Frame Continuous Mode Next Engine */
++ volatile uint32_t reserved3[0x20];/**< (0x080 0x0FF) */
++ volatile uint32_t fmbm_ebmpi[FM_PORT_MAX_NUM_OF_EXT_POOLS_ALL_INTEGRATIONS];
++ /**< Buffer Manager pool Information */
++ volatile uint32_t fmbm_acnt[FM_PORT_MAX_NUM_OF_EXT_POOLS_ALL_INTEGRATIONS];
++ /**< Allocate Counter */
++ volatile uint32_t reserved4[0x08];
++ /**< 0x130/0x140 - 0x15F reserved */
++ volatile uint32_t fmbm_rcgm[FM_PORT_MAX_NUM_OF_CONGESTION_GRPS_ALL_INTEGRATIONS/32];
++ /**< Congestion Group Map*/
++ volatile uint32_t fmbm_rmpd; /**< BM Pool Depletion */
++ volatile uint32_t reserved5[0x1F];/**< (0x184 0x1FF) */
++ volatile uint32_t fmbm_rstc; /**< Rx Statistics Counters*/
++ volatile uint32_t fmbm_rfrc; /**< Rx Frame Counter*/
++ volatile uint32_t fmbm_rfbc; /**< Rx Bad Frames Counter*/
++ volatile uint32_t fmbm_rlfc; /**< Rx Large Frames Counter*/
++ volatile uint32_t fmbm_rffc; /**< Rx Filter Frames Counter*/
++ volatile uint32_t fmbm_rfcd; /**< Rx Frame Discard Counter*/
++ volatile uint32_t fmbm_rfldec; /**< Rx Frames List DMA Error Counter*/
++ volatile uint32_t fmbm_rodc; /**< Rx Out of Buffers Discard Counter */
++ volatile uint32_t fmbm_rbdc; /**< Rx Buffers Deallocate Counter */
++ volatile uint32_t fmbm_rpec; /**< Rx Prepare to Enqueue Counter */
++ volatile uint32_t reserved6[0x16];/**< (0x228 0x27F) */
++ volatile uint32_t fmbm_rpc; /**< Rx Performance Counters*/
++ volatile uint32_t fmbm_rpcp; /**< Rx Performance Count Parameters*/
++ volatile uint32_t fmbm_rccn; /**< Rx Cycle Counter*/
++ volatile uint32_t fmbm_rtuc; /**< Rx Tasks Utilization Counter*/
++ volatile uint32_t fmbm_rrquc; /**< Rx Receive Queue Utilization Counter*/
++ volatile uint32_t fmbm_rduc; /**< Rx DMA Utilization Counter*/
++ volatile uint32_t fmbm_rfuc; /**< Rx FIFO Utilization Counter*/
++ volatile uint32_t fmbm_rpac; /**< Rx Pause Activation Counter*/
++ volatile uint32_t reserved7[0x18];/**< (0x2A0-0x2FF) */
++ volatile uint32_t fmbm_rdcfg[0x3];/**< Rx Debug */
++ volatile uint32_t fmbm_rgpr; /**< Rx General Purpose Register. */
++ volatile uint32_t reserved8[0x3a];/**< (0x310-0x3FF) */
++} t_FmPortRxBmiRegs;
++
++typedef struct
++{
++ volatile uint32_t fmbm_tcfg; /**< Tx Configuration */
++ volatile uint32_t fmbm_tst; /**< Tx Status */
++ volatile uint32_t fmbm_tda; /**< Tx DMA attributes */
++ volatile uint32_t fmbm_tfp; /**< Tx FIFO Parameters */
++ volatile uint32_t fmbm_tfed; /**< Tx Frame End Data */
++ volatile uint32_t fmbm_ticp; /**< Tx Internal Context Parameters */
++ volatile uint32_t fmbm_tfdne; /**< Tx Frame Dequeue Next Engine. */
++ volatile uint32_t fmbm_tfca; /**< Tx Frame Command attribute. */
++ volatile uint32_t fmbm_tcfqid; /**< Tx Confirmation Frame Queue ID. */
++ volatile uint32_t fmbm_tfeqid; /**< Tx Frame Error Queue ID */
++ volatile uint32_t fmbm_tfene; /**< Tx Frame Enqueue Next Engine */
++ volatile uint32_t fmbm_trlmts; /**< Tx Rate Limiter Scale */
++ volatile uint32_t fmbm_trlmt; /**< Tx Rate Limiter */
++ volatile uint32_t fmbm_tccb; /**< Tx Coarse Classification Base */
++ volatile uint32_t reserved0[0x0e];/**< (0x038-0x070) */
++ volatile uint32_t fmbm_tfne; /**< Tx Frame Next Engine */
++ volatile uint32_t fmbm_tpfcm[0x02];/**< Tx Priority based Flow Control (PFC) Mapping */
++ volatile uint32_t fmbm_tcmne; /**< Tx Frame Continuous Mode Next Engine */
++ volatile uint32_t reserved2[0x60];/**< (0x080-0x200) */
++ volatile uint32_t fmbm_tstc; /**< Tx Statistics Counters */
++ volatile uint32_t fmbm_tfrc; /**< Tx Frame Counter */
++ volatile uint32_t fmbm_tfdc; /**< Tx Frames Discard Counter */
++ volatile uint32_t fmbm_tfledc; /**< Tx Frame Length error discard counter */
++ volatile uint32_t fmbm_tfufdc; /**< Tx Frame unsupported format discard Counter */
++ volatile uint32_t fmbm_tbdc; /**< Tx Buffers Deallocate Counter */
++ volatile uint32_t reserved3[0x1A];/**< (0x218-0x280) */
++ volatile uint32_t fmbm_tpc; /**< Tx Performance Counters*/
++ volatile uint32_t fmbm_tpcp; /**< Tx Performance Count Parameters*/
++ volatile uint32_t fmbm_tccn; /**< Tx Cycle Counter*/
++ volatile uint32_t fmbm_ttuc; /**< Tx Tasks Utilization Counter*/
++ volatile uint32_t fmbm_ttcquc; /**< Tx Transmit Confirm Queue Utilization Counter*/
++ volatile uint32_t fmbm_tduc; /**< Tx DMA Utilization Counter*/
++ volatile uint32_t fmbm_tfuc; /**< Tx FIFO Utilization Counter*/
++ volatile uint32_t reserved4[16]; /**< (0x29C-0x2FF) */
++ volatile uint32_t fmbm_tdcfg[0x3];/**< Tx Debug */
++ volatile uint32_t fmbm_tgpr; /**< O/H General Purpose Register */
++ volatile uint32_t reserved5[0x3a];/**< (0x310-0x3FF) */
++} t_FmPortTxBmiRegs;
++
++typedef struct
++{
++ volatile uint32_t fmbm_ocfg; /**< O/H Configuration */
++ volatile uint32_t fmbm_ost; /**< O/H Status */
++ volatile uint32_t fmbm_oda; /**< O/H DMA attributes */
++ volatile uint32_t fmbm_oicp; /**< O/H Internal Context Parameters */
++ volatile uint32_t fmbm_ofdne; /**< O/H Frame Dequeue Next Engine */
++ volatile uint32_t fmbm_ofne; /**< O/H Frame Next Engine */
++ volatile uint32_t fmbm_ofca; /**< O/H Frame Command Attributes. */
++ volatile uint32_t fmbm_ofpne; /**< O/H Frame Parser Next Engine */
++ volatile uint32_t fmbm_opso; /**< O/H Parse Start Offset */
++ volatile uint32_t fmbm_opp; /**< O/H Policer Profile */
++ volatile uint32_t fmbm_occb; /**< O/H Coarse Classification base */
++ volatile uint32_t fmbm_oim; /**< O/H Internal margins*/
++ volatile uint32_t fmbm_ofp; /**< O/H Fifo Parameters*/
++ volatile uint32_t fmbm_ofed; /**< O/H Frame End Data*/
++ volatile uint32_t reserved0[2]; /**< (0x038 - 0x03F) */
++ volatile uint32_t fmbm_oprai[FM_PORT_PRS_RESULT_NUM_OF_WORDS];
++ /**< O/H Parse Results Array Initialization */
++ volatile uint32_t fmbm_ofqid; /**< O/H Frame Queue ID */
++ volatile uint32_t fmbm_oefqid; /**< O/H Error Frame Queue ID */
++ volatile uint32_t fmbm_ofsdm; /**< O/H Frame Status Discard Mask */
++ volatile uint32_t fmbm_ofsem; /**< O/H Frame Status Error Mask */
++ volatile uint32_t fmbm_ofene; /**< O/H Frame Enqueue Next Engine */
++ volatile uint32_t fmbm_orlmts; /**< O/H Rate Limiter Scale */
++ volatile uint32_t fmbm_orlmt; /**< O/H Rate Limiter */
++ volatile uint32_t fmbm_ocmne; /**< O/H Continuous Mode Next Engine */
++ volatile uint32_t reserved1[0x20];/**< (0x080 - 0x0FF) */
++ volatile uint32_t fmbm_oebmpi[2]; /**< Buffer Manager Observed Pool Information */
++ volatile uint32_t reserved2[0x16];/**< (0x108 - 0x15F) */
++ volatile uint32_t fmbm_ocgm; /**< Observed Congestion Group Map */
++ volatile uint32_t reserved3[0x7]; /**< (0x164 - 0x17F) */
++ volatile uint32_t fmbm_ompd; /**< Observed BMan Pool Depletion */
++ volatile uint32_t reserved4[0x1F];/**< (0x184 - 0x1FF) */
++ volatile uint32_t fmbm_ostc; /**< O/H Statistics Counters */
++ volatile uint32_t fmbm_ofrc; /**< O/H Frame Counter */
++ volatile uint32_t fmbm_ofdc; /**< O/H Frames Discard Counter */
++ volatile uint32_t fmbm_ofledc; /**< O/H Frames Length Error Discard Counter */
++ volatile uint32_t fmbm_ofufdc; /**< O/H Frames Unsupported Format Discard Counter */
++ volatile uint32_t fmbm_offc; /**< O/H Filter Frames Counter */
++ volatile uint32_t fmbm_ofwdc; /**< Rx Frames WRED Discard Counter */
++ volatile uint32_t fmbm_ofldec; /**< O/H Frames List DMA Error Counter */
++ volatile uint32_t fmbm_obdc; /**< O/H Buffers Deallocate Counter */
++ volatile uint32_t fmbm_oodc; /**< O/H Out of Buffers Discard Counter */
++ volatile uint32_t fmbm_opec; /**< O/H Prepare to enqueue Counter */
++ volatile uint32_t reserved5[0x15];/**< ( - 0x27F) */
++ volatile uint32_t fmbm_opc; /**< O/H Performance Counters */
++ volatile uint32_t fmbm_opcp; /**< O/H Performance Count Parameters */
++ volatile uint32_t fmbm_occn; /**< O/H Cycle Counter */
++ volatile uint32_t fmbm_otuc; /**< O/H Tasks Utilization Counter */
++ volatile uint32_t fmbm_oduc; /**< O/H DMA Utilization Counter */
++ volatile uint32_t fmbm_ofuc; /**< O/H FIFO Utilization Counter */
++ volatile uint32_t reserved6[26]; /**< (0x298-0x2FF) */
++ volatile uint32_t fmbm_odcfg[0x3];/**< O/H Debug (only 1 in P1023) */
++ volatile uint32_t fmbm_ogpr; /**< O/H General Purpose Register. */
++ volatile uint32_t reserved7[0x3a];/**< (0x310 0x3FF) */
++} t_FmPortOhBmiRegs;
++
++typedef union
++{
++ t_FmPortRxBmiRegs rxPortBmiRegs;
++ t_FmPortTxBmiRegs txPortBmiRegs;
++ t_FmPortOhBmiRegs ohPortBmiRegs;
++} u_FmPortBmiRegs;
++
++typedef struct
++{
++ volatile uint32_t reserved1[2]; /**< 0xn024 - 0x02B */
++ volatile uint32_t fmqm_pndn; /**< PortID n Dequeue NIA Register */
++ volatile uint32_t fmqm_pndc; /**< PortID n Dequeue Config Register */
++ volatile uint32_t fmqm_pndtfc; /**< PortID n Dequeue Total Frame Counter */
++ volatile uint32_t fmqm_pndfdc; /**< PortID n Dequeue FQID from Default Counter */
++ volatile uint32_t fmqm_pndcc; /**< PortID n Dequeue Confirm Counter */
++} t_FmPortNonRxQmiRegs;
++
++typedef struct
++{
++ volatile uint32_t fmqm_pnc; /**< PortID n Configuration Register */
++ volatile uint32_t fmqm_pns; /**< PortID n Status Register */
++ volatile uint32_t fmqm_pnts; /**< PortID n Task Status Register */
++ volatile uint32_t reserved0[4]; /**< 0xn00C - 0xn01B */
++ volatile uint32_t fmqm_pnen; /**< PortID n Enqueue NIA Register */
++ volatile uint32_t fmqm_pnetfc; /**< PortID n Enqueue Total Frame Counter */
++ t_FmPortNonRxQmiRegs nonRxQmiRegs; /**< Registers for Tx Hc & Op ports */
++} t_FmPortQmiRegs;
++
++typedef struct
++{
++ struct
++ {
++ volatile uint32_t softSeqAttach; /**< Soft Sequence Attachment */
++ volatile uint32_t lcv; /**< Line-up Enable Confirmation Mask */
++ } hdrs[FM_PCD_PRS_NUM_OF_HDRS];
++ volatile uint32_t reserved0[0xde];
++ volatile uint32_t pcac; /**< Parse Internal Memory Configuration Access Control Register */
++ volatile uint32_t pctpid; /**< Parse Internal Memory Configured TPID Register */
++} t_FmPortPrsRegs;
++
++/**************************************************************************//*
++ @Description Basic buffer descriptor (BD) structure
++*//***************************************************************************/
++typedef _Packed struct
++{
++ volatile uint16_t status;
++ volatile uint16_t length;
++ volatile uint8_t reserved0[0x6];
++ volatile uint8_t reserved1[0x1];
++ volatile t_FmPhysAddr buff;
++} _PackedType t_FmImBd;
++
++typedef _Packed struct
++{
++ volatile uint16_t gen; /**< tbd */
++ volatile uint8_t reserved0[0x1];
++ volatile t_FmPhysAddr bdRingBase; /**< tbd */
++ volatile uint16_t bdRingSize; /**< tbd */
++ volatile uint16_t offsetIn; /**< tbd */
++ volatile uint16_t offsetOut; /**< tbd */
++ volatile uint8_t reserved1[0x12]; /**< 0x0e - 0x1f */
++} _PackedType t_FmPortImQd;
++
++typedef _Packed struct
++{
++ volatile uint32_t mode; /**< Mode register */
++ volatile uint32_t rxQdPtr; /**< tbd */
++ volatile uint32_t txQdPtr; /**< tbd */
++ volatile uint16_t mrblr; /**< tbd */
++ volatile uint16_t rxQdBsyCnt; /**< tbd */
++ volatile uint8_t reserved0[0x10]; /**< 0x10 - 0x1f */
++ t_FmPortImQd rxQd;
++ t_FmPortImQd txQd;
++ volatile uint8_t reserved1[0xa0]; /**< 0x60 - 0xff */
++} _PackedType t_FmPortImPram;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++/**************************************************************************//**
++ @Description Registers bit fields
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description BMI defines
++*//***************************************************************************/
++#if (DPAA_VERSION >= 11)
++#define BMI_SP_ID_MASK 0xff000000
++#define BMI_SP_ID_SHIFT 24
++#define BMI_SP_EN 0x01000000
++#endif /* (DPAA_VERSION >= 11) */
++
++#define BMI_PORT_CFG_EN 0x80000000
++#define BMI_PORT_CFG_EN_MACSEC 0x00800000
++#define BMI_PORT_CFG_FDOVR 0x02000000
++#define BMI_PORT_CFG_IM 0x01000000
++#define BMI_PORT_CFG_AM 0x00000040
++#define BMI_PORT_STATUS_BSY 0x80000000
++#define BMI_COUNTERS_EN 0x80000000
++
++#define BMI_PORT_RFNE_FRWD_DCL4C 0x10000000
++#define BMI_PORT_RFNE_FRWD_RPD 0x40000000
++#define BMI_RFNE_FDCS_MASK 0xFF000000
++#define BMI_RFNE_HXS_MASK 0x000000FF
++
++#define BMI_CMD_MR_LEAC 0x00200000
++#define BMI_CMD_MR_SLEAC 0x00100000
++#define BMI_CMD_MR_MA 0x00080000
++#define BMI_CMD_MR_DEAS 0x00040000
++#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \
++ BMI_CMD_MR_SLEAC | \
++ BMI_CMD_MR_MA | \
++ BMI_CMD_MR_DEAS)
++#define BMI_CMD_ATTR_ORDER 0x80000000
++#define BMI_CMD_ATTR_SYNC 0x02000000
++#define BMI_CMD_ATTR_MODE_MISS_ALLIGN_ADDR_EN 0x00080000
++#define BMI_CMD_ATTR_MACCMD_MASK 0x0000ff00
++#define BMI_CMD_ATTR_MACCMD_OVERRIDE 0x00008000
++#define BMI_CMD_ATTR_MACCMD_SECURED 0x00001000
++#define BMI_CMD_ATTR_MACCMD_SC_MASK 0x00000f00
++
++#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000
++#define BMI_STATUS_RX_MASK_UNUSED (uint32_t)(~(FM_PORT_FRM_ERR_DMA | \
++ FM_PORT_FRM_ERR_PHYSICAL | \
++ FM_PORT_FRM_ERR_SIZE | \
++ FM_PORT_FRM_ERR_CLS_DISCARD | \
++ FM_PORT_FRM_ERR_EXTRACTION | \
++ FM_PORT_FRM_ERR_NO_SCHEME | \
++ FM_PORT_FRM_ERR_COLOR_RED | \
++ FM_PORT_FRM_ERR_COLOR_YELLOW | \
++ FM_PORT_FRM_ERR_ILL_PLCR | \
++ FM_PORT_FRM_ERR_PLCR_FRAME_LEN | \
++ FM_PORT_FRM_ERR_PRS_TIMEOUT | \
++ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \
++ FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \
++ FM_PORT_FRM_ERR_PRS_HDR_ERR | \
++ FM_PORT_FRM_ERR_IPRE | \
++ FM_PORT_FRM_ERR_IPR_NCSP | \
++ FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW))
++
++#define BMI_STATUS_OP_MASK_UNUSED (uint32_t)(BMI_STATUS_RX_MASK_UNUSED & \
++ ~(FM_PORT_FRM_ERR_LENGTH | \
++ FM_PORT_FRM_ERR_NON_FM | \
++ FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT))
++
++#define BMI_RATE_LIMIT_EN 0x80000000
++#define BMI_RATE_LIMIT_BURST_SIZE_GRAN 0x80000000
++#define BMI_RATE_LIMIT_SCALE_BY_2 0x00000001
++#define BMI_RATE_LIMIT_SCALE_BY_4 0x00000002
++#define BMI_RATE_LIMIT_SCALE_BY_8 0x00000003
++
++#define BMI_RX_FIFO_THRESHOLD_BC 0x80000000
++
++#define BMI_PRS_RESULT_HIGH 0x00000000
++#define BMI_PRS_RESULT_LOW 0xFFFFFFFF
++
++
++#define RX_ERRS_TO_ENQ (FM_PORT_FRM_ERR_DMA | \
++ FM_PORT_FRM_ERR_PHYSICAL | \
++ FM_PORT_FRM_ERR_SIZE | \
++ FM_PORT_FRM_ERR_EXTRACTION | \
++ FM_PORT_FRM_ERR_NO_SCHEME | \
++ FM_PORT_FRM_ERR_ILL_PLCR | \
++ FM_PORT_FRM_ERR_PLCR_FRAME_LEN | \
++ FM_PORT_FRM_ERR_PRS_TIMEOUT | \
++ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \
++ FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \
++ FM_PORT_FRM_ERR_PRS_HDR_ERR | \
++ FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW | \
++ FM_PORT_FRM_ERR_IPRE)
++
++#define OP_ERRS_TO_ENQ (RX_ERRS_TO_ENQ | \
++ FM_PORT_FRM_ERR_LENGTH | \
++ FM_PORT_FRM_ERR_NON_FM | \
++ FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT)
++
++
++#define BMI_RX_FIFO_PRI_ELEVATION_MASK 0x03FF0000
++#define BMI_RX_FIFO_THRESHOLD_MASK 0x000003FF
++#define BMI_TX_FIFO_MIN_FILL_MASK 0x03FF0000
++#define BMI_FIFO_PIPELINE_DEPTH_MASK 0x0000F000
++#define BMI_TX_LOW_COMF_MASK 0x000003FF
++
++/* shifts */
++#define BMI_PORT_CFG_MS_SEL_SHIFT 16
++#define BMI_DMA_ATTR_IC_CACHE_SHIFT FMAN_SP_DMA_ATTR_IC_CACHE_SHIFT
++#define BMI_DMA_ATTR_HDR_CACHE_SHIFT FMAN_SP_DMA_ATTR_HDR_CACHE_SHIFT
++#define BMI_DMA_ATTR_SG_CACHE_SHIFT FMAN_SP_DMA_ATTR_SG_CACHE_SHIFT
++
++#define BMI_IM_FOF_SHIFT 28
++#define BMI_PR_PORTID_SHIFT 24
++
++#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
++#define BMI_RX_FIFO_THRESHOLD_SHIFT 0
++
++#define BMI_RX_FRAME_END_CS_IGNORE_SHIFT 24
++#define BMI_RX_FRAME_END_CUT_SHIFT 16
++
++#define BMI_IC_SIZE_SHIFT FMAN_SP_IC_SIZE_SHIFT
++
++#define BMI_INT_BUF_MARG_SHIFT 28
++
++#define BMI_EXT_BUF_MARG_END_SHIFT FMAN_SP_EXT_BUF_MARG_END_SHIFT
++
++#define BMI_CMD_ATTR_COLOR_SHIFT 26
++#define BMI_CMD_ATTR_COM_MODE_SHIFT 16
++#define BMI_CMD_ATTR_MACCMD_SHIFT 8
++#define BMI_CMD_ATTR_MACCMD_OVERRIDE_SHIFT 15
++#define BMI_CMD_ATTR_MACCMD_SECURED_SHIFT 12
++#define BMI_CMD_ATTR_MACCMD_SC_SHIFT 8
++
++#define BMI_POOL_DEP_NUM_OF_POOLS_VECTOR_SHIFT 24
++
++#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
++#define BMI_TX_LOW_COMF_SHIFT 0
++
++#define BMI_PERFORMANCE_TASK_COMP_SHIFT 24
++#define BMI_PERFORMANCE_PORT_COMP_SHIFT 16
++#define BMI_PERFORMANCE_DMA_COMP_SHIFT 12
++#define BMI_PERFORMANCE_FIFO_COMP_SHIFT 0
++
++#define BMI_MAX_BURST_SHIFT 16
++#define BMI_COUNT_RATE_UNIT_SHIFT 16
++
++/* sizes */
++#define FRAME_END_DATA_SIZE 16
++#define FRAME_OFFSET_UNITS 16
++#define MIN_TX_INT_OFFSET 16
++#define MAX_FRAME_OFFSET 64
++#define MAX_FIFO_PIPELINE_DEPTH 8
++#define MAX_PERFORMANCE_TASK_COMP 64
++#define MAX_PERFORMANCE_TX_QUEUE_COMP 8
++#define MAX_PERFORMANCE_RX_QUEUE_COMP 64
++#define MAX_PERFORMANCE_DMA_COMP 16
++#define MAX_NUM_OF_TASKS 64
++#define MAX_NUM_OF_EXTRA_TASKS 8
++#define MAX_NUM_OF_DMAS 16
++#define MAX_NUM_OF_EXTRA_DMAS 8
++#define MAX_BURST_SIZE 1024
++#define MIN_NUM_OF_OP_DMAS 2
++
++
++/**************************************************************************//**
++ @Description QMI defines
++*//***************************************************************************/
++/* masks */
++#define QMI_PORT_CFG_EN 0x80000000
++#define QMI_PORT_CFG_EN_COUNTERS 0x10000000
++#define QMI_PORT_STATUS_DEQ_TNUM_BSY 0x80000000
++#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000
++
++#define QMI_DEQ_CFG_PREFETCH_NO_TNUM 0x02000000
++#define QMI_DEQ_CFG_PREFETCH_WAITING_TNUM 0
++#define QMI_DEQ_CFG_PREFETCH_1_FRAME 0
++#define QMI_DEQ_CFG_PREFETCH_3_FRAMES 0x01000000
++
++#define QMI_DEQ_CFG_PRI 0x80000000
++#define QMI_DEQ_CFG_TYPE1 0x10000000
++#define QMI_DEQ_CFG_TYPE2 0x20000000
++#define QMI_DEQ_CFG_TYPE3 0x30000000
++
++#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f
++#define QMI_DEQ_CFG_SUBPORTAL_SHIFT 20
++
++/**************************************************************************//**
++ @Description PARSER defines
++*//***************************************************************************/
++/* masks */
++#define PRS_HDR_ERROR_DIS 0x00000800
++#define PRS_HDR_SW_PRS_EN 0x00000400
++#define PRS_CP_OFFSET_MASK 0x0000000F
++#define PRS_TPID1_MASK 0xFFFF0000
++#define PRS_TPID2_MASK 0x0000FFFF
++#define PRS_TPID_DFLT 0x91009100
++
++#define PRS_HDR_MPLS_LBL_INTER_EN 0x00200000
++#define PRS_HDR_IPV6_ROUTE_HDR_EN 0x00008000
++#define PRS_HDR_PPPOE_MTU_CHECK_EN 0x80000000
++#define PRS_HDR_UDP_PAD_REMOVAL 0x80000000
++#define PRS_HDR_TCP_PAD_REMOVAL 0x80000000
++#define PRS_CAC_STOP 0x00000001
++#define PRS_CAC_ACTIVE 0x00000100
++
++/* shifts */
++#define PRS_PCTPID_SHIFT 16
++#define PRS_HDR_MPLS_NEXT_HDR_SHIFT 22
++#define PRS_HDR_ETH_BC_SHIFT 28
++#define PRS_HDR_ETH_MC_SHIFT 24
++#define PRS_HDR_VLAN_STACKED_SHIFT 16
++#define PRS_HDR_MPLS_STACKED_SHIFT 16
++#define PRS_HDR_IPV4_1_BC_SHIFT 28
++#define PRS_HDR_IPV4_1_MC_SHIFT 24
++#define PRS_HDR_IPV4_2_UC_SHIFT 20
++#define PRS_HDR_IPV4_2_MC_BC_SHIFT 16
++#define PRS_HDR_IPV6_1_MC_SHIFT 24
++#define PRS_HDR_IPV6_2_UC_SHIFT 20
++#define PRS_HDR_IPV6_2_MC_SHIFT 16
++
++#define PRS_HDR_ETH_BC_MASK 0x0fffffff
++#define PRS_HDR_ETH_MC_MASK 0xf0ffffff
++#define PRS_HDR_VLAN_STACKED_MASK 0xfff0ffff
++#define PRS_HDR_MPLS_STACKED_MASK 0xfff0ffff
++#define PRS_HDR_IPV4_1_BC_MASK 0x0fffffff
++#define PRS_HDR_IPV4_1_MC_MASK 0xf0ffffff
++#define PRS_HDR_IPV4_2_UC_MASK 0xff0fffff
++#define PRS_HDR_IPV4_2_MC_BC_MASK 0xfff0ffff
++#define PRS_HDR_IPV6_1_MC_MASK 0xf0ffffff
++#define PRS_HDR_IPV6_2_UC_MASK 0xff0fffff
++#define PRS_HDR_IPV6_2_MC_MASK 0xfff0ffff
++
++/* others */
++#define PRS_HDR_ENTRY_SIZE 8
++#define DEFAULT_CLS_PLAN_VECTOR 0xFFFFFFFF
++
++#define IPSEC_SW_PATCH_START 0x20
++#define SCTP_SW_PATCH_START 0x4D
++#define DCCP_SW_PATCH_START 0x41
++
++/**************************************************************************//**
++ @Description IM defines
++*//***************************************************************************/
++#define BD_R_E 0x80000000
++#define BD_L 0x08000000
++
++#define BD_RX_CRE 0x00080000
++#define BD_RX_FTL 0x00040000
++#define BD_RX_FTS 0x00020000
++#define BD_RX_OV 0x00010000
++
++#define BD_RX_ERRORS (BD_RX_CRE | BD_RX_FTL | BD_RX_FTS | BD_RX_OV)
++
++#define FM_IM_SIZEOF_BD sizeof(t_FmImBd)
++
++#define BD_STATUS_MASK 0xffff0000
++#define BD_LENGTH_MASK 0x0000ffff
++
++#define BD_STATUS_AND_LENGTH_SET(bd, val) WRITE_UINT32(*(volatile uint32_t*)(bd), (val))
++
++#define BD_STATUS_AND_LENGTH(bd) GET_UINT32(*(volatile uint32_t*)(bd))
++
++#define BD_GET(id) &p_FmPort->im.p_BdRing[id]
++
++#define IM_ILEGAL_BD_ID 0xffff
++
++/* others */
++#define IM_PRAM_ALIGN 0x100
++
++/* masks */
++#define IM_MODE_GBL 0x20000000
++#define IM_MODE_BO_MASK 0x18000000
++#define IM_MODE_BO_SHIFT 3
++#define IM_MODE_GRC_STP 0x00800000
++
++#define IM_MODE_SET_BO(val) (uint32_t)((val << (31-IM_MODE_BO_SHIFT)) & IM_MODE_BO_MASK)
++
++#define IM_RXQD_BSYINTM 0x0008
++#define IM_RXQD_RXFINTM 0x0010
++#define IM_RXQD_FPMEVT_SEL_MASK 0x0003
++
++#define IM_EV_BSY 0x40000000
++#define IM_EV_RX 0x80000000
++
++
++/**************************************************************************//**
++ @Description Additional defines
++*//***************************************************************************/
++
++typedef struct {
++ t_Handle h_FmMuram;
++ t_FmPortImPram *p_FmPortImPram;
++ uint8_t fwExtStructsMemId;
++ uint32_t fwExtStructsMemAttr;
++ uint16_t bdRingSize;
++ t_FmImBd *p_BdRing;
++ t_Handle *p_BdShadow;
++ uint16_t currBdId;
++ uint16_t firstBdOfFrameId;
++
++ /* Rx port parameters */
++ uint8_t dataMemId; /**< Memory partition ID for data buffers */
++ uint32_t dataMemAttributes; /**< Memory attributes for data buffers */
++ t_BufferPoolInfo rxPool;
++ uint16_t mrblr;
++ uint16_t rxFrameAccumLength;
++ t_FmPortImRxStoreCallback *f_RxStore;
++
++ /* Tx port parameters */
++ uint32_t txFirstBdStatus;
++ t_FmPortImTxConfCallback *f_TxConf;
++} t_FmMacIm;
++
++
++typedef struct {
++ struct fman_port_cfg dfltCfg;
++ uint32_t dfltFqid;
++ uint32_t confFqid;
++ uint32_t errFqid;
++ uintptr_t baseAddr;
++ uint8_t deqSubPortal;
++ bool deqHighPriority;
++ e_FmPortDeqType deqType;
++ e_FmPortDeqPrefetchOption deqPrefetchOption;
++ uint16_t deqByteCnt;
++ uint8_t cheksumLastBytesIgnore;
++ uint8_t cutBytesFromEnd;
++ t_FmBufPoolDepletion bufPoolDepletion;
++ uint8_t pipelineDepth;
++ uint16_t fifoLowComfLevel;
++ bool frmDiscardOverride;
++ bool enRateLimit;
++ t_FmPortRateLimit rateLimit;
++ e_FmPortDualRateLimiterScaleDown rateLimitDivider;
++ bool enBufPoolDepletion;
++ uint16_t liodnOffset;
++ uint16_t liodnBase;
++ t_FmExtPools extBufPools;
++ e_FmDmaSwapOption dmaSwapData;
++ e_FmDmaCacheOption dmaIntContextCacheAttr;
++ e_FmDmaCacheOption dmaHeaderCacheAttr;
++ e_FmDmaCacheOption dmaScatterGatherCacheAttr;
++ bool dmaReadOptimize;
++ bool dmaWriteOptimize;
++ uint32_t txFifoMinFillLevel;
++ uint32_t txFifoLowComfLevel;
++ uint32_t rxFifoPriElevationLevel;
++ uint32_t rxFifoThreshold;
++ t_FmSpBufMargins bufMargins;
++ t_FmSpIntContextDataCopy intContext;
++ bool syncReq;
++ e_FmPortColor color;
++ fmPortFrameErrSelect_t errorsToDiscard;
++ fmPortFrameErrSelect_t errorsToEnq;
++ bool forwardReuseIntContext;
++ t_FmBufferPrefixContent bufferPrefixContent;
++ t_FmBackupBmPools *p_BackupBmPools;
++ bool dontReleaseBuf;
++ bool setNumOfTasks;
++ bool setNumOfOpenDmas;
++ bool setSizeOfFifo;
++#if (DPAA_VERSION >= 11)
++ bool noScatherGather;
++#endif /* (DPAA_VERSION >= 11) */
++
++#ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
++ bool bcbWorkaround;
++#endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */
++} t_FmPortDriverParam;
++
++
++typedef struct t_FmPortRxPoolsParams
++{
++ uint8_t numOfPools;
++ uint16_t secondLargestBufSize;
++ uint16_t largestBufSize;
++} t_FmPortRxPoolsParams;
++
++typedef struct t_FmPortDsarVars {
++ t_Handle *autoResOffsets;
++ t_FmPortDsarTablesSizes *autoResMaxSizes;
++ uint32_t fmbm_tcfg;
++ uint32_t fmbm_tcmne;
++ uint32_t fmbm_rfne;
++ uint32_t fmbm_rfpne;
++ uint32_t fmbm_rcfg;
++ bool dsarEnabledParser;
++} t_FmPortDsarVars;
++typedef struct {
++ struct fman_port port;
++ t_Handle h_Fm;
++ t_Handle h_FmPcd;
++ t_Handle h_FmMuram;
++ t_FmRevisionInfo fmRevInfo;
++ uint8_t portId;
++ e_FmPortType portType;
++ int enabled;
++ char name[MODULE_NAME_SIZE];
++ uint8_t hardwarePortId;
++ uint16_t fmClkFreq;
++ t_FmPortQmiRegs *p_FmPortQmiRegs;
++ u_FmPortBmiRegs *p_FmPortBmiRegs;
++ t_FmPortPrsRegs *p_FmPortPrsRegs;
++ fmPcdEngines_t pcdEngines;
++ uint32_t savedBmiNia;
++ uint8_t netEnvId;
++ uint32_t optArray[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)];
++ uint32_t lcvs[FM_PCD_PRS_NUM_OF_HDRS];
++ uint8_t privateInfo;
++ uint32_t schemesPerPortVector;
++ bool useClsPlan;
++ uint8_t clsPlanGrpId;
++ t_Handle ccTreeId;
++ t_Handle completeArg;
++ void (*f_Complete)(t_Handle arg);
++ t_FmSpBufferOffsets bufferOffsets;
++ /* Independent-Mode parameters support */
++ bool imEn;
++ t_FmMacIm im;
++ volatile bool lock;
++ t_Handle h_Spinlock;
++ t_FmPortExceptionCallback *f_Exception;
++ t_Handle h_App;
++ uint8_t internalBufferOffset;
++ uint8_t fmanCtrlEventId;
++ uint32_t exceptions;
++ bool polling;
++ t_FmExtPools extBufPools;
++ uint32_t requiredAction;
++ uint32_t savedQmiPnen;
++ uint32_t savedBmiFene;
++ uint32_t savedBmiFpne;
++ uint32_t savedBmiCmne;
++ uint32_t savedBmiOfp;
++ uint32_t savedNonRxQmiRegsPndn;
++ uint32_t origNonRxQmiRegsPndn;
++ int savedPrsStartOffset;
++ bool includeInPrsStatistics;
++ uint16_t maxFrameLength;
++ t_FmFmanCtrl orFmanCtrl;
++ t_FmPortRsrc openDmas;
++ t_FmPortRsrc tasks;
++ t_FmPortRsrc fifoBufs;
++ t_FmPortRxPoolsParams rxPoolsParams;
++// bool explicitUserSizeOfFifo;
++ t_Handle h_IpReassemblyManip;
++ t_Handle h_CapwapReassemblyManip;
++ t_Handle h_ReassemblyTree;
++ uint64_t fmMuramPhysBaseAddr;
++#if (DPAA_VERSION >= 11)
++ bool vspe;
++ uint8_t dfltRelativeId;
++ e_FmPortGprFuncType gprFunc;
++ t_FmPcdCtrlParamsPage *p_ParamsPage;
++#endif /* (DPAA_VERSION >= 11) */
++ t_FmPortDsarVars deepSleepVars;
++ t_FmPortDriverParam *p_FmPortDriverParam;
++} t_FmPort;
++
++
++void FmPortConfigIM (t_FmPort *p_FmPort, t_FmPortParams *p_FmPortParams);
++t_Error FmPortImCheckInitParameters(t_FmPort *p_FmPort);
++
++t_Error FmPortImInit(t_FmPort *p_FmPort);
++void FmPortImFree(t_FmPort *p_FmPort);
++
++t_Error FmPortImEnable (t_FmPort *p_FmPort);
++t_Error FmPortImDisable (t_FmPort *p_FmPort);
++t_Error FmPortImRx (t_FmPort *p_FmPort);
++
++void FmPortSetMacsecLcv(t_Handle h_FmPort);
++void FmPortSetMacsecCmd(t_Handle h_FmPort, uint8_t dfltSci);
++
++
++t_Error FM_PORT_SetNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfOpenDmas);
++t_Error FM_PORT_SetNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks);
++t_Error FM_PORT_SetSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo);
++
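++/* Reassemble the 40-bit physical buffer address held in a BD (8-bit high word, 32-bit low word) and translate it to a virtual pointer. */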
++static __inline__ uint8_t * BdBufferGet (t_PhysToVirt *f_PhysToVirt, t_FmImBd *p_Bd)
++{
++ uint64_t physAddr = (uint64_t)((uint64_t)GET_UINT8(p_Bd->buff.high) << 32);
++ physAddr |= GET_UINT32(p_Bd->buff.low);
++
++ return (uint8_t *)f_PhysToVirt((physAddress_t)(physAddr));
++}
++
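++/* Split a physical address of up to 40 bits into the 8-bit high and 32-bit low words of t_FmPhysAddr. */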
++static __inline__ void SET_ADDR(volatile t_FmPhysAddr *fmPhysAddr, uint64_t value)
++{
++ WRITE_UINT8(fmPhysAddr->high,(uint8_t)((value & 0x000000ff00000000LL) >> 32));
++ WRITE_UINT32(fmPhysAddr->low,(uint32_t)value);
++}
++
++static __inline__ void BdBufferSet(t_VirtToPhys *f_VirtToPhys, t_FmImBd *p_Bd, uint8_t *p_Buffer)
++{
++ uint64_t physAddr = (uint64_t)(f_VirtToPhys(p_Buffer));
++ SET_ADDR(&p_Bd->buff, physAddr);
++}
++
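++/* Advance a BD ring index by one, wrapping to 0 at the end of the ring. */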
++static __inline__ uint16_t GetNextBdId(t_FmPort *p_FmPort, uint16_t id)
++{
++ if (id < p_FmPort->im.bdRingSize-1)
++ return (uint16_t)(id+1);
++ else
++ return 0;
++}
++
++void FM_PORT_Dsar_DumpRegs(void);
++
++
++#endif /* __FM_PORT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_dsar.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_dsar.h
+new file mode 100755
+index 00000000..95619eff
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_dsar.h
+@@ -0,0 +1,494 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File fm_port_dsar.h
++
++ @Description Deep Sleep Auto Response project - common module header file.
++
++ Author - Eyal Harari
++
++ @Cautions See the FMan Controller spec and design document for more information.
++*//***************************************************************************/
++
++#ifndef __FM_PORT_DSAR_H_
++#define __FM_PORT_DSAR_H_
++
++#define DSAR_GETSER_MASK 0xFF0000FF
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response VLAN-IPv4 Binding Table (for ARP/ICMPv4)
++ Refer to the FMan Controller spec for more details.
++*//***************************************************************************/
++typedef _Packed struct
++{
++ uint32_t ipv4Addr; /*!< 32 bit IPv4 Address. */
++ uint16_t vlanId; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
++ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
++ uint16_t reserved;
++} _PackedType t_DsarArpBindingEntry;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response Address Resolution Protocol Statistics Descriptor
++ Refer to the FMan Controller spec for more details.
++ 0x00 INVAL_CNT Invalid ARP IPv4-Ethernet counter
++ 0x04 ECHO_CNT Echo counter
++ 0x08 CD_CNT Conflict Detection counter
++ 0x0C AR_CNT Auto-Response counter
++ 0x10 RATM_CNT Replies Addressed To Me counter
++ 0x14 UKOP_CNT Unknown Operation counter
++ 0x18 NMTP_CNT Not my TPA counter
++ 0x1C NMVLAN_CNT Not My VLAN counter
++*//***************************************************************************/
++typedef _Packed struct
++{
++ uint32_t invalCnt; /**< Invalid ARP IPv4-Ethernet counter. */
++ uint32_t echoCnt; /**< Echo counter. */
++ uint32_t cdCnt; /**< Conflict Detection counter. */
++ uint32_t arCnt; /**< Auto-Response counter. */
++ uint32_t ratmCnt; /**< Replies Addressed To Me counter. */
++ uint32_t ukopCnt; /**< Unknown Operation counter. */
++ uint32_t nmtpCnt; /**< Not my TPA counter. */
++ uint32_t nmVlanCnt; /**< Not My VLAN counter */
++} _PackedType t_DsarArpStatistics;
++
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response Address Resolution Protocol Descriptor
++ 0x0 0-15 Control bits [0-15]. Bit 15 = CDEN.
++ 0x2 0-15 NumOfBindings Number of entries in the binding list.
++ 0x4 0-15 BindingsPointer Bindings Pointer. This points to an IPv4-MAC Addresses Bindings list.
++ 0x6 0-15
++ 0x8 0-15 StatisticsPointer Statistics Pointer. This field points to the ARP Descriptors statistics data structure.
++ 0xA 0-15
++ 0xC 0-15 Reserved Reserved. Must be cleared.
++ 0xE 0-15
++
++*//***************************************************************************/
++typedef _Packed struct
++{
++ uint16_t control; /**< Control bits [0-15]. Bit 15 = CDEN. */
++ uint16_t numOfBindings; /**< Number of VLAN-IPv4 */
++ uint32_t p_Bindings; /**< VLAN-IPv4 Bindings table pointer. */
++ uint32_t p_Statistics; /**< Statistics Data Structure pointer. */
++ uint32_t reserved1; /**< Reserved. */
++} _PackedType t_DsarArpDescriptor;
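++
++/* Note: p_Bindings and p_Statistics are 32-bit addresses consumed by the FMan
++   controller (presumably MURAM/physical addresses), which is why they are
++   declared uint32_t rather than as host pointer types. */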
++
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response VLAN-IPv4 Binding Table (for ARP/ICMPv4)
++ Refer to the FMan Controller spec for more details.
++*//***************************************************************************/
++typedef _Packed struct
++{
++ uint32_t ipv4Addr; /*!< 32 bit IPv4 Address. */
++    uint16_t vlanId;     /*!< 12-bit VLAN ID. The 4 left-most bits should be cleared */
++ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
++ uint16_t reserved;
++} _PackedType t_DsarIcmpV4BindingEntry;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response ICMPv4 Statistics Descriptor
++ Refer to the FMan Controller spec for more details.
++ 0x00 INVAL_CNT Invalid ICMPv4 header counter
++ 0x04 NMVLAN_CNT Not My VLAN counter
++ 0x08 NMIP_CNT Not My IP counter
++ 0x0C AR_CNT Auto-Response counter
++ 0x10 CSERR_CNT Checksum Error counter
++ 0x14 Reserved Reserved
++ 0x18 Reserved Reserved
++ 0x1C Reserved Reserved
++
++*//***************************************************************************/
++typedef _Packed struct
++{
++    uint32_t invalCnt;   /**< Invalid ICMPv4 header counter. */
++ uint32_t nmVlanCnt; /**< Not My VLAN counter */
++ uint32_t nmIpCnt; /**< Not My IP counter */
++ uint32_t arCnt; /**< Auto-Response counter */
++ uint32_t cserrCnt; /**< Checksum Error counter */
++ uint32_t reserved0; /**< Reserved */
++ uint32_t reserved1; /**< Reserved */
++ uint32_t reserved2; /**< Reserved */
++} _PackedType t_DsarIcmpV4Statistics;
++
++
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response ICMPv4 Descriptor
++ 0x0 0-15 Control bits [0-15]
++ 0x2 0-15 NumOfBindings Number of entries in the binding list.
++                0x4    0-15  BindingsPointer  Bindings Pointer. This points to a VLAN-IPv4 Addresses Bindings list.
++ 0x6 0-15
++ 0x8 0-15 StatisticsPointer Statistics Pointer. This field points to the ICMPv4 statistics data structure.
++ 0xA 0-15
++ 0xC 0-15 Reserved Reserved. Must be cleared.
++                0xE    0-15
++
++*//***************************************************************************/
++typedef _Packed struct
++{
++    uint16_t control;        /**< Control bits [0-15]. */
++    uint16_t numOfBindings;  /**< Number of VLAN-IPv4 bindings. */
++ uint32_t p_Bindings; /**< VLAN-IPv4 Bindings table pointer. */
++ uint32_t p_Statistics; /**< Statistics Data Structure pointer. */
++ uint32_t reserved1; /**< Reserved. */
++} _PackedType t_DsarIcmpV4Descriptor;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response VLAN-IPv4 Binding Table (for ARP/ICMPv4)
++ The 4 left-most bits (15:12) of the VlanId parameter are control flags.
++ Flags[3:1] (VlanId[15:13]): Reserved, should be cleared.
++ Flags[0] (VlanId[12]): Temporary address.
++ • 0 - Assigned IP address.
++                • 1 - Temporary (tentative) IP address.
++ Refer to the FMan Controller spec for more details.
++*//***************************************************************************/
++typedef _Packed struct
++{
++    uint32_t ipv6Addr[4];  /*!< 4 * 32 bit IPv6 Address. */
++    uint16_t resFlags:4;   /*!< Reserved flags; should be cleared. */
++    uint16_t vlanId:12;    /*!< 12-bit VLAN ID. */
++ /*!< This field should be 0x000 for an entry with no VLAN tag or a null VLAN ID. */
++ uint16_t reserved;
++} _PackedType t_DsarIcmpV6BindingEntry;
++
++/**************************************************************************//**
++ @Description    Deep Sleep Auto Response ICMPv6 Statistics Descriptor
++ Refer to the FMan Controller spec for more details.
++                0x00 INVAL_CNT Invalid ICMPv6 header counter
++ 0x04 NMVLAN_CNT Not My VLAN counter
++ 0x08 NMIP_CNT Not My IP counter
++ 0x0C AR_CNT Auto-Response counter
++ 0x10 CSERR_CNT Checksum Error counter
++ 0x14 MCAST_CNT Multicast counter
++ 0x18 Reserved Reserved
++ 0x1C Reserved Reserved
++
++*//***************************************************************************/
++typedef _Packed struct
++{
++    uint32_t invalCnt;   /**< Invalid ICMPv6 header counter. */
++ uint32_t nmVlanCnt; /**< Not My VLAN counter */
++ uint32_t nmIpCnt; /**< Not My IP counter */
++ uint32_t arCnt; /**< Auto-Response counter */
++ uint32_t reserved1; /**< Reserved */
++ uint32_t reserved2; /**< Reserved */
++ uint32_t reserved3; /**< Reserved */
++ uint32_t reserved4; /**< Reserved */
++} _PackedType t_DsarIcmpV6Statistics;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response Neighbor Discovery Statistics Descriptor
++ 0x00 INVAL_CNT Invalid Neighbor Discovery message counter
++ 0x04 NMVLAN_CNT Not My VLAN counter
++ 0x08 NMIP_CNT Not My IP counter
++ 0x0C AR_CNT Auto-Response counter
++ 0x10 CSERR_CNT Checksum Error counter
++ 0x14 USADVERT_CNT Unsolicited Neighbor Advertisements counter
++ 0x18 NMMCAST_CNT Not My Multicast group counter
++ 0x1C NSLLA_CNT No Source Link-Layer Address counter. Indicates that there was a match on a Target
++ Address of a packet that its source IP address is a unicast address, but the ICMPv6
++ Source Link-layer Address option is omitted
++*//***************************************************************************/
++typedef _Packed struct
++{
++    uint32_t invalCnt;   /**< Invalid Neighbor Discovery message counter. */
++ uint32_t nmVlanCnt; /**< Not My VLAN counter */
++ uint32_t nmIpCnt; /**< Not My IP counter */
++ uint32_t arCnt; /**< Auto-Response counter */
++ uint32_t reserved1; /**< Reserved */
++ uint32_t usadvertCnt; /**< Unsolicited Neighbor Advertisements counter */
++ uint32_t nmmcastCnt; /**< Not My Multicast group counter */
++ uint32_t nsllaCnt; /**< No Source Link-Layer Address counter */
++} _PackedType t_NdStatistics;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response ICMPv6 Descriptor
++ 0x0 0-15 Control bits [0-15]
++ 0x2 0-15 NumOfBindings Number of entries in the binding list.
++                0x4    0-15  BindingsPointer  Bindings Pointer. This points to a VLAN-IPv6 Addresses Bindings list.
++ 0x6 0-15
++                0x8    0-15  StatisticsPointer  Statistics Pointer. This field points to the ICMPv6 statistics data structure.
++ 0xA 0-15
++ 0xC 0-15 Reserved Reserved. Must be cleared.
++                0xE    0-15
++
++*//***************************************************************************/
++typedef _Packed struct
++{
++    uint16_t control;        /**< Control bits [0-15]. */
++    uint16_t numOfBindings;  /**< Number of VLAN-IPv6 bindings. */
++    uint32_t p_Bindings;     /**< VLAN-IPv6 Bindings table pointer. */
++ uint32_t p_Statistics; /**< Statistics Data Structure pointer. */
++ uint32_t reserved1; /**< Reserved. */
++} _PackedType t_DsarIcmpV6Descriptor;
++
++
++/**************************************************************************//**
++ @Description Internet Control Message Protocol (ICMPv6) Echo message header
++ The fields names are taken from RFC 4443.
++*//***************************************************************************/
++/* 0 1 2 3 */
++/* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 */
++/* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */
++/* | Type | Code | Checksum | */
++/* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */
++/* | Identifier | Sequence Number | */
++/* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */
++/* | Data ... */
++/* +-+-+-+-+- */
++typedef _Packed struct
++{
++ uint8_t type;
++ uint8_t code;
++ uint16_t checksum;
++ uint16_t identifier;
++ uint16_t sequenceNumber;
++} _PackedType t_IcmpV6EchoHdr;
++
++/**************************************************************************//**
++ @Description Internet Control Message Protocol (ICMPv6)
++ Neighbor Solicitation/Advertisement header
++ The fields names are taken from RFC 4861.
++ The R/S/O fields are valid for Neighbor Advertisement only
++*//***************************************************************************/
++/* 0 1 2 3
++ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++ * | Type | Code | Checksum |
++ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++ * |R|S|O| Reserved |
++ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++ * | |
++ * + +
++ * | |
++ * + Target Address +
++ * | |
++ * + +
++ * | |
++ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++ * | Options ...
++ * +-+-+-+-+-+-+-+-+-+-+-+-
++ *
++ * Options Format:
++ * 0 1 2 3
++ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++ * | Type | Length | Link-Layer Address ... |
++ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++ * | Link-Layer Address |
++ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++*/
++typedef _Packed struct
++{
++ uint8_t type;
++ uint8_t code;
++ uint16_t checksum;
++ uint32_t router:1;
++ uint32_t solicited:1;
++ uint32_t override:1;
++ uint32_t reserved:29;
++ uint32_t targetAddr[4];
++ uint8_t optionType;
++ uint8_t optionLength;
++ uint8_t linkLayerAddr[6];
++} _PackedType t_IcmpV6NdHdr;
++
++/**************************************************************************//**
++ @Description    Deep Sleep Auto Response Neighbor Discovery (ND) Descriptor
++ 0x0 0-15 Control bits [0-15]
++ 0x2 0-15 NumOfBindings Number of entries in the binding list.
++                0x4    0-15  BindingsPointer  Bindings Pointer. This points to a VLAN-IPv6 Addresses Bindings list.
++ 0x6 0-15
++                0x8    0-15  StatisticsPointer  Statistics Pointer. This field points to the Neighbor Discovery statistics data structure.
++ 0xA 0-15
++ 0xC 0-15 Reserved Reserved. Must be cleared.
++                0xE    0-15
++
++*//***************************************************************************/
++typedef _Packed struct
++{
++    uint16_t control;        /**< Control bits [0-15]. */
++    uint16_t numOfBindings;  /**< Number of VLAN-IPv6 bindings. */
++    uint32_t p_Bindings;     /**< VLAN-IPv6 Bindings table pointer. */
++ uint32_t p_Statistics; /**< Statistics Data Structure pointer. */
++ uint32_t solicitedAddr; /**< Solicited Node Multicast Group Address */
++} _PackedType t_DsarNdDescriptor;
++
++/**************************************************************************//**
++@Description Deep Sleep Auto Response SNMP OIDs table entry
++
++*//***************************************************************************/
++typedef struct {
++ uint16_t oidSize; /**< Size in octets of the OID. */
++ uint16_t resSize; /**< Size in octets of the value that is attached to the OID. */
++ uint32_t p_Oid; /**< Pointer to the OID. OID is encoded in BER but type and length are excluded. */
++ uint32_t resValOrPtr; /**< Value (for up to 4 octets) or pointer to the Value. Encoded in BER. */
++ uint32_t reserved;
++} t_OidsTblEntry;
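++
++/* Illustrative example (not taken from the spec): the OID 1.3.6.1.2.1.1.5.0
++   (sysName.0), BER-encoded with type and length excluded, is the 8-octet
++   sequence 2B 06 01 02 01 01 05 00, so oidSize would be 8 for this entry. */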
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response SNMP IPv4 Addresses Table Entry
++ Refer to the FMan Controller spec for more details.
++*//***************************************************************************/
++typedef struct
++{
++ uint32_t ipv4Addr; /*!< 32 bit IPv4 Address. */
++    uint16_t vlanId;     /*!< 12-bit VLAN ID. The 4 left-most bits should be cleared */
++ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
++ uint16_t reserved;
++} t_DsarSnmpIpv4AddrTblEntry;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response SNMP IPv6 Addresses Table Entry
++ Refer to the FMan Controller spec for more details.
++*//***************************************************************************/
++#pragma pack(push,1)
++typedef struct
++{
++ uint32_t ipv6Addr[4]; /*!< 4 * 32 bit IPv6 Address. */
++    uint16_t vlanId;     /*!< 12-bit VLAN ID. The 4 left-most bits should be cleared */
++ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
++ uint16_t reserved;
++} t_DsarSnmpIpv6AddrTblEntry;
++#pragma pack(pop)
++
++/**************************************************************************//**
++@Description Deep Sleep Auto Response SNMP statistics table
++
++*//***************************************************************************/
++typedef struct {
++ uint32_t snmpErrCnt; /**< Counts SNMP errors (wrong version, BER encoding, format). */
++ uint32_t snmpCommunityErrCnt; /**< Counts messages that were dropped due to insufficient permission. */
++ uint32_t snmpTotalDiscardCnt; /**< Counts any message that was dropped. */
++ uint32_t snmpGetReqCnt; /**< Counts the number of get-request messages */
++ uint32_t snmpGetNextReqCnt; /**< Counts the number of get-next-request messages */
++} t_DsarSnmpStatistics;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response SNMP Descriptor
++
++*//***************************************************************************/
++typedef struct
++{
++ uint16_t control; /**< Control bits [0-15]. */
++ uint16_t maxSnmpMsgLength; /**< Maximal allowed SNMP message length. */
++ uint16_t numOfIpv4Addresses; /**< Number of entries in IPv4 addresses table. */
++ uint16_t numOfIpv6Addresses; /**< Number of entries in IPv6 addresses table. */
++ uint32_t p_Ipv4AddrTbl; /**< Pointer to IPv4 addresses table. */
++ uint32_t p_Ipv6AddrTbl; /**< Pointer to IPv6 addresses table. */
++ uint32_t p_RdOnlyCommunityStr; /**< Pointer to the Read Only Community String. */
++ uint32_t p_RdWrCommunityStr; /**< Pointer to the Read Write Community String. */
++ uint32_t p_OidsTbl; /**< Pointer to OIDs table. */
++ uint32_t oidsTblSize; /**< Number of entries in OIDs table. */
++ uint32_t p_Statistics; /**< Pointer to SNMP statistics table. */
++} t_DsarSnmpDescriptor;
++
++/**************************************************************************//**
++@Description Deep Sleep Auto Response (Common) Statistics
++
++*//***************************************************************************/
++typedef _Packed struct {
++ uint32_t dsarDiscarded;
++ uint32_t dsarErrDiscarded;
++ uint32_t dsarFragDiscarded;
++ uint32_t dsarTunnelDiscarded;
++ uint32_t dsarArpDiscarded;
++ uint32_t dsarIpDiscarded;
++ uint32_t dsarTcpDiscarded;
++ uint32_t dsarUdpDiscarded;
++ uint32_t dsarIcmpV6ChecksumErr; /* ICMPv6 Checksum Error counter */
++    uint32_t dsarIcmpV6OtherType;  /* ICMPv6 'Other' type (not Echo or Neighbor Solicitation/Advertisement) counter */
++ uint32_t dsarIcmpV4OtherType; /* ICMPv4 'Other' type (not Echo) counter */
++} _PackedType t_ArStatistics;
++
++
++/**************************************************************************//**
++@Description Deep Sleep Auto Response TCP/UDP port filter table entry
++
++*//***************************************************************************/
++typedef _Packed struct {
++ uint32_t Ports;
++ uint32_t PortsMask;
++} _PackedType t_PortTblEntry;
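++
++/* The exact packing of Ports/PortsMask is defined by the FMan Controller
++   spec; presumably each entry packs a 16-bit port pair together with a
++   per-bit match mask. */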
++
++
++
++/**************************************************************************//**
++@Description Deep Sleep Auto Response Common Parameters Descriptor
++
++*//***************************************************************************/
++typedef _Packed struct {
++ uint8_t arTxPort; /* 0x00 0-7 Auto Response Transmit Port number */
++ uint8_t controlBits; /* 0x00 8-15 Auto Response control bits */
++ uint16_t res1; /* 0x00 16-31 Reserved */
++ uint32_t activeHPNIA; /* 0x04 0-31 Active mode Hardware Parser NIA */
++ uint16_t snmpPort; /* 0x08 0-15 SNMP Port. */
++ uint8_t macStationAddr[6]; /* 0x08 16-31 and 0x0C 0-31 MAC Station Address */
++ uint8_t res2; /* 0x10 0-7 Reserved */
++ uint8_t filterControl; /* 0x10 8-15 Filtering Control Bits. */
++ uint16_t tcpControlPass; /* 0x10 16-31 TCP control pass flags */
++ uint8_t ipProtocolTblSize; /* 0x14 0-7 IP Protocol Table Size. */
++ uint8_t udpPortTblSize; /* 0x14 8-15 UDP Port Table Size. */
++ uint8_t tcpPortTblSize; /* 0x14 16-23 TCP Port Table Size. */
++ uint8_t res3; /* 0x14 24-31 Reserved */
++ uint32_t p_IpProtocolFiltTbl; /* 0x18 0-31 Pointer to IP Protocol Filter Table */
++ uint32_t p_UdpPortFiltTbl; /* 0x1C 0-31 Pointer to UDP Port Filter Table */
++ uint32_t p_TcpPortFiltTbl; /* 0x20 0-31 Pointer to TCP Port Filter Table */
++ uint32_t res4; /* 0x24 Reserved */
++ uint32_t p_ArpDescriptor; /* 0x28 0-31 ARP Descriptor Pointer. */
++ uint32_t p_NdDescriptor; /* 0x2C 0-31 Neighbor Discovery Descriptor. */
++ uint32_t p_IcmpV4Descriptor; /* 0x30 0-31 ICMPv4 Descriptor pointer. */
++ uint32_t p_IcmpV6Descriptor; /* 0x34 0-31 ICMPv6 Descriptor pointer. */
++ uint32_t p_SnmpDescriptor; /* 0x38 0-31 SNMP Descriptor pointer. */
++ uint32_t p_ArStats; /* 0x3C 0-31 Pointer to Auto Response Statistics */
++} _PackedType t_ArCommonDesc;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++/* t_ArCommonDesc.filterControl bits */
++#define IP_PROT_TBL_PASS_MASK 0x08
++#define UDP_PORT_TBL_PASS_MASK 0x04
++#define TCP_PORT_TBL_PASS_MASK 0x02
++
++/* Offset of TCF flags within TCP packet */
++#define TCP_FLAGS_OFFSET 12
++
++
++#endif /* __FM_PORT_DSAR_H_ */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_im.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_im.c
+new file mode 100644
+index 00000000..8de8f5fd
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_im.c
+@@ -0,0 +1,753 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_port_im.c
++
++ @Description FM Port Independent-Mode ...
++*//***************************************************************************/
++#include "std_ext.h"
++#include "string_ext.h"
++#include "error_ext.h"
++#include "memcpy_ext.h"
++#include "fm_muram_ext.h"
++
++#include "fm_port.h"
++
++
++#define TX_CONF_STATUS_UNSENT 0x1
++
++
++typedef enum e_TxConfType
++{
++ e_TX_CONF_TYPE_CHECK = 0 /**< check if all the buffers were touched by the muxator, no confirmation callback */
++ ,e_TX_CONF_TYPE_CALLBACK = 1 /**< confirm to user all the available sent buffers */
++ ,e_TX_CONF_TYPE_FLUSH = 3 /**< confirm all buffers plus the unsent one with an appropriate status */
++} e_TxConfType;
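++
++/* Note: e_TX_CONF_TYPE_FLUSH (3) has the e_TX_CONF_TYPE_CALLBACK bit (1) set,
++   so tests of the form (confType & e_TX_CONF_TYPE_CALLBACK) match both the
++   callback and flush cases. */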
++
++
++static void ImException(t_Handle h_FmPort, uint32_t event)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ ASSERT_COND(((event & (IM_EV_RX | IM_EV_BSY)) && FmIsMaster(p_FmPort->h_Fm)) ||
++ !FmIsMaster(p_FmPort->h_Fm));
++
++ if (event & IM_EV_RX)
++ FmPortImRx(p_FmPort);
++ if ((event & IM_EV_BSY) && p_FmPort->f_Exception)
++ p_FmPort->f_Exception(p_FmPort->h_App, e_FM_PORT_EXCEPTION_IM_BUSY);
++}
++
++
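++/* Walk the Tx BD ring from the current BD, confirming completed buffers to
++   the user's f_TxConf callback. Returns E_OK once the walk wraps back to the
++   starting BD; otherwise it stops at a BD still owned by the FMan (R bit set)
++   and returns E_BUSY, unless confType is e_TX_CONF_TYPE_FLUSH, in which case
++   unsent buffers are also confirmed with TX_CONF_STATUS_UNSENT. */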
++static t_Error TxConf(t_FmPort *p_FmPort, e_TxConfType confType)
++{
++ t_Error retVal = E_BUSY;
++ uint32_t bdStatus;
++ uint16_t savedStartBdId, confBdId;
++
++ ASSERT_COND(p_FmPort);
++
++ /*
++ if (confType==e_TX_CONF_TYPE_CHECK)
++ return (WfqEntryIsQueueEmpty(p_FmPort->im.h_WfqEntry) ? E_OK : E_BUSY);
++ */
++
++ confBdId = savedStartBdId = p_FmPort->im.currBdId;
++ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(confBdId));
++
++    /* If the R bit is set we don't enter (or we break);
++       we run until we reach a set R bit, or complete a full pass over the ring */
++ while ((!(bdStatus & BD_R_E) || (confType == e_TX_CONF_TYPE_FLUSH)) && (retVal != E_OK))
++ {
++ if (confType & e_TX_CONF_TYPE_CALLBACK) /* if it is confirmation with user callbacks */
++ BD_STATUS_AND_LENGTH_SET(BD_GET(confBdId), 0);
++
++ /* case 1: R bit is 0 and Length is set -> confirm! */
++ if ((confType & e_TX_CONF_TYPE_CALLBACK) && (bdStatus & BD_LENGTH_MASK))
++ {
++ if (p_FmPort->im.f_TxConf)
++ {
++ if ((confType == e_TX_CONF_TYPE_FLUSH) && (bdStatus & BD_R_E))
++ p_FmPort->im.f_TxConf(p_FmPort->h_App,
++ BdBufferGet(XX_PhysToVirt, BD_GET(confBdId)),
++ TX_CONF_STATUS_UNSENT,
++ p_FmPort->im.p_BdShadow[confBdId]);
++ else
++ p_FmPort->im.f_TxConf(p_FmPort->h_App,
++ BdBufferGet(XX_PhysToVirt, BD_GET(confBdId)),
++ 0,
++ p_FmPort->im.p_BdShadow[confBdId]);
++ }
++ }
++ /* case 2: R bit is 0 and Length is 0 -> not used yet, nop! */
++
++ confBdId = GetNextBdId(p_FmPort, confBdId);
++ if (confBdId == savedStartBdId)
++ retVal = E_OK;
++ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(confBdId));
++ }
++
++ return retVal;
++}
++
++t_Error FmPortImEnable(t_FmPort *p_FmPort)
++{
++ uint32_t tmpReg = GET_UINT32(p_FmPort->im.p_FmPortImPram->mode);
++ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, (uint32_t)(tmpReg & ~IM_MODE_GRC_STP));
++ return E_OK;
++}
++
++t_Error FmPortImDisable(t_FmPort *p_FmPort)
++{
++ uint32_t tmpReg = GET_UINT32(p_FmPort->im.p_FmPortImPram->mode);
++ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, (uint32_t)(tmpReg | IM_MODE_GRC_STP));
++ return E_OK;
++}
++
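++/* Poll the Rx BD ring: for each filled BD, swap in a fresh buffer from the
++   pool, hand the received buffer to the application via f_RxStore (errored
++   single-buffer frames are returned to the pool instead), and advance the
++   Rx queue descriptor's offsetOut so the FMan can reuse the BD. The 'lock'
++   flag guards against re-entry from polling and interrupt context. */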
++t_Error FmPortImRx(t_FmPort *p_FmPort)
++{
++ t_Handle h_CurrUserPriv, h_NewUserPriv;
++ uint32_t bdStatus;
++ volatile uint8_t buffPos;
++ uint16_t length;
++ uint16_t errors;
++ uint8_t *p_CurData, *p_Data;
++ uint32_t flags;
++
++ ASSERT_COND(p_FmPort);
++
++ flags = XX_LockIntrSpinlock(p_FmPort->h_Spinlock);
++ if (p_FmPort->lock)
++ {
++ XX_UnlockIntrSpinlock(p_FmPort->h_Spinlock, flags);
++ return E_OK;
++ }
++ p_FmPort->lock = TRUE;
++ XX_UnlockIntrSpinlock(p_FmPort->h_Spinlock, flags);
++
++ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
++
++ while (!(bdStatus & BD_R_E)) /* while there is data in the Rx BD */
++ {
++ if ((p_Data = p_FmPort->im.rxPool.f_GetBuf(p_FmPort->im.rxPool.h_BufferPool, &h_NewUserPriv)) == NULL)
++ {
++ p_FmPort->lock = FALSE;
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Data buffer"));
++ }
++
++ if (p_FmPort->im.firstBdOfFrameId == IM_ILEGAL_BD_ID)
++ p_FmPort->im.firstBdOfFrameId = p_FmPort->im.currBdId;
++
++ p_CurData = BdBufferGet(p_FmPort->im.rxPool.f_PhysToVirt, BD_GET(p_FmPort->im.currBdId));
++ h_CurrUserPriv = p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId];
++ length = (uint16_t)((bdStatus & BD_L) ?
++ ((bdStatus & BD_LENGTH_MASK) - p_FmPort->im.rxFrameAccumLength):
++ (bdStatus & BD_LENGTH_MASK));
++ p_FmPort->im.rxFrameAccumLength += length;
++
++ /* determine whether buffer is first, last, first and last (single */
++ /* buffer frame) or middle (not first and not last) */
++ buffPos = (uint8_t)((p_FmPort->im.currBdId == p_FmPort->im.firstBdOfFrameId) ?
++ ((bdStatus & BD_L) ? SINGLE_BUF : FIRST_BUF) :
++ ((bdStatus & BD_L) ? LAST_BUF : MIDDLE_BUF));
++
++ if (bdStatus & BD_L)
++ {
++ p_FmPort->im.rxFrameAccumLength = 0;
++ p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID;
++ }
++
++ BdBufferSet(p_FmPort->im.rxPool.f_VirtToPhys, BD_GET(p_FmPort->im.currBdId), p_Data);
++
++ BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.currBdId), BD_R_E);
++
++ errors = (uint16_t)((bdStatus & BD_RX_ERRORS) >> 16);
++ p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId] = h_NewUserPriv;
++
++ p_FmPort->im.currBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId);
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.offsetOut, (uint16_t)(p_FmPort->im.currBdId<<4));
++        /* Pass the buffer if one of the conditions is true:
++           - There are no errors
++           - This is a part of a larger frame (the application has already received some buffers) */
++ if ((buffPos != SINGLE_BUF) || !errors)
++ {
++ if (p_FmPort->im.f_RxStore(p_FmPort->h_App,
++ p_CurData,
++ length,
++ errors,
++ buffPos,
++ h_CurrUserPriv) == e_RX_STORE_RESPONSE_PAUSE)
++ break;
++ }
++ else if (p_FmPort->im.rxPool.f_PutBuf(p_FmPort->im.rxPool.h_BufferPool,
++ p_CurData,
++ h_CurrUserPriv))
++ {
++ p_FmPort->lock = FALSE;
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Failed freeing data buffer"));
++ }
++
++ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
++ }
++ p_FmPort->lock = FALSE;
++ return E_OK;
++}
++
++void FmPortConfigIM (t_FmPort *p_FmPort, t_FmPortParams *p_FmPortParams)
++{
++ ASSERT_COND(p_FmPort);
++
++ SANITY_CHECK_RETURN(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->im.h_FmMuram = p_FmPortParams->specificParams.imRxTxParams.h_FmMuram;
++ p_FmPort->p_FmPortDriverParam->liodnOffset = p_FmPortParams->specificParams.imRxTxParams.liodnOffset;
++ p_FmPort->im.dataMemId = p_FmPortParams->specificParams.imRxTxParams.dataMemId;
++ p_FmPort->im.dataMemAttributes = p_FmPortParams->specificParams.imRxTxParams.dataMemAttributes;
++
++ p_FmPort->im.fwExtStructsMemId = DEFAULT_PORT_ImfwExtStructsMemId;
++ p_FmPort->im.fwExtStructsMemAttr = DEFAULT_PORT_ImfwExtStructsMemAttr;
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) ||
++ (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ p_FmPort->im.rxPool.h_BufferPool = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.h_BufferPool;
++ p_FmPort->im.rxPool.f_GetBuf = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_GetBuf;
++ p_FmPort->im.rxPool.f_PutBuf = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_PutBuf;
++ p_FmPort->im.rxPool.bufferSize = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.bufferSize;
++ p_FmPort->im.rxPool.f_PhysToVirt = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_PhysToVirt;
++ if (!p_FmPort->im.rxPool.f_PhysToVirt)
++ p_FmPort->im.rxPool.f_PhysToVirt = XX_PhysToVirt;
++ p_FmPort->im.rxPool.f_VirtToPhys = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_VirtToPhys;
++ if (!p_FmPort->im.rxPool.f_VirtToPhys)
++ p_FmPort->im.rxPool.f_VirtToPhys = XX_VirtToPhys;
++ p_FmPort->im.f_RxStore = p_FmPortParams->specificParams.imRxTxParams.f_RxStore;
++
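++        /* Derive MRBLR (maximum Rx buffer length) by rounding the pool's
++           buffer size down to a power of two: scan from bit 15 (0x8000)
++           downwards for the highest bit set in bufferSize. */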
++ p_FmPort->im.mrblr = 0x8000;
++ while (p_FmPort->im.mrblr)
++ {
++ if (p_FmPort->im.rxPool.bufferSize & p_FmPort->im.mrblr)
++ break;
++ p_FmPort->im.mrblr >>= 1;
++ }
++ if (p_FmPort->im.mrblr != p_FmPort->im.rxPool.bufferSize)
++ DBG(WARNING, ("Max-Rx-Buffer-Length set to %d", p_FmPort->im.mrblr));
++ p_FmPort->im.bdRingSize = DEFAULT_PORT_rxBdRingLength;
++ p_FmPort->exceptions = DEFAULT_PORT_exception;
++ if (FmIsMaster(p_FmPort->h_Fm))
++ p_FmPort->polling = FALSE;
++ else
++ p_FmPort->polling = TRUE;
++ p_FmPort->fmanCtrlEventId = (uint8_t)NO_IRQ;
++ }
++ else
++ {
++ p_FmPort->im.f_TxConf = p_FmPortParams->specificParams.imRxTxParams.f_TxConf;
++
++ p_FmPort->im.bdRingSize = DEFAULT_PORT_txBdRingLength;
++ }
++}
++
++t_Error FmPortImCheckInitParameters(t_FmPort *p_FmPort)
++{
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX) &&
++ (p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) &&
++ (p_FmPort->portType != e_FM_PORT_TYPE_TX) &&
++ (p_FmPort->portType != e_FM_PORT_TYPE_TX_10G))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) ||
++ (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ if (!POWER_OF_2(p_FmPort->im.mrblr))
++            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("max Rx buffer length must be a power of 2!!!"));
++ if (p_FmPort->im.mrblr < 256)
++            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("max Rx buffer length must be at least 256!!!"));
++ if (p_FmPort->p_FmPortDriverParam->liodnOffset & ~FM_LIODN_OFFSET_MASK)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("liodnOffset is larger than %d", FM_LIODN_OFFSET_MASK+1));
++ }
++
++ return E_OK;
++}
++
++t_Error FmPortImInit(t_FmPort *p_FmPort)
++{
++ t_FmImBd *p_Bd=NULL;
++ t_Handle h_BufContext;
++ uint64_t tmpPhysBase;
++ uint16_t log2Num;
++ uint8_t *p_Data/*, *p_Tmp*/;
++ int i;
++ t_Error err;
++ uint16_t tmpReg16;
++ uint32_t tmpReg32;
++
++ ASSERT_COND(p_FmPort);
++
++ p_FmPort->im.p_FmPortImPram =
++ (t_FmPortImPram *)FM_MURAM_AllocMem(p_FmPort->im.h_FmMuram, sizeof(t_FmPortImPram), IM_PRAM_ALIGN);
++ if (!p_FmPort->im.p_FmPortImPram)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Parameter-RAM!!!"));
++ WRITE_BLOCK(p_FmPort->im.p_FmPortImPram, 0, sizeof(t_FmPortImPram));
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) ||
++ (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ p_FmPort->im.p_BdRing =
++ (t_FmImBd *)XX_MallocSmart((uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize),
++ p_FmPort->im.fwExtStructsMemId,
++ 4);
++ if (!p_FmPort->im.p_BdRing)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Rx BD ring!!!"));
++ IOMemSet32(p_FmPort->im.p_BdRing, 0, (uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize));
++
++ p_FmPort->im.p_BdShadow = (t_Handle *)XX_Malloc((uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize));
++ if (!p_FmPort->im.p_BdShadow)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Rx BD shadow!!!"));
++ memset(p_FmPort->im.p_BdShadow, 0, (uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize));
++
++ /* Initialize the Rx-BD ring */
++ for (i=0; i<p_FmPort->im.bdRingSize; i++)
++ {
++ p_Bd = BD_GET(i);
++ BD_STATUS_AND_LENGTH_SET (p_Bd, BD_R_E);
++
++ if ((p_Data = p_FmPort->im.rxPool.f_GetBuf(p_FmPort->im.rxPool.h_BufferPool, &h_BufContext)) == NULL)
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Data buffer"));
++ BdBufferSet(p_FmPort->im.rxPool.f_VirtToPhys, p_Bd, p_Data);
++ p_FmPort->im.p_BdShadow[i] = h_BufContext;
++ }
++
++ if ((p_FmPort->im.dataMemAttributes & MEMORY_ATTR_CACHEABLE) ||
++ (p_FmPort->im.fwExtStructsMemAttr & MEMORY_ATTR_CACHEABLE))
++ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_GBL | IM_MODE_SET_BO(2));
++ else
++ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_SET_BO(2));
++
++ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->rxQdPtr,
++ (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) -
++ p_FmPort->fmMuramPhysBaseAddr + 0x20));
++
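++        /* The PRAM stores the maximum Rx buffer length as a log2 exponent,
++           hence the LOG2 conversion; mrblr was already validated to be a
++           power of two in FmPortImCheckInitParameters(). */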
++ LOG2((uint64_t)p_FmPort->im.mrblr, log2Num);
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->mrblr, log2Num);
++
++ /* Initialize Rx QD */
++ tmpPhysBase = (uint64_t)(XX_VirtToPhys(p_FmPort->im.p_BdRing));
++ SET_ADDR(&p_FmPort->im.p_FmPortImPram->rxQd.bdRingBase, tmpPhysBase);
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.bdRingSize, (uint16_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize));
++
++ /* Update the IM PRAM address in the BMI */
++ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfqid,
++ (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) -
++ p_FmPort->fmMuramPhysBaseAddr));
++ if (!p_FmPort->polling || p_FmPort->exceptions)
++ {
++ /* Allocate, configure and register interrupts */
++ err = FmAllocFmanCtrlEventReg(p_FmPort->h_Fm, &p_FmPort->fmanCtrlEventId);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ ASSERT_COND(!(p_FmPort->fmanCtrlEventId & ~IM_RXQD_FPMEVT_SEL_MASK));
++ tmpReg16 = (uint16_t)(p_FmPort->fmanCtrlEventId & IM_RXQD_FPMEVT_SEL_MASK);
++ tmpReg32 = 0;
++
++ if (p_FmPort->exceptions & IM_EV_BSY)
++ {
++ tmpReg16 |= IM_RXQD_BSYINTM;
++ tmpReg32 |= IM_EV_BSY;
++ }
++ if (!p_FmPort->polling)
++ {
++ tmpReg16 |= IM_RXQD_RXFINTM;
++ tmpReg32 |= IM_EV_RX;
++ }
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, tmpReg16);
++
++ FmRegisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, ImException , (t_Handle)p_FmPort);
++
++ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, tmpReg32);
++ }
++ else
++ p_FmPort->fmanCtrlEventId = (uint8_t)NO_IRQ;
++ }
++ else
++ {
++ p_FmPort->im.p_BdRing = (t_FmImBd *)XX_MallocSmart((uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize), p_FmPort->im.fwExtStructsMemId, 4);
++ if (!p_FmPort->im.p_BdRing)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Tx BD ring!!!"));
++ IOMemSet32(p_FmPort->im.p_BdRing, 0, (uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize));
++
++ p_FmPort->im.p_BdShadow = (t_Handle *)XX_Malloc((uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize));
++ if (!p_FmPort->im.p_BdShadow)
++            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Tx BD shadow!!!"));
++ memset(p_FmPort->im.p_BdShadow, 0, (uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize));
++ p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID;
++
++ if ((p_FmPort->im.dataMemAttributes & MEMORY_ATTR_CACHEABLE) ||
++ (p_FmPort->im.fwExtStructsMemAttr & MEMORY_ATTR_CACHEABLE))
++ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_GBL | IM_MODE_SET_BO(2));
++ else
++ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_SET_BO(2));
++
++ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->txQdPtr,
++ (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) -
++ p_FmPort->fmMuramPhysBaseAddr + 0x40));
++
++ /* Initialize Tx QD */
++ tmpPhysBase = (uint64_t)(XX_VirtToPhys(p_FmPort->im.p_BdRing));
++ SET_ADDR(&p_FmPort->im.p_FmPortImPram->txQd.bdRingBase, tmpPhysBase);
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->txQd.bdRingSize, (uint16_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize));
++
++ /* Update the IM PRAM address in the BMI */
++ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfqid,
++ (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) -
++ p_FmPort->fmMuramPhysBaseAddr));
++ }
++
++
++ return E_OK;
++}
++
++void FmPortImFree(t_FmPort *p_FmPort)
++{
++ uint32_t bdStatus;
++ uint8_t *p_CurData;
++
++ ASSERT_COND(p_FmPort);
++ ASSERT_COND(p_FmPort->im.p_FmPortImPram);
++
++ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) ||
++ (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ if (!p_FmPort->polling || p_FmPort->exceptions)
++ {
++ /* Deallocate and unregister interrupts */
++ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, 0);
++
++ FmFreeFmanCtrlEventReg(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId);
++
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, 0);
++
++ FmUnregisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId);
++ }
++        /* First, clean up whatever has already been received */
++ FmPortImRx(p_FmPort);
++
++        /* Now, get rid of the empty buffers! */
++ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
++
++ while (bdStatus & BD_R_E) /* while there is data in the Rx BD */
++ {
++ p_CurData = BdBufferGet(p_FmPort->im.rxPool.f_PhysToVirt, BD_GET(p_FmPort->im.currBdId));
++
++ BdBufferSet(p_FmPort->im.rxPool.f_VirtToPhys, BD_GET(p_FmPort->im.currBdId), NULL);
++ BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.currBdId), 0);
++
++ p_FmPort->im.rxPool.f_PutBuf(p_FmPort->im.rxPool.h_BufferPool,
++ p_CurData,
++ p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId]);
++
++ p_FmPort->im.currBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId);
++ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
++ }
++ }
++ else
++ TxConf(p_FmPort, e_TX_CONF_TYPE_FLUSH);
++
++ FM_MURAM_FreeMem(p_FmPort->im.h_FmMuram, p_FmPort->im.p_FmPortImPram);
++
++ if (p_FmPort->im.p_BdShadow)
++ XX_Free(p_FmPort->im.p_BdShadow);
++
++ if (p_FmPort->im.p_BdRing)
++ XX_FreeSmart(p_FmPort->im.p_BdRing);
++}
++
++
++t_Error FM_PORT_ConfigIMMaxRxBufLength(t_Handle h_FmPort, uint16_t newVal)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->im.mrblr = newVal;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigIMRxBdRingLength(t_Handle h_FmPort, uint16_t newVal)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->im.bdRingSize = newVal;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigIMTxBdRingLength(t_Handle h_FmPort, uint16_t newVal)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->im.bdRingSize = newVal;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigIMFmanCtrlExternalStructsMemory(t_Handle h_FmPort,
++ uint8_t memId,
++ uint32_t memAttributes)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ p_FmPort->im.fwExtStructsMemId = memId;
++ p_FmPort->im.fwExtStructsMemAttr = memAttributes;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_ConfigIMPolling(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
++ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Available for Rx ports only"));
++
++ if (!FmIsMaster(p_FmPort->h_Fm))
++        RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Available on master-partition only; "
++                                                  "in guest-partitions, IM is always in polling!"));
++
++ p_FmPort->polling = TRUE;
++
++ return E_OK;
++}
++
++t_Error FM_PORT_SetIMExceptions(t_Handle h_FmPort, e_FmPortExceptions exception, bool enable)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ t_Error err;
++ uint16_t tmpReg16;
++ uint32_t tmpReg32;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ if (exception == e_FM_PORT_EXCEPTION_IM_BUSY)
++ {
++ if (enable)
++ {
++ p_FmPort->exceptions |= IM_EV_BSY;
++ if (p_FmPort->fmanCtrlEventId == (uint8_t)NO_IRQ)
++ {
++ /* Allocate, configure and register interrupts */
++ err = FmAllocFmanCtrlEventReg(p_FmPort->h_Fm, &p_FmPort->fmanCtrlEventId);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ ASSERT_COND(!(p_FmPort->fmanCtrlEventId & ~IM_RXQD_FPMEVT_SEL_MASK));
++
++ FmRegisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, ImException, (t_Handle)p_FmPort);
++ tmpReg16 = (uint16_t)((p_FmPort->fmanCtrlEventId & IM_RXQD_FPMEVT_SEL_MASK) | IM_RXQD_BSYINTM);
++ tmpReg32 = IM_EV_BSY;
++ }
++ else
++ {
++ tmpReg16 = (uint16_t)(GET_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen) | IM_RXQD_BSYINTM);
++ tmpReg32 = FmGetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId) | IM_EV_BSY;
++ }
++
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, tmpReg16);
++ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, tmpReg32);
++ }
++ else
++ {
++ p_FmPort->exceptions &= ~IM_EV_BSY;
++ if (!p_FmPort->exceptions && p_FmPort->polling)
++ {
++ FmFreeFmanCtrlEventReg(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId);
++ FmUnregisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId);
++ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, 0);
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, 0);
++ p_FmPort->fmanCtrlEventId = (uint8_t)NO_IRQ;
++ }
++ else
++ {
++ tmpReg16 = (uint16_t)(GET_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen) & ~IM_RXQD_BSYINTM);
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, tmpReg16);
++ tmpReg32 = FmGetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId) & ~IM_EV_BSY;
++ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, tmpReg32);
++ }
++ }
++ }
++ else
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("Invalid exception."));
++
++ return E_OK;
++}
++
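++/* Queue one buffer of a frame for transmission. Both the current BD and the
++   one after it must be free (R bit clear) before the buffer is accepted;
++   keeping one BD in reserve apparently prevents the ring from ever becoming
++   completely full, which would be indistinguishable from empty. If no BD is
++   available, any BDs already queued for the current frame are reclaimed and
++   E_FULL is returned. */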
++t_Error FM_PORT_ImTx( t_Handle h_FmPort,
++ uint8_t *p_Data,
++ uint16_t length,
++ bool lastBuffer,
++ t_Handle h_BufContext)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++ uint16_t nextBdId;
++ uint32_t bdStatus, nextBdStatus;
++ bool firstBuffer;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
++ nextBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId);
++ nextBdStatus = BD_STATUS_AND_LENGTH(BD_GET(nextBdId));
++
++ if (!(bdStatus & BD_R_E) && !(nextBdStatus & BD_R_E))
++ {
++ /* Confirm the current BD - BD is available */
++ if ((bdStatus & BD_LENGTH_MASK) && (p_FmPort->im.f_TxConf))
++ p_FmPort->im.f_TxConf (p_FmPort->h_App,
++ BdBufferGet(XX_PhysToVirt, BD_GET(p_FmPort->im.currBdId)),
++ 0,
++ p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId]);
++
++ bdStatus = length;
++
++ /* if this is the first BD of a frame */
++ if (p_FmPort->im.firstBdOfFrameId == IM_ILEGAL_BD_ID)
++ {
++ firstBuffer = TRUE;
++ p_FmPort->im.txFirstBdStatus = (bdStatus | BD_R_E);
++
++ if (!lastBuffer)
++ p_FmPort->im.firstBdOfFrameId = p_FmPort->im.currBdId;
++ }
++ else
++ firstBuffer = FALSE;
++
++ BdBufferSet(XX_VirtToPhys, BD_GET(p_FmPort->im.currBdId), p_Data);
++ p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId] = h_BufContext;
++
++ /* deal with last */
++ if (lastBuffer)
++ {
++ /* if single buffer frame */
++ if (firstBuffer)
++ BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.currBdId), p_FmPort->im.txFirstBdStatus | BD_L);
++ else
++ {
++ /* Set the last BD of the frame */
++ BD_STATUS_AND_LENGTH_SET (BD_GET(p_FmPort->im.currBdId), (bdStatus | BD_R_E | BD_L));
++ /* Set the first BD of the frame */
++ BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.firstBdOfFrameId), p_FmPort->im.txFirstBdStatus);
++ p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID;
++ }
++ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->txQd.offsetIn, (uint16_t)(GetNextBdId(p_FmPort, p_FmPort->im.currBdId)<<4));
++ }
++ else if (!firstBuffer) /* mid frame buffer */
++ BD_STATUS_AND_LENGTH_SET (BD_GET(p_FmPort->im.currBdId), bdStatus | BD_R_E);
++
++ p_FmPort->im.currBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId);
++ }
++ else
++ {
++ /* Discard current frame. Return error. */
++ if (p_FmPort->im.firstBdOfFrameId != IM_ILEGAL_BD_ID)
++ {
++ /* Error: No free BD */
++ /* Response: Discard current frame. Return error. */
++ uint16_t cleanBdId = p_FmPort->im.firstBdOfFrameId;
++
++ ASSERT_COND(p_FmPort->im.firstBdOfFrameId != p_FmPort->im.currBdId);
++
++            /* Since firstBdOfFrameId is valid, at least one buffer has already
++               been inserted into the BD ring. Using do-while covers the situation
++               of a frame spanning the whole Tx BD ring (cleanBdId is incremented
++               prior to testing whether it equals currBdId). */
++ do
++ {
++ BD_STATUS_AND_LENGTH_SET(BD_GET(cleanBdId), 0);
++ /* Advance BD pointer */
++ cleanBdId = GetNextBdId(p_FmPort, cleanBdId);
++ } while (cleanBdId != p_FmPort->im.currBdId);
++
++ p_FmPort->im.currBdId = cleanBdId;
++ p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID;
++ }
++
++ return ERROR_CODE(E_FULL);
++ }
++
++ return E_OK;
++}
++
++void FM_PORT_ImTxConf(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN(p_FmPort->imEn, E_INVALID_STATE);
++ SANITY_CHECK_RETURN(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ TxConf(p_FmPort, e_TX_CONF_TYPE_CALLBACK);
++}
++
++t_Error FM_PORT_ImRx(t_Handle h_FmPort)
++{
++ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
++
++ return FmPortImRx(p_FmPort);
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fman_port.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fman_port.c
+new file mode 100755
+index 00000000..60acbf34
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fman_port.c
+@@ -0,0 +1,1568 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "common/general.h"
++
++#include "fman_common.h"
++#include "fsl_fman_port.h"
++
++
++/* FIXME (Eyal): the following definition should not be here */
++#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME 0x00000028
++
++static uint32_t get_no_pcd_nia_bmi_ac_enc_frame(struct fman_port_cfg *cfg)
++{
++ if (cfg->errata_A006675)
++ return NIA_ENG_FM_CTL |
++ NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME;
++ else
++ return NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
++}
++
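++/* Program the Rx port's BMI registers: port configuration, DMA attributes,
++   FIFO thresholds, frame-end data, internal-context and buffer-margin
++   geometry, next-invoked-action (NIA) chaining, default/error frame queues,
++   and the statistics/performance counters. Independent-mode ports (im_en)
++   skip the external buffer margins, frame-queue IDs and the frame-attribute
++   bits that only apply to the QMI-based path. */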
++static int init_bmi_rx(struct fman_port *port,
++ struct fman_port_cfg *cfg,
++ struct fman_port_params *params)
++{
++ struct fman_port_rx_bmi_regs *regs = &port->bmi_regs->rx;
++ uint32_t tmp;
++
++ /* Rx Configuration register */
++ tmp = 0;
++ if (port->im_en)
++ tmp |= BMI_PORT_CFG_IM;
++ else if (cfg->discard_override)
++ tmp |= BMI_PORT_CFG_FDOVR;
++ iowrite32be(tmp, &regs->fmbm_rcfg);
++
++ /* DMA attributes */
++ tmp = (uint32_t)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
++ if (cfg->dma_ic_stash_on)
++ tmp |= BMI_DMA_ATTR_IC_STASH_ON;
++ if (cfg->dma_header_stash_on)
++ tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
++ if (cfg->dma_sg_stash_on)
++ tmp |= BMI_DMA_ATTR_SG_STASH_ON;
++ if (cfg->dma_write_optimize)
++ tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
++ iowrite32be(tmp, &regs->fmbm_rda);
++
++ /* Rx FIFO parameters */
++ tmp = (cfg->rx_pri_elevation / FMAN_PORT_BMI_FIFO_UNITS - 1) <<
++ BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
++ tmp |= cfg->rx_fifo_thr / FMAN_PORT_BMI_FIFO_UNITS - 1;
++ iowrite32be(tmp, &regs->fmbm_rfp);
++
++ if (cfg->excessive_threshold_register)
++ /* always allow access to the extra resources */
++ iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, &regs->fmbm_reth);
++
++ /* Frame end data */
++ tmp = (uint32_t)cfg->checksum_bytes_ignore <<
++ BMI_RX_FRAME_END_CS_IGNORE_SHIFT;
++ tmp |= (uint32_t)cfg->rx_cut_end_bytes <<
++ BMI_RX_FRAME_END_CUT_SHIFT;
++ if (cfg->errata_A006320)
++ tmp &= 0xffe0ffff;
++ iowrite32be(tmp, &regs->fmbm_rfed);
++
++ /* Internal context parameters */
++ tmp = ((uint32_t)cfg->ic_ext_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
++ BMI_IC_TO_EXT_SHIFT;
++ tmp |= ((uint32_t)cfg->ic_int_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
++ BMI_IC_FROM_INT_SHIFT;
++ tmp |= cfg->ic_size / FMAN_PORT_IC_OFFSET_UNITS;
++ iowrite32be(tmp, &regs->fmbm_ricp);
++
++ /* Internal buffer offset */
++ tmp = ((uint32_t)cfg->int_buf_start_margin / FMAN_PORT_IC_OFFSET_UNITS)
++ << BMI_INT_BUF_MARG_SHIFT;
++ iowrite32be(tmp, &regs->fmbm_rim);
++
++ /* External buffer margins */
++ if (!port->im_en)
++ {
++ tmp = (uint32_t)cfg->ext_buf_start_margin <<
++ BMI_EXT_BUF_MARG_START_SHIFT;
++ tmp |= (uint32_t)cfg->ext_buf_end_margin;
++ if (cfg->fmbm_rebm_has_sgd && cfg->no_scatter_gather)
++ tmp |= BMI_SG_DISABLE;
++ iowrite32be(tmp, &regs->fmbm_rebm);
++ }
++
++ /* Frame attributes */
++ tmp = BMI_CMD_RX_MR_DEF;
++ if (!port->im_en)
++ {
++ tmp |= BMI_CMD_ATTR_ORDER;
++ tmp |= (uint32_t)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
++ if (cfg->sync_req)
++ tmp |= BMI_CMD_ATTR_SYNC;
++ }
++ iowrite32be(tmp, &regs->fmbm_rfca);
++
++ /* NIA */
++ if (port->im_en)
++ tmp = NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_RX;
++ else
++ {
++ tmp = (uint32_t)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
++ tmp |= get_no_pcd_nia_bmi_ac_enc_frame(cfg);
++ }
++ iowrite32be(tmp, &regs->fmbm_rfne);
++
++ /* Enqueue NIA */
++ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
++
++ /* Default/error queues */
++ if (!port->im_en)
++ {
++ iowrite32be((params->dflt_fqid & 0x00FFFFFF), &regs->fmbm_rfqid);
++ iowrite32be((params->err_fqid & 0x00FFFFFF), &regs->fmbm_refqid);
++ }
++
++ /* Discard/error masks */
++ iowrite32be(params->discard_mask, &regs->fmbm_rfsdm);
++ iowrite32be(params->err_mask, &regs->fmbm_rfsem);
++
++ /* Statistics counters */
++ tmp = 0;
++ if (cfg->stats_counters_enable)
++ tmp = BMI_COUNTERS_EN;
++ iowrite32be(tmp, &regs->fmbm_rstc);
++
++ /* Performance counters */
++ fman_port_set_perf_cnt_params(port, &cfg->perf_cnt_params);
++ tmp = 0;
++ if (cfg->perf_counters_enable)
++ tmp = BMI_COUNTERS_EN;
++ iowrite32be(tmp, &regs->fmbm_rpc);
++
++ return 0;
++}
++
++static int init_bmi_tx(struct fman_port *port,
++ struct fman_port_cfg *cfg,
++ struct fman_port_params *params)
++{
++ struct fman_port_tx_bmi_regs *regs = &port->bmi_regs->tx;
++ uint32_t tmp;
++
++ /* Tx Configuration register */
++ tmp = 0;
++ if (port->im_en)
++ tmp |= BMI_PORT_CFG_IM;
++ iowrite32be(tmp, &regs->fmbm_tcfg);
++
++ /* DMA attributes */
++ tmp = (uint32_t)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
++ if (cfg->dma_ic_stash_on)
++ tmp |= BMI_DMA_ATTR_IC_STASH_ON;
++ if (cfg->dma_header_stash_on)
++ tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
++ if (cfg->dma_sg_stash_on)
++ tmp |= BMI_DMA_ATTR_SG_STASH_ON;
++ iowrite32be(tmp, &regs->fmbm_tda);
++
++ /* Tx FIFO parameters */
++ tmp = (cfg->tx_fifo_min_level / FMAN_PORT_BMI_FIFO_UNITS) <<
++ BMI_TX_FIFO_MIN_FILL_SHIFT;
++ tmp |= ((uint32_t)cfg->tx_fifo_deq_pipeline_depth - 1) <<
++ BMI_FIFO_PIPELINE_DEPTH_SHIFT;
++ tmp |= (uint32_t)(cfg->tx_fifo_low_comf_level /
++ FMAN_PORT_BMI_FIFO_UNITS - 1);
++ iowrite32be(tmp, &regs->fmbm_tfp);
++
++ /* Frame end data */
++ tmp = (uint32_t)cfg->checksum_bytes_ignore <<
++ BMI_FRAME_END_CS_IGNORE_SHIFT;
++ iowrite32be(tmp, &regs->fmbm_tfed);
++
++ /* Internal context parameters */
++ if (!port->im_en)
++ {
++ tmp = ((uint32_t)cfg->ic_ext_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
++ BMI_IC_TO_EXT_SHIFT;
++ tmp |= ((uint32_t)cfg->ic_int_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
++ BMI_IC_FROM_INT_SHIFT;
++ tmp |= cfg->ic_size / FMAN_PORT_IC_OFFSET_UNITS;
++ iowrite32be(tmp, &regs->fmbm_ticp);
++ }
++ /* Frame attributes */
++ tmp = BMI_CMD_TX_MR_DEF;
++ if (port->im_en)
++ tmp |= BMI_CMD_MR_DEAS;
++ else
++ {
++ tmp |= BMI_CMD_ATTR_ORDER;
++ tmp |= (uint32_t)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
++ }
++ iowrite32be(tmp, &regs->fmbm_tfca);
++
++ /* Dequeue NIA + enqueue NIA */
++ if (port->im_en)
++ {
++ iowrite32be(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_TX, &regs->fmbm_tfdne);
++ iowrite32be(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_TX, &regs->fmbm_tfene);
++ }
++ else
++ {
++ iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_tfdne);
++ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_tfene);
++ if (cfg->fmbm_tfne_has_features)
++ iowrite32be(!params->dflt_fqid ?
++ BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
++ NIA_BMI_AC_FETCH_ALL_FRAME, &regs->fmbm_tfne);
++ if (!params->dflt_fqid && params->dont_release_buf)
++ {
++ iowrite32be(0x00FFFFFF, &regs->fmbm_tcfqid);
++ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE, &regs->fmbm_tfene);
++ if (cfg->fmbm_tfne_has_features)
++ iowrite32be(ioread32be(&regs->fmbm_tfne) & ~BMI_EBD_EN, &regs->fmbm_tfne);
++ }
++ }
++
++ /* Confirmation/error queues */
++ if (!port->im_en)
++ {
++ if (params->dflt_fqid || !params->dont_release_buf)
++ iowrite32be(params->dflt_fqid & 0x00FFFFFF, &regs->fmbm_tcfqid);
++ iowrite32be((params->err_fqid & 0x00FFFFFF), &regs->fmbm_tefqid);
++ }
++ /* Statistics counters */
++ tmp = 0;
++ if (cfg->stats_counters_enable)
++ tmp = BMI_COUNTERS_EN;
++ iowrite32be(tmp, &regs->fmbm_tstc);
++
++ /* Performance counters */
++ fman_port_set_perf_cnt_params(port, &cfg->perf_cnt_params);
++ tmp = 0;
++ if (cfg->perf_counters_enable)
++ tmp = BMI_COUNTERS_EN;
++ iowrite32be(tmp, &regs->fmbm_tpc);
++
++ return 0;
++}
++
++static int init_bmi_oh(struct fman_port *port,
++ struct fman_port_cfg *cfg,
++ struct fman_port_params *params)
++{
++ struct fman_port_oh_bmi_regs *regs = &port->bmi_regs->oh;
++ uint32_t tmp;
++
++ /* OP Configuration register */
++ tmp = 0;
++ if (cfg->discard_override)
++ tmp |= BMI_PORT_CFG_FDOVR;
++ iowrite32be(tmp, &regs->fmbm_ocfg);
++
++ /* DMA attributes */
++ tmp = (uint32_t)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
++ if (cfg->dma_ic_stash_on)
++ tmp |= BMI_DMA_ATTR_IC_STASH_ON;
++ if (cfg->dma_header_stash_on)
++ tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
++ if (cfg->dma_sg_stash_on)
++ tmp |= BMI_DMA_ATTR_SG_STASH_ON;
++ if (cfg->dma_write_optimize)
++ tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
++ iowrite32be(tmp, &regs->fmbm_oda);
++
++ /* Tx FIFO parameters */
++ tmp = ((uint32_t)cfg->tx_fifo_deq_pipeline_depth - 1) <<
++ BMI_FIFO_PIPELINE_DEPTH_SHIFT;
++ iowrite32be(tmp, &regs->fmbm_ofp);
++
++ /* Internal context parameters */
++ tmp = ((uint32_t)cfg->ic_ext_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
++ BMI_IC_TO_EXT_SHIFT;
++ tmp |= ((uint32_t)cfg->ic_int_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
++ BMI_IC_FROM_INT_SHIFT;
++ tmp |= cfg->ic_size / FMAN_PORT_IC_OFFSET_UNITS;
++ iowrite32be(tmp, &regs->fmbm_oicp);
++
++ /* Frame attributes */
++ tmp = BMI_CMD_OP_MR_DEF;
++ tmp |= (uint32_t)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
++ if (cfg->sync_req)
++ tmp |= BMI_CMD_ATTR_SYNC;
++ if (port->type == E_FMAN_PORT_TYPE_OP)
++ tmp |= BMI_CMD_ATTR_ORDER;
++ iowrite32be(tmp, &regs->fmbm_ofca);
++
++ /* Internal buffer offset */
++ tmp = ((uint32_t)cfg->int_buf_start_margin / FMAN_PORT_IC_OFFSET_UNITS)
++ << BMI_INT_BUF_MARG_SHIFT;
++ iowrite32be(tmp, &regs->fmbm_oim);
++
++ /* Dequeue NIA */
++ iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_ofdne);
++
++ /* NIA and Enqueue NIA */
++ if (port->type == E_FMAN_PORT_TYPE_HC) {
++ iowrite32be(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_HC,
++ &regs->fmbm_ofne);
++ iowrite32be(NIA_ENG_QMI_ENQ, &regs->fmbm_ofene);
++ } else {
++ iowrite32be(get_no_pcd_nia_bmi_ac_enc_frame(cfg),
++ &regs->fmbm_ofne);
++ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR,
++ &regs->fmbm_ofene);
++ }
++
++ /* Default/error queues */
++ iowrite32be((params->dflt_fqid & 0x00FFFFFF), &regs->fmbm_ofqid);
++ iowrite32be((params->err_fqid & 0x00FFFFFF), &regs->fmbm_oefqid);
++
++ /* Discard/error masks */
++ if (port->type == E_FMAN_PORT_TYPE_OP) {
++ iowrite32be(params->discard_mask, &regs->fmbm_ofsdm);
++ iowrite32be(params->err_mask, &regs->fmbm_ofsem);
++ }
++
++ /* Statistics counters */
++ tmp = 0;
++ if (cfg->stats_counters_enable)
++ tmp = BMI_COUNTERS_EN;
++ iowrite32be(tmp, &regs->fmbm_ostc);
++
++ /* Performance counters */
++ fman_port_set_perf_cnt_params(port, &cfg->perf_cnt_params);
++ tmp = 0;
++ if (cfg->perf_counters_enable)
++ tmp = BMI_COUNTERS_EN;
++ iowrite32be(tmp, &regs->fmbm_opc);
++
++ return 0;
++}
++
++static int init_qmi(struct fman_port *port,
++ struct fman_port_cfg *cfg,
++ struct fman_port_params *params)
++{
++ struct fman_port_qmi_regs *regs = port->qmi_regs;
++ uint32_t tmp;
++
++ tmp = 0;
++ if (cfg->queue_counters_enable)
++ tmp |= QMI_PORT_CFG_EN_COUNTERS;
++ iowrite32be(tmp, &regs->fmqm_pnc);
++
++ /* Rx port configuration */
++ if ((port->type == E_FMAN_PORT_TYPE_RX) ||
++ (port->type == E_FMAN_PORT_TYPE_RX_10G)) {
++ /* Enqueue NIA */
++ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
++ return 0;
++ }
++
++ /* Continue with Tx and O/H port configuration */
++ if ((port->type == E_FMAN_PORT_TYPE_TX) ||
++ (port->type == E_FMAN_PORT_TYPE_TX_10G)) {
++ /* Enqueue NIA */
++ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
++ &regs->fmqm_pnen);
++ /* Dequeue NIA */
++ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, &regs->fmqm_pndn);
++ } else {
++ /* Enqueue NIA */
++ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
++ /* Dequeue NIA */
++ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_FETCH, &regs->fmqm_pndn);
++ }
++
++ /* Dequeue Configuration register */
++ tmp = 0;
++ if (cfg->deq_high_pri)
++ tmp |= QMI_DEQ_CFG_PRI;
++
++ switch (cfg->deq_type) {
++ case E_FMAN_PORT_DEQ_BY_PRI:
++ tmp |= QMI_DEQ_CFG_TYPE1;
++ break;
++ case E_FMAN_PORT_DEQ_ACTIVE_FQ:
++ tmp |= QMI_DEQ_CFG_TYPE2;
++ break;
++ case E_FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
++ tmp |= QMI_DEQ_CFG_TYPE3;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (cfg->qmi_deq_options_support) {
++ if ((port->type == E_FMAN_PORT_TYPE_HC) &&
++ (cfg->deq_prefetch_opt != E_FMAN_PORT_DEQ_NO_PREFETCH))
++ return -EINVAL;
++
++ switch (cfg->deq_prefetch_opt) {
++ case E_FMAN_PORT_DEQ_NO_PREFETCH:
++ break;
++ case E_FMAN_PORT_DEQ_PART_PREFETCH:
++ tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
++ break;
++ case E_FMAN_PORT_DEQ_FULL_PREFETCH:
++ tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++ tmp |= (uint32_t)(params->deq_sp & QMI_DEQ_CFG_SP_MASK) <<
++ QMI_DEQ_CFG_SP_SHIFT;
++ tmp |= cfg->deq_byte_cnt;
++ iowrite32be(tmp, &regs->fmqm_pndc);
++
++ return 0;
++}
++
++static void get_rx_stats_reg(struct fman_port *port,
++ enum fman_port_stats_counters counter,
++ uint32_t **stats_reg)
++{
++ struct fman_port_rx_bmi_regs *regs = &port->bmi_regs->rx;
++
++ switch (counter) {
++ case E_FMAN_PORT_STATS_CNT_FRAME:
++ *stats_reg = &regs->fmbm_rfrc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_DISCARD:
++ *stats_reg = &regs->fmbm_rfdc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_DEALLOC_BUF:
++ *stats_reg = &regs->fmbm_rbdc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_RX_BAD_FRAME:
++ *stats_reg = &regs->fmbm_rfbc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_RX_LARGE_FRAME:
++ *stats_reg = &regs->fmbm_rlfc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_RX_OUT_OF_BUF:
++ *stats_reg = &regs->fmbm_rodc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_FILTERED_FRAME:
++ *stats_reg = &regs->fmbm_rffc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_DMA_ERR:
++ *stats_reg = &regs->fmbm_rfldec;
++ break;
++ default:
++ *stats_reg = NULL;
++ }
++}
++
++static void get_tx_stats_reg(struct fman_port *port,
++ enum fman_port_stats_counters counter,
++ uint32_t **stats_reg)
++{
++ struct fman_port_tx_bmi_regs *regs = &port->bmi_regs->tx;
++
++ switch (counter) {
++ case E_FMAN_PORT_STATS_CNT_FRAME:
++ *stats_reg = &regs->fmbm_tfrc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_DISCARD:
++ *stats_reg = &regs->fmbm_tfdc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_DEALLOC_BUF:
++ *stats_reg = &regs->fmbm_tbdc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_LEN_ERR:
++ *stats_reg = &regs->fmbm_tfledc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT:
++ *stats_reg = &regs->fmbm_tfufdc;
++ break;
++ default:
++ *stats_reg = NULL;
++ }
++}
++
++static void get_oh_stats_reg(struct fman_port *port,
++ enum fman_port_stats_counters counter,
++ uint32_t **stats_reg)
++{
++ struct fman_port_oh_bmi_regs *regs = &port->bmi_regs->oh;
++
++ switch (counter) {
++ case E_FMAN_PORT_STATS_CNT_FRAME:
++ *stats_reg = &regs->fmbm_ofrc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_DISCARD:
++ *stats_reg = &regs->fmbm_ofdc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_DEALLOC_BUF:
++ *stats_reg = &regs->fmbm_obdc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_FILTERED_FRAME:
++ *stats_reg = &regs->fmbm_offc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_DMA_ERR:
++ *stats_reg = &regs->fmbm_ofldec;
++ break;
++ case E_FMAN_PORT_STATS_CNT_LEN_ERR:
++ *stats_reg = &regs->fmbm_ofledc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT:
++ *stats_reg = &regs->fmbm_ofufdc;
++ break;
++ case E_FMAN_PORT_STATS_CNT_WRED_DISCARD:
++ *stats_reg = &regs->fmbm_ofwdc;
++ break;
++ default:
++ *stats_reg = NULL;
++ }
++}
++
++static void get_rx_perf_reg(struct fman_port *port,
++ enum fman_port_perf_counters counter,
++ uint32_t **perf_reg)
++{
++ struct fman_port_rx_bmi_regs *regs = &port->bmi_regs->rx;
++
++ switch (counter) {
++ case E_FMAN_PORT_PERF_CNT_CYCLE:
++ *perf_reg = &regs->fmbm_rccn;
++ break;
++ case E_FMAN_PORT_PERF_CNT_TASK_UTIL:
++ *perf_reg = &regs->fmbm_rtuc;
++ break;
++ case E_FMAN_PORT_PERF_CNT_QUEUE_UTIL:
++ *perf_reg = &regs->fmbm_rrquc;
++ break;
++ case E_FMAN_PORT_PERF_CNT_DMA_UTIL:
++ *perf_reg = &regs->fmbm_rduc;
++ break;
++ case E_FMAN_PORT_PERF_CNT_FIFO_UTIL:
++ *perf_reg = &regs->fmbm_rfuc;
++ break;
++ case E_FMAN_PORT_PERF_CNT_RX_PAUSE:
++ *perf_reg = &regs->fmbm_rpac;
++ break;
++ default:
++ *perf_reg = NULL;
++ }
++}
++
++static void get_tx_perf_reg(struct fman_port *port,
++ enum fman_port_perf_counters counter,
++ uint32_t **perf_reg)
++{
++ struct fman_port_tx_bmi_regs *regs = &port->bmi_regs->tx;
++
++ switch (counter) {
++ case E_FMAN_PORT_PERF_CNT_CYCLE:
++ *perf_reg = &regs->fmbm_tccn;
++ break;
++ case E_FMAN_PORT_PERF_CNT_TASK_UTIL:
++ *perf_reg = &regs->fmbm_ttuc;
++ break;
++ case E_FMAN_PORT_PERF_CNT_QUEUE_UTIL:
++ *perf_reg = &regs->fmbm_ttcquc;
++ break;
++ case E_FMAN_PORT_PERF_CNT_DMA_UTIL:
++ *perf_reg = &regs->fmbm_tduc;
++ break;
++ case E_FMAN_PORT_PERF_CNT_FIFO_UTIL:
++ *perf_reg = &regs->fmbm_tfuc;
++ break;
++ default:
++ *perf_reg = NULL;
++ }
++}
++
++static void get_oh_perf_reg(struct fman_port *port,
++ enum fman_port_perf_counters counter,
++ uint32_t **perf_reg)
++{
++ struct fman_port_oh_bmi_regs *regs = &port->bmi_regs->oh;
++
++ switch (counter) {
++ case E_FMAN_PORT_PERF_CNT_CYCLE:
++ *perf_reg = &regs->fmbm_occn;
++ break;
++ case E_FMAN_PORT_PERF_CNT_TASK_UTIL:
++ *perf_reg = &regs->fmbm_otuc;
++ break;
++ case E_FMAN_PORT_PERF_CNT_DMA_UTIL:
++ *perf_reg = &regs->fmbm_oduc;
++ break;
++ case E_FMAN_PORT_PERF_CNT_FIFO_UTIL:
++ *perf_reg = &regs->fmbm_ofuc;
++ break;
++ default:
++ *perf_reg = NULL;
++ }
++}
++
++static void get_qmi_counter_reg(struct fman_port *port,
++ enum fman_port_qmi_counters counter,
++ uint32_t **queue_reg)
++{
++ struct fman_port_qmi_regs *regs = port->qmi_regs;
++
++ switch (counter) {
++ case E_FMAN_PORT_ENQ_TOTAL:
++ *queue_reg = &regs->fmqm_pnetfc;
++ break;
++ case E_FMAN_PORT_DEQ_TOTAL:
++ if ((port->type == E_FMAN_PORT_TYPE_RX) ||
++ (port->type == E_FMAN_PORT_TYPE_RX_10G))
++ /* Counter not available for Rx ports */
++ *queue_reg = NULL;
++ else
++ *queue_reg = &regs->fmqm_pndtfc;
++ break;
++ case E_FMAN_PORT_DEQ_FROM_DFLT:
++ if ((port->type == E_FMAN_PORT_TYPE_RX) ||
++ (port->type == E_FMAN_PORT_TYPE_RX_10G))
++ /* Counter not available for Rx ports */
++ *queue_reg = NULL;
++ else
++ *queue_reg = &regs->fmqm_pndfdc;
++ break;
++ case E_FMAN_PORT_DEQ_CONFIRM:
++ if ((port->type == E_FMAN_PORT_TYPE_RX) ||
++ (port->type == E_FMAN_PORT_TYPE_RX_10G))
++ /* Counter not available for Rx ports */
++ *queue_reg = NULL;
++ else
++ *queue_reg = &regs->fmqm_pndcc;
++ break;
++ default:
++ *queue_reg = NULL;
++ }
++}
++
++void fman_port_defconfig(struct fman_port_cfg *cfg, enum fman_port_type type)
++{
++ cfg->dma_swap_data = E_FMAN_PORT_DMA_NO_SWAP;
++ cfg->dma_ic_stash_on = FALSE;
++ cfg->dma_header_stash_on = FALSE;
++ cfg->dma_sg_stash_on = FALSE;
++ cfg->dma_write_optimize = TRUE;
++ cfg->color = E_FMAN_PORT_COLOR_GREEN;
++ cfg->discard_override = FALSE;
++ cfg->checksum_bytes_ignore = 0;
++ cfg->rx_cut_end_bytes = 4;
++ cfg->rx_pri_elevation = ((0x3FF + 1) * FMAN_PORT_BMI_FIFO_UNITS);
++ cfg->rx_fifo_thr = ((0x3FF + 1) * FMAN_PORT_BMI_FIFO_UNITS);
++ cfg->rx_fd_bits = 0;
++ cfg->ic_ext_offset = 0;
++ cfg->ic_int_offset = 0;
++ cfg->ic_size = 0;
++ cfg->int_buf_start_margin = 0;
++ cfg->ext_buf_start_margin = 0;
++ cfg->ext_buf_end_margin = 0;
++ cfg->tx_fifo_min_level = 0;
++ cfg->tx_fifo_low_comf_level = (5 * KILOBYTE);
++ cfg->stats_counters_enable = TRUE;
++ cfg->perf_counters_enable = TRUE;
++ cfg->deq_type = E_FMAN_PORT_DEQ_BY_PRI;
++
++ if (type == E_FMAN_PORT_TYPE_HC) {
++ cfg->sync_req = FALSE;
++ cfg->deq_prefetch_opt = E_FMAN_PORT_DEQ_NO_PREFETCH;
++ } else {
++ cfg->sync_req = TRUE;
++ cfg->deq_prefetch_opt = E_FMAN_PORT_DEQ_FULL_PREFETCH;
++ }
++
++ if (type == E_FMAN_PORT_TYPE_TX_10G) {
++ cfg->tx_fifo_deq_pipeline_depth = 4;
++ cfg->deq_high_pri = TRUE;
++ cfg->deq_byte_cnt = 0x1400;
++ } else {
++ if ((type == E_FMAN_PORT_TYPE_HC) ||
++ (type == E_FMAN_PORT_TYPE_OP))
++ cfg->tx_fifo_deq_pipeline_depth = 2;
++ else
++ cfg->tx_fifo_deq_pipeline_depth = 1;
++
++ cfg->deq_high_pri = FALSE;
++ cfg->deq_byte_cnt = 0x400;
++ }
++ cfg->no_scatter_gather = DEFAULT_FMAN_SP_NO_SCATTER_GATHER;
++}
++
++static uint8_t fman_port_find_bpool(struct fman_port *port, uint8_t bpid)
++{
++ uint32_t *bp_reg, tmp;
++ uint8_t i, id;
++
++ /* Find the pool */
++ bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
++ for (i = 0;
++ (i < port->ext_pools_num && (i < FMAN_PORT_MAX_EXT_POOLS_NUM));
++ i++) {
++ tmp = ioread32be(&bp_reg[i]);
++ id = (uint8_t)((tmp & BMI_EXT_BUF_POOL_ID_MASK) >>
++ BMI_EXT_BUF_POOL_ID_SHIFT);
++
++ if (id == bpid)
++ break;
++ }
++
++ return i;
++}
++
++int fman_port_init(struct fman_port *port,
++ struct fman_port_cfg *cfg,
++ struct fman_port_params *params)
++{
++ int err;
++
++ /* Init BMI registers */
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ err = init_bmi_rx(port, cfg, params);
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ err = init_bmi_tx(port, cfg, params);
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ err = init_bmi_oh(port, cfg, params);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (err)
++ return err;
++
++ /* Init QMI registers */
++ if (!port->im_en) {
++ err = init_qmi(port, cfg, params);
++ return err;
++ }
++ return 0;
++}
++
++int fman_port_enable(struct fman_port *port)
++{
++ uint32_t *bmi_cfg_reg, tmp;
++ bool rx_port;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
++ rx_port = TRUE;
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
++ rx_port = FALSE;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ bmi_cfg_reg = &port->bmi_regs->oh.fmbm_ocfg;
++ rx_port = FALSE;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Enable QMI */
++ if (!rx_port) {
++ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
++ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
++ }
++
++ /* Enable BMI */
++ tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
++ iowrite32be(tmp, bmi_cfg_reg);
++
++ return 0;
++}
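++
++/*
++ * Illustrative bring-up sketch, not part of the original driver: the
++ * expected call order is fman_port_defconfig() -> fman_port_init() ->
++ * fman_port_enable(). The wrapper function name is hypothetical and
++ * the block is compiled out.
++ */
++#if 0
++static int example_port_bringup(struct fman_port *port,
++ struct fman_port_params *params)
++{
++ struct fman_port_cfg cfg;
++ int err;
++
++ /* Start from the per-type defaults, then tweak cfg as needed */
++ fman_port_defconfig(&cfg, port->type);
++ err = fman_port_init(port, &cfg, params);
++ if (err)
++ return err;
++ return fman_port_enable(port);
++}
++#endif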
++
++int fman_port_disable(const struct fman_port *port)
++{
++ uint32_t *bmi_cfg_reg, *bmi_status_reg, tmp;
++ bool rx_port, failure = FALSE;
++ int count;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
++ bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
++ rx_port = TRUE;
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
++ bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
++ rx_port = FALSE;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ bmi_cfg_reg = &port->bmi_regs->oh.fmbm_ocfg;
++ bmi_status_reg = &port->bmi_regs->oh.fmbm_ost;
++ rx_port = FALSE;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Disable QMI */
++ if (!rx_port) {
++ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
++ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
++
++ /* Wait for QMI to finish FD handling */
++ count = 100;
++ do {
++ udelay(10);
++ tmp = ioread32be(&port->qmi_regs->fmqm_pns);
++ } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
++
++ if (count == 0) {
++ /* Timeout */
++ failure = TRUE;
++ }
++ }
++
++ /* Disable BMI */
++ tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
++ iowrite32be(tmp, bmi_cfg_reg);
++
++ /* Wait for graceful stop end */
++ count = 500;
++ do {
++ udelay(10);
++ tmp = ioread32be(bmi_status_reg);
++ } while ((tmp & BMI_PORT_STATUS_BSY) && --count);
++
++ if (count == 0) {
++ /* Timeout */
++ failure = TRUE;
++ }
++
++ if (failure)
++ return -EBUSY;
++
++ return 0;
++}
++
++int fman_port_set_bpools(const struct fman_port *port,
++ const struct fman_port_bpools *bp)
++{
++ uint32_t tmp, *bp_reg, *bp_depl_reg;
++ uint8_t i, max_bp_num;
++ bool grp_depl_used = FALSE, rx_port;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ max_bp_num = port->ext_pools_num;
++ rx_port = TRUE;
++ bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
++ bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ if (port->fm_rev_maj != 4)
++ return -EINVAL;
++ max_bp_num = FMAN_PORT_OBS_EXT_POOLS_NUM;
++ rx_port = FALSE;
++ bp_reg = port->bmi_regs->oh.fmbm_oebmpi;
++ bp_depl_reg = &port->bmi_regs->oh.fmbm_ompd;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (rx_port) {
++ /* Check buffers are provided in ascending order */
++ for (i = 0;
++ (i < (bp->count-1) && (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1));
++ i++) {
++ if (bp->bpool[i].size > bp->bpool[i+1].size)
++ return -EINVAL;
++ }
++ }
++
++ /* Set up external buffers pools */
++ for (i = 0; i < bp->count; i++) {
++ tmp = BMI_EXT_BUF_POOL_VALID;
++ tmp |= ((uint32_t)bp->bpool[i].bpid <<
++ BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;
++
++ if (rx_port) {
++ if (bp->counters_enable)
++ tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
++
++ if (bp->bpool[i].is_backup)
++ tmp |= BMI_EXT_BUF_POOL_BACKUP;
++
++ tmp |= (uint32_t)bp->bpool[i].size;
++ }
++
++ iowrite32be(tmp, &bp_reg[i]);
++ }
++
++ /* Clear unused pools */
++ for (i = bp->count; i < max_bp_num; i++)
++ iowrite32be(0, &bp_reg[i]);
++
++ /* Pools depletion */
++ tmp = 0;
++ for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
++ if (bp->bpool[i].grp_bp_depleted) {
++ grp_depl_used = TRUE;
++ tmp |= 0x80000000 >> i;
++ }
++
++ if (bp->bpool[i].single_bp_depleted)
++ tmp |= 0x80 >> i;
++
++ if (bp->bpool[i].pfc_priorities_en)
++ tmp |= 0x0100 << i;
++ }
++
++ if (grp_depl_used)
++ tmp |= ((uint32_t)bp->grp_bp_depleted_num - 1) <<
++ BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
++
++ iowrite32be(tmp, bp_depl_reg);
++ return 0;
++}
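++
++/*
++ * Minimal usage sketch, not part of the original driver: an Rx port
++ * with two external pools listed in ascending size order, as the
++ * check above requires. BPIDs and sizes are assumed values and the
++ * block is compiled out.
++ */
++#if 0
++static int example_set_two_bpools(const struct fman_port *port)
++{
++ struct fman_port_bpools bp = { 0 };
++
++ bp.count = 2;
++ bp.counters_enable = TRUE;
++ bp.bpool[0].bpid = 7;
++ bp.bpool[0].size = 512; /* smaller pool must come first */
++ bp.bpool[1].bpid = 8;
++ bp.bpool[1].size = 2048;
++ return fman_port_set_bpools(port, &bp);
++}
++#endif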
++
++int fman_port_set_rate_limiter(struct fman_port *port,
++ struct fman_port_rate_limiter *rate_limiter)
++{
++ uint32_t *rate_limit_reg, *rate_limit_scale_reg;
++ uint32_t granularity, tmp;
++ uint8_t usec_bit, factor;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ rate_limit_reg = &port->bmi_regs->tx.fmbm_trlmt;
++ rate_limit_scale_reg = &port->bmi_regs->tx.fmbm_trlmts;
++ granularity = BMI_RATE_LIMIT_GRAN_TX;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ rate_limit_reg = &port->bmi_regs->oh.fmbm_orlmt;
++ rate_limit_scale_reg = &port->bmi_regs->oh.fmbm_orlmts;
++ granularity = BMI_RATE_LIMIT_GRAN_OP;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Factor is per 1 usec count */
++ factor = 1;
++ usec_bit = rate_limiter->count_1micro_bit;
++
++ /* If the rate limit is too small for a 1 usec factor, adjust the
++ * timestamp scale and multiply the factor */
++ while (rate_limiter->rate < (granularity / factor)) {
++ if (usec_bit == 31)
++ /* Can't configure rate limiter - rate is too small */
++ return -EINVAL;
++
++ usec_bit++;
++ factor <<= 1;
++ }
++
++ /* Figure out the register value. The "while" loop above guarantees
++ * that (rate_limiter->rate * factor / granularity) >= 1 */
++ tmp = (uint32_t)(rate_limiter->rate * factor / granularity - 1);
++
++ /* Check rate limit isn't too large */
++ if (tmp >= BMI_RATE_LIMIT_MAX_RATE_IN_GRAN_UNITS)
++ return -EINVAL;
++
++ /* Check burst size is in allowed range */
++ if ((rate_limiter->burst_size == 0) ||
++ (rate_limiter->burst_size >
++ BMI_RATE_LIMIT_MAX_BURST_SIZE))
++ return -EINVAL;
++
++ tmp |= (uint32_t)(rate_limiter->burst_size - 1) <<
++ BMI_RATE_LIMIT_MAX_BURST_SHIFT;
++
++ if ((port->type == E_FMAN_PORT_TYPE_OP) &&
++ (port->fm_rev_maj == 4)) {
++ if (rate_limiter->high_burst_size_gran)
++ tmp |= BMI_RATE_LIMIT_HIGH_BURST_SIZE_GRAN;
++ }
++
++ iowrite32be(tmp, rate_limit_reg);
++
++ /* Set up rate limiter scale register */
++ tmp = BMI_RATE_LIMIT_SCALE_EN;
++ tmp |= (31 - (uint32_t)usec_bit) << BMI_RATE_LIMIT_SCALE_TSBS_SHIFT;
++
++ if ((port->type == E_FMAN_PORT_TYPE_OP) &&
++ (port->fm_rev_maj == 4))
++ tmp |= rate_limiter->rate_factor;
++
++ iowrite32be(tmp, rate_limit_scale_reg);
++
++ return 0;
++}
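++
++/*
++ * Worked example for the scaling loop above, with assumed values: for
++ * granularity 16000 and rate_limiter->rate 1000 the loop doubles the
++ * factor four times (1 -> 16) and raises usec_bit by 4, after which
++ * 1000 * 16 / 16000 - 1 = 0 is programmed as the rate field, the
++ * smallest value expressible at that timestamp scale.
++ */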
++
++int fman_port_delete_rate_limiter(struct fman_port *port)
++{
++ uint32_t *rate_limit_scale_reg;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ rate_limit_scale_reg = &port->bmi_regs->tx.fmbm_trlmts;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ rate_limit_scale_reg = &port->bmi_regs->oh.fmbm_orlmts;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ iowrite32be(0, rate_limit_scale_reg);
++ return 0;
++}
++
++int fman_port_set_err_mask(struct fman_port *port, uint32_t err_mask)
++{
++ uint32_t *err_mask_reg;
++
++ /* Obtain register address */
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ err_mask_reg = &port->bmi_regs->rx.fmbm_rfsem;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ err_mask_reg = &port->bmi_regs->oh.fmbm_ofsem;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ iowrite32be(err_mask, err_mask_reg);
++ return 0;
++}
++
++int fman_port_set_discard_mask(struct fman_port *port, uint32_t discard_mask)
++{
++ uint32_t *discard_mask_reg;
++
++ /* Obtain register address */
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ discard_mask_reg = &port->bmi_regs->rx.fmbm_rfsdm;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ discard_mask_reg = &port->bmi_regs->oh.fmbm_ofsdm;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ iowrite32be(discard_mask, discard_mask_reg);
++ return 0;
++}
++
++int fman_port_modify_rx_fd_bits(struct fman_port *port,
++ uint8_t rx_fd_bits,
++ bool add)
++{
++ uint32_t tmp;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ tmp = ioread32be(&port->bmi_regs->rx.fmbm_rfne);
++
++ if (add)
++ tmp |= (uint32_t)rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
++ else
++ tmp &= ~((uint32_t)rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT);
++
++ iowrite32be(tmp, &port->bmi_regs->rx.fmbm_rfne);
++ return 0;
++}
++
++int fman_port_set_perf_cnt_params(struct fman_port *port,
++ struct fman_port_perf_cnt_params *params)
++{
++ uint32_t *pcp_reg, tmp;
++
++ /* Obtain register address and check parameters are in range */
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ pcp_reg = &port->bmi_regs->rx.fmbm_rpcp;
++ if ((params->queue_val == 0) ||
++ (params->queue_val > MAX_PERFORMANCE_RX_QUEUE_COMP))
++ return -EINVAL;
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ pcp_reg = &port->bmi_regs->tx.fmbm_tpcp;
++ if ((params->queue_val == 0) ||
++ (params->queue_val > MAX_PERFORMANCE_TX_QUEUE_COMP))
++ return -EINVAL;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ pcp_reg = &port->bmi_regs->oh.fmbm_opcp;
++ if (params->queue_val != 0)
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if ((params->task_val == 0) ||
++ (params->task_val > MAX_PERFORMANCE_TASK_COMP))
++ return -EINVAL;
++ if ((params->dma_val == 0) ||
++ (params->dma_val > MAX_PERFORMANCE_DMA_COMP))
++ return -EINVAL;
++ if ((params->fifo_val == 0) ||
++ ((params->fifo_val / FMAN_PORT_BMI_FIFO_UNITS) >
++ MAX_PERFORMANCE_FIFO_COMP))
++ return -EINVAL;
++ tmp = (uint32_t)(params->task_val - 1) <<
++ BMI_PERFORMANCE_TASK_COMP_SHIFT;
++ tmp |= (uint32_t)(params->dma_val - 1) <<
++ BMI_PERFORMANCE_DMA_COMP_SHIFT;
++ tmp |= (uint32_t)(params->fifo_val / FMAN_PORT_BMI_FIFO_UNITS - 1);
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ tmp |= (uint32_t)(params->queue_val - 1) <<
++ BMI_PERFORMANCE_QUEUE_COMP_SHIFT;
++ break;
++ default:
++ break;
++ }
++
++ iowrite32be(tmp, pcp_reg);
++ return 0;
++}
++
++int fman_port_set_stats_cnt_mode(struct fman_port *port, bool enable)
++{
++ uint32_t *stats_reg, tmp;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ stats_reg = &port->bmi_regs->rx.fmbm_rstc;
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ stats_reg = &port->bmi_regs->tx.fmbm_tstc;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ stats_reg = &port->bmi_regs->oh.fmbm_ostc;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ tmp = ioread32be(stats_reg);
++
++ if (enable)
++ tmp |= BMI_COUNTERS_EN;
++ else
++ tmp &= ~BMI_COUNTERS_EN;
++
++ iowrite32be(tmp, stats_reg);
++ return 0;
++}
++
++int fman_port_set_perf_cnt_mode(struct fman_port *port, bool enable)
++{
++ uint32_t *stats_reg, tmp;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ stats_reg = &port->bmi_regs->rx.fmbm_rpc;
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ stats_reg = &port->bmi_regs->tx.fmbm_tpc;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ stats_reg = &port->bmi_regs->oh.fmbm_opc;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ tmp = ioread32be(stats_reg);
++
++ if (enable)
++ tmp |= BMI_COUNTERS_EN;
++ else
++ tmp &= ~BMI_COUNTERS_EN;
++
++ iowrite32be(tmp, stats_reg);
++ return 0;
++}
++
++int fman_port_set_queue_cnt_mode(struct fman_port *port, bool enable)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&port->qmi_regs->fmqm_pnc);
++
++ if (enable)
++ tmp |= QMI_PORT_CFG_EN_COUNTERS;
++ else
++ tmp &= ~QMI_PORT_CFG_EN_COUNTERS;
++
++ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
++ return 0;
++}
++
++int fman_port_set_bpool_cnt_mode(struct fman_port *port,
++ uint8_t bpid,
++ bool enable)
++{
++ uint8_t index;
++ uint32_t tmp;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Find the pool */
++ index = fman_port_find_bpool(port, bpid);
++ if (index == port->ext_pools_num || index == FMAN_PORT_MAX_EXT_POOLS_NUM)
++ /* Not found */
++ return -EINVAL;
++
++ tmp = ioread32be(&port->bmi_regs->rx.fmbm_ebmpi[index]);
++
++ if (enable)
++ tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
++ else
++ tmp &= ~BMI_EXT_BUF_POOL_EN_COUNTER;
++
++ iowrite32be(tmp, &port->bmi_regs->rx.fmbm_ebmpi[index]);
++ return 0;
++}
++
++uint32_t fman_port_get_stats_counter(struct fman_port *port,
++ enum fman_port_stats_counters counter)
++{
++ uint32_t *stats_reg, ret_val;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ get_rx_stats_reg(port, counter, &stats_reg);
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ get_tx_stats_reg(port, counter, &stats_reg);
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ get_oh_stats_reg(port, counter, &stats_reg);
++ break;
++ default:
++ stats_reg = NULL;
++ }
++
++ if (stats_reg == NULL)
++ return 0;
++
++ ret_val = ioread32be(stats_reg);
++ return ret_val;
++}
++
++void fman_port_set_stats_counter(struct fman_port *port,
++ enum fman_port_stats_counters counter,
++ uint32_t value)
++{
++ uint32_t *stats_reg;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ get_rx_stats_reg(port, counter, &stats_reg);
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ get_tx_stats_reg(port, counter, &stats_reg);
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ get_oh_stats_reg(port, counter, &stats_reg);
++ break;
++ default:
++ stats_reg = NULL;
++ }
++
++ if (stats_reg == NULL)
++ return;
++
++ iowrite32be(value, stats_reg);
++}
++
++uint32_t fman_port_get_perf_counter(struct fman_port *port,
++ enum fman_port_perf_counters counter)
++{
++ uint32_t *perf_reg, ret_val;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ get_rx_perf_reg(port, counter, &perf_reg);
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ get_tx_perf_reg(port, counter, &perf_reg);
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ get_oh_perf_reg(port, counter, &perf_reg);
++ break;
++ default:
++ perf_reg = NULL;
++ }
++
++ if (perf_reg == NULL)
++ return 0;
++
++ ret_val = ioread32be(perf_reg);
++ return ret_val;
++}
++
++void fman_port_set_perf_counter(struct fman_port *port,
++ enum fman_port_perf_counters counter,
++ uint32_t value)
++{
++ uint32_t *perf_reg;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ get_rx_perf_reg(port, counter, &perf_reg);
++ break;
++ case E_FMAN_PORT_TYPE_TX:
++ case E_FMAN_PORT_TYPE_TX_10G:
++ get_tx_perf_reg(port, counter, &perf_reg);
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ case E_FMAN_PORT_TYPE_HC:
++ get_oh_perf_reg(port, counter, &perf_reg);
++ break;
++ default:
++ perf_reg = NULL;
++ }
++
++ if (perf_reg == NULL)
++ return;
++
++ iowrite32be(value, perf_reg);
++}
++
++uint32_t fman_port_get_qmi_counter(struct fman_port *port,
++ enum fman_port_qmi_counters counter)
++{
++ uint32_t *queue_reg, ret_val;
++
++ get_qmi_counter_reg(port, counter, &queue_reg);
++
++ if (queue_reg == NULL)
++ return 0;
++
++ ret_val = ioread32be(queue_reg);
++ return ret_val;
++}
++
++void fman_port_set_qmi_counter(struct fman_port *port,
++ enum fman_port_qmi_counters counter,
++ uint32_t value)
++{
++ uint32_t *queue_reg;
++
++ get_qmi_counter_reg(port, counter, &queue_reg);
++
++ if (queue_reg == NULL)
++ return;
++
++ iowrite32be(value, queue_reg);
++}
++
++uint32_t fman_port_get_bpool_counter(struct fman_port *port, uint8_t bpid)
++{
++ uint8_t index;
++ uint32_t ret_val;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ break;
++ default:
++ return 0;
++ }
++
++ /* Find the pool */
++ index = fman_port_find_bpool(port, bpid);
++ if (index == port->ext_pools_num || index == FMAN_PORT_MAX_EXT_POOLS_NUM)
++ /* Not found */
++ return 0;
++
++ ret_val = ioread32be(&port->bmi_regs->rx.fmbm_acnt[index]);
++ return ret_val;
++}
++
++void fman_port_set_bpool_counter(struct fman_port *port,
++ uint8_t bpid,
++ uint32_t value)
++{
++ uint8_t index;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ break;
++ default:
++ return;
++ }
++
++ /* Find the pool */
++ index = fman_port_find_bpool(port, bpid);
++ if (index == port->ext_pools_num || index == FMAN_PORT_MAX_EXT_POOLS_NUM)
++ /* Not found */
++ return;
++
++ iowrite32be(value, &port->bmi_regs->rx.fmbm_acnt[index]);
++}
++
++int fman_port_add_congestion_grps(struct fman_port *port,
++ uint32_t grps_map[FMAN_PORT_CG_MAP_NUM])
++{
++ int i;
++ uint32_t tmp, *grp_map_reg;
++ uint8_t max_grp_map_num;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ if (port->fm_rev_maj == 4)
++ max_grp_map_num = 1;
++ else
++ max_grp_map_num = FMAN_PORT_CG_MAP_NUM;
++ grp_map_reg = port->bmi_regs->rx.fmbm_rcgm;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ max_grp_map_num = 1;
++ if (port->fm_rev_maj != 4)
++ return -EINVAL;
++ grp_map_reg = port->bmi_regs->oh.fmbm_ocgm;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ for (i = (max_grp_map_num - 1); i >= 0; i--) {
++ if (grps_map[i] == 0)
++ continue;
++ tmp = ioread32be(&grp_map_reg[i]);
++ tmp |= grps_map[i];
++ iowrite32be(tmp, &grp_map_reg[i]);
++ }
++
++ return 0;
++}
++
++int fman_port_remove_congestion_grps(struct fman_port *port,
++ uint32_t grps_map[FMAN_PORT_CG_MAP_NUM])
++{
++ int i;
++ uint32_t tmp, *grp_map_reg;
++ uint8_t max_grp_map_num;
++
++ switch (port->type) {
++ case E_FMAN_PORT_TYPE_RX:
++ case E_FMAN_PORT_TYPE_RX_10G:
++ if (port->fm_rev_maj == 4)
++ max_grp_map_num = 1;
++ else
++ max_grp_map_num = FMAN_PORT_CG_MAP_NUM;
++ grp_map_reg = port->bmi_regs->rx.fmbm_rcgm;
++ break;
++ case E_FMAN_PORT_TYPE_OP:
++ max_grp_map_num = 1;
++ if (port->fm_rev_maj != 4)
++ return -EINVAL;
++ grp_map_reg = port->bmi_regs->oh.fmbm_ocgm;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ for (i = (max_grp_map_num - 1); i >= 0; i--) {
++ if (grps_map[i] == 0)
++ continue;
++ tmp = ioread32be(&grp_map_reg[i]);
++ tmp &= ~grps_map[i];
++ iowrite32be(tmp, &grp_map_reg[i]);
++ }
++ return 0;
++}
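++
++/*
++ * Illustrative call, not part of the original driver: enable a single
++ * congestion group on an Rx port. The bit position chosen for the
++ * group is an assumption made for the example, and the block is
++ * compiled out.
++ */
++#if 0
++static int example_add_one_congestion_grp(struct fman_port *port)
++{
++ uint32_t grps_map[FMAN_PORT_CG_MAP_NUM] = { 0 };
++
++ grps_map[0] = 0x00000008; /* assumed bit for one congestion group */
++ return fman_port_add_congestion_grps(port, grps_map);
++}
++#endif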
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/Makefile b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/Makefile
+new file mode 100644
+index 00000000..d2c21d34
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/Makefile
+@@ -0,0 +1,15 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
++
++ccflags-y += -I$(NCSW_FM_INC)
++
++obj-y += fsl-ncsw-RTC.o
++
++fsl-ncsw-RTC-objs := fm_rtc.o fman_rtc.o
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c
+new file mode 100644
+index 00000000..99de427b
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c
+@@ -0,0 +1,692 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_rtc.c
++
++ @Description FM RTC driver implementation.
++
++ @Cautions None
++*//***************************************************************************/
++#include <linux/math64.h>
++#include "error_ext.h"
++#include "debug_ext.h"
++#include "string_ext.h"
++#include "part_ext.h"
++#include "xx_ext.h"
++#include "ncsw_ext.h"
++
++#include "fm_rtc.h"
++#include "fm_common.h"
++
++
++
++/*****************************************************************************/
++static t_Error CheckInitParameters(t_FmRtc *p_Rtc)
++{
++ struct rtc_cfg *p_RtcDriverParam = p_Rtc->p_RtcDriverParam;
++ int i;
++
++ if ((p_RtcDriverParam->src_clk != E_FMAN_RTC_SOURCE_CLOCK_EXTERNAL) &&
++ (p_RtcDriverParam->src_clk != E_FMAN_RTC_SOURCE_CLOCK_SYSTEM) &&
++ (p_RtcDriverParam->src_clk != E_FMAN_RTC_SOURCE_CLOCK_OSCILATOR))
++ RETURN_ERROR(MAJOR, E_INVALID_CLOCK, ("Source clock undefined"));
++
++ if (p_Rtc->outputClockDivisor == 0)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("Divisor for output clock (should be positive)"));
++ }
++
++ for (i=0; i < FM_RTC_NUM_OF_ALARMS; i++)
++ {
++ if ((p_RtcDriverParam->alarm_polarity[i] != E_FMAN_RTC_ALARM_POLARITY_ACTIVE_LOW) &&
++ (p_RtcDriverParam->alarm_polarity[i] != E_FMAN_RTC_ALARM_POLARITY_ACTIVE_HIGH))
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm %d signal polarity", i));
++ }
++ }
++ for (i=0; i < FM_RTC_NUM_OF_EXT_TRIGGERS; i++)
++ {
++ if ((p_RtcDriverParam->trigger_polarity[i] != E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE) &&
++ (p_RtcDriverParam->trigger_polarity[i] != E_FMAN_RTC_TRIGGER_ON_RISING_EDGE))
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Trigger %d signal polarity", i));
++ }
++ }
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++static void RtcExceptions(t_Handle h_FmRtc)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++ struct rtc_regs *p_MemMap;
++ register uint32_t events;
++
++ ASSERT_COND(p_Rtc);
++ p_MemMap = p_Rtc->p_MemMap;
++
++ events = fman_rtc_check_and_clear_event(p_MemMap);
++ if (events & FMAN_RTC_TMR_TEVENT_ALM1)
++ {
++ if (p_Rtc->alarmParams[0].clearOnExpiration)
++ {
++ fman_rtc_set_timer_alarm_l(p_MemMap, 0, 0);
++ fman_rtc_disable_interupt(p_MemMap, FMAN_RTC_TMR_TEVENT_ALM1);
++ }
++ ASSERT_COND(p_Rtc->alarmParams[0].f_AlarmCallback);
++ p_Rtc->alarmParams[0].f_AlarmCallback(p_Rtc->h_App, 0);
++ }
++ if (events & FMAN_RTC_TMR_TEVENT_ALM2)
++ {
++ if (p_Rtc->alarmParams[1].clearOnExpiration)
++ {
++ fman_rtc_set_timer_alarm_l(p_MemMap, 1, 0);
++ fman_rtc_disable_interupt(p_MemMap, FMAN_RTC_TMR_TEVENT_ALM2);
++ }
++ ASSERT_COND(p_Rtc->alarmParams[1].f_AlarmCallback);
++ p_Rtc->alarmParams[1].f_AlarmCallback(p_Rtc->h_App, 1);
++ }
++ if (events & FMAN_RTC_TMR_TEVENT_PP1)
++ {
++ ASSERT_COND(p_Rtc->periodicPulseParams[0].f_PeriodicPulseCallback);
++ p_Rtc->periodicPulseParams[0].f_PeriodicPulseCallback(p_Rtc->h_App, 0);
++ }
++ if (events & FMAN_RTC_TMR_TEVENT_PP2)
++ {
++ ASSERT_COND(p_Rtc->periodicPulseParams[1].f_PeriodicPulseCallback);
++ p_Rtc->periodicPulseParams[1].f_PeriodicPulseCallback(p_Rtc->h_App, 1);
++ }
++ if (events & FMAN_RTC_TMR_TEVENT_ETS1)
++ {
++ ASSERT_COND(p_Rtc->externalTriggerParams[0].f_ExternalTriggerCallback);
++ p_Rtc->externalTriggerParams[0].f_ExternalTriggerCallback(p_Rtc->h_App, 0);
++ }
++ if (events & FMAN_RTC_TMR_TEVENT_ETS2)
++ {
++ ASSERT_COND(p_Rtc->externalTriggerParams[1].f_ExternalTriggerCallback);
++ p_Rtc->externalTriggerParams[1].f_ExternalTriggerCallback(p_Rtc->h_App, 1);
++ }
++}
++
++
++/*****************************************************************************/
++t_Handle FM_RTC_Config(t_FmRtcParams *p_FmRtcParam)
++{
++ t_FmRtc *p_Rtc;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmRtcParam, E_NULL_POINTER, NULL);
++
++ /* Allocate memory for the FM RTC driver parameters */
++ p_Rtc = (t_FmRtc *)XX_Malloc(sizeof(t_FmRtc));
++ if (!p_Rtc)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM RTC driver structure"));
++ return NULL;
++ }
++
++ memset(p_Rtc, 0, sizeof(t_FmRtc));
++
++ /* Allocate memory for the FM RTC driver parameters */
++ p_Rtc->p_RtcDriverParam = (struct rtc_cfg *)XX_Malloc(sizeof(struct rtc_cfg));
++ if (!p_Rtc->p_RtcDriverParam)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM RTC driver parameters"));
++ XX_Free(p_Rtc);
++ return NULL;
++ }
++
++ memset(p_Rtc->p_RtcDriverParam, 0, sizeof(struct rtc_cfg));
++
++ /* Store RTC configuration parameters */
++ p_Rtc->h_Fm = p_FmRtcParam->h_Fm;
++
++ /* Set default RTC configuration parameters */
++ fman_rtc_defconfig(p_Rtc->p_RtcDriverParam);
++
++ p_Rtc->outputClockDivisor = DEFAULT_OUTPUT_CLOCK_DIVISOR;
++ p_Rtc->p_RtcDriverParam->bypass = DEFAULT_BYPASS;
++ p_Rtc->clockPeriodNanoSec = DEFAULT_CLOCK_PERIOD; /* 1 usec */
++
++
++ /* Store RTC parameters in the RTC control structure */
++ p_Rtc->p_MemMap = (struct rtc_regs *)UINT_TO_PTR(p_FmRtcParam->baseAddress);
++ p_Rtc->h_App = p_FmRtcParam->h_App;
++
++ return p_Rtc;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_Init(t_Handle h_FmRtc)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++ struct rtc_cfg *p_RtcDriverParam;
++ struct rtc_regs *p_MemMap;
++ uint32_t freqCompensation = 0;
++ uint64_t tmpDouble;
++ bool init_freq_comp = FALSE;
++
++ p_RtcDriverParam = p_Rtc->p_RtcDriverParam;
++ p_MemMap = p_Rtc->p_MemMap;
++
++ if (CheckInitParameters(p_Rtc)!=E_OK)
++ RETURN_ERROR(MAJOR, E_CONFLICT,
++ ("Init Parameters are not Valid"));
++
++ /* TODO check that no timestamping MACs are running at this stage. */
++
++ /* find source clock frequency in Mhz */
++ if (p_Rtc->p_RtcDriverParam->src_clk != E_FMAN_RTC_SOURCE_CLOCK_SYSTEM)
++ p_Rtc->srcClkFreqMhz = p_Rtc->p_RtcDriverParam->ext_src_clk_freq;
++ else
++ p_Rtc->srcClkFreqMhz = (uint32_t)(FmGetMacClockFreq(p_Rtc->h_Fm));
++
++ /* if timer in Master mode Initialize TMR_CTRL */
++ /* We want the counter (TMR_CNT) to count in nano-seconds */
++ if (!p_RtcDriverParam->timer_slave_mode && p_Rtc->p_RtcDriverParam->bypass)
++ p_Rtc->clockPeriodNanoSec = (1000 / p_Rtc->srcClkFreqMhz);
++ else
++ {
++ /* Initialize TMR_ADD with the initial frequency compensation value:
++ * freqCompensation = 2^32 / frequency ratio, where
++ * frequency ratio = source clock / rtc clock =
++ * (p_Rtc->srcClkFreqMhz * 10^6) / (10^9 / p_Rtc->clockPeriodNanoSec) =
++ * (p_Rtc->srcClkFreqMhz * p_Rtc->clockPeriodNanoSec) / 1000 */
++ init_freq_comp = TRUE;
++ freqCompensation = (uint32_t)DIV_CEIL(ACCUMULATOR_OVERFLOW * 1000,
++ p_Rtc->clockPeriodNanoSec * p_Rtc->srcClkFreqMhz);
++ }
++
++ /* Sanity-check the ratio between the source and destination clocks;
++ * it must be larger than 1.0001 */
++ tmpDouble = 10000 * (uint64_t)p_Rtc->clockPeriodNanoSec * (uint64_t)p_Rtc->srcClkFreqMhz;
++ if ((tmpDouble) <= 10001)
++ RETURN_ERROR(MAJOR, E_CONFLICT,
++ ("Invalid relation between source and destination clocks. Should be larger than 1.0001"));
++
++ fman_rtc_init(p_RtcDriverParam,
++ p_MemMap,
++ FM_RTC_NUM_OF_ALARMS,
++ FM_RTC_NUM_OF_PERIODIC_PULSES,
++ FM_RTC_NUM_OF_EXT_TRIGGERS,
++ init_freq_comp,
++ freqCompensation,
++ p_Rtc->outputClockDivisor);
++
++ /* Register the FM RTC interrupt */
++ FmRegisterIntr(p_Rtc->h_Fm, e_FM_MOD_TMR, 0, e_FM_INTR_TYPE_NORMAL, RtcExceptions , p_Rtc);
++
++ /* Free parameters structures */
++ XX_Free(p_Rtc->p_RtcDriverParam);
++ p_Rtc->p_RtcDriverParam = NULL;
++
++ return E_OK;
++}
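++
++/*
++ * Worked example for the TMR_ADD computation above, with assumed
++ * values: srcClkFreqMhz = 600 and clockPeriodNanoSec = 10 (a 100 MHz
++ * RTC clock) give a frequency ratio of 6, so
++ * freqCompensation = DIV_CEIL(2^32 * 1000, 10 * 600) = 715827883,
++ * i.e. approximately 2^32 / 6.
++ */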
++
++/*****************************************************************************/
++t_Error FM_RTC_Free(t_Handle h_FmRtc)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++
++ if (p_Rtc->p_RtcDriverParam)
++ {
++ XX_Free(p_Rtc->p_RtcDriverParam);
++ }
++ else
++ {
++ FM_RTC_Disable(h_FmRtc);
++ }
++
++ /* Unregister FM RTC interrupt */
++ FmUnregisterIntr(p_Rtc->h_Fm, e_FM_MOD_TMR, 0, e_FM_INTR_TYPE_NORMAL);
++ XX_Free(p_Rtc);
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ConfigSourceClock(t_Handle h_FmRtc,
++ e_FmSrcClk srcClk,
++ uint32_t freqInMhz)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ p_Rtc->p_RtcDriverParam->src_clk = (enum fman_src_clock)srcClk;
++ if (srcClk != e_FM_RTC_SOURCE_CLOCK_SYSTEM)
++ p_Rtc->p_RtcDriverParam->ext_src_clk_freq = freqInMhz;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ConfigPeriod(t_Handle h_FmRtc, uint32_t period)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ p_Rtc->clockPeriodNanoSec = period;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ConfigFrequencyBypass(t_Handle h_FmRtc, bool enabled)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ p_Rtc->p_RtcDriverParam->bypass = enabled;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ConfigInvertedInputClockPhase(t_Handle h_FmRtc, bool inverted)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ p_Rtc->p_RtcDriverParam->invert_input_clk_phase = inverted;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ConfigInvertedOutputClockPhase(t_Handle h_FmRtc, bool inverted)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ p_Rtc->p_RtcDriverParam->invert_output_clk_phase = inverted;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ConfigOutputClockDivisor(t_Handle h_FmRtc, uint16_t divisor)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ p_Rtc->outputClockDivisor = divisor;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ConfigPulseRealignment(t_Handle h_FmRtc, bool enable)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ p_Rtc->p_RtcDriverParam->pulse_realign = enable;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ConfigAlarmPolarity(t_Handle h_FmRtc,
++ uint8_t alarmId,
++ e_FmRtcAlarmPolarity alarmPolarity)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ if (alarmId >= FM_RTC_NUM_OF_ALARMS)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm ID"));
++
++ p_Rtc->p_RtcDriverParam->alarm_polarity[alarmId] =
++ (enum fman_rtc_alarm_polarity)alarmPolarity;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ConfigExternalTriggerPolarity(t_Handle h_FmRtc,
++ uint8_t triggerId,
++ e_FmRtcTriggerPolarity triggerPolarity)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ if (triggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External trigger ID"));
++ }
++
++ p_Rtc->p_RtcDriverParam->trigger_polarity[triggerId] =
++ (enum fman_rtc_trigger_polarity)triggerPolarity;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_Enable(t_Handle h_FmRtc, bool resetClock)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ fman_rtc_enable(p_Rtc->p_MemMap, resetClock);
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_Disable(t_Handle h_FmRtc)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ /* TODO: add a check here that no timestamping MACs are
++ * running at this stage. */
++ fman_rtc_disable(p_Rtc->p_MemMap);
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_SetClockOffset(t_Handle h_FmRtc, int64_t offset)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ fman_rtc_set_timer_offset(p_Rtc->p_MemMap, offset);
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_SetAlarm(t_Handle h_FmRtc, t_FmRtcAlarmParams *p_FmRtcAlarmParams)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++ uint64_t tmpAlarm;
++ bool enable = FALSE;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ if (p_FmRtcAlarmParams->alarmId >= FM_RTC_NUM_OF_ALARMS)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm ID"));
++ }
++
++ if (p_FmRtcAlarmParams->alarmTime < p_Rtc->clockPeriodNanoSec)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
++ ("Alarm time must be equal or larger than RTC period - %d nanoseconds",
++ p_Rtc->clockPeriodNanoSec));
++ tmpAlarm = p_FmRtcAlarmParams->alarmTime;
++ if (do_div(tmpAlarm, p_Rtc->clockPeriodNanoSec))
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
++ ("Alarm time must be a multiple of RTC period - %d nanoseconds",
++ p_Rtc->clockPeriodNanoSec));
++
++ if (p_FmRtcAlarmParams->f_AlarmCallback)
++ {
++ p_Rtc->alarmParams[p_FmRtcAlarmParams->alarmId].f_AlarmCallback = p_FmRtcAlarmParams->f_AlarmCallback;
++ p_Rtc->alarmParams[p_FmRtcAlarmParams->alarmId].clearOnExpiration = p_FmRtcAlarmParams->clearOnExpiration;
++ enable = TRUE;
++ }
++
++ fman_rtc_set_alarm(p_Rtc->p_MemMap, p_FmRtcAlarmParams->alarmId, (unsigned long)tmpAlarm, enable);
++
++ return E_OK;
++}
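++
++/*
++ * Illustrative call, not part of the original driver: arm alarm 0 to
++ * fire once, 1 ms into RTC time, assuming the alarm time is a multiple
++ * of the RTC period. The callback name is hypothetical and the block
++ * is compiled out.
++ */
++#if 0
++static t_Error example_arm_alarm0(t_Handle h_FmRtc)
++{
++ t_FmRtcAlarmParams alarm;
++
++ memset(&alarm, 0, sizeof(alarm));
++ alarm.alarmId = 0;
++ alarm.alarmTime = 1000000; /* ns, must be a multiple of the RTC period */
++ alarm.f_AlarmCallback = my_alarm_cb; /* hypothetical callback */
++ alarm.clearOnExpiration = TRUE;
++ return FM_RTC_SetAlarm(h_FmRtc, &alarm);
++}
++#endif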
++
++/*****************************************************************************/
++t_Error FM_RTC_SetPeriodicPulse(t_Handle h_FmRtc, t_FmRtcPeriodicPulseParams *p_FmRtcPeriodicPulseParams)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++ bool enable = FALSE;
++ uint64_t tmpFiper;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ if (p_FmRtcPeriodicPulseParams->periodicPulseId >= FM_RTC_NUM_OF_PERIODIC_PULSES)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Periodic pulse ID"));
++ }
++ if (fman_rtc_is_enabled(p_Rtc->p_MemMap))
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Can't set Periodic pulse when RTC is enabled."));
++ if (p_FmRtcPeriodicPulseParams->periodicPulsePeriod < p_Rtc->clockPeriodNanoSec)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
++ ("Periodic pulse must be equal or larger than RTC period - %d nanoseconds",
++ p_Rtc->clockPeriodNanoSec));
++ tmpFiper = p_FmRtcPeriodicPulseParams->periodicPulsePeriod;
++ if (do_div(tmpFiper, p_Rtc->clockPeriodNanoSec))
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
++ ("Periodic pulse must be a multiple of RTC period - %d nanoseconds",
++ p_Rtc->clockPeriodNanoSec));
++ if (tmpFiper & 0xffffffff00000000LL)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
++ ("Periodic pulse/RTC period ratio must be smaller than 4294967296"));
++
++ if (p_FmRtcPeriodicPulseParams->f_PeriodicPulseCallback)
++ {
++ p_Rtc->periodicPulseParams[p_FmRtcPeriodicPulseParams->periodicPulseId].f_PeriodicPulseCallback =
++ p_FmRtcPeriodicPulseParams->f_PeriodicPulseCallback;
++ enable = TRUE;
++ }
++ fman_rtc_set_periodic_pulse(p_Rtc->p_MemMap, p_FmRtcPeriodicPulseParams->periodicPulseId, (uint32_t)tmpFiper, enable);
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ClearPeriodicPulse(t_Handle h_FmRtc, uint8_t periodicPulseId)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ if (periodicPulseId >= FM_RTC_NUM_OF_PERIODIC_PULSES)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Periodic pulse ID"));
++ }
++
++ p_Rtc->periodicPulseParams[periodicPulseId].f_PeriodicPulseCallback = NULL;
++ fman_rtc_clear_periodic_pulse(p_Rtc->p_MemMap, periodicPulseId);
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_SetExternalTrigger(t_Handle h_FmRtc, t_FmRtcExternalTriggerParams *p_FmRtcExternalTriggerParams)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++ bool enable = FALSE;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ if (p_FmRtcExternalTriggerParams->externalTriggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External Trigger ID"));
++ }
++
++ if (p_FmRtcExternalTriggerParams->f_ExternalTriggerCallback)
++ {
++ p_Rtc->externalTriggerParams[p_FmRtcExternalTriggerParams->externalTriggerId].f_ExternalTriggerCallback = p_FmRtcExternalTriggerParams->f_ExternalTriggerCallback;
++ enable = TRUE;
++ }
++
++ fman_rtc_set_ext_trigger(p_Rtc->p_MemMap, p_FmRtcExternalTriggerParams->externalTriggerId, enable, p_FmRtcExternalTriggerParams->usePulseAsInput);
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_ClearExternalTrigger(t_Handle h_FmRtc, uint8_t externalTriggerId)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ if (externalTriggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External Trigger ID"));
++
++ p_Rtc->externalTriggerParams[externalTriggerId].f_ExternalTriggerCallback = NULL;
++
++ fman_rtc_clear_external_trigger(p_Rtc->p_MemMap, externalTriggerId);
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_GetExternalTriggerTimeStamp(t_Handle h_FmRtc,
++ uint8_t triggerId,
++ uint64_t *p_TimeStamp)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ if (triggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS)
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External trigger ID"));
++
++ *p_TimeStamp = fman_rtc_get_trigger_stamp(p_Rtc->p_MemMap, triggerId)*p_Rtc->clockPeriodNanoSec;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_GetCurrentTime(t_Handle h_FmRtc, uint64_t *p_Ts)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ *p_Ts = fman_rtc_get_timer(p_Rtc->p_MemMap)*p_Rtc->clockPeriodNanoSec;
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_SetCurrentTime(t_Handle h_FmRtc, uint64_t ts)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ do_div(ts, p_Rtc->clockPeriodNanoSec);
++ fman_rtc_set_timer(p_Rtc->p_MemMap, (int64_t)ts);
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_GetFreqCompensation(t_Handle h_FmRtc, uint32_t *p_Compensation)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ *p_Compensation = fman_rtc_get_frequency_compensation(p_Rtc->p_MemMap);
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_SetFreqCompensation(t_Handle h_FmRtc, uint32_t freqCompensation)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ /* set the new freqCompensation */
++ fman_rtc_set_frequency_compensation(p_Rtc->p_MemMap, freqCompensation);
++
++ return E_OK;
++}
++
++#ifdef CONFIG_PTP_1588_CLOCK_DPAA
++/*****************************************************************************/
++t_Error FM_RTC_EnableInterrupt(t_Handle h_FmRtc, uint32_t events)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ /* enable interrupt */
++ fman_rtc_enable_interupt(p_Rtc->p_MemMap, events);
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++t_Error FM_RTC_DisableInterrupt(t_Handle h_FmRtc, uint32_t events)
++{
++ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
++
++ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
++
++ /* disable interrupt */
++ fman_rtc_disable_interupt(p_Rtc->p_MemMap, events);
++
++ return E_OK;
++}
++#endif
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h
+new file mode 100644
+index 00000000..843ca008
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h
+@@ -0,0 +1,96 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_rtc.h
++
++ @Description Memory map and internal definitions for FM RTC IEEE1588 Timer driver.
++
++ @Cautions None
++*//***************************************************************************/
++
++#ifndef __FM_RTC_H__
++#define __FM_RTC_H__
++
++#include "std_ext.h"
++#include "fm_rtc_ext.h"
++
++
++#define __ERR_MODULE__ MODULE_FM_RTC
++
++/* General definitions */
++
++#define ACCUMULATOR_OVERFLOW ((uint64_t)(1LL << 32))
++#define DEFAULT_OUTPUT_CLOCK_DIVISOR 0x00000002
++#define DEFAULT_BYPASS FALSE
++#define DEFAULT_CLOCK_PERIOD 1000
++
++
++
++typedef struct t_FmRtcAlarm
++{
++ t_FmRtcExceptionsCallback *f_AlarmCallback;
++ bool clearOnExpiration;
++} t_FmRtcAlarm;
++
++typedef struct t_FmRtcPeriodicPulse
++{
++ t_FmRtcExceptionsCallback *f_PeriodicPulseCallback;
++} t_FmRtcPeriodicPulse;
++
++typedef struct t_FmRtcExternalTrigger
++{
++ t_FmRtcExceptionsCallback *f_ExternalTriggerCallback;
++} t_FmRtcExternalTrigger;
++
++
++/**************************************************************************//**
++ @Description RTC FM driver control structure.
++*//***************************************************************************/
++typedef struct t_FmRtc
++{
++ t_Part *p_Part; /**< Pointer to the integration device */
++ t_Handle h_Fm;
++ t_Handle h_App; /**< Application handle */
++ struct rtc_regs *p_MemMap;
++ uint32_t clockPeriodNanoSec; /**< RTC clock period in nano-seconds (for FS mode) */
++ uint32_t srcClkFreqMhz;
++ uint16_t outputClockDivisor; /**< Output clock divisor (for FS mode) */
++ t_FmRtcAlarm alarmParams[FM_RTC_NUM_OF_ALARMS];
++ t_FmRtcPeriodicPulse periodicPulseParams[FM_RTC_NUM_OF_PERIODIC_PULSES];
++ t_FmRtcExternalTrigger externalTriggerParams[FM_RTC_NUM_OF_EXT_TRIGGERS];
++ struct rtc_cfg *p_RtcDriverParam; /**< RTC Driver parameters (for Init phase) */
++} t_FmRtc;
++
++
++#endif /* __FM_RTC_H__ */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fman_rtc.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fman_rtc.c
+new file mode 100755
+index 00000000..acdf507e
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fman_rtc.c
+@@ -0,0 +1,334 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "fsl_fman_rtc.h"
++
++void fman_rtc_defconfig(struct rtc_cfg *cfg)
++{
++ int i;
++ cfg->src_clk = DEFAULT_SRC_CLOCK;
++ cfg->invert_input_clk_phase = DEFAULT_INVERT_INPUT_CLK_PHASE;
++ cfg->invert_output_clk_phase = DEFAULT_INVERT_OUTPUT_CLK_PHASE;
++ cfg->pulse_realign = DEFAULT_PULSE_REALIGN;
++ for (i = 0; i < FMAN_RTC_MAX_NUM_OF_ALARMS; i++)
++ cfg->alarm_polarity[i] = DEFAULT_ALARM_POLARITY;
++ for (i = 0; i < FMAN_RTC_MAX_NUM_OF_EXT_TRIGGERS; i++)
++ cfg->trigger_polarity[i] = DEFAULT_TRIGGER_POLARITY;
++}
++
++uint32_t fman_rtc_get_events(struct rtc_regs *regs)
++{
++ return ioread32be(&regs->tmr_tevent);
++}
++
++uint32_t fman_rtc_get_event(struct rtc_regs *regs, uint32_t ev_mask)
++{
++ return ioread32be(&regs->tmr_tevent) & ev_mask;
++}
++
++uint32_t fman_rtc_get_interrupt_mask(struct rtc_regs *regs)
++{
++ return ioread32be(&regs->tmr_temask);
++}
++
++void fman_rtc_set_interrupt_mask(struct rtc_regs *regs, uint32_t mask)
++{
++ iowrite32be(mask, &regs->tmr_temask);
++}
++
++void fman_rtc_ack_event(struct rtc_regs *regs, uint32_t events)
++{
++ iowrite32be(events, &regs->tmr_tevent);
++}
++
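++/* Return the pending events that are also enabled in TMR_TEMASK, and
++ * acknowledge them by writing the set back (TMR_TEVENT is write-1-to-clear).
++ */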
++uint32_t fman_rtc_check_and_clear_event(struct rtc_regs *regs)
++{
++ uint32_t event;
++
++ event = ioread32be(&regs->tmr_tevent);
++ event &= ioread32be(&regs->tmr_temask);
++
++ if (event)
++ iowrite32be(event, &regs->tmr_tevent);
++ return event;
++}
++
++uint32_t fman_rtc_get_frequency_compensation(struct rtc_regs *regs)
++{
++ return ioread32be(&regs->tmr_add);
++}
++
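++/* TMR_ADD holds the fractional addend accumulated on each source-clock tick;
++ * the count advances when the accumulator overflows (cf. ACCUMULATOR_OVERFLOW
++ * in fm_rtc.h), so adjusting this value trims the effective clock frequency.
++ */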
++void fman_rtc_set_frequency_compensation(struct rtc_regs *regs, uint32_t val)
++{
++ iowrite32be(val, &regs->tmr_add);
++}
++
++void fman_rtc_enable_interupt(struct rtc_regs *regs, uint32_t events)
++{
++ fman_rtc_set_interrupt_mask(regs, fman_rtc_get_interrupt_mask(regs) | events);
++}
++
++void fman_rtc_disable_interupt(struct rtc_regs *regs, uint32_t events)
++{
++ fman_rtc_set_interrupt_mask(regs, fman_rtc_get_interrupt_mask(regs) & ~events);
++}
++
++void fman_rtc_set_timer_alarm_l(struct rtc_regs *regs, int index, uint32_t val)
++{
++ iowrite32be(val, &regs->tmr_alarm[index].tmr_alarm_l);
++}
++
++void fman_rtc_set_timer_fiper(struct rtc_regs *regs, int index, uint32_t val)
++{
++ iowrite32be(val, &regs->tmr_fiper[index]);
++}
++
++void fman_rtc_set_timer_alarm(struct rtc_regs *regs, int index, int64_t val)
++{
++ iowrite32be((uint32_t)val, &regs->tmr_alarm[index].tmr_alarm_l);
++ iowrite32be((uint32_t)(val >> 32), &regs->tmr_alarm[index].tmr_alarm_h);
++}
++
++void fman_rtc_set_timer_offset(struct rtc_regs *regs, int64_t val)
++{
++ iowrite32be((uint32_t)val, &regs->tmr_off_l);
++ iowrite32be((uint32_t)(val >> 32), &regs->tmr_off_h);
++}
++
++uint64_t fman_rtc_get_trigger_stamp(struct rtc_regs *regs, int id)
++{
++ uint64_t time;
++	/* TMR_ETTS_L must be read first to get an accurate value */
++ time = (uint64_t)ioread32be(&regs->tmr_etts[id].tmr_etts_l);
++ time |= ((uint64_t)ioread32be(&regs->tmr_etts[id].tmr_etts_h)
++ << 32);
++
++ return time;
++}
++
++uint32_t fman_rtc_get_timer_ctrl(struct rtc_regs *regs)
++{
++ return ioread32be(&regs->tmr_ctrl);
++}
++
++void fman_rtc_set_timer_ctrl(struct rtc_regs *regs, uint32_t val)
++{
++ iowrite32be(val, &regs->tmr_ctrl);
++}
++
++void fman_rtc_timers_soft_reset(struct rtc_regs *regs)
++{
++ fman_rtc_set_timer_ctrl(regs, FMAN_RTC_TMR_CTRL_TMSR);
++ udelay(10);
++ fman_rtc_set_timer_ctrl(regs, 0);
++}
++
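++/* Bring the 1588 timer block to a known state: soft-reset it, select the
++ * source clock and signal polarities, then disarm the alarms, quiesce the
++ * FIPER outputs, and clear all events, masks and the timer offset.
++ */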
++void fman_rtc_init(struct rtc_cfg *cfg, struct rtc_regs *regs, int num_alarms,
++ int num_fipers, int num_ext_triggers, bool init_freq_comp,
++ uint32_t freq_compensation, uint32_t output_clock_divisor)
++{
++ uint32_t tmr_ctrl;
++ int i;
++
++ fman_rtc_timers_soft_reset(regs);
++
++ /* Set the source clock */
++ switch (cfg->src_clk) {
++ case E_FMAN_RTC_SOURCE_CLOCK_SYSTEM:
++ tmr_ctrl = FMAN_RTC_TMR_CTRL_CKSEL_MAC_CLK;
++ break;
++ case E_FMAN_RTC_SOURCE_CLOCK_OSCILATOR:
++ tmr_ctrl = FMAN_RTC_TMR_CTRL_CKSEL_OSC_CLK;
++ break;
++ default:
++		/* Use the external TMR reference clock. */
++ tmr_ctrl = FMAN_RTC_TMR_CTRL_CKSEL_EXT_CLK;
++ break;
++ }
++
++	/* Whatever period the user picked, the timestamp advances by 1
++	 * every time that period elapses. */
++ tmr_ctrl |= ((1 << FMAN_RTC_TMR_CTRL_TCLK_PERIOD_SHIFT) &
++ FMAN_RTC_TMR_CTRL_TCLK_PERIOD_MASK);
++
++ if (cfg->invert_input_clk_phase)
++ tmr_ctrl |= FMAN_RTC_TMR_CTRL_CIPH;
++ if (cfg->invert_output_clk_phase)
++ tmr_ctrl |= FMAN_RTC_TMR_CTRL_COPH;
++
++ for (i = 0; i < num_alarms; i++) {
++ if (cfg->alarm_polarity[i] ==
++ E_FMAN_RTC_ALARM_POLARITY_ACTIVE_LOW)
++ tmr_ctrl |= (FMAN_RTC_TMR_CTRL_ALMP1 >> i);
++ }
++
++ for (i = 0; i < num_ext_triggers; i++)
++ if (cfg->trigger_polarity[i] ==
++ E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE)
++ tmr_ctrl |= (FMAN_RTC_TMR_CTRL_ETEP1 << i);
++
++ if (!cfg->timer_slave_mode && cfg->bypass)
++ tmr_ctrl |= FMAN_RTC_TMR_CTRL_BYP;
++
++ fman_rtc_set_timer_ctrl(regs, tmr_ctrl);
++ if (init_freq_comp)
++ fman_rtc_set_frequency_compensation(regs, freq_compensation);
++
++	/* Disarm the alarms: set the TMR_ALARM registers to the maximum value */
++ for (i = 0; i < num_alarms; i++)
++ fman_rtc_set_timer_alarm(regs, i, 0xFFFFFFFFFFFFFFFFLL);
++
++ /* Clear TMR_TEVENT */
++ fman_rtc_ack_event(regs, FMAN_RTC_TMR_TEVENT_ALL);
++
++ /* Initialize TMR_TEMASK */
++ fman_rtc_set_interrupt_mask(regs, 0);
++
++	/* Quiesce the FIPER outputs: set the TMR_FIPER registers to the maximum period */
++ for (i = 0; i < num_fipers; i++)
++ fman_rtc_set_timer_fiper(regs, i, 0xFFFFFFFF);
++
++ /* Initialize TMR_PRSC */
++ iowrite32be(output_clock_divisor, &regs->tmr_prsc);
++
++ /* Clear TMR_OFF */
++ fman_rtc_set_timer_offset(regs, 0);
++}
++
++bool fman_rtc_is_enabled(struct rtc_regs *regs)
++{
++ return (bool)(fman_rtc_get_timer_ctrl(regs) & FMAN_RTC_TMR_CTRL_TE);
++}
++
++void fman_rtc_enable(struct rtc_regs *regs, bool reset_clock)
++{
++ uint32_t tmr_ctrl = fman_rtc_get_timer_ctrl(regs);
++
++	/* TODO: check that no timestamping MACs are active at this stage. */
++ if (reset_clock) {
++ fman_rtc_set_timer_ctrl(regs, (tmr_ctrl | FMAN_RTC_TMR_CTRL_TMSR));
++
++ udelay(10);
++ /* Clear TMR_OFF */
++ fman_rtc_set_timer_offset(regs, 0);
++ }
++
++ fman_rtc_set_timer_ctrl(regs, (tmr_ctrl | FMAN_RTC_TMR_CTRL_TE));
++}
++
++void fman_rtc_disable(struct rtc_regs *regs)
++{
++ fman_rtc_set_timer_ctrl(regs, (fman_rtc_get_timer_ctrl(regs)
++ & ~(FMAN_RTC_TMR_CTRL_TE)));
++}
++
++void fman_rtc_clear_periodic_pulse(struct rtc_regs *regs, int id)
++{
++ uint32_t tmp_reg;
++ if (id == 0)
++ tmp_reg = FMAN_RTC_TMR_TEVENT_PP1;
++ else
++ tmp_reg = FMAN_RTC_TMR_TEVENT_PP2;
++ fman_rtc_disable_interupt(regs, tmp_reg);
++
++ tmp_reg = fman_rtc_get_timer_ctrl(regs);
++ if (tmp_reg & FMAN_RTC_TMR_CTRL_FS)
++ fman_rtc_set_timer_ctrl(regs, tmp_reg & ~FMAN_RTC_TMR_CTRL_FS);
++
++ fman_rtc_set_timer_fiper(regs, id, 0xFFFFFFFF);
++}
++
++void fman_rtc_clear_external_trigger(struct rtc_regs *regs, int id)
++{
++ uint32_t tmpReg, tmp_ctrl;
++
++ if (id == 0)
++ tmpReg = FMAN_RTC_TMR_TEVENT_ETS1;
++ else
++ tmpReg = FMAN_RTC_TMR_TEVENT_ETS2;
++ fman_rtc_disable_interupt(regs, tmpReg);
++
++ if (id == 0)
++ tmpReg = FMAN_RTC_TMR_CTRL_PP1L;
++ else
++ tmpReg = FMAN_RTC_TMR_CTRL_PP2L;
++ tmp_ctrl = fman_rtc_get_timer_ctrl(regs);
++ if (tmp_ctrl & tmpReg)
++ fman_rtc_set_timer_ctrl(regs, tmp_ctrl & ~tmpReg);
++}
++
++void fman_rtc_set_alarm(struct rtc_regs *regs, int id, uint32_t val, bool enable)
++{
++ uint32_t tmpReg;
++ fman_rtc_set_timer_alarm(regs, id, val);
++ if (enable) {
++ if (id == 0)
++ tmpReg = FMAN_RTC_TMR_TEVENT_ALM1;
++ else
++ tmpReg = FMAN_RTC_TMR_TEVENT_ALM2;
++ fman_rtc_enable_interupt(regs, tmpReg);
++ }
++}
++
++void fman_rtc_set_periodic_pulse(struct rtc_regs *regs, int id, uint32_t val,
++ bool enable)
++{
++ uint32_t tmpReg;
++ fman_rtc_set_timer_fiper(regs, id, val);
++ if (enable) {
++ if (id == 0)
++ tmpReg = FMAN_RTC_TMR_TEVENT_PP1;
++ else
++ tmpReg = FMAN_RTC_TMR_TEVENT_PP2;
++ fman_rtc_enable_interupt(regs, tmpReg);
++ }
++}
++
++void fman_rtc_set_ext_trigger(struct rtc_regs *regs, int id, bool enable,
++ bool use_pulse_as_input)
++{
++ uint32_t tmpReg;
++ if (enable) {
++ if (id == 0)
++ tmpReg = FMAN_RTC_TMR_TEVENT_ETS1;
++ else
++ tmpReg = FMAN_RTC_TMR_TEVENT_ETS2;
++ fman_rtc_enable_interupt(regs, tmpReg);
++ }
++ if (use_pulse_as_input) {
++ if (id == 0)
++ tmpReg = FMAN_RTC_TMR_CTRL_PP1L;
++ else
++ tmpReg = FMAN_RTC_TMR_CTRL_PP2L;
++ fman_rtc_set_timer_ctrl(regs, fman_rtc_get_timer_ctrl(regs) | tmpReg);
++ }
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/Makefile b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/Makefile
+new file mode 100644
+index 00000000..fae50ce4
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/Makefile
+@@ -0,0 +1,15 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
++
++ccflags-y += -I$(NCSW_FM_INC)
++
++obj-y += fsl-ncsw-sp.o
++
++fsl-ncsw-sp-objs := fm_sp.o fman_sp.o
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c
+new file mode 100644
+index 00000000..0994f34d
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c
+@@ -0,0 +1,757 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_sp.c
++
++ @Description FM PCD Storage profile ...
++*//***************************************************************************/
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "debug_ext.h"
++#include "net_ext.h"
++
++#include "fm_vsp_ext.h"
++#include "fm_sp.h"
++#include "fm_common.h"
++#include "fsl_fman_sp.h"
++
++
++#if (DPAA_VERSION >= 11)
++static t_Error CheckParamsGeneratedInternally(t_FmVspEntry *p_FmVspEntry)
++{
++ t_Error err = E_OK;
++
++ if ((err = FmSpCheckIntContextParams(&p_FmVspEntry->intContext))!= E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if ((err = FmSpCheckBufMargins(&p_FmVspEntry->bufMargins)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ return err;
++
++}
++
++static t_Error CheckParams(t_FmVspEntry *p_FmVspEntry)
++{
++ t_Error err = E_OK;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->h_Fm, E_INVALID_HANDLE);
++
++ if ((err = FmSpCheckBufPoolsParams(&p_FmVspEntry->p_FmVspEntryDriverParams->extBufPools,
++ p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools,
++ p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion)) != E_OK)
++
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (p_FmVspEntry->p_FmVspEntryDriverParams->liodnOffset & ~FM_LIODN_OFFSET_MASK)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("liodnOffset is larger than %d", FM_LIODN_OFFSET_MASK+1));
++
++ err = FmVSPCheckRelativeProfile(p_FmVspEntry->h_Fm,
++ p_FmVspEntry->portType,
++ p_FmVspEntry->portId,
++ p_FmVspEntry->relativeProfileId);
++
++ return err;
++}
++#endif /* (DPAA_VERSION >= 11) */
++
++
++/*****************************************************************************/
++/* Inter-module API routines */
++/*****************************************************************************/
++void FmSpSetBufPoolsInAscOrderOfBufSizes(t_FmExtPools *p_FmExtPools,
++ uint8_t *orderedArray,
++ uint16_t *sizesArray)
++{
++ uint16_t bufSize = 0;
++ int i=0, j=0, k=0;
++
++    /* First copy the external buffer pools' information into a local array ordered by buffer size (a simple insertion sort) */
++ for (i=0;i<p_FmExtPools->numOfPoolsUsed;i++)
++ {
++ /* get pool size */
++ bufSize = p_FmExtPools->extBufPool[i].size;
++
++ /* keep sizes in an array according to poolId for direct access */
++ sizesArray[p_FmExtPools->extBufPool[i].id] = bufSize;
++
++ /* save poolId in an ordered array according to size */
++ for (j=0;j<=i;j++)
++ {
++ /* this is the next free place in the array */
++ if (j==i)
++ orderedArray[i] = p_FmExtPools->extBufPool[i].id;
++ else
++ {
++ /* find the right place for this poolId */
++ if (bufSize < sizesArray[orderedArray[j]])
++ {
++ /* move the poolIds one place ahead to make room for this poolId */
++ for (k=i;k>j;k--)
++ orderedArray[k] = orderedArray[k-1];
++
++ /* now k==j, this is the place for the new size */
++ orderedArray[k] = p_FmExtPools->extBufPool[i].id;
++ break;
++ }
++ }
++ }
++ }
++}
++
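++/* Cross-check the external pools, backup pools and depletion settings:
++ * every backup or depletion pool id must refer to a pool that is actually
++ * listed in extBufPools, and the announced counts must match.
++ */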
++t_Error FmSpCheckBufPoolsParams(t_FmExtPools *p_FmExtPools,
++ t_FmBackupBmPools *p_FmBackupBmPools,
++ t_FmBufPoolDepletion *p_FmBufPoolDepletion)
++{
++
++ int i = 0, j = 0;
++ bool found;
++ uint8_t count = 0;
++
++ if (p_FmExtPools)
++ {
++ if (p_FmExtPools->numOfPoolsUsed > FM_PORT_MAX_NUM_OF_EXT_POOLS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfPoolsUsed can't be larger than %d", FM_PORT_MAX_NUM_OF_EXT_POOLS));
++
++ for (i=0;i<p_FmExtPools->numOfPoolsUsed;i++)
++ {
++ if (p_FmExtPools->extBufPool[i].id >= BM_MAX_NUM_OF_POOLS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("extBufPools.extBufPool[%d].id can't be larger than %d", i, BM_MAX_NUM_OF_POOLS));
++ if (!p_FmExtPools->extBufPool[i].size)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("extBufPools.extBufPool[%d].size is 0", i));
++ }
++ }
++ if (!p_FmExtPools && (p_FmBackupBmPools || p_FmBufPoolDepletion))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("backupBmPools ot bufPoolDepletion can not be defined without external pools"));
++
++ /* backup BM pools indication is valid only for some chip derivatives
++ (limited by the config routine) */
++ if (p_FmBackupBmPools)
++ {
++ if (p_FmBackupBmPools->numOfBackupPools >= p_FmExtPools->numOfPoolsUsed)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_BackupBmPools must be smaller than extBufPools.numOfPoolsUsed"));
++ found = FALSE;
++ for (i = 0;i<p_FmBackupBmPools->numOfBackupPools;i++)
++ {
++
++ for (j=0;j<p_FmExtPools->numOfPoolsUsed;j++)
++ {
++ if (p_FmBackupBmPools->poolIds[i] == p_FmExtPools->extBufPool[j].id)
++ {
++ found = TRUE;
++ break;
++ }
++ }
++ if (!found)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("All p_BackupBmPools.poolIds must be included in extBufPools.extBufPool[n].id"));
++ else
++ found = FALSE;
++ }
++ }
++
++ /* up to extBufPools.numOfPoolsUsed pools may be defined */
++ if (p_FmBufPoolDepletion && p_FmBufPoolDepletion->poolsGrpModeEnable)
++ {
++ if ((p_FmBufPoolDepletion->numOfPools > p_FmExtPools->numOfPoolsUsed))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPools can't be larger than %d and can't be larger than numOfPoolsUsed", FM_PORT_MAX_NUM_OF_EXT_POOLS));
++
++ if (!p_FmBufPoolDepletion->numOfPools)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPoolsToConsider can not be 0 when poolsGrpModeEnable=TRUE"));
++
++ found = FALSE;
++ count = 0;
++ /* for each pool that is in poolsToConsider, check if it is defined
++ in extBufPool */
++ for (i=0;i<BM_MAX_NUM_OF_POOLS;i++)
++ {
++ if (p_FmBufPoolDepletion->poolsToConsider[i])
++ {
++ for (j=0;j<p_FmExtPools->numOfPoolsUsed;j++)
++ {
++ if (i == p_FmExtPools->extBufPool[j].id)
++ {
++ found = TRUE;
++ count++;
++ break;
++ }
++ }
++ if (!found)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Pools selected for depletion are not used."));
++ else
++ found = FALSE;
++ }
++ }
++ /* check that the number of pools that we have checked is equal to the number announced by the user */
++ if (count != p_FmBufPoolDepletion->numOfPools)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPools is larger than the number of pools defined."));
++ }
++
++ if (p_FmBufPoolDepletion && p_FmBufPoolDepletion->singlePoolModeEnable)
++ {
++        /* validate the pools selected for single pool mode depletion */
++ found = FALSE;
++ count = 0;
++ for (i=0;i<BM_MAX_NUM_OF_POOLS;i++)
++ {
++ if (p_FmBufPoolDepletion->poolsToConsiderForSingleMode[i])
++ {
++ for (j=0;j<p_FmExtPools->numOfPoolsUsed;j++)
++ {
++ if (i == p_FmExtPools->extBufPool[j].id)
++ {
++ found = TRUE;
++ count++;
++ break;
++ }
++ }
++ if (!found)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Pools selected for depletion are not used."));
++ else
++ found = FALSE;
++ }
++ }
++ if (!count)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("No pools defined for single buffer mode pool depletion."));
++ }
++
++ return E_OK;
++}
++
++t_Error FmSpCheckIntContextParams(t_FmSpIntContextDataCopy *p_FmSpIntContextDataCopy)
++{
++    /* Check that it is divisible by 16 and not larger than 240 */
++ if (p_FmSpIntContextDataCopy->intContextOffset >MAX_INT_OFFSET)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.intContextOffset can't be larger than %d", MAX_INT_OFFSET));
++ if (p_FmSpIntContextDataCopy->intContextOffset % OFFSET_UNITS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.intContextOffset has to be divisible by %d", OFFSET_UNITS));
++
++    /* check that IC size + IC internal offset does not exceed the IC block size */
++ if (p_FmSpIntContextDataCopy->size + p_FmSpIntContextDataCopy->intContextOffset > MAX_IC_SIZE)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.size + intContext.intContextOffset has to be smaller than %d", MAX_IC_SIZE));
++    /* Check that it is divisible by 16 and not larger than 256 */
++ if (p_FmSpIntContextDataCopy->size % OFFSET_UNITS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.size has to be divisible by %d", OFFSET_UNITS));
++
++    /* Check that it is divisible by 16 and not larger than 4K */
++ if (p_FmSpIntContextDataCopy->extBufOffset > MAX_EXT_OFFSET)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.extBufOffset can't be larger than %d", MAX_EXT_OFFSET));
++ if (p_FmSpIntContextDataCopy->extBufOffset % OFFSET_UNITS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.extBufOffset has to be divisible by %d", OFFSET_UNITS));
++
++ return E_OK;
++}
++
++t_Error FmSpCheckBufMargins(t_FmSpBufMargins *p_FmSpBufMargins)
++{
++ /* Check the margin definition */
++ if (p_FmSpBufMargins->startMargins > MAX_EXT_BUFFER_OFFSET)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufMargins.startMargins can't be larger than %d", MAX_EXT_BUFFER_OFFSET));
++ if (p_FmSpBufMargins->endMargins > MAX_EXT_BUFFER_OFFSET)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufMargins.endMargins can't be larger than %d", MAX_EXT_BUFFER_OFFSET));
++
++ return E_OK;
++}
++
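++/* Compute the external-buffer prefix layout. Offsets grow in this order:
++ * private data, then the copied internal context, then optional manip
++ * extra space, then padding up to dataAlign, where the frame data starts.
++ */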
++t_Error FmSpBuildBufferStructure(t_FmSpIntContextDataCopy *p_FmSpIntContextDataCopy,
++ t_FmBufferPrefixContent *p_BufferPrefixContent,
++ t_FmSpBufMargins *p_FmSpBufMargins,
++ t_FmSpBufferOffsets *p_FmSpBufferOffsets,
++ uint8_t *internalBufferOffset)
++{
++ uint32_t tmp;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmSpIntContextDataCopy, E_INVALID_VALUE);
++ ASSERT_COND(p_FmSpIntContextDataCopy);
++ ASSERT_COND(p_BufferPrefixContent);
++ ASSERT_COND(p_FmSpBufMargins);
++ ASSERT_COND(p_FmSpBufferOffsets);
++
++    /* Align the start of the internal context data to 16 bytes */
++ p_FmSpIntContextDataCopy->extBufOffset =
++ (uint16_t)((p_BufferPrefixContent->privDataSize & (OFFSET_UNITS-1)) ?
++ ((p_BufferPrefixContent->privDataSize + OFFSET_UNITS) & ~(uint16_t)(OFFSET_UNITS-1)) :
++ p_BufferPrefixContent->privDataSize);
++
++ /* Translate margin and intContext params to FM parameters */
++ /* Initialize with illegal value. Later we'll set legal values. */
++ p_FmSpBufferOffsets->prsResultOffset = (uint32_t)ILLEGAL_BASE;
++ p_FmSpBufferOffsets->timeStampOffset = (uint32_t)ILLEGAL_BASE;
++ p_FmSpBufferOffsets->hashResultOffset= (uint32_t)ILLEGAL_BASE;
++ p_FmSpBufferOffsets->pcdInfoOffset = (uint32_t)ILLEGAL_BASE;
++
++    /* Internally the driver supports two layouts:
++       1. a prsResult/timestamp/hashResult selection (in fact 8 combinations,
++          but for simplicity we treat it as one case);
++       2. the whole IC context (from the AD), not including debug. */
++
++    /* This 'if' covers option 2: we copy from the beginning of the context. */
++ if (p_BufferPrefixContent->passAllOtherPCDInfo)
++ {
++ p_FmSpIntContextDataCopy->size = 128; /* must be aligned to 16 */
++ /* Start copying data after 16 bytes (FD) from the beginning of the internal context */
++ p_FmSpIntContextDataCopy->intContextOffset = 16;
++
++ if (p_BufferPrefixContent->passAllOtherPCDInfo)
++ p_FmSpBufferOffsets->pcdInfoOffset = p_FmSpIntContextDataCopy->extBufOffset;
++ if (p_BufferPrefixContent->passPrsResult)
++ p_FmSpBufferOffsets->prsResultOffset =
++ (uint32_t)(p_FmSpIntContextDataCopy->extBufOffset + 16);
++ if (p_BufferPrefixContent->passTimeStamp)
++ p_FmSpBufferOffsets->timeStampOffset =
++ (uint32_t)(p_FmSpIntContextDataCopy->extBufOffset + 48);
++ if (p_BufferPrefixContent->passHashResult)
++ p_FmSpBufferOffsets->hashResultOffset =
++ (uint32_t)(p_FmSpIntContextDataCopy->extBufOffset + 56);
++ }
++ else
++ {
++ /* This case covers the options under 1 */
++ /* Copy size must be in 16-byte granularity. */
++ p_FmSpIntContextDataCopy->size =
++ (uint16_t)((p_BufferPrefixContent->passPrsResult ? 32 : 0) +
++ ((p_BufferPrefixContent->passTimeStamp ||
++ p_BufferPrefixContent->passHashResult) ? 16 : 0));
++
++        /* Align the start of the internal context data to 16 bytes */
++ p_FmSpIntContextDataCopy->intContextOffset =
++ (uint8_t)(p_BufferPrefixContent->passPrsResult ? 32 :
++ ((p_BufferPrefixContent->passTimeStamp ||
++ p_BufferPrefixContent->passHashResult) ? 64 : 0));
++
++ if (p_BufferPrefixContent->passPrsResult)
++ p_FmSpBufferOffsets->prsResultOffset = p_FmSpIntContextDataCopy->extBufOffset;
++ if (p_BufferPrefixContent->passTimeStamp)
++ p_FmSpBufferOffsets->timeStampOffset = p_BufferPrefixContent->passPrsResult ?
++ (p_FmSpIntContextDataCopy->extBufOffset + sizeof(t_FmPrsResult)) :
++ p_FmSpIntContextDataCopy->extBufOffset;
++ if (p_BufferPrefixContent->passHashResult)
++            /* If PR is not requested, the IC copy starts at the TS position (whether or not TS itself is requested), so the hash result sits 8 bytes in */
++ p_FmSpBufferOffsets->hashResultOffset = p_BufferPrefixContent->passPrsResult ?
++ (p_FmSpIntContextDataCopy->extBufOffset + sizeof(t_FmPrsResult) + 8) :
++ p_FmSpIntContextDataCopy->extBufOffset + 8;
++ }
++
++ if (p_FmSpIntContextDataCopy->size)
++ p_FmSpBufMargins->startMargins =
++ (uint16_t)(p_FmSpIntContextDataCopy->extBufOffset +
++ p_FmSpIntContextDataCopy->size);
++ else
++        /* No internal context passing: startMargins is immediately after the private info */
++ p_FmSpBufMargins->startMargins = p_BufferPrefixContent->privDataSize;
++
++ /* save extra space for manip in both external and internal buffers */
++ if (p_BufferPrefixContent->manipExtraSpace)
++ {
++ uint8_t extraSpace;
++#ifdef FM_CAPWAP_SUPPORT
++ if ((p_BufferPrefixContent->manipExtraSpace + CAPWAP_FRAG_EXTRA_SPACE) >= 256)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("p_BufferPrefixContent->manipExtraSpace should be less than %d",
++ 256-CAPWAP_FRAG_EXTRA_SPACE));
++ extraSpace = (uint8_t)(p_BufferPrefixContent->manipExtraSpace + CAPWAP_FRAG_EXTRA_SPACE);
++#else
++ extraSpace = p_BufferPrefixContent->manipExtraSpace;
++#endif /* FM_CAPWAP_SUPPORT */
++ p_FmSpBufferOffsets->manipOffset = p_FmSpBufMargins->startMargins;
++ p_FmSpBufMargins->startMargins += extraSpace;
++ *internalBufferOffset = extraSpace;
++ }
++
++ /* align data start */
++ tmp = (uint32_t)(p_FmSpBufMargins->startMargins % p_BufferPrefixContent->dataAlign);
++ if (tmp)
++ p_FmSpBufMargins->startMargins += (p_BufferPrefixContent->dataAlign-tmp);
++ p_FmSpBufferOffsets->dataOffset = p_FmSpBufMargins->startMargins;
++
++ return E_OK;
++}
++/*********************** End of inter-module routines ************************/
++
++
++#if (DPAA_VERSION >= 11)
++/*****************************************************************************/
++/* API routines */
++/*****************************************************************************/
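++
++/* Lifecycle: FM_VSP_Config() allocates a profile and fills in defaults,
++ * the FM_VSP_Config*() setters may override them, and FM_VSP_Init()
++ * programs the registers and frees the driver-parameter area.
++ */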
++t_Handle FM_VSP_Config(t_FmVspParams *p_FmVspParams)
++{
++ t_FmVspEntry *p_FmVspEntry = NULL;
++ struct fm_storage_profile_params fm_vsp_params;
++
++ p_FmVspEntry = (t_FmVspEntry *)XX_Malloc(sizeof(t_FmVspEntry));
++ if (!p_FmVspEntry)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_StorageProfile allocation failed"));
++ return NULL;
++ }
++ memset(p_FmVspEntry, 0, sizeof(t_FmVspEntry));
++
++ p_FmVspEntry->p_FmVspEntryDriverParams = (t_FmVspEntryDriverParams *)XX_Malloc(sizeof(t_FmVspEntryDriverParams));
++ if (!p_FmVspEntry->p_FmVspEntryDriverParams)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_StorageProfile allocation failed"));
++ XX_Free(p_FmVspEntry);
++ return NULL;
++ }
++ memset(p_FmVspEntry->p_FmVspEntryDriverParams, 0, sizeof(t_FmVspEntryDriverParams));
++ fman_vsp_defconfig(&fm_vsp_params);
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaHeaderCacheAttr = fm_vsp_params.header_cache_attr;
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaIntContextCacheAttr = fm_vsp_params.int_context_cache_attr;
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaScatterGatherCacheAttr = fm_vsp_params.scatter_gather_cache_attr;
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaSwapData = fm_vsp_params.dma_swap_data;
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaWriteOptimize = fm_vsp_params.dma_write_optimize;
++ p_FmVspEntry->p_FmVspEntryDriverParams->noScatherGather = fm_vsp_params.no_scather_gather;
++ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.privDataSize = DEFAULT_FM_SP_bufferPrefixContent_privDataSize;
++ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.passPrsResult= DEFAULT_FM_SP_bufferPrefixContent_passPrsResult;
++ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.passTimeStamp= DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp;
++ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.passAllOtherPCDInfo
++ = DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp;
++ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.dataAlign = DEFAULT_FM_SP_bufferPrefixContent_dataAlign;
++ p_FmVspEntry->p_FmVspEntryDriverParams->liodnOffset = p_FmVspParams->liodnOffset;
++
++ memcpy(&p_FmVspEntry->p_FmVspEntryDriverParams->extBufPools, &p_FmVspParams->extBufPools, sizeof(t_FmExtPools));
++ p_FmVspEntry->h_Fm = p_FmVspParams->h_Fm;
++ p_FmVspEntry->portType = p_FmVspParams->portParams.portType;
++ p_FmVspEntry->portId = p_FmVspParams->portParams.portId;
++
++ p_FmVspEntry->relativeProfileId = p_FmVspParams->relativeProfileId;
++
++ return p_FmVspEntry;
++}
++
++t_Error FM_VSP_Init(t_Handle h_FmVsp)
++{
++
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry *)h_FmVsp;
++ struct fm_storage_profile_params fm_vsp_params;
++ uint8_t orderedArray[FM_PORT_MAX_NUM_OF_EXT_POOLS];
++ uint16_t sizesArray[BM_MAX_NUM_OF_POOLS];
++ t_Error err;
++ uint16_t absoluteProfileId = 0;
++ int i = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams,E_INVALID_HANDLE);
++
++ CHECK_INIT_PARAMETERS(p_FmVspEntry, CheckParams);
++
++ memset(&orderedArray, 0, sizeof(uint8_t) * FM_PORT_MAX_NUM_OF_EXT_POOLS);
++ memset(&sizesArray, 0, sizeof(uint16_t) * BM_MAX_NUM_OF_POOLS);
++
++ err = FmSpBuildBufferStructure(&p_FmVspEntry->intContext,
++ &p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent,
++ &p_FmVspEntry->bufMargins,
++ &p_FmVspEntry->bufferOffsets,
++ &p_FmVspEntry->internalBufferOffset);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++
++ err = CheckParamsGeneratedInternally(p_FmVspEntry);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++
++ p_FmVspEntry->p_FmSpRegsBase =
++ (struct fm_pcd_storage_profile_regs *)FmGetVSPBaseAddr(p_FmVspEntry->h_Fm);
++ if (!p_FmVspEntry->p_FmSpRegsBase)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("impossible to initialize SpRegsBase"));
++
++    /* order the external buffer pools in ascending order of buffer pool sizes */
++ FmSpSetBufPoolsInAscOrderOfBufSizes(&(p_FmVspEntry->p_FmVspEntryDriverParams)->extBufPools,
++ orderedArray,
++ sizesArray);
++
++ p_FmVspEntry->extBufPools.numOfPoolsUsed =
++ p_FmVspEntry->p_FmVspEntryDriverParams->extBufPools.numOfPoolsUsed;
++ for (i = 0; i < p_FmVspEntry->extBufPools.numOfPoolsUsed; i++)
++ {
++ p_FmVspEntry->extBufPools.extBufPool[i].id = orderedArray[i];
++ p_FmVspEntry->extBufPools.extBufPool[i].size = sizesArray[orderedArray[i]];
++ }
++
++    /* it is the user's responsibility to fill this in according to requirements */
++ memset(&fm_vsp_params, 0, sizeof(struct fm_storage_profile_params));
++ fm_vsp_params.dma_swap_data = p_FmVspEntry->p_FmVspEntryDriverParams->dmaSwapData;
++ fm_vsp_params.int_context_cache_attr = p_FmVspEntry->p_FmVspEntryDriverParams->dmaIntContextCacheAttr;
++ fm_vsp_params.header_cache_attr = p_FmVspEntry->p_FmVspEntryDriverParams->dmaHeaderCacheAttr;
++ fm_vsp_params.scatter_gather_cache_attr = p_FmVspEntry->p_FmVspEntryDriverParams->dmaScatterGatherCacheAttr;
++ fm_vsp_params.dma_write_optimize = p_FmVspEntry->p_FmVspEntryDriverParams->dmaWriteOptimize;
++ fm_vsp_params.liodn_offset = p_FmVspEntry->p_FmVspEntryDriverParams->liodnOffset;
++ fm_vsp_params.no_scather_gather = p_FmVspEntry->p_FmVspEntryDriverParams->noScatherGather;
++
++ if (p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion)
++ {
++ fm_vsp_params.buf_pool_depletion.buf_pool_depletion_enabled = TRUE;
++ fm_vsp_params.buf_pool_depletion.pools_grp_mode_enable = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->poolsGrpModeEnable;
++ fm_vsp_params.buf_pool_depletion.num_pools = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->numOfPools;
++ fm_vsp_params.buf_pool_depletion.pools_to_consider = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->poolsToConsider;
++ fm_vsp_params.buf_pool_depletion.single_pool_mode_enable = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->singlePoolModeEnable;
++ fm_vsp_params.buf_pool_depletion.pools_to_consider_for_single_mode = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->poolsToConsiderForSingleMode;
++ fm_vsp_params.buf_pool_depletion.has_pfc_priorities = TRUE;
++ fm_vsp_params.buf_pool_depletion.pfc_priorities_en = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->pfcPrioritiesEn;
++ }
++ else
++ fm_vsp_params.buf_pool_depletion.buf_pool_depletion_enabled = FALSE;
++
++ if (p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools)
++ {
++ fm_vsp_params.backup_pools.num_backup_pools = p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools->numOfBackupPools;
++ fm_vsp_params.backup_pools.pool_ids = p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools->poolIds;
++ }
++ else
++ fm_vsp_params.backup_pools.num_backup_pools = 0;
++
++ fm_vsp_params.fm_ext_pools.num_pools_used = p_FmVspEntry->extBufPools.numOfPoolsUsed;
++ fm_vsp_params.fm_ext_pools.ext_buf_pool = (struct fman_ext_pool_params*)&p_FmVspEntry->extBufPools.extBufPool;
++ fm_vsp_params.buf_margins = (struct fman_sp_buf_margins*)&p_FmVspEntry->bufMargins;
++ fm_vsp_params.int_context = (struct fman_sp_int_context_data_copy*)&p_FmVspEntry->intContext;
++
++ /* no check on err - it was checked earlier */
++ FmVSPGetAbsoluteProfileId(p_FmVspEntry->h_Fm,
++ p_FmVspEntry->portType,
++ p_FmVspEntry->portId,
++ p_FmVspEntry->relativeProfileId,
++ &absoluteProfileId);
++
++ ASSERT_COND(p_FmVspEntry->p_FmSpRegsBase);
++ ASSERT_COND(fm_vsp_params.int_context);
++ ASSERT_COND(fm_vsp_params.buf_margins);
++ ASSERT_COND((absoluteProfileId <= FM_VSP_MAX_NUM_OF_ENTRIES));
++
++ /* Set all registers related to VSP */
++ fman_vsp_init(p_FmVspEntry->p_FmSpRegsBase, absoluteProfileId, &fm_vsp_params,FM_PORT_MAX_NUM_OF_EXT_POOLS, BM_MAX_NUM_OF_POOLS, FM_MAX_NUM_OF_PFC_PRIORITIES);
++
++ p_FmVspEntry->absoluteSpId = absoluteProfileId;
++
++ if (p_FmVspEntry->p_FmVspEntryDriverParams)
++ XX_Free(p_FmVspEntry->p_FmVspEntryDriverParams);
++ p_FmVspEntry->p_FmVspEntryDriverParams = NULL;
++
++ return E_OK;
++}
++
++t_Error FM_VSP_Free(t_Handle h_FmVsp)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry *)h_FmVsp;
++ SANITY_CHECK_RETURN_ERROR(h_FmVsp, E_INVALID_HANDLE);
++ XX_Free(p_FmVspEntry);
++ return E_OK;
++}
++
++t_Error FM_VSP_ConfigBufferPrefixContent(t_Handle h_FmVsp, t_FmBufferPrefixContent *p_FmBufferPrefixContent)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++
++ memcpy(&p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent, p_FmBufferPrefixContent, sizeof(t_FmBufferPrefixContent));
++    /* if dataAlign was not initialized by the user, fall back to the driver's default */
++ if (!p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.dataAlign)
++ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.dataAlign = DEFAULT_FM_SP_bufferPrefixContent_dataAlign;
++
++ return E_OK;
++}
++
++t_Error FM_VSP_ConfigDmaSwapData(t_Handle h_FmVsp, e_FmDmaSwapOption swapData)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaSwapData = swapData;
++
++ return E_OK;
++}
++
++t_Error FM_VSP_ConfigDmaIcCacheAttr(t_Handle h_FmVsp, e_FmDmaCacheOption intContextCacheAttr)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaIntContextCacheAttr = intContextCacheAttr;
++
++ return E_OK;
++}
++
++t_Error FM_VSP_ConfigDmaHdrAttr(t_Handle h_FmVsp, e_FmDmaCacheOption headerCacheAttr)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaHeaderCacheAttr = headerCacheAttr;
++
++ return E_OK;
++}
++
++t_Error FM_VSP_ConfigDmaScatterGatherAttr(t_Handle h_FmVsp, e_FmDmaCacheOption scatterGatherCacheAttr)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaScatterGatherCacheAttr = scatterGatherCacheAttr;
++
++ return E_OK;
++}
++
++t_Error FM_VSP_ConfigDmaWriteOptimize(t_Handle h_FmVsp, bool optimize)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++
++
++ p_FmVspEntry->p_FmVspEntryDriverParams->dmaWriteOptimize = optimize;
++
++ return E_OK;
++}
++
++t_Error FM_VSP_ConfigNoScatherGather(t_Handle h_FmVsp, bool noScatherGather)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++
++
++ p_FmVspEntry->p_FmVspEntryDriverParams->noScatherGather = noScatherGather;
++
++ return E_OK;
++}
++
++t_Error FM_VSP_ConfigPoolDepletion(t_Handle h_FmVsp, t_FmBufPoolDepletion *p_BufPoolDepletion)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmVsp, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_BufPoolDepletion, E_INVALID_HANDLE);
++
++ p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion = (t_FmBufPoolDepletion *)XX_Malloc(sizeof(t_FmBufPoolDepletion));
++ if (!p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_BufPoolDepletion allocation failed"));
++ memcpy(p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion, p_BufPoolDepletion, sizeof(t_FmBufPoolDepletion));
++
++ return E_OK;
++}
++
++t_Error FM_VSP_ConfigBackupPools(t_Handle h_FmVsp, t_FmBackupBmPools *p_BackupBmPools)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmVsp, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_BackupBmPools, E_INVALID_HANDLE);
++
++ p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools = (t_FmBackupBmPools *)XX_Malloc(sizeof(t_FmBackupBmPools));
++ if (!p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_BackupBmPools allocation failed"));
++ memcpy(p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools, p_BackupBmPools, sizeof(t_FmBackupBmPools));
++
++ return E_OK;
++}
++
++uint32_t FM_VSP_GetBufferDataOffset(t_Handle h_FmVsp)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, 0);
++
++ return p_FmVspEntry->bufferOffsets.dataOffset;
++}
++
++uint8_t * FM_VSP_GetBufferICInfo(t_Handle h_FmVsp, char *p_Data)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, NULL);
++
++ if (p_FmVspEntry->bufferOffsets.pcdInfoOffset == ILLEGAL_BASE)
++ return NULL;
++
++ return (uint8_t *)PTR_MOVE(p_Data, p_FmVspEntry->bufferOffsets.pcdInfoOffset);
++}
++
++t_FmPrsResult * FM_VSP_GetBufferPrsResult(t_Handle h_FmVsp, char *p_Data)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, NULL);
++
++ if (p_FmVspEntry->bufferOffsets.prsResultOffset == ILLEGAL_BASE)
++ return NULL;
++
++ return (t_FmPrsResult *)PTR_MOVE(p_Data, p_FmVspEntry->bufferOffsets.prsResultOffset);
++}
++
++uint64_t * FM_VSP_GetBufferTimeStamp(t_Handle h_FmVsp, char *p_Data)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, NULL);
++
++ if (p_FmVspEntry->bufferOffsets.timeStampOffset == ILLEGAL_BASE)
++ return NULL;
++
++ return (uint64_t *)PTR_MOVE(p_Data, p_FmVspEntry->bufferOffsets.timeStampOffset);
++}
++
++uint8_t * FM_VSP_GetBufferHashResult(t_Handle h_FmVsp, char *p_Data)
++{
++ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, NULL);
++
++ if (p_FmVspEntry->bufferOffsets.hashResultOffset == ILLEGAL_BASE)
++ return NULL;
++
++ return (uint8_t *)PTR_MOVE(p_Data, p_FmVspEntry->bufferOffsets.hashResultOffset);
++}
++
++#endif /* (DPAA_VERSION >= 11) */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h
+new file mode 100644
+index 00000000..9c171d85
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h
+@@ -0,0 +1,85 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_sp.h
++
++ @Description FM SP ...
++*//***************************************************************************/
++#ifndef __FM_SP_H
++#define __FM_SP_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++
++#include "fm_sp_common.h"
++#include "fm_common.h"
++
++
++#define __ERR_MODULE__ MODULE_FM_SP
++
++typedef struct {
++ t_FmBufferPrefixContent bufferPrefixContent;
++ e_FmDmaSwapOption dmaSwapData;
++ e_FmDmaCacheOption dmaIntContextCacheAttr;
++ e_FmDmaCacheOption dmaHeaderCacheAttr;
++ e_FmDmaCacheOption dmaScatterGatherCacheAttr;
++ bool dmaWriteOptimize;
++ uint16_t liodnOffset;
++ bool noScatherGather;
++ t_FmBufPoolDepletion *p_BufPoolDepletion;
++ t_FmBackupBmPools *p_BackupBmPools;
++ t_FmExtPools extBufPools;
++} t_FmVspEntryDriverParams;
++
++typedef struct {
++ bool valid;
++ volatile bool lock;
++ uint8_t pointedOwners;
++ uint16_t absoluteSpId;
++ uint8_t internalBufferOffset;
++ t_FmSpBufMargins bufMargins;
++ t_FmSpIntContextDataCopy intContext;
++ t_FmSpBufferOffsets bufferOffsets;
++ t_Handle h_Fm;
++ e_FmPortType portType; /**< Port type */
++ uint8_t portId; /**< Port Id - relative to type */
++ uint8_t relativeProfileId;
++ struct fm_pcd_storage_profile_regs *p_FmSpRegsBase;
++ t_FmExtPools extBufPools;
++ t_FmVspEntryDriverParams *p_FmVspEntryDriverParams;
++} t_FmVspEntry;
++
++
++#endif /* __FM_SP_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c
+new file mode 100755
+index 00000000..0f772e91
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c
+@@ -0,0 +1,197 @@
++/*
++ * Copyright 2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "fsl_fman_sp.h"
++
++
++uint32_t fman_vsp_get_statistics(struct fm_pcd_storage_profile_regs *regs,
++ uint16_t index)
++{
++ struct fm_pcd_storage_profile_regs *sp_regs;
++ sp_regs = &regs[index];
++ return ioread32be(&sp_regs->fm_sp_acnt);
++}
++
++void fman_vsp_set_statistics(struct fm_pcd_storage_profile_regs *regs,
++ uint16_t index, uint32_t value)
++{
++ struct fm_pcd_storage_profile_regs *sp_regs;
++ sp_regs = &regs[index];
++ iowrite32be(value, &sp_regs->fm_sp_acnt);
++}
++
++void fman_vsp_defconfig(struct fm_storage_profile_params *cfg)
++{
++ cfg->dma_swap_data =
++ DEFAULT_FMAN_SP_DMA_SWAP_DATA;
++ cfg->int_context_cache_attr =
++ DEFAULT_FMAN_SP_DMA_INT_CONTEXT_CACHE_ATTR;
++ cfg->header_cache_attr =
++ DEFAULT_FMAN_SP_DMA_HEADER_CACHE_ATTR;
++ cfg->scatter_gather_cache_attr =
++ DEFAULT_FMAN_SP_DMA_SCATTER_GATHER_CACHE_ATTR;
++ cfg->dma_write_optimize =
++ DEFAULT_FMAN_SP_DMA_WRITE_OPTIMIZE;
++ cfg->no_scather_gather =
++ DEFAULT_FMAN_SP_NO_SCATTER_GATHER;
++}
++
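++/* Build a depletion vector: for each selected pool that is also one of the
++ * used external pools, set the bit (mask >> j), where j is the pool's index
++ * in the used-pools list; callers pass 0x80000000 (pools-group mode) or
++ * 0x00000080 (single pool mode) as the base mask.
++ */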
++static inline uint32_t calc_vec_dep(int max_pools, bool *pools,
++ struct fman_ext_pools *ext_buf_pools, uint32_t mask)
++{
++ int i, j;
++ uint32_t vector = 0;
++ for (i = 0; i < max_pools; i++)
++ if (pools[i])
++ for (j = 0; j < ext_buf_pools->num_pools_used; j++)
++ if (i == ext_buf_pools->ext_buf_pool[j].id) {
++ vector |= mask >> j;
++ break;
++ }
++ return vector;
++}
++
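++/* Program one storage profile: the per-pool EBMPI registers, the pool
++ * depletion vector (MPD), DMA attributes, internal-context copy parameters,
++ * external buffer margins and the profile's LIODN offset.
++ */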
++void fman_vsp_init(struct fm_pcd_storage_profile_regs *regs,
++ uint16_t index, struct fm_storage_profile_params *fm_vsp_params,
++ int port_max_num_of_ext_pools, int bm_max_num_of_pools,
++ int max_num_of_pfc_priorities)
++{
++ int i = 0, j = 0;
++ struct fm_pcd_storage_profile_regs *sp_regs;
++ uint32_t tmp_reg, vector;
++ struct fman_ext_pools *ext_buf_pools = &fm_vsp_params->fm_ext_pools;
++ struct fman_buf_pool_depletion *buf_pool_depletion =
++ &fm_vsp_params->buf_pool_depletion;
++ struct fman_backup_bm_pools *backup_pools =
++ &fm_vsp_params->backup_pools;
++ struct fman_sp_int_context_data_copy *int_context_data_copy =
++ fm_vsp_params->int_context;
++ struct fman_sp_buf_margins *external_buffer_margins =
++ fm_vsp_params->buf_margins;
++ bool no_scather_gather = fm_vsp_params->no_scather_gather;
++ uint16_t liodn_offset = fm_vsp_params->liodn_offset;
++
++ sp_regs = &regs[index];
++
++	/* fill the external buffer manager pool information registers */
++ for (i = 0; i < ext_buf_pools->num_pools_used; i++) {
++ tmp_reg = FMAN_SP_EXT_BUF_POOL_VALID |
++ FMAN_SP_EXT_BUF_POOL_EN_COUNTER;
++ tmp_reg |= ((uint32_t)ext_buf_pools->ext_buf_pool[i].id <<
++ FMAN_SP_EXT_BUF_POOL_ID_SHIFT);
++ tmp_reg |= ext_buf_pools->ext_buf_pool[i].size;
++		/* functionality available only for some derivatives
++		   (limited by config) */
++ for (j = 0; j < backup_pools->num_backup_pools; j++)
++ if (ext_buf_pools->ext_buf_pool[i].id ==
++ backup_pools->pool_ids[j]) {
++ tmp_reg |= FMAN_SP_EXT_BUF_POOL_BACKUP;
++ break;
++ }
++ iowrite32be(tmp_reg, &sp_regs->fm_sp_ebmpi[i]);
++ }
++
++ /* clear unused pools */
++ for (i = ext_buf_pools->num_pools_used;
++ i < port_max_num_of_ext_pools; i++)
++ iowrite32be(0, &sp_regs->fm_sp_ebmpi[i]);
++
++	/* fill the pool depletion register */
++ tmp_reg = 0;
++ if (buf_pool_depletion->buf_pool_depletion_enabled && buf_pool_depletion->pools_grp_mode_enable) {
++ /* calculate vector for number of pools depletion */
++ vector = calc_vec_dep(bm_max_num_of_pools, buf_pool_depletion->
++ pools_to_consider, ext_buf_pools, 0x80000000);
++
++ /* configure num of pools and vector for number of pools mode */
++ tmp_reg |= (((uint32_t)buf_pool_depletion->num_pools - 1) <<
++ FMAN_SP_POOL_DEP_NUM_OF_POOLS_SHIFT);
++ tmp_reg |= vector;
++ }
++
++ if (buf_pool_depletion->buf_pool_depletion_enabled && buf_pool_depletion->single_pool_mode_enable) {
++		/* calculate the vector for single pool mode depletion */
++ vector = calc_vec_dep(bm_max_num_of_pools, buf_pool_depletion->
++ pools_to_consider_for_single_mode,
++ ext_buf_pools, 0x00000080);
++
++		/* configure the vector for single pool mode */
++ tmp_reg |= vector;
++ }
++
++ /* fill QbbPEV */
++ if (buf_pool_depletion->buf_pool_depletion_enabled) {
++ vector = 0;
++ for (i = 0; i < max_num_of_pfc_priorities; i++)
++ if (buf_pool_depletion->pfc_priorities_en[i] == TRUE)
++ vector |= 0x00000100 << i;
++ tmp_reg |= vector;
++ }
++ iowrite32be(tmp_reg, &sp_regs->fm_sp_mpd);
++
++ /* fill dma attributes register */
++ tmp_reg = 0;
++ tmp_reg |= (uint32_t)fm_vsp_params->dma_swap_data <<
++ FMAN_SP_DMA_ATTR_SWP_SHIFT;
++ tmp_reg |= (uint32_t)fm_vsp_params->int_context_cache_attr <<
++ FMAN_SP_DMA_ATTR_IC_CACHE_SHIFT;
++ tmp_reg |= (uint32_t)fm_vsp_params->header_cache_attr <<
++ FMAN_SP_DMA_ATTR_HDR_CACHE_SHIFT;
++ tmp_reg |= (uint32_t)fm_vsp_params->scatter_gather_cache_attr <<
++ FMAN_SP_DMA_ATTR_SG_CACHE_SHIFT;
++ if (fm_vsp_params->dma_write_optimize)
++ tmp_reg |= FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE;
++ iowrite32be(tmp_reg, &sp_regs->fm_sp_da);
++
++ /* IC parameters - fill internal context parameters register */
++ tmp_reg = 0;
++ tmp_reg |= (((uint32_t)int_context_data_copy->ext_buf_offset/
++ OFFSET_UNITS) << FMAN_SP_IC_TO_EXT_SHIFT);
++ tmp_reg |= (((uint32_t)int_context_data_copy->int_context_offset/
++ OFFSET_UNITS) << FMAN_SP_IC_FROM_INT_SHIFT);
++ tmp_reg |= (((uint32_t)int_context_data_copy->size/OFFSET_UNITS) <<
++ FMAN_SP_IC_SIZE_SHIFT);
++ iowrite32be(tmp_reg, &sp_regs->fm_sp_icp);
++
++ /* buffer margins - fill external buffer margins register */
++ tmp_reg = 0;
++ tmp_reg |= (((uint32_t)external_buffer_margins->start_margins) <<
++ FMAN_SP_EXT_BUF_MARG_START_SHIFT);
++ tmp_reg |= (((uint32_t)external_buffer_margins->end_margins) <<
++ FMAN_SP_EXT_BUF_MARG_END_SHIFT);
++ if (no_scather_gather)
++ tmp_reg |= FMAN_SP_SG_DISABLE;
++ iowrite32be(tmp_reg, &sp_regs->fm_sp_ebm);
++
++	/* LIODN - fill the storage profile LIODN offset register */
++ iowrite32be(liodn_offset, &sp_regs->fm_sp_spliodn);
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.c
+new file mode 100644
+index 00000000..a870b47e
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.c
+@@ -0,0 +1,5216 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm.c
++
++ @Description FM driver routines implementation.
++*//***************************************************************************/
++#include "std_ext.h"
++#include "error_ext.h"
++#include "xx_ext.h"
++#include "string_ext.h"
++#include "sprint_ext.h"
++#include "debug_ext.h"
++#include "fm_muram_ext.h"
++#include <linux/math64.h>
++
++#include "fm_common.h"
++#include "fm_ipc.h"
++#include "fm.h"
++#ifndef CONFIG_FMAN_ARM
++#include <linux/fsl/svr.h>
++#endif
++#include "fsl_fman.h"
++
++
++/****************************************/
++/* static functions */
++/****************************************/
++
++static volatile bool blockingFlag = FALSE;
++static void IpcMsgCompletionCB(t_Handle h_Fm,
++ uint8_t *p_Msg,
++ uint8_t *p_Reply,
++ uint32_t replyLength,
++ t_Error status)
++{
++ UNUSED(h_Fm);UNUSED(p_Msg);UNUSED(p_Reply);UNUSED(replyLength);UNUSED(status);
++ blockingFlag = FALSE;
++}
++
++static void FreeInitResources(t_Fm *p_Fm)
++{
++ if (p_Fm->camBaseAddr)
++ FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->camBaseAddr));
++ if (p_Fm->fifoBaseAddr)
++ FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->fifoBaseAddr));
++ if (p_Fm->resAddr)
++ FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->resAddr));
++}
++
++static bool IsFmanCtrlCodeLoaded(t_Fm *p_Fm)
++{
++ t_FMIramRegs *p_Iram;
++
++ ASSERT_COND(p_Fm);
++ p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
++
++ return (bool)!!(GET_UINT32(p_Iram->iready) & IRAM_READY);
++}
++
++static t_Error CheckFmParameters(t_Fm *p_Fm)
++{
++ if (IsFmanCtrlCodeLoaded(p_Fm) && !p_Fm->resetOnInit)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Old FMan CTRL code is loaded; FM must be reset!"));
++#if (DPAA_VERSION < 11)
++ if (!p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats ||
++ (p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats > DMA_MODE_MAX_AXI_DBG_NUM_OF_BEATS))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("axiDbgNumOfBeats has to be in the range 1 - %d", DMA_MODE_MAX_AXI_DBG_NUM_OF_BEATS));
++#endif /* (DPAA_VERSION < 11) */
++ if (p_Fm->p_FmDriverParam->dma_cam_num_of_entries % DMA_CAM_UNITS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_cam_num_of_entries has to be divisble by %d", DMA_CAM_UNITS));
++// if (!p_Fm->p_FmDriverParam->dma_cam_num_of_entries || (p_Fm->p_FmDriverParam->dma_cam_num_of_entries > DMA_MODE_MAX_CAM_NUM_OF_ENTRIES))
++// RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_cam_num_of_entries has to be in the range 1 - %d", DMA_MODE_MAX_CAM_NUM_OF_ENTRIES));
++ if (p_Fm->p_FmDriverParam->dma_comm_qtsh_asrt_emer > DMA_THRESH_MAX_COMMQ)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_comm_qtsh_asrt_emer can not be larger than %d", DMA_THRESH_MAX_COMMQ));
++ if (p_Fm->p_FmDriverParam->dma_comm_qtsh_clr_emer > DMA_THRESH_MAX_COMMQ)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_comm_qtsh_clr_emer can not be larger than %d", DMA_THRESH_MAX_COMMQ));
++ if (p_Fm->p_FmDriverParam->dma_comm_qtsh_clr_emer >= p_Fm->p_FmDriverParam->dma_comm_qtsh_asrt_emer)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_comm_qtsh_clr_emer must be smaller than dma_comm_qtsh_asrt_emer"));
++#if (DPAA_VERSION < 11)
++ if (p_Fm->p_FmDriverParam->dma_read_buf_tsh_asrt_emer > DMA_THRESH_MAX_BUF)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_read_buf_tsh_asrt_emer can not be larger than %d", DMA_THRESH_MAX_BUF));
++ if (p_Fm->p_FmDriverParam->dma_read_buf_tsh_clr_emer > DMA_THRESH_MAX_BUF)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_read_buf_tsh_clr_emer can not be larger than %d", DMA_THRESH_MAX_BUF));
++ if (p_Fm->p_FmDriverParam->dma_read_buf_tsh_clr_emer >= p_Fm->p_FmDriverParam->dma_read_buf_tsh_asrt_emer)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_read_buf_tsh_clr_emer must be smaller than dma_read_buf_tsh_asrt_emer"));
++ if (p_Fm->p_FmDriverParam->dma_write_buf_tsh_asrt_emer > DMA_THRESH_MAX_BUF)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_write_buf_tsh_asrt_emer can not be larger than %d", DMA_THRESH_MAX_BUF));
++ if (p_Fm->p_FmDriverParam->dma_write_buf_tsh_clr_emer > DMA_THRESH_MAX_BUF)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_write_buf_tsh_clr_emer can not be larger than %d", DMA_THRESH_MAX_BUF));
++ if (p_Fm->p_FmDriverParam->dma_write_buf_tsh_clr_emer >= p_Fm->p_FmDriverParam->dma_write_buf_tsh_asrt_emer)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_write_buf_tsh_clr_emer must be smaller than dma_write_buf_tsh_asrt_emer"));
++#else /* (DPAA_VERSION >= 11) */
++ if ((p_Fm->p_FmDriverParam->dma_dbg_cnt_mode == E_FMAN_DMA_DBG_CNT_INT_READ_EM)||
++ (p_Fm->p_FmDriverParam->dma_dbg_cnt_mode == E_FMAN_DMA_DBG_CNT_INT_WRITE_EM) ||
++ (p_Fm->p_FmDriverParam->dma_dbg_cnt_mode == E_FMAN_DMA_DBG_CNT_RAW_WAR_PROT))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_dbg_cnt_mode value not supported by this integration."));
++ if ((p_Fm->p_FmDriverParam->dma_emergency_bus_select == FM_DMA_MURAM_READ_EMERGENCY)||
++ (p_Fm->p_FmDriverParam->dma_emergency_bus_select == FM_DMA_MURAM_WRITE_EMERGENCY))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("emergencyBusSelect value not supported by this integration."));
++ if (p_Fm->p_FmDriverParam->dma_stop_on_bus_error)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_stop_on_bus_error not supported by this integration."));
++#ifdef FM_AID_MODE_NO_TNUM_SW005
++ if (p_Fm->p_FmDriverParam->dma_aid_mode != E_FMAN_DMA_AID_OUT_PORT_ID)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_aid_mode not supported by this integration."));
++#endif /* FM_AID_MODE_NO_TNUM_SW005 */
++ if (p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_axi_dbg_num_of_beats not supported by this integration."));
++#endif /* (DPAA_VERSION < 11) */
++
++ if (!p_Fm->p_FmStateStruct->fmClkFreq)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fmClkFreq must be set."));
++ if (USEC_TO_CLK(p_Fm->p_FmDriverParam->dma_watchdog, p_Fm->p_FmStateStruct->fmClkFreq) > DMA_MAX_WATCHDOG)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("dma_watchdog depends on FM clock. dma_watchdog(in microseconds) * clk (in Mhz), may not exceed 0x08x", DMA_MAX_WATCHDOG));
++
++#if (DPAA_VERSION >= 11)
++ if ((p_Fm->partVSPBase + p_Fm->partNumOfVSPs) > FM_VSP_MAX_NUM_OF_ENTRIES)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("partVSPBase+partNumOfVSPs out of range!!!"));
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (p_Fm->p_FmStateStruct->totalFifoSize % BMI_FIFO_UNITS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("totalFifoSize number has to be divisible by %d", BMI_FIFO_UNITS));
++ if (!p_Fm->p_FmStateStruct->totalFifoSize ||
++ (p_Fm->p_FmStateStruct->totalFifoSize > BMI_MAX_FIFO_SIZE))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("totalFifoSize (currently defined as %d) has to be in the range of 256 to %d",
++ p_Fm->p_FmStateStruct->totalFifoSize,
++ BMI_MAX_FIFO_SIZE));
++ if (!p_Fm->p_FmStateStruct->totalNumOfTasks ||
++ (p_Fm->p_FmStateStruct->totalNumOfTasks > BMI_MAX_NUM_OF_TASKS))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("totalNumOfTasks number has to be in the range 1 - %d", BMI_MAX_NUM_OF_TASKS));
++
++#ifdef FM_HAS_TOTAL_DMAS
++ if (!p_Fm->p_FmStateStruct->maxNumOfOpenDmas ||
++ (p_Fm->p_FmStateStruct->maxNumOfOpenDmas > BMI_MAX_NUM_OF_DMAS))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("maxNumOfOpenDmas number has to be in the range 1 - %d", BMI_MAX_NUM_OF_DMAS));
++#endif /* FM_HAS_TOTAL_DMAS */
++
++ if (p_Fm->p_FmDriverParam->disp_limit_tsh > FPM_MAX_DISP_LIMIT)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("disp_limit_tsh can't be greater than %d", FPM_MAX_DISP_LIMIT));
++
++ if (!p_Fm->f_Exception)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided"));
++ if (!p_Fm->f_BusError)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided"));
++
++#ifdef FM_NO_WATCHDOG
++ if ((p_Fm->p_FmStateStruct->revInfo.majorRev == 2) &&
++ (p_Fm->p_FmDriverParam->dma_watchdog))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("watchdog!"));
++#endif /* FM_NO_WATCHDOG */
++
++#ifdef FM_ECC_HALT_NO_SYNC_ERRATA_10GMAC_A008
++ if ((p_Fm->p_FmStateStruct->revInfo.majorRev < 6) &&
++ (p_Fm->p_FmDriverParam->halt_on_unrecov_ecc_err))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("HaltOnEccError!"));
++#endif /* FM_ECC_HALT_NO_SYNC_ERRATA_10GMAC_A008 */
++
++#ifdef FM_NO_TNUM_AGING
++ if ((p_Fm->p_FmStateStruct->revInfo.majorRev != 4) &&
++ (p_Fm->p_FmStateStruct->revInfo.majorRev < 6))
++ if (p_Fm->p_FmDriverParam->tnum_aging_period)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Tnum aging!"));
++#endif /* FM_NO_TNUM_AGING */
++
++ /* check that user did not set revision-dependent exceptions */
++#ifdef FM_NO_DISPATCH_RAM_ECC
++ if ((p_Fm->p_FmStateStruct->revInfo.majorRev != 4) &&
++ (p_Fm->p_FmStateStruct->revInfo.majorRev < 6))
++ if (p_Fm->userSetExceptions & FM_EX_BMI_DISPATCH_RAM_ECC)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("exception e_FM_EX_BMI_DISPATCH_RAM_ECC!"));
++#endif /* FM_NO_DISPATCH_RAM_ECC */
++
++#ifdef FM_QMI_NO_ECC_EXCEPTIONS
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev == 4)
++ if (p_Fm->userSetExceptions & (FM_EX_QMI_SINGLE_ECC | FM_EX_QMI_DOUBLE_ECC))
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("exception e_FM_EX_QMI_SINGLE_ECC/e_FM_EX_QMI_DOUBLE_ECC!"));
++#endif /* FM_QMI_NO_ECC_EXCEPTIONS */
++
++#ifdef FM_QMI_NO_SINGLE_ECC_EXCEPTION
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ if (p_Fm->userSetExceptions & FM_EX_QMI_SINGLE_ECC)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("exception e_FM_EX_QMI_SINGLE_ECC!"));
++#endif /* FM_QMI_NO_SINGLE_ECC_EXCEPTION */
++
++ return E_OK;
++}
++
++
++static void SendIpcIsr(t_Fm *p_Fm, uint32_t macEvent, uint32_t pendingReg)
++{
++ ASSERT_COND(p_Fm->guestId == NCSW_MASTER_ID);
++
++ if (p_Fm->intrMng[macEvent].guestId == NCSW_MASTER_ID)
++ p_Fm->intrMng[macEvent].f_Isr(p_Fm->intrMng[macEvent].h_SrcHandle);
++
++ /* If the MAC is running on a guest partition and we have an IPC session
++ with it, we inform it about the event through IPC; otherwise, we ignore
++ the event. */
++ else if (p_Fm->h_IpcSessions[p_Fm->intrMng[macEvent].guestId])
++ {
++ t_Error err;
++ t_FmIpcIsr fmIpcIsr;
++ t_FmIpcMsg msg;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_GUEST_ISR;
++ fmIpcIsr.pendingReg = pendingReg;
++ fmIpcIsr.boolErr = FALSE;
++ memcpy(msg.msgBody, &fmIpcIsr, sizeof(fmIpcIsr));
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[p_Fm->intrMng[macEvent].guestId],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(fmIpcIsr),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ }
++ else
++ DBG(TRACE, ("FM Guest mode, without IPC - can't call ISR!"));
++}
++
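++/* The error/event handlers below read the pending-events word of the
++ * corresponding FM block and fan each set bit out to the user's exception
++ * (or bus-error) callback. */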
++static void BmiErrEvent(t_Fm *p_Fm)
++{
++ uint32_t event;
++ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
++
++
++ event = fman_get_bmi_err_event(bmi_rg);
++
++ if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_STORAGE_PROFILE_ECC);
++ if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_LIST_RAM_ECC);
++ if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_STATISTICS_RAM_ECC);
++ if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_DISPATCH_RAM_ECC);
++}
++
++static void QmiErrEvent(t_Fm *p_Fm)
++{
++ uint32_t event;
++ struct fman_qmi_regs *qmi_rg = p_Fm->p_FmQmiRegs;
++
++ event = fman_get_qmi_err_event(qmi_rg);
++
++ if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_QMI_DOUBLE_ECC);
++ if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
++}
++
++static void DmaErrEvent(t_Fm *p_Fm)
++{
++ uint32_t status, com_id;
++ uint8_t tnum;
++ uint8_t hardwarePortId;
++ uint8_t relativePortId;
++ uint16_t liodn;
++ struct fman_dma_regs *dma_rg = p_Fm->p_FmDmaRegs;
++
++ status = fman_get_dma_err_event(dma_rg);
++
++ if (status & DMA_STATUS_BUS_ERR)
++ {
++ com_id = fman_get_dma_com_id(dma_rg);
++ hardwarePortId = (uint8_t)(((com_id & DMA_TRANSFER_PORTID_MASK) >> DMA_TRANSFER_PORTID_SHIFT));
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++ HW_PORT_ID_TO_SW_PORT_ID(relativePortId, hardwarePortId);
++ tnum = (uint8_t)((com_id & DMA_TRANSFER_TNUM_MASK) >> DMA_TRANSFER_TNUM_SHIFT);
++ liodn = (uint16_t)(com_id & DMA_TRANSFER_LIODN_MASK);
++ ASSERT_COND(p_Fm->p_FmStateStruct->portsTypes[hardwarePortId] != e_FM_PORT_TYPE_DUMMY);
++ p_Fm->f_BusError(p_Fm->h_App,
++ p_Fm->p_FmStateStruct->portsTypes[hardwarePortId],
++ relativePortId,
++ fman_get_dma_addr(dma_rg),
++ tnum,
++ liodn);
++ }
++ if (status & DMA_STATUS_FM_SPDAT_ECC)
++ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_SINGLE_PORT_ECC);
++ if (status & DMA_STATUS_READ_ECC)
++ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_READ_ECC);
++ if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
++ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_SYSTEM_WRITE_ECC);
++ if (status & DMA_STATUS_FM_WRITE_ECC)
++ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_FM_WRITE_ECC);
++}
++
++static void FpmErrEvent(t_Fm *p_Fm)
++{
++ uint32_t event;
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++ event = fman_get_fpm_err_event(fpm_rg);
++
++ if ((event & FPM_EV_MASK_DOUBLE_ECC) && (event & FPM_EV_MASK_DOUBLE_ECC_EN))
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_FPM_DOUBLE_ECC);
++ if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_FPM_STALL_ON_TASKS);
++ if ((event & FPM_EV_MASK_SINGLE_ECC) && (event & FPM_EV_MASK_SINGLE_ECC_EN))
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_FPM_SINGLE_ECC);
++}
++
++static void MuramErrIntr(t_Fm *p_Fm)
++{
++ uint32_t event;
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++ event = fman_get_muram_err_event(fpm_rg);
++
++ if (event & FPM_RAM_MURAM_ECC)
++ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_MURAM_ECC);
++}
++
++static void IramErrIntr(t_Fm *p_Fm)
++{
++ uint32_t event;
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++ event = fman_get_iram_err_event(fpm_rg);
++
++ if (event & FPM_RAM_IRAM_ECC)
++ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_IRAM_ECC);
++}
++
++static void QmiEvent(t_Fm *p_Fm)
++{
++ uint32_t event;
++ struct fman_qmi_regs *qmi_rg = p_Fm->p_FmQmiRegs;
++
++ event = fman_get_qmi_event(qmi_rg);
++
++ if (event & QMI_INTR_EN_SINGLE_ECC)
++ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_QMI_SINGLE_ECC);
++}
++
++static void UnimplementedIsr(t_Handle h_Arg)
++{
++ UNUSED(h_Arg);
++
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unimplemented ISR!"));
++}
++
++static void UnimplementedFmanCtrlIsr(t_Handle h_Arg, uint32_t event)
++{
++ UNUSED(h_Arg); UNUSED(event);
++
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unimplemented FmCtl ISR!"));
++}
++
++static void EnableTimeStamp(t_Fm *p_Fm)
++{
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++ ASSERT_COND(p_Fm->p_FmStateStruct);
++ ASSERT_COND(p_Fm->p_FmStateStruct->count1MicroBit);
++
++ fman_enable_time_stamp(fpm_rg, p_Fm->p_FmStateStruct->count1MicroBit, p_Fm->p_FmStateStruct->fmClkFreq);
++
++ p_Fm->p_FmStateStruct->enabledTimeStamp = TRUE;
++}
++
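++/* Wipe the IRAM: fill it with 0xffffffff through the auto-increment data
++ * port, then read back the last word so we know the writes have landed. */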
++static t_Error ClearIRam(t_Fm *p_Fm)
++{
++ t_FMIramRegs *p_Iram;
++ int i;
++ int iram_size;
++
++ ASSERT_COND(p_Fm);
++ p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
++ iram_size = FM_IRAM_SIZE(p_Fm->p_FmStateStruct->revInfo.majorRev,p_Fm->p_FmStateStruct->revInfo.minorRev);
++
++ /* Enable the auto-increment */
++ WRITE_UINT32(p_Iram->iadd, IRAM_IADD_AIE);
++ while (GET_UINT32(p_Iram->iadd) != IRAM_IADD_AIE) ;
++
++ for (i=0; i < (iram_size/4); i++)
++ WRITE_UINT32(p_Iram->idata, 0xffffffff);
++
++ WRITE_UINT32(p_Iram->iadd, iram_size - 4);
++ CORE_MemoryBarrier();
++ while (GET_UINT32(p_Iram->idata) != 0xffffffff) ;
++
++ return E_OK;
++}
++
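++/* Load the FMan-controller microcode into IRAM via the auto-increment data
++ * port, pad the image up to a 16-byte boundary, optionally read it back for
++ * verification (fwVerify), and finally set IRAM_READY so the controller
++ * runs from the patched code. */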
++static t_Error LoadFmanCtrlCode(t_Fm *p_Fm)
++{
++ t_FMIramRegs *p_Iram;
++ int i;
++ uint32_t tmp;
++ uint8_t compTo16;
++
++ ASSERT_COND(p_Fm);
++ p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
++
++ /* Enable the auto-increment */
++ WRITE_UINT32(p_Iram->iadd, IRAM_IADD_AIE);
++ while (GET_UINT32(p_Iram->iadd) != IRAM_IADD_AIE) ;
++
++ for (i=0; i < (p_Fm->firmware.size / 4); i++)
++ WRITE_UINT32(p_Iram->idata, p_Fm->firmware.p_Code[i]);
++
++ compTo16 = (uint8_t)(p_Fm->firmware.size % 16);
++ if (compTo16)
++ for (i=0; i < ((16-compTo16) / 4); i++)
++ WRITE_UINT32(p_Iram->idata, 0xffffffff);
++
++ WRITE_UINT32(p_Iram->iadd,p_Fm->firmware.size-4);
++ while (GET_UINT32(p_Iram->iadd) != (p_Fm->firmware.size-4)) ;
++
++ /* verify that writing has completed */
++ while (GET_UINT32(p_Iram->idata) != p_Fm->firmware.p_Code[(p_Fm->firmware.size / 4)-1]) ;
++
++ if (p_Fm->fwVerify)
++ {
++ WRITE_UINT32(p_Iram->iadd, IRAM_IADD_AIE);
++ while (GET_UINT32(p_Iram->iadd) != IRAM_IADD_AIE) ;
++ for (i=0; i < (p_Fm->firmware.size / 4); i++)
++ {
++ tmp = GET_UINT32(p_Iram->idata);
++ if (tmp != p_Fm->firmware.p_Code[i])
++ RETURN_ERROR(MAJOR, E_WRITE_FAILED,
++ ("UCode write error : write 0x%x, read 0x%x",
++ p_Fm->firmware.p_Code[i],tmp));
++ }
++ WRITE_UINT32(p_Iram->iadd, 0x0);
++ }
++
++ /* Enable patch from IRAM */
++ WRITE_UINT32(p_Iram->iready, IRAM_READY);
++ XX_UDelay(1000);
++
++ DBG(INFO, ("FMan-Controller code (ver %d.%d.%d) loaded to IRAM.",
++ ((uint16_t *)p_Fm->firmware.p_Code)[2],
++ ((uint8_t *)p_Fm->firmware.p_Code)[6],
++ ((uint8_t *)p_Fm->firmware.p_Code)[7]));
++
++ return E_OK;
++}
++
++#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
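++/* Bugzilla 6173 workaround: write a debug instruction at IRAM address 0,
++ * enable running from IRAM and reset the FMan, verify the debug status
++ * register, then reload the microcode and reset again to start it; the
++ * fmbm_spliodn values are saved and restored around the resets. */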
++static t_Error FwNotResetErratumBugzilla6173WA(t_Fm *p_Fm)
++{
++ t_FMIramRegs *p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
++ uint32_t tmpReg;
++ uint32_t savedSpliodn[63];
++
++ /* write to IRAM first location the debug instruction */
++ WRITE_UINT32(p_Iram->iadd, 0);
++ while (GET_UINT32(p_Iram->iadd) != 0) ;
++ WRITE_UINT32(p_Iram->idata, FM_FW_DEBUG_INSTRUCTION);
++
++ WRITE_UINT32(p_Iram->iadd, 0);
++ while (GET_UINT32(p_Iram->iadd) != 0) ;
++ while (GET_UINT32(p_Iram->idata) != FM_FW_DEBUG_INSTRUCTION) ;
++
++ /* Enable patch from IRAM */
++ WRITE_UINT32(p_Iram->iready, IRAM_READY);
++ CORE_MemoryBarrier();
++ XX_UDelay(100);
++ IO2MemCpy32((uint8_t *)savedSpliodn,
++ (uint8_t *)p_Fm->p_FmBmiRegs->fmbm_spliodn,
++ 63*sizeof(uint32_t));
++
++ /* reset FMAN */
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rstc, FPM_RSTC_FM_RESET);
++ CORE_MemoryBarrier();
++ XX_UDelay(100);
++
++ /* verify breakpoint debug status register */
++ tmpReg = GET_UINT32(*(uint32_t *)UINT_TO_PTR(p_Fm->baseAddr + FM_DEBUG_STATUS_REGISTER_OFFSET));
++ if (!tmpReg)
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid debug status register value is '0'"));
++
++ /*************************************/
++ /* Load FMan-Controller code to IRAM */
++ /*************************************/
++ ClearIRam(p_Fm);
++ if (p_Fm->firmware.p_Code &&
++ (LoadFmanCtrlCode(p_Fm) != E_OK))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++ XX_UDelay(100);
++
++ /* reset FMAN again to start the microcode */
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rstc, FPM_RSTC_FM_RESET);
++ CORE_MemoryBarrier();
++ XX_UDelay(100);
++ Mem2IOCpy32((uint8_t *)p_Fm->p_FmBmiRegs->fmbm_spliodn,
++ (uint8_t *)savedSpliodn,
++ 63*sizeof(uint32_t));
++
++ if (fman_is_qmi_halt_not_busy_state(p_Fm->p_FmQmiRegs))
++ {
++ fman_resume(p_Fm->p_FmFpmRegs);
++ CORE_MemoryBarrier();
++ XX_UDelay(100);
++ }
++
++ return E_OK;
++}
++#endif /* FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
++
++static void GuestErrorIsr(t_Fm *p_Fm, uint32_t pending)
++{
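++/* helper macros: invoke the error ISR registered for the given MAC index */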
++#define FM_G_CALL_1G_MAC_ERR_ISR(_id) \
++do { \
++ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].h_SrcHandle);\
++} while (0)
++#define FM_G_CALL_10G_MAC_ERR_ISR(_id) \
++do { \
++ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].h_SrcHandle);\
++} while (0)
++
++ /* error interrupts */
++ if (pending & ERR_INTR_EN_1G_MAC0)
++ FM_G_CALL_1G_MAC_ERR_ISR(0);
++ if (pending & ERR_INTR_EN_1G_MAC1)
++ FM_G_CALL_1G_MAC_ERR_ISR(1);
++ if (pending & ERR_INTR_EN_1G_MAC2)
++ FM_G_CALL_1G_MAC_ERR_ISR(2);
++ if (pending & ERR_INTR_EN_1G_MAC3)
++ FM_G_CALL_1G_MAC_ERR_ISR(3);
++ if (pending & ERR_INTR_EN_1G_MAC4)
++ FM_G_CALL_1G_MAC_ERR_ISR(4);
++ if (pending & ERR_INTR_EN_1G_MAC5)
++ FM_G_CALL_1G_MAC_ERR_ISR(5);
++ if (pending & ERR_INTR_EN_1G_MAC6)
++ FM_G_CALL_1G_MAC_ERR_ISR(6);
++ if (pending & ERR_INTR_EN_1G_MAC7)
++ FM_G_CALL_1G_MAC_ERR_ISR(7);
++ if (pending & ERR_INTR_EN_10G_MAC0)
++ FM_G_CALL_10G_MAC_ERR_ISR(0);
++ if (pending & ERR_INTR_EN_10G_MAC1)
++ FM_G_CALL_10G_MAC_ERR_ISR(1);
++}
++
++static void GuestEventIsr(t_Fm *p_Fm, uint32_t pending)
++{
++#define FM_G_CALL_1G_MAC_ISR(_id) \
++do { \
++ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].h_SrcHandle);\
++} while (0)
++#define FM_G_CALL_10G_MAC_ISR(_id) \
++do { \
++ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].h_SrcHandle);\
++} while (0)
++
++ if (pending & INTR_EN_1G_MAC0)
++ FM_G_CALL_1G_MAC_ISR(0);
++ if (pending & INTR_EN_1G_MAC1)
++ FM_G_CALL_1G_MAC_ISR(1);
++ if (pending & INTR_EN_1G_MAC2)
++ FM_G_CALL_1G_MAC_ISR(2);
++ if (pending & INTR_EN_1G_MAC3)
++ FM_G_CALL_1G_MAC_ISR(3);
++ if (pending & INTR_EN_1G_MAC4)
++ FM_G_CALL_1G_MAC_ISR(4);
++ if (pending & INTR_EN_1G_MAC5)
++ FM_G_CALL_1G_MAC_ISR(5);
++ if (pending & INTR_EN_1G_MAC6)
++ FM_G_CALL_1G_MAC_ISR(6);
++ if (pending & INTR_EN_1G_MAC7)
++ FM_G_CALL_1G_MAC_ISR(7);
++ if (pending & INTR_EN_10G_MAC0)
++ FM_G_CALL_10G_MAC_ISR(0);
++ if (pending & INTR_EN_10G_MAC1)
++ FM_G_CALL_10G_MAC_ISR(1);
++ if (pending & INTR_EN_TMR)
++ p_Fm->intrMng[e_FM_EV_TMR].f_Isr(p_Fm->intrMng[e_FM_EV_TMR].h_SrcHandle);
++}
++
++#if (DPAA_VERSION >= 11)
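++/* Program a port's VSP (storage profile) window. A guest without mapped
++ * BMI registers forwards the request to the master over IPC; otherwise the
++ * window is written directly via fman_set_vsp_window(). */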
++static t_Error SetVSPWindow(t_Handle h_Fm,
++ uint8_t hardwarePortId,
++ uint8_t baseStorageProfile,
++ uint8_t log2NumOfProfiles)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++
++ ASSERT_COND(h_Fm);
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->p_FmBmiRegs &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcVspSetPortWindow fmIpcVspSetPortWindow;
++ t_FmIpcMsg msg;
++ t_Error err = E_OK;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&fmIpcVspSetPortWindow, 0, sizeof(t_FmIpcVspSetPortWindow));
++ fmIpcVspSetPortWindow.hardwarePortId = hardwarePortId;
++ fmIpcVspSetPortWindow.baseStorageProfile = baseStorageProfile;
++ fmIpcVspSetPortWindow.log2NumOfProfiles = log2NumOfProfiles;
++ msg.msgId = FM_VSP_SET_PORT_WINDOW;
++ memcpy(msg.msgBody, &fmIpcVspSetPortWindow, sizeof(t_FmIpcVspSetPortWindow));
++
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ return E_OK;
++ }
++ else if (!p_Fm->p_FmBmiRegs)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++
++ fman_set_vsp_window(p_Fm->p_FmBmiRegs,
++ hardwarePortId,
++ baseStorageProfile,
++ log2NumOfProfiles);
++
++ return E_OK;
++}
++
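++/* Reserve a contiguous range of storage profiles for a partition: guests
++ * request it from the master over IPC; the master scans the ownership
++ * table under the spinlock and, if the whole range is free, marks it with
++ * guestId. Returns the base on success, ILLEGAL_BASE otherwise. */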
++static uint8_t AllocVSPsForPartition(t_Handle h_Fm, uint8_t base, uint8_t numOfProfiles, uint8_t guestId)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ uint8_t profilesFound = 0;
++ int i = 0;
++ uint32_t intFlags;
++
++ if (!numOfProfiles)
++ return E_OK;
++
++ if ((numOfProfiles > FM_VSP_MAX_NUM_OF_ENTRIES) ||
++ (base + numOfProfiles > FM_VSP_MAX_NUM_OF_ENTRIES))
++ return (uint8_t)ILLEGAL_BASE;
++
++ if (p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ t_Error err;
++ uint32_t replyLength;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
++ ipcAllocParams.guestId = p_Fm->guestId;
++ ipcAllocParams.num = p_Fm->partNumOfVSPs;
++ ipcAllocParams.base = p_Fm->partVSPBase;
++ msg.msgId = FM_VSP_ALLOC;
++ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
++ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if ((err != E_OK) ||
++ (replyLength != (sizeof(uint32_t) + sizeof(uint8_t))))
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ else
++ memcpy((uint8_t*)&p_Fm->partVSPBase, reply.replyBody, sizeof(uint8_t));
++ if (p_Fm->partVSPBase == (uint8_t)(ILLEGAL_BASE))
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ DBG(WARNING, ("FM Guest mode, without IPC - can't validate VSP range!"));
++ return (uint8_t)ILLEGAL_BASE;
++ }
++
++ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
++ for (i = base; i < base + numOfProfiles; i++)
++ if (p_Fm->p_FmSp->profiles[i].profilesMng.ownerId == (uint8_t)ILLEGAL_BASE)
++ profilesFound++;
++ else
++ break;
++
++ if (profilesFound == numOfProfiles)
++ for (i = base; i<base + numOfProfiles; i++)
++ p_Fm->p_FmSp->profiles[i].profilesMng.ownerId = guestId;
++ else
++ {
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++ return (uint8_t)ILLEGAL_BASE;
++ }
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++
++ return base;
++}
++
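++/* Release a partition's storage-profile range; the mirror of
++ * AllocVSPsForPartition (forwarded to the master over IPC on guests). */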
++static void FreeVSPsForPartition(t_Handle h_Fm, uint8_t base, uint8_t numOfProfiles, uint8_t guestId)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ int i = 0;
++
++ ASSERT_COND(p_Fm);
++
++ if (p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++ t_Error err;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
++ ipcAllocParams.guestId = p_Fm->guestId;
++ ipcAllocParams.num = p_Fm->partNumOfVSPs;
++ ipcAllocParams.base = p_Fm->partVSPBase;
++ msg.msgId = FM_VSP_FREE;
++ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
++ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return;
++ }
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ DBG(WARNING, ("FM Guest mode, without IPC - can't validate VSP range!"));
++ return;
++ }
++
++ ASSERT_COND(p_Fm->p_FmSp);
++
++ for (i = base; i < base + numOfProfiles; i++)
++ {
++ if (p_Fm->p_FmSp->profiles[i].profilesMng.ownerId == guestId)
++ p_Fm->p_FmSp->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE;
++ else
++ DBG(WARNING, ("Request for freeing storage profile window which wasn't allocated to this partition"));
++ }
++}
++#endif /* (DPAA_VERSION >= 11) */
++
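++/* Guest-side IPC handler: the only message processed is FM_GUEST_ISR,
++ * which routes the pending-events word to the error or event dispatcher. */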
++static t_Error FmGuestHandleIpcMsgCB(t_Handle h_Fm,
++ uint8_t *p_Msg,
++ uint32_t msgLength,
++ uint8_t *p_Reply,
++ uint32_t *p_ReplyLength)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_FmIpcMsg *p_IpcMsg = (t_FmIpcMsg*)p_Msg;
++
++ UNUSED(p_Reply);
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((msgLength > sizeof(uint32_t)), E_INVALID_VALUE);
++
++#ifdef DISABLE_SANITY_CHECKS
++ UNUSED(msgLength);
++#endif /* DISABLE_SANITY_CHECKS */
++
++ ASSERT_COND(p_Msg);
++
++ *p_ReplyLength = 0;
++
++ switch (p_IpcMsg->msgId)
++ {
++ case (FM_GUEST_ISR):
++ {
++ t_FmIpcIsr ipcIsr;
++
++ memcpy((uint8_t*)&ipcIsr, p_IpcMsg->msgBody, sizeof(t_FmIpcIsr));
++ if (ipcIsr.boolErr)
++ GuestErrorIsr(p_Fm, ipcIsr.pendingReg);
++ else
++ GuestEventIsr(p_Fm, ipcIsr.pendingReg);
++ break;
++ }
++ default:
++ *p_ReplyLength = 0;
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("command not found!!!"));
++ }
++ return E_OK;
++}
++
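++/* Master-side IPC handler: decode the message id, unpack the IPC structure
++ * from the message body, call the matching inter-module function and
++ * marshal the result (error code first, then any reply body) back. */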
++static t_Error FmHandleIpcMsgCB(t_Handle h_Fm,
++ uint8_t *p_Msg,
++ uint32_t msgLength,
++ uint8_t *p_Reply,
++ uint32_t *p_ReplyLength)
++{
++ t_Error err;
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_FmIpcMsg *p_IpcMsg = (t_FmIpcMsg*)p_Msg;
++ t_FmIpcReply *p_IpcReply = (t_FmIpcReply*)p_Reply;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((msgLength >= sizeof(uint32_t)), E_INVALID_VALUE);
++
++#ifdef DISABLE_SANITY_CHECKS
++ UNUSED(msgLength);
++#endif /* DISABLE_SANITY_CHECKS */
++
++ ASSERT_COND(p_IpcMsg);
++
++ memset(p_IpcReply, 0, (sizeof(uint8_t) * FM_IPC_MAX_REPLY_SIZE));
++ *p_ReplyLength = 0;
++
++ switch (p_IpcMsg->msgId)
++ {
++ case (FM_GET_SET_PORT_PARAMS):
++ {
++ t_FmIpcPortInInitParams ipcInitParams;
++ t_FmInterModulePortInitParams initParams;
++ t_FmIpcPortOutInitParams ipcOutInitParams;
++
++ memcpy((uint8_t*)&ipcInitParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortInInitParams));
++ initParams.hardwarePortId = ipcInitParams.hardwarePortId;
++ initParams.portType = (e_FmPortType)ipcInitParams.enumPortType;
++ initParams.independentMode = (bool)(ipcInitParams.boolIndependentMode);
++ initParams.liodnOffset = ipcInitParams.liodnOffset;
++ initParams.numOfTasks = ipcInitParams.numOfTasks;
++ initParams.numOfExtraTasks = ipcInitParams.numOfExtraTasks;
++ initParams.numOfOpenDmas = ipcInitParams.numOfOpenDmas;
++ initParams.numOfExtraOpenDmas = ipcInitParams.numOfExtraOpenDmas;
++ initParams.sizeOfFifo = ipcInitParams.sizeOfFifo;
++ initParams.extraSizeOfFifo = ipcInitParams.extraSizeOfFifo;
++ initParams.deqPipelineDepth = ipcInitParams.deqPipelineDepth;
++ initParams.maxFrameLength = ipcInitParams.maxFrameLength;
++ initParams.liodnBase = ipcInitParams.liodnBase;
++
++ p_IpcReply->error = (uint32_t)FmGetSetPortParams(h_Fm, &initParams);
++
++ ipcOutInitParams.ipcPhysAddr.high = initParams.fmMuramPhysBaseAddr.high;
++ ipcOutInitParams.ipcPhysAddr.low = initParams.fmMuramPhysBaseAddr.low;
++ ipcOutInitParams.sizeOfFifo = initParams.sizeOfFifo;
++ ipcOutInitParams.extraSizeOfFifo = initParams.extraSizeOfFifo;
++ ipcOutInitParams.numOfTasks = initParams.numOfTasks;
++ ipcOutInitParams.numOfExtraTasks = initParams.numOfExtraTasks;
++ ipcOutInitParams.numOfOpenDmas = initParams.numOfOpenDmas;
++ ipcOutInitParams.numOfExtraOpenDmas = initParams.numOfExtraOpenDmas;
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcOutInitParams, sizeof(ipcOutInitParams));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcPortOutInitParams);
++ break;
++ }
++ case (FM_SET_SIZE_OF_FIFO):
++ {
++ t_FmIpcPortRsrcParams ipcPortRsrcParams;
++
++ memcpy((uint8_t*)&ipcPortRsrcParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortRsrcParams));
++ p_IpcReply->error = (uint32_t)FmSetSizeOfFifo(h_Fm,
++ ipcPortRsrcParams.hardwarePortId,
++ &ipcPortRsrcParams.val,
++ &ipcPortRsrcParams.extra,
++ (bool)ipcPortRsrcParams.boolInitialConfig);
++ *p_ReplyLength = sizeof(uint32_t);
++ break;
++ }
++ case (FM_SET_NUM_OF_TASKS):
++ {
++ t_FmIpcPortRsrcParams ipcPortRsrcParams;
++
++ memcpy((uint8_t*)&ipcPortRsrcParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortRsrcParams));
++ p_IpcReply->error = (uint32_t)FmSetNumOfTasks(h_Fm, ipcPortRsrcParams.hardwarePortId,
++ (uint8_t*)&ipcPortRsrcParams.val,
++ (uint8_t*)&ipcPortRsrcParams.extra,
++ (bool)ipcPortRsrcParams.boolInitialConfig);
++ *p_ReplyLength = sizeof(uint32_t);
++ break;
++ }
++ case (FM_SET_NUM_OF_OPEN_DMAS):
++ {
++ t_FmIpcPortRsrcParams ipcPortRsrcParams;
++
++ memcpy((uint8_t*)&ipcPortRsrcParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortRsrcParams));
++ p_IpcReply->error = (uint32_t)FmSetNumOfOpenDmas(h_Fm, ipcPortRsrcParams.hardwarePortId,
++ (uint8_t*)&ipcPortRsrcParams.val,
++ (uint8_t*)&ipcPortRsrcParams.extra,
++ (bool)ipcPortRsrcParams.boolInitialConfig);
++ *p_ReplyLength = sizeof(uint32_t);
++ break;
++ }
++ case (FM_RESUME_STALLED_PORT):
++ *p_ReplyLength = sizeof(uint32_t);
++ p_IpcReply->error = (uint32_t)FmResumeStalledPort(h_Fm, p_IpcMsg->msgBody[0]);
++ break;
++ case (FM_MASTER_IS_ALIVE):
++ {
++ uint8_t guestId = p_IpcMsg->msgBody[0];
++ /* build the FM master partition IPC address */
++ memset(p_Fm->fmIpcHandlerModuleName[guestId], 0, (sizeof(char)) * MODULE_NAME_SIZE);
++ if (Sprint (p_Fm->fmIpcHandlerModuleName[guestId], "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, guestId) != (guestId<10 ? 6:7))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++ p_Fm->h_IpcSessions[guestId] = XX_IpcInitSession(p_Fm->fmIpcHandlerModuleName[guestId], p_Fm->fmModuleName);
++ if (p_Fm->h_IpcSessions[guestId] == NULL)
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM Master IPC session for guest %d", guestId));
++ *(uint8_t*)(p_IpcReply->replyBody) = 1;
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ break;
++ }
++ case (FM_IS_PORT_STALLED):
++ {
++ bool tmp;
++
++ p_IpcReply->error = (uint32_t)FmIsPortStalled(h_Fm, p_IpcMsg->msgBody[0], &tmp);
++ *(uint8_t*)(p_IpcReply->replyBody) = (uint8_t)tmp;
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ break;
++ }
++ case (FM_RESET_MAC):
++ {
++ t_FmIpcMacParams ipcMacParams;
++
++ memcpy((uint8_t*)&ipcMacParams, p_IpcMsg->msgBody, sizeof(t_FmIpcMacParams));
++ p_IpcReply->error = (uint32_t)FmResetMac(p_Fm,
++ (e_FmMacType)(ipcMacParams.enumType),
++ ipcMacParams.id);
++ *p_ReplyLength = sizeof(uint32_t);
++ break;
++ }
++ case (FM_SET_MAC_MAX_FRAME):
++ {
++ t_FmIpcMacMaxFrameParams ipcMacMaxFrameParams;
++
++ memcpy((uint8_t*)&ipcMacMaxFrameParams, p_IpcMsg->msgBody, sizeof(t_FmIpcMacMaxFrameParams));
++ err = FmSetMacMaxFrame(p_Fm,
++ (e_FmMacType)(ipcMacMaxFrameParams.macParams.enumType),
++ ipcMacMaxFrameParams.macParams.id,
++ ipcMacMaxFrameParams.maxFrameLength);
++ if (err != E_OK)
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ break;
++ }
++#if (DPAA_VERSION >= 11)
++ case (FM_VSP_ALLOC) :
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ uint8_t vspBase;
++ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
++ vspBase = AllocVSPsForPartition(h_Fm, (uint8_t)ipcAllocParams.base, (uint8_t)ipcAllocParams.num, ipcAllocParams.guestId);
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&vspBase, sizeof(uint8_t));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ break;
++ }
++ case (FM_VSP_FREE) :
++ {
++ t_FmIpcResourceAllocParams ipcAllocParams;
++ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
++ FreeVSPsForPartition(h_Fm, (uint8_t)ipcAllocParams.base, (uint8_t)ipcAllocParams.num, ipcAllocParams.guestId);
++ break;
++ }
++ case (FM_VSP_SET_PORT_WINDOW) :
++ {
++ t_FmIpcVspSetPortWindow ipcVspSetPortWindow;
++ memcpy(&ipcVspSetPortWindow, p_IpcMsg->msgBody, sizeof(t_FmIpcVspSetPortWindow));
++ err = SetVSPWindow(h_Fm,
++ ipcVspSetPortWindow.hardwarePortId,
++ ipcVspSetPortWindow.baseStorageProfile,
++ ipcVspSetPortWindow.log2NumOfProfiles);
++ return err;
++ }
++ case (FM_SET_CONG_GRP_PFC_PRIO) :
++ {
++ t_FmIpcSetCongestionGroupPfcPriority fmIpcSetCongestionGroupPfcPriority;
++ memcpy(&fmIpcSetCongestionGroupPfcPriority, p_IpcMsg->msgBody, sizeof(t_FmIpcSetCongestionGroupPfcPriority));
++ err = FmSetCongestionGroupPFCpriority(h_Fm,
++ fmIpcSetCongestionGroupPfcPriority.congestionGroupId,
++ fmIpcSetCongestionGroupPfcPriority.priorityBitMap);
++ return err;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ case (FM_FREE_PORT):
++ {
++ t_FmInterModulePortFreeParams portParams;
++ t_FmIpcPortFreeParams ipcPortParams;
++
++ memcpy((uint8_t*)&ipcPortParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortFreeParams));
++ portParams.hardwarePortId = ipcPortParams.hardwarePortId;
++ portParams.portType = (e_FmPortType)(ipcPortParams.enumPortType);
++ portParams.deqPipelineDepth = ipcPortParams.deqPipelineDepth;
++ FmFreePortParams(h_Fm, &portParams);
++ break;
++ }
++ case (FM_REGISTER_INTR):
++ {
++ t_FmIpcRegisterIntr ipcRegIntr;
++
++ memcpy((uint8_t*)&ipcRegIntr, p_IpcMsg->msgBody, sizeof(ipcRegIntr));
++ p_Fm->intrMng[ipcRegIntr.event].guestId = ipcRegIntr.guestId;
++ break;
++ }
++ case (FM_GET_PARAMS):
++ {
++ t_FmIpcParams ipcParams;
++
++ /* Get clock frequency */
++ ipcParams.fmClkFreq = p_Fm->p_FmStateStruct->fmClkFreq;
++ ipcParams.fmMacClkFreq = p_Fm->p_FmStateStruct->fmMacClkFreq;
++
++ fman_get_revision(p_Fm->p_FmFpmRegs,&ipcParams.majorRev,&ipcParams.minorRev);
++
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcParams, sizeof(t_FmIpcParams));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcParams);
++ break;
++ }
++ case (FM_GET_FMAN_CTRL_CODE_REV):
++ {
++ t_FmCtrlCodeRevisionInfo fmanCtrlRevInfo;
++ t_FmIpcFmanCtrlCodeRevisionInfo ipcRevInfo;
++
++ p_IpcReply->error = (uint32_t)FM_GetFmanCtrlCodeRevision(h_Fm, &fmanCtrlRevInfo);
++ ipcRevInfo.packageRev = fmanCtrlRevInfo.packageRev;
++ ipcRevInfo.majorRev = fmanCtrlRevInfo.majorRev;
++ ipcRevInfo.minorRev = fmanCtrlRevInfo.minorRev;
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcRevInfo, sizeof(t_FmIpcFmanCtrlCodeRevisionInfo));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcFmanCtrlCodeRevisionInfo);
++ break;
++ }
++
++ case (FM_DMA_STAT):
++ {
++ t_FmDmaStatus dmaStatus;
++ t_FmIpcDmaStatus ipcDmaStatus;
++
++ FM_GetDmaStatus(h_Fm, &dmaStatus);
++ ipcDmaStatus.boolCmqNotEmpty = (uint8_t)dmaStatus.cmqNotEmpty;
++ ipcDmaStatus.boolBusError = (uint8_t)dmaStatus.busError;
++ ipcDmaStatus.boolReadBufEccError = (uint8_t)dmaStatus.readBufEccError;
++ ipcDmaStatus.boolWriteBufEccSysError = (uint8_t)dmaStatus.writeBufEccSysError;
++ ipcDmaStatus.boolWriteBufEccFmError = (uint8_t)dmaStatus.writeBufEccFmError;
++ ipcDmaStatus.boolSinglePortEccError = (uint8_t)dmaStatus.singlePortEccError;
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcDmaStatus, sizeof(t_FmIpcDmaStatus));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcDmaStatus);
++ break;
++ }
++ case (FM_ALLOC_FMAN_CTRL_EVENT_REG):
++ p_IpcReply->error = (uint32_t)FmAllocFmanCtrlEventReg(h_Fm, (uint8_t*)p_IpcReply->replyBody);
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ break;
++ case (FM_FREE_FMAN_CTRL_EVENT_REG):
++ FmFreeFmanCtrlEventReg(h_Fm, p_IpcMsg->msgBody[0]);
++ break;
++ case (FM_GET_TIMESTAMP_SCALE):
++ {
++ uint32_t timeStamp = FmGetTimeStampScale(h_Fm);
++
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&timeStamp, sizeof(uint32_t));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ break;
++ }
++ case (FM_GET_COUNTER):
++ {
++ e_FmCounters inCounter;
++ uint32_t outCounter;
++
++ memcpy((uint8_t*)&inCounter, p_IpcMsg->msgBody, sizeof(uint32_t));
++ outCounter = FM_GetCounter(h_Fm, inCounter);
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&outCounter, sizeof(uint32_t));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ break;
++ }
++ case (FM_SET_FMAN_CTRL_EVENTS_ENABLE):
++ {
++ t_FmIpcFmanEvents ipcFmanEvents;
++
++ memcpy((uint8_t*)&ipcFmanEvents, p_IpcMsg->msgBody, sizeof(t_FmIpcFmanEvents));
++ FmSetFmanCtrlIntr(h_Fm,
++ ipcFmanEvents.eventRegId,
++ ipcFmanEvents.enableEvents);
++ break;
++ }
++ case (FM_GET_FMAN_CTRL_EVENTS_ENABLE):
++ {
++ uint32_t tmp = FmGetFmanCtrlIntr(h_Fm, p_IpcMsg->msgBody[0]);
++
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&tmp, sizeof(uint32_t));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ break;
++ }
++ case (FM_GET_PHYS_MURAM_BASE):
++ {
++ t_FmPhysAddr physAddr;
++ t_FmIpcPhysAddr ipcPhysAddr;
++
++ FmGetPhysicalMuramBase(h_Fm, &physAddr);
++ ipcPhysAddr.high = physAddr.high;
++ ipcPhysAddr.low = physAddr.low;
++ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcPhysAddr, sizeof(t_FmIpcPhysAddr));
++ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcPhysAddr);
++ break;
++ }
++ case (FM_ENABLE_RAM_ECC):
++ {
++ if (((err = FM_EnableRamsEcc(h_Fm)) != E_OK) ||
++ ((err = FM_SetException(h_Fm, e_FM_EX_IRAM_ECC, TRUE)) != E_OK) ||
++ ((err = FM_SetException(h_Fm, e_FM_EX_MURAM_ECC, TRUE)) != E_OK))
++#if (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0))
++ UNUSED(err);
++#else
++ REPORT_ERROR(MINOR, err, NO_MSG);
++#endif /* (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0)) */
++ break;
++ }
++ case (FM_DISABLE_RAM_ECC):
++ {
++
++ if (((err = FM_SetException(h_Fm, e_FM_EX_IRAM_ECC, FALSE)) != E_OK) ||
++ ((err = FM_SetException(h_Fm, e_FM_EX_MURAM_ECC, FALSE)) != E_OK) ||
++ ((err = FM_DisableRamsEcc(h_Fm)) != E_OK))
++#if (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0))
++ UNUSED(err);
++#else
++ REPORT_ERROR(MINOR, err, NO_MSG);
++#endif /* (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0)) */
++ break;
++ }
++ case (FM_SET_NUM_OF_FMAN_CTRL):
++ {
++ t_FmIpcPortNumOfFmanCtrls ipcPortNumOfFmanCtrls;
++
++ memcpy((uint8_t*)&ipcPortNumOfFmanCtrls, p_IpcMsg->msgBody, sizeof(t_FmIpcPortNumOfFmanCtrls));
++ err = FmSetNumOfRiscsPerPort(h_Fm,
++ ipcPortNumOfFmanCtrls.hardwarePortId,
++ ipcPortNumOfFmanCtrls.numOfFmanCtrls,
++ ipcPortNumOfFmanCtrls.orFmanCtrl);
++ if (err != E_OK)
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ break;
++ }
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++ case (FM_10G_TX_ECC_WA):
++ p_IpcReply->error = (uint32_t)Fm10GTxEccWorkaround(h_Fm, p_IpcMsg->msgBody[0]);
++ *p_ReplyLength = sizeof(uint32_t);
++ break;
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++ default:
++ *p_ReplyLength = 0;
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("command not found!!!"));
++ }
++ return E_OK;
++}
++
++
++/****************************************/
++/* Inter-Module functions */
++/****************************************/
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
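++/* Errata 10GMAC-A004 workaround: guests forward the request to the master
++ * over IPC; on the master it must run before the 10G Rx/Tx ports are
++ * initialized, with the controller code already loaded. */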
++t_Error Fm10GTxEccWorkaround(t_Handle h_Fm, uint8_t macId)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_Error err = E_OK;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++ uint8_t rxHardwarePortId, txHardwarePortId;
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_10G_TX_ECC_WA;
++ msg.msgBody[0] = macId;
++ replyLength = sizeof(uint32_t);
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(macId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return (t_Error)(reply.error);
++ }
++
++ SANITY_CHECK_RETURN_ERROR((macId == 0), E_NOT_SUPPORTED);
++ SANITY_CHECK_RETURN_ERROR(IsFmanCtrlCodeLoaded(p_Fm), E_INVALID_STATE);
++
++ rxHardwarePortId = SwPortIdToHwPortId(e_FM_PORT_TYPE_RX_10G,
++ macId,
++ p_Fm->p_FmStateStruct->revInfo.majorRev,
++ p_Fm->p_FmStateStruct->revInfo.minorRev);
++ txHardwarePortId = SwPortIdToHwPortId(e_FM_PORT_TYPE_TX_10G,
++ macId,
++ p_Fm->p_FmStateStruct->revInfo.majorRev,
++ p_Fm->p_FmStateStruct->revInfo.minorRev);
++ if ((p_Fm->p_FmStateStruct->portsTypes[rxHardwarePortId] != e_FM_PORT_TYPE_DUMMY) ||
++ (p_Fm->p_FmStateStruct->portsTypes[txHardwarePortId] != e_FM_PORT_TYPE_DUMMY))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("MAC should be initialized prior to Rx and Tx ports!"));
++
++ return fman_set_erratum_10gmac_a004_wa(fpm_rg);
++}
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++
++uint16_t FmGetTnumAgingPeriod(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++
++ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_Fm->p_FmDriverParam, E_INVALID_STATE, 0);
++
++ return p_Fm->tnumAgingPeriod;
++}
++
++t_Error FmSetPortPreFetchConfiguration(t_Handle h_Fm,
++ uint8_t portNum,
++ bool preFetchConfigured)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++
++ p_Fm->portsPreFetchConfigured[portNum] = TRUE;
++ p_Fm->portsPreFetchValue[portNum] = preFetchConfigured;
++
++ return E_OK;
++}
++
++t_Error FmGetPortPreFetchConfiguration(t_Handle h_Fm,
++ uint8_t portNum,
++ bool *p_PortConfigured,
++ bool *p_PreFetchConfigured)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++
++ /* If prefetch was not configured yet (neither enabled nor disabled),
++ report the port as not configured */
++ if (!p_Fm->portsPreFetchConfigured[portNum])
++ {
++ *p_PortConfigured = FALSE;
++ *p_PreFetchConfigured = FALSE;
++ }
++ else
++ {
++ *p_PortConfigured = TRUE;
++ *p_PreFetchConfigured = p_Fm->portsPreFetchValue[portNum];
++ }
++
++ return E_OK;
++}
++
++t_Error FmSetCongestionGroupPFCpriority(t_Handle h_Fm,
++ uint32_t congestionGroupId,
++ uint8_t priorityBitMap)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ uint32_t regNum;
++
++ ASSERT_COND(h_Fm);
++
++ if (congestionGroupId > FM_PORT_NUM_OF_CONGESTION_GRPS)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("Congestion group ID bigger than %d",
++ FM_PORT_NUM_OF_CONGESTION_GRPS));
++
++ if (p_Fm->guestId == NCSW_MASTER_ID)
++ {
++ ASSERT_COND(p_Fm->baseAddr);
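++ /* each PFC-priority register covers four congestion groups, indexed
++ from the last group downwards */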
++ regNum = (FM_PORT_NUM_OF_CONGESTION_GRPS - 1 - congestionGroupId) / 4;
++ fman_set_congestion_group_pfc_priority((uint32_t *)((p_Fm->baseAddr+FM_MM_CGP)),
++ congestionGroupId,
++ priorityBitMap,
++ regNum);
++ }
++ else if (p_Fm->h_IpcSessions[0])
++ {
++ t_Error err;
++ t_FmIpcMsg msg;
++ t_FmIpcSetCongestionGroupPfcPriority fmIpcSetCongestionGroupPfcPriority;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&fmIpcSetCongestionGroupPfcPriority, 0, sizeof(t_FmIpcSetCongestionGroupPfcPriority));
++ fmIpcSetCongestionGroupPfcPriority.congestionGroupId = congestionGroupId;
++ fmIpcSetCongestionGroupPfcPriority.priorityBitMap = priorityBitMap;
++
++ msg.msgId = FM_SET_CONG_GRP_PFC_PRIO;
++ memcpy(msg.msgBody, &fmIpcSetCongestionGroupPfcPriority, sizeof(t_FmIpcSetCongestionGroupPfcPriority));
++
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("guest without IPC!"));
++
++ return E_OK;
++}
++
++uintptr_t FmGetPcdPrsBaseAddr(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
++
++ if (!p_Fm->baseAddr)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE,
++ ("No base-addr; probably Guest with IPC!"));
++ return 0;
++ }
++
++ return (p_Fm->baseAddr + FM_MM_PRS);
++}
++
++uintptr_t FmGetPcdKgBaseAddr(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
++
++ if (!p_Fm->baseAddr)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE,
++ ("No base-addr; probably Guest with IPC!"));
++ return 0;
++ }
++
++ return (p_Fm->baseAddr + FM_MM_KG);
++}
++
++uintptr_t FmGetPcdPlcrBaseAddr(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
++
++ if (!p_Fm->baseAddr)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE,
++ ("No base-addr; probably Guest with IPC!"));
++ return 0;
++ }
++
++ return (p_Fm->baseAddr + FM_MM_PLCR);
++}
++
++#if (DPAA_VERSION >= 11)
++uintptr_t FmGetVSPBaseAddr(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
++
++ return p_Fm->vspBaseAddr;
++}
++#endif /* (DPAA_VERSION >= 11) */
++
++t_Handle FmGetMuramHandle(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, NULL);
++
++ return (p_Fm->h_FmMuram);
++}
++
++void FmGetPhysicalMuramBase(t_Handle h_Fm, t_FmPhysAddr *p_FmPhysAddr)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ if (p_Fm->fmMuramPhysBaseAddr)
++ {
++ /* General FM driver initialization */
++ p_FmPhysAddr->low = (uint32_t)p_Fm->fmMuramPhysBaseAddr;
++ p_FmPhysAddr->high = (uint8_t)((p_Fm->fmMuramPhysBaseAddr & 0x000000ff00000000LL) >> 32);
++ return;
++ }
++
++ ASSERT_COND(p_Fm->guestId != NCSW_MASTER_ID);
++
++ if (p_Fm->h_IpcSessions[0])
++ {
++ t_Error err;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++ t_FmIpcPhysAddr ipcPhysAddr;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_GET_PHYS_MURAM_BASE;
++ replyLength = sizeof(uint32_t) + sizeof(t_FmPhysAddr);
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ return;
++ }
++ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmPhysAddr)))
++ {
++ REPORT_ERROR(MINOR, E_INVALID_VALUE,("IPC reply length mismatch"));
++ return;
++ }
++ memcpy((uint8_t*)&ipcPhysAddr, reply.replyBody, sizeof(t_FmIpcPhysAddr));
++ p_FmPhysAddr->high = ipcPhysAddr.high;
++ p_FmPhysAddr->low = ipcPhysAddr.low;
++ }
++ else
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("running in guest-mode without neither IPC nor mapped register!"));
++}
++
++#if (DPAA_VERSION >= 11)
++t_Error FmVSPAllocForPort (t_Handle h_Fm,
++ e_FmPortType portType,
++ uint8_t portId,
++ uint8_t numOfVSPs)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ t_Error err = E_OK;
++ uint32_t profilesFound, intFlags;
++ uint8_t first, i;
++ uint8_t log2Num;
++ uint8_t swPortIndex=0, hardwarePortId;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++
++ if (!numOfVSPs)
++ return E_OK;
++
++ if (numOfVSPs > FM_VSP_MAX_NUM_OF_ENTRIES)
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numProfiles can not be bigger than %d.",FM_VSP_MAX_NUM_OF_ENTRIES));
++
++ if (!POWER_OF_2(numOfVSPs))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numProfiles must be a power of 2."));
++
++ LOG2((uint64_t)numOfVSPs, log2Num);
++
++ if ((log2Num == 0) || (p_Fm->partVSPBase == 0))
++ first = 0;
++ else
++ first = 1<<log2Num;
++
++ if (first > (p_Fm->partVSPBase + p_Fm->partNumOfVSPs))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("can not allocate storage profile port window"));
++
++ if (first < p_Fm->partVSPBase)
++ while (first < p_Fm->partVSPBase)
++ first = first + numOfVSPs;
++
++ if ((first + numOfVSPs) > (p_Fm->partVSPBase + p_Fm->partNumOfVSPs))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("can not allocate storage profile port window"));
++
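++ /* search for numOfVSPs consecutive free profiles, restarting at the
++ next aligned base whenever an allocated profile is hit */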
++ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
++ profilesFound = 0;
++ for (i=first; i < p_Fm->partVSPBase + p_Fm->partNumOfVSPs; )
++ {
++ if (!p_Fm->p_FmSp->profiles[i].profilesMng.allocated)
++ {
++ profilesFound++;
++ i++;
++ if (profilesFound == numOfVSPs)
++ break;
++ }
++ else
++ {
++ profilesFound = 0;
++ /* advance i to the next aligned address */
++ first = i = (uint8_t)(first + numOfVSPs);
++ }
++ }
++ if (profilesFound == numOfVSPs)
++ for (i = first; i<first + numOfVSPs; i++)
++ p_Fm->p_FmSp->profiles[i].profilesMng.allocated = TRUE;
++ else
++ {
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++ RETURN_ERROR(MINOR, E_FULL, ("No profiles."));
++ }
++
++ hardwarePortId = SwPortIdToHwPortId(portType,
++ portId,
++ p_Fm->p_FmStateStruct->revInfo.majorRev,
++ p_Fm->p_FmStateStruct->revInfo.minorRev);
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
++
++ p_Fm->p_FmSp->portsMapping[swPortIndex].numOfProfiles = numOfVSPs;
++ p_Fm->p_FmSp->portsMapping[swPortIndex].profilesBase = first;
++
++ if ((err = SetVSPWindow(h_Fm,hardwarePortId, first,log2Num)) != E_OK)
++ for (i = first; i < first + numOfVSPs; i++)
++ p_Fm->p_FmSp->profiles[i].profilesMng.allocated = FALSE;
++
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++
++ return err;
++}
++
++t_Error FmVSPFreeForPort(t_Handle h_Fm,
++ e_FmPortType portType,
++ uint8_t portId)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ uint8_t swPortIndex=0, hardwarePortId, first, numOfVSPs, i;
++ uint32_t intFlags;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++
++ hardwarePortId = SwPortIdToHwPortId(portType,
++ portId,
++ p_Fm->p_FmStateStruct->revInfo.majorRev,
++ p_Fm->p_FmStateStruct->revInfo.minorRev);
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
++
++ numOfVSPs = (uint8_t)p_Fm->p_FmSp->portsMapping[swPortIndex].numOfProfiles;
++ first = (uint8_t)p_Fm->p_FmSp->portsMapping[swPortIndex].profilesBase;
++
++ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
++ for (i = first; i < first + numOfVSPs; i++)
++ p_Fm->p_FmSp->profiles[i].profilesMng.allocated = FALSE;
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++
++ p_Fm->p_FmSp->portsMapping[swPortIndex].numOfProfiles = 0;
++ p_Fm->p_FmSp->portsMapping[swPortIndex].profilesBase = 0;
++
++ return E_OK;
++}
++#endif /* (DPAA_VERSION >= 11) */
++
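++/* Allocate one of the FM_NUM_OF_FMAN_CTRL_EVENT_REGS controller event
++ * registers: guests request one from the master over IPC; the master hands
++ * out the first unused slot. */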
++t_Error FmAllocFmanCtrlEventReg(t_Handle h_Fm, uint8_t *p_EventId)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ uint8_t i;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_Error err;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_ALLOC_FMAN_CTRL_EVENT_REG;
++ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (replyLength != (sizeof(uint32_t) + sizeof(uint8_t)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++
++ *p_EventId = *(uint8_t*)(reply.replyBody);
++
++ return (t_Error)(reply.error);
++ }
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("running in guest-mode without IPC!"));
++
++ for (i=0;i<FM_NUM_OF_FMAN_CTRL_EVENT_REGS;i++)
++ if (!p_Fm->usedEventRegs[i])
++ {
++ p_Fm->usedEventRegs[i] = TRUE;
++ *p_EventId = i;
++ break;
++ }
++
++ if (i==FM_NUM_OF_FMAN_CTRL_EVENT_REGS)
++ RETURN_ERROR(MAJOR, E_BUSY, ("No resource - FMan controller event register."));
++
++ return E_OK;
++}
++
++void FmFreeFmanCtrlEventReg(t_Handle h_Fm, uint8_t eventId)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_Error err;
++ t_FmIpcMsg msg;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_FREE_FMAN_CTRL_EVENT_REG;
++ msg.msgBody[0] = eventId;
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(eventId),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ return;
++ }
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("running in guest-mode without IPC!"));
++ return;
++ }
++
++ ((t_Fm*)h_Fm)->usedEventRegs[eventId] = FALSE;
++}
++
++void FmSetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, uint32_t enableEvents)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->p_FmFpmRegs &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcFmanEvents fmanCtrl;
++ t_Error err;
++ t_FmIpcMsg msg;
++
++ fmanCtrl.eventRegId = eventRegId;
++ fmanCtrl.enableEvents = enableEvents;
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_SET_FMAN_CTRL_EVENTS_ENABLE;
++ memcpy(msg.msgBody, &fmanCtrl, sizeof(fmanCtrl));
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(fmanCtrl),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ return;
++ }
++ else if (!p_Fm->p_FmFpmRegs)
++ {
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++ return;
++ }
++
++ ASSERT_COND(eventRegId < FM_NUM_OF_FMAN_CTRL_EVENT_REGS);
++ fman_set_ctrl_intr(fpm_rg, eventRegId, enableEvents);
++}
++
++uint32_t FmGetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->p_FmFpmRegs &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_Error err;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength, ctrlIntr;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_GET_FMAN_CTRL_EVENTS_ENABLE;
++ msg.msgBody[0] = eventRegId;
++ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(eventRegId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ return 0;
++ }
++ if (replyLength != (sizeof(uint32_t) + sizeof(uint32_t)))
++ {
++ REPORT_ERROR(MINOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return 0;
++ }
++ memcpy((uint8_t*)&ctrlIntr, reply.replyBody, sizeof(uint32_t));
++ return ctrlIntr;
++ }
++ else if (!p_Fm->p_FmFpmRegs)
++ {
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++ return 0;
++ }
++
++ return fman_get_ctrl_intr(fpm_rg, eventRegId);
++}
++
++void FmRegisterIntr(t_Handle h_Fm,
++ e_FmEventModules module,
++ uint8_t modId,
++ e_FmIntrType intrType,
++ void (*f_Isr) (t_Handle h_Arg),
++ t_Handle h_Arg)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ int event = 0;
++
++ ASSERT_COND(h_Fm);
++
++ GET_FM_MODULE_EVENT(module, modId, intrType, event);
++ ASSERT_COND(event < e_FM_EV_DUMMY_LAST);
++
++ /* register in local FM structure */
++ p_Fm->intrMng[event].f_Isr = f_Isr;
++ p_Fm->intrMng[event].h_SrcHandle = h_Arg;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcRegisterIntr fmIpcRegisterIntr;
++ t_Error err;
++ t_FmIpcMsg msg;
++
++ /* register in Master FM structure */
++ fmIpcRegisterIntr.event = (uint32_t)event;
++ fmIpcRegisterIntr.guestId = p_Fm->guestId;
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_REGISTER_INTR;
++ memcpy(msg.msgBody, &fmIpcRegisterIntr, sizeof(fmIpcRegisterIntr));
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(fmIpcRegisterIntr),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ }
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("running in guest-mode without IPC!"));
++}
++
++void FmUnregisterIntr(t_Handle h_Fm,
++ e_FmEventModules module,
++ uint8_t modId,
++ e_FmIntrType intrType)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ int event = 0;
++
++ ASSERT_COND(h_Fm);
++
++ GET_FM_MODULE_EVENT(module, modId,intrType, event);
++ ASSERT_COND(event < e_FM_EV_DUMMY_LAST);
++
++ p_Fm->intrMng[event].f_Isr = UnimplementedIsr;
++ p_Fm->intrMng[event].h_SrcHandle = NULL;
++}
++
++void FmRegisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, void (*f_Isr) (t_Handle h_Arg, uint32_t event), t_Handle h_Arg)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ ASSERT_COND(eventRegId<FM_NUM_OF_FMAN_CTRL_EVENT_REGS);
++
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM in guest-mode"));
++ return;
++ }
++
++ p_Fm->fmanCtrlIntr[eventRegId].f_Isr = f_Isr;
++ p_Fm->fmanCtrlIntr[eventRegId].h_SrcHandle = h_Arg;
++}
++
++void FmUnregisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ ASSERT_COND(eventRegId<FM_NUM_OF_FMAN_CTRL_EVENT_REGS);
++
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM in guest-mode"));
++ return;
++ }
++
++ p_Fm->fmanCtrlIntr[eventRegId].f_Isr = UnimplementedFmanCtrlIsr;
++ p_Fm->fmanCtrlIntr[eventRegId].h_SrcHandle = NULL;
++}
++
++void FmRegisterPcd(t_Handle h_Fm, t_Handle h_FmPcd)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ if (p_Fm->h_Pcd)
++ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("PCD already set"));
++
++ p_Fm->h_Pcd = h_FmPcd;
++}
++
++void FmUnregisterPcd(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ if (!p_Fm->h_Pcd)
++ REPORT_ERROR(MAJOR, E_NOT_FOUND, ("PCD handle!"));
++
++ p_Fm->h_Pcd = NULL;
++}
++
++t_Handle FmGetPcdHandle(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ return p_Fm->h_Pcd;
++}
++
++uint8_t FmGetId(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0xff);
++
++ return p_Fm->p_FmStateStruct->fmId;
++}
++
++t_Error FmReset(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++
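++    /* issue an FM soft reset and give the hardware time to complete it */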
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rstc, FPM_RSTC_FM_RESET);
++ CORE_MemoryBarrier();
++ XX_UDelay(100);
++
++ return E_OK;
++}
++
++t_Error FmSetNumOfRiscsPerPort(t_Handle h_Fm,
++ uint8_t hardwarePortId,
++ uint8_t numOfFmanCtrls,
++ t_FmFmanCtrl orFmanCtrl)
++{
++
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_fpm_regs *fpm_rg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++    SANITY_CHECK_RETURN_ERROR(((numOfFmanCtrls > 0) && (numOfFmanCtrls < 3)), E_INVALID_HANDLE);
++
++ fpm_rg = p_Fm->p_FmFpmRegs;
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->p_FmFpmRegs &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_Error err;
++ t_FmIpcPortNumOfFmanCtrls params;
++ t_FmIpcMsg msg;
++
++ memset(&msg, 0, sizeof(msg));
++ params.hardwarePortId = hardwarePortId;
++ params.numOfFmanCtrls = numOfFmanCtrls;
++ params.orFmanCtrl = orFmanCtrl;
++ msg.msgId = FM_SET_NUM_OF_FMAN_CTRL;
++ memcpy(msg.msgBody, &params, sizeof(params));
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) +sizeof(params),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ return E_OK;
++ }
++ else if (!p_Fm->p_FmFpmRegs)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++
++ fman_set_num_of_riscs_per_port(fpm_rg, hardwarePortId, numOfFmanCtrls, orFmanCtrl);
++
++ return E_OK;
++}
++
++t_Error FmGetSetPortParams(t_Handle h_Fm, t_FmInterModulePortInitParams *p_PortParams)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_Error err;
++ uint32_t intFlags;
++ uint8_t hardwarePortId = p_PortParams->hardwarePortId, macId;
++ struct fman_rg fman_rg;
++
++ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
++ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
++ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
++ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
++
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ t_FmIpcPortInInitParams portInParams;
++ t_FmIpcPortOutInitParams portOutParams;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++
++ portInParams.hardwarePortId = p_PortParams->hardwarePortId;
++ portInParams.enumPortType = (uint32_t)p_PortParams->portType;
++ portInParams.boolIndependentMode= (uint8_t)p_PortParams->independentMode;
++ portInParams.liodnOffset = p_PortParams->liodnOffset;
++ portInParams.numOfTasks = p_PortParams->numOfTasks;
++ portInParams.numOfExtraTasks = p_PortParams->numOfExtraTasks;
++ portInParams.numOfOpenDmas = p_PortParams->numOfOpenDmas;
++ portInParams.numOfExtraOpenDmas = p_PortParams->numOfExtraOpenDmas;
++ portInParams.sizeOfFifo = p_PortParams->sizeOfFifo;
++ portInParams.extraSizeOfFifo = p_PortParams->extraSizeOfFifo;
++ portInParams.deqPipelineDepth = p_PortParams->deqPipelineDepth;
++ portInParams.maxFrameLength = p_PortParams->maxFrameLength;
++ portInParams.liodnBase = p_PortParams->liodnBase;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_GET_SET_PORT_PARAMS;
++ memcpy(msg.msgBody, &portInParams, sizeof(portInParams));
++ replyLength = (sizeof(uint32_t) + sizeof(t_FmIpcPortOutInitParams));
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) +sizeof(portInParams),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmIpcPortOutInitParams)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ memcpy((uint8_t*)&portOutParams, reply.replyBody, sizeof(t_FmIpcPortOutInitParams));
++
++ p_PortParams->fmMuramPhysBaseAddr.high = portOutParams.ipcPhysAddr.high;
++ p_PortParams->fmMuramPhysBaseAddr.low = portOutParams.ipcPhysAddr.low;
++ p_PortParams->numOfTasks = portOutParams.numOfTasks;
++ p_PortParams->numOfExtraTasks = portOutParams.numOfExtraTasks;
++ p_PortParams->numOfOpenDmas = portOutParams.numOfOpenDmas;
++ p_PortParams->numOfExtraOpenDmas = portOutParams.numOfExtraOpenDmas;
++ p_PortParams->sizeOfFifo = portOutParams.sizeOfFifo;
++ p_PortParams->extraSizeOfFifo = portOutParams.extraSizeOfFifo;
++
++ return (t_Error)(reply.error);
++ }
++
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++
++ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
++ if (p_PortParams->independentMode)
++ {
++ /* set port parameters */
++ p_Fm->independentMode = p_PortParams->independentMode;
++ /* disable dispatch limit */
++ fman_qmi_disable_dispatch_limit(fman_rg.fpm_rg);
++ }
++
++ if (p_PortParams->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
++ {
++ if (p_Fm->hcPortInitialized)
++ {
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Only one host command port is allowed."));
++ }
++ else
++ p_Fm->hcPortInitialized = TRUE;
++ }
++ p_Fm->p_FmStateStruct->portsTypes[hardwarePortId] = p_PortParams->portType;
++
++ err = FmSetNumOfTasks(p_Fm, hardwarePortId, &p_PortParams->numOfTasks, &p_PortParams->numOfExtraTasks, TRUE);
++ if (err)
++ {
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++#ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev != 4)
++#endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
++ if ((p_PortParams->portType != e_FM_PORT_TYPE_RX) &&
++ (p_PortParams->portType != e_FM_PORT_TYPE_RX_10G))
++ /* for transmit & O/H ports */
++ {
++ uint8_t enqTh;
++ uint8_t deqTh;
++
++ /* update qmi ENQ/DEQ threshold */
++ p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums += p_PortParams->deqPipelineDepth;
++ enqTh = fman_get_qmi_enq_th(fman_rg.qmi_rg);
++ /* if enqTh is too big, we reduce it to the max value that is still OK */
++ if (enqTh >= (QMI_MAX_NUM_OF_TNUMS - p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums))
++ {
++ enqTh = (uint8_t)(QMI_MAX_NUM_OF_TNUMS - p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums - 1);
++ fman_set_qmi_enq_th(fman_rg.qmi_rg, enqTh);
++ }
++
++ deqTh = fman_get_qmi_deq_th(fman_rg.qmi_rg);
++ /* if deqTh is too small, we enlarge it to the min value that is still OK.
++ deqTh may not be larger than 63 (QMI_MAX_NUM_OF_TNUMS-1). */
++ if ((deqTh <= p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums) && (deqTh < QMI_MAX_NUM_OF_TNUMS-1))
++ {
++ deqTh = (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums + 1);
++ fman_set_qmi_deq_th(fman_rg.qmi_rg, deqTh);
++ }
++ }
++
++#ifdef FM_LOW_END_RESTRICTION
++ if ((hardwarePortId==0x1) || (hardwarePortId==0x29))
++ {
++ if (p_Fm->p_FmStateStruct->lowEndRestriction)
++ {
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("OP #0 cannot work with Tx Port #1."));
++ }
++ else
++ p_Fm->p_FmStateStruct->lowEndRestriction = TRUE;
++ }
++#endif /* FM_LOW_END_RESTRICTION */
++
++ err = FmSetSizeOfFifo(p_Fm,
++ hardwarePortId,
++ &p_PortParams->sizeOfFifo,
++ &p_PortParams->extraSizeOfFifo,
++ TRUE);
++ if (err)
++ {
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ err = FmSetNumOfOpenDmas(p_Fm,
++ hardwarePortId,
++ &p_PortParams->numOfOpenDmas,
++ &p_PortParams->numOfExtraOpenDmas,
++ TRUE);
++ if (err)
++ {
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ fman_set_liodn_per_port(&fman_rg,
++ hardwarePortId,
++ p_PortParams->liodnBase,
++ p_PortParams->liodnOffset);
++
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev < 6)
++ fman_set_order_restoration_per_port(fman_rg.fpm_rg,
++ hardwarePortId,
++ p_PortParams->independentMode,
++ !!((p_PortParams->portType==e_FM_PORT_TYPE_RX) || (p_PortParams->portType==e_FM_PORT_TYPE_RX_10G)));
++
++ HW_PORT_ID_TO_SW_PORT_ID(macId, hardwarePortId);
++
++#if defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)
++ if ((p_PortParams->portType == e_FM_PORT_TYPE_TX_10G) ||
++ (p_PortParams->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ ASSERT_COND(macId < FM_MAX_NUM_OF_10G_MACS);
++ if (p_PortParams->maxFrameLength >= p_Fm->p_FmStateStruct->macMaxFrameLengths10G[macId])
++ p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId] = p_PortParams->maxFrameLength;
++ else
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Port maxFrameLength is smaller than MAC current MTU"));
++ }
++ else
++#endif /* defined(FM_MAX_NUM_OF_10G_MACS) && ... */
++ if ((p_PortParams->portType == e_FM_PORT_TYPE_TX) ||
++ (p_PortParams->portType == e_FM_PORT_TYPE_RX))
++ {
++ ASSERT_COND(macId < FM_MAX_NUM_OF_1G_MACS);
++ if (p_PortParams->maxFrameLength >= p_Fm->p_FmStateStruct->macMaxFrameLengths1G[macId])
++ p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId] = p_PortParams->maxFrameLength;
++ else
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Port maxFrameLength is smaller than MAC current MTU"));
++ }
++
++ FmGetPhysicalMuramBase(p_Fm, &p_PortParams->fmMuramPhysBaseAddr);
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++
++ return E_OK;
++}
++
++void FmFreePortParams(t_Handle h_Fm,t_FmInterModulePortFreeParams *p_PortParams)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ uint32_t intFlags;
++ uint8_t hardwarePortId = p_PortParams->hardwarePortId;
++ uint8_t numOfTasks, numOfDmas, macId;
++ uint16_t sizeOfFifo;
++ t_Error err;
++ t_FmIpcPortFreeParams portParams;
++ t_FmIpcMsg msg;
++ struct fman_qmi_regs *qmi_rg = p_Fm->p_FmQmiRegs;
++ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
++
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ portParams.hardwarePortId = p_PortParams->hardwarePortId;
++ portParams.enumPortType = (uint32_t)p_PortParams->portType;
++ portParams.deqPipelineDepth = p_PortParams->deqPipelineDepth;
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_FREE_PORT;
++ memcpy(msg.msgBody, &portParams, sizeof(portParams));
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(portParams),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ return;
++ }
++
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++
++ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
++
++ if (p_PortParams->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
++ {
++ ASSERT_COND(p_Fm->hcPortInitialized);
++ p_Fm->hcPortInitialized = FALSE;
++ }
++
++ p_Fm->p_FmStateStruct->portsTypes[hardwarePortId] = e_FM_PORT_TYPE_DUMMY;
++
++ /* free numOfTasks */
++ numOfTasks = fman_get_num_of_tasks(bmi_rg, hardwarePortId);
++ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfTasks >= numOfTasks);
++ p_Fm->p_FmStateStruct->accumulatedNumOfTasks -= numOfTasks;
++
++ /* free numOfOpenDmas */
++ numOfDmas = fman_get_num_of_dmas(bmi_rg, hardwarePortId);
++ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas >= numOfDmas);
++ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas -= numOfDmas;
++
++#ifdef FM_HAS_TOTAL_DMAS
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev < 6)
++ {
++        /* update the total number of DMAs with the committed number of open DMAs, plus the max uncommitted pool. */
++ fman_set_num_of_open_dmas(bmi_rg,
++ hardwarePortId,
++ 1,
++ 0,
++ (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas + p_Fm->p_FmStateStruct->extraOpenDmasPoolSize));
++ }
++#endif /* FM_HAS_TOTAL_DMAS */
++
++ /* free sizeOfFifo */
++ sizeOfFifo = fman_get_size_of_fifo(bmi_rg, hardwarePortId);
++ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedFifoSize >= (sizeOfFifo * BMI_FIFO_UNITS));
++ p_Fm->p_FmStateStruct->accumulatedFifoSize -= (sizeOfFifo * BMI_FIFO_UNITS);
++
++#ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev != 4)
++#endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
++ if ((p_PortParams->portType != e_FM_PORT_TYPE_RX) &&
++ (p_PortParams->portType != e_FM_PORT_TYPE_RX_10G))
++ /* for transmit & O/H ports */
++ {
++ uint8_t enqTh;
++ uint8_t deqTh;
++
++ /* update qmi ENQ/DEQ threshold */
++ p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums -= p_PortParams->deqPipelineDepth;
++
++ /* p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums is now smaller,
++ so we can enlarge enqTh */
++ enqTh = (uint8_t)(QMI_MAX_NUM_OF_TNUMS - p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums - 1);
++
++ /* p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums is now smaller,
++ so we can reduce deqTh */
++ deqTh = (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums + 1);
++
++ fman_set_qmi_enq_th(qmi_rg, enqTh);
++ fman_set_qmi_deq_th(qmi_rg, deqTh);
++ }
++
++ HW_PORT_ID_TO_SW_PORT_ID(macId, hardwarePortId);
++
++#if defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)
++ if ((p_PortParams->portType == e_FM_PORT_TYPE_TX_10G) ||
++ (p_PortParams->portType == e_FM_PORT_TYPE_RX_10G))
++ {
++ ASSERT_COND(macId < FM_MAX_NUM_OF_10G_MACS);
++ p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId] = 0;
++ }
++ else
++#endif /* defined(FM_MAX_NUM_OF_10G_MACS) && ... */
++ if ((p_PortParams->portType == e_FM_PORT_TYPE_TX) ||
++ (p_PortParams->portType == e_FM_PORT_TYPE_RX))
++ {
++ ASSERT_COND(macId < FM_MAX_NUM_OF_1G_MACS);
++ p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId] = 0;
++ }
++
++#ifdef FM_LOW_END_RESTRICTION
++ if ((hardwarePortId==0x1) || (hardwarePortId==0x29))
++ p_Fm->p_FmStateStruct->lowEndRestriction = FALSE;
++#endif /* FM_LOW_END_RESTRICTION */
++ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
++}
++
++t_Error FmIsPortStalled(t_Handle h_Fm, uint8_t hardwarePortId, bool *p_IsStalled)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_Error err;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->baseAddr &&
++ p_Fm->h_IpcSessions[0])
++ {
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_IS_PORT_STALLED;
++ msg.msgBody[0] = hardwarePortId;
++ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(hardwarePortId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ if (replyLength != (sizeof(uint32_t) + sizeof(uint8_t)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++
++ *p_IsStalled = (bool)!!(*(uint8_t*)(reply.replyBody));
++
++ return (t_Error)(reply.error);
++ }
++ else if (!p_Fm->baseAddr)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++
++ *p_IsStalled = fman_is_port_stalled(fpm_rg, hardwarePortId);
++
++ return E_OK;
++}
++
++t_Error FmResumeStalledPort(t_Handle h_Fm, uint8_t hardwarePortId)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_Error err;
++ bool isStalled;
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->baseAddr &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_RESUME_STALLED_PORT;
++ msg.msgBody[0] = hardwarePortId;
++ replyLength = sizeof(uint32_t);
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(hardwarePortId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return (t_Error)(reply.error);
++ }
++ else if (!p_Fm->baseAddr)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Not available for this FM revision!"));
++
++ /* Get port status */
++ err = FmIsPortStalled(h_Fm, hardwarePortId, &isStalled);
++ if (err)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't get port status"));
++ if (!isStalled)
++ return E_OK;
++
++ fman_resume_stalled_port(fpm_rg, hardwarePortId);
++
++ return E_OK;
++}
++
++t_Error FmResetMac(t_Handle h_Fm, e_FmMacType type, uint8_t macId)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_Error err;
++ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
++
++#if (DPAA_VERSION >= 11)
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++                     ("FMan MAC reset is not supported on this revision!"));
++#endif /*(DPAA_VERSION >= 11)*/
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->baseAddr &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcMacParams macParams;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ macParams.id = macId;
++ macParams.enumType = (uint32_t)type;
++ msg.msgId = FM_RESET_MAC;
++ memcpy(msg.msgBody, &macParams, sizeof(macParams));
++ replyLength = sizeof(uint32_t);
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(macParams),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return (t_Error)(reply.error);
++ }
++ else if (!p_Fm->baseAddr)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++
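++    /* the low-level reset returns a Linux error code; map -EBUSY to a timeout */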
++ err = (t_Error)fman_reset_mac(fpm_rg, macId, !!(type == e_FM_MAC_10G));
++
++ if (err == -EBUSY)
++ return ERROR_CODE(E_TIMEOUT);
++ else if (err)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal MAC ID"));
++
++ return E_OK;
++}
++
++t_Error FmSetMacMaxFrame(t_Handle h_Fm, e_FmMacType type, uint8_t macId, uint16_t mtu)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcMacMaxFrameParams macMaxFrameLengthParams;
++ t_Error err;
++ t_FmIpcMsg msg;
++
++ memset(&msg, 0, sizeof(msg));
++ macMaxFrameLengthParams.macParams.id = macId;
++ macMaxFrameLengthParams.macParams.enumType = (uint32_t)type;
++ macMaxFrameLengthParams.maxFrameLength = (uint16_t)mtu;
++ msg.msgId = FM_SET_MAC_MAX_FRAME;
++ memcpy(msg.msgBody, &macMaxFrameLengthParams, sizeof(macMaxFrameLengthParams));
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(macMaxFrameLengthParams),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ return E_OK;
++ }
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("running in guest-mode without IPC!"));
++
++    /* if the port is already initialized, check that MaxFrameLength is smaller
++     * than or equal to the port's max */
++#if (defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS))
++ if (type == e_FM_MAC_10G)
++ {
++ if ((!p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId])
++ || (p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId] &&
++ (mtu <= p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId])))
++ p_Fm->p_FmStateStruct->macMaxFrameLengths10G[macId] = mtu;
++ else
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("MAC maxFrameLength is larger than Port maxFrameLength"));
++
++ }
++ else
++#else
++ UNUSED(type);
++#endif /* (defined(FM_MAX_NUM_OF_10G_MACS) && ... */
++ if ((!p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId])
++ || (p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId] &&
++ (mtu <= p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId])))
++ p_Fm->p_FmStateStruct->macMaxFrameLengths1G[macId] = mtu;
++ else
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("MAC maxFrameLength is larger than Port maxFrameLength"));
++
++ return E_OK;
++}
++
++uint16_t FmGetClockFreq(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++    /* for a multicore environment: this relies on fmClkFreq
++     * having been properly initialized at "init". */
++ return p_Fm->p_FmStateStruct->fmClkFreq;
++}
++
++uint16_t FmGetMacClockFreq(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ return p_Fm->p_FmStateStruct->fmMacClkFreq;
++}
++
++uint32_t FmGetTimeStampScale(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->baseAddr &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_Error err;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength, timeStamp;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_GET_TIMESTAMP_SCALE;
++ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return 0;
++ }
++ if (replyLength != (sizeof(uint32_t) + sizeof(uint32_t)))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return 0;
++ }
++
++ memcpy((uint8_t*)&timeStamp, reply.replyBody, sizeof(uint32_t));
++ return timeStamp;
++ }
++ else if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ p_Fm->baseAddr)
++ {
++ if (!(GET_UINT32(p_Fm->p_FmFpmRegs->fmfp_tsc1) & FPM_TS_CTL_EN))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("timestamp is not enabled!"));
++ return 0;
++ }
++ }
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++        DBG(WARNING, ("No IPC - can't validate whether the FM timestamp is enabled."));
++
++ return p_Fm->p_FmStateStruct->count1MicroBit;
++}
++
++t_Error FmEnableRamsEcc(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++
++ p_Fm->p_FmStateStruct->ramsEccOwners++;
++ p_Fm->p_FmStateStruct->internalCall = TRUE;
++
++ return FM_EnableRamsEcc(p_Fm);
++}
++
++t_Error FmDisableRamsEcc(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++
++ ASSERT_COND(p_Fm->p_FmStateStruct->ramsEccOwners);
++ p_Fm->p_FmStateStruct->ramsEccOwners--;
++
++ if (p_Fm->p_FmStateStruct->ramsEccOwners==0)
++ {
++ p_Fm->p_FmStateStruct->internalCall = TRUE;
++ return FM_DisableRamsEcc(p_Fm);
++ }
++
++ return E_OK;
++}
++
++uint8_t FmGetGuestId(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ return p_Fm->guestId;
++}
++
++bool FmIsMaster(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ return (p_Fm->guestId == NCSW_MASTER_ID);
++}
++
++t_Error FmSetSizeOfFifo(t_Handle h_Fm,
++ uint8_t hardwarePortId,
++ uint32_t *p_SizeOfFifo,
++ uint32_t *p_ExtraSizeOfFifo,
++ bool initialConfig)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_FmIpcPortRsrcParams rsrcParams;
++ t_Error err;
++ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
++ uint32_t sizeOfFifo = *p_SizeOfFifo, extraSizeOfFifo = *p_ExtraSizeOfFifo;
++ uint16_t currentVal = 0, currentExtraVal = 0;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->baseAddr &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++
++ rsrcParams.hardwarePortId = hardwarePortId;
++ rsrcParams.val = sizeOfFifo;
++ rsrcParams.extra = extraSizeOfFifo;
++ rsrcParams.boolInitialConfig = (uint8_t)initialConfig;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_SET_SIZE_OF_FIFO;
++ memcpy(msg.msgBody, &rsrcParams, sizeof(rsrcParams));
++ replyLength = sizeof(uint32_t);
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(rsrcParams),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return (t_Error)(reply.error);
++ }
++ else if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ p_Fm->baseAddr)
++ {
++ DBG(WARNING, ("No IPC - can't validate FM total-fifo size."));
++ fman_set_size_of_fifo(bmi_rg, hardwarePortId, sizeOfFifo, extraSizeOfFifo);
++ }
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++                     ("running in guest-mode with neither IPC nor a mapped register!"));
++
++ if (!initialConfig)
++ {
++ /* !initialConfig - runtime change of existing value.
++ * - read the current FIFO and extra FIFO size */
++ currentExtraVal = fman_get_size_of_extra_fifo(bmi_rg, hardwarePortId);
++ currentVal = fman_get_size_of_fifo(bmi_rg, hardwarePortId);
++ }
++
++ if (extraSizeOfFifo > currentExtraVal)
++ {
++ if (extraSizeOfFifo && !p_Fm->p_FmStateStruct->extraFifoPoolSize)
++ /* if this is the first time a port requires extraFifoPoolSize, the total extraFifoPoolSize
++ * must be initialized to 1 buffer per port
++ */
++ p_Fm->p_FmStateStruct->extraFifoPoolSize = FM_MAX_NUM_OF_RX_PORTS*BMI_FIFO_UNITS;
++
++ p_Fm->p_FmStateStruct->extraFifoPoolSize = MAX(p_Fm->p_FmStateStruct->extraFifoPoolSize, extraSizeOfFifo);
++ }
++
++ /* check that there are enough uncommitted fifo size */
++ if ((p_Fm->p_FmStateStruct->accumulatedFifoSize - currentVal + sizeOfFifo) >
++ (p_Fm->p_FmStateStruct->totalFifoSize - p_Fm->p_FmStateStruct->extraFifoPoolSize)){
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++                     ("Port requested FIFO size + accumulated size > total FIFO size:"));
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ ("port 0x%x requested %d bytes, extra size = %d, accumulated size = %d total size = %d",
++ hardwarePortId, sizeOfFifo, p_Fm->p_FmStateStruct->extraFifoPoolSize,
++ p_Fm->p_FmStateStruct->accumulatedFifoSize,
++ p_Fm->p_FmStateStruct->totalFifoSize));
++ }
++ else
++ {
++ /* update accumulated */
++ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedFifoSize >= currentVal);
++ p_Fm->p_FmStateStruct->accumulatedFifoSize -= currentVal;
++ p_Fm->p_FmStateStruct->accumulatedFifoSize += sizeOfFifo;
++ fman_set_size_of_fifo(bmi_rg, hardwarePortId, sizeOfFifo, extraSizeOfFifo);
++ }
++
++ return E_OK;
++}
++
++t_Error FmSetNumOfTasks(t_Handle h_Fm,
++ uint8_t hardwarePortId,
++ uint8_t *p_NumOfTasks,
++ uint8_t *p_NumOfExtraTasks,
++ bool initialConfig)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ t_Error err;
++ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
++ uint8_t currentVal = 0, currentExtraVal = 0, numOfTasks = *p_NumOfTasks, numOfExtraTasks = *p_NumOfExtraTasks;
++
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->baseAddr &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcPortRsrcParams rsrcParams;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++
++ rsrcParams.hardwarePortId = hardwarePortId;
++ rsrcParams.val = numOfTasks;
++ rsrcParams.extra = numOfExtraTasks;
++ rsrcParams.boolInitialConfig = (uint8_t)initialConfig;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_SET_NUM_OF_TASKS;
++ memcpy(msg.msgBody, &rsrcParams, sizeof(rsrcParams));
++ replyLength = sizeof(uint32_t);
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(rsrcParams),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return (t_Error)(reply.error);
++ }
++ else if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ p_Fm->baseAddr)
++ {
++ DBG(WARNING, ("No IPC - can't validate FM total-num-of-tasks."));
++ fman_set_num_of_tasks(bmi_rg, hardwarePortId, numOfTasks, numOfExtraTasks);
++ }
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++                     ("running in guest-mode with neither IPC nor a mapped register!"));
++
++ if (!initialConfig)
++ {
++ /* !initialConfig - runtime change of existing value.
++ * - read the current number of tasks */
++ currentVal = fman_get_num_of_tasks(bmi_rg, hardwarePortId);
++ currentExtraVal = fman_get_num_extra_tasks(bmi_rg, hardwarePortId);
++ }
++
++ if (numOfExtraTasks > currentExtraVal)
++ p_Fm->p_FmStateStruct->extraTasksPoolSize =
++ (uint8_t)MAX(p_Fm->p_FmStateStruct->extraTasksPoolSize, numOfExtraTasks);
++
++ /* check that there are enough uncommitted tasks */
++ if ((p_Fm->p_FmStateStruct->accumulatedNumOfTasks - currentVal + numOfTasks) >
++ (p_Fm->p_FmStateStruct->totalNumOfTasks - p_Fm->p_FmStateStruct->extraTasksPoolSize))
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE,
++ ("Requested numOfTasks and extra tasks pool for fm%d exceed total numOfTasks.",
++ p_Fm->p_FmStateStruct->fmId));
++ else
++ {
++ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfTasks >= currentVal);
++ /* update accumulated */
++ p_Fm->p_FmStateStruct->accumulatedNumOfTasks -= currentVal;
++ p_Fm->p_FmStateStruct->accumulatedNumOfTasks += numOfTasks;
++ fman_set_num_of_tasks(bmi_rg, hardwarePortId, numOfTasks, numOfExtraTasks);
++ }
++
++ return E_OK;
++}
++
++t_Error FmSetNumOfOpenDmas(t_Handle h_Fm,
++ uint8_t hardwarePortId,
++ uint8_t *p_NumOfOpenDmas,
++ uint8_t *p_NumOfExtraOpenDmas,
++ bool initialConfig)
++
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ t_Error err;
++ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
++ uint8_t numOfOpenDmas = *p_NumOfOpenDmas, numOfExtraOpenDmas = *p_NumOfExtraOpenDmas;
++ uint8_t totalNumDmas = 0, currentVal = 0, currentExtraVal = 0;
++
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->baseAddr &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcPortRsrcParams rsrcParams;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++
++ rsrcParams.hardwarePortId = hardwarePortId;
++ rsrcParams.val = numOfOpenDmas;
++ rsrcParams.extra = numOfExtraOpenDmas;
++ rsrcParams.boolInitialConfig = (uint8_t)initialConfig;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_SET_NUM_OF_OPEN_DMAS;
++ memcpy(msg.msgBody, &rsrcParams, sizeof(rsrcParams));
++ replyLength = sizeof(uint32_t);
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId) + sizeof(rsrcParams),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ if (replyLength != sizeof(uint32_t))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return (t_Error)(reply.error);
++ }
++#ifdef FM_HAS_TOTAL_DMAS
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("running in guest-mode without IPC!"));
++#else
++ else if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ p_Fm->baseAddr &&
++ (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6))
++ {
++ /*DBG(WARNING, ("No IPC - can't validate FM total-num-of-dmas."));*/
++
++ if (!numOfOpenDmas)
++ {
++            /* first configuration without an explicit value: do nothing - the reset value
++            shouldn't be changed; just read the registers and save the values for the port */
++ *p_NumOfOpenDmas = fman_get_num_of_dmas(bmi_rg, hardwarePortId);
++ *p_NumOfExtraOpenDmas = fman_get_num_extra_dmas(bmi_rg, hardwarePortId);
++ }
++ else
++ /* whether it is the first time with explicit value, or runtime "set" - write register */
++ fman_set_num_of_open_dmas(bmi_rg,
++ hardwarePortId,
++ numOfOpenDmas,
++ numOfExtraOpenDmas,
++ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas + p_Fm->p_FmStateStruct->extraOpenDmasPoolSize);
++ }
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
++                     ("running in guest-mode with neither IPC nor a mapped register!"));
++#endif /* FM_HAS_TOTAL_DMAS */
++
++ if (!initialConfig)
++ {
++ /* !initialConfig - runtime change of existing value.
++         * - read the current number of open DMAs */
++ currentExtraVal = fman_get_num_extra_dmas(bmi_rg, hardwarePortId);
++ currentVal = fman_get_num_of_dmas(bmi_rg, hardwarePortId);
++ }
++
++#ifdef FM_NO_GUARANTEED_RESET_VALUES
++ /* it's illegal to be in a state where this is not the first set and no value is specified */
++ ASSERT_COND(initialConfig || numOfOpenDmas);
++ if (!numOfOpenDmas)
++ {
++ /* !numOfOpenDmas - first configuration according to values in regs.
++         * - read the current number of open DMAs */
++ currentExtraVal = fman_get_num_extra_dmas(bmi_rg, hardwarePortId);
++ currentVal = fman_get_num_of_dmas(bmi_rg, hardwarePortId);
++        /* This is the first configuration and the user did not specify a value (!numOfOpenDmas),
++         * so the reset values will be used and we just save them for resource management */
++ p_Fm->p_FmStateStruct->extraOpenDmasPoolSize =
++ (uint8_t)MAX(p_Fm->p_FmStateStruct->extraOpenDmasPoolSize, currentExtraVal);
++ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas += currentVal;
++ *p_NumOfOpenDmas = currentVal;
++ *p_NumOfExtraOpenDmas = currentExtraVal;
++ return E_OK;
++ }
++#endif /* FM_NO_GUARANTEED_RESET_VALUES */
++
++ if (numOfExtraOpenDmas > currentExtraVal)
++ p_Fm->p_FmStateStruct->extraOpenDmasPoolSize =
++ (uint8_t)MAX(p_Fm->p_FmStateStruct->extraOpenDmasPoolSize, numOfExtraOpenDmas);
++
++#ifdef FM_HAS_TOTAL_DMAS
++ if ((p_Fm->p_FmStateStruct->revInfo.majorRev < 6) &&
++ (p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas - currentVal + numOfOpenDmas >
++ p_Fm->p_FmStateStruct->maxNumOfOpenDmas))
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE,
++ ("Requested numOfOpenDmas for fm%d exceeds total numOfOpenDmas.",
++ p_Fm->p_FmStateStruct->fmId));
++#else
++ if ((p_Fm->p_FmStateStruct->revInfo.majorRev >= 6) &&
++#ifdef FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
++ !((p_Fm->p_FmStateStruct->revInfo.majorRev == 6) &&
++ (p_Fm->p_FmStateStruct->revInfo.minorRev == 0)) &&
++#endif /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 */
++ (p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas - currentVal + numOfOpenDmas > DMA_THRESH_MAX_COMMQ + 1))
++ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE,
++ ("Requested numOfOpenDmas for fm%d exceeds DMA Command queue (%d)",
++ p_Fm->p_FmStateStruct->fmId, DMA_THRESH_MAX_COMMQ+1));
++#endif /* FM_HAS_TOTAL_DMAS */
++ else
++ {
++ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas >= currentVal);
++        /* update accumulated */
++ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas -= currentVal;
++ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas += numOfOpenDmas;
++
++#ifdef FM_HAS_TOTAL_DMAS
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev < 6)
++ totalNumDmas = (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas + p_Fm->p_FmStateStruct->extraOpenDmasPoolSize);
++#endif /* FM_HAS_TOTAL_DMAS */
++ fman_set_num_of_open_dmas(bmi_rg,
++ hardwarePortId,
++ numOfOpenDmas,
++ numOfExtraOpenDmas,
++ totalNumDmas);
++ }
++
++ return E_OK;
++}
++
++#if (DPAA_VERSION >= 11)
++t_Error FmVSPCheckRelativeProfile(t_Handle h_Fm,
++ e_FmPortType portType,
++ uint8_t portId,
++ uint16_t relativeProfile)
++{
++ t_Fm *p_Fm;
++ t_FmSp *p_FmPcdSp;
++ uint8_t swPortIndex=0, hardwarePortId;
++
++ ASSERT_COND(h_Fm);
++ p_Fm = (t_Fm*)h_Fm;
++
++ hardwarePortId = SwPortIdToHwPortId(portType,
++ portId,
++ p_Fm->p_FmStateStruct->revInfo.majorRev,
++ p_Fm->p_FmStateStruct->revInfo.minorRev);
++ ASSERT_COND(hardwarePortId);
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
++
++ p_FmPcdSp = p_Fm->p_FmSp;
++ ASSERT_COND(p_FmPcdSp);
++
++ if (!p_FmPcdSp->portsMapping[swPortIndex].numOfProfiles)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE , ("Port has no allocated profiles"));
++ if (relativeProfile >= p_FmPcdSp->portsMapping[swPortIndex].numOfProfiles)
++ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE , ("Profile id is out of range"));
++
++ return E_OK;
++}
++
++t_Error FmVSPGetAbsoluteProfileId(t_Handle h_Fm,
++ e_FmPortType portType,
++ uint8_t portId,
++ uint16_t relativeProfile,
++ uint16_t *p_AbsoluteId)
++{
++ t_Fm *p_Fm;
++ t_FmSp *p_FmPcdSp;
++ uint8_t swPortIndex=0, hardwarePortId;
++ t_Error err;
++
++ ASSERT_COND(h_Fm);
++ p_Fm = (t_Fm*)h_Fm;
++
++ err = FmVSPCheckRelativeProfile(h_Fm, portType, portId, relativeProfile);
++ if (err != E_OK)
++ return err;
++
++ hardwarePortId = SwPortIdToHwPortId(portType,
++ portId,
++ p_Fm->p_FmStateStruct->revInfo.majorRev,
++ p_Fm->p_FmStateStruct->revInfo.minorRev);
++ ASSERT_COND(hardwarePortId);
++ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
++
++ p_FmPcdSp = p_Fm->p_FmSp;
++ ASSERT_COND(p_FmPcdSp);
++
++ *p_AbsoluteId = (uint16_t)(p_FmPcdSp->portsMapping[swPortIndex].profilesBase + relativeProfile);
++
++ return E_OK;
++}
++#endif /* (DPAA_VERSION >= 11) */
++
++static t_Error InitFmDma(t_Fm *p_Fm)
++{
++ t_Error err;
++
++ err = (t_Error)fman_dma_init(p_Fm->p_FmDmaRegs, p_Fm->p_FmDriverParam);
++ if (err != E_OK)
++ return err;
++
++ /* Allocate MURAM for CAM */
++ p_Fm->camBaseAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram,
++ (uint32_t)(p_Fm->p_FmDriverParam->dma_cam_num_of_entries*DMA_CAM_SIZEOF_ENTRY),
++ DMA_CAM_ALIGN));
++ if (!p_Fm->camBaseAddr)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for DMA CAM failed"));
++
++ WRITE_BLOCK(UINT_TO_PTR(p_Fm->camBaseAddr),
++ 0,
++ (uint32_t)(p_Fm->p_FmDriverParam->dma_cam_num_of_entries*DMA_CAM_SIZEOF_ENTRY));
++
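++    /* FMan v2: the DMA CAM is re-allocated as 72 bytes per entry plus 128 extra bytes,
++     * and the first word is written with a bitmask that has one bit set per CAM entry. */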
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev == 2)
++ {
++ FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->camBaseAddr));
++
++ p_Fm->camBaseAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram,
++ (uint32_t)(p_Fm->p_FmDriverParam->dma_cam_num_of_entries*72 + 128),
++ 64));
++ if (!p_Fm->camBaseAddr)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for DMA CAM failed"));
++
++ WRITE_BLOCK(UINT_TO_PTR(p_Fm->camBaseAddr),
++ 0,
++ (uint32_t)(p_Fm->p_FmDriverParam->dma_cam_num_of_entries*72 + 128));
++
++ switch(p_Fm->p_FmDriverParam->dma_cam_num_of_entries)
++ {
++ case (8):
++ WRITE_UINT32(*(uint32_t*)p_Fm->camBaseAddr, 0xff000000);
++ break;
++ case (16):
++ WRITE_UINT32(*(uint32_t*)p_Fm->camBaseAddr, 0xffff0000);
++ break;
++ case (24):
++ WRITE_UINT32(*(uint32_t*)p_Fm->camBaseAddr, 0xffffff00);
++ break;
++ case (32):
++ WRITE_UINT32(*(uint32_t*)p_Fm->camBaseAddr, 0xffffffff);
++ break;
++ default:
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("wrong dma_cam_num_of_entries"));
++ }
++ }
++
++ p_Fm->p_FmDriverParam->cam_base_addr =
++ (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->camBaseAddr)) - p_Fm->fmMuramPhysBaseAddr);
++
++ return E_OK;
++}
++
++static t_Error InitFmFpm(t_Fm *p_Fm)
++{
++ return (t_Error)fman_fpm_init(p_Fm->p_FmFpmRegs, p_Fm->p_FmDriverParam);
++}
++
++static t_Error InitFmBmi(t_Fm *p_Fm)
++{
++ return (t_Error)fman_bmi_init(p_Fm->p_FmBmiRegs, p_Fm->p_FmDriverParam);
++}
++
++static t_Error InitFmQmi(t_Fm *p_Fm)
++{
++ return (t_Error)fman_qmi_init(p_Fm->p_FmQmiRegs, p_Fm->p_FmDriverParam);
++}
++
++static t_Error InitGuestMode(t_Fm *p_Fm)
++{
++ t_Error err = E_OK;
++ int i;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++
++ ASSERT_COND(p_Fm);
++ ASSERT_COND(p_Fm->guestId != NCSW_MASTER_ID);
++
++ /* build the FM guest partition IPC address */
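++    /* the length check relies on Sprint returning the number of characters printed
++     * ("FM_x_y" is 6 chars for a single-digit guest id, 7 otherwise) */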
++ if (Sprint (p_Fm->fmModuleName, "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, p_Fm->guestId) != (p_Fm->guestId<10 ? 6:7))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++
++ /* build the FM master partition IPC address */
++ memset(p_Fm->fmIpcHandlerModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE);
++ if (Sprint (p_Fm->fmIpcHandlerModuleName[0], "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, NCSW_MASTER_ID) != 6)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++
++ for (i=0;i<e_FM_EV_DUMMY_LAST;i++)
++ p_Fm->intrMng[i].f_Isr = UnimplementedIsr;
++
++ p_Fm->h_IpcSessions[0] = XX_IpcInitSession(p_Fm->fmIpcHandlerModuleName[0], p_Fm->fmModuleName);
++ if (p_Fm->h_IpcSessions[0])
++ {
++ uint8_t isMasterAlive;
++ t_FmIpcParams ipcParams;
++
++ err = XX_IpcRegisterMsgHandler(p_Fm->fmModuleName, FmGuestHandleIpcMsgCB, p_Fm, FM_IPC_MAX_REPLY_SIZE);
++ if (err)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_MASTER_IS_ALIVE;
++ msg.msgBody[0] = p_Fm->guestId;
++ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
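++        /* busy-wait for the master's reply (IpcMsgCompletionCB is expected to clear
++         * blockingFlag) and retry until the master reports alive */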
++ do
++ {
++ blockingFlag = TRUE;
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId)+sizeof(p_Fm->guestId),
++ (uint8_t*)&reply,
++ &replyLength,
++ IpcMsgCompletionCB,
++ p_Fm)) != E_OK)
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ while (blockingFlag) ;
++ if (replyLength != (sizeof(uint32_t) + sizeof(uint8_t)))
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ isMasterAlive = *(uint8_t*)(reply.replyBody);
++ } while (!isMasterAlive);
++
++ /* read FM parameters and save */
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_GET_PARAMS;
++ replyLength = sizeof(uint32_t) + sizeof(t_FmIpcParams);
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmIpcParams)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ memcpy((uint8_t*)&ipcParams, reply.replyBody, sizeof(t_FmIpcParams));
++
++ p_Fm->p_FmStateStruct->fmClkFreq = ipcParams.fmClkFreq;
++ p_Fm->p_FmStateStruct->fmMacClkFreq = ipcParams.fmMacClkFreq;
++ p_Fm->p_FmStateStruct->revInfo.majorRev = ipcParams.majorRev;
++ p_Fm->p_FmStateStruct->revInfo.minorRev = ipcParams.minorRev;
++ }
++ else
++ {
++ DBG(WARNING, ("FM Guest mode - without IPC"));
++ if (!p_Fm->p_FmStateStruct->fmClkFreq)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("No fmClkFreq configured for guest without IPC"));
++ if (p_Fm->baseAddr)
++ {
++ fman_get_revision(p_Fm->p_FmFpmRegs,
++ &p_Fm->p_FmStateStruct->revInfo.majorRev,
++ &p_Fm->p_FmStateStruct->revInfo.minorRev);
++
++ }
++ }
++
++#if (DPAA_VERSION >= 11)
++ p_Fm->partVSPBase = AllocVSPsForPartition(p_Fm, p_Fm->partVSPBase, p_Fm->partNumOfVSPs, p_Fm->guestId);
++ if (p_Fm->partVSPBase == (uint8_t)(ILLEGAL_BASE))
++        DBG(WARNING, ("partition VSP allocation FAILED"));
++#endif /* (DPAA_VERSION >= 11) */
++
++ /* General FM driver initialization */
++ if (p_Fm->baseAddr)
++ p_Fm->fmMuramPhysBaseAddr =
++ (uint64_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->baseAddr + FM_MM_MURAM)));
++
++ XX_Free(p_Fm->p_FmDriverParam);
++ p_Fm->p_FmDriverParam = NULL;
++
++ if ((p_Fm->guestId == NCSW_MASTER_ID) ||
++ (p_Fm->h_IpcSessions[0]))
++ {
++ FM_DisableRamsEcc(p_Fm);
++ FmMuramClear(p_Fm->h_FmMuram);
++ FM_EnableRamsEcc(p_Fm);
++ }
++
++ return E_OK;
++}
++
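++/* translate the driver-level e_FmExceptions values into the low-level enum fman_exceptions */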
++static __inline__ enum fman_exceptions FmanExceptionTrans(e_FmExceptions exception)
++{
++ switch (exception) {
++ case e_FM_EX_DMA_BUS_ERROR:
++ return E_FMAN_EX_DMA_BUS_ERROR;
++ case e_FM_EX_DMA_READ_ECC:
++ return E_FMAN_EX_DMA_READ_ECC;
++ case e_FM_EX_DMA_SYSTEM_WRITE_ECC:
++ return E_FMAN_EX_DMA_SYSTEM_WRITE_ECC;
++ case e_FM_EX_DMA_FM_WRITE_ECC:
++ return E_FMAN_EX_DMA_FM_WRITE_ECC;
++ case e_FM_EX_FPM_STALL_ON_TASKS:
++ return E_FMAN_EX_FPM_STALL_ON_TASKS;
++ case e_FM_EX_FPM_SINGLE_ECC:
++ return E_FMAN_EX_FPM_SINGLE_ECC;
++ case e_FM_EX_FPM_DOUBLE_ECC:
++ return E_FMAN_EX_FPM_DOUBLE_ECC;
++ case e_FM_EX_QMI_SINGLE_ECC:
++ return E_FMAN_EX_QMI_SINGLE_ECC;
++ case e_FM_EX_QMI_DOUBLE_ECC:
++ return E_FMAN_EX_QMI_DOUBLE_ECC;
++ case e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
++ return E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
++ case e_FM_EX_BMI_LIST_RAM_ECC:
++ return E_FMAN_EX_BMI_LIST_RAM_ECC;
++ case e_FM_EX_BMI_STORAGE_PROFILE_ECC:
++ return E_FMAN_EX_BMI_STORAGE_PROFILE_ECC;
++ case e_FM_EX_BMI_STATISTICS_RAM_ECC:
++ return E_FMAN_EX_BMI_STATISTICS_RAM_ECC;
++ case e_FM_EX_BMI_DISPATCH_RAM_ECC:
++ return E_FMAN_EX_BMI_DISPATCH_RAM_ECC;
++ case e_FM_EX_IRAM_ECC:
++ return E_FMAN_EX_IRAM_ECC;
++ case e_FM_EX_MURAM_ECC:
++ return E_FMAN_EX_MURAM_ECC;
++ default:
++ return E_FMAN_EX_DMA_BUS_ERROR;
++ }
++}
++
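++/* map a (port type, relative port id) pair onto the global hardware port id space */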
++uint8_t SwPortIdToHwPortId(e_FmPortType type, uint8_t relativePortId, uint8_t majorRev, uint8_t minorRev)
++{
++ switch (type)
++ {
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
++ CHECK_PORT_ID_OH_PORTS(relativePortId);
++ return (uint8_t)(BASE_OH_PORTID + (relativePortId));
++ case (e_FM_PORT_TYPE_RX):
++ CHECK_PORT_ID_1G_RX_PORTS(relativePortId);
++ return (uint8_t)(BASE_1G_RX_PORTID + (relativePortId));
++ case (e_FM_PORT_TYPE_RX_10G):
++ /* The 10G port in T1024 (FMan Version 6.4) is the first port.
++ * This is the reason why the 1G port offset is used.
++ */
++ if (majorRev == 6 && minorRev == 4)
++ {
++ CHECK_PORT_ID_1G_RX_PORTS(relativePortId);
++ return (uint8_t)(BASE_1G_RX_PORTID + (relativePortId));
++ }
++ else
++ {
++ CHECK_PORT_ID_10G_RX_PORTS(relativePortId);
++ return (uint8_t)(BASE_10G_RX_PORTID + (relativePortId));
++ }
++ case (e_FM_PORT_TYPE_TX):
++ CHECK_PORT_ID_1G_TX_PORTS(relativePortId);
++ return (uint8_t)(BASE_1G_TX_PORTID + (relativePortId));
++ case (e_FM_PORT_TYPE_TX_10G):
++ /* The 10G port in T1024 (FMan Version 6.4) is the first port.
++ * This is the reason why the 1G port offset is used.
++ */
++ if (majorRev == 6 && minorRev == 4)
++ {
++ CHECK_PORT_ID_1G_TX_PORTS(relativePortId);
++ return (uint8_t)(BASE_1G_TX_PORTID + (relativePortId));
++ }
++ else
++ {
++ CHECK_PORT_ID_10G_TX_PORTS(relativePortId);
++ return (uint8_t)(BASE_10G_TX_PORTID + (relativePortId));
++ }
++ default:
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal port type"));
++ return 0;
++ }
++}
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++t_Error FmDumpPortRegs (t_Handle h_Fm, uint8_t hardwarePortId)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++
++ DECLARE_DUMP;
++
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(((p_Fm->guestId == NCSW_MASTER_ID) ||
++ p_Fm->baseAddr), E_INVALID_OPERATION);
++
++ DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1], ("fmbm_pp for port %u", (hardwarePortId)));
++ DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1], sizeof(uint32_t));
++
++ DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1], ("fmbm_pfs for port %u", (hardwarePortId )));
++ DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1], sizeof(uint32_t));
++
++ DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_spliodn[hardwarePortId-1], ("fmbm_spliodn for port %u", (hardwarePortId)));
++ DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_spliodn[hardwarePortId-1], sizeof(uint32_t));
++
++ DUMP_TITLE(&p_Fm->p_FmFpmRegs->fmfp_ps[hardwarePortId], ("fmfp_ps for port %u", (hardwarePortId)));
++ DUMP_MEMORY(&p_Fm->p_FmFpmRegs->fmfp_ps[hardwarePortId], sizeof(uint32_t));
++
++ DUMP_TITLE(&p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId/2], ("fmdmplr for port %u", (hardwarePortId)));
++ DUMP_MEMORY(&p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId/2], sizeof(uint32_t));
++
++ return E_OK;
++}
++#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */
++
++
++/*****************************************************************************/
++/* API Init unit functions */
++/*****************************************************************************/
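++/**************************************************************************//**
++ @Function      FM_Config
++
++ @Description   Creates the FM module descriptor and configures driver defaults
++
++ @Param[in]     p_FmParam - A pointer to the FM initialization parameters
++
++ @Return        A handle to the FM module on success; NULL otherwise.
++*//***************************************************************************/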
++t_Handle FM_Config(t_FmParams *p_FmParam)
++{
++ t_Fm *p_Fm;
++ uint8_t i;
++ uintptr_t baseAddr;
++
++ SANITY_CHECK_RETURN_VALUE(p_FmParam, E_NULL_POINTER, NULL);
++ SANITY_CHECK_RETURN_VALUE(((p_FmParam->firmware.p_Code && p_FmParam->firmware.size) ||
++ (!p_FmParam->firmware.p_Code && !p_FmParam->firmware.size)),
++ E_INVALID_VALUE, NULL);
++
++ baseAddr = p_FmParam->baseAddr;
++
++ /* Allocate FM structure */
++ p_Fm = (t_Fm *) XX_Malloc(sizeof(t_Fm));
++ if (!p_Fm)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM driver structure"));
++ return NULL;
++ }
++ memset(p_Fm, 0, sizeof(t_Fm));
++
++ p_Fm->p_FmStateStruct = (t_FmStateStruct *) XX_Malloc(sizeof(t_FmStateStruct));
++ if (!p_Fm->p_FmStateStruct)
++ {
++ XX_Free(p_Fm);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Status structure"));
++ return NULL;
++ }
++ memset(p_Fm->p_FmStateStruct, 0, sizeof(t_FmStateStruct));
++
++ /* Initialize FM parameters which will be kept by the driver */
++ p_Fm->p_FmStateStruct->fmId = p_FmParam->fmId;
++ p_Fm->guestId = p_FmParam->guestId;
++
++ for (i=0; i<FM_MAX_NUM_OF_HW_PORT_IDS; i++)
++ p_Fm->p_FmStateStruct->portsTypes[i] = e_FM_PORT_TYPE_DUMMY;
++
++ /* Allocate the FM driver's parameters structure */
++ p_Fm->p_FmDriverParam = (struct fman_cfg *)XX_Malloc(sizeof(struct fman_cfg));
++ if (!p_Fm->p_FmDriverParam)
++ {
++ XX_Free(p_Fm->p_FmStateStruct);
++ XX_Free(p_Fm);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM driver parameters"));
++ return NULL;
++ }
++ memset(p_Fm->p_FmDriverParam, 0, sizeof(struct fman_cfg));
++
++#if (DPAA_VERSION >= 11)
++ p_Fm->p_FmSp = (t_FmSp *)XX_Malloc(sizeof(t_FmSp));
++ if (!p_Fm->p_FmSp)
++ {
++ XX_Free(p_Fm->p_FmDriverParam);
++ XX_Free(p_Fm->p_FmStateStruct);
++ XX_Free(p_Fm);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("allocation for internal data structure failed"));
++ return NULL;
++ }
++ memset(p_Fm->p_FmSp, 0, sizeof(t_FmSp));
++
++ for (i=0; i<FM_VSP_MAX_NUM_OF_ENTRIES; i++)
++ p_Fm->p_FmSp->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE;
++#endif /* (DPAA_VERSION >= 11) */
++
++ /* Initialize FM parameters which will be kept by the driver */
++ p_Fm->p_FmStateStruct->fmId = p_FmParam->fmId;
++ p_Fm->h_FmMuram = p_FmParam->h_FmMuram;
++ p_Fm->h_App = p_FmParam->h_App;
++ p_Fm->p_FmStateStruct->fmClkFreq = p_FmParam->fmClkFreq;
++ p_Fm->p_FmStateStruct->fmMacClkFreq = p_FmParam->fmClkFreq / ((!p_FmParam->fmMacClkRatio)? 2: p_FmParam->fmMacClkRatio);
++ p_Fm->f_Exception = p_FmParam->f_Exception;
++ p_Fm->f_BusError = p_FmParam->f_BusError;
++ p_Fm->p_FmFpmRegs = (struct fman_fpm_regs *)UINT_TO_PTR(baseAddr + FM_MM_FPM);
++ p_Fm->p_FmBmiRegs = (struct fman_bmi_regs *)UINT_TO_PTR(baseAddr + FM_MM_BMI);
++ p_Fm->p_FmQmiRegs = (struct fman_qmi_regs *)UINT_TO_PTR(baseAddr + FM_MM_QMI);
++ p_Fm->p_FmDmaRegs = (struct fman_dma_regs *)UINT_TO_PTR(baseAddr + FM_MM_DMA);
++ p_Fm->p_FmRegs = (struct fman_regs *)UINT_TO_PTR(baseAddr + FM_MM_BMI);
++ p_Fm->baseAddr = baseAddr;
++ p_Fm->p_FmStateStruct->irq = p_FmParam->irq;
++ p_Fm->p_FmStateStruct->errIrq = p_FmParam->errIrq;
++ p_Fm->hcPortInitialized = FALSE;
++ p_Fm->independentMode = FALSE;
++
++ p_Fm->h_Spinlock = XX_InitSpinlock();
++ if (!p_Fm->h_Spinlock)
++ {
++ XX_Free(p_Fm->p_FmDriverParam);
++ XX_Free(p_Fm->p_FmStateStruct);
++ XX_Free(p_Fm);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("can't allocate spinlock!"));
++ return NULL;
++ }
++
++#if (DPAA_VERSION >= 11)
++ p_Fm->partVSPBase = p_FmParam->partVSPBase;
++ p_Fm->partNumOfVSPs = p_FmParam->partNumOfVSPs;
++ p_Fm->vspBaseAddr = p_FmParam->vspBaseAddr;
++#endif /* (DPAA_VERSION >= 11) */
++
++ fman_defconfig(p_Fm->p_FmDriverParam,
++ !!(p_Fm->guestId == NCSW_MASTER_ID));
++/* override macro-dependent parameters */
++#ifdef FM_PEDANTIC_DMA
++ p_Fm->p_FmDriverParam->pedantic_dma = TRUE;
++ p_Fm->p_FmDriverParam->dma_aid_override = TRUE;
++#endif /* FM_PEDANTIC_DMA */
++#ifndef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
++ p_Fm->p_FmDriverParam->qmi_deq_option_support = TRUE;
++#endif /* !FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
++
++ p_Fm->p_FmStateStruct->ramsEccEnable = FALSE;
++ p_Fm->p_FmStateStruct->extraFifoPoolSize = 0;
++ p_Fm->p_FmStateStruct->exceptions = DEFAULT_exceptions;
++ p_Fm->resetOnInit = DEFAULT_resetOnInit;
++ p_Fm->f_ResetOnInitOverride = DEFAULT_resetOnInitOverrideCallback;
++ p_Fm->fwVerify = DEFAULT_VerifyUcode;
++ p_Fm->firmware.size = p_FmParam->firmware.size;
++ if (p_Fm->firmware.size)
++ {
++ p_Fm->firmware.p_Code = (uint32_t *)XX_Malloc(p_Fm->firmware.size);
++ if (!p_Fm->firmware.p_Code)
++ {
++ XX_FreeSpinlock(p_Fm->h_Spinlock);
++ XX_Free(p_Fm->p_FmStateStruct);
++ XX_Free(p_Fm->p_FmDriverParam);
++ XX_Free(p_Fm);
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM firmware code"));
++ return NULL;
++ }
++ memcpy(p_Fm->firmware.p_Code, p_FmParam->firmware.p_Code ,p_Fm->firmware.size);
++ }
++
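++    /* guest partitions stop here; register-dependent setup (e.g. reading the revision)
++     * is deferred to FM_Init */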
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ return p_Fm;
++
++ /* read revision */
++ /* Chip dependent, will be configured in Init */
++ fman_get_revision(p_Fm->p_FmFpmRegs,
++ &p_Fm->p_FmStateStruct->revInfo.majorRev,
++ &p_Fm->p_FmStateStruct->revInfo.minorRev);
++
++#ifdef FM_AID_MODE_NO_TNUM_SW005
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ p_Fm->p_FmDriverParam->dma_aid_mode = e_FM_DMA_AID_OUT_PORT_ID;
++#endif /* FM_AID_MODE_NO_TNUM_SW005 */
++#ifndef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev != 4)
++ p_Fm->p_FmDriverParam->qmi_def_tnums_thresh = QMI_DEF_TNUMS_THRESH;
++#endif /* !FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
++
++ p_Fm->p_FmStateStruct->totalFifoSize = 0;
++ p_Fm->p_FmStateStruct->totalNumOfTasks =
++ DEFAULT_totalNumOfTasks(p_Fm->p_FmStateStruct->revInfo.majorRev,
++ p_Fm->p_FmStateStruct->revInfo.minorRev);
++
++#ifdef FM_HAS_TOTAL_DMAS
++ p_Fm->p_FmStateStruct->maxNumOfOpenDmas = BMI_MAX_NUM_OF_DMAS;
++#endif /* FM_HAS_TOTAL_DMAS */
++#if (DPAA_VERSION < 11)
++ p_Fm->p_FmDriverParam->dma_comm_qtsh_clr_emer = DEFAULT_dmaCommQLow;
++ p_Fm->p_FmDriverParam->dma_comm_qtsh_asrt_emer = DEFAULT_dmaCommQHigh;
++ p_Fm->p_FmDriverParam->dma_cam_num_of_entries = DEFAULT_dmaCamNumOfEntries;
++ p_Fm->p_FmDriverParam->dma_read_buf_tsh_clr_emer = DEFAULT_dmaReadIntBufLow;
++ p_Fm->p_FmDriverParam->dma_read_buf_tsh_asrt_emer = DEFAULT_dmaReadIntBufHigh;
++ p_Fm->p_FmDriverParam->dma_write_buf_tsh_clr_emer = DEFAULT_dmaWriteIntBufLow;
++ p_Fm->p_FmDriverParam->dma_write_buf_tsh_asrt_emer = DEFAULT_dmaWriteIntBufHigh;
++ p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats = DEFAULT_axiDbgNumOfBeats;
++#endif /* (DPAA_VERSION < 11) */
++#ifdef FM_NO_TNUM_AGING
++ p_Fm->p_FmDriverParam->tnum_aging_period = 0;
++#endif
++ p_Fm->tnumAgingPeriod = p_Fm->p_FmDriverParam->tnum_aging_period;
++
++ return p_Fm;
++}
++
++/**************************************************************************//**
++ @Function FM_Init
++
++ @Description Initializes the FM module
++
++ @Param[in] h_Fm - FM module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_Init(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_cfg *p_FmDriverParam = NULL;
++ t_Error err = E_OK;
++ int i;
++ t_FmRevisionInfo revInfo;
++ struct fman_rg fman_rg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++
++ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
++ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
++ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
++ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
++
++ p_Fm->p_FmStateStruct->count1MicroBit = FM_TIMESTAMP_1_USEC_BIT;
++ p_Fm->p_FmDriverParam->num_of_fman_ctrl_evnt_regs = FM_NUM_OF_FMAN_CTRL_EVENT_REGS;
++
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ return InitGuestMode(p_Fm);
++
++    /* If the user didn't configure totalFifoSize (totalFifoSize == 0), we
++     * configure the default according to the chip; otherwise, we use the
++     * user's configuration.
++     */
++ if (p_Fm->p_FmStateStruct->totalFifoSize == 0)
++ p_Fm->p_FmStateStruct->totalFifoSize = DEFAULT_totalFifoSize(p_Fm->p_FmStateStruct->revInfo.majorRev,
++ p_Fm->p_FmStateStruct->revInfo.minorRev);
++
++ CHECK_INIT_PARAMETERS(p_Fm, CheckFmParameters);
++
++ p_FmDriverParam = p_Fm->p_FmDriverParam;
++
++ FM_GetRevision(p_Fm, &revInfo);
++
++    /* clear revision-dependent non-existing exceptions */
++#ifdef FM_NO_DISPATCH_RAM_ECC
++ if ((revInfo.majorRev != 4) &&
++ (revInfo.majorRev < 6))
++ p_Fm->p_FmStateStruct->exceptions &= ~FM_EX_BMI_DISPATCH_RAM_ECC;
++#endif /* FM_NO_DISPATCH_RAM_ECC */
++
++#ifdef FM_QMI_NO_ECC_EXCEPTIONS
++ if (revInfo.majorRev == 4)
++ p_Fm->p_FmStateStruct->exceptions &= ~(FM_EX_QMI_SINGLE_ECC | FM_EX_QMI_DOUBLE_ECC);
++#endif /* FM_QMI_NO_ECC_EXCEPTIONS */
++
++#ifdef FM_QMI_NO_SINGLE_ECC_EXCEPTION
++ if (revInfo.majorRev >= 6)
++ p_Fm->p_FmStateStruct->exceptions &= ~FM_EX_QMI_SINGLE_ECC;
++#endif /* FM_QMI_NO_SINGLE_ECC_EXCEPTION */
++
++ FmMuramClear(p_Fm->h_FmMuram);
++
++ /* clear CPG */
++ IOMemSet32(UINT_TO_PTR(p_Fm->baseAddr + FM_MM_CGP), 0, FM_PORT_NUM_OF_CONGESTION_GRPS);
++
++ /* add to the default exceptions the user's definitions */
++ p_Fm->p_FmStateStruct->exceptions |= p_Fm->userSetExceptions;
++
++ /* Reset the FM if required */
++ if (p_Fm->resetOnInit)
++ {
++#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
++ if ((err = FwNotResetErratumBugzilla6173WA(p_Fm)) != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++#else /* not FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
++
++ if (p_Fm->f_ResetOnInitOverride)
++ {
++ /* Perform user specific FMan reset */
++ p_Fm->f_ResetOnInitOverride(h_Fm);
++ }
++ else
++ {
++ /* Perform FMan reset */
++ FmReset(h_Fm);
++ }
++
++ if (fman_is_qmi_halt_not_busy_state(p_Fm->p_FmQmiRegs))
++ {
++ fman_resume(p_Fm->p_FmFpmRegs);
++ XX_UDelay(100);
++ }
++#endif /* not FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
++ }
++
++#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
++ if (!p_Fm->resetOnInit) /* Skip operations done in errata workaround */
++ {
++#endif /* FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
++ /* Load FMan-Controller code to IRAM */
++
++ ClearIRam(p_Fm);
++
++ if (p_Fm->firmware.p_Code && (LoadFmanCtrlCode(p_Fm) != E_OK))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
++ }
++#endif /* FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
++
++#ifdef FM_CAPWAP_SUPPORT
++    /* save the first 256 bytes in MURAM */
++ p_Fm->resAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram, 256, 0));
++ if (!p_Fm->resAddr)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for reserved Area failed"));
++
++ WRITE_BLOCK(UINT_TO_PTR(p_Fm->resAddr), 0, 256);
++#endif /* FM_CAPWAP_SUPPORT */
++
++#if (DPAA_VERSION >= 11)
++ p_Fm->partVSPBase = AllocVSPsForPartition(h_Fm, p_Fm->partVSPBase, p_Fm->partNumOfVSPs, p_Fm->guestId);
++ if (p_Fm->partVSPBase == (uint8_t)(ILLEGAL_BASE))
++        DBG(WARNING, ("partition VSP allocation FAILED"));
++#endif /* (DPAA_VERSION >= 11) */
++
++ /* General FM driver initialization */
++ p_Fm->fmMuramPhysBaseAddr =
++ (uint64_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->baseAddr + FM_MM_MURAM)));
++
++ for (i=0;i<e_FM_EV_DUMMY_LAST;i++)
++ p_Fm->intrMng[i].f_Isr = UnimplementedIsr;
++ for (i=0;i<FM_NUM_OF_FMAN_CTRL_EVENT_REGS;i++)
++ p_Fm->fmanCtrlIntr[i].f_Isr = UnimplementedFmanCtrlIsr;
++
++ p_FmDriverParam->exceptions = p_Fm->p_FmStateStruct->exceptions;
++
++ /**********************/
++ /* Init DMA Registers */
++ /**********************/
++ err = InitFmDma(p_Fm);
++ if (err != E_OK)
++ {
++ FreeInitResources(p_Fm);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ /**********************/
++ /* Init FPM Registers */
++ /**********************/
++ err = InitFmFpm(p_Fm);
++ if (err != E_OK)
++ {
++ FreeInitResources(p_Fm);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ /* define common resources */
++ /* allocate MURAM for FIFO according to total size */
++ p_Fm->fifoBaseAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram,
++ p_Fm->p_FmStateStruct->totalFifoSize,
++ BMI_FIFO_ALIGN));
++ if (!p_Fm->fifoBaseAddr)
++ {
++ FreeInitResources(p_Fm);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for BMI FIFO failed"));
++ }
++
++ p_FmDriverParam->fifo_base_addr = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->fifoBaseAddr)) - p_Fm->fmMuramPhysBaseAddr);
++ p_FmDriverParam->total_fifo_size = p_Fm->p_FmStateStruct->totalFifoSize;
++ p_FmDriverParam->total_num_of_tasks = p_Fm->p_FmStateStruct->totalNumOfTasks;
++ p_FmDriverParam->clk_freq = p_Fm->p_FmStateStruct->fmClkFreq;
++
++ /**********************/
++ /* Init BMI Registers */
++ /**********************/
++ err = InitFmBmi(p_Fm);
++ if (err != E_OK)
++ {
++ FreeInitResources(p_Fm);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ /**********************/
++ /* Init QMI Registers */
++ /**********************/
++ err = InitFmQmi(p_Fm);
++ if (err != E_OK)
++ {
++ FreeInitResources(p_Fm);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ /* build the FM master partition IPC address */
++    if (Sprint(p_Fm->fmModuleName, "FM_%d_%d", p_Fm->p_FmStateStruct->fmId, NCSW_MASTER_ID) != 6)
++ {
++ FreeInitResources(p_Fm);
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
++ }
++
++ err = XX_IpcRegisterMsgHandler(p_Fm->fmModuleName, FmHandleIpcMsgCB, p_Fm, FM_IPC_MAX_REPLY_SIZE);
++ if (err)
++ {
++ FreeInitResources(p_Fm);
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++ }
++
++ /* Register the FM interrupts handlers */
++ if (p_Fm->p_FmStateStruct->irq != NO_IRQ)
++ {
++ XX_SetIntr(p_Fm->p_FmStateStruct->irq, FM_EventIsr, p_Fm);
++ XX_EnableIntr(p_Fm->p_FmStateStruct->irq);
++ }
++
++ if (p_Fm->p_FmStateStruct->errIrq != NO_IRQ)
++ {
++ XX_SetIntr(p_Fm->p_FmStateStruct->errIrq, (void (*) (t_Handle))FM_ErrorIsr, p_Fm);
++ XX_EnableIntr(p_Fm->p_FmStateStruct->errIrq);
++ }
++
++ err = (t_Error)fman_enable(&fman_rg , p_FmDriverParam);
++ if (err != E_OK)
++ return err; /* FIXME */
++
++ EnableTimeStamp(p_Fm);
++
++ if (p_Fm->firmware.p_Code)
++ {
++ XX_Free(p_Fm->firmware.p_Code);
++ p_Fm->firmware.p_Code = NULL;
++ }
++
++ XX_Free(p_Fm->p_FmDriverParam);
++ p_Fm->p_FmDriverParam = NULL;
++
++ return E_OK;
++}
++
++/**************************************************************************//**
++ @Function FM_Free
++
++ @Description Frees all resources that were assigned to FM module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_Fm - FM module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_Free(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_rg fman_rg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++
++ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
++ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
++ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
++ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
++
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++#if (DPAA_VERSION >= 11)
++ FreeVSPsForPartition(h_Fm, p_Fm->partVSPBase, p_Fm->partNumOfVSPs, p_Fm->guestId);
++
++ if (p_Fm->p_FmSp)
++ {
++ XX_Free(p_Fm->p_FmSp);
++ p_Fm->p_FmSp = NULL;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (p_Fm->fmModuleName[0] != 0)
++ XX_IpcUnregisterMsgHandler(p_Fm->fmModuleName);
++
++ if (!p_Fm->recoveryMode)
++ XX_Free(p_Fm->p_FmStateStruct);
++
++ XX_Free(p_Fm);
++
++ return E_OK;
++ }
++
++ fman_free_resources(&fman_rg);
++
++ if ((p_Fm->guestId == NCSW_MASTER_ID) && (p_Fm->fmModuleName[0] != 0))
++ XX_IpcUnregisterMsgHandler(p_Fm->fmModuleName);
++
++ if (p_Fm->p_FmStateStruct)
++ {
++ if (p_Fm->p_FmStateStruct->irq != NO_IRQ)
++ {
++ XX_DisableIntr(p_Fm->p_FmStateStruct->irq);
++ XX_FreeIntr(p_Fm->p_FmStateStruct->irq);
++ }
++ if (p_Fm->p_FmStateStruct->errIrq != NO_IRQ)
++ {
++ XX_DisableIntr(p_Fm->p_FmStateStruct->errIrq);
++ XX_FreeIntr(p_Fm->p_FmStateStruct->errIrq);
++ }
++ }
++
++#if (DPAA_VERSION >= 11)
++ FreeVSPsForPartition(h_Fm, p_Fm->partVSPBase, p_Fm->partNumOfVSPs, p_Fm->guestId);
++
++ if (p_Fm->p_FmSp)
++ {
++ XX_Free(p_Fm->p_FmSp);
++ p_Fm->p_FmSp = NULL;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ if (p_Fm->h_Spinlock)
++ XX_FreeSpinlock(p_Fm->h_Spinlock);
++
++ if (p_Fm->p_FmDriverParam)
++ {
++ if (p_Fm->firmware.p_Code)
++ XX_Free(p_Fm->firmware.p_Code);
++ XX_Free(p_Fm->p_FmDriverParam);
++ p_Fm->p_FmDriverParam = NULL;
++ }
++
++ FreeInitResources(p_Fm);
++
++ if (!p_Fm->recoveryMode && p_Fm->p_FmStateStruct)
++ XX_Free(p_Fm->p_FmStateStruct);
++
++ XX_Free(p_Fm);
++
++ return E_OK;
++}
++
++/*************************************************/
++/* API Advanced Init unit functions */
++/*************************************************/
++
++t_Error FM_ConfigResetOnInit(t_Handle h_Fm, bool enable)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->resetOnInit = enable;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigResetOnInitOverrideCallback(t_Handle h_Fm, t_FmResetOnInitOverrideCallback *f_ResetOnInitOverride)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->f_ResetOnInitOverride = f_ResetOnInitOverride;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigTotalFifoSize(t_Handle h_Fm, uint32_t totalFifoSize)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmStateStruct->totalFifoSize = totalFifoSize;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaCacheOverride(t_Handle h_Fm, e_FmDmaCacheOverride cacheOverride)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ enum fman_dma_cache_override fsl_cache_override;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ FMAN_CACHE_OVERRIDE_TRANS(fsl_cache_override, cacheOverride)
++ p_Fm->p_FmDriverParam->dma_cache_override = fsl_cache_override;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaAidOverride(t_Handle h_Fm, bool aidOverride)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->dma_aid_override = aidOverride;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaAidMode(t_Handle h_Fm, e_FmDmaAidMode aidMode)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ enum fman_dma_aid_mode fsl_aid_mode;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ FMAN_AID_MODE_TRANS(fsl_aid_mode, aidMode);
++ p_Fm->p_FmDriverParam->dma_aid_mode = fsl_aid_mode;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaAxiDbgNumOfBeats(t_Handle h_Fm, uint8_t axiDbgNumOfBeats)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++#if (DPAA_VERSION >= 11)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
++#else
++ p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats = axiDbgNumOfBeats;
++
++ return E_OK;
++#endif /* (DPAA_VERSION >= 11) */
++}
++
++t_Error FM_ConfigDmaCamNumOfEntries(t_Handle h_Fm, uint8_t numOfEntries)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->dma_cam_num_of_entries = numOfEntries;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaDbgCounter(t_Handle h_Fm, e_FmDmaDbgCntMode fmDmaDbgCntMode)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ enum fman_dma_dbg_cnt_mode fsl_dma_dbg_cnt;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ FMAN_DMA_DBG_CNT_TRANS(fsl_dma_dbg_cnt, fmDmaDbgCntMode);
++ p_Fm->p_FmDriverParam->dma_dbg_cnt_mode = fsl_dma_dbg_cnt;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaStopOnBusErr(t_Handle h_Fm, bool stop)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->dma_stop_on_bus_error = stop;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaEmergency(t_Handle h_Fm, t_FmDmaEmergency *p_Emergency)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ enum fman_dma_emergency_level fsl_dma_emer;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ FMAN_DMA_EMER_TRANS(fsl_dma_emer, p_Emergency->emergencyLevel);
++ p_Fm->p_FmDriverParam->dma_en_emergency = TRUE;
++ p_Fm->p_FmDriverParam->dma_emergency_bus_select = (uint32_t)p_Emergency->emergencyBusSelect;
++ p_Fm->p_FmDriverParam->dma_emergency_level = fsl_dma_emer;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaEmergencySmoother(t_Handle h_Fm, uint32_t emergencyCnt)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->dma_en_emergency_smoother = TRUE;
++ p_Fm->p_FmDriverParam->dma_emergency_switch_counter = emergencyCnt;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaErr(t_Handle h_Fm, e_FmDmaErr dmaErr)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ enum fman_dma_err fsl_dma_err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ FMAN_DMA_ERR_TRANS(fsl_dma_err, dmaErr);
++ p_Fm->p_FmDriverParam->dma_err = fsl_dma_err;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigCatastrophicErr(t_Handle h_Fm, e_FmCatastrophicErr catastrophicErr)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ enum fman_catastrophic_err fsl_catastrophic_err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ FMAN_CATASTROPHIC_ERR_TRANS(fsl_catastrophic_err, catastrophicErr);
++ p_Fm->p_FmDriverParam->catastrophic_err = fsl_catastrophic_err;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigEnableMuramTestMode(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
++
++ p_Fm->p_FmDriverParam->en_muram_test_mode = TRUE;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigEnableIramTestMode(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE );
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
++
++ p_Fm->p_FmDriverParam->en_iram_test_mode = TRUE;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigHaltOnExternalActivation(t_Handle h_Fm, bool enable)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->halt_on_external_activ = enable;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigHaltOnUnrecoverableEccError(t_Handle h_Fm, bool enable)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
++
++ p_Fm->p_FmDriverParam->halt_on_unrecov_ecc_err = enable;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigException(t_Handle h_Fm, e_FmExceptions exception, bool enable)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ uint32_t bitMask = 0;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ GET_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_Fm->userSetExceptions |= bitMask;
++ else
++ p_Fm->p_FmStateStruct->exceptions &= ~bitMask;
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ return E_OK;
++}
++
++t_Error FM_ConfigExternalEccRamsEnable(t_Handle h_Fm, bool enable)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->external_ecc_rams_enable = enable;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigTnumAgingPeriod(t_Handle h_Fm, uint16_t tnumAgingPeriod)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->tnum_aging_period = tnumAgingPeriod;
++ p_Fm->tnumAgingPeriod = p_Fm->p_FmDriverParam->tnum_aging_period;
++
++ return E_OK;
++}
++
++/****************************************************/
++/* Hidden-DEBUG Only API */
++/****************************************************/
++
++t_Error FM_ConfigThresholds(t_Handle h_Fm, t_FmThresholds *p_FmThresholds)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->disp_limit_tsh = p_FmThresholds->dispLimit;
++ p_Fm->p_FmDriverParam->prs_disp_tsh = p_FmThresholds->prsDispTh;
++ p_Fm->p_FmDriverParam->plcr_disp_tsh = p_FmThresholds->plcrDispTh;
++ p_Fm->p_FmDriverParam->kg_disp_tsh = p_FmThresholds->kgDispTh;
++ p_Fm->p_FmDriverParam->bmi_disp_tsh = p_FmThresholds->bmiDispTh;
++ p_Fm->p_FmDriverParam->qmi_enq_disp_tsh = p_FmThresholds->qmiEnqDispTh;
++ p_Fm->p_FmDriverParam->qmi_deq_disp_tsh = p_FmThresholds->qmiDeqDispTh;
++ p_Fm->p_FmDriverParam->fm_ctl1_disp_tsh = p_FmThresholds->fmCtl1DispTh;
++ p_Fm->p_FmDriverParam->fm_ctl2_disp_tsh = p_FmThresholds->fmCtl2DispTh;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaSosEmergencyThreshold(t_Handle h_Fm, uint32_t dmaSosEmergency)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->dma_sos_emergency = dmaSosEmergency;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaWriteBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++#if (DPAA_VERSION >= 11)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
++#else
++ p_Fm->p_FmDriverParam->dma_write_buf_tsh_asrt_emer = p_FmDmaThresholds->assertEmergency;
++ p_Fm->p_FmDriverParam->dma_write_buf_tsh_clr_emer = p_FmDmaThresholds->clearEmergency;
++
++ return E_OK;
++#endif
++}
++
++t_Error FM_ConfigDmaCommQThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->dma_comm_qtsh_asrt_emer = p_FmDmaThresholds->assertEmergency;
++ p_Fm->p_FmDriverParam->dma_comm_qtsh_clr_emer = p_FmDmaThresholds->clearEmergency;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigDmaReadBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++#if (DPAA_VERSION >= 11)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
++#else
++ p_Fm->p_FmDriverParam->dma_read_buf_tsh_clr_emer = p_FmDmaThresholds->clearEmergency;
++ p_Fm->p_FmDriverParam->dma_read_buf_tsh_asrt_emer = p_FmDmaThresholds->assertEmergency;
++
++ return E_OK;
++#endif
++}
++
++t_Error FM_ConfigDmaWatchdog(t_Handle h_Fm, uint32_t watchdogValue)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ p_Fm->p_FmDriverParam->dma_watchdog = watchdogValue;
++
++ return E_OK;
++}
++
++t_Error FM_ConfigEnableCounters(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++    UNUSED(p_Fm);
++
++ return E_OK;
++}
++
++t_Error FmGetSetParams(t_Handle h_Fm, t_FmGetSetParams *p_Params)
++{
++ t_Fm* p_Fm = (t_Fm*)h_Fm;
++ if (p_Params->setParams.type & UPDATE_FM_CLD)
++ {
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_cld, GET_UINT32(
++ p_Fm->p_FmFpmRegs->fm_cld) | 0x00000800);
++ }
++ if (p_Params->setParams.type & CLEAR_IRAM_READY)
++ {
++ t_FMIramRegs *p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
++ WRITE_UINT32(p_Iram->iready,GET_UINT32(p_Iram->iready) & ~IRAM_READY);
++ }
++ if (p_Params->setParams.type & UPDATE_FPM_EXTC)
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_extc,0x80000000);
++ if (p_Params->setParams.type & UPDATE_FPM_EXTC_CLEAR)
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_extc,0x00800000);
++ if (p_Params->setParams.type & UPDATE_FPM_BRKC_SLP)
++ {
++ if (p_Params->setParams.sleep)
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc, GET_UINT32(
++ p_Fm->p_FmFpmRegs->fmfp_brkc) | FPM_BRKC_SLP);
++ else
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc, GET_UINT32(
++ p_Fm->p_FmFpmRegs->fmfp_brkc) & ~FPM_BRKC_SLP);
++ }
++ if (p_Params->getParams.type & GET_FM_CLD)
++ p_Params->getParams.fm_cld = GET_UINT32(p_Fm->p_FmFpmRegs->fm_cld);
++ if (p_Params->getParams.type & GET_FMQM_GS)
++ p_Params->getParams.fmqm_gs = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_gs);
++ if (p_Params->getParams.type & GET_FM_NPI)
++ p_Params->getParams.fm_npi = GET_UINT32(p_Fm->p_FmFpmRegs->fm_npi);
++ if (p_Params->getParams.type & GET_FMFP_EXTC)
++ p_Params->getParams.fmfp_extc = GET_UINT32(p_Fm->p_FmFpmRegs->fmfp_extc);
++ return E_OK;
++}
++
++
++/****************************************************/
++/* API Run-time Control unit functions */
++/****************************************************/
++void FM_EventIsr(t_Handle h_Fm)
++{
++#define FM_M_CALL_1G_MAC_ISR(_id) \
++ { \
++ if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].guestId) \
++ SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id), pending); \
++ else \
++ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].h_SrcHandle);\
++ }
++#define FM_M_CALL_10G_MAC_ISR(_id) \
++ { \
++ if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].guestId) \
++ SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id), pending); \
++ else \
++ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].h_SrcHandle);\
++ }
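++    /* The two macros above either forward a MAC event to its owning partition
++       over IPC or dispatch the locally registered ISR, depending on which
++       guestId owns the event. */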
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ uint32_t pending, event;
++ struct fman_fpm_regs *fpm_rg;
++
++ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ fpm_rg = p_Fm->p_FmFpmRegs;
++
++ /* normal interrupts */
++ pending = fman_get_normal_pending(fpm_rg);
++ if (!pending)
++ return;
++    if (pending & INTR_EN_WAKEUP) /* this is a wake-up from sleep interrupt */
++ {
++ t_FmGetSetParams fmGetSetParams;
++ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
++ fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP;
++ fmGetSetParams.setParams.sleep = 0;
++ FmGetSetParams(h_Fm, &fmGetSetParams);
++ }
++ if (pending & INTR_EN_QMI)
++ QmiEvent(p_Fm);
++ if (pending & INTR_EN_PRS)
++ p_Fm->intrMng[e_FM_EV_PRS].f_Isr(p_Fm->intrMng[e_FM_EV_PRS].h_SrcHandle);
++ if (pending & INTR_EN_PLCR)
++ p_Fm->intrMng[e_FM_EV_PLCR].f_Isr(p_Fm->intrMng[e_FM_EV_PLCR].h_SrcHandle);
++ if (pending & INTR_EN_TMR)
++ p_Fm->intrMng[e_FM_EV_TMR].f_Isr(p_Fm->intrMng[e_FM_EV_TMR].h_SrcHandle);
++
++ /* MAC events may belong to different partitions */
++ if (pending & INTR_EN_1G_MAC0)
++ FM_M_CALL_1G_MAC_ISR(0);
++ if (pending & INTR_EN_1G_MAC1)
++ FM_M_CALL_1G_MAC_ISR(1);
++ if (pending & INTR_EN_1G_MAC2)
++ FM_M_CALL_1G_MAC_ISR(2);
++ if (pending & INTR_EN_1G_MAC3)
++ FM_M_CALL_1G_MAC_ISR(3);
++ if (pending & INTR_EN_1G_MAC4)
++ FM_M_CALL_1G_MAC_ISR(4);
++ if (pending & INTR_EN_1G_MAC5)
++ FM_M_CALL_1G_MAC_ISR(5);
++ if (pending & INTR_EN_1G_MAC6)
++ FM_M_CALL_1G_MAC_ISR(6);
++ if (pending & INTR_EN_1G_MAC7)
++ FM_M_CALL_1G_MAC_ISR(7);
++ if (pending & INTR_EN_10G_MAC0)
++ FM_M_CALL_10G_MAC_ISR(0);
++ if (pending & INTR_EN_10G_MAC1)
++ FM_M_CALL_10G_MAC_ISR(1);
++
++ /* IM port events may belong to different partitions */
++ if (pending & INTR_EN_REV0)
++ {
++ event = fman_get_controller_event(fpm_rg, 0);
++ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_0].guestId)
++            /* TODO: IPC ISR for FMan Ctrl */
++ ASSERT_COND(0);
++ /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_0, pending); */
++ else
++ p_Fm->fmanCtrlIntr[0].f_Isr(p_Fm->fmanCtrlIntr[0].h_SrcHandle, event);
++    }
++ if (pending & INTR_EN_REV1)
++ {
++ event = fman_get_controller_event(fpm_rg, 1);
++ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_1].guestId)
++            /* TODO: IPC ISR for FMan Ctrl */
++ ASSERT_COND(0);
++ /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_1, pending); */
++ else
++ p_Fm->fmanCtrlIntr[1].f_Isr(p_Fm->fmanCtrlIntr[1].h_SrcHandle, event);
++ }
++ if (pending & INTR_EN_REV2)
++ {
++ event = fman_get_controller_event(fpm_rg, 2);
++ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_2].guestId)
++            /* TODO: IPC ISR for FMan Ctrl */
++ ASSERT_COND(0);
++ /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_2, pending); */
++ else
++ p_Fm->fmanCtrlIntr[2].f_Isr(p_Fm->fmanCtrlIntr[2].h_SrcHandle, event);
++ }
++ if (pending & INTR_EN_REV3)
++ {
++ event = fman_get_controller_event(fpm_rg, 3);
++ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_3].guestId)
++            /* TODO: IPC ISR for FMan Ctrl */
++            ASSERT_COND(0);
++            /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_3, pending); */
++ else
++ p_Fm->fmanCtrlIntr[3].f_Isr(p_Fm->fmanCtrlIntr[3].h_SrcHandle, event);
++ }
++#ifdef FM_MACSEC_SUPPORT
++ if (pending & INTR_EN_MACSEC_MAC0)
++ {
++ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_MACSEC_MAC0].guestId)
++ SendIpcIsr(p_Fm, e_FM_EV_MACSEC_MAC0, pending);
++ else
++ p_Fm->intrMng[e_FM_EV_MACSEC_MAC0].f_Isr(p_Fm->intrMng[e_FM_EV_MACSEC_MAC0].h_SrcHandle);
++ }
++#endif /* FM_MACSEC_SUPPORT */
++}
++
++t_Error FM_ErrorIsr(t_Handle h_Fm)
++{
++#define FM_M_CALL_1G_MAC_ERR_ISR(_id) \
++ { \
++ if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].guestId) \
++ SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id), pending); \
++ else \
++ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].h_SrcHandle);\
++ }
++#define FM_M_CALL_10G_MAC_ERR_ISR(_id) \
++ { \
++ if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].guestId) \
++ SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id), pending); \
++ else \
++ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].h_SrcHandle);\
++ }
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ uint32_t pending;
++ struct fman_fpm_regs *fpm_rg;
++
++ SANITY_CHECK_RETURN_ERROR(h_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ fpm_rg = p_Fm->p_FmFpmRegs;
++
++ /* error interrupts */
++ pending = fman_get_fpm_error_interrupts(fpm_rg);
++ if (!pending)
++ return ERROR_CODE(E_EMPTY);
++
++ if (pending & ERR_INTR_EN_BMI)
++ BmiErrEvent(p_Fm);
++ if (pending & ERR_INTR_EN_QMI)
++ QmiErrEvent(p_Fm);
++ if (pending & ERR_INTR_EN_FPM)
++ FpmErrEvent(p_Fm);
++ if (pending & ERR_INTR_EN_DMA)
++ DmaErrEvent(p_Fm);
++ if (pending & ERR_INTR_EN_IRAM)
++ IramErrIntr(p_Fm);
++ if (pending & ERR_INTR_EN_MURAM)
++ MuramErrIntr(p_Fm);
++ if (pending & ERR_INTR_EN_PRS)
++ p_Fm->intrMng[e_FM_EV_ERR_PRS].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_PRS].h_SrcHandle);
++ if (pending & ERR_INTR_EN_PLCR)
++ p_Fm->intrMng[e_FM_EV_ERR_PLCR].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_PLCR].h_SrcHandle);
++ if (pending & ERR_INTR_EN_KG)
++ p_Fm->intrMng[e_FM_EV_ERR_KG].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_KG].h_SrcHandle);
++
++ /* MAC events may belong to different partitions */
++ if (pending & ERR_INTR_EN_1G_MAC0)
++ FM_M_CALL_1G_MAC_ERR_ISR(0);
++ if (pending & ERR_INTR_EN_1G_MAC1)
++ FM_M_CALL_1G_MAC_ERR_ISR(1);
++ if (pending & ERR_INTR_EN_1G_MAC2)
++ FM_M_CALL_1G_MAC_ERR_ISR(2);
++ if (pending & ERR_INTR_EN_1G_MAC3)
++ FM_M_CALL_1G_MAC_ERR_ISR(3);
++ if (pending & ERR_INTR_EN_1G_MAC4)
++ FM_M_CALL_1G_MAC_ERR_ISR(4);
++ if (pending & ERR_INTR_EN_1G_MAC5)
++ FM_M_CALL_1G_MAC_ERR_ISR(5);
++ if (pending & ERR_INTR_EN_1G_MAC6)
++ FM_M_CALL_1G_MAC_ERR_ISR(6);
++ if (pending & ERR_INTR_EN_1G_MAC7)
++ FM_M_CALL_1G_MAC_ERR_ISR(7);
++ if (pending & ERR_INTR_EN_10G_MAC0)
++ FM_M_CALL_10G_MAC_ERR_ISR(0);
++ if (pending & ERR_INTR_EN_10G_MAC1)
++ FM_M_CALL_10G_MAC_ERR_ISR(1);
++
++#ifdef FM_MACSEC_SUPPORT
++ if (pending & ERR_INTR_EN_MACSEC_MAC0)
++ {
++ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_ERR_MACSEC_MAC0].guestId)
++ SendIpcIsr(p_Fm, e_FM_EV_ERR_MACSEC_MAC0, pending);
++ else
++ p_Fm->intrMng[e_FM_EV_ERR_MACSEC_MAC0].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_MACSEC_MAC0].h_SrcHandle);
++ }
++#endif /* FM_MACSEC_SUPPORT */
++
++ return E_OK;
++}
++
++t_Error FM_SetPortsBandwidth(t_Handle h_Fm, t_FmPortsBandwidthParams *p_PortsBandwidth)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ int i;
++ uint8_t sum;
++ uint8_t hardwarePortId;
++ uint8_t weights[64];
++ uint8_t weight, maxPercent = 0;
++ struct fman_bmi_regs *bmi_rg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ bmi_rg = p_Fm->p_FmBmiRegs;
++
++ memset(weights, 0, (sizeof(uint8_t) * 64));
++
++ /* check that all ports add up to 100% */
++ sum = 0;
++ for (i=0; i < p_PortsBandwidth->numOfPorts; i++)
++        sum += p_PortsBandwidth->portsBandwidths[i].bandwidth;
++    if (sum != 100)
++        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Sum of port bandwidths differs from 100%"));
++
++ /* find highest percent */
++ for (i=0; i < p_PortsBandwidth->numOfPorts; i++)
++ {
++ if (p_PortsBandwidth->portsBandwidths[i].bandwidth > maxPercent)
++ maxPercent = p_PortsBandwidth->portsBandwidths[i].bandwidth;
++ }
++
++ ASSERT_COND(maxPercent > 0); /* guaranteed by sum = 100 */
++
++ /* calculate weight for each port */
++ for (i=0; i < p_PortsBandwidth->numOfPorts; i++)
++ {
++ weight = (uint8_t)((p_PortsBandwidth->portsBandwidths[i].bandwidth * PORT_MAX_WEIGHT ) / maxPercent);
++        /* We want an even division between 1 and PORT_MAX_WEIGHT, so if exact
++           division is not reached, we round up so that:
++           0 up to maxPercent/PORT_MAX_WEIGHT get "1"
++           maxPercent/PORT_MAX_WEIGHT+1 up to (maxPercent/PORT_MAX_WEIGHT)*2 get "2"
++           ...
++           maxPercent - maxPercent/PORT_MAX_WEIGHT up to maxPercent get "PORT_MAX_WEIGHT" */
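++        /* Worked example (illustrative; assumes PORT_MAX_WEIGHT == 16 for the
++           arithmetic): with maxPercent = 50 and a port bandwidth of 30,
++           (30 * 16) / 50 = 9 and (30 * 16) % 50 = 30 != 0, so the port is
++           rounded up to weight 10. */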
++ if ((uint8_t)((p_PortsBandwidth->portsBandwidths[i].bandwidth * PORT_MAX_WEIGHT ) % maxPercent))
++ weight++;
++
++ /* find the location of this port within the register */
++ hardwarePortId =
++ SwPortIdToHwPortId(p_PortsBandwidth->portsBandwidths[i].type,
++ p_PortsBandwidth->portsBandwidths[i].relativePortId,
++ p_Fm->p_FmStateStruct->revInfo.majorRev,
++ p_Fm->p_FmStateStruct->revInfo.minorRev);
++
++ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
++ weights[hardwarePortId] = weight;
++ }
++
++ fman_set_ports_bandwidth(bmi_rg, weights);
++
++ return E_OK;
++}
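++
++/* Hypothetical usage sketch (illustration only, not part of the driver):
++   splitting the BMI bandwidth 60/40 between two Rx ports. The field names
++   follow the t_FmPortsBandwidthParams layout used above; the port type and
++   relative ids are assumptions for the example.
++
++       t_FmPortsBandwidthParams bw;
++
++       memset(&bw, 0, sizeof(bw));
++       bw.numOfPorts = 2;
++       bw.portsBandwidths[0].type           = e_FM_PORT_TYPE_RX;
++       bw.portsBandwidths[0].relativePortId = 0;
++       bw.portsBandwidths[0].bandwidth      = 60;
++       bw.portsBandwidths[1].type           = e_FM_PORT_TYPE_RX;
++       bw.portsBandwidths[1].relativePortId = 1;
++       bw.portsBandwidths[1].bandwidth      = 40;
++       FM_SetPortsBandwidth(h_Fm, &bw);   (bandwidths must sum to exactly 100)
++ */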
++
++t_Error FM_EnableRamsEcc(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_fpm_regs *fpm_rg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++
++ fpm_rg = p_Fm->p_FmFpmRegs;
++
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ t_FmIpcMsg msg;
++ t_Error err;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_ENABLE_RAM_ECC;
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ NULL,
++ NULL,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ return E_OK;
++ }
++
++ if (!p_Fm->p_FmStateStruct->internalCall)
++ p_Fm->p_FmStateStruct->explicitEnable = TRUE;
++ p_Fm->p_FmStateStruct->internalCall = FALSE;
++
++ if (p_Fm->p_FmStateStruct->ramsEccEnable)
++ return E_OK;
++ else
++ {
++ fman_enable_rams_ecc(fpm_rg);
++ p_Fm->p_FmStateStruct->ramsEccEnable = TRUE;
++ }
++
++ return E_OK;
++}
++
++t_Error FM_DisableRamsEcc(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ bool explicitDisable = FALSE;
++ struct fman_fpm_regs *fpm_rg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
++
++ fpm_rg = p_Fm->p_FmFpmRegs;
++
++ if (p_Fm->guestId != NCSW_MASTER_ID)
++ {
++ t_Error err;
++ t_FmIpcMsg msg;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.msgId = FM_DISABLE_RAM_ECC;
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ NULL,
++ NULL,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ return E_OK;
++ }
++
++ if (!p_Fm->p_FmStateStruct->internalCall)
++ explicitDisable = TRUE;
++ p_Fm->p_FmStateStruct->internalCall = FALSE;
++
++    /* If the RAMs are already disabled, or if they were explicitly enabled and
++       this call is indirect (not explicit), ignore this call. */
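++    /* Summary of the flag interplay implemented below:
++           ramsEccEnable  explicitEnable  explicit disable?   action
++           FALSE          -               -                   ignore
++           TRUE           TRUE            no                  ignore
++           TRUE           TRUE            yes                 clear flag, disable ECC
++           TRUE           FALSE           -                   disable ECC */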
++ if (!p_Fm->p_FmStateStruct->ramsEccEnable ||
++ (p_Fm->p_FmStateStruct->explicitEnable && !explicitDisable))
++ return E_OK;
++ else
++ {
++ if (p_Fm->p_FmStateStruct->explicitEnable)
++            /* This is the case where both explicit flags are TRUE.
++               Turn off this flag for cases where the following ramsEnable
++               routines are called */
++ p_Fm->p_FmStateStruct->explicitEnable = FALSE;
++
++        fman_disable_rams_ecc(fpm_rg);
++ p_Fm->p_FmStateStruct->ramsEccEnable = FALSE;
++ }
++
++ return E_OK;
++}
++
++t_Error FM_SetException(t_Handle h_Fm, e_FmExceptions exception, bool enable)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ uint32_t bitMask = 0;
++ enum fman_exceptions fslException;
++ struct fman_rg fman_rg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++
++ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
++ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
++ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
++ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
++
++ GET_EXCEPTION_FLAG(bitMask, exception);
++ if (bitMask)
++ {
++ if (enable)
++ p_Fm->p_FmStateStruct->exceptions |= bitMask;
++ else
++ p_Fm->p_FmStateStruct->exceptions &= ~bitMask;
++
++ fslException = FmanExceptionTrans(exception);
++
++ return (t_Error)fman_set_exception(&fman_rg,
++ fslException,
++ enable);
++ }
++ else
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
++
++ return E_OK;
++}
++
++t_Error FM_GetRevision(t_Handle h_Fm, t_FmRevisionInfo *p_FmRevisionInfo)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ p_FmRevisionInfo->majorRev = p_Fm->p_FmStateStruct->revInfo.majorRev;
++ p_FmRevisionInfo->minorRev = p_Fm->p_FmStateStruct->revInfo.minorRev;
++
++ return E_OK;
++}
++
++t_Error FM_GetFmanCtrlCodeRevision(t_Handle h_Fm, t_FmCtrlCodeRevisionInfo *p_RevisionInfo)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_FMIramRegs *p_Iram;
++ uint32_t revInfo;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_RevisionInfo, E_NULL_POINTER);
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_Error err;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength;
++ t_FmIpcFmanCtrlCodeRevisionInfo ipcRevInfo;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_GET_FMAN_CTRL_CODE_REV;
++ replyLength = sizeof(uint32_t) + sizeof(t_FmCtrlCodeRevisionInfo);
++ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmCtrlCodeRevisionInfo)))
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ memcpy((uint8_t*)&ipcRevInfo, reply.replyBody, sizeof(t_FmCtrlCodeRevisionInfo));
++ p_RevisionInfo->packageRev = ipcRevInfo.packageRev;
++ p_RevisionInfo->majorRev = ipcRevInfo.majorRev;
++ p_RevisionInfo->minorRev = ipcRevInfo.minorRev;
++ return (t_Error)(reply.error);
++ }
++ else if (p_Fm->guestId != NCSW_MASTER_ID)
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("running in guest-mode without IPC!"));
++
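++    /* Read the revision word directly from IRAM: write the address to IADD,
++       wait until the write has taken effect, then read it back through IDATA. */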
++ p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
++ WRITE_UINT32(p_Iram->iadd, 0x4);
++ while (GET_UINT32(p_Iram->iadd) != 0x4) ;
++ revInfo = GET_UINT32(p_Iram->idata);
++ p_RevisionInfo->packageRev = (uint16_t)((revInfo & 0xFFFF0000) >> 16);
++ p_RevisionInfo->majorRev = (uint8_t)((revInfo & 0x0000FF00) >> 8);
++ p_RevisionInfo->minorRev = (uint8_t)(revInfo & 0x000000FF);
++
++ return E_OK;
++}
++
++uint32_t FM_GetCounter(t_Handle h_Fm, e_FmCounters counter)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_Error err;
++ uint32_t counterValue;
++ struct fman_rg fman_rg;
++ enum fman_counters fsl_counter;
++
++ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(!p_Fm->p_FmDriverParam, E_INVALID_STATE, 0);
++
++ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
++ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
++ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
++ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->baseAddr &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ uint32_t replyLength, outCounter;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_GET_COUNTER;
++ memcpy(msg.msgBody, (uint8_t *)&counter, sizeof(uint32_t));
++ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++                                sizeof(msg.msgId) + sizeof(counterValue),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MAJOR, err, NO_MSG);
++ return 0;
++ }
++ if (replyLength != (sizeof(uint32_t) + sizeof(uint32_t)))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return 0;
++ }
++
++ memcpy((uint8_t*)&outCounter, reply.replyBody, sizeof(uint32_t));
++ return outCounter;
++ }
++ else if (!p_Fm->baseAddr)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Either IPC or 'baseAddress' is required!"));
++ return 0;
++ }
++
++    /* When applicable (i.e. when there is an 'enable counters' bit),
++       check that the counters are enabled */
++ switch (counter)
++ {
++ case (e_FM_COUNTERS_DEQ_1):
++ case (e_FM_COUNTERS_DEQ_2):
++ case (e_FM_COUNTERS_DEQ_3):
++ if ((p_Fm->p_FmStateStruct->revInfo.majorRev == 4) ||
++ (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6))
++ {
++ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Requested counter not supported"));
++ return 0;
++ }
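++            /* no break: DEQ_1..DEQ_3 must also pass the QMI counters-enabled
++               check below */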
++ case (e_FM_COUNTERS_ENQ_TOTAL_FRAME):
++ case (e_FM_COUNTERS_DEQ_TOTAL_FRAME):
++ case (e_FM_COUNTERS_DEQ_0):
++ case (e_FM_COUNTERS_DEQ_FROM_DEFAULT):
++ case (e_FM_COUNTERS_DEQ_FROM_CONTEXT):
++ case (e_FM_COUNTERS_DEQ_FROM_FD):
++ case (e_FM_COUNTERS_DEQ_CONFIRM):
++ if (!(GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_gc) & QMI_CFG_EN_COUNTERS))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Requested counter was not enabled"));
++ return 0;
++ }
++ break;
++ default:
++ break;
++ }
++
++ FMAN_COUNTERS_TRANS(fsl_counter, counter);
++ return fman_get_counter(&fman_rg, fsl_counter);
++}
++
++t_Error FM_ModifyCounter(t_Handle h_Fm, e_FmCounters counter, uint32_t val)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_rg fman_rg;
++ enum fman_counters fsl_counter;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++
++ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
++ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
++ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
++ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
++
++ FMAN_COUNTERS_TRANS(fsl_counter, counter);
++ return (t_Error)fman_modify_counter(&fman_rg, fsl_counter, val);
++}
++
++void FM_SetDmaEmergency(t_Handle h_Fm, e_FmDmaMuramPort muramPort, bool enable)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_dma_regs *dma_rg;
++
++ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++
++ dma_rg = p_Fm->p_FmDmaRegs;
++
++ fman_set_dma_emergency(dma_rg, !!(muramPort==e_FM_DMA_MURAM_PORT_WRITE), enable);
++}
++
++void FM_SetDmaExtBusPri(t_Handle h_Fm, e_FmDmaExtBusPri pri)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_dma_regs *dma_rg;
++
++ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++
++ dma_rg = p_Fm->p_FmDmaRegs;
++
++ fman_set_dma_ext_bus_pri(dma_rg, pri);
++}
++
++void FM_GetDmaStatus(t_Handle h_Fm, t_FmDmaStatus *p_FmDmaStatus)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ uint32_t dmaStatus;
++ struct fman_dma_regs *dma_rg;
++
++ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++
++ dma_rg = p_Fm->p_FmDmaRegs;
++
++ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
++ !p_Fm->baseAddr &&
++ p_Fm->h_IpcSessions[0])
++ {
++ t_FmIpcDmaStatus ipcDmaStatus;
++ t_FmIpcMsg msg;
++ t_FmIpcReply reply;
++ t_Error err;
++ uint32_t replyLength;
++
++ memset(&msg, 0, sizeof(msg));
++ memset(&reply, 0, sizeof(reply));
++ msg.msgId = FM_DMA_STAT;
++ replyLength = sizeof(uint32_t) + sizeof(t_FmIpcDmaStatus);
++ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
++ (uint8_t*)&msg,
++ sizeof(msg.msgId),
++ (uint8_t*)&reply,
++ &replyLength,
++ NULL,
++ NULL);
++ if (err != E_OK)
++ {
++ REPORT_ERROR(MINOR, err, NO_MSG);
++ return;
++ }
++ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmIpcDmaStatus)))
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
++ return;
++ }
++ memcpy((uint8_t*)&ipcDmaStatus, reply.replyBody, sizeof(t_FmIpcDmaStatus));
++
++ p_FmDmaStatus->cmqNotEmpty = (bool)ipcDmaStatus.boolCmqNotEmpty; /**< Command queue is not empty */
++ p_FmDmaStatus->busError = (bool)ipcDmaStatus.boolBusError; /**< Bus error occurred */
++ p_FmDmaStatus->readBufEccError = (bool)ipcDmaStatus.boolReadBufEccError; /**< Double ECC error on buffer Read */
++ p_FmDmaStatus->writeBufEccSysError =(bool)ipcDmaStatus.boolWriteBufEccSysError; /**< Double ECC error on buffer write from system side */
++ p_FmDmaStatus->writeBufEccFmError = (bool)ipcDmaStatus.boolWriteBufEccFmError; /**< Double ECC error on buffer write from FM side */
++        p_FmDmaStatus->singlePortEccError = (bool)ipcDmaStatus.boolSinglePortEccError; /**< Single port ECC error from FM side */
++ return;
++ }
++ else if (!p_Fm->baseAddr)
++ {
++ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
++ ("Either IPC or 'baseAddress' is required!"));
++ return;
++ }
++
++ dmaStatus = fman_get_dma_status(dma_rg);
++
++ p_FmDmaStatus->cmqNotEmpty = (bool)(dmaStatus & DMA_STATUS_CMD_QUEUE_NOT_EMPTY);
++ p_FmDmaStatus->busError = (bool)(dmaStatus & DMA_STATUS_BUS_ERR);
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ p_FmDmaStatus->singlePortEccError = (bool)(dmaStatus & DMA_STATUS_FM_SPDAT_ECC);
++ else
++ {
++ p_FmDmaStatus->readBufEccError = (bool)(dmaStatus & DMA_STATUS_READ_ECC);
++ p_FmDmaStatus->writeBufEccSysError = (bool)(dmaStatus & DMA_STATUS_SYSTEM_WRITE_ECC);
++ p_FmDmaStatus->writeBufEccFmError = (bool)(dmaStatus & DMA_STATUS_FM_WRITE_ECC);
++ }
++}
++
++void FM_Resume(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ struct fman_fpm_regs *fpm_rg;
++
++ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ fpm_rg = p_Fm->p_FmFpmRegs;
++
++ fman_resume(fpm_rg);
++}
++
++t_Error FM_GetSpecialOperationCoding(t_Handle h_Fm,
++ fmSpecialOperations_t spOper,
++ uint8_t *p_SpOperCoding)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ t_FmCtrlCodeRevisionInfo revInfo;
++ t_Error err;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR(p_SpOperCoding, E_NULL_POINTER);
++
++ if (!spOper)
++ {
++ *p_SpOperCoding = 0;
++ return E_OK;
++ }
++
++ if ((err = FM_GetFmanCtrlCodeRevision(p_Fm, &revInfo)) != E_OK)
++ {
++ DBG(WARNING, ("FM in guest-mode without IPC, can't validate firmware revision."));
++ revInfo.packageRev = IP_OFFLOAD_PACKAGE_NUMBER;
++ }
++ else if (!IS_OFFLOAD_PACKAGE(revInfo.packageRev))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Fman ctrl code package"));
++
++ switch (spOper)
++ {
++ case (FM_SP_OP_CAPWAP_DTLS_DEC):
++ *p_SpOperCoding = 9;
++ break;
++ case (FM_SP_OP_CAPWAP_DTLS_ENC):
++ *p_SpOperCoding = 10;
++ break;
++ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN|FM_SP_OP_IPSEC_MANIP):
++ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN|FM_SP_OP_IPSEC_MANIP|FM_SP_OP_RPD):
++ *p_SpOperCoding = 5;
++ break;
++ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_MANIP):
++ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_MANIP|FM_SP_OP_RPD):
++ *p_SpOperCoding = 6;
++ break;
++ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN|FM_SP_OP_RPD):
++ *p_SpOperCoding = 3;
++ break;
++ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN):
++ *p_SpOperCoding = 1;
++ break;
++ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN|FM_SP_OP_IPSEC_NO_ETH_HDR):
++ *p_SpOperCoding = 12;
++ break;
++ case (FM_SP_OP_IPSEC|FM_SP_OP_RPD):
++ *p_SpOperCoding = 4;
++ break;
++ case (FM_SP_OP_IPSEC):
++ *p_SpOperCoding = 2;
++ break;
++ case (FM_SP_OP_DCL4C):
++ *p_SpOperCoding = 7;
++ break;
++ case (FM_SP_OP_CLEAR_RPD):
++ *p_SpOperCoding = 8;
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++ }
++
++ return E_OK;
++}
++
++t_Error FM_CtrlMonStart(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ t_FmTrbRegs *p_MonRegs;
++ uint8_t i;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc,
++ GET_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc) | FPM_BRKC_RDBG);
++
++ for (i = 0; i < FM_NUM_OF_CTRL; i++)
++ {
++ p_MonRegs = (t_FmTrbRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_TRB(i));
++
++ /* Reset control registers */
++ WRITE_UINT32(p_MonRegs->tcrh, TRB_TCRH_RESET);
++ WRITE_UINT32(p_MonRegs->tcrl, TRB_TCRL_RESET);
++
++        /* Configure: counter #1 counts all stalls in risc - ldsched stall;
++           counter #2 counts all stalls in risc - other stall */
++ WRITE_UINT32(p_MonRegs->tcrl, TRB_TCRL_RESET | TRB_TCRL_UTIL);
++
++ /* Enable monitoring */
++ WRITE_UINT32(p_MonRegs->tcrh, TRB_TCRH_ENABLE_COUNTERS);
++ }
++
++ return E_OK;
++}
++
++t_Error FM_CtrlMonStop(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ t_FmTrbRegs *p_MonRegs;
++ uint8_t i;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++
++ for (i = 0; i < FM_NUM_OF_CTRL; i++)
++ {
++ p_MonRegs = (t_FmTrbRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_TRB(i));
++ WRITE_UINT32(p_MonRegs->tcrh, TRB_TCRH_DISABLE_COUNTERS);
++ }
++
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc,
++ GET_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc) & ~FPM_BRKC_RDBG);
++
++ return E_OK;
++}
++
++t_Error FM_CtrlMonGetCounters(t_Handle h_Fm, uint8_t fmCtrlIndex, t_FmCtrlMon *p_Mon)
++{
++ t_Fm *p_Fm = (t_Fm *)h_Fm;
++ t_FmTrbRegs *p_MonRegs;
++ uint64_t clkCnt, utilValue, effValue;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
++ SANITY_CHECK_RETURN_ERROR(p_Mon, E_NULL_POINTER);
++
++ if (fmCtrlIndex >= FM_NUM_OF_CTRL)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FM Controller index"));
++
++ p_MonRegs = (t_FmTrbRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_TRB(fmCtrlIndex));
++
++ clkCnt = (uint64_t)
++ ((uint64_t)GET_UINT32(p_MonRegs->tpcch) << 32 | GET_UINT32(p_MonRegs->tpccl));
++
++ utilValue = (uint64_t)
++ ((uint64_t)GET_UINT32(p_MonRegs->tpc1h) << 32 | GET_UINT32(p_MonRegs->tpc1l));
++
++ effValue = (uint64_t)
++ ((uint64_t)GET_UINT32(p_MonRegs->tpc2h) << 32 | GET_UINT32(p_MonRegs->tpc2l));
++
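++    /* Illustrative numbers: with clkCnt = 1000 and utilValue = 400 stall
++       cycles, percentCnt[0] = (600 * 100) / 1000 = 60; with effValue = 300,
++       percentCnt[1] = ((600 - 300) * 100) / 600 = 50. */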
++ p_Mon->percentCnt[0] = (uint8_t)div64_u64((clkCnt - utilValue) * 100, clkCnt);
++ if (clkCnt != utilValue)
++ p_Mon->percentCnt[1] = (uint8_t)div64_u64(((clkCnt - utilValue) - effValue) * 100, clkCnt - utilValue);
++ else
++ p_Mon->percentCnt[1] = 0;
++
++ return E_OK;
++}
++
++t_Handle FM_GetMuramHandle(t_Handle h_Fm)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++
++ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, NULL);
++
++ return (p_Fm->h_FmMuram);
++}
++
++/****************************************************/
++/* Hidden-DEBUG Only API */
++/****************************************************/
++t_Error FM_ForceIntr (t_Handle h_Fm, e_FmExceptions exception)
++{
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ enum fman_exceptions fslException;
++ struct fman_rg fman_rg;
++
++ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
++
++ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
++ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
++ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
++ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
++
++ switch (exception)
++ {
++ case e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
++ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ break;
++ case e_FM_EX_QMI_SINGLE_ECC:
++ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("e_FM_EX_QMI_SINGLE_ECC not supported on this integration."));
++
++ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_SINGLE_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ break;
++ case e_FM_EX_QMI_DOUBLE_ECC:
++ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_DOUBLE_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ break;
++ case e_FM_EX_BMI_LIST_RAM_ECC:
++ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_LIST_RAM_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ break;
++ case e_FM_EX_BMI_STORAGE_PROFILE_ECC:
++ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_STORAGE_PROFILE_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ break;
++ case e_FM_EX_BMI_STATISTICS_RAM_ECC:
++ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_STATISTICS_RAM_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ break;
++ case e_FM_EX_BMI_DISPATCH_RAM_ECC:
++ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_DISPATCH_RAM_ECC))
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
++ break;
++ default:
++ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception may not be forced"));
++ }
++
++ fslException = FmanExceptionTrans(exception);
++ fman_force_intr (&fman_rg, fslException);
++
++ return E_OK;
++}
++
++t_Handle FmGetPcd(t_Handle h_Fm)
++{
++ return ((t_Fm*)h_Fm)->h_Pcd;
++}
++#if (DPAA_VERSION >= 11)
++extern void *g_MemacRegs;
++void fm_clk_down(void);
++uint32_t fman_memac_get_event(void *regs, uint32_t ev_mask);
++void FM_ChangeClock(t_Handle h_Fm, int hardwarePortId)
++{
++ int macId;
++ uint32_t event, rcr;
++ t_Fm *p_Fm = (t_Fm*)h_Fm;
++ rcr = GET_UINT32(p_Fm->p_FmFpmRegs->fm_rcr);
++ rcr |= 0x04000000;
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rcr, rcr);
++
++ HW_PORT_ID_TO_SW_PORT_ID(macId, hardwarePortId);
++ do
++ {
++ event = fman_memac_get_event(g_MemacRegs, 0xFFFFFFFF);
++ } while ((event & 0x00000020) == 0);
++ fm_clk_down();
++ rcr = GET_UINT32(p_Fm->p_FmFpmRegs->fm_rcr);
++ rcr &= ~0x04000000;
++ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rcr, rcr);
++}
++#endif
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.h
+new file mode 100644
+index 00000000..0bded75d
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.h
+@@ -0,0 +1,648 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm.h
++
++ @Description FM internal structures and definitions.
++*//***************************************************************************/
++#ifndef __FM_H
++#define __FM_H
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "fm_ext.h"
++#include "fm_ipc.h"
++
++#include "fsl_fman.h"
++
++#define __ERR_MODULE__ MODULE_FM
++
++#define FM_MAX_NUM_OF_HW_PORT_IDS 64
++#define FM_MAX_NUM_OF_GUESTS 100
++
++/**************************************************************************//**
++ @Description Exceptions
++*//***************************************************************************/
++#define FM_EX_DMA_BUS_ERROR 0x80000000 /**< DMA bus error. */
++#define FM_EX_DMA_READ_ECC 0x40000000
++#define FM_EX_DMA_SYSTEM_WRITE_ECC 0x20000000
++#define FM_EX_DMA_FM_WRITE_ECC 0x10000000
++#define FM_EX_FPM_STALL_ON_TASKS 0x08000000 /**< Stall of tasks on FPM */
++#define FM_EX_FPM_SINGLE_ECC 0x04000000 /**< Single ECC on FPM */
++#define FM_EX_FPM_DOUBLE_ECC 0x02000000
++#define FM_EX_QMI_SINGLE_ECC 0x01000000 /**< Single ECC on QMI */
++#define FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000 /**< Dequeue from default queue id */
++#define FM_EX_QMI_DOUBLE_ECC 0x00400000
++#define FM_EX_BMI_LIST_RAM_ECC 0x00200000
++#define FM_EX_BMI_STORAGE_PROFILE_ECC 0x00100000
++#define FM_EX_BMI_STATISTICS_RAM_ECC 0x00080000
++#define FM_EX_IRAM_ECC 0x00040000
++#define FM_EX_MURAM_ECC 0x00020000
++#define FM_EX_BMI_DISPATCH_RAM_ECC 0x00010000
++#define FM_EX_DMA_SINGLE_PORT_ECC 0x00008000
++
++#define DMA_EMSR_EMSTR_MASK 0x0000FFFF
++
++#define DMA_THRESH_COMMQ_MASK 0xFF000000
++#define DMA_THRESH_READ_INT_BUF_MASK 0x007F0000
++#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000007F
++
++#define GET_EXCEPTION_FLAG(bitMask, exception) \
++switch (exception){ \
++ case e_FM_EX_DMA_BUS_ERROR: \
++ bitMask = FM_EX_DMA_BUS_ERROR; break; \
++ case e_FM_EX_DMA_SINGLE_PORT_ECC: \
++ bitMask = FM_EX_DMA_SINGLE_PORT_ECC; break; \
++ case e_FM_EX_DMA_READ_ECC: \
++ bitMask = FM_EX_DMA_READ_ECC; break; \
++ case e_FM_EX_DMA_SYSTEM_WRITE_ECC: \
++ bitMask = FM_EX_DMA_SYSTEM_WRITE_ECC; break; \
++ case e_FM_EX_DMA_FM_WRITE_ECC: \
++ bitMask = FM_EX_DMA_FM_WRITE_ECC; break; \
++ case e_FM_EX_FPM_STALL_ON_TASKS: \
++ bitMask = FM_EX_FPM_STALL_ON_TASKS; break; \
++ case e_FM_EX_FPM_SINGLE_ECC: \
++ bitMask = FM_EX_FPM_SINGLE_ECC; break; \
++ case e_FM_EX_FPM_DOUBLE_ECC: \
++ bitMask = FM_EX_FPM_DOUBLE_ECC; break; \
++ case e_FM_EX_QMI_SINGLE_ECC: \
++ bitMask = FM_EX_QMI_SINGLE_ECC; break; \
++ case e_FM_EX_QMI_DOUBLE_ECC: \
++ bitMask = FM_EX_QMI_DOUBLE_ECC; break; \
++ case e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: \
++ bitMask = FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID; break; \
++ case e_FM_EX_BMI_LIST_RAM_ECC: \
++ bitMask = FM_EX_BMI_LIST_RAM_ECC; break; \
++ case e_FM_EX_BMI_STORAGE_PROFILE_ECC: \
++ bitMask = FM_EX_BMI_STORAGE_PROFILE_ECC; break; \
++ case e_FM_EX_BMI_STATISTICS_RAM_ECC: \
++ bitMask = FM_EX_BMI_STATISTICS_RAM_ECC; break; \
++ case e_FM_EX_BMI_DISPATCH_RAM_ECC: \
++ bitMask = FM_EX_BMI_DISPATCH_RAM_ECC; break; \
++ case e_FM_EX_IRAM_ECC: \
++ bitMask = FM_EX_IRAM_ECC; break; \
++ case e_FM_EX_MURAM_ECC: \
++ bitMask = FM_EX_MURAM_ECC; break; \
++ default: bitMask = 0;break; \
++}
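/*
 * Usage sketch (a hypothetical wrapper, not part of the SDK patch):
 * GET_EXCEPTION_FLAG expands to a switch statement, so it must be
 * invoked as a statement with an lvalue receiving the mask; it cannot
 * be used as an expression.
 */
static uint32_t exception_to_mask(e_FmExceptions exception)
{
    uint32_t bitMask;

    GET_EXCEPTION_FLAG(bitMask, exception);
    return bitMask; /* 0 when no FM_EX_* flag corresponds */
}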
++
++#define GET_FM_MODULE_EVENT(_mod, _id, _intrType, _event) \
++ switch (_mod) { \
++ case e_FM_MOD_PRS: \
++ if (_id) _event = e_FM_EV_DUMMY_LAST; \
++ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_PRS : e_FM_EV_PRS; \
++ break; \
++ case e_FM_MOD_KG: \
++ if (_id) _event = e_FM_EV_DUMMY_LAST; \
++ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_KG : e_FM_EV_DUMMY_LAST; \
++ break; \
++ case e_FM_MOD_PLCR: \
++ if (_id) _event = e_FM_EV_DUMMY_LAST; \
++ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_PLCR : e_FM_EV_PLCR; \
++ break; \
++ case e_FM_MOD_TMR: \
++ if (_id) _event = e_FM_EV_DUMMY_LAST; \
++ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST : e_FM_EV_TMR; \
++ break; \
++ case e_FM_MOD_10G_MAC: \
++ if (_id >= FM_MAX_NUM_OF_10G_MACS) _event = e_FM_EV_DUMMY_LAST; \
++ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? (e_FM_EV_ERR_10G_MAC0 + _id) : (e_FM_EV_10G_MAC0 + _id); \
++ break; \
++ case e_FM_MOD_1G_MAC: \
++ if (_id >= FM_MAX_NUM_OF_1G_MACS) _event = e_FM_EV_DUMMY_LAST; \
++ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? (e_FM_EV_ERR_1G_MAC0 + _id) : (e_FM_EV_1G_MAC0 + _id); \
++ break; \
++ case e_FM_MOD_MACSEC: \
++ switch (_id){ \
++ case (0): _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_MACSEC_MAC0:e_FM_EV_MACSEC_MAC0; \
++ break; \
++ } \
++ break; \
++ case e_FM_MOD_FMAN_CTRL: \
++ if (_intrType == e_FM_INTR_TYPE_ERR) _event = e_FM_EV_DUMMY_LAST; \
++ else _event = (e_FM_EV_FMAN_CTRL_0 + _id); \
++ break; \
++ default: _event = e_FM_EV_DUMMY_LAST; \
++ break; \
++ }
++
++#define FMAN_CACHE_OVERRIDE_TRANS(fsl_cache_override, _cache_override) \
++ switch (_cache_override){ \
++ case e_FM_DMA_NO_CACHE_OR: \
++ fsl_cache_override = E_FMAN_DMA_NO_CACHE_OR; break; \
++ case e_FM_DMA_NO_STASH_DATA: \
++ fsl_cache_override = E_FMAN_DMA_NO_STASH_DATA; break; \
++ case e_FM_DMA_MAY_STASH_DATA: \
++ fsl_cache_override = E_FMAN_DMA_MAY_STASH_DATA; break; \
++ case e_FM_DMA_STASH_DATA: \
++ fsl_cache_override = E_FMAN_DMA_STASH_DATA; break; \
++ default: \
++ fsl_cache_override = E_FMAN_DMA_NO_CACHE_OR; break; \
++ }
++
++#define FMAN_AID_MODE_TRANS(fsl_aid_mode, _aid_mode) \
++ switch (_aid_mode){ \
++ case e_FM_DMA_AID_OUT_PORT_ID: \
++ fsl_aid_mode = E_FMAN_DMA_AID_OUT_PORT_ID; break; \
++ case e_FM_DMA_AID_OUT_TNUM: \
++ fsl_aid_mode = E_FMAN_DMA_AID_OUT_TNUM; break; \
++ default: \
++ fsl_aid_mode = E_FMAN_DMA_AID_OUT_PORT_ID; break; \
++ }
++
++#define FMAN_DMA_DBG_CNT_TRANS(fsl_dma_dbg_cnt, _dma_dbg_cnt) \
++ switch (_dma_dbg_cnt){ \
++ case e_FM_DMA_DBG_NO_CNT: \
++ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_NO_CNT; break; \
++ case e_FM_DMA_DBG_CNT_DONE: \
++ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_DONE; break; \
++ case e_FM_DMA_DBG_CNT_COMM_Q_EM: \
++ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_COMM_Q_EM; break; \
++ case e_FM_DMA_DBG_CNT_INT_READ_EM: \
++ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_INT_READ_EM; break; \
++ case e_FM_DMA_DBG_CNT_INT_WRITE_EM: \
++ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_INT_WRITE_EM ; break; \
++ case e_FM_DMA_DBG_CNT_FPM_WAIT: \
++ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_FPM_WAIT ; break; \
++ case e_FM_DMA_DBG_CNT_SIGLE_BIT_ECC: \
++ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_SIGLE_BIT_ECC ; break; \
++ case e_FM_DMA_DBG_CNT_RAW_WAR_PROT: \
++ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_RAW_WAR_PROT ; break; \
++ default: \
++ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_NO_CNT; break; \
++ }
++
++#define FMAN_DMA_EMER_TRANS(fsl_dma_emer, _dma_emer) \
++ switch (_dma_emer){ \
++ case e_FM_DMA_EM_EBS: \
++ fsl_dma_emer = E_FMAN_DMA_EM_EBS; break; \
++ case e_FM_DMA_EM_SOS: \
++ fsl_dma_emer = E_FMAN_DMA_EM_SOS; break; \
++ default: \
++ fsl_dma_emer = E_FMAN_DMA_EM_EBS; break; \
++ }
++
++#define FMAN_DMA_ERR_TRANS(fsl_dma_err, _dma_err) \
++ switch (_dma_err){ \
++ case e_FM_DMA_ERR_CATASTROPHIC: \
++ fsl_dma_err = E_FMAN_DMA_ERR_CATASTROPHIC; break; \
++ case e_FM_DMA_ERR_REPORT: \
++ fsl_dma_err = E_FMAN_DMA_ERR_REPORT; break; \
++ default: \
++ fsl_dma_err = E_FMAN_DMA_ERR_CATASTROPHIC; break; \
++ }
++
++#define FMAN_CATASTROPHIC_ERR_TRANS(fsl_catastrophic_err, _catastrophic_err) \
++ switch (_catastrophic_err){ \
++ case e_FM_CATASTROPHIC_ERR_STALL_PORT: \
++ fsl_catastrophic_err = E_FMAN_CATAST_ERR_STALL_PORT; break; \
++ case e_FM_CATASTROPHIC_ERR_STALL_TASK: \
++ fsl_catastrophic_err = E_FMAN_CATAST_ERR_STALL_TASK; break; \
++ default: \
++ fsl_catastrophic_err = E_FMAN_CATAST_ERR_STALL_PORT; break; \
++ }
++
++#define FMAN_COUNTERS_TRANS(fsl_counters, _counters) \
++ switch (_counters){ \
++ case e_FM_COUNTERS_ENQ_TOTAL_FRAME: \
++ fsl_counters = E_FMAN_COUNTERS_ENQ_TOTAL_FRAME; break; \
++ case e_FM_COUNTERS_DEQ_TOTAL_FRAME: \
++ fsl_counters = E_FMAN_COUNTERS_DEQ_TOTAL_FRAME; break; \
++ case e_FM_COUNTERS_DEQ_0: \
++ fsl_counters = E_FMAN_COUNTERS_DEQ_0; break; \
++ case e_FM_COUNTERS_DEQ_1: \
++ fsl_counters = E_FMAN_COUNTERS_DEQ_1; break; \
++ case e_FM_COUNTERS_DEQ_2: \
++ fsl_counters = E_FMAN_COUNTERS_DEQ_2; break; \
++ case e_FM_COUNTERS_DEQ_3: \
++ fsl_counters = E_FMAN_COUNTERS_DEQ_3; break; \
++ case e_FM_COUNTERS_DEQ_FROM_DEFAULT: \
++ fsl_counters = E_FMAN_COUNTERS_DEQ_FROM_DEFAULT; break; \
++ case e_FM_COUNTERS_DEQ_FROM_CONTEXT: \
++ fsl_counters = E_FMAN_COUNTERS_DEQ_FROM_CONTEXT; break; \
++ case e_FM_COUNTERS_DEQ_FROM_FD: \
++ fsl_counters = E_FMAN_COUNTERS_DEQ_FROM_FD; break; \
++ case e_FM_COUNTERS_DEQ_CONFIRM: \
++ fsl_counters = E_FMAN_COUNTERS_DEQ_CONFIRM; break; \
++ default: \
++ fsl_counters = E_FMAN_COUNTERS_ENQ_TOTAL_FRAME; break; \
++ }
++
++/**************************************************************************//**
++ @Description defaults
++*//***************************************************************************/
++#define DEFAULT_exceptions (FM_EX_DMA_BUS_ERROR |\
++ FM_EX_DMA_READ_ECC |\
++ FM_EX_DMA_SYSTEM_WRITE_ECC |\
++ FM_EX_DMA_FM_WRITE_ECC |\
++ FM_EX_FPM_STALL_ON_TASKS |\
++ FM_EX_FPM_SINGLE_ECC |\
++ FM_EX_FPM_DOUBLE_ECC |\
++ FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID|\
++ FM_EX_BMI_LIST_RAM_ECC |\
++ FM_EX_BMI_STORAGE_PROFILE_ECC |\
++ FM_EX_BMI_STATISTICS_RAM_ECC |\
++ FM_EX_IRAM_ECC |\
++ FM_EX_MURAM_ECC |\
++ FM_EX_BMI_DISPATCH_RAM_ECC |\
++ FM_EX_QMI_DOUBLE_ECC |\
++ FM_EX_QMI_SINGLE_ECC)
++
++#define DEFAULT_eccEnable FALSE
++#ifdef FM_PEDANTIC_DMA
++#define DEFAULT_aidOverride TRUE
++#else
++#define DEFAULT_aidOverride FALSE
++#endif /* FM_PEDANTIC_DMA */
++#define DEFAULT_aidMode e_FM_DMA_AID_OUT_TNUM
++#define DEFAULT_dmaStopOnBusError FALSE
++#define DEFAULT_stopAtBusError FALSE
++#define DEFAULT_axiDbgNumOfBeats 1
++#define DEFAULT_dmaReadIntBufLow ((DMA_THRESH_MAX_BUF+1)/2)
++#define DEFAULT_dmaReadIntBufHigh ((DMA_THRESH_MAX_BUF+1)*3/4)
++#define DEFAULT_dmaWriteIntBufLow ((DMA_THRESH_MAX_BUF+1)/2)
++#define DEFAULT_dmaWriteIntBufHigh ((DMA_THRESH_MAX_BUF+1)*3/4)
++#define DEFAULT_catastrophicErr e_FM_CATASTROPHIC_ERR_STALL_PORT
++#define DEFAULT_dmaErr e_FM_DMA_ERR_CATASTROPHIC
++#define DEFAULT_resetOnInit FALSE
++#define DEFAULT_resetOnInitOverrideCallback NULL
++#define DEFAULT_haltOnExternalActivation FALSE /* do not change! if changed, must be disabled for rev1! */
++#define DEFAULT_haltOnUnrecoverableEccError FALSE /* do not change! if changed, must be disabled for rev1! */
++#define DEFAULT_externalEccRamsEnable FALSE
++#define DEFAULT_VerifyUcode FALSE
++
++#if (DPAA_VERSION < 11)
++#define DEFAULT_totalFifoSize(major, minor) \
++ (((major == 2) || (major == 5)) ? \
++ (100*KILOBYTE) : ((major == 4) ? \
++ (49*KILOBYTE) : (122*KILOBYTE)))
++#define DEFAULT_totalNumOfTasks(major, minor) \
++ BMI_MAX_NUM_OF_TASKS
++
++#define DEFAULT_dmaCommQLow ((DMA_THRESH_MAX_COMMQ+1)/2)
++#define DEFAULT_dmaCommQHigh ((DMA_THRESH_MAX_COMMQ+1)*3/4)
++#define DEFAULT_cacheOverride e_FM_DMA_NO_CACHE_OR
++#define DEFAULT_dmaCamNumOfEntries 32
++#define DEFAULT_dmaDbgCntMode e_FM_DMA_DBG_NO_CNT
++#define DEFAULT_dmaEnEmergency FALSE
++#define DEFAULT_dmaSosEmergency 0
++#define DEFAULT_dmaWatchdog 0 /* disabled */
++#define DEFAULT_dmaEnEmergencySmoother FALSE
++#define DEFAULT_dmaEmergencySwitchCounter 0
++
++#define DEFAULT_dispLimit 0
++#define DEFAULT_prsDispTh 16
++#define DEFAULT_plcrDispTh 16
++#define DEFAULT_kgDispTh 16
++#define DEFAULT_bmiDispTh 16
++#define DEFAULT_qmiEnqDispTh 16
++#define DEFAULT_qmiDeqDispTh 16
++#define DEFAULT_fmCtl1DispTh 16
++#define DEFAULT_fmCtl2DispTh 16
++
++#else /* (DPAA_VERSION < 11) */
++/* Defaults are registers' reset values */
++#define DEFAULT_totalFifoSize(major, minor) \
++ (((major == 6) && ((minor == 1) || (minor == 4))) ? \
++ (156*KILOBYTE) : (295*KILOBYTE))
++
++/* According to the default value of FMBM_CFG2[TNTSKS] */
++#define DEFAULT_totalNumOfTasks(major, minor) \
++ (((major == 6) && ((minor == 1) || (minor == 4))) ? 59 : 124)
++
++#define DEFAULT_dmaCommQLow 0x2A
++#define DEFAULT_dmaCommQHigh 0x3F
++#define DEFAULT_cacheOverride e_FM_DMA_NO_CACHE_OR
++#define DEFAULT_dmaCamNumOfEntries 64
++#define DEFAULT_dmaDbgCntMode e_FM_DMA_DBG_NO_CNT
++#define DEFAULT_dmaEnEmergency FALSE
++#define DEFAULT_dmaSosEmergency 0
++#define DEFAULT_dmaWatchdog 0 /* disabled */
++#define DEFAULT_dmaEnEmergencySmoother FALSE
++#define DEFAULT_dmaEmergencySwitchCounter 0
++
++#define DEFAULT_dispLimit 0
++#define DEFAULT_prsDispTh 16
++#define DEFAULT_plcrDispTh 16
++#define DEFAULT_kgDispTh 16
++#define DEFAULT_bmiDispTh 16
++#define DEFAULT_qmiEnqDispTh 16
++#define DEFAULT_qmiDeqDispTh 16
++#define DEFAULT_fmCtl1DispTh 16
++#define DEFAULT_fmCtl2DispTh 16
++#endif /* (DPAA_VERSION < 11) */
++
++#define FM_TIMESTAMP_1_USEC_BIT 8
++
++/**************************************************************************//**
++ @Collection Defines used for enabling/disabling FM interrupts
++ @{
++*//***************************************************************************/
++#define ERR_INTR_EN_DMA 0x00010000
++#define ERR_INTR_EN_FPM 0x80000000
++#define ERR_INTR_EN_BMI 0x00800000
++#define ERR_INTR_EN_QMI 0x00400000
++#define ERR_INTR_EN_PRS 0x00200000
++#define ERR_INTR_EN_KG 0x00100000
++#define ERR_INTR_EN_PLCR 0x00080000
++#define ERR_INTR_EN_MURAM 0x00040000
++#define ERR_INTR_EN_IRAM 0x00020000
++#define ERR_INTR_EN_10G_MAC0 0x00008000
++#define ERR_INTR_EN_10G_MAC1 0x00000040
++#define ERR_INTR_EN_1G_MAC0 0x00004000
++#define ERR_INTR_EN_1G_MAC1 0x00002000
++#define ERR_INTR_EN_1G_MAC2 0x00001000
++#define ERR_INTR_EN_1G_MAC3 0x00000800
++#define ERR_INTR_EN_1G_MAC4 0x00000400
++#define ERR_INTR_EN_1G_MAC5 0x00000200
++#define ERR_INTR_EN_1G_MAC6 0x00000100
++#define ERR_INTR_EN_1G_MAC7 0x00000080
++#define ERR_INTR_EN_MACSEC_MAC0 0x00000001
++
++#define INTR_EN_QMI 0x40000000
++#define INTR_EN_PRS 0x20000000
++#define INTR_EN_WAKEUP 0x10000000
++#define INTR_EN_PLCR 0x08000000
++#define INTR_EN_1G_MAC0 0x00080000
++#define INTR_EN_1G_MAC1 0x00040000
++#define INTR_EN_1G_MAC2 0x00020000
++#define INTR_EN_1G_MAC3 0x00010000
++#define INTR_EN_1G_MAC4 0x00000040
++#define INTR_EN_1G_MAC5 0x00000020
++#define INTR_EN_1G_MAC6 0x00000008
++#define INTR_EN_1G_MAC7 0x00000002
++#define INTR_EN_10G_MAC0 0x00200000
++#define INTR_EN_10G_MAC1 0x00100000
++#define INTR_EN_REV0 0x00008000
++#define INTR_EN_REV1 0x00004000
++#define INTR_EN_REV2 0x00002000
++#define INTR_EN_REV3 0x00001000
++#define INTR_EN_BRK 0x00000080
++#define INTR_EN_TMR 0x01000000
++#define INTR_EN_MACSEC_MAC0 0x00000001
++/* @} */
++
++/**************************************************************************//**
++ @Description Memory Mapped Registers
++*//***************************************************************************/
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++typedef struct
++{
++ volatile uint32_t iadd; /**< FM IRAM instruction address register */
++ volatile uint32_t idata; /**< FM IRAM instruction data register */
++ volatile uint32_t itcfg; /**< FM IRAM timing config register */
++ volatile uint32_t iready; /**< FM IRAM ready register */
++ volatile uint32_t res[0x1FFFC];
++} t_FMIramRegs;
++
++/* Trace buffer registers -
++ each FM Controller has its own trace buffer residing at FM_MM_TRB(fmCtrlIndex) offset */
++typedef struct t_FmTrbRegs
++{
++ volatile uint32_t tcrh;
++ volatile uint32_t tcrl;
++ volatile uint32_t tesr;
++ volatile uint32_t tecr0h;
++ volatile uint32_t tecr0l;
++ volatile uint32_t terf0h;
++ volatile uint32_t terf0l;
++ volatile uint32_t tecr1h;
++ volatile uint32_t tecr1l;
++ volatile uint32_t terf1h;
++ volatile uint32_t terf1l;
++ volatile uint32_t tpcch;
++ volatile uint32_t tpccl;
++ volatile uint32_t tpc1h;
++ volatile uint32_t tpc1l;
++ volatile uint32_t tpc2h;
++ volatile uint32_t tpc2l;
++ volatile uint32_t twdimr;
++ volatile uint32_t twicvr;
++ volatile uint32_t tar;
++ volatile uint32_t tdr;
++ volatile uint32_t tsnum1;
++ volatile uint32_t tsnum2;
++ volatile uint32_t tsnum3;
++ volatile uint32_t tsnum4;
++} t_FmTrbRegs;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++/**************************************************************************//**
++ @Description General defines
++*//***************************************************************************/
++#define FM_DEBUG_STATUS_REGISTER_OFFSET 0x000d1084UL
++#define FM_FW_DEBUG_INSTRUCTION 0x6ffff805UL
++
++/**************************************************************************//**
++ @Description FPM defines
++*//***************************************************************************/
++/* masks */
++#define FPM_BRKC_RDBG 0x00000200
++#define FPM_BRKC_SLP 0x00000800
++/**************************************************************************//**
++ @Description BMI defines
++*//***************************************************************************/
++/* masks */
++#define BMI_INIT_START 0x80000000
++#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
++#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
++#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
++#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
++/**************************************************************************//**
++ @Description QMI defines
++*//***************************************************************************/
++/* masks */
++#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
++#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
++#define QMI_INTR_EN_SINGLE_ECC 0x80000000
++
++/**************************************************************************//**
++ @Description IRAM defines
++*//***************************************************************************/
++/* masks */
++#define IRAM_IADD_AIE 0x80000000
++#define IRAM_READY 0x80000000
++
++/**************************************************************************//**
++ @Description TRB defines
++*//***************************************************************************/
++/* masks */
++#define TRB_TCRH_RESET 0x04000000
++#define TRB_TCRH_ENABLE_COUNTERS 0x84008000
++#define TRB_TCRH_DISABLE_COUNTERS 0x8400C000
++#define TRB_TCRL_RESET 0x20000000
++#define TRB_TCRL_UTIL 0x00000460
++typedef struct {
++ void (*f_Isr) (t_Handle h_Arg, uint32_t event);
++ t_Handle h_SrcHandle;
++} t_FmanCtrlIntrSrc;
++
++
++typedef void (t_FmanCtrlIsr)( t_Handle h_Fm, uint32_t event);
++
++typedef struct
++{
++/***************************/
++/* Master/Guest parameters */
++/***************************/
++ uint8_t fmId;
++ e_FmPortType portsTypes[FM_MAX_NUM_OF_HW_PORT_IDS];
++ uint16_t fmClkFreq;
++ uint16_t fmMacClkFreq;
++ t_FmRevisionInfo revInfo;
++/**************************/
++/* Master Only parameters */
++/**************************/
++ bool enabledTimeStamp;
++ uint8_t count1MicroBit;
++ uint8_t totalNumOfTasks;
++ uint32_t totalFifoSize;
++ uint8_t maxNumOfOpenDmas;
++ uint8_t accumulatedNumOfTasks;
++ uint32_t accumulatedFifoSize;
++ uint8_t accumulatedNumOfOpenDmas;
++ uint8_t accumulatedNumOfDeqTnums;
++#ifdef FM_LOW_END_RESTRICTION
++ bool lowEndRestriction;
++#endif /* FM_LOW_END_RESTRICTION */
++ uint32_t exceptions;
++ int irq;
++ int errIrq;
++ bool ramsEccEnable;
++ bool explicitEnable;
++ bool internalCall;
++ uint8_t ramsEccOwners;
++ uint32_t extraFifoPoolSize;
++ uint8_t extraTasksPoolSize;
++ uint8_t extraOpenDmasPoolSize;
++#if defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)
++ uint16_t portMaxFrameLengths10G[FM_MAX_NUM_OF_10G_MACS];
++ uint16_t macMaxFrameLengths10G[FM_MAX_NUM_OF_10G_MACS];
++#endif /* defined(FM_MAX_NUM_OF_10G_MACS) && ... */
++ uint16_t portMaxFrameLengths1G[FM_MAX_NUM_OF_1G_MACS];
++ uint16_t macMaxFrameLengths1G[FM_MAX_NUM_OF_1G_MACS];
++} t_FmStateStruct;
++
++#if (DPAA_VERSION >= 11)
++typedef struct t_FmMapParam {
++ uint16_t profilesBase;
++ uint16_t numOfProfiles;
++ t_Handle h_FmPort;
++} t_FmMapParam;
++
++typedef struct t_FmAllocMng {
++ bool allocated;
++ uint8_t ownerId; /* guestId for KG in multi-partition only,
++ portId for PLCR in any environment */
++} t_FmAllocMng;
++
++typedef struct t_FmPcdSpEntry {
++ bool valid;
++ t_FmAllocMng profilesMng;
++} t_FmPcdSpEntry;
++
++typedef struct t_FmSp {
++ void *p_FmPcdStoragePrflRegs;
++ t_FmPcdSpEntry profiles[FM_VSP_MAX_NUM_OF_ENTRIES];
++ t_FmMapParam portsMapping[FM_MAX_NUM_OF_PORTS];
++} t_FmSp;
++#endif /* (DPAA_VERSION >= 11) */
++
++typedef struct t_Fm
++{
++/***************************/
++/* Master/Guest parameters */
++/***************************/
++/* locals for recovery */
++ uintptr_t baseAddr;
++
++/* un-needed for recovery */
++ t_Handle h_Pcd;
++ char fmModuleName[MODULE_NAME_SIZE];
++ char fmIpcHandlerModuleName[FM_MAX_NUM_OF_GUESTS][MODULE_NAME_SIZE];
++ t_Handle h_IpcSessions[FM_MAX_NUM_OF_GUESTS];
++ t_FmIntrSrc intrMng[e_FM_EV_DUMMY_LAST]; /* FM exceptions user callback */
++ uint8_t guestId;
++/**************************/
++/* Master Only parameters */
++/**************************/
++/* locals for recovery */
++ struct fman_fpm_regs *p_FmFpmRegs;
++ struct fman_bmi_regs *p_FmBmiRegs;
++ struct fman_qmi_regs *p_FmQmiRegs;
++ struct fman_dma_regs *p_FmDmaRegs;
++ struct fman_regs *p_FmRegs;
++ t_FmExceptionsCallback *f_Exception;
++ t_FmBusErrorCallback *f_BusError;
++ t_Handle h_App; /* Application handle */
++ t_Handle h_Spinlock;
++ bool recoveryMode;
++ t_FmStateStruct *p_FmStateStruct;
++ uint16_t tnumAgingPeriod;
++#if (DPAA_VERSION >= 11)
++ t_FmSp *p_FmSp;
++ uint8_t partNumOfVSPs;
++ uint8_t partVSPBase;
++ uintptr_t vspBaseAddr;
++#endif /* (DPAA_VERSION >= 11) */
++ bool portsPreFetchConfigured[FM_MAX_NUM_OF_HW_PORT_IDS]; /* Prefetch configuration per Tx-port */
++ bool portsPreFetchValue[FM_MAX_NUM_OF_HW_PORT_IDS]; /* Prefetch configuration per Tx-port */
++
++/* un-needed for recovery */
++ struct fman_cfg *p_FmDriverParam;
++ t_Handle h_FmMuram;
++ uint64_t fmMuramPhysBaseAddr;
++ bool independentMode;
++ bool hcPortInitialized;
++ uintptr_t camBaseAddr; /* save for freeing */
++ uintptr_t resAddr;
++ uintptr_t fifoBaseAddr; /* save for freeing */
++ t_FmanCtrlIntrSrc fmanCtrlIntr[FM_NUM_OF_FMAN_CTRL_EVENT_REGS]; /* FM exceptions user callback */
++ bool usedEventRegs[FM_NUM_OF_FMAN_CTRL_EVENT_REGS];
++ t_FmFirmwareParams firmware;
++ bool fwVerify;
++ bool resetOnInit;
++ t_FmResetOnInitOverrideCallback *f_ResetOnInitOverride;
++ uint32_t userSetExceptions;
++} t_Fm;
++
++
++#endif /* __FM_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_ipc.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_ipc.h
+new file mode 100644
+index 00000000..7ce36a76
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_ipc.h
+@@ -0,0 +1,465 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File fm_ipc.h
++
++ @Description FM Inter-Partition prototypes, structures and definitions.
++*//***************************************************************************/
++#ifndef __FM_IPC_H
++#define __FM_IPC_H
++
++#include "error_ext.h"
++#include "std_ext.h"
++
++
++/**************************************************************************//**
++ @Group FM_grp Frame Manager API
++
++ @Description FM API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_IPC_grp FM Inter-Partition messaging Unit
++
++ @Description FM Inter-Partition messaging unit API definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++/**************************************************************************//**
++ @Description A structure of parameters for specifying a MAC.
++*//***************************************************************************/
++typedef _Packed struct
++{
++ uint8_t id;
++ uint32_t enumType;
++} _PackedType t_FmIpcMacParams;
++
++/**************************************************************************//**
++ @Description A structure of parameters for specifying a MAC.
++*//***************************************************************************/
++typedef _Packed struct
++{
++ t_FmIpcMacParams macParams;
++ uint16_t maxFrameLength;
++} _PackedType t_FmIpcMacMaxFrameParams;
++
++/**************************************************************************//**
++ @Description FM physical Address
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcPhysAddr
++{
++ volatile uint8_t high;
++ volatile uint32_t low;
++} _PackedType t_FmIpcPhysAddr;
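/*
 * Sketch, not part of the SDK patch: the split high/low fields carry a
 * 40-bit DPAA physical address over IPC; recombining it is a plain
 * shift-and-or.
 */
#include <stdint.h>

static uint64_t fm_ipc_phys_to_u64(const t_FmIpcPhysAddr *p_Addr)
{
    return ((uint64_t)p_Addr->high << 32) | p_Addr->low;
}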
++
++
++typedef _Packed struct t_FmIpcPortOutInitParams {
++ uint8_t numOfTasks; /**< OUT */
++ uint8_t numOfExtraTasks; /**< OUT */
++ uint8_t numOfOpenDmas; /**< OUT */
++ uint8_t numOfExtraOpenDmas; /**< OUT */
++ uint32_t sizeOfFifo; /**< OUT */
++ uint32_t extraSizeOfFifo; /**< OUT */
++ t_FmIpcPhysAddr ipcPhysAddr; /**< OUT */
++} _PackedType t_FmIpcPortOutInitParams;
++
++/**************************************************************************//**
++ @Description Structure for IPC communication during FM_PORT_Init.
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcPortInInitParams {
++ uint8_t hardwarePortId; /**< IN. port Id */
++ uint32_t enumPortType; /**< IN. Port type */
++ uint8_t boolIndependentMode; /**< IN. TRUE if FM Port operates in independent mode */
++ uint16_t liodnOffset; /**< IN. Port's requested resource */
++ uint8_t numOfTasks; /**< IN. Port's requested resource */
++ uint8_t numOfExtraTasks; /**< IN. Port's requested resource */
++ uint8_t numOfOpenDmas; /**< IN. Port's requested resource */
++ uint8_t numOfExtraOpenDmas; /**< IN. Port's requested resource */
++ uint32_t sizeOfFifo; /**< IN. Port's requested resource */
++ uint32_t extraSizeOfFifo; /**< IN. Port's requested resource */
++ uint8_t deqPipelineDepth; /**< IN. Port's requested resource */
++ uint16_t maxFrameLength; /**< IN. Port's max frame length. */
++ uint16_t liodnBase; /**< IN. Irrelevant for P4080 rev 1.
++ LIODN base for this port, to be
++ used together with LIODN offset. */
++} _PackedType t_FmIpcPortInInitParams;
++
++
++/**************************************************************************//**
++ @Description Structure for IPC communication between port and FM
++ regarding tasks and open DMA resources management.
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcPortRsrcParams {
++ uint8_t hardwarePortId; /**< IN. port Id */
++ uint32_t val; /**< IN. Port's requested resource */
++ uint32_t extra; /**< IN. Port's requested resource */
++ uint8_t boolInitialConfig;
++} _PackedType t_FmIpcPortRsrcParams;
++
++
++/**************************************************************************//**
++ @Description Structure for IPC communication between port and FM
++ regarding tasks and open DMA resources management.
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcPortFifoParams {
++ t_FmIpcPortRsrcParams rsrcParams;
++ uint32_t enumPortType;
++ uint8_t boolIndependentMode;
++ uint8_t deqPipelineDepth;
++ uint8_t numOfPools;
++ uint16_t secondLargestBufSize;
++ uint16_t largestBufSize;
++ uint8_t boolInitialConfig;
++} _PackedType t_FmIpcPortFifoParams;
++
++/**************************************************************************//**
++ @Description Structure for port-FM communication during FM_PORT_Free.
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcPortFreeParams {
++ uint8_t hardwarePortId; /**< IN. port Id */
++ uint32_t enumPortType; /**< IN. Port type */
++ uint8_t deqPipelineDepth; /**< IN. Port's requested resource */
++} _PackedType t_FmIpcPortFreeParams;
++
++/**************************************************************************//**
++ @Description Structure for defining DMA status
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcDmaStatus {
++ uint8_t boolCmqNotEmpty; /**< Command queue is not empty */
++ uint8_t boolBusError; /**< Bus error occurred */
++ uint8_t boolReadBufEccError; /**< Double ECC error on buffer Read */
++ uint8_t boolWriteBufEccSysError; /**< Double ECC error on buffer write from system side */
++ uint8_t boolWriteBufEccFmError; /**< Double ECC error on buffer write from FM side */
++ uint8_t boolSinglePortEccError; /**< Single port ECC error from FM side */
++} _PackedType t_FmIpcDmaStatus;
++
++typedef _Packed struct t_FmIpcRegisterIntr
++{
++ uint8_t guestId; /* IN */
++ uint32_t event; /* IN */
++} _PackedType t_FmIpcRegisterIntr;
++
++typedef _Packed struct t_FmIpcIsr
++{
++ uint8_t boolErr; /* IN */
++ uint32_t pendingReg; /* IN */
++} _PackedType t_FmIpcIsr;
++
++/**************************************************************************//**
++ @Description structure for returning FM parameters
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcParams {
++ uint16_t fmClkFreq; /**< OUT: FM Clock frequency */
++ uint16_t fmMacClkFreq; /**< OUT: FM MAC clock frequency */
++ uint8_t majorRev; /**< OUT: FM Major revision */
++ uint8_t minorRev; /**< OUT: FM Minor revision */
++} _PackedType t_FmIpcParams;
++
++
++/**************************************************************************//**
++ @Description structure for returning Fman Ctrl Code revision information
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcFmanCtrlCodeRevisionInfo {
++ uint16_t packageRev; /**< OUT: Package revision */
++ uint8_t majorRev; /**< OUT: Major revision */
++ uint8_t minorRev; /**< OUT: Minor revision */
++} _PackedType t_FmIpcFmanCtrlCodeRevisionInfo;
++
++/**************************************************************************//**
++ @Description Structure for defining the FM's number of Fman controllers
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcPortNumOfFmanCtrls {
++ uint8_t hardwarePortId; /**< IN. port Id */
++ uint8_t numOfFmanCtrls; /**< IN. Number of Fman controllers */
++ t_FmFmanCtrl orFmanCtrl; /**< IN. fman controller for order restoration */
++} _PackedType t_FmIpcPortNumOfFmanCtrls;
++
++/**************************************************************************//**
++ @Description Structure for setting Fman controller events
++*//***************************************************************************/
++typedef _Packed struct t_FmIpcFmanEvents {
++ uint8_t eventRegId; /**< IN: Fman controller event register id */
++ uint32_t enableEvents; /**< IN/OUT: required enabled events mask */
++} _PackedType t_FmIpcFmanEvents;
++
++typedef _Packed struct t_FmIpcResourceAllocParams {
++ uint8_t guestId;
++ uint16_t base;
++ uint16_t num;
++}_PackedType t_FmIpcResourceAllocParams;
++
++typedef _Packed struct t_FmIpcVspSetPortWindow {
++ uint8_t hardwarePortId;
++ uint8_t baseStorageProfile;
++ uint8_t log2NumOfProfiles;
++}_PackedType t_FmIpcVspSetPortWindow;
++
++typedef _Packed struct t_FmIpcSetCongestionGroupPfcPriority {
++ uint32_t congestionGroupId;
++ uint8_t priorityBitMap;
++}_PackedType t_FmIpcSetCongestionGroupPfcPriority;
++
++#define FM_IPC_MAX_REPLY_BODY_SIZE 20
++#define FM_IPC_MAX_REPLY_SIZE (FM_IPC_MAX_REPLY_BODY_SIZE + sizeof(uint32_t))
++#define FM_IPC_MAX_MSG_SIZE 30
++
++typedef _Packed struct t_FmIpcMsg
++{
++ uint32_t msgId;
++ uint8_t msgBody[FM_IPC_MAX_MSG_SIZE];
++} _PackedType t_FmIpcMsg;
++
++typedef _Packed struct t_FmIpcReply
++{
++ uint32_t error;
++ uint8_t replyBody[FM_IPC_MAX_REPLY_BODY_SIZE];
++} _PackedType t_FmIpcReply;
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++/***************************************************************************/
++/************************ FRONT-END-TO-BACK-END*****************************/
++/***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_GET_TIMESTAMP_SCALE
++
++ @Description Used by FM front-end.
++
++ @Param[out] uint32_t Pointer
++*//***************************************************************************/
++#define FM_GET_TIMESTAMP_SCALE 1
++
++/**************************************************************************//**
++ @Function FM_GET_COUNTER
++
++ @Description Used by FM front-end.
++
++ @Param[in/out] t_FmIpcGetCounter Pointer
++*//***************************************************************************/
++#define FM_GET_COUNTER 2
++
++/**************************************************************************//**
++ @Function FM_GET_SET_PORT_PARAMS
++
++ @Description Used by FM front-end for the PORT module in order to set and get
++ parameters in/from master FM module on FM PORT initialization time.
++
++ @Param[in/out] t_FmIcPortInitParams Pointer
++*//***************************************************************************/
++#define FM_GET_SET_PORT_PARAMS 4
++
++/**************************************************************************//**
++ @Function FM_FREE_PORT
++
++ @Description Used by FM front-end for the PORT module when a port is freed
++ to free all FM PORT resources.
++
++ @Param[in] uint8_t Pointer
++*//***************************************************************************/
++#define FM_FREE_PORT 5
++
++/**************************************************************************//**
++ @Function FM_RESET_MAC
++
++ @Description Used by front-end for the MAC module to reset the MAC registers
++
++ @Param[in] t_FmIpcMacParams Pointer.
++*//***************************************************************************/
++#define FM_RESET_MAC 6
++
++/**************************************************************************//**
++ @Function FM_RESUME_STALLED_PORT
++
++ @Description Used by FM front-end for the PORT module in order to
++ release a stalled FM Port.
++
++ @Param[in] uint8_t Pointer
++*//***************************************************************************/
++#define FM_RESUME_STALLED_PORT 7
++
++/**************************************************************************//**
++ @Function FM_IS_PORT_STALLED
++
++ @Description Used by FM front-end for the PORT module in order to check whether
++ an FM port is stalled.
++
++ @Param[in/out] t_FmIcPortIsStalled Pointer
++*//***************************************************************************/
++#define FM_IS_PORT_STALLED 8
++
++/**************************************************************************//**
++ @Function FM_GET_PARAMS
++
++ @Description Used by FM front-end for the PORT module in order to
++ retrieve FM parameters.
++
++ @Param[in] uint8_t Pointer
++*//***************************************************************************/
++#define FM_GET_PARAMS 10
++
++/**************************************************************************//**
++ @Function FM_REGISTER_INTR
++
++ @Description Used by FM front-end to register an interrupt handler to
++ be called upon interrupt for guest.
++
++ @Param[out] t_FmIpcRegisterIntr Pointer
++*//***************************************************************************/
++#define FM_REGISTER_INTR 11
++
++/**************************************************************************//**
++ @Function FM_DMA_STAT
++
++ @Description Used by FM front-end to read the FM DMA status.
++
++ @Param[out] t_FmIpcDmaStatus Pointer
++*//***************************************************************************/
++#define FM_DMA_STAT 13
++
++/**************************************************************************//**
++ @Function FM_ALLOC_FMAN_CTRL_EVENT_REG
++
++ @Description Used by FM front-end to allocate event register.
++
++ @Param[out] Event register id Pointer
++*//***************************************************************************/
++#define FM_ALLOC_FMAN_CTRL_EVENT_REG 14
++
++/**************************************************************************//**
++ @Function FM_FREE_FMAN_CTRL_EVENT_REG
++
++ @Description Used by FM front-end to free an allocated event register.
++
++ @Param[in] uint8_t Pointer - Event register id
++*//***************************************************************************/
++#define FM_FREE_FMAN_CTRL_EVENT_REG 15
++
++/**************************************************************************//**
++ @Function FM_SET_FMAN_CTRL_EVENTS_ENABLE
++
++ @Description Used by FM front-end to enable events in the FPM
++ Fman controller event register.
++
++ @Param[in] t_FmIpcFmanEvents Pointer
++*//***************************************************************************/
++#define FM_SET_FMAN_CTRL_EVENTS_ENABLE 16
++
++/**************************************************************************//**
++ @Function FM_GET_FMAN_CTRL_EVENTS_ENABLE
++
++ @Description Used by FM front-end to read the enabled events mask
++ from the FPM Fman controller event register.
++
++ @Param[in/out] t_FmIpcFmanEvents Pointer
++*//***************************************************************************/
++#define FM_GET_FMAN_CTRL_EVENTS_ENABLE 17
++
++/**************************************************************************//**
++ @Function FM_SET_MAC_MAX_FRAME
++
++ @Description Used by FM front-end to set the MAC's maximum frame
++ length in the back-end.
++
++ @Param[in/out] t_FmIpcMacMaxFrameParams Pointer
++*//***************************************************************************/
++#define FM_SET_MAC_MAX_FRAME 18
++
++/**************************************************************************//**
++ @Function FM_GET_PHYS_MURAM_BASE
++
++ @Description Used by FM front-end in order to get MURAM base address
++
++ @Param[in/out] t_FmIpcPhysAddr Pointer
++*//***************************************************************************/
++#define FM_GET_PHYS_MURAM_BASE 19
++
++/**************************************************************************//**
++ @Function FM_MASTER_IS_ALIVE
++
++ @Description Used by FM front-end in order to verify Master is up
++
++ @Param[in/out] bool
++*//***************************************************************************/
++#define FM_MASTER_IS_ALIVE 20
++
++#define FM_ENABLE_RAM_ECC 21
++#define FM_DISABLE_RAM_ECC 22
++#define FM_SET_NUM_OF_FMAN_CTRL 23
++#define FM_SET_SIZE_OF_FIFO 24
++#define FM_SET_NUM_OF_TASKS 25
++#define FM_SET_NUM_OF_OPEN_DMAS 26
++#define FM_VSP_ALLOC 27
++#define FM_VSP_FREE 28
++#define FM_VSP_SET_PORT_WINDOW 29
++#define FM_GET_FMAN_CTRL_CODE_REV 30
++#define FM_SET_CONG_GRP_PFC_PRIO 31
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++#define FM_10G_TX_ECC_WA 100
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
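/*
 * Framing sketch, not part of the SDK patch: a request carries one of
 * the message ids above in msgId, with the matching _Packed parameter
 * struct copied into msgBody. The transport itself (an IPC session
 * send) is SDK-specific and omitted here; FM_RESET_MAC is used purely
 * as an example. Assumes <string.h> (or the kernel's string helpers)
 * for memcpy().
 */
static uint32_t frame_reset_mac_msg(t_FmIpcMsg *p_Msg, uint8_t macId,
                                    uint32_t enumType)
{
    t_FmIpcMacParams macParams;

    macParams.id       = macId;
    macParams.enumType = enumType;

    p_Msg->msgId = FM_RESET_MAC;
    memcpy(p_Msg->msgBody, &macParams, sizeof(macParams));

    /* length actually sent: the id word plus the used body bytes */
    return (uint32_t)(sizeof(p_Msg->msgId) + sizeof(macParams));
}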
++
++/***************************************************************************/
++/************************ BACK-END-TO-FRONT-END*****************************/
++/***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_GUEST_ISR
++
++ @Description Used by FM back-end to report an interrupt to the front-end.
++
++ @Param[out] t_FmIpcIsr Pointer
++*//***************************************************************************/
++#define FM_GUEST_ISR 1
++
++
++
++/** @} */ /* end of FM_IPC_grp group */
++/** @} */ /* end of FM_grp group */
++
++
++#endif /* __FM_IPC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_muram.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_muram.c
+new file mode 100644
+index 00000000..0bc67cb7
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_muram.c
+@@ -0,0 +1,174 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_muram.c
++
++ @Description FM MURAM allocation and management routines.
++*//***************************************************************************/
++#include "error_ext.h"
++#include "std_ext.h"
++#include "mm_ext.h"
++#include "string_ext.h"
++#include "sprint_ext.h"
++#include "fm_muram_ext.h"
++#include "fm_common.h"
++
++#define __ERR_MODULE__ MODULE_FM_MURAM
++
++
++typedef struct
++{
++ t_Handle h_Mem;
++ uintptr_t baseAddr;
++ uint32_t size;
++} t_FmMuram;
++
++
++void FmMuramClear(t_Handle h_FmMuram)
++{
++ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
++
++ SANITY_CHECK_RETURN(h_FmMuram, E_INVALID_HANDLE);
++ IOMemSet32(UINT_TO_PTR(p_FmMuram->baseAddr), 0, p_FmMuram->size);
++}
++
++
++t_Handle FM_MURAM_ConfigAndInit(uintptr_t baseAddress, uint32_t size)
++{
++ t_Handle h_Mem;
++ t_FmMuram *p_FmMuram;
++
++ if (!baseAddress)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("baseAddress 0 is not supported"));
++ return NULL;
++ }
++
++ if (baseAddress%4)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("baseAddress not 4 bytes aligned!"));
++ return NULL;
++ }
++
++ /* Allocate FM MURAM structure */
++ p_FmMuram = (t_FmMuram *) XX_Malloc(sizeof(t_FmMuram));
++ if (!p_FmMuram)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MURAM driver structure"));
++ return NULL;
++ }
++ memset(p_FmMuram, 0, sizeof(t_FmMuram));
++
++
++ if ((MM_Init(&h_Mem, baseAddress, size) != E_OK) || (!h_Mem))
++ {
++ XX_Free(p_FmMuram);
++ REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-MURAM partition!!!"));
++ return NULL;
++ }
++
++ /* Initialize FM MURAM parameters which will be kept by the driver */
++ p_FmMuram->baseAddr = baseAddress;
++ p_FmMuram->size = size;
++ p_FmMuram->h_Mem = h_Mem;
++
++ return p_FmMuram;
++}
++
++t_Error FM_MURAM_Free(t_Handle h_FmMuram)
++{
++ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
++
++ if (p_FmMuram->h_Mem)
++ MM_Free(p_FmMuram->h_Mem);
++
++ XX_Free(h_FmMuram);
++
++ return E_OK;
++}
++
++void * FM_MURAM_AllocMem(t_Handle h_FmMuram, uint32_t size, uint32_t align)
++{
++ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
++ uintptr_t addr;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmMuram, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_FmMuram->h_Mem, E_INVALID_HANDLE, NULL);
++
++ addr = (uintptr_t)MM_Get(p_FmMuram->h_Mem, size, align, "FM MURAM");
++
++ if (addr == ILLEGAL_BASE)
++ return NULL;
++
++ return UINT_TO_PTR(addr);
++}
++
++void * FM_MURAM_AllocMemForce(t_Handle h_FmMuram, uint64_t base, uint32_t size)
++{
++ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
++ uintptr_t addr;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmMuram, E_INVALID_HANDLE, NULL);
++ SANITY_CHECK_RETURN_VALUE(p_FmMuram->h_Mem, E_INVALID_HANDLE, NULL);
++
++ addr = (uintptr_t)MM_GetForce(p_FmMuram->h_Mem, base, size, "FM MURAM");
++
++ if (addr == ILLEGAL_BASE)
++ return NULL;
++
++ return UINT_TO_PTR(addr);
++}
++
++t_Error FM_MURAM_FreeMem(t_Handle h_FmMuram, void *ptr)
++{
++ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
++
++ SANITY_CHECK_RETURN_ERROR(h_FmMuram, E_INVALID_HANDLE);
++ SANITY_CHECK_RETURN_ERROR(p_FmMuram->h_Mem, E_INVALID_HANDLE);
++
++ if (MM_Put(p_FmMuram->h_Mem, PTR_TO_UINT(ptr)) == 0)
++ RETURN_ERROR(MINOR, E_INVALID_ADDRESS, ("memory pointer!!!"));
++
++ return E_OK;
++}
++
++uint64_t FM_MURAM_GetFreeMemSize(t_Handle h_FmMuram)
++{
++ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
++
++ SANITY_CHECK_RETURN_VALUE(h_FmMuram, E_INVALID_HANDLE, 0);
++ SANITY_CHECK_RETURN_VALUE(p_FmMuram->h_Mem, E_INVALID_HANDLE, 0);
++
++ return MM_GetFreeMemSize(p_FmMuram->h_Mem);
++}
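/*
 * Usage sketch, not part of the SDK patch: the typical lifetime of the
 * MURAM allocator implemented above -- init over an (already mapped)
 * MURAM window, carve an aligned block, release it, tear down. The
 * base/size arguments and the 256/64 values are placeholders, not real
 * platform numbers.
 */
static void muram_example(uintptr_t base, uint32_t size)
{
    t_Handle h_Muram = FM_MURAM_ConfigAndInit(base, size);
    void *p_Block;

    if (!h_Muram)
        return;

    p_Block = FM_MURAM_AllocMem(h_Muram, 256 /* bytes */, 64 /* align */);
    if (p_Block)
        FM_MURAM_FreeMem(h_Muram, p_Block);

    FM_MURAM_Free(h_Muram);
}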
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fman.c b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fman.c
+new file mode 100755
+index 00000000..a41ecd04
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fman.c
+@@ -0,0 +1,1398 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include <linux/math64.h>
++#include "fsl_fman.h"
++#include "dpaa_integration_ext.h"
++
++uint32_t fman_get_bmi_err_event(struct fman_bmi_regs *bmi_rg)
++{
++ uint32_t event, mask, force;
++
++ event = ioread32be(&bmi_rg->fmbm_ievr);
++ mask = ioread32be(&bmi_rg->fmbm_ier);
++ event &= mask;
++ /* clear the forced events */
++ force = ioread32be(&bmi_rg->fmbm_ifr);
++ if (force & event)
++ iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
++ /* clear the acknowledged events */
++ iowrite32be(event, &bmi_rg->fmbm_ievr);
++ return event;
++}
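/*
 * Pattern sketch, not part of the SDK patch: the BMI/QMI event getters
 * here share one read/mask/acknowledge sequence. Plain volatile
 * accesses stand in for the kernel's ioread32be()/iowrite32be()
 * big-endian accessors used above.
 */
#include <stdint.h>

static uint32_t get_masked_events(volatile uint32_t *evr,  /* event/status */
                                  volatile uint32_t *ier,  /* enable mask */
                                  volatile uint32_t *ifr)  /* force reg */
{
    uint32_t event = *evr & *ier;
    uint32_t force = *ifr;

    if (force & event)
        *ifr = force & ~event; /* drop forced copies of fired events */
    *evr = event;              /* events ack'ed by writing them back */
    return event;
}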
++
++uint32_t fman_get_qmi_err_event(struct fman_qmi_regs *qmi_rg)
++{
++ uint32_t event, mask, force;
++
++ event = ioread32be(&qmi_rg->fmqm_eie);
++ mask = ioread32be(&qmi_rg->fmqm_eien);
++ event &= mask;
++
++ /* clear the forced events */
++ force = ioread32be(&qmi_rg->fmqm_eif);
++ if (force & event)
++ iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
++ /* clear the acknowledged events */
++ iowrite32be(event, &qmi_rg->fmqm_eie);
++ return event;
++}
++
++uint32_t fman_get_dma_com_id(struct fman_dma_regs *dma_rg)
++{
++ return ioread32be(&dma_rg->fmdmtcid);
++}
++
++uint64_t fman_get_dma_addr(struct fman_dma_regs *dma_rg)
++{
++ uint64_t addr;
++
++ addr = (uint64_t)ioread32be(&dma_rg->fmdmtal);
++ addr |= ((uint64_t)(ioread32be(&dma_rg->fmdmtah)) << 32);
++
++ return addr;
++}
++
++uint32_t fman_get_dma_err_event(struct fman_dma_regs *dma_rg)
++{
++ uint32_t status, mask;
++
++ status = ioread32be(&dma_rg->fmdmsr);
++ mask = ioread32be(&dma_rg->fmdmmr);
++
++ /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
++ if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
++ status &= ~DMA_STATUS_BUS_ERR;
++
++ /* clear relevant bits if mask has no DMA_MODE_ECC */
++ if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
++ status &= ~(DMA_STATUS_FM_SPDAT_ECC |
++ DMA_STATUS_READ_ECC |
++ DMA_STATUS_SYSTEM_WRITE_ECC |
++ DMA_STATUS_FM_WRITE_ECC);
++
++ /* clear set events */
++ iowrite32be(status, &dma_rg->fmdmsr);
++
++ return status;
++}
++
++uint32_t fman_get_fpm_err_event(struct fman_fpm_regs *fpm_rg)
++{
++ uint32_t event;
++
++ event = ioread32be(&fpm_rg->fmfp_ee);
++ /* clear all occurred events */
++ iowrite32be(event, &fpm_rg->fmfp_ee);
++ return event;
++}
++
++uint32_t fman_get_muram_err_event(struct fman_fpm_regs *fpm_rg)
++{
++ uint32_t event, mask;
++
++ event = ioread32be(&fpm_rg->fm_rcr);
++ mask = ioread32be(&fpm_rg->fm_rie);
++
++ /* clear MURAM event bit (do not clear IRAM event) */
++ iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
++
++ if ((mask & FPM_MURAM_ECC_ERR_EX_EN))
++ return event;
++ else
++ return 0;
++}
++
++uint32_t fman_get_iram_err_event(struct fman_fpm_regs *fpm_rg)
++{
++ uint32_t event, mask;
++
++ event = ioread32be(&fpm_rg->fm_rcr);
++ mask = ioread32be(&fpm_rg->fm_rie);
++ /* clear IRAM event bit (do not clear MURAM event) */
++ iowrite32be(event & ~FPM_RAM_MURAM_ECC,
++ &fpm_rg->fm_rcr);
++
++ if ((mask & FPM_IRAM_ECC_ERR_EX_EN))
++ return event;
++ else
++ return 0;
++}
++
++uint32_t fman_get_qmi_event(struct fman_qmi_regs *qmi_rg)
++{
++ uint32_t event, mask, force;
++
++ event = ioread32be(&qmi_rg->fmqm_ie);
++ mask = ioread32be(&qmi_rg->fmqm_ien);
++ event &= mask;
++ /* clear the forced events */
++ force = ioread32be(&qmi_rg->fmqm_if);
++ if (force & event)
++ iowrite32be(force & ~event, &qmi_rg->fmqm_if);
++ /* clear the acknowledged events */
++ iowrite32be(event, &qmi_rg->fmqm_ie);
++ return event;
++}
++
++void fman_enable_time_stamp(struct fman_fpm_regs *fpm_rg,
++ uint8_t count1ubit,
++ uint16_t fm_clk_freq)
++{
++ uint32_t tmp;
++ uint64_t frac;
++ uint32_t intgr;
++ uint32_t ts_freq = (uint32_t)(1 << count1ubit); /* in MHz */
++
++ /* configure timestamp so that bit 8 will count 1 microsecond
++ * Find effective count rate at TIMESTAMP least significant bits:
++ * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
++ * Find frequency ratio between effective count rate and the clock:
++ * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
++ * 256/600 = 0.4266666... */
++
++ intgr = ts_freq / fm_clk_freq;
++ /* multiply by 2^16 to keep the fraction of the division;
++ * we do not divide back, since this value is written as a
++ * fraction - see the spec */
++
++ frac = ((uint64_t)ts_freq << 16) - ((uint64_t)intgr << 16) * fm_clk_freq;
++ /* check the remainder of the division in order to round up if not an integer */
++ if (do_div(frac, fm_clk_freq))
++ frac++;
++
++ tmp = (intgr << FPM_TS_INT_SHIFT) | (uint16_t)frac;
++ iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
++
++ /* enable timestamp with original clock */
++ iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
++}
++
++uint32_t fman_get_fpm_error_interrupts(struct fman_fpm_regs *fpm_rg)
++{
++ return ioread32be(&fpm_rg->fm_epi);
++}
++
++
++int fman_set_erratum_10gmac_a004_wa(struct fman_fpm_regs *fpm_rg)
++{
++ int timeout = 100;
++
++ iowrite32be(0x40000000, &fpm_rg->fmfp_extc);
++
++ while ((ioread32be(&fpm_rg->fmfp_extc) & 0x40000000) && --timeout)
++ udelay(10);
++
++ if (!timeout)
++ return -EBUSY;
++ return 0;
++}
++
++void fman_set_ctrl_intr(struct fman_fpm_regs *fpm_rg,
++ uint8_t event_reg_id,
++ uint32_t enable_events)
++{
++ iowrite32be(enable_events, &fpm_rg->fmfp_cee[event_reg_id]);
++}
++
++uint32_t fman_get_ctrl_intr(struct fman_fpm_regs *fpm_rg, uint8_t event_reg_id)
++{
++ return ioread32be(&fpm_rg->fmfp_cee[event_reg_id]);
++}
++
++void fman_set_num_of_riscs_per_port(struct fman_fpm_regs *fpm_rg,
++ uint8_t port_id,
++ uint8_t num_fman_ctrls,
++ uint32_t or_fman_ctrl)
++{
++ uint32_t tmp = 0;
++
++ tmp = (uint32_t)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
++ /* TODO - maybe set CTL# according to other criteria */
++ if (num_fman_ctrls == 2)
++ tmp = FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
++ /* order restoration */
++ tmp |= (or_fman_ctrl << FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | or_fman_ctrl;
++
++ iowrite32be(tmp, &fpm_rg->fmfp_prc);
++}
++
++void fman_set_order_restoration_per_port(struct fman_fpm_regs *fpm_rg,
++ uint8_t port_id,
++ bool independent_mode,
++ bool is_rx_port)
++{
++ uint32_t tmp = 0;
++
++ tmp = (uint32_t)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
++ if (independent_mode) {
++ if (is_rx_port)
++ tmp |= (FPM_PRT_FM_CTL1 <<
++ FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | FPM_PRT_FM_CTL1;
++ else
++ tmp |= (FPM_PRT_FM_CTL2 <<
++ FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | FPM_PRT_FM_CTL2;
++ } else {
++ tmp |= (FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1);
++
++ /* order restoration */
++ if (port_id % 2)
++ tmp |= (FPM_PRT_FM_CTL1 <<
++ FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
++ else
++ tmp |= (FPM_PRT_FM_CTL2 <<
++ FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
++ }
++ iowrite32be(tmp, &fpm_rg->fmfp_prc);
++}
++
++uint8_t fman_get_qmi_deq_th(struct fman_qmi_regs *qmi_rg)
++{
++ return (uint8_t)ioread32be(&qmi_rg->fmqm_gc);
++}
++
++uint8_t fman_get_qmi_enq_th(struct fman_qmi_regs *qmi_rg)
++{
++ return (uint8_t)(ioread32be(&qmi_rg->fmqm_gc) >> 8);
++}
++
++void fman_set_qmi_enq_th(struct fman_qmi_regs *qmi_rg, uint8_t val)
++{
++ uint32_t tmp_reg;
++
++ tmp_reg = ioread32be(&qmi_rg->fmqm_gc);
++ tmp_reg &= ~QMI_CFG_ENQ_MASK;
++ tmp_reg |= ((uint32_t)val << 8);
++ iowrite32be(tmp_reg, &qmi_rg->fmqm_gc);
++}
++
++void fman_set_qmi_deq_th(struct fman_qmi_regs *qmi_rg, uint8_t val)
++{
++ uint32_t tmp_reg;
++
++ tmp_reg = ioread32be(&qmi_rg->fmqm_gc);
++ tmp_reg &= ~QMI_CFG_DEQ_MASK;
++ tmp_reg |= (uint32_t)val;
++ iowrite32be(tmp_reg, &qmi_rg->fmqm_gc);
++}
++
++void fman_qmi_disable_dispatch_limit(struct fman_fpm_regs *fpm_rg)
++{
++ iowrite32be(0, &fpm_rg->fmfp_mxd);
++}
++
++void fman_set_liodn_per_port(struct fman_rg *fman_rg, uint8_t port_id,
++ uint16_t liodn_base,
++ uint16_t liodn_ofst)
++{
++ uint32_t tmp;
++
++ if ((port_id > 63) || (port_id < 1))
++ return;
++
++ /* set LIODN base for this port */
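++ /* Illustrative: each fmdmplr register holds the LIODN bases of two
++ * ports, so e.g. port_id 5 maps to fmdmplr[2]; odd ids take the low
++ * half of the register, even ids the DMA_LIODN_SHIFT half. */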
++ tmp = ioread32be(&fman_rg->dma_rg->fmdmplr[port_id / 2]);
++ if (port_id % 2) {
++ tmp &= ~FM_LIODN_BASE_MASK;
++ tmp |= (uint32_t)liodn_base;
++ } else {
++ tmp &= ~(FM_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
++ tmp |= (uint32_t)liodn_base << DMA_LIODN_SHIFT;
++ }
++ iowrite32be(tmp, &fman_rg->dma_rg->fmdmplr[port_id / 2]);
++ iowrite32be((uint32_t)liodn_ofst,
++ &fman_rg->bmi_rg->fmbm_spliodn[port_id - 1]);
++}
++
++bool fman_is_port_stalled(struct fman_fpm_regs *fpm_rg, uint8_t port_id)
++{
++ return (bool)!!(ioread32be(&fpm_rg->fmfp_ps[port_id]) & FPM_PS_STALLED);
++}
++
++void fman_resume_stalled_port(struct fman_fpm_regs *fpm_rg, uint8_t port_id)
++{
++ uint32_t tmp;
++
++ tmp = (uint32_t)((port_id << FPM_PORT_FM_CTL_PORTID_SHIFT) |
++ FPM_PRC_REALSE_STALLED);
++ iowrite32be(tmp, &fpm_rg->fmfp_prc);
++}
++
++int fman_reset_mac(struct fman_fpm_regs *fpm_rg, uint8_t mac_id, bool is_10g)
++{
++ uint32_t msk, timeout = 100;
++
++ /* Get the relevant bit mask */
++ if (is_10g) {
++ switch (mac_id) {
++ case(0):
++ msk = FPM_RSTC_10G0_RESET;
++ break;
++ case(1):
++ msk = FPM_RSTC_10G1_RESET;
++ break;
++ default:
++ return -EINVAL;
++ }
++ } else {
++ switch (mac_id) {
++ case(0):
++ msk = FPM_RSTC_1G0_RESET;
++ break;
++ case(1):
++ msk = FPM_RSTC_1G1_RESET;
++ break;
++ case(2):
++ msk = FPM_RSTC_1G2_RESET;
++ break;
++ case(3):
++ msk = FPM_RSTC_1G3_RESET;
++ break;
++ case(4):
++ msk = FPM_RSTC_1G4_RESET;
++ break;
++ case (5):
++ msk = FPM_RSTC_1G5_RESET;
++ break;
++ case (6):
++ msk = FPM_RSTC_1G6_RESET;
++ break;
++ case (7):
++ msk = FPM_RSTC_1G7_RESET;
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++ /* reset */
++ iowrite32be(msk, &fpm_rg->fm_rstc);
++ while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
++ udelay(10);
++
++ if (!timeout)
++ return -EBUSY;
++ return 0;
++}
++
++uint16_t fman_get_size_of_fifo(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
++{
++ uint32_t tmp_reg;
++
++ if ((port_id > 63) || (port_id < 1))
++ return 0;
++
++ tmp_reg = ioread32be(&bmi_rg->fmbm_pfs[port_id - 1]);
++ return (uint16_t)((tmp_reg & BMI_FIFO_SIZE_MASK) + 1);
++}
++
++uint32_t fman_get_total_fifo_size(struct fman_bmi_regs *bmi_rg)
++{
++ uint32_t reg, res;
++
++ reg = ioread32be(&bmi_rg->fmbm_cfg1);
++ res = (reg >> BMI_CFG1_FIFO_SIZE_SHIFT) & 0x3ff;
++ return res * FMAN_BMI_FIFO_UNITS;
++}
++
++uint16_t fman_get_size_of_extra_fifo(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id)
++{
++ uint32_t tmp_reg;
++
++ if ((port_id > 63) || (port_id < 1))
++ return 0;
++
++ tmp_reg = ioread32be(&bmi_rg->fmbm_pfs[port_id - 1]);
++ return (uint16_t)((tmp_reg & BMI_EXTRA_FIFO_SIZE_MASK) >>
++ BMI_EXTRA_FIFO_SIZE_SHIFT);
++}
++
++void fman_set_size_of_fifo(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id,
++ uint32_t sz_fifo,
++ uint32_t extra_sz_fifo)
++{
++ uint32_t tmp;
++
++ if ((port_id > 63) || (port_id < 1))
++ return;
++
++ /* calculate reg */
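++ /* Illustrative example, assuming FMAN_BMI_FIFO_UNITS is 256 bytes:
++ * sz_fifo = 0x3000 (12 KiB) with extra_sz_fifo = 0 encodes as
++ * 0x3000 / 256 - 1 = 47 in the low field of fmbm_pfs. */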
++ tmp = (uint32_t)((sz_fifo / FMAN_BMI_FIFO_UNITS - 1) |
++ ((extra_sz_fifo / FMAN_BMI_FIFO_UNITS) <<
++ BMI_EXTRA_FIFO_SIZE_SHIFT));
++ iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
++}
++
++uint8_t fman_get_num_of_tasks(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
++{
++ uint32_t tmp;
++
++ if ((port_id > 63) || (port_id < 1))
++ return 0;
++
++ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
++ return (uint8_t)(((tmp & BMI_NUM_OF_TASKS_MASK) >>
++ BMI_NUM_OF_TASKS_SHIFT) + 1);
++}
++
++uint8_t fman_get_num_extra_tasks(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
++{
++ uint32_t tmp;
++
++ if ((port_id > 63) || (port_id < 1))
++ return 0;
++
++ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
++ return (uint8_t)((tmp & BMI_NUM_OF_EXTRA_TASKS_MASK) >>
++ BMI_EXTRA_NUM_OF_TASKS_SHIFT);
++}
++
++void fman_set_num_of_tasks(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id,
++ uint8_t num_tasks,
++ uint8_t num_extra_tasks)
++{
++ uint32_t tmp;
++
++ if ((port_id > 63) || (port_id < 1))
++ return;
++
++ /* calculate reg */
++ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
++ ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
++ tmp |= (uint32_t)(((num_tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
++ (num_extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
++ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
++}
++
++uint8_t fman_get_num_of_dmas(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
++{
++ uint32_t tmp;
++
++ if ((port_id > 63) || (port_id < 1))
++ return 0;
++
++ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
++ return (uint8_t)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
++ BMI_NUM_OF_DMAS_SHIFT) + 1);
++}
++
++uint8_t fman_get_num_extra_dmas(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
++{
++ uint32_t tmp;
++
++ if ((port_id > 63) || (port_id < 1))
++ return 0;
++
++ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
++ return (uint8_t)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
++ BMI_EXTRA_NUM_OF_DMAS_SHIFT);
++}
++
++void fman_set_num_of_open_dmas(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id,
++ uint8_t num_open_dmas,
++ uint8_t num_extra_open_dmas,
++ uint8_t total_num_dmas)
++{
++ uint32_t tmp = 0;
++
++ if ((port_id > 63) || (port_id < 1))
++ return;
++
++ /* calculate reg */
++ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
++ ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
++ tmp |= (uint32_t)(((num_open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
++ (num_extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
++ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
++
++ /* update the total number of DMAs with the committed number of open
++ * DMAs plus the max uncommitted pool. */
++ if (total_num_dmas) {
++ tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
++ tmp |= (uint32_t)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
++ iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
++ }
++}
++
++void fman_set_vsp_window(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id,
++ uint8_t base_storage_profile,
++ uint8_t log2_num_of_profiles)
++{
++ uint32_t tmp = 0;
++
++ if ((port_id > 63) || (port_id < 1))
++ return;
++
++ tmp = ioread32be(&bmi_rg->fmbm_spliodn[port_id - 1]);
++ tmp |= (uint32_t)((uint32_t)base_storage_profile & 0x3f) << 16;
++ tmp |= (uint32_t)log2_num_of_profiles << 28;
++ iowrite32be(tmp, &bmi_rg->fmbm_spliodn[port_id - 1]);
++}
++
++void fman_set_congestion_group_pfc_priority(uint32_t *cpg_rg,
++ uint32_t congestion_group_id,
++ uint8_t priority_bit_map,
++ uint32_t reg_num)
++{
++ uint32_t offset, tmp = 0;
++
++ offset = (congestion_group_id % 4) * 8;
++
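++ /* Each 32-bit CPG register carries the 8-bit priority maps of four
++ * congestion groups; e.g. (illustrative) congestion_group_id 6 lands
++ * at bit offset (6 % 4) * 8 = 16 of the register picked by reg_num. */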
++ tmp = ioread32be(&cpg_rg[reg_num]);
++ tmp &= ~(0xFF << offset);
++ tmp |= (uint32_t)priority_bit_map << offset;
++
++ iowrite32be(tmp, &cpg_rg[reg_num]);
++}
++
++/*****************************************************************************/
++/* API Init unit functions */
++/*****************************************************************************/
++void fman_defconfig(struct fman_cfg *cfg, bool is_master)
++{
++ memset(cfg, 0, sizeof(struct fman_cfg));
++
++ cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
++ cfg->dma_err = DEFAULT_DMA_ERR;
++ cfg->halt_on_external_activ = DEFAULT_HALT_ON_EXTERNAL_ACTIVATION;
++ cfg->halt_on_unrecov_ecc_err = DEFAULT_HALT_ON_UNRECOVERABLE_ECC_ERROR;
++ cfg->en_iram_test_mode = FALSE;
++ cfg->en_muram_test_mode = FALSE;
++ cfg->external_ecc_rams_enable = DEFAULT_EXTERNAL_ECC_RAMS_ENABLE;
++
++ if (!is_master)
++ return;
++
++ cfg->dma_aid_override = DEFAULT_AID_OVERRIDE;
++ cfg->dma_aid_mode = DEFAULT_AID_MODE;
++ cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
++ cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
++ cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
++ cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
++ cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
++ cfg->dma_en_emergency = DEFAULT_DMA_EN_EMERGENCY;
++ cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
++ cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
++ cfg->dma_en_emergency_smoother = DEFAULT_DMA_EN_EMERGENCY_SMOOTHER;
++ cfg->dma_emergency_switch_counter = DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER;
++ cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
++ cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
++ cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
++ cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
++ cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
++ cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
++ cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
++ cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
++ cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
++
++ cfg->pedantic_dma = FALSE;
++ cfg->tnum_aging_period = DEFAULT_TNUM_AGING_PERIOD;
++ cfg->dma_stop_on_bus_error = FALSE;
++ cfg->qmi_deq_option_support = FALSE;
++}
++
++void fman_regconfig(struct fman_rg *fman_rg, struct fman_cfg *cfg)
++{
++ uint32_t tmp_reg;
++
++ /* read the values from the registers as they are initialized by the HW with
++ * the required values.
++ */
++ tmp_reg = ioread32be(&fman_rg->bmi_rg->fmbm_cfg1);
++ cfg->total_fifo_size =
++ (((tmp_reg & BMI_TOTAL_FIFO_SIZE_MASK) >> BMI_CFG1_FIFO_SIZE_SHIFT) + 1) * FMAN_BMI_FIFO_UNITS;
++
++ tmp_reg = ioread32be(&fman_rg->bmi_rg->fmbm_cfg2);
++ cfg->total_num_of_tasks =
++ (uint8_t)(((tmp_reg & BMI_TOTAL_NUM_OF_TASKS_MASK) >> BMI_CFG2_TASKS_SHIFT) + 1);
++
++ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmtr);
++ cfg->dma_comm_qtsh_asrt_emer = (uint8_t)(tmp_reg >> DMA_THRESH_COMMQ_SHIFT);
++
++ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmhy);
++ cfg->dma_comm_qtsh_clr_emer = (uint8_t)(tmp_reg >> DMA_THRESH_COMMQ_SHIFT);
++
++ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmmr);
++ cfg->dma_cache_override = (enum fman_dma_cache_override)((tmp_reg & DMA_MODE_CACHE_OR_MASK) >> DMA_MODE_CACHE_OR_SHIFT);
++ cfg->dma_cam_num_of_entries = (uint8_t)((((tmp_reg & DMA_MODE_CEN_MASK) >> DMA_MODE_CEN_SHIFT) + 1) * DMA_CAM_UNITS);
++ cfg->dma_aid_override = (bool)((tmp_reg & DMA_MODE_AID_OR) ? TRUE : FALSE);
++ cfg->dma_dbg_cnt_mode = (enum fman_dma_dbg_cnt_mode)((tmp_reg & DMA_MODE_DBG_MASK) >> DMA_MODE_DBG_SHIFT);
++ cfg->dma_en_emergency = (bool)((tmp_reg & DMA_MODE_EB) ? TRUE : FALSE);
++
++ tmp_reg = ioread32be(&fman_rg->fpm_rg->fmfp_mxd);
++ cfg->disp_limit_tsh = (uint8_t)((tmp_reg & FPM_DISP_LIMIT_MASK) >> FPM_DISP_LIMIT_SHIFT);
++
++ tmp_reg = ioread32be(&fman_rg->fpm_rg->fmfp_dist1);
++ cfg->prs_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_PRS_MASK) >> FPM_THR1_PRS_SHIFT);
++ cfg->plcr_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_KG_MASK) >> FPM_THR1_KG_SHIFT);
++ cfg->kg_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_PLCR_MASK) >> FPM_THR1_PLCR_SHIFT);
++ cfg->bmi_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_BMI_MASK) >> FPM_THR1_BMI_SHIFT);
++
++ tmp_reg = ioread32be(&fman_rg->fpm_rg->fmfp_dist2);
++ cfg->qmi_enq_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_QMI_ENQ_MASK) >> FPM_THR2_QMI_ENQ_SHIFT);
++ cfg->qmi_deq_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_QMI_DEQ_MASK) >> FPM_THR2_QMI_DEQ_SHIFT);
++ cfg->fm_ctl1_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_FM_CTL1_MASK) >> FPM_THR2_FM_CTL1_SHIFT);
++ cfg->fm_ctl2_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_FM_CTL2_MASK) >> FPM_THR2_FM_CTL2_SHIFT);
++
++ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmsetr);
++ cfg->dma_sos_emergency = tmp_reg;
++
++ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmwcr);
++ cfg->dma_watchdog = tmp_reg / cfg->clk_freq;
++
++ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmemsr);
++ cfg->dma_en_emergency_smoother = (bool)((tmp_reg & DMA_EMSR_EMSTR_MASK) ? TRUE : FALSE);
++ cfg->dma_emergency_switch_counter = (tmp_reg & DMA_EMSR_EMSTR_MASK);
++}
++
++void fman_reset(struct fman_fpm_regs *fpm_rg)
++{
++ iowrite32be(FPM_RSTC_FM_RESET, &fpm_rg->fm_rstc);
++}
++
++/**************************************************************************//**
++ @Function fman_dma_init
++
++ @Description Initializes the FM DMA registers
++
++ @Param[in] dma_rg - FM DMA registers
++ @Param[in] cfg - FM configuration parameters
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++int fman_dma_init(struct fman_dma_regs *dma_rg, struct fman_cfg *cfg)
++{
++ uint32_t tmp_reg;
++
++ /**********************/
++ /* Init DMA Registers */
++ /**********************/
++ /* clear status reg events */
++ tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
++ DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
++ iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg,
++ &dma_rg->fmdmsr);
++
++ /* configure mode register */
++ tmp_reg = 0;
++ tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
++ if (cfg->dma_aid_override)
++ tmp_reg |= DMA_MODE_AID_OR;
++ if (cfg->exceptions & FMAN_EX_DMA_BUS_ERROR)
++ tmp_reg |= DMA_MODE_BER;
++ if ((cfg->exceptions & FMAN_EX_DMA_SYSTEM_WRITE_ECC) |
++ (cfg->exceptions & FMAN_EX_DMA_READ_ECC) |
++ (cfg->exceptions & FMAN_EX_DMA_FM_WRITE_ECC))
++ tmp_reg |= DMA_MODE_ECC;
++ if (cfg->dma_stop_on_bus_error)
++ tmp_reg |= DMA_MODE_SBER;
++ if (cfg->dma_axi_dbg_num_of_beats)
++ tmp_reg |= (uint32_t)(DMA_MODE_AXI_DBG_MASK &
++ ((cfg->dma_axi_dbg_num_of_beats - 1) << DMA_MODE_AXI_DBG_SHIFT));
++
++ if (cfg->dma_en_emergency) {
++ tmp_reg |= cfg->dma_emergency_bus_select;
++ tmp_reg |= cfg->dma_emergency_level << DMA_MODE_EMER_LVL_SHIFT;
++ if (cfg->dma_en_emergency_smoother)
++ iowrite32be(cfg->dma_emergency_switch_counter,
++ &dma_rg->fmdmemsr);
++ }
++ tmp_reg |= ((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) <<
++ DMA_MODE_CEN_SHIFT;
++ tmp_reg |= DMA_MODE_SECURE_PROT;
++ tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
++ tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
++
++ if (cfg->pedantic_dma)
++ tmp_reg |= DMA_MODE_EMER_READ;
++
++ iowrite32be(tmp_reg, &dma_rg->fmdmmr);
++
++ /* configure thresholds register */
++ tmp_reg = ((uint32_t)cfg->dma_comm_qtsh_asrt_emer <<
++ DMA_THRESH_COMMQ_SHIFT) |
++ ((uint32_t)cfg->dma_read_buf_tsh_asrt_emer <<
++ DMA_THRESH_READ_INT_BUF_SHIFT) |
++ ((uint32_t)cfg->dma_write_buf_tsh_asrt_emer);
++
++ iowrite32be(tmp_reg, &dma_rg->fmdmtr);
++
++ /* configure hysteresis register */
++ tmp_reg = ((uint32_t)cfg->dma_comm_qtsh_clr_emer <<
++ DMA_THRESH_COMMQ_SHIFT) |
++ ((uint32_t)cfg->dma_read_buf_tsh_clr_emer <<
++ DMA_THRESH_READ_INT_BUF_SHIFT) |
++ ((uint32_t)cfg->dma_write_buf_tsh_clr_emer);
++
++ iowrite32be(tmp_reg, &dma_rg->fmdmhy);
++
++ /* configure emergency threshold */
++ iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
++
++ /* configure Watchdog */
++ iowrite32be((cfg->dma_watchdog * cfg->clk_freq),
++ &dma_rg->fmdmwcr);
++
++ iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
++
++ return 0;
++}
++
++int fman_fpm_init(struct fman_fpm_regs *fpm_rg, struct fman_cfg *cfg)
++{
++ uint32_t tmp_reg;
++ int i;
++
++ /**********************/
++ /* Init FPM Registers */
++ /**********************/
++ tmp_reg = (uint32_t)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
++ iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
++
++ tmp_reg = (((uint32_t)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
++ ((uint32_t)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
++ ((uint32_t)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
++ ((uint32_t)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
++ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
++
++ tmp_reg = (((uint32_t)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
++ ((uint32_t)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
++ ((uint32_t)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
++ ((uint32_t)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
++ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
++
++ /* define exceptions and error behavior */
++ tmp_reg = 0;
++ /* Clear events */
++ tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
++ FPM_EV_MASK_SINGLE_ECC);
++ /* enable interrupts */
++ if (cfg->exceptions & FMAN_EX_FPM_STALL_ON_TASKS)
++ tmp_reg |= FPM_EV_MASK_STALL_EN;
++ if (cfg->exceptions & FMAN_EX_FPM_SINGLE_ECC)
++ tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
++ if (cfg->exceptions & FMAN_EX_FPM_DOUBLE_ECC)
++ tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
++ tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
++ tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
++ if (!cfg->halt_on_external_activ)
++ tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
++ if (!cfg->halt_on_unrecov_ecc_err)
++ tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
++ iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
++
++ /* clear all fmCtls event registers */
++ for (i = 0; i < cfg->num_of_fman_ctrl_evnt_regs; i++)
++ iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
++
++ /* RAM ECC - enable and clear events*/
++ /* first we need to clear all parser memory,
++ * as it is uninitialized and may cause ECC errors */
++ /* event bits */
++ tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
++ /* RAMs enable is not affected by the RCR bit, but by a COP configuration */
++ if (cfg->external_ecc_rams_enable)
++ tmp_reg |= FPM_RAM_RAMS_ECC_EN_SRC_SEL;
++
++ /* enable test mode */
++ if (cfg->en_muram_test_mode)
++ tmp_reg |= FPM_RAM_MURAM_TEST_ECC;
++ if (cfg->en_iram_test_mode)
++ tmp_reg |= FPM_RAM_IRAM_TEST_ECC;
++ iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
++
++ tmp_reg = 0;
++ if (cfg->exceptions & FMAN_EX_IRAM_ECC) {
++ tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
++ fman_enable_rams_ecc(fpm_rg);
++ }
++ if (cfg->exceptions & FMAN_EX_NURAM_ECC) {
++ tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
++ fman_enable_rams_ecc(fpm_rg);
++ }
++ iowrite32be(tmp_reg, &fpm_rg->fm_rie);
++
++ return 0;
++}
++
++int fman_bmi_init(struct fman_bmi_regs *bmi_rg, struct fman_cfg *cfg)
++{
++ uint32_t tmp_reg;
++
++ /**********************/
++ /* Init BMI Registers */
++ /**********************/
++
++ /* define common resources */
++ tmp_reg = cfg->fifo_base_addr;
++ tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
++
++ tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
++ BMI_CFG1_FIFO_SIZE_SHIFT);
++ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
++
++ tmp_reg = ((uint32_t)(cfg->total_num_of_tasks - 1) <<
++ BMI_CFG2_TASKS_SHIFT);
++ /* the number of DMAs will be updated dynamically as each port is set */
++ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
++
++ /* define unmaskable exceptions, enable and clear events */
++ tmp_reg = 0;
++ iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
++ BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
++ BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
++ BMI_ERR_INTR_EN_DISPATCH_RAM_ECC,
++ &bmi_rg->fmbm_ievr);
++
++ if (cfg->exceptions & FMAN_EX_BMI_LIST_RAM_ECC)
++ tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
++ if (cfg->exceptions & FMAN_EX_BMI_PIPELINE_ECC)
++ tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
++ if (cfg->exceptions & FMAN_EX_BMI_STATISTICS_RAM_ECC)
++ tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
++ if (cfg->exceptions & FMAN_EX_BMI_DISPATCH_RAM_ECC)
++ tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
++ iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
++
++ return 0;
++}
++
++int fman_qmi_init(struct fman_qmi_regs *qmi_rg, struct fman_cfg *cfg)
++{
++ uint32_t tmp_reg;
++ uint16_t period_in_fm_clocks;
++ uint8_t remainder;
++ /**********************/
++ /* Init QMI Registers */
++ /**********************/
++ /* Clear error interrupt events */
++
++ iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
++ &qmi_rg->fmqm_eie);
++ tmp_reg = 0;
++ if (cfg->exceptions & FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
++ tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
++ if (cfg->exceptions & FMAN_EX_QMI_DOUBLE_ECC)
++ tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
++ /* enable events */
++ iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
++
++ if (cfg->tnum_aging_period) {
++ /* tnum_aging_period is in units of usec, clk_freq in MHz */
++ period_in_fm_clocks = (uint16_t)
++ (cfg->tnum_aging_period * cfg->clk_freq);
++ /* period_in_fm_clocks must be a multiple of 64 */
++ remainder = (uint8_t)(period_in_fm_clocks % 64);
++ if (remainder)
++ tmp_reg = (uint32_t)((period_in_fm_clocks / 64) + 1);
++ else {
++ tmp_reg = (uint32_t)(period_in_fm_clocks / 64);
++ if (!tmp_reg)
++ tmp_reg = 1;
++ }
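++ /* Illustrative example: tnum_aging_period = 10 usec at a 600 MHz
++ * FM clock gives 6000 clocks; 6000 / 64 = 93 remainder 48, so the
++ * value is rounded up to 94 before the TAP shift below. */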
++ tmp_reg <<= QMI_TAPC_TAP;
++ iowrite32be(tmp_reg, &qmi_rg->fmqm_tapc);
++ }
++ tmp_reg = 0;
++ /* Clear interrupt events */
++ iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
++ if (cfg->exceptions & FMAN_EX_QMI_SINGLE_ECC)
++ tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
++ /* enable events */
++ iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
++
++ return 0;
++}
++
++int fman_enable(struct fman_rg *fman_rg, struct fman_cfg *cfg)
++{
++ uint32_t cfg_reg = 0;
++
++ /**********************/
++ /* Enable all modules */
++ /**********************/
++ /* clear & enable global counters - calculate reg and save for later,
++ * because it's the same reg for QMI enable */
++ cfg_reg = QMI_CFG_EN_COUNTERS;
++ if (cfg->qmi_deq_option_support)
++ cfg_reg |= (uint32_t)(((cfg->qmi_def_tnums_thresh) << 8) |
++ (uint32_t)cfg->qmi_def_tnums_thresh);
++
++ iowrite32be(BMI_INIT_START, &fman_rg->bmi_rg->fmbm_init);
++ iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
++ &fman_rg->qmi_rg->fmqm_gc);
++
++ return 0;
++}
++
++void fman_free_resources(struct fman_rg *fman_rg)
++{
++ /* disable BMI and QMI */
++ iowrite32be(0, &fman_rg->bmi_rg->fmbm_init);
++ iowrite32be(0, &fman_rg->qmi_rg->fmqm_gc);
++
++ /* release BMI resources */
++ iowrite32be(0, &fman_rg->bmi_rg->fmbm_cfg2);
++ iowrite32be(0, &fman_rg->bmi_rg->fmbm_cfg1);
++
++ /* disable ECC */
++ iowrite32be(0, &fman_rg->fpm_rg->fm_rcr);
++}
++
++/****************************************************/
++/* API Run-time Control unit functions */
++/****************************************************/
++uint32_t fman_get_normal_pending(struct fman_fpm_regs *fpm_rg)
++{
++ return ioread32be(&fpm_rg->fm_npi);
++}
++
++uint32_t fman_get_controller_event(struct fman_fpm_regs *fpm_rg, uint8_t reg_id)
++{
++ uint32_t event;
++
++ event = ioread32be(&fpm_rg->fmfp_fcev[reg_id]) &
++ ioread32be(&fpm_rg->fmfp_cee[reg_id]);
++ iowrite32be(event, &fpm_rg->fmfp_cev[reg_id]);
++
++ return event;
++}
++
++uint32_t fman_get_error_pending(struct fman_fpm_regs *fpm_rg)
++{
++ return ioread32be(&fpm_rg->fm_epi);
++}
++
++void fman_set_ports_bandwidth(struct fman_bmi_regs *bmi_rg, uint8_t *weights)
++{
++ int i;
++ uint8_t shift;
++ uint32_t tmp = 0;
++
++ for (i = 0; i < 64; i++) {
++ if (weights[i] > 1) { /* weight 1 encodes as field value 0, the reset default */
++ /* Add this port to tmp_reg */
++ /* (each 8 ports result in one register) */
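++ /* e.g. (illustrative) i = 2 with weights[2] = 5:
++ * shift = 32 - 4 * 3 = 20, so 5 - 1 = 4 lands in bits 23:20
++ * of fmbm_arb[0] */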
++ shift = (uint8_t)(32 - 4 * ((i % 8) + 1));
++ tmp |= ((weights[i] - 1) << shift);
++ }
++ if (i % 8 == 7) { /* last in this set */
++ iowrite32be(tmp, &bmi_rg->fmbm_arb[i / 8]);
++ tmp = 0;
++ }
++ }
++}
++
++void fman_enable_rams_ecc(struct fman_fpm_regs *fpm_rg)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&fpm_rg->fm_rcr);
++ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
++ iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN,
++ &fpm_rg->fm_rcr);
++ else
++ iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
++ FPM_RAM_IRAM_ECC_EN,
++ &fpm_rg->fm_rcr);
++}
++
++void fman_disable_rams_ecc(struct fman_fpm_regs *fpm_rg)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&fpm_rg->fm_rcr);
++ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
++ iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN,
++ &fpm_rg->fm_rcr);
++ else
++ iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
++ &fpm_rg->fm_rcr);
++}
++
++int fman_set_exception(struct fman_rg *fman_rg,
++ enum fman_exceptions exception,
++ bool enable)
++{
++ uint32_t tmp;
++
++ switch (exception) {
++ case(E_FMAN_EX_DMA_BUS_ERROR):
++ tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
++ if (enable)
++ tmp |= DMA_MODE_BER;
++ else
++ tmp &= ~DMA_MODE_BER;
++ /* disable bus error */
++ iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
++ break;
++ case(E_FMAN_EX_DMA_READ_ECC):
++ case(E_FMAN_EX_DMA_SYSTEM_WRITE_ECC):
++ case(E_FMAN_EX_DMA_FM_WRITE_ECC):
++ tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
++ if (enable)
++ tmp |= DMA_MODE_ECC;
++ else
++ tmp &= ~DMA_MODE_ECC;
++ iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
++ break;
++ case(E_FMAN_EX_FPM_STALL_ON_TASKS):
++ tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
++ if (enable)
++ tmp |= FPM_EV_MASK_STALL_EN;
++ else
++ tmp &= ~FPM_EV_MASK_STALL_EN;
++ iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
++ break;
++ case(E_FMAN_EX_FPM_SINGLE_ECC):
++ tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
++ if (enable)
++ tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
++ else
++ tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
++ iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
++ break;
++ case(E_FMAN_EX_FPM_DOUBLE_ECC):
++ tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
++ if (enable)
++ tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
++ else
++ tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
++ iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
++ break;
++ case(E_FMAN_EX_QMI_SINGLE_ECC):
++ tmp = ioread32be(&fman_rg->qmi_rg->fmqm_ien);
++ if (enable)
++ tmp |= QMI_INTR_EN_SINGLE_ECC;
++ else
++ tmp &= ~QMI_INTR_EN_SINGLE_ECC;
++ iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_ien);
++ break;
++ case(E_FMAN_EX_QMI_DOUBLE_ECC):
++ tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
++ if (enable)
++ tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
++ else
++ tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
++ iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
++ break;
++ case(E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID):
++ tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
++ if (enable)
++ tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
++ else
++ tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
++ iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
++ break;
++ case(E_FMAN_EX_BMI_LIST_RAM_ECC):
++ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
++ if (enable)
++ tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
++ else
++ tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
++ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
++ break;
++ case(E_FMAN_EX_BMI_STORAGE_PROFILE_ECC):
++ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
++ if (enable)
++ tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
++ else
++ tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
++ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
++ break;
++ case(E_FMAN_EX_BMI_STATISTICS_RAM_ECC):
++ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
++ if (enable)
++ tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
++ else
++ tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
++ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
++ break;
++ case(E_FMAN_EX_BMI_DISPATCH_RAM_ECC):
++ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
++ if (enable)
++ tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
++ else
++ tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
++ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
++ break;
++ case(E_FMAN_EX_IRAM_ECC):
++ tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
++ if (enable) {
++ /* enable ECC if not enabled */
++ fman_enable_rams_ecc(fman_rg->fpm_rg);
++ /* enable ECC interrupts */
++ tmp |= FPM_IRAM_ECC_ERR_EX_EN;
++ } else {
++ /* ECC mechanism may be disabled,
++ * depending on driver status */
++ fman_disable_rams_ecc(fman_rg->fpm_rg);
++ tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
++ }
++ iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
++ break;
++ case(E_FMAN_EX_MURAM_ECC):
++ tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
++ if (enable) {
++ /* enable ECC if not enabled */
++ fman_enable_rams_ecc(fman_rg->fpm_rg);
++ /* enable ECC interrupts */
++ tmp |= FPM_MURAM_ECC_ERR_EX_EN;
++ } else {
++ /* ECC mechanism may be disabled,
++ * depending on driver status */
++ fman_disable_rams_ecc(fman_rg->fpm_rg);
++ tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
++ }
++ iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++void fman_get_revision(struct fman_fpm_regs *fpm_rg,
++ uint8_t *major,
++ uint8_t *minor)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&fpm_rg->fm_ip_rev_1);
++ *major = (uint8_t)((tmp & FPM_REV1_MAJOR_MASK) >> FPM_REV1_MAJOR_SHIFT);
++ *minor = (uint8_t)((tmp & FPM_REV1_MINOR_MASK) >> FPM_REV1_MINOR_SHIFT);
++}
++
++uint32_t fman_get_counter(struct fman_rg *fman_rg,
++ enum fman_counters reg_name)
++{
++ uint32_t ret_val;
++
++ switch (reg_name) {
++ case(E_FMAN_COUNTERS_ENQ_TOTAL_FRAME):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_etfc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_TOTAL_FRAME):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dtfc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_0):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc0);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_1):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc1);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_2):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc2);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_3):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc3);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_FROM_DEFAULT):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dfdc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_FROM_CONTEXT):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dfcc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_FROM_FD):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dffc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_CONFIRM):
++ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dcc);
++ break;
++ default:
++ ret_val = 0;
++ }
++ return ret_val;
++}
++
++int fman_modify_counter(struct fman_rg *fman_rg,
++ enum fman_counters reg_name,
++ uint32_t val)
++{
++ /* When applicable (when there is an 'enable counters' bit),
++ * check that counters are enabled */
++ switch (reg_name) {
++ case(E_FMAN_COUNTERS_ENQ_TOTAL_FRAME):
++ case(E_FMAN_COUNTERS_DEQ_TOTAL_FRAME):
++ case(E_FMAN_COUNTERS_DEQ_0):
++ case(E_FMAN_COUNTERS_DEQ_1):
++ case(E_FMAN_COUNTERS_DEQ_2):
++ case(E_FMAN_COUNTERS_DEQ_3):
++ case(E_FMAN_COUNTERS_DEQ_FROM_DEFAULT):
++ case(E_FMAN_COUNTERS_DEQ_FROM_CONTEXT):
++ case(E_FMAN_COUNTERS_DEQ_FROM_FD):
++ case(E_FMAN_COUNTERS_DEQ_CONFIRM):
++ if (!(ioread32be(&fman_rg->qmi_rg->fmqm_gc) &
++ QMI_CFG_EN_COUNTERS))
++ return -EINVAL;
++ break;
++ default:
++ break;
++ }
++ /* Set counter */
++ switch (reg_name) {
++ case(E_FMAN_COUNTERS_ENQ_TOTAL_FRAME):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_etfc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_TOTAL_FRAME):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dtfc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_0):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc0);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_1):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc1);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_2):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc2);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_3):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc3);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_FROM_DEFAULT):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dfdc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_FROM_CONTEXT):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dfcc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_FROM_FD):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dffc);
++ break;
++ case(E_FMAN_COUNTERS_DEQ_CONFIRM):
++ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dcc);
++ break;
++ case(E_FMAN_COUNTERS_SEMAPHOR_ENTRY_FULL_REJECT):
++ iowrite32be(val, &fman_rg->dma_rg->fmdmsefrc);
++ break;
++ case(E_FMAN_COUNTERS_SEMAPHOR_QUEUE_FULL_REJECT):
++ iowrite32be(val, &fman_rg->dma_rg->fmdmsqfrc);
++ break;
++ case(E_FMAN_COUNTERS_SEMAPHOR_SYNC_REJECT):
++ iowrite32be(val, &fman_rg->dma_rg->fmdmssrc);
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++void fman_set_dma_emergency(struct fman_dma_regs *dma_rg,
++ bool is_write,
++ bool enable)
++{
++ uint32_t msk;
++
++ msk = (uint32_t)(is_write ? DMA_MODE_EMER_WRITE : DMA_MODE_EMER_READ);
++
++ if (enable)
++ iowrite32be(ioread32be(&dma_rg->fmdmmr) | msk,
++ &dma_rg->fmdmmr);
++ else /* disable */
++ iowrite32be(ioread32be(&dma_rg->fmdmmr) & ~msk,
++ &dma_rg->fmdmmr);
++}
++
++void fman_set_dma_ext_bus_pri(struct fman_dma_regs *dma_rg, uint32_t pri)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&dma_rg->fmdmmr) |
++ (pri << DMA_MODE_BUS_PRI_SHIFT);
++
++ iowrite32be(tmp, &dma_rg->fmdmmr);
++}
++
++uint32_t fman_get_dma_status(struct fman_dma_regs *dma_rg)
++{
++ return ioread32be(&dma_rg->fmdmsr);
++}
++
++void fman_force_intr(struct fman_rg *fman_rg,
++ enum fman_exceptions exception)
++{
++ switch (exception) {
++ case E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
++ iowrite32be(QMI_ERR_INTR_EN_DEQ_FROM_DEF,
++ &fman_rg->qmi_rg->fmqm_eif);
++ break;
++ case E_FMAN_EX_QMI_SINGLE_ECC:
++ iowrite32be(QMI_INTR_EN_SINGLE_ECC,
++ &fman_rg->qmi_rg->fmqm_if);
++ break;
++ case E_FMAN_EX_QMI_DOUBLE_ECC:
++ iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC,
++ &fman_rg->qmi_rg->fmqm_eif);
++ break;
++ case E_FMAN_EX_BMI_LIST_RAM_ECC:
++ iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC,
++ &fman_rg->bmi_rg->fmbm_ifr);
++ break;
++ case E_FMAN_EX_BMI_STORAGE_PROFILE_ECC:
++ iowrite32be(BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC,
++ &fman_rg->bmi_rg->fmbm_ifr);
++ break;
++ case E_FMAN_EX_BMI_STATISTICS_RAM_ECC:
++ iowrite32be(BMI_ERR_INTR_EN_STATISTICS_RAM_ECC,
++ &fman_rg->bmi_rg->fmbm_ifr);
++ break;
++ case E_FMAN_EX_BMI_DISPATCH_RAM_ECC:
++ iowrite32be(BMI_ERR_INTR_EN_DISPATCH_RAM_ECC,
++ &fman_rg->bmi_rg->fmbm_ifr);
++ break;
++ default:
++ break;
++ }
++}
++
++bool fman_is_qmi_halt_not_busy_state(struct fman_qmi_regs *qmi_rg)
++{
++ return (bool)!!(ioread32be(&qmi_rg->fmqm_gs) & QMI_GS_HALT_NOT_BUSY);
++}
++
++void fman_resume(struct fman_fpm_regs *fpm_rg)
++{
++ uint32_t tmp;
++
++ tmp = ioread32be(&fpm_rg->fmfp_ee);
++ /* clear tmp_reg event bits in order not to clear standing events */
++ tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
++ FPM_EV_MASK_STALL |
++ FPM_EV_MASK_SINGLE_ECC);
++ tmp |= FPM_EV_MASK_RELEASE_FM;
++
++ iowrite32be(tmp, &fpm_rg->fmfp_ee);
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_common.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_common.h
+new file mode 100644
+index 00000000..204840c9
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_common.h
+@@ -0,0 +1,1214 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_common.h
++
++ @Description FM internal structures and definitions.
++*//***************************************************************************/
++#ifndef __FM_COMMON_H
++#define __FM_COMMON_H
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "fm_pcd_ext.h"
++#include "fm_ext.h"
++#include "fm_port_ext.h"
++
++
++#define e_FM_PORT_TYPE_OH_HOST_COMMAND e_FM_PORT_TYPE_DUMMY
++
++#define CLS_PLAN_NUM_PER_GRP 8
++
++#define IP_OFFLOAD_PACKAGE_NUMBER 106
++#define CAPWAP_OFFLOAD_PACKAGE_NUMBER 108
++#define IS_OFFLOAD_PACKAGE(num) ((num == IP_OFFLOAD_PACKAGE_NUMBER) || (num == CAPWAP_OFFLOAD_PACKAGE_NUMBER))
++
++
++
++/**************************************************************************//**
++ @Description Modules registers offsets
++*//***************************************************************************/
++#define FM_MM_MURAM 0x00000000
++#define FM_MM_BMI 0x00080000
++#define FM_MM_QMI 0x00080400
++#define FM_MM_PRS 0x000c7000
++#define FM_MM_KG 0x000C1000
++#define FM_MM_DMA 0x000C2000
++#define FM_MM_FPM 0x000C3000
++#define FM_MM_PLCR 0x000C0000
++#define FM_MM_IMEM 0x000C4000
++#define FM_MM_CGP 0x000DB000
++#define FM_MM_TRB(i) (0x000D0200 + 0x400 * (i))
++#if (DPAA_VERSION >= 11)
++#define FM_MM_SP 0x000dc000
++#endif /* (DPAA_VERSION >= 11) */
++
++
++/**************************************************************************//**
++ @Description Enum for inter-module interrupts registration
++*//***************************************************************************/
++typedef enum e_FmEventModules{
++ e_FM_MOD_PRS, /**< Parser event */
++ e_FM_MOD_KG, /**< Keygen event */
++ e_FM_MOD_PLCR, /**< Policer event */
++ e_FM_MOD_10G_MAC, /**< 10G MAC event */
++ e_FM_MOD_1G_MAC, /**< 1G MAC event */
++ e_FM_MOD_TMR, /**< Timer event */
++ e_FM_MOD_FMAN_CTRL, /**< FMAN Controller Timer event */
++ e_FM_MOD_MACSEC,
++ e_FM_MOD_DUMMY_LAST
++} e_FmEventModules;
++
++/**************************************************************************//**
++ @Description Enum for interrupts types
++*//***************************************************************************/
++typedef enum e_FmIntrType {
++ e_FM_INTR_TYPE_ERR,
++ e_FM_INTR_TYPE_NORMAL
++} e_FmIntrType;
++
++/**************************************************************************//**
++ @Description Enum for inter-module interrupts registration
++*//***************************************************************************/
++typedef enum e_FmInterModuleEvent
++{
++ e_FM_EV_PRS = 0, /**< Parser event */
++ e_FM_EV_ERR_PRS, /**< Parser error event */
++ e_FM_EV_KG, /**< Keygen event */
++ e_FM_EV_ERR_KG, /**< Keygen error event */
++ e_FM_EV_PLCR, /**< Policer event */
++ e_FM_EV_ERR_PLCR, /**< Policer error event */
++ e_FM_EV_ERR_10G_MAC0, /**< 10G MAC 0 error event */
++ e_FM_EV_ERR_10G_MAC1, /**< 10G MAC 1 error event */
++ e_FM_EV_ERR_1G_MAC0, /**< 1G MAC 0 error event */
++ e_FM_EV_ERR_1G_MAC1, /**< 1G MAC 1 error event */
++ e_FM_EV_ERR_1G_MAC2, /**< 1G MAC 2 error event */
++ e_FM_EV_ERR_1G_MAC3, /**< 1G MAC 3 error event */
++ e_FM_EV_ERR_1G_MAC4, /**< 1G MAC 4 error event */
++ e_FM_EV_ERR_1G_MAC5, /**< 1G MAC 5 error event */
++ e_FM_EV_ERR_1G_MAC6, /**< 1G MAC 6 error event */
++ e_FM_EV_ERR_1G_MAC7, /**< 1G MAC 7 error event */
++ e_FM_EV_ERR_MACSEC_MAC0,
++ e_FM_EV_TMR, /**< Timer event */
++ e_FM_EV_10G_MAC0, /**< 10G MAC 0 event (Magic packet detection)*/
++ e_FM_EV_10G_MAC1, /**< 10G MAC 1 event (Magic packet detection)*/
++ e_FM_EV_1G_MAC0, /**< 1G MAC 0 event (Magic packet detection)*/
++ e_FM_EV_1G_MAC1, /**< 1G MAC 1 event (Magic packet detection)*/
++ e_FM_EV_1G_MAC2, /**< 1G MAC 2 (Magic packet detection)*/
++ e_FM_EV_1G_MAC3, /**< 1G MAC 3 (Magic packet detection)*/
++ e_FM_EV_1G_MAC4, /**< 1G MAC 4 (Magic packet detection)*/
++ e_FM_EV_1G_MAC5, /**< 1G MAC 5 (Magic packet detection)*/
++ e_FM_EV_1G_MAC6, /**< 1G MAC 6 (Magic packet detection)*/
++ e_FM_EV_1G_MAC7, /**< 1G MAC 7 (Magic packet detection)*/
++ e_FM_EV_MACSEC_MAC0, /**< MACSEC MAC 0 event */
++ e_FM_EV_FMAN_CTRL_0, /**< Fman controller event 0 */
++ e_FM_EV_FMAN_CTRL_1, /**< Fman controller event 1 */
++ e_FM_EV_FMAN_CTRL_2, /**< Fman controller event 2 */
++ e_FM_EV_FMAN_CTRL_3, /**< Fman controller event 3 */
++ e_FM_EV_DUMMY_LAST
++} e_FmInterModuleEvent;
++
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++/**************************************************************************//**
++ @Description PCD Policer profile registers
++*//***************************************************************************/
++typedef _Packed struct t_FmPcdPlcrProfileRegs {
++ volatile uint32_t fmpl_pemode; /* 0x090 FMPL_PEMODE - FM Policer Profile Entry Mode*/
++ volatile uint32_t fmpl_pegnia; /* 0x094 FMPL_PEGNIA - FM Policer Profile Entry GREEN Next Invoked Action*/
++ volatile uint32_t fmpl_peynia; /* 0x098 FMPL_PEYNIA - FM Policer Profile Entry YELLOW Next Invoked Action*/
++ volatile uint32_t fmpl_pernia; /* 0x09C FMPL_PERNIA - FM Policer Profile Entry RED Next Invoked Action*/
++ volatile uint32_t fmpl_pecir; /* 0x0A0 FMPL_PECIR - FM Policer Profile Entry Committed Information Rate*/
++ volatile uint32_t fmpl_pecbs; /* 0x0A4 FMPL_PECBS - FM Policer Profile Entry Committed Burst Size*/
++ volatile uint32_t fmpl_pepepir_eir; /* 0x0A8 FMPL_PEPIR_EIR - FM Policer Profile Entry Peak/Excess Information Rate*/
++ volatile uint32_t fmpl_pepbs_ebs; /* 0x0AC FMPL_PEPBS_EBS - FM Policer Profile Entry Peak/Excess Burst Size*/
++ volatile uint32_t fmpl_pelts; /* 0x0B0 FMPL_PELTS - FM Policer Profile Entry Last TimeStamp*/
++ volatile uint32_t fmpl_pects; /* 0x0B4 FMPL_PECTS - FM Policer Profile Entry Committed Token Status*/
++ volatile uint32_t fmpl_pepts_ets; /* 0x0B8 FMPL_PEPTS_ETS - FM Policer Profile Entry Peak/Excess Token Status*/
++ volatile uint32_t fmpl_pegpc; /* 0x0BC FMPL_PEGPC - FM Policer Profile Entry GREEN Packet Counter*/
++ volatile uint32_t fmpl_peypc; /* 0x0C0 FMPL_PEYPC - FM Policer Profile Entry YELLOW Packet Counter*/
++ volatile uint32_t fmpl_perpc; /* 0x0C4 FMPL_PERPC - FM Policer Profile Entry RED Packet Counter */
++ volatile uint32_t fmpl_perypc; /* 0x0C8 FMPL_PERYPC - FM Policer Profile Entry Recolored YELLOW Packet Counter*/
++ volatile uint32_t fmpl_perrpc; /* 0x0CC FMPL_PERRPC - FM Policer Profile Entry Recolored RED Packet Counter*/
++ volatile uint32_t fmpl_res1[12]; /* 0x0D0-0x0FF Reserved */
++} _PackedType t_FmPcdPlcrProfileRegs;
++
++
++typedef _Packed struct t_FmPcdCcCapwapReassmTimeoutParams {
++ volatile uint32_t portIdAndCapwapReassmTbl;
++ volatile uint32_t fqidForTimeOutFrames;
++ volatile uint32_t timeoutRequestTime;
++}_PackedType t_FmPcdCcCapwapReassmTimeoutParams;
++
++/**************************************************************************//**
++ @Description PCD CTRL Parameters Page
++*//***************************************************************************/
++typedef _Packed struct t_FmPcdCtrlParamsPage {
++ volatile uint8_t reserved0[16];
++ volatile uint32_t iprIpv4Nia;
++ volatile uint32_t iprIpv6Nia;
++ volatile uint8_t reserved1[24];
++ volatile uint32_t ipfOptionsCounter;
++ volatile uint8_t reserved2[12];
++ volatile uint32_t misc;
++ volatile uint32_t errorsDiscardMask;
++ volatile uint32_t discardMask;
++ volatile uint8_t reserved3[4];
++ volatile uint32_t postBmiFetchNia;
++ volatile uint8_t reserved4[172];
++} _PackedType t_FmPcdCtrlParamsPage;
++
++
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++/* for UNDER_CONSTRUCTION_FM_RMU_USE_SEC it's defined in fm_ext.h */
++typedef uint32_t t_FmFmanCtrl;
++
++#define FPM_PORT_FM_CTL1 0x00000001
++#define FPM_PORT_FM_CTL2 0x00000002
++
++
++
++typedef struct t_FmPcdCcFragScratchPoolCmdParams {
++ uint32_t numOfBuffers;
++ uint8_t bufferPoolId;
++} t_FmPcdCcFragScratchPoolCmdParams;
++
++typedef struct t_FmPcdCcReassmTimeoutParams {
++ bool activate;
++ uint8_t tsbs;
++ uint32_t iprcpt;
++} t_FmPcdCcReassmTimeoutParams;
++
++typedef struct {
++ uint8_t baseEntry;
++ uint16_t numOfClsPlanEntries;
++ uint32_t vectors[FM_PCD_MAX_NUM_OF_CLS_PLANS];
++} t_FmPcdKgInterModuleClsPlanSet;
++
++/**************************************************************************//**
++ @Description Structure for binding a port to keygen schemes.
++*//***************************************************************************/
++typedef struct t_FmPcdKgInterModuleBindPortToSchemes {
++ uint8_t hardwarePortId;
++ uint8_t netEnvId;
++ bool useClsPlan; /**< TRUE if this port uses the clsPlan mechanism */
++ uint8_t numOfSchemes;
++ uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES];
++} t_FmPcdKgInterModuleBindPortToSchemes;
++
++typedef struct {
++ uint32_t nextCcNodeInfo;
++ t_List node;
++} t_CcNodeInfo;
++
++typedef struct
++{
++ t_Handle h_CcNode;
++ uint16_t index;
++ t_List node;
++}t_CcNodeInformation;
++#define CC_NODE_F_OBJECT(ptr) LIST_OBJECT(ptr, t_CcNodeInformation, node)
++
++typedef enum e_ModifyState
++{
++ e_MODIFY_STATE_ADD = 0,
++ e_MODIFY_STATE_REMOVE,
++ e_MODIFY_STATE_CHANGE
++} e_ModifyState;
++
++typedef struct
++{
++ t_Handle h_Manip;
++ t_List node;
++}t_ManipInfo;
++#define CC_NEXT_NODE_F_OBJECT(ptr) LIST_OBJECT(ptr, t_CcNodeInfo, node)
++
++typedef struct {
++ uint32_t type;
++ uint8_t prOffset;
++ uint16_t dataOffset;
++ uint8_t internalBufferOffset;
++ uint8_t numOfTasks;
++ uint8_t numOfExtraTasks;
++ uint8_t hardwarePortId;
++ t_FmRevisionInfo revInfo;
++ uint32_t nia;
++ uint32_t discardMask;
++} t_GetCcParams;
++
++typedef struct {
++ uint32_t type;
++ int psoSize;
++ uint32_t nia;
++ t_FmFmanCtrl orFmanCtrl;
++ bool overwrite;
++ uint8_t ofpDpde;
++} t_SetCcParams;
++
++typedef struct {
++ t_GetCcParams getCcParams;
++ t_SetCcParams setCcParams;
++} t_FmPortGetSetCcParams;
++
++typedef struct {
++ uint32_t type;
++ bool sleep;
++} t_FmSetParams;
++
++typedef struct {
++ uint32_t type;
++ uint32_t fmqm_gs;
++ uint32_t fm_npi;
++ uint32_t fm_cld;
++ uint32_t fmfp_extc;
++} t_FmGetParams;
++
++typedef struct {
++ t_FmSetParams setParams;
++ t_FmGetParams getParams;
++} t_FmGetSetParams;
++
++t_Error FmGetSetParams(t_Handle h_Fm, t_FmGetSetParams *p_Params);
++
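++/* Try to atomically take the flag guarded by h_Spinlock: returns TRUE and
++ * sets *p_Flag if the flag was clear, FALSE if it was already taken. When
++ * no spinlock is supplied, the test-and-set runs with all interrupts
++ * disabled instead. */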
++static __inline__ bool TRY_LOCK(t_Handle h_Spinlock, volatile bool *p_Flag)
++{
++ uint32_t intFlags;
++ if (h_Spinlock)
++ intFlags = XX_LockIntrSpinlock(h_Spinlock);
++ else
++ intFlags = XX_DisableAllIntr();
++
++ if (*p_Flag)
++ {
++ if (h_Spinlock)
++ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
++ else
++ XX_RestoreAllIntr(intFlags);
++ return FALSE;
++ }
++ *p_Flag = TRUE;
++
++ if (h_Spinlock)
++ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
++ else
++ XX_RestoreAllIntr(intFlags);
++
++ return TRUE;
++}
++
++#define RELEASE_LOCK(_flag) _flag = FALSE;
++
++/**************************************************************************//**
++ @Collection Defines used for manipulation of CC and BMI
++ @{
++*//***************************************************************************/
++#define INTERNAL_CONTEXT_OFFSET 0x80000000
++#define OFFSET_OF_PR 0x40000000
++#define MANIP_EXTRA_SPACE 0x20000000
++#define NUM_OF_TASKS 0x10000000
++#define OFFSET_OF_DATA 0x08000000
++#define HW_PORT_ID 0x04000000
++#define FM_REV 0x02000000
++#define GET_NIA_FPNE 0x01000000
++#define GET_NIA_PNDN 0x00800000
++#define NUM_OF_EXTRA_TASKS 0x00400000
++#define DISCARD_MASK 0x00200000
++
++#define UPDATE_NIA_PNEN 0x80000000
++#define UPDATE_PSO 0x40000000
++#define UPDATE_NIA_PNDN 0x20000000
++#define UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY 0x10000000
++#define UPDATE_OFP_DPTE 0x08000000
++#define UPDATE_NIA_FENE 0x04000000
++#define UPDATE_NIA_CMNE 0x02000000
++#define UPDATE_NIA_FPNE 0x01000000
++/* @} */
++
++/**************************************************************************//**
++ @Collection Defines used for manipulation of CC and KG
++ @{
++*//***************************************************************************/
++#define UPDATE_NIA_ENQ_WITHOUT_DMA 0x80000000
++#define UPDATE_CC_WITH_TREE 0x40000000
++#define UPDATE_CC_WITH_DELETE_TREE 0x20000000
++#define UPDATE_KG_NIA_CC_WA 0x10000000
++#define UPDATE_KG_OPT_MODE 0x08000000
++#define UPDATE_KG_NIA 0x04000000
++#define UPDATE_CC_SHADOW_CLEAR 0x02000000
++/* @} */
++
++#define UPDATE_FPM_BRKC_SLP 0x80000000
++#define UPDATE_FPM_EXTC 0x40000000
++#define UPDATE_FPM_EXTC_CLEAR 0x20000000
++#define GET_FMQM_GS 0x10000000
++#define GET_FM_NPI 0x08000000
++#define GET_FMFP_EXTC 0x04000000
++#define CLEAR_IRAM_READY 0x02000000
++#define UPDATE_FM_CLD 0x01000000
++#define GET_FM_CLD 0x00800000
++#define FM_MAX_NUM_OF_PORTS (FM_MAX_NUM_OF_OH_PORTS + \
++ FM_MAX_NUM_OF_1G_RX_PORTS + \
++ FM_MAX_NUM_OF_10G_RX_PORTS + \
++ FM_MAX_NUM_OF_1G_TX_PORTS + \
++ FM_MAX_NUM_OF_10G_TX_PORTS)
++
++#define MODULE_NAME_SIZE 30
++#define DUMMY_PORT_ID 0
++
++#define FM_LIODN_OFFSET_MASK 0x3FF
++
++/**************************************************************************//**
++ @Description NIA Description
++*//***************************************************************************/
++#define NIA_ENG_MASK 0x007C0000
++#define NIA_AC_MASK 0x0003ffff
++
++#define NIA_ORDER_RESTOR 0x00800000
++#define NIA_ENG_FM_CTL 0x00000000
++#define NIA_ENG_PRS 0x00440000
++#define NIA_ENG_KG 0x00480000
++#define NIA_ENG_PLCR 0x004C0000
++#define NIA_ENG_BMI 0x00500000
++#define NIA_ENG_QMI_ENQ 0x00540000
++#define NIA_ENG_QMI_DEQ 0x00580000
++
++#define NIA_FM_CTL_AC_CC 0x00000006
++#define NIA_FM_CTL_AC_HC 0x0000000C
++#define NIA_FM_CTL_AC_IND_MODE_TX 0x00000008
++#define NIA_FM_CTL_AC_IND_MODE_RX 0x0000000A
++#define NIA_FM_CTL_AC_POP_TO_N_STEP 0x0000000e
++#define NIA_FM_CTL_AC_PRE_BMI_FETCH_HEADER 0x00000010
++#define NIA_FM_CTL_AC_PRE_BMI_FETCH_FULL_FRAME 0x00000018
++#define NIA_FM_CTL_AC_POST_BMI_FETCH 0x00000012
++#define NIA_FM_CTL_AC_PRE_BMI_ENQ_FRAME 0x0000001A
++#define NIA_FM_CTL_AC_PRE_BMI_DISCARD_FRAME 0x0000001E
++#define NIA_FM_CTL_AC_POST_BMI_ENQ_ORR 0x00000014
++#define NIA_FM_CTL_AC_POST_BMI_ENQ 0x00000022
++#define NIA_FM_CTL_AC_PRE_CC 0x00000020
++#define NIA_FM_CTL_AC_POST_TX 0x00000024
++/* V3 only */
++#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME 0x00000028
++#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_DISCARD_FRAME 0x0000002A
++#define NIA_FM_CTL_AC_NO_IPACC_POP_TO_N_STEP 0x0000002C
++
++#define NIA_BMI_AC_ENQ_FRAME 0x00000002
++#define NIA_BMI_AC_TX_RELEASE 0x000002C0
++#define NIA_BMI_AC_RELEASE 0x000000C0
++#define NIA_BMI_AC_DISCARD 0x000000C1
++#define NIA_BMI_AC_TX 0x00000274
++#define NIA_BMI_AC_FETCH 0x00000208
++#define NIA_BMI_AC_MASK 0x000003FF
++
++#define NIA_KG_DIRECT 0x00000100
++#define NIA_KG_CC_EN 0x00000200
++#define NIA_PLCR_ABSOLUTE 0x00008000
++
++#define NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA 0x00000202
++
++#if defined(FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675) || defined(FM_ERROR_VSP_NO_MATCH_SW006)
++#define GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd) \
++ (uint32_t)((FmPcdIsAdvancedOffloadSupported(h_FmPcd)) ? \
++ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_PRE_BMI_ENQ_FRAME) : \
++ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME))
++#define GET_NIA_BMI_AC_DISCARD_FRAME(h_FmPcd) \
++ (uint32_t)((FmPcdIsAdvancedOffloadSupported(h_FmPcd)) ? \
++ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_PRE_BMI_DISCARD_FRAME) : \
++ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_DISCARD_FRAME))
++#define GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME() \
++ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME)
++#else
++#define GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd) \
++ (uint32_t)((FmPcdIsAdvancedOffloadSupported(h_FmPcd)) ? \
++ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_PRE_BMI_ENQ_FRAME) : \
++ (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))
++#define GET_NIA_BMI_AC_DISCARD_FRAME(h_FmPcd) \
++ (uint32_t)((FmPcdIsAdvancedOffloadSupported(h_FmPcd)) ? \
++ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_PRE_BMI_DISCARD_FRAME) : \
++ (NIA_ENG_BMI | NIA_BMI_AC_DISCARD))
++#define GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME() \
++ (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)
++#endif /* defined(FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675) || ... */
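++
++/* Note: when either errata define above is set, both the offload and the
++ * non-offload enqueue/discard paths are steered through the FM controller
++ * (the NO_IPACC actions cover the non-offload case); otherwise only the
++ * advanced-offload case goes through FM_CTL and the non-offload case uses
++ * the direct BMI enqueue/discard actions. */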
++
++/**************************************************************************//**
++ @Description CTRL Parameters Page defines
++*//***************************************************************************/
++#define FM_CTL_PARAMS_PAGE_OP_FIX_EN 0x80000000
++#define FM_CTL_PARAMS_PAGE_OFFLOAD_SUPPORT_EN 0x40000000
++#define FM_CTL_PARAMS_PAGE_ALWAYS_ON 0x00000100
++
++#define FM_CTL_PARAMS_PAGE_ERROR_VSP_MASK 0x0000003f
++
++/**************************************************************************//**
++ @Description Port Id defines
++*//***************************************************************************/
++#if (DPAA_VERSION == 10)
++#define BASE_OH_PORTID 1
++#else
++#define BASE_OH_PORTID 2
++#endif /* (DPAA_VERSION == 10) */
++#define BASE_1G_RX_PORTID 8
++#define BASE_10G_RX_PORTID 0x10
++#define BASE_1G_TX_PORTID 0x28
++#define BASE_10G_TX_PORTID 0x30
++
++#define FM_PCD_PORT_OH_BASE_INDX 0
++#define FM_PCD_PORT_1G_RX_BASE_INDX (FM_PCD_PORT_OH_BASE_INDX+FM_MAX_NUM_OF_OH_PORTS)
++#define FM_PCD_PORT_10G_RX_BASE_INDX (FM_PCD_PORT_1G_RX_BASE_INDX+FM_MAX_NUM_OF_1G_RX_PORTS)
++#define FM_PCD_PORT_1G_TX_BASE_INDX (FM_PCD_PORT_10G_RX_BASE_INDX+FM_MAX_NUM_OF_10G_RX_PORTS)
++#define FM_PCD_PORT_10G_TX_BASE_INDX (FM_PCD_PORT_1G_TX_BASE_INDX+FM_MAX_NUM_OF_1G_TX_PORTS)
++
++#if (FM_MAX_NUM_OF_OH_PORTS > 0)
++#define CHECK_PORT_ID_OH_PORTS(_relativePortId) \
++ if ((_relativePortId) >= FM_MAX_NUM_OF_OH_PORTS) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal OH_PORT port id"))
++#else
++#define CHECK_PORT_ID_OH_PORTS(_relativePortId) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal OH_PORT port id"))
++#endif
++#if (FM_MAX_NUM_OF_1G_RX_PORTS > 0)
++#define CHECK_PORT_ID_1G_RX_PORTS(_relativePortId) \
++ if ((_relativePortId) >= FM_MAX_NUM_OF_1G_RX_PORTS) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_RX_PORT port id"))
++#else
++#define CHECK_PORT_ID_1G_RX_PORTS(_relativePortId) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_RX_PORT port id"))
++#endif
++#if (FM_MAX_NUM_OF_10G_RX_PORTS > 0)
++#define CHECK_PORT_ID_10G_RX_PORTS(_relativePortId) \
++ if ((_relativePortId) >= FM_MAX_NUM_OF_10G_RX_PORTS) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_RX_PORT port id"))
++#else
++#define CHECK_PORT_ID_10G_RX_PORTS(_relativePortId) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_RX_PORT port id"))
++#endif
++#if (FM_MAX_NUM_OF_1G_TX_PORTS > 0)
++#define CHECK_PORT_ID_1G_TX_PORTS(_relativePortId) \
++ if ((_relativePortId) >= FM_MAX_NUM_OF_1G_TX_PORTS) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_TX_PORT port id"))
++#else
++#define CHECK_PORT_ID_1G_TX_PORTS(_relativePortId) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_TX_PORT port id"))
++#endif
++#if (FM_MAX_NUM_OF_10G_TX_PORTS > 0)
++#define CHECK_PORT_ID_10G_TX_PORTS(_relativePortId) \
++ if ((_relativePortId) >= FM_MAX_NUM_OF_10G_TX_PORTS) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_TX_PORT port id"))
++#else
++#define CHECK_PORT_ID_10G_TX_PORTS(_relativePortId) \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_TX_PORT port id"))
++#endif
++
++uint8_t SwPortIdToHwPortId(e_FmPortType type, uint8_t relativePortId, uint8_t majorRev, uint8_t minorRev);
++
++#define HW_PORT_ID_TO_SW_PORT_ID(_relativePortId, hardwarePortId) \
++do { \
++    if (((hardwarePortId) >= BASE_OH_PORTID) && \
++ ((hardwarePortId) < BASE_OH_PORTID+FM_MAX_NUM_OF_OH_PORTS)) \
++ _relativePortId = (uint8_t)((hardwarePortId)-BASE_OH_PORTID); \
++ else if (((hardwarePortId) >= BASE_10G_TX_PORTID) && \
++ ((hardwarePortId) < BASE_10G_TX_PORTID+FM_MAX_NUM_OF_10G_TX_PORTS)) \
++ _relativePortId = (uint8_t)((hardwarePortId)-BASE_10G_TX_PORTID); \
++ else if (((hardwarePortId) >= BASE_1G_TX_PORTID) && \
++ ((hardwarePortId) < BASE_1G_TX_PORTID+FM_MAX_NUM_OF_1G_TX_PORTS)) \
++ _relativePortId = (uint8_t)((hardwarePortId)-BASE_1G_TX_PORTID); \
++ else if (((hardwarePortId) >= BASE_10G_RX_PORTID) && \
++ ((hardwarePortId) < BASE_10G_RX_PORTID+FM_MAX_NUM_OF_10G_RX_PORTS)) \
++ _relativePortId = (uint8_t)((hardwarePortId)-BASE_10G_RX_PORTID); \
++ else if (((hardwarePortId) >= BASE_1G_RX_PORTID) && \
++ ((hardwarePortId) < BASE_1G_RX_PORTID+FM_MAX_NUM_OF_1G_RX_PORTS)) \
++ _relativePortId = (uint8_t)((hardwarePortId)-BASE_1G_RX_PORTID); \
++    else \
++    { \
++        _relativePortId = (uint8_t)DUMMY_PORT_ID; \
++        ASSERT_COND(FALSE); \
++    } \
++} while (0)
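++
++/* Usage sketch (assumes FM_MAX_NUM_OF_1G_RX_PORTS > 1): with
++ * BASE_1G_RX_PORTID == 8, hardware port id 9 falls into the 1G RX range and
++ * maps to relative id 1:
++ *
++ *     uint8_t relId;
++ *     HW_PORT_ID_TO_SW_PORT_ID(relId, 9);    // relId == 1
++ */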
++
++#define HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId) \
++do { \
++ if (((hardwarePortId) >= BASE_OH_PORTID) && ((hardwarePortId) < BASE_OH_PORTID+FM_MAX_NUM_OF_OH_PORTS)) \
++ swPortIndex = (uint8_t)((hardwarePortId)-BASE_OH_PORTID+FM_PCD_PORT_OH_BASE_INDX); \
++ else if (((hardwarePortId) >= BASE_1G_RX_PORTID) && \
++ ((hardwarePortId) < BASE_1G_RX_PORTID+FM_MAX_NUM_OF_1G_RX_PORTS)) \
++ swPortIndex = (uint8_t)((hardwarePortId)-BASE_1G_RX_PORTID+FM_PCD_PORT_1G_RX_BASE_INDX); \
++ else if (((hardwarePortId) >= BASE_10G_RX_PORTID) && \
++ ((hardwarePortId) < BASE_10G_RX_PORTID+FM_MAX_NUM_OF_10G_RX_PORTS)) \
++ swPortIndex = (uint8_t)((hardwarePortId)-BASE_10G_RX_PORTID+FM_PCD_PORT_10G_RX_BASE_INDX); \
++ else if (((hardwarePortId) >= BASE_1G_TX_PORTID) && \
++ ((hardwarePortId) < BASE_1G_TX_PORTID+FM_MAX_NUM_OF_1G_TX_PORTS)) \
++ swPortIndex = (uint8_t)((hardwarePortId)-BASE_1G_TX_PORTID+FM_PCD_PORT_1G_TX_BASE_INDX); \
++ else if (((hardwarePortId) >= BASE_10G_TX_PORTID) && \
++ ((hardwarePortId) < BASE_10G_TX_PORTID+FM_MAX_NUM_OF_10G_TX_PORTS)) \
++ swPortIndex = (uint8_t)((hardwarePortId)-BASE_10G_TX_PORTID+FM_PCD_PORT_10G_TX_BASE_INDX); \
++ else ASSERT_COND(FALSE); \
++} while (0)
++
++#define SW_PORT_INDX_TO_HW_PORT_ID(hardwarePortId, swPortIndex) \
++do { \
++ if (((swPortIndex) >= FM_PCD_PORT_OH_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_1G_RX_BASE_INDX)) \
++ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_OH_BASE_INDX+BASE_OH_PORTID); \
++ else if (((swPortIndex) >= FM_PCD_PORT_1G_RX_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_10G_RX_BASE_INDX)) \
++ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_1G_RX_BASE_INDX+BASE_1G_RX_PORTID); \
++    else if (((swPortIndex) >= FM_PCD_PORT_10G_RX_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_1G_TX_BASE_INDX)) \
++ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_10G_RX_BASE_INDX+BASE_10G_RX_PORTID); \
++ else if (((swPortIndex) >= FM_PCD_PORT_1G_TX_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_10G_TX_BASE_INDX)) \
++ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_1G_TX_BASE_INDX+BASE_1G_TX_PORTID); \
++ else if (((swPortIndex) >= FM_PCD_PORT_10G_TX_BASE_INDX) && ((swPortIndex) < FM_MAX_NUM_OF_PORTS)) \
++ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_10G_TX_BASE_INDX+BASE_10G_TX_PORTID); \
++ else ASSERT_COND(FALSE); \
++} while (0)
++
++#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
++#define BMI_FIFO_UNITS 0x100
++
++typedef struct {
++ void (*f_Isr) (t_Handle h_Arg);
++ t_Handle h_SrcHandle;
++ uint8_t guestId;
++} t_FmIntrSrc;
++
++#define ILLEGAL_HDR_NUM 0xFF
++#define NO_HDR_NUM FM_PCD_PRS_NUM_OF_HDRS
++
++#define IS_PRIVATE_HEADER(hdr) (((hdr) == HEADER_TYPE_USER_DEFINED_SHIM1) || \
++ ((hdr) == HEADER_TYPE_USER_DEFINED_SHIM2))
++#define IS_SPECIAL_HEADER(hdr) ((hdr) == HEADER_TYPE_MACSEC)
++
++static __inline__ uint8_t GetPrsHdrNum(e_NetHeaderType hdr)
++{
++    switch (hdr)
++    {
++    case (HEADER_TYPE_ETH): return 0;
++ case (HEADER_TYPE_LLC_SNAP): return 1;
++ case (HEADER_TYPE_VLAN): return 2;
++ case (HEADER_TYPE_PPPoE): return 3;
++ case (HEADER_TYPE_PPP): return 3;
++ case (HEADER_TYPE_MPLS): return 4;
++ case (HEADER_TYPE_IPv4): return 5;
++ case (HEADER_TYPE_IPv6): return 6;
++ case (HEADER_TYPE_GRE): return 7;
++ case (HEADER_TYPE_MINENCAP): return 8;
++ case (HEADER_TYPE_USER_DEFINED_L3): return 9;
++ case (HEADER_TYPE_TCP): return 10;
++ case (HEADER_TYPE_UDP): return 11;
++ case (HEADER_TYPE_IPSEC_AH):
++ case (HEADER_TYPE_IPSEC_ESP): return 12;
++ case (HEADER_TYPE_SCTP): return 13;
++ case (HEADER_TYPE_DCCP): return 14;
++ case (HEADER_TYPE_USER_DEFINED_L4): return 15;
++ case (HEADER_TYPE_USER_DEFINED_SHIM1):
++ case (HEADER_TYPE_USER_DEFINED_SHIM2):
++ case (HEADER_TYPE_MACSEC): return NO_HDR_NUM;
++ default:
++ return ILLEGAL_HDR_NUM;
++ }
++}
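++
++/* Examples, read directly off the switch above: GetPrsHdrNum(HEADER_TYPE_IPv4)
++ * yields 5; IPSEC AH and ESP share parser entry 12 (just as PPPoE and PPP
++ * share entry 3); the shim and MACSEC headers yield NO_HDR_NUM, as the parser
++ * keeps no dedicated entry for them. */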
++
++#define FM_PCD_MAX_NUM_OF_OPTIONS(clsPlanEntries) ((clsPlanEntries==256)? 8:((clsPlanEntries==128)? 7: ((clsPlanEntries==64)? 6: ((clsPlanEntries==32)? 5:0))))
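++/* Worked example: n options address 2^n classification plan entries, so a
++ * 256-entry plan supports 8 options (2^8 == 256) and a 32-entry plan
++ * supports 5 (2^5 == 32); any other size maps to 0 options. */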
++
++
++/**************************************************************************//**
++ @Description A structure for initializing a keygen classification plan group
++*//***************************************************************************/
++typedef struct t_FmPcdKgInterModuleClsPlanGrpParams {
++ uint8_t netEnvId; /* IN */
++ bool grpExists; /* OUT (unused in FmPcdKgBuildClsPlanGrp)*/
++ uint8_t clsPlanGrpId; /* OUT */
++ bool emptyClsPlanGrp; /* OUT */
++    uint8_t       numOfOptions;  /* OUT in FmPcdGetSetClsPlanGrpParams; IN in FmPcdKgBuildClsPlanGrp */
++    protocolOpt_t options[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)];
++                                 /* OUT in FmPcdGetSetClsPlanGrpParams; IN in FmPcdKgBuildClsPlanGrp */
++    uint32_t      optVectors[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)];
++                                 /* OUT in FmPcdGetSetClsPlanGrpParams; IN in FmPcdKgBuildClsPlanGrp */
++} t_FmPcdKgInterModuleClsPlanGrpParams;
++
++typedef struct t_FmPcdLock {
++ t_Handle h_Spinlock;
++ volatile bool flag;
++ t_List node;
++} t_FmPcdLock;
++#define FM_PCD_LOCK_OBJ(ptr) LIST_OBJECT(ptr, t_FmPcdLock, node)
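++
++/* Usage sketch, assuming LIST_OBJECT is the usual container-of helper from
++ * list_ext.h; lockList below is a hypothetical t_List head:
++ *
++ *     t_List *p_Node;
++ *     LIST_FOR_EACH(p_Node, &lockList)
++ *     {
++ *         t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Node);
++ *         ...
++ *     }
++ */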
++
++
++typedef t_Error (t_FmPortGetSetCcParamsCallback) (t_Handle h_FmPort,
++ t_FmPortGetSetCcParams *p_FmPortGetSetCcParams);
++
++
++/***********************************************************************/
++/* Common API for FM-PCD module */
++/***********************************************************************/
++t_Handle FmPcdGetHcHandle(t_Handle h_FmPcd);
++uint32_t FmPcdGetSwPrsOffset(t_Handle h_FmPcd, e_NetHeaderType hdr, uint8_t indexPerHdr);
++uint32_t FmPcdGetLcv(t_Handle h_FmPcd, uint32_t netEnvId, uint8_t hdrNum);
++uint32_t FmPcdGetMacsecLcv(t_Handle h_FmPcd, uint32_t netEnvId);
++void FmPcdIncNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId);
++void FmPcdDecNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId);
++uint8_t FmPcdGetNetEnvId(t_Handle h_NetEnv);
++void FmPcdPortRegister(t_Handle h_FmPcd, t_Handle h_FmPort, uint8_t hardwarePortId);
++uint32_t FmPcdLock(t_Handle h_FmPcd);
++void FmPcdUnlock(t_Handle h_FmPcd, uint32_t intFlags);
++bool FmPcdNetEnvIsHdrExist(t_Handle h_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr);
++t_Error FmPcdFragHcScratchPoolInit(t_Handle h_FmPcd, uint8_t scratchBpid);
++t_Error FmPcdRegisterReassmPort(t_Handle h_FmPcd, t_Handle h_IpReasmCommonPramTbl);
++t_Error FmPcdUnregisterReassmPort(t_Handle h_FmPcd, t_Handle h_IpReasmCommonPramTbl);
++bool FmPcdIsAdvancedOffloadSupported(t_Handle h_FmPcd);
++bool FmPcdLockTryLockAll(t_Handle h_FmPcd);
++void FmPcdLockUnlockAll(t_Handle h_FmPcd);
++t_Error FmPcdHcSync(t_Handle h_FmPcd);
++t_Handle FmGetPcd(t_Handle h_Fm);
++/***********************************************************************/
++/* Common API for FM-PCD KG module */
++/***********************************************************************/
++uint8_t FmPcdKgGetClsPlanGrpBase(t_Handle h_FmPcd, uint8_t clsPlanGrp);
++uint16_t FmPcdKgGetClsPlanGrpSize(t_Handle h_FmPcd, uint8_t clsPlanGrp);
++t_Error FmPcdKgBuildClsPlanGrp(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_Grp, t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet);
++
++uint8_t FmPcdKgGetSchemeId(t_Handle h_Scheme);
++#if (DPAA_VERSION >= 11)
++bool FmPcdKgGetVspe(t_Handle h_Scheme);
++#endif /* (DPAA_VERSION >= 11) */
++uint8_t FmPcdKgGetRelativeSchemeId(t_Handle h_FmPcd, uint8_t schemeId);
++void FmPcdKgDestroyClsPlanGrp(t_Handle h_FmPcd, uint8_t grpId);
++t_Error FmPcdKgCheckInvalidateSchemeSw(t_Handle h_Scheme);
++t_Error FmPcdKgBuildBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_BindPortToSchemes, uint32_t *p_SpReg, bool add);
++bool FmPcdKgHwSchemeIsValid(uint32_t schemeModeReg);
++uint32_t FmPcdKgBuildWriteSchemeActionReg(uint8_t schemeId, bool updateCounter);
++uint32_t FmPcdKgBuildReadSchemeActionReg(uint8_t schemeId);
++uint32_t FmPcdKgBuildWriteClsPlanBlockActionReg(uint8_t grpId);
++uint32_t FmPcdKgBuildWritePortSchemeBindActionReg(uint8_t hardwarePortId);
++uint32_t FmPcdKgBuildReadPortSchemeBindActionReg(uint8_t hardwarePortId);
++uint32_t FmPcdKgBuildWritePortClsPlanBindActionReg(uint8_t hardwarePortId);
++bool FmPcdKgIsSchemeValidSw(t_Handle h_Scheme);
++
++t_Error FmPcdKgBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind);
++t_Error FmPcdKgUnbindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind);
++uint32_t FmPcdKgGetRequiredAction(t_Handle h_FmPcd, uint8_t schemeId);
++uint32_t FmPcdKgGetRequiredActionFlag(t_Handle h_FmPcd, uint8_t schemeId);
++e_FmPcdDoneAction FmPcdKgGetDoneAction(t_Handle h_FmPcd, uint8_t schemeId);
++e_FmPcdEngine FmPcdKgGetNextEngine(t_Handle h_FmPcd, uint8_t schemeId);
++void FmPcdKgUpdateRequiredAction(t_Handle h_Scheme, uint32_t requiredAction);
++bool FmPcdKgIsDirectPlcr(t_Handle h_FmPcd, uint8_t schemeId);
++bool FmPcdKgIsDistrOnPlcrProfile(t_Handle h_FmPcd, uint8_t schemeId);
++uint16_t FmPcdKgGetRelativeProfileId(t_Handle h_FmPcd, uint8_t schemeId);
++t_Handle FmPcdKgGetSchemeHandle(t_Handle h_FmPcd, uint8_t relativeSchemeId);
++bool FmPcdKgIsSchemeHasOwners(t_Handle h_Scheme);
++t_Error FmPcdKgCcGetSetParams(t_Handle h_FmPcd, t_Handle h_Scheme, uint32_t requiredAction, uint32_t value);
++t_Error FmPcdKgSetOrBindToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t netEnvId, protocolOpt_t *p_OptArray, uint8_t *p_ClsPlanGrpId, bool *p_IsEmptyClsPlanGrp);
++t_Error FmPcdKgDeleteOrUnbindPortToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId);
++
++/***********************************************************************/
++/* Common API for FM-PCD parser module */
++/***********************************************************************/
++t_Error FmPcdPrsIncludePortInStatistics(t_Handle p_FmPcd, uint8_t hardwarePortId, bool include);
++
++/***********************************************************************/
++/* Common API for FM-PCD policer module */
++/***********************************************************************/
++t_Error FmPcdPlcrAllocProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles);
++t_Error FmPcdPlcrFreeProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId);
++bool FmPcdPlcrIsProfileValid(t_Handle h_FmPcd, uint16_t absoluteProfileId);
++uint16_t FmPcdPlcrGetPortProfilesBase(t_Handle h_FmPcd, uint8_t hardwarePortId);
++uint16_t FmPcdPlcrGetPortNumOfProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId);
++uint32_t FmPcdPlcrBuildWritePlcrActionRegs(uint16_t absoluteProfileId);
++uint32_t FmPcdPlcrBuildCounterProfileReg(e_FmPcdPlcrProfileCounters counter);
++uint32_t FmPcdPlcrBuildWritePlcrActionReg(uint16_t absoluteProfileId);
++uint32_t FmPcdPlcrBuildReadPlcrActionReg(uint16_t absoluteProfileId);
++uint16_t FmPcdPlcrProfileGetAbsoluteId(t_Handle h_Profile);
++t_Error FmPcdPlcrGetAbsoluteIdByProfileParams(t_Handle h_FmPcd,
++ e_FmPcdProfileTypeSelection profileType,
++ t_Handle h_FmPort,
++ uint16_t relativeProfile,
++ uint16_t *p_AbsoluteId);
++void FmPcdPlcrInvalidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId);
++void FmPcdPlcrValidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId);
++bool FmPcdPlcrHwProfileIsValid(uint32_t profileModeReg);
++uint32_t FmPcdPlcrGetRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId);
++uint32_t FmPcdPlcrGetRequiredActionFlag(t_Handle h_FmPcd, uint16_t absoluteProfileId);
++uint32_t FmPcdPlcrBuildNiaProfileReg(bool green, bool yellow, bool red);
++void FmPcdPlcrUpdateRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId, uint32_t requiredAction);
++t_Error FmPcdPlcrCcGetSetParams(t_Handle h_FmPcd, uint16_t profileIndx,uint32_t requiredAction);
++
++/***********************************************************************/
++/* Common API for FM-PCD CC module */
++/***********************************************************************/
++uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode);
++uint8_t FmPcdCcGetOffset(t_Handle h_CcNode);
++t_Error FmPcdCcRemoveKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint16_t keyIndex);
++t_Error FmPcdCcAddKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint16_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_FmPCdCcKeyParams);
++t_Error FmPcdCcModifyKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint16_t keyIndex, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask);
++t_Error FmPcdCcModifyKeyAndNextEngine(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint16_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_FmPcdCcKeyParams);
++t_Error FmPcdCcModifyMissNextEngineParamNode(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++t_Error FmPcdCcModifyNextEngineParamTree(t_Handle h_FmPcd, t_Handle h_FmPcdCcTree, uint8_t grpId, uint8_t index, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++uint32_t FmPcdCcGetNodeAddrOffsetFromNodeInfo(t_Handle h_FmPcd, t_Handle h_Pointer);
++t_Handle FmPcdCcTreeGetSavedManipParams(t_Handle h_FmTree);
++void FmPcdCcTreeSetSavedManipParams(t_Handle h_FmTree, t_Handle h_SavedManipParams);
++t_Error FmPcdCcTreeAddIPR(t_Handle h_FmPcd, t_Handle h_FmTree, t_Handle h_NetEnv, t_Handle h_ReassemblyManip, bool schemes);
++t_Error FmPcdCcTreeAddCPR(t_Handle h_FmPcd, t_Handle h_FmTree, t_Handle h_NetEnv, t_Handle h_ReassemblyManip, bool schemes);
++t_Error FmPcdCcBindTree(t_Handle h_FmPcd, t_Handle h_PcdParams, t_Handle h_CcTree, uint32_t *p_Offset,t_Handle h_FmPort);
++t_Error FmPcdCcUnbindTree(t_Handle h_FmPcd, t_Handle h_CcTree);
++
++/***********************************************************************/
++/* Common API for FM-PCD Manip module */
++/***********************************************************************/
++t_Error FmPcdManipUpdate(t_Handle h_FmPcd, t_Handle h_PcdParams, t_Handle h_FmPort, t_Handle h_Manip, t_Handle h_Ad, bool validate, int level, t_Handle h_FmTree, bool modify);
++
++/***********************************************************************/
++/* Common API for FM-Port module */
++/***********************************************************************/
++#if (DPAA_VERSION >= 11)
++typedef enum e_FmPortGprFuncType
++{
++ e_FM_PORT_GPR_EMPTY = 0,
++ e_FM_PORT_GPR_MURAM_PAGE
++} e_FmPortGprFuncType;
++
++t_Error FmPortSetGprFunc(t_Handle h_FmPort, e_FmPortGprFuncType gprFunc, void **p_Value);
++#endif /* (DPAA_VERSION >= 11) */
++t_Error FmGetSetParams(t_Handle h_Fm, t_FmGetSetParams *p_FmGetSetParams);
++t_Error FmPortGetSetCcParams(t_Handle h_FmPort, t_FmPortGetSetCcParams *p_FmPortGetSetCcParams);
++uint8_t FmPortGetNetEnvId(t_Handle h_FmPort);
++uint8_t FmPortGetHardwarePortId(t_Handle h_FmPort);
++uint32_t FmPortGetPcdEngines(t_Handle h_FmPort);
++void FmPortPcdKgSwUnbindClsPlanGrp (t_Handle h_FmPort);
++
++
++#if (DPAA_VERSION >= 11)
++t_Error FmPcdFrmReplicUpdate(t_Handle h_FmPcd, t_Handle h_FmPort, t_Handle h_FrmReplic);
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Function FmRegisterIntr
++
++ @Description Used to register an inter-module event handler to be processed by FM
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] mod The module that causes the event
++ @Param[in] modId Module id - if more than one instantiation of this
++ module exists; 0 otherwise.
++ @Param[in] intrType Interrupt type (error/normal) selection.
++ @Param[in] f_Isr The interrupt service routine.
++ @Param[in] h_Arg Argument to be passed to f_Isr.
++
++ @Return None.
++*//***************************************************************************/
++void FmRegisterIntr(t_Handle h_Fm,
++ e_FmEventModules mod,
++ uint8_t modId,
++ e_FmIntrType intrType,
++ void (*f_Isr) (t_Handle h_Arg),
++ t_Handle h_Arg);
++
++/**************************************************************************//**
++ @Function FmUnregisterIntr
++
++ @Description Used to un-register an inter-module event handler that was processed by FM
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] mod The module that causes the event
++ @Param[in] modId Module id - if more than one instantiation of this
++ module exists; 0 otherwise.
++ @Param[in] intrType Interrupt type (error/normal) selection.
++
++ @Return None.
++*//***************************************************************************/
++void FmUnregisterIntr(t_Handle h_Fm,
++ e_FmEventModules mod,
++ uint8_t modId,
++ e_FmIntrType intrType);
++
++/**************************************************************************//**
++ @Function FmRegisterFmCtlIntr
++
++ @Description Used to register to one of the fmCtl events in the FM module
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] eventRegId FmCtl event id (0-7).
++ @Param[in] f_Isr The interrupt service routine.
++
++ @Return None.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++void FmRegisterFmCtlIntr(t_Handle h_Fm, uint8_t eventRegId, void (*f_Isr) (t_Handle h_Fm, uint32_t event));
++
++
++/**************************************************************************//**
++ @Description enum for defining MAC types
++*//***************************************************************************/
++typedef enum e_FmMacType {
++ e_FM_MAC_10G = 0, /**< 10G MAC */
++ e_FM_MAC_1G /**< 1G MAC */
++} e_FmMacType;
++
++/**************************************************************************//**
++ @Description Structure for port-FM communication during FM_PORT_Init.
++ Fields commented 'IN' are passed by the port module to be used
++ by the FM module.
++ Fields commented 'OUT' will be filled by FM before returning to port.
++ Some fields are optional (depending on configuration) and
++ will be analyzed by the port and FM modules accordingly.
++*//***************************************************************************/
++typedef struct t_FmInterModulePortInitParams {
++ uint8_t hardwarePortId; /**< IN. port Id */
++ e_FmPortType portType; /**< IN. Port type */
++ bool independentMode; /**< IN. TRUE if FM Port operates in independent mode */
++ uint16_t liodnOffset; /**< IN. Port's requested resource */
++ uint8_t numOfTasks; /**< IN. Port's requested resource */
++ uint8_t numOfExtraTasks; /**< IN. Port's requested resource */
++ uint8_t numOfOpenDmas; /**< IN. Port's requested resource */
++ uint8_t numOfExtraOpenDmas; /**< IN. Port's requested resource */
++ uint32_t sizeOfFifo; /**< IN. Port's requested resource */
++ uint32_t extraSizeOfFifo; /**< IN. Port's requested resource */
++ uint8_t deqPipelineDepth; /**< IN. Port's requested resource */
++ uint16_t maxFrameLength; /**< IN. Port's max frame length. */
++ uint16_t liodnBase; /**< IN. Irrelevant for P4080 rev 1.
++ LIODN base for this port, to be
++ used together with LIODN offset. */
++ t_FmPhysAddr fmMuramPhysBaseAddr;/**< OUT. FM-MURAM physical address*/
++} t_FmInterModulePortInitParams;
++
++/**************************************************************************//**
++ @Description Structure for port-FM communication during FM_PORT_Free.
++*//***************************************************************************/
++typedef struct t_FmInterModulePortFreeParams {
++ uint8_t hardwarePortId; /**< IN. port Id */
++ e_FmPortType portType; /**< IN. Port type */
++ uint8_t deqPipelineDepth; /**< IN. Port's requested resource */
++} t_FmInterModulePortFreeParams;
++
++/**************************************************************************//**
++ @Function FmGetPcdPrsBaseAddr
++
++ @Description Get the base address of the Parser from the FM module
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return Base address.
++*//***************************************************************************/
++uintptr_t FmGetPcdPrsBaseAddr(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FmGetPcdKgBaseAddr
++
++ @Description Get the base address of the Keygen from the FM module
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return Base address.
++*//***************************************************************************/
++uintptr_t FmGetPcdKgBaseAddr(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FmGetPcdPlcrBaseAddr
++
++ @Description Get the base address of the Policer from the FM module
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return Base address.
++*//***************************************************************************/
++uintptr_t FmGetPcdPlcrBaseAddr(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FmGetMuramHandle
++
++ @Description Get the handle of the MURAM from the FM module
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return MURAM module handle.
++*//***************************************************************************/
++t_Handle FmGetMuramHandle(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FmGetPhysicalMuramBase
++
++ @Description Get the physical base address of the MURAM from the FM module
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[out] fmPhysAddr A pointer to be filled with the physical MURAM base address.
++
++ @Return None.
++*//***************************************************************************/
++void FmGetPhysicalMuramBase(t_Handle h_Fm, t_FmPhysAddr *fmPhysAddr);
++
++/**************************************************************************//**
++ @Function FmGetTimeStampScale
++
++ @Description Used internally by other modules in order to get the timeStamp
++ period as requested by the application.
++
++ This function returns the number of the timestamp bit that is
++ incremented every 1 usec. To calculate the timestamp period in
++ nsec, use 1000 / (1 << FmGetTimeStampScale()).
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return Bit that counts 1 usec.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++uint32_t FmGetTimeStampScale(t_Handle h_Fm);
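++
++/* Worked example of the formula above: if FmGetTimeStampScale() returns 3,
++ * timestamp bit 3 (weight 2^3 = 8) increments every 1 usec, so one timestamp
++ * count corresponds to 1000 / (1 << 3) = 125 nsec. */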
++
++/**************************************************************************//**
++ @Function FmResumeStalledPort
++
++ @Description Used internally by FM port to release a stalled port.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] hardwarePortId HW port id.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FmResumeStalledPort(t_Handle h_Fm, uint8_t hardwarePortId);
++
++/**************************************************************************//**
++ @Function FmIsPortStalled
++
++ @Description Used internally by FM port to read the port's status.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] hardwarePortId HW port id.
++ @Param[in] p_IsStalled A pointer to the boolean port stalled state
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FmIsPortStalled(t_Handle h_Fm, uint8_t hardwarePortId, bool *p_IsStalled);
++
++/**************************************************************************//**
++ @Function FmResetMac
++
++ @Description Used by MAC driver to reset the MAC registers
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] type MAC type.
++ @Param[in] macId MAC id - according to type.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FmResetMac(t_Handle h_Fm, e_FmMacType type, uint8_t macId);
++
++/**************************************************************************//**
++ @Function FmGetClockFreq
++
++ @Description Used by MAC driver to get the FM clock frequency
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return clock-freq on success; 0 otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++uint16_t FmGetClockFreq(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FmGetMacClockFreq
++
++ @Description Used by MAC driver to get the MAC clock frequency
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return clock-freq on success; 0 otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++uint16_t FmGetMacClockFreq(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FmGetId
++
++ @Description Used by PCD driver to read the FM id
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return The FM id.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++uint8_t FmGetId(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FmReset
++
++ @Description Used to reset the FM
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FmReset(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FmGetSetPortParams
++
++ @Description Used by FM-PORT driver to pass and receive parameters between
++ PORT and FM modules.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in,out] p_PortParams A structure of FM Port parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FmGetSetPortParams(t_Handle h_Fm,t_FmInterModulePortInitParams *p_PortParams);
++
++/**************************************************************************//**
++ @Function FmFreePortParams
++
++ @Description Used by FM-PORT driver to free port's resources within the FM.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in,out] p_PortParams A structure of FM Port parameters.
++
++ @Return None.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++void FmFreePortParams(t_Handle h_Fm,t_FmInterModulePortFreeParams *p_PortParams);
++
++/**************************************************************************//**
++ @Function FmSetNumOfRiscsPerPort
++
++ @Description Used by FM-PORT driver to pass parameters between the
++ PORT and FM modules for configuring the number of RISC engines per port.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] hardwarePortId hardware port Id.
++ @Param[in] numOfFmanCtrls number of Fman Controllers.
++ @Param[in] orFmanCtrl Fman Controller for order restoration.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FmSetNumOfRiscsPerPort(t_Handle h_Fm, uint8_t hardwarePortId, uint8_t numOfFmanCtrls, t_FmFmanCtrl orFmanCtrl);
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++/**************************************************************************//*
++ @Function FmDumpPortRegs
++
++ @Description Dumps FM port registers which are part of FM common registers
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] hardwarePortId HW port id.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FmDumpPortRegs(t_Handle h_Fm,uint8_t hardwarePortId);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++void FmRegisterPcd(t_Handle h_Fm, t_Handle h_FmPcd);
++void FmUnregisterPcd(t_Handle h_Fm);
++t_Handle FmGetPcdHandle(t_Handle h_Fm);
++t_Error FmEnableRamsEcc(t_Handle h_Fm);
++t_Error FmDisableRamsEcc(t_Handle h_Fm);
++void FmGetRevision(t_Handle h_Fm, t_FmRevisionInfo *p_FmRevisionInfo);
++t_Error FmAllocFmanCtrlEventReg(t_Handle h_Fm, uint8_t *p_EventId);
++void FmFreeFmanCtrlEventReg(t_Handle h_Fm, uint8_t eventId);
++void FmSetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, uint32_t enableEvents);
++uint32_t FmGetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId);
++void FmRegisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, void (*f_Isr) (t_Handle h_Fm, uint32_t event), t_Handle h_Arg);
++void FmUnregisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId);
++t_Error FmSetMacMaxFrame(t_Handle h_Fm, e_FmMacType type, uint8_t macId, uint16_t mtu);
++bool FmIsMaster(t_Handle h_Fm);
++uint8_t FmGetGuestId(t_Handle h_Fm);
++uint16_t FmGetTnumAgingPeriod(t_Handle h_Fm);
++t_Error FmSetPortPreFetchConfiguration(t_Handle h_Fm, uint8_t portNum, bool preFetchConfigured);
++t_Error FmGetPortPreFetchConfiguration(t_Handle h_Fm, uint8_t portNum, bool *p_PortConfigured, bool *p_PreFetchConfigured);
++
++
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++t_Error Fm10GTxEccWorkaround(t_Handle h_Fm, uint8_t macId);
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++
++void FmMuramClear(t_Handle h_FmMuram);
++t_Error FmSetNumOfOpenDmas(t_Handle h_Fm,
++ uint8_t hardwarePortId,
++ uint8_t *p_NumOfOpenDmas,
++ uint8_t *p_NumOfExtraOpenDmas,
++ bool initialConfig);
++t_Error FmSetNumOfTasks(t_Handle h_Fm,
++ uint8_t hardwarePortId,
++ uint8_t *p_NumOfTasks,
++ uint8_t *p_NumOfExtraTasks,
++ bool initialConfig);
++t_Error FmSetSizeOfFifo(t_Handle h_Fm,
++ uint8_t hardwarePortId,
++ uint32_t *p_SizeOfFifo,
++ uint32_t *p_ExtraSizeOfFifo,
++ bool initialConfig);
++
++t_Error FmSetCongestionGroupPFCpriority(t_Handle h_Fm,
++ uint32_t congestionGroupId,
++ uint8_t priorityBitMap);
++
++#if (DPAA_VERSION >= 11)
++t_Error FmVSPAllocForPort(t_Handle h_Fm,
++ e_FmPortType portType,
++ uint8_t portId,
++ uint8_t numOfStorageProfiles);
++
++t_Error FmVSPFreeForPort(t_Handle h_Fm,
++ e_FmPortType portType,
++ uint8_t portId);
++
++t_Error FmVSPGetAbsoluteProfileId(t_Handle h_Fm,
++ e_FmPortType portType,
++ uint8_t portId,
++ uint16_t relativeProfile,
++ uint16_t *p_AbsoluteId);
++t_Error FmVSPCheckRelativeProfile(t_Handle h_Fm,
++ e_FmPortType portType,
++ uint8_t portId,
++ uint16_t relativeProfile);
++
++uintptr_t FmGetVSPBaseAddr(t_Handle h_Fm);
++#endif /* (DPAA_VERSION >= 11) */
++
++
++#endif /* __FM_COMMON_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h
+new file mode 100644
+index 00000000..492aa8a3
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h
+@@ -0,0 +1,93 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef __FM_HC_H
++#define __FM_HC_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "fsl_fman_kg.h"
++
++#define __ERR_MODULE__ MODULE_FM_PCD
++
++
++typedef struct t_FmHcParams {
++ t_Handle h_Fm;
++ t_Handle h_FmPcd;
++ t_FmPcdHcParams params;
++} t_FmHcParams;
++
++
++t_Handle FmHcConfigAndInit(t_FmHcParams *p_FmHcParams);
++void FmHcFree(t_Handle h_FmHc);
++t_Error FmHcSetFramesDataMemory(t_Handle h_FmHc,
++ uint8_t memId);
++t_Error FmHcDumpRegs(t_Handle h_FmHc);
++
++void FmHcTxConf(t_Handle h_FmHc, t_DpaaFD *p_Fd);
++
++t_Error FmHcPcdKgSetScheme(t_Handle h_FmHc,
++ t_Handle h_Scheme,
++ struct fman_kg_scheme_regs *p_SchemeRegs,
++ bool updateCounter);
++t_Error FmHcPcdKgDeleteScheme(t_Handle h_FmHc, t_Handle h_Scheme);
++t_Error FmHcPcdCcCapwapTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcCapwapReassmTimeoutParams *p_CcCapwapReassmTimeoutParams );
++t_Error FmHcPcdCcIpFragScratchPollCmd(t_Handle h_FmHc, bool fill, t_FmPcdCcFragScratchPoolCmdParams *p_FmPcdCcFragScratchPoolCmdParams);
++t_Error FmHcPcdCcTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcReassmTimeoutParams *p_CcReassmTimeoutParams, uint8_t *p_Result);
++t_Error FmHcPcdKgSetClsPlan(t_Handle h_FmHc, t_FmPcdKgInterModuleClsPlanSet *p_Set);
++t_Error FmHcPcdKgDeleteClsPlan(t_Handle h_FmHc, uint8_t clsPlanGrpId);
++
++t_Error FmHcPcdKgSetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t value);
++uint32_t FmHcPcdKgGetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme);
++
++t_Error FmHcPcdCcDoDynamicChange(t_Handle h_FmHc, uint32_t oldAdAddrOffset, uint32_t newAdAddrOffset);
++
++t_Error FmHcPcdPlcrSetProfile(t_Handle h_FmHc, t_Handle h_Profile, t_FmPcdPlcrProfileRegs *p_PlcrRegs);
++t_Error FmHcPcdPlcrDeleteProfile(t_Handle h_FmHc, t_Handle h_Profile);
++
++t_Error FmHcPcdPlcrSetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value);
++uint32_t FmHcPcdPlcrGetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter);
++
++t_Error FmHcKgWriteSp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t spReg, bool add);
++t_Error FmHcKgWriteCpp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t cppReg);
++
++t_Error FmHcPcdKgCcGetSetParams(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t requiredAction, uint32_t value);
++t_Error FmHcPcdPlcrCcGetSetParams(t_Handle h_FmHc,uint16_t absoluteProfileId, uint32_t requiredAction);
++
++t_Error FmHcPcdSync(t_Handle h_FmHc);
++t_Handle FmHcGetPort(t_Handle h_FmHc);
++
++
++
++
++#endif /* __FM_HC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_sp_common.h b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_sp_common.h
+new file mode 100644
+index 00000000..f9dd384b
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_sp_common.h
+@@ -0,0 +1,117 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_sp_common.h
++
++ @Description FM storage-profile (SP) common internal definitions.
++*//***************************************************************************/
++#ifndef __FM_SP_COMMON_H
++#define __FM_SP_COMMON_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++
++#include "fm_ext.h"
++#include "fm_pcd_ext.h"
++#include "fsl_fman.h"
++
++/**************************************************************************//**
++ @Description defaults
++*//***************************************************************************/
++#define DEFAULT_FM_SP_bufferPrefixContent_privDataSize 0
++#define DEFAULT_FM_SP_bufferPrefixContent_passPrsResult FALSE
++#define DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp FALSE
++#define DEFAULT_FM_SP_bufferPrefixContent_allOtherPCDInfo FALSE
++#define DEFAULT_FM_SP_bufferPrefixContent_dataAlign 64
++
++/**************************************************************************//**
++ @Description structure for defining internal context copying
++*//***************************************************************************/
++typedef struct
++{
++ uint16_t extBufOffset; /**< Offset in the external buffer to which the internal
++ context is copied (Rx) or from which it is taken (Tx, Op). */
++ uint8_t intContextOffset; /**< Offset within internal context to copy from
++ (Rx) or to copy to (Tx, Op). */
++ uint16_t size; /**< Internal offset size to be copied */
++} t_FmSpIntContextDataCopy;
++
++/**************************************************************************//**
++ @Description struct for defining external buffer margins
++*//***************************************************************************/
++typedef struct {
++ uint16_t startMargins; /**< Number of bytes to be left at the beginning
++ of the external buffer (must be divisible by 16) */
++ uint16_t endMargins; /**< number of bytes to be left at the end
++ of the external buffer (must be divisible by 16) */
++} t_FmSpBufMargins;
++
++typedef struct {
++ uint32_t dataOffset;
++ uint32_t prsResultOffset;
++ uint32_t timeStampOffset;
++ uint32_t hashResultOffset;
++ uint32_t pcdInfoOffset;
++ uint32_t manipOffset;
++} t_FmSpBufferOffsets;
++
++
++t_Error FmSpBuildBufferStructure(t_FmSpIntContextDataCopy *p_FmPortIntContextDataCopy,
++ t_FmBufferPrefixContent *p_BufferPrefixContent,
++ t_FmSpBufMargins *p_FmPortBufMargins,
++ t_FmSpBufferOffsets *p_FmPortBufferOffsets,
++ uint8_t *internalBufferOffset);
++
++t_Error FmSpCheckIntContextParams(t_FmSpIntContextDataCopy *p_FmSpIntContextDataCopy);
++t_Error FmSpCheckBufPoolsParams(t_FmExtPools *p_FmExtPools,
++ t_FmBackupBmPools *p_FmBackupBmPools,
++ t_FmBufPoolDepletion *p_FmBufPoolDepletion);
++t_Error FmSpCheckBufMargins(t_FmSpBufMargins *p_FmSpBufMargins);
++void FmSpSetBufPoolsInAscOrderOfBufSizes(t_FmExtPools *p_FmExtPools, uint8_t *orderedArray, uint16_t *sizesArray);
++
++t_Error FmPcdSpAllocProfiles(t_Handle h_FmPcd,
++ uint8_t hardwarePortId,
++ uint16_t numOfStorageProfiles,
++ uint16_t *base,
++ uint8_t *log2Num);
++t_Error FmPcdSpGetAbsoluteProfileId(t_Handle h_FmPcd,
++ t_Handle h_FmPort,
++ uint16_t relativeProfile,
++ uint16_t *p_AbsoluteId);
++void SpInvalidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId);
++void SpValidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId);
++
++
++#endif /* __FM_SP_COMMON_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/etc/Makefile b/drivers/net/ethernet/freescale/sdk_fman/etc/Makefile
+new file mode 100644
+index 00000000..d03a519c
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/etc/Makefile
+@@ -0,0 +1,12 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++obj-y += fsl-ncsw-etc.o
++
++fsl-ncsw-etc-objs := mm.o memcpy.o sprint.o list.o error.o
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/etc/error.c b/drivers/net/ethernet/freescale/sdk_fman/etc/error.c
+new file mode 100644
+index 00000000..fead7f50
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/etc/error.c
+@@ -0,0 +1,95 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/*
++
++ @File error.c
++
++ @Description General errors and events reporting utilities.
++*//***************************************************************************/
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++#include "error_ext.h"
++
++
++const char *dbgLevelStrings[] =
++{
++ "CRITICAL"
++ ,"MAJOR"
++ ,"MINOR"
++ ,"WARNING"
++ ,"INFO"
++ ,"TRACE"
++};
++
++
++char * ErrTypeStrings (e_ErrorType err)
++{
++ switch (err)
++ {
++ case (E_OK): return "OK";
++ case (E_WRITE_FAILED): return "Write Access Failed";
++ case (E_NO_DEVICE): return "No Device";
++ case (E_NOT_AVAILABLE): return "Resource Is Unavailable";
++ case (E_NO_MEMORY): return "Memory Allocation Failed";
++ case (E_INVALID_ADDRESS): return "Invalid Address";
++ case (E_BUSY): return "Resource Is Busy";
++ case (E_ALREADY_EXISTS): return "Resource Already Exists";
++ case (E_INVALID_OPERATION): return "Invalid Operation";
++ case (E_INVALID_VALUE): return "Invalid Value";
++ case (E_NOT_IN_RANGE): return "Value Out Of Range";
++ case (E_NOT_SUPPORTED): return "Unsupported Operation";
++ case (E_INVALID_STATE): return "Invalid State";
++ case (E_INVALID_HANDLE): return "Invalid Handle";
++ case (E_INVALID_ID): return "Invalid ID";
++ case (E_NULL_POINTER): return "Unexpected NULL Pointer";
++ case (E_INVALID_SELECTION): return "Invalid Selection";
++ case (E_INVALID_COMM_MODE): return "Invalid Communication Mode";
++ case (E_INVALID_MEMORY_TYPE): return "Invalid Memory Type";
++ case (E_INVALID_CLOCK): return "Invalid Clock";
++ case (E_CONFLICT): return "Conflict In Settings";
++ case (E_NOT_ALIGNED): return "Incorrect Alignment";
++ case (E_NOT_FOUND): return "Resource Not Found";
++ case (E_FULL): return "Resource Is Full";
++ case (E_EMPTY): return "Resource Is Empty";
++ case (E_ALREADY_FREE): return "Resource Already Free";
++ case (E_READ_FAILED): return "Read Access Failed";
++ case (E_INVALID_FRAME): return "Invalid Frame";
++ case (E_SEND_FAILED): return "Send Operation Failed";
++ case (E_RECEIVE_FAILED): return "Receive Operation Failed";
++ case (E_TIMEOUT): return "Operation Timed Out";
++ default:
++ break;
++ }
++ return NULL;
++}
++#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/etc/list.c b/drivers/net/ethernet/freescale/sdk_fman/etc/list.c
+new file mode 100644
+index 00000000..2d044be2
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/etc/list.c
+@@ -0,0 +1,71 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++
++ @File list.c
++
++ @Description Implementation of list.
++*//***************************************************************************/
++#include "std_ext.h"
++#include "list_ext.h"
++
++
++void LIST_Append(t_List *p_NewList, t_List *p_Head)
++{
++ t_List *p_First = LIST_FIRST(p_NewList);
++
++ if (p_First != p_NewList)
++ {
++ t_List *p_Last = LIST_LAST(p_NewList);
++ t_List *p_Cur = LIST_NEXT(p_Head);
++
++ LIST_PREV(p_First) = p_Head;
++ LIST_FIRST(p_Head) = p_First;
++ LIST_NEXT(p_Last) = p_Cur;
++ LIST_LAST(p_Cur) = p_Last;
++ }
++}
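++
++/* Note on LIST_Append() above: it splices all objects of p_NewList in right
++ * after p_Head, but it does not re-initialize p_NewList itself; the caller
++ * must re-initialize p_NewList before reusing it as a list head. */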
++
++
++int LIST_NumOfObjs(t_List *p_List)
++{
++ t_List *p_Tmp;
++ int numOfObjs = 0;
++
++ if (!LIST_IsEmpty(p_List))
++ LIST_FOR_EACH(p_Tmp, p_List)
++ numOfObjs++;
++
++ return numOfObjs;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/etc/memcpy.c b/drivers/net/ethernet/freescale/sdk_fman/etc/memcpy.c
+new file mode 100644
+index 00000000..fa203ec7
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/etc/memcpy.c
+@@ -0,0 +1,620 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++
++#include "std_ext.h"
++#include "xx_ext.h"
++#include "memcpy_ext.h"
++
++void * MemCpy8(void* pDst, void* pSrc, uint32_t size)
++{
++ int i;
++
++ for(i = 0; i < size; ++i)
++ *(((uint8_t*)(pDst)) + i) = *(((uint8_t*)(pSrc)) + i);
++
++ return pDst;
++}
++
++void * MemSet8(void* pDst, int c, uint32_t size)
++{
++ int i;
++
++ for(i = 0; i < size; ++i)
++ *(((uint8_t*)(pDst)) + i) = (uint8_t)(c);
++
++ return pDst;
++}
++
++void * MemCpy32(void* pDst,void* pSrc, uint32_t size)
++{
++ uint32_t leftAlign;
++ uint32_t rightAlign;
++ uint32_t lastWord;
++ uint32_t currWord;
++ uint32_t *p_Src32;
++ uint32_t *p_Dst32;
++ uint8_t *p_Src8;
++ uint8_t *p_Dst8;
++
++ p_Src8 = (uint8_t*)(pSrc);
++ p_Dst8 = (uint8_t*)(pDst);
++    /* First copy byte by byte until the source pointer is word-aligned.
++     * This step is necessary to ensure we never try to access data that
++     * lies before the source buffer and therefore does not belong to us.
++     */
++ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */
++ {
++ *p_Dst8++ = *p_Src8++;
++ size--;
++ }
++
++    /* align the destination (possibly leaving the source unaligned) */
++ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
++ {
++ *p_Dst8++ = *p_Src8++;
++ size--;
++ }
++
++ /* dest is aligned and source is not necessarily aligned */
++ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */
++ rightAlign = 32 - leftAlign;
++
++
++ if (leftAlign == 0)
++ {
++ /* source is also aligned */
++ p_Src32 = (uint32_t*)(p_Src8);
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ while (size >> 2) /* size >= 4 */
++ {
++ *p_Dst32++ = *p_Src32++;
++ size -= 4;
++ }
++ p_Src8 = (uint8_t*)(p_Src32);
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ }
++ else
++ {
++ /* source is not aligned (destination is aligned)*/
++ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3));
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ lastWord = *p_Src32++;
++ while(size >> 3) /* size >= 8 */
++ {
++ currWord = *p_Src32;
++ *p_Dst32 = (lastWord << leftAlign) | (currWord >> rightAlign);
++ lastWord = currWord;
++ p_Src32++;
++ p_Dst32++;
++ size -= 4;
++ }
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3);
++ }
++
++ /* complete the left overs */
++ while (size--)
++ *p_Dst8++ = *p_Src8++;
++
++ return pDst;
++}
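++
++/* Note on MemCpy32() above: in the unaligned path the word loop runs only
++ * while size >= 8 (size >> 3) even though each iteration copies 4 bytes;
++ * the slack guarantees the read-ahead of currWord from the word-aligned
++ * source stays within the caller's buffer, and the trailing byte loop then
++ * copies the remainder. The three IO variants below follow the same scheme
++ * using the GET/WRITE accessors for device memory. */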
++
++void * IO2IOCpy32(void* pDst,void* pSrc, uint32_t size)
++{
++ uint32_t leftAlign;
++ uint32_t rightAlign;
++ uint32_t lastWord;
++ uint32_t currWord;
++ uint32_t *p_Src32;
++ uint32_t *p_Dst32;
++ uint8_t *p_Src8;
++ uint8_t *p_Dst8;
++
++ p_Src8 = (uint8_t*)(pSrc);
++ p_Dst8 = (uint8_t*)(pDst);
++    /* First copy byte by byte until the source pointer is word-aligned.
++     * This step is necessary to ensure we never try to access data that
++     * lies before the source buffer and therefore does not belong to us.
++     */
++ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */
++ {
++ WRITE_UINT8(*p_Dst8, GET_UINT8(*p_Src8));
++ p_Dst8++;p_Src8++;
++ size--;
++ }
++
++    /* align the destination (possibly leaving the source unaligned) */
++ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
++ {
++ WRITE_UINT8(*p_Dst8, GET_UINT8(*p_Src8));
++ p_Dst8++;p_Src8++;
++ size--;
++ }
++
++ /* dest is aligned and source is not necessarily aligned */
++ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */
++ rightAlign = 32 - leftAlign;
++
++ if (leftAlign == 0)
++ {
++ /* source is also aligned */
++ p_Src32 = (uint32_t*)(p_Src8);
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ while (size >> 2) /* size >= 4 */
++ {
++ WRITE_UINT32(*p_Dst32, GET_UINT32(*p_Src32));
++ p_Dst32++;p_Src32++;
++ size -= 4;
++ }
++ p_Src8 = (uint8_t*)(p_Src32);
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ }
++ else
++ {
++ /* source is not aligned (destination is aligned)*/
++ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3));
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ lastWord = GET_UINT32(*p_Src32);
++ p_Src32++;
++ while(size >> 3) /* size >= 8 */
++ {
++ currWord = GET_UINT32(*p_Src32);
++ WRITE_UINT32(*p_Dst32, (lastWord << leftAlign) | (currWord >> rightAlign));
++ lastWord = currWord;
++ p_Src32++;p_Dst32++;
++ size -= 4;
++ }
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3);
++ }
++
++ /* complete the leftovers */
++ while (size--)
++ {
++ WRITE_UINT8(*p_Dst8, GET_UINT8(*p_Src8));
++ p_Dst8++;p_Src8++;
++ }
++
++ return pDst;
++}
++
++void * Mem2IOCpy32(void* pDst,void* pSrc, uint32_t size)
++{
++ uint32_t leftAlign;
++ uint32_t rightAlign;
++ uint32_t lastWord;
++ uint32_t currWord;
++ uint32_t *p_Src32;
++ uint32_t *p_Dst32;
++ uint8_t *p_Src8;
++ uint8_t *p_Dst8;
++
++ p_Src8 = (uint8_t*)(pSrc);
++ p_Dst8 = (uint8_t*)(pDst);
++ /* first copy byte by byte until the source reaches its first
++ * alignment boundary; this is necessary to ensure we do not
++ * try to access data before the source buffer, which is not ours.
++ */
++ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */
++ {
++ WRITE_UINT8(*p_Dst8, *p_Src8);
++ p_Dst8++;p_Src8++;
++ size--;
++ }
++
++ /* align the destination (possibly misaligning the source) */
++ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
++ {
++ WRITE_UINT8(*p_Dst8, *p_Src8);
++ p_Dst8++;p_Src8++;
++ size--;
++ }
++
++ /* dest is aligned and source is not necessarily aligned */
++ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */
++ rightAlign = 32 - leftAlign;
++
++ if (leftAlign == 0)
++ {
++ /* source is also aligned */
++ p_Src32 = (uint32_t*)(p_Src8);
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ while (size >> 2) /* size >= 4 */
++ {
++ WRITE_UINT32(*p_Dst32, *p_Src32);
++ p_Dst32++;p_Src32++;
++ size -= 4;
++ }
++ p_Src8 = (uint8_t*)(p_Src32);
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ }
++ else
++ {
++ /* source is not aligned (destination is aligned)*/
++ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3));
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ lastWord = *p_Src32++;
++ while(size >> 3) /* size >= 8 */
++ {
++ currWord = *p_Src32;
++ WRITE_UINT32(*p_Dst32, (lastWord << leftAlign) | (currWord >> rightAlign));
++ lastWord = currWord;
++ p_Src32++;p_Dst32++;
++ size -= 4;
++ }
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3);
++ }
++
++ /* complete the leftovers */
++ while (size--)
++ {
++ WRITE_UINT8(*p_Dst8, *p_Src8);
++ p_Dst8++;p_Src8++;
++ }
++
++ return pDst;
++}
++
++void * IO2MemCpy32(void* pDst,void* pSrc, uint32_t size)
++{
++ uint32_t leftAlign;
++ uint32_t rightAlign;
++ uint32_t lastWord;
++ uint32_t currWord;
++ uint32_t *p_Src32;
++ uint32_t *p_Dst32;
++ uint8_t *p_Src8;
++ uint8_t *p_Dst8;
++
++ p_Src8 = (uint8_t*)(pSrc);
++ p_Dst8 = (uint8_t*)(pDst);
++ /* first copy byte by byte until the source reaches its first
++ * alignment boundary; this is necessary to ensure we do not
++ * try to access data before the source buffer, which is not ours.
++ */
++ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */
++ {
++ *p_Dst8 = GET_UINT8(*p_Src8);
++ p_Dst8++;p_Src8++;
++ size--;
++ }
++
++ /* align the destination (possibly misaligning the source) */
++ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
++ {
++ *p_Dst8 = GET_UINT8(*p_Src8);
++ p_Dst8++;p_Src8++;
++ size--;
++ }
++
++ /* dest is aligned and source is not necessarily aligned */
++ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */
++ rightAlign = 32 - leftAlign;
++
++ if (leftAlign == 0)
++ {
++ /* source is also aligned */
++ p_Src32 = (uint32_t*)(p_Src8);
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ while (size >> 2) /* size >= 4 */
++ {
++ *p_Dst32 = GET_UINT32(*p_Src32);
++ p_Dst32++;p_Src32++;
++ size -= 4;
++ }
++ p_Src8 = (uint8_t*)(p_Src32);
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ }
++ else
++ {
++ /* source is not aligned (destination is aligned)*/
++ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3));
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ lastWord = GET_UINT32(*p_Src32);
++ p_Src32++;
++ while(size >> 3) /* size >= 8 */
++ {
++ currWord = GET_UINT32(*p_Src32);
++ *p_Dst32 = (lastWord << leftAlign) | (currWord >> rightAlign);
++ lastWord = currWord;
++ p_Src32++;p_Dst32++;
++ size -= 4;
++ }
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3);
++ }
++
++ /* complete the leftovers */
++ while (size--)
++ {
++ *p_Dst8 = GET_UINT8(*p_Src8);
++ p_Dst8++;p_Src8++;
++ }
++
++ return pDst;
++}
++
++void * MemCpy64(void* pDst,void* pSrc, uint32_t size)
++{
++ uint32_t leftAlign;
++ uint32_t rightAlign;
++ uint64_t lastWord;
++ uint64_t currWord;
++ uint64_t *pSrc64;
++ uint64_t *pDst64;
++ uint8_t *p_Src8;
++ uint8_t *p_Dst8;
++
++ p_Src8 = (uint8_t*)(pSrc);
++ p_Dst8 = (uint8_t*)(pDst);
++ /* first copy byte by byte until the source reaches its first
++ * alignment boundary; this is necessary to ensure we do not
++ * try to access data before the source buffer, which is not ours.
++ */
++ while((PTR_TO_UINT(p_Src8) & 7) && size) /* (pSrc mod 8) > 0 and size > 0 */
++ {
++ *p_Dst8++ = *p_Src8++;
++ size--;
++ }
++
++ /* align the destination (possibly misaligning the source) */
++ while((PTR_TO_UINT(p_Dst8) & 7) && size) /* (pDst mod 8) > 0 and size > 0 */
++ {
++ *p_Dst8++ = *p_Src8++;
++ size--;
++ }
++
++ /* dest is aligned and source is not necessarily aligned */
++ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 7) << 3); /* leftAlign = (pSrc mod 8)*8 */
++ rightAlign = 64 - leftAlign;
++
++
++ if (leftAlign == 0)
++ {
++ /* source is also aligned */
++ pSrc64 = (uint64_t*)(p_Src8);
++ pDst64 = (uint64_t*)(p_Dst8);
++ while (size >> 3) /* size >= 8 */
++ {
++ *pDst64++ = *pSrc64++;
++ size -= 8;
++ }
++ p_Src8 = (uint8_t*)(pSrc64);
++ p_Dst8 = (uint8_t*)(pDst64);
++ }
++ else
++ {
++ /* source is not aligned (destination is aligned)*/
++ pSrc64 = (uint64_t*)(p_Src8 - (leftAlign >> 3));
++ pDst64 = (uint64_t*)(p_Dst8);
++ lastWord = *pSrc64++;
++ while(size >> 4) /* size >= 16 */
++ {
++ currWord = *pSrc64;
++ *pDst64 = (lastWord << leftAlign) | (currWord >> rightAlign);
++ lastWord = currWord;
++ pSrc64++;
++ pDst64++;
++ size -= 8;
++ }
++ p_Dst8 = (uint8_t*)(pDst64);
++ p_Src8 = (uint8_t*)(pSrc64) - 8 + (leftAlign >> 3);
++ }
++
++ /* complete the leftovers */
++ while (size--)
++ *p_Dst8++ = *p_Src8++;
++
++ return pDst;
++}
++
++void * MemSet32(void* pDst, uint8_t val, uint32_t size)
++{
++ uint32_t val32;
++ uint32_t *p_Dst32;
++ uint8_t *p_Dst8;
++
++ p_Dst8 = (uint8_t*)(pDst);
++
++ /* generate four copies of the 8-bit val in a 32-bit container */
++ val32 = (uint32_t) val;
++ val32 |= (val32 << 8);
++ val32 |= (val32 << 16);
++
++ /* align destination to 32 */
++ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
++ {
++ *p_Dst8++ = val;
++ size--;
++ }
++
++ /* 32-bit chunks */
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ while (size >> 2) /* size >= 4 */
++ {
++ *p_Dst32++ = val32;
++ size -= 4;
++ }
++
++ /* complete the leftovers */
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ while (size--)
++ *p_Dst8++ = val;
++
++ return pDst;
++}
++
++void * IOMemSet32(void* pDst, uint8_t val, uint32_t size)
++{
++ uint32_t val32;
++ uint32_t *p_Dst32;
++ uint8_t *p_Dst8;
++
++ p_Dst8 = (uint8_t*)(pDst);
++
++ /* generate four copies of the 8-bit val in a 32-bit container */
++ val32 = (uint32_t) val;
++ val32 |= (val32 << 8);
++ val32 |= (val32 << 16);
++
++ /* align destination to 32 */
++ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
++ {
++ WRITE_UINT8(*p_Dst8, val);
++ p_Dst8++;
++ size--;
++ }
++
++ /* 32-bit chunks */
++ p_Dst32 = (uint32_t*)(p_Dst8);
++ while (size >> 2) /* size >= 4 */
++ {
++ WRITE_UINT32(*p_Dst32, val32);
++ p_Dst32++;
++ size -= 4;
++ }
++
++ /* complete the leftovers */
++ p_Dst8 = (uint8_t*)(p_Dst32);
++ while (size--)
++ {
++ WRITE_UINT8(*p_Dst8, val);
++ p_Dst8++;
++ }
++
++ return pDst;
++}
++
++void * MemSet64(void* pDst, uint8_t val, uint32_t size)
++{
++ uint64_t val64;
++ uint64_t *pDst64;
++ uint8_t *p_Dst8;
++
++ p_Dst8 = (uint8_t*)(pDst);
++
++ /* generate eight copies of the 8-bit val in a 64-bit container */
++ val64 = (uint64_t) val;
++ val64 |= (val64 << 8);
++ val64 |= (val64 << 16);
++ val64 |= (val64 << 24);
++ val64 |= (val64 << 32);
++
++ /* align destination to 64 */
++ while((PTR_TO_UINT(p_Dst8) & 7) && size) /* (pDst mod 8) > 0 and size > 0 */
++ {
++ *p_Dst8++ = val;
++ size--;
++ }
++
++ /* 64-bit chunks */
++ pDst64 = (uint64_t*)(p_Dst8);
++ while (size >> 4) /* size >= 16 */
++ {
++ *pDst64++ = val64;
++ size -= 8;
++ }
++
++ /* complete the leftovers */
++ p_Dst8 = (uint8_t*)(pDst64);
++ while (size--)
++ *p_Dst8++ = val;
++
++ return pDst;
++}
++
++void MemDisp(uint8_t *p, int size)
++{
++ uint32_t space = (uint32_t)(PTR_TO_UINT(p) & 0x3);
++ uint8_t *p_Limit;
++
++ if (space)
++ {
++ p_Limit = (p - space + 4);
++
++ XX_Print("0x%08X: ", (p - space));
++
++ while (space--)
++ {
++ XX_Print("--");
++ }
++ while (size && (p < p_Limit))
++ {
++ XX_Print("%02x", *(uint8_t*)p);
++ size--;
++ p++;
++ }
++
++ XX_Print(" ");
++ p_Limit += 12;
++
++ while ((size > 3) && (p < p_Limit))
++ {
++ XX_Print("%08x ", *(uint32_t*)p);
++ size -= 4;
++ p += 4;
++ }
++ XX_Print("\r\n");
++ }
++
++ while (size > 15)
++ {
++ XX_Print("0x%08X: %08x %08x %08x %08x\r\n",
++ p, *(uint32_t *)p, *(uint32_t *)(p + 4),
++ *(uint32_t *)(p + 8), *(uint32_t *)(p + 12));
++ size -= 16;
++ p += 16;
++ }
++
++ if (size)
++ {
++ XX_Print("0x%08X: ", p);
++
++ while (size > 3)
++ {
++ XX_Print("%08x ", *(uint32_t *)p);
++ size -= 4;
++ p += 4;
++ }
++ while (size)
++ {
++ XX_Print("%02x", *(uint8_t *)p);
++ size--;
++ p++;
++ }
++
++ XX_Print("\r\n");
++ }
++}
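++
++/*
++ * Usage sketch (illustrative only): MemDisp() hex-dumps "size" bytes,
++ * printing "--" placeholders up to the first 4-byte boundary so that
++ * the word columns stay aligned:
++ *
++ *     uint8_t buf[32];
++ *
++ *     MemSet32(buf, 0x5A, sizeof(buf));
++ *     MemDisp(buf, sizeof(buf));
++ */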
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/etc/mm.c b/drivers/net/ethernet/freescale/sdk_fman/etc/mm.c
+new file mode 100644
+index 00000000..9fcc46e0
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/etc/mm.c
+@@ -0,0 +1,1155 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include "string_ext.h"
++#include "error_ext.h"
++#include "std_ext.h"
++#include "part_ext.h"
++#include "xx_ext.h"
++
++#include "mm.h"
++
++
++
++
++/**********************************************************************
++ * MM internal routines set *
++ **********************************************************************/
++
++/****************************************************************
++ * Routine: CreateBusyBlock
++ *
++ * Description:
++ * Initializes a new busy block of "size" bytes, starting at
++ * the "base" address. Each busy block has a name that
++ * specifies the purpose of the memory allocation.
++ *
++ * Arguments:
++ * base - base address of the busy block
++ * size - size of the busy block
++ * name - name that describes the busy block
++ *
++ * Return value:
++ * A pointer to the newly created structure on success;
++ * otherwise, NULL.
++ ****************************************************************/
++static t_BusyBlock * CreateBusyBlock(uint64_t base, uint64_t size, char *name)
++{
++ t_BusyBlock *p_BusyBlock;
++ uint32_t n;
++
++ p_BusyBlock = (t_BusyBlock *)XX_Malloc(sizeof(t_BusyBlock));
++ if ( !p_BusyBlock )
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ return NULL;
++ }
++
++ p_BusyBlock->base = base;
++ p_BusyBlock->end = base + size;
++
++ n = strlen(name);
++ if (n >= MM_MAX_NAME_LEN)
++ n = MM_MAX_NAME_LEN - 1;
++ strncpy(p_BusyBlock->name, name, MM_MAX_NAME_LEN-1);
++ p_BusyBlock->name[n] = '\0';
++ p_BusyBlock->p_Next = 0;
++
++ return p_BusyBlock;
++}
++
++/****************************************************************
++ * Routine: CreateNewBlock
++ *
++ * Description:
++ * Initializes a new memory block of "size" bytes, starting
++ * at the "base" address.
++ *
++ * Arguments:
++ * base - base address of the memory block
++ * size - size of the memory block
++ *
++ * Return value:
++ * A pointer to the newly created structure on success;
++ * otherwise, NULL.
++ ****************************************************************/
++static t_MemBlock * CreateNewBlock(uint64_t base, uint64_t size)
++{
++ t_MemBlock *p_MemBlock;
++
++ p_MemBlock = (t_MemBlock *)XX_Malloc(sizeof(t_MemBlock));
++ if ( !p_MemBlock )
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ return NULL;
++ }
++
++ p_MemBlock->base = base;
++ p_MemBlock->end = base+size;
++ p_MemBlock->p_Next = 0;
++
++ return p_MemBlock;
++}
++
++/****************************************************************
++ * Routine: CreateFreeBlock
++ *
++ * Description:
++ * Initializes a new free block of "size" bytes, starting
++ * at the "base" address.
++ *
++ * Arguments:
++ * base - base address of the free block
++ * size - size of the free block
++ *
++ * Return value:
++ * A pointer to the newly created structure on success;
++ * otherwise, NULL.
++ ****************************************************************/
++static t_FreeBlock * CreateFreeBlock(uint64_t base, uint64_t size)
++{
++ t_FreeBlock *p_FreeBlock;
++
++ p_FreeBlock = (t_FreeBlock *)XX_Malloc(sizeof(t_FreeBlock));
++ if ( !p_FreeBlock )
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ return NULL;
++ }
++
++ p_FreeBlock->base = base;
++ p_FreeBlock->end = base + size;
++ p_FreeBlock->p_Next = 0;
++
++ return p_FreeBlock;
++}
++
++/****************************************************************
++ * Routine: AddFree
++ *
++ * Description:
++ * Adds a new free block to the free lists. It updates each
++ * free list to include a new free block.
++ * Note that all free blocks in each free list are ordered
++ * by their base address.
++ *
++ * Arguments:
++ * p_MM - pointer to the MM object
++ * base - base address of a given free block
++ * end - end address of a given free block
++ *
++ * Return value:
++ * E_OK on success; otherwise an error code.
++ *
++ ****************************************************************/
++static t_Error AddFree(t_MM *p_MM, uint64_t base, uint64_t end)
++{
++ t_FreeBlock *p_PrevB, *p_CurrB, *p_NewB;
++ uint64_t alignment;
++ uint64_t alignBase;
++ int i;
++
++ /* update the free lists to include the just-released block */
++ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
++ {
++ p_PrevB = p_NewB = 0;
++ p_CurrB = p_MM->freeBlocks[i];
++
++ alignment = (uint64_t)(0x1 << i);
++ alignBase = MAKE_ALIGNED(base, alignment);
++
++ /* Goes to the next free list if there is no block to free */
++ if (alignBase >= end)
++ continue;
++
++ /* Looks for a free block that should be updated */
++ while ( p_CurrB )
++ {
++ if ( alignBase <= p_CurrB->end )
++ {
++ if ( end > p_CurrB->end )
++ {
++ t_FreeBlock *p_NextB;
++ while ( p_CurrB->p_Next && end > p_CurrB->p_Next->end )
++ {
++ p_NextB = p_CurrB->p_Next;
++ p_CurrB->p_Next = p_CurrB->p_Next->p_Next;
++ XX_Free(p_NextB);
++ }
++
++ p_NextB = p_CurrB->p_Next;
++ if ( !p_NextB || (p_NextB && end < p_NextB->base) )
++ {
++ p_CurrB->end = end;
++ }
++ else
++ {
++ p_CurrB->end = p_NextB->end;
++ p_CurrB->p_Next = p_NextB->p_Next;
++ XX_Free(p_NextB);
++ }
++ }
++ else if ( (end < p_CurrB->base) && ((end-alignBase) >= alignment) )
++ {
++ if ((p_NewB = CreateFreeBlock(alignBase, end-alignBase)) == NULL)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++
++ p_NewB->p_Next = p_CurrB;
++ if (p_PrevB)
++ p_PrevB->p_Next = p_NewB;
++ else
++ p_MM->freeBlocks[i] = p_NewB;
++ break;
++ }
++
++ if ((alignBase < p_CurrB->base) && (end >= p_CurrB->base))
++ {
++ p_CurrB->base = alignBase;
++ }
++
++ /* if the size of the free block is less than the alignment,
++ * delete that free block from the free list. */
++ if ( (p_CurrB->end - p_CurrB->base) < alignment)
++ {
++ if ( p_PrevB )
++ p_PrevB->p_Next = p_CurrB->p_Next;
++ else
++ p_MM->freeBlocks[i] = p_CurrB->p_Next;
++ XX_Free(p_CurrB);
++ p_CurrB = NULL;
++ }
++ break;
++ }
++ else
++ {
++ p_PrevB = p_CurrB;
++ p_CurrB = p_CurrB->p_Next;
++ }
++ }
++
++ /* If no free block was found to update, insert a new free block
++ * at the end of the free list.
++ */
++ if ( !p_CurrB && ((((uint64_t)(end-base)) & ((uint64_t)(alignment-1))) == 0) )
++ {
++ if ((p_NewB = CreateFreeBlock(alignBase, end-base)) == NULL)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++
++ if (p_PrevB)
++ p_PrevB->p_Next = p_NewB;
++ else
++ p_MM->freeBlocks[i] = p_NewB;
++ }
++
++ /* Update boundaries of the new free block */
++ if ((alignment == 1) && !p_NewB)
++ {
++ if ( p_CurrB && base > p_CurrB->base )
++ base = p_CurrB->base;
++ if ( p_CurrB && end < p_CurrB->end )
++ end = p_CurrB->end;
++ }
++ }
++
++ return (E_OK);
++}
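++
++/*
++ * Note on the alignment lists (illustrative): AddFree() mirrors each
++ * released range into every freeBlocks[i] list with its base rounded up
++ * to a 2^i boundary, so that a later MM_Get() with alignment 2^i can be
++ * served from list i without any further rounding.
++ */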
++
++/****************************************************************
++ * Routine: CutFree
++ *
++ * Description:
++ * Cuts the range from holdBase to holdEnd out of the free lists.
++ * That is, it updates all free lists of the MM object so that they
++ * no longer include the block of memory from holdBase to holdEnd.
++ * Each free list is searched for a free block that holds either
++ * holdBase or holdEnd; if such a block is found, it is updated.
++ *
++ * Arguments:
++ * p_MM - pointer to the MM object
++ * holdBase - base address of the allocated block
++ * holdEnd - end address of the allocated block
++ *
++ * Return value:
++ * E_OK is returned on success,
++ * otherwise returns an error code.
++ *
++ ****************************************************************/
++static t_Error CutFree(t_MM *p_MM, uint64_t holdBase, uint64_t holdEnd)
++{
++ t_FreeBlock *p_PrevB, *p_CurrB, *p_NewB;
++ uint64_t alignBase, base, end;
++ uint64_t alignment;
++ int i;
++
++ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
++ {
++ p_PrevB = p_NewB = 0;
++ p_CurrB = p_MM->freeBlocks[i];
++
++ alignment = (uint64_t)(0x1 << i);
++ alignBase = MAKE_ALIGNED(holdEnd, alignment);
++
++ while ( p_CurrB )
++ {
++ base = p_CurrB->base;
++ end = p_CurrB->end;
++
++ if ( (holdBase <= base) && (holdEnd <= end) && (holdEnd > base) )
++ {
++ if ( alignBase >= end ||
++ (alignBase < end && ((end-alignBase) < alignment)) )
++ {
++ if (p_PrevB)
++ p_PrevB->p_Next = p_CurrB->p_Next;
++ else
++ p_MM->freeBlocks[i] = p_CurrB->p_Next;
++ XX_Free(p_CurrB);
++ }
++ else
++ {
++ p_CurrB->base = alignBase;
++ }
++ break;
++ }
++ else if ( (holdBase > base) && (holdEnd <= end) )
++ {
++ if ( (holdBase-base) >= alignment )
++ {
++ if ( (alignBase < end) && ((end-alignBase) >= alignment) )
++ {
++ if ((p_NewB = CreateFreeBlock(alignBase, end-alignBase)) == NULL)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ p_NewB->p_Next = p_CurrB->p_Next;
++ p_CurrB->p_Next = p_NewB;
++ }
++ p_CurrB->end = holdBase;
++ }
++ else if ( (alignBase < end) && ((end-alignBase) >= alignment) )
++ {
++ p_CurrB->base = alignBase;
++ }
++ else
++ {
++ if (p_PrevB)
++ p_PrevB->p_Next = p_CurrB->p_Next;
++ else
++ p_MM->freeBlocks[i] = p_CurrB->p_Next;
++ XX_Free(p_CurrB);
++ }
++ break;
++ }
++ else
++ {
++ p_PrevB = p_CurrB;
++ p_CurrB = p_CurrB->p_Next;
++ }
++ }
++ }
++
++ return (E_OK);
++}
++
++/****************************************************************
++ * Routine: AddBusy
++ *
++ * Description:
++ * Adds a new busy block to the list of busy blocks. Note
++ * that all busy blocks are ordered by their base address in
++ * the busy list.
++ *
++ * Arguments:
++ * p_MM - pointer to the MM object
++ * p_NewBusyB - pointer to the new busy block
++ *
++ * Return value:
++ * None.
++ *
++ ****************************************************************/
++static void AddBusy(t_MM *p_MM, t_BusyBlock *p_NewBusyB)
++{
++ t_BusyBlock *p_CurrBusyB, *p_PrevBusyB;
++
++ /* find the place for the new busy block in the list of busy blocks */
++ p_PrevBusyB = 0;
++ p_CurrBusyB = p_MM->busyBlocks;
++
++ while ( p_CurrBusyB && p_NewBusyB->base > p_CurrBusyB->base )
++ {
++ p_PrevBusyB = p_CurrBusyB;
++ p_CurrBusyB = p_CurrBusyB->p_Next;
++ }
++
++ /* insert the new busy block into the list of busy blocks */
++ if ( p_CurrBusyB )
++ p_NewBusyB->p_Next = p_CurrBusyB;
++ if ( p_PrevBusyB )
++ p_PrevBusyB->p_Next = p_NewBusyB;
++ else
++ p_MM->busyBlocks = p_NewBusyB;
++}
++
++/****************************************************************
++ * Routine: CutBusy
++ *
++ * Description:
++ * Cuts the range from base to end out of the list of busy blocks.
++ * This is done by updating the busy list so that it no longer
++ * includes the given range, which is about to be freed. If the
++ * given range is part of some busy block, that busy block is
++ * updated. If a number of busy blocks are fully contained in the
++ * given range, all of those blocks are removed from the busy list
++ * and the blocks at the ends are updated. If the given range
++ * divides some block into two parts, a new busy block is added
++ * to the busy list.
++ *
++ * Arguments:
++ * p_MM - pointer to the MM object
++ * base - base address of a given busy block
++ * end - end address of a given busy block
++ *
++ * Return value:
++ * E_OK on success, E_NO_MEMORY otherwise.
++ *
++ ****************************************************************/
++static t_Error CutBusy(t_MM *p_MM, uint64_t base, uint64_t end)
++{
++ t_BusyBlock *p_CurrB, *p_PrevB, *p_NewB;
++
++ p_CurrB = p_MM->busyBlocks;
++ p_PrevB = p_NewB = 0;
++
++ while ( p_CurrB )
++ {
++ if ( base < p_CurrB->end )
++ {
++ if ( end > p_CurrB->end )
++ {
++ t_BusyBlock *p_NextB;
++ while ( p_CurrB->p_Next && end >= p_CurrB->p_Next->end )
++ {
++ p_NextB = p_CurrB->p_Next;
++ p_CurrB->p_Next = p_CurrB->p_Next->p_Next;
++ XX_Free(p_NextB);
++ }
++
++ p_NextB = p_CurrB->p_Next;
++ if ( p_NextB && end > p_NextB->base )
++ {
++ p_NextB->base = end;
++ }
++ }
++
++ if ( base <= p_CurrB->base )
++ {
++ if ( end < p_CurrB->end && end > p_CurrB->base )
++ {
++ p_CurrB->base = end;
++ }
++ else if ( end >= p_CurrB->end )
++ {
++ if ( p_PrevB )
++ p_PrevB->p_Next = p_CurrB->p_Next;
++ else
++ p_MM->busyBlocks = p_CurrB->p_Next;
++ XX_Free(p_CurrB);
++ }
++ }
++ else
++ {
++ if ( end < p_CurrB->end && end > p_CurrB->base )
++ {
++ if ((p_NewB = CreateBusyBlock(end,
++ p_CurrB->end-end,
++ p_CurrB->name)) == NULL)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ p_NewB->p_Next = p_CurrB->p_Next;
++ p_CurrB->p_Next = p_NewB;
++ }
++ p_CurrB->end = base;
++ }
++ break;
++ }
++ else
++ {
++ p_PrevB = p_CurrB;
++ p_CurrB = p_CurrB->p_Next;
++ }
++ }
++
++ return (E_OK);
++}
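++
++/*
++ * Worked example (illustrative): given a single busy block [0x100, 0x200)
++ * named "fw", CutBusy(p_MM, 0x140, 0x180) shrinks it to [0x100, 0x140)
++ * and inserts a second busy block [0x180, 0x200), also named "fw", so
++ * that only the middle range can then be returned to the free lists
++ * (see MM_PutForce).
++ */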
++
++/****************************************************************
++ * Routine: MmGetGreaterAlignment
++ *
++ * Description:
++ * Allocates a block of memory according to the given size
++ * and alignment. This routine is called from the MM_Get
++ * routine when the required alignment exceeds what
++ * MM_MAX_ALIGNMENT covers. In that case, it goes over the free
++ * blocks of the 64-byte alignment list and checks for one with
++ * enough bytes at the required alignment; if no such block is
++ * found, it returns ILLEGAL_BASE.
++ * After a block is found and the data is allocated, it calls
++ * the internal CutFree routine to update all free lists so that
++ * they no longer include the just-allocated block. (Each free
++ * list contains free blocks with the same alignment.) It also
++ * creates a busy block that holds information about the
++ * allocated block.
++ *
++ * Arguments:
++ * p_MM - pointer to the MM object
++ * size - size of the requested block
++ * alignment - required alignment, a power of two
++ * greater than 64
++ * name - the name that specifies the allocated block.
++ *
++ * Return value:
++ * Base address of the allocated block on success;
++ * ILLEGAL_BASE if a block cannot be allocated.
++ *
++ ****************************************************************/
++static uint64_t MmGetGreaterAlignment(t_MM *p_MM, uint64_t size, uint64_t alignment, char* name)
++{
++ t_FreeBlock *p_FreeB;
++ t_BusyBlock *p_NewBusyB;
++ uint64_t holdBase, holdEnd, alignBase = 0;
++
++ /* go over the free blocks of the 64-byte alignment list
++ and look for a block of suitable size whose base address
++ satisfies the alignment. */
++ p_FreeB = p_MM->freeBlocks[MM_MAX_ALIGNMENT];
++
++ while ( p_FreeB )
++ {
++ alignBase = MAKE_ALIGNED(p_FreeB->base, alignment);
++
++ /* the block is found if the aligned base lies inside the
++ * block and leaves enough room for the requested size. */
++ if ( alignBase >= p_FreeB->base &&
++ alignBase < p_FreeB->end &&
++ size <= (p_FreeB->end - alignBase) )
++ break;
++ else
++ p_FreeB = p_FreeB->p_Next;
++ }
++
++ /* If no such block was found */
++ if ( !p_FreeB )
++ return (uint64_t)(ILLEGAL_BASE);
++
++ holdBase = alignBase;
++ holdEnd = alignBase + size;
++
++ /* init a new busy block */
++ if ((p_NewBusyB = CreateBusyBlock(holdBase, size, name)) == NULL)
++ return (uint64_t)(ILLEGAL_BASE);
++
++ /* call CutFree to update the lists of free blocks */
++ if ( CutFree ( p_MM, holdBase, holdEnd ) != E_OK )
++ {
++ XX_Free(p_NewBusyB);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ /* insert the new busy block into the list of busy blocks */
++ AddBusy ( p_MM, p_NewBusyB );
++
++ return (holdBase);
++}
++
++
++/**********************************************************************
++ * MM API routines set *
++ **********************************************************************/
++
++/*****************************************************************************/
++t_Error MM_Init(t_Handle *h_MM, uint64_t base, uint64_t size)
++{
++ t_MM *p_MM;
++ uint64_t newBase, newSize;
++ int i;
++
++ if (!size)
++ {
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Size (should be positive)"));
++ }
++
++ /* Initializes a new MM object */
++ p_MM = (t_MM *)XX_Malloc(sizeof(t_MM));
++ if (!p_MM)
++ {
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ }
++
++ p_MM->h_Spinlock = XX_InitSpinlock();
++ if (!p_MM->h_Spinlock)
++ {
++ XX_Free(p_MM);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MM spinlock!"));
++ }
++
++ /* Initializes counter of free memory to total size */
++ p_MM->freeMemSize = size;
++
++ /* A busy list is empty */
++ p_MM->busyBlocks = 0;
++
++ /* Initializes a new memory block */
++ if ((p_MM->memBlocks = CreateNewBlock(base, size)) == NULL)
++ {
++ MM_Free(p_MM);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ }
++
++ /* Initializes a new free block for each free list*/
++ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
++ {
++ newBase = MAKE_ALIGNED( base, (0x1 << i) );
++ newSize = size - (newBase - base);
++
++ if ((p_MM->freeBlocks[i] = CreateFreeBlock(newBase, newSize)) == NULL)
++ {
++ MM_Free(p_MM);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ }
++ }
++
++ *h_MM = p_MM;
++
++ return (E_OK);
++}
++
++/*****************************************************************************/
++void MM_Free(t_Handle h_MM)
++{
++ t_MM *p_MM = (t_MM *)h_MM;
++ t_MemBlock *p_MemBlock;
++ t_BusyBlock *p_BusyBlock;
++ t_FreeBlock *p_FreeBlock;
++ void *p_Block;
++ int i;
++
++ ASSERT_COND(p_MM);
++
++ /* release memory allocated for busy blocks */
++ p_BusyBlock = p_MM->busyBlocks;
++ while ( p_BusyBlock )
++ {
++ p_Block = p_BusyBlock;
++ p_BusyBlock = p_BusyBlock->p_Next;
++ XX_Free(p_Block);
++ }
++
++ /* release memory allocated for free blocks */
++ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
++ {
++ p_FreeBlock = p_MM->freeBlocks[i];
++ while ( p_FreeBlock )
++ {
++ p_Block = p_FreeBlock;
++ p_FreeBlock = p_FreeBlock->p_Next;
++ XX_Free(p_Block);
++ }
++ }
++
++ /* release memory allocated for memory blocks */
++ p_MemBlock = p_MM->memBlocks;
++ while ( p_MemBlock )
++ {
++ p_Block = p_MemBlock;
++ p_MemBlock = p_MemBlock->p_Next;
++ XX_Free(p_Block);
++ }
++
++ if (p_MM->h_Spinlock)
++ XX_FreeSpinlock(p_MM->h_Spinlock);
++
++ /* release memory allocated for MM object itself */
++ XX_Free(p_MM);
++}
++
++/*****************************************************************************/
++uint64_t MM_Get(t_Handle h_MM, uint64_t size, uint64_t alignment, char* name)
++{
++ t_MM *p_MM = (t_MM *)h_MM;
++ t_FreeBlock *p_FreeB;
++ t_BusyBlock *p_NewBusyB;
++ uint64_t holdBase, holdEnd, j, i = 0;
++ uint32_t intFlags;
++
++ SANITY_CHECK_RETURN_VALUE(p_MM, E_INVALID_HANDLE, (uint64_t)ILLEGAL_BASE);
++
++ /* ensure the alignment value is greater than zero */
++ if (alignment == 0)
++ {
++ alignment = 1;
++ }
++
++ j = alignment;
++
++ /* count the trailing zero bits of the alignment to verify,
++ below, that it is a power of two. */
++ while ((j & 0x1) == 0)
++ {
++ i++;
++ j = j >> 1;
++ }
++
++ /* if the given alignment is not a power of two, return an error */
++ if (j != 1)
++ {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("alignment (should be power of 2)"));
++ return (uint64_t)ILLEGAL_BASE;
++ }
++
++ if (i > MM_MAX_ALIGNMENT)
++ {
++ return (MmGetGreaterAlignment(p_MM, size, alignment, name));
++ }
++
++ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
++ /* look for a block of size greater than or equal to the required size. */
++ p_FreeB = p_MM->freeBlocks[i];
++ while ( p_FreeB && (p_FreeB->end - p_FreeB->base) < size )
++ p_FreeB = p_FreeB->p_Next;
++
++ /* If no such block was found */
++ if ( !p_FreeB )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ holdBase = p_FreeB->base;
++ holdEnd = holdBase + size;
++
++ /* init a new busy block */
++ if ((p_NewBusyB = CreateBusyBlock(holdBase, size, name)) == NULL)
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ /* call CutFree to update the lists of free blocks */
++ if ( CutFree ( p_MM, holdBase, holdEnd ) != E_OK )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ XX_Free(p_NewBusyB);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ /* subtract the allocated size from the free memory size */
++ p_MM->freeMemSize -= size;
++
++ /* insert the new busy block into the list of busy blocks */
++ AddBusy ( p_MM, p_NewBusyB );
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++
++ return (holdBase);
++}
++
++/*****************************************************************************/
++uint64_t MM_GetForce(t_Handle h_MM, uint64_t base, uint64_t size, char* name)
++{
++ t_MM *p_MM = (t_MM *)h_MM;
++ t_FreeBlock *p_FreeB;
++ t_BusyBlock *p_NewBusyB;
++ uint32_t intFlags;
++ bool blockIsFree = FALSE;
++
++ ASSERT_COND(p_MM);
++
++ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
++ p_FreeB = p_MM->freeBlocks[0]; /* The biggest free blocks are in the
++ free list with alignment 1 */
++
++ while ( p_FreeB )
++ {
++ if ( base >= p_FreeB->base && (base+size) <= p_FreeB->end )
++ {
++ blockIsFree = TRUE;
++ break;
++ }
++ else
++ p_FreeB = p_FreeB->p_Next;
++ }
++
++ if ( !blockIsFree )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ /* init a new busy block */
++ if ((p_NewBusyB = CreateBusyBlock(base, size, name)) == NULL)
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ /* call CutFree to update the lists of free blocks */
++ if ( CutFree ( p_MM, base, base+size ) != E_OK )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ XX_Free(p_NewBusyB);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ /* subtract the allocated size from the free memory size */
++ p_MM->freeMemSize -= size;
++
++ /* insert the new busy block into the list of busy blocks */
++ AddBusy ( p_MM, p_NewBusyB );
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++
++ return (base);
++}
++
++/*****************************************************************************/
++uint64_t MM_GetForceMin(t_Handle h_MM, uint64_t size, uint64_t alignment, uint64_t min, char* name)
++{
++ t_MM *p_MM = (t_MM *)h_MM;
++ t_FreeBlock *p_FreeB;
++ t_BusyBlock *p_NewBusyB;
++ uint64_t holdBase, holdEnd, j = alignment, i=0;
++ uint32_t intFlags;
++
++ ASSERT_COND(p_MM);
++
++ /* count the trailing zero bits of the alignment to verify,
++ below, that it is a power of two. */
++ while ((j & 0x1) == 0)
++ {
++ i++;
++ j = j >> 1;
++ }
++
++ if ( (j != 1) || (i > MM_MAX_ALIGNMENT) )
++ {
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
++ p_FreeB = p_MM->freeBlocks[i];
++
++ /* look for the first block that contains the minimum
++ base address. If the whole required size fits into
++ it, use that block; otherwise look for the next
++ block of size greater than or equal to the required size. */
++ while ( p_FreeB && (min >= p_FreeB->end))
++ p_FreeB = p_FreeB->p_Next;
++
++ /* If no such block was found */
++ if ( !p_FreeB )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ /* if this block is large enough, use this block */
++ holdBase = ( min <= p_FreeB->base ) ? p_FreeB->base : min;
++ if ((holdBase + size) <= p_FreeB->end )
++ {
++ holdEnd = holdBase + size;
++ }
++ else
++ {
++ p_FreeB = p_FreeB->p_Next;
++ while ( p_FreeB && ((p_FreeB->end - p_FreeB->base) < size) )
++ p_FreeB = p_FreeB->p_Next;
++
++ /* If no such block was found */
++ if ( !p_FreeB )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ holdBase = p_FreeB->base;
++ holdEnd = holdBase + size;
++ }
++
++ /* init a new busy block */
++ if ((p_NewBusyB = CreateBusyBlock(holdBase, size, name)) == NULL)
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ /* call CutFree to update the lists of free blocks */
++ if ( CutFree( p_MM, holdBase, holdEnd ) != E_OK )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ XX_Free(p_NewBusyB);
++ return (uint64_t)(ILLEGAL_BASE);
++ }
++
++ /* subtract the allocated size from the free memory size */
++ p_MM->freeMemSize -= size;
++
++ /* insert the new busy block into the list of busy blocks */
++ AddBusy( p_MM, p_NewBusyB );
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++
++ return (holdBase);
++}
++
++/*****************************************************************************/
++uint64_t MM_Put(t_Handle h_MM, uint64_t base)
++{
++ t_MM *p_MM = (t_MM *)h_MM;
++ t_BusyBlock *p_BusyB, *p_PrevBusyB;
++ uint64_t size;
++ uint32_t intFlags;
++
++ ASSERT_COND(p_MM);
++
++ /* Look for the busy block that has the given base address.
++ * That block will be returned to the free memory.
++ */
++ p_PrevBusyB = 0;
++
++ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
++ p_BusyB = p_MM->busyBlocks;
++ while ( p_BusyB && base != p_BusyB->base )
++ {
++ p_PrevBusyB = p_BusyB;
++ p_BusyB = p_BusyB->p_Next;
++ }
++
++ if ( !p_BusyB )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(0);
++ }
++
++ if ( AddFree( p_MM, p_BusyB->base, p_BusyB->end ) != E_OK )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(0);
++ }
++
++ /* remove the busy block from the list of busy blocks */
++ if ( p_PrevBusyB )
++ p_PrevBusyB->p_Next = p_BusyB->p_Next;
++ else
++ p_MM->busyBlocks = p_BusyB->p_Next;
++
++ size = p_BusyB->end - p_BusyB->base;
++
++ /* add the deallocated size back to the free memory size */
++ p_MM->freeMemSize += size;
++
++ XX_Free(p_BusyB);
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++
++ return (size);
++}
++
++/*****************************************************************************/
++uint64_t MM_PutForce(t_Handle h_MM, uint64_t base, uint64_t size)
++{
++ t_MM *p_MM = (t_MM *)h_MM;
++ uint64_t end = base + size;
++ uint32_t intFlags;
++
++ ASSERT_COND(p_MM);
++
++ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
++
++ if ( CutBusy( p_MM, base, end ) != E_OK )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(0);
++ }
++
++ if ( AddFree ( p_MM, base, end ) != E_OK )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ return (uint64_t)(0);
++ }
++
++ /* add the deallocated size back to the free memory size */
++ p_MM->freeMemSize += size;
++
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++
++ return (size);
++}
++
++/*****************************************************************************/
++t_Error MM_Add(t_Handle h_MM, uint64_t base, uint64_t size)
++{
++ t_MM *p_MM = (t_MM *)h_MM;
++ t_MemBlock *p_MemB, *p_NewMemB;
++ t_Error errCode;
++ uint32_t intFlags;
++
++ ASSERT_COND(p_MM);
++
++ /* find the last block in the list of memory blocks, after which
++ * the new memory block will be inserted
++ */
++ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
++
++ p_MemB = p_MM->memBlocks;
++ while ( p_MemB->p_Next )
++ {
++ if ( base >= p_MemB->base && base < p_MemB->end )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
++ }
++ p_MemB = p_MemB->p_Next;
++ }
++ /* check the last memory block as well */
++ if ( base >= p_MemB->base && base < p_MemB->end )
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
++ }
++
++ /* create a new memory block */
++ if ((p_NewMemB = CreateNewBlock(base, size)) == NULL)
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ }
++
++ /* append a new memory block to the end of the list of memory blocks */
++ p_MemB->p_Next = p_NewMemB;
++
++ /* add a new free block to the free lists */
++ errCode = AddFree(p_MM, base, base+size);
++ if (errCode)
++ {
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++ p_MemB->p_Next = 0;
++ XX_Free(p_NewMemB);
++ return ((t_Error)errCode);
++ }
++
++ /* add the new block size to the free memory size */
++ p_MM->freeMemSize += size;
++
++ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
++
++ return (E_OK);
++}
++
++/*****************************************************************************/
++uint64_t MM_GetMemBlock(t_Handle h_MM, int index)
++{
++ t_MM *p_MM = (t_MM*)h_MM;
++ t_MemBlock *p_MemBlock;
++ int i;
++
++ ASSERT_COND(p_MM);
++
++ p_MemBlock = p_MM->memBlocks;
++ for (i=0; i < index; i++)
++ p_MemBlock = p_MemBlock->p_Next;
++
++ if ( p_MemBlock )
++ return (p_MemBlock->base);
++ else
++ return (uint64_t)ILLEGAL_BASE;
++}
++
++/*****************************************************************************/
++uint64_t MM_GetBase(t_Handle h_MM)
++{
++ t_MM *p_MM = (t_MM*)h_MM;
++ t_MemBlock *p_MemBlock;
++
++ ASSERT_COND(p_MM);
++
++ p_MemBlock = p_MM->memBlocks;
++ return p_MemBlock->base;
++}
++
++/*****************************************************************************/
++bool MM_InRange(t_Handle h_MM, uint64_t addr)
++{
++ t_MM *p_MM = (t_MM*)h_MM;
++ t_MemBlock *p_MemBlock;
++
++ ASSERT_COND(p_MM);
++
++ p_MemBlock = p_MM->memBlocks;
++
++ if ((addr >= p_MemBlock->base) && (addr < p_MemBlock->end))
++ return TRUE;
++ else
++ return FALSE;
++}
++
++/*****************************************************************************/
++uint64_t MM_GetFreeMemSize(t_Handle h_MM)
++{
++ t_MM *p_MM = (t_MM*)h_MM;
++
++ ASSERT_COND(p_MM);
++
++ return p_MM->freeMemSize;
++}
++
++/*****************************************************************************/
++void MM_Dump(t_Handle h_MM)
++{
++ t_MM *p_MM = (t_MM *)h_MM;
++ t_FreeBlock *p_FreeB;
++ t_BusyBlock *p_BusyB;
++ int i;
++
++ p_BusyB = p_MM->busyBlocks;
++ XX_Print("List of busy blocks:\n");
++ while (p_BusyB)
++ {
++ XX_Print("\t0x%p: (%s: b=0x%llx, e=0x%llx)\n", p_BusyB, p_BusyB->name, p_BusyB->base, p_BusyB->end );
++ p_BusyB = p_BusyB->p_Next;
++ }
++
++ XX_Print("\nLists of free blocks according to alignment:\n");
++ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
++ {
++ XX_Print("%d alignment:\n", (0x1 << i));
++ p_FreeB = p_MM->freeBlocks[i];
++ while (p_FreeB)
++ {
++ XX_Print("\t0x%p: (b=0x%llx, e=0x%llx)\n", p_FreeB, p_FreeB->base, p_FreeB->end);
++ p_FreeB = p_FreeB->p_Next;
++ }
++ XX_Print("\n");
++ }
++}
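++
++/*
++ * Usage sketch of the MM API (illustrative base, size and name values):
++ *
++ *     t_Handle h_MM;
++ *     uint64_t addr;
++ *
++ *     if (MM_Init(&h_MM, 0x80000000ULL, 0x100000) != E_OK)
++ *         return;
++ *
++ *     // 16 KB with 64-byte alignment
++ *     addr = MM_Get(h_MM, 0x4000, 64, "example block");
++ *     if (addr != (uint64_t)ILLEGAL_BASE) {
++ *         // ... use [addr, addr + 0x4000) ...
++ *         MM_Put(h_MM, addr);
++ *     }
++ *
++ *     MM_Free(h_MM);
++ */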
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/etc/mm.h b/drivers/net/ethernet/freescale/sdk_fman/etc/mm.h
+new file mode 100644
+index 00000000..43b2298f
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/etc/mm.h
+@@ -0,0 +1,105 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/****************************************************************
++ *
++ * File: mm.h
++ *
++ *
++ * Description:
++ * MM (Memory Management) object definitions.
++ * It also includes definitions of the Free Block, Busy Block
++ * and Memory Block structures used by the MM object.
++ *
++ ****************************************************************/
++
++#ifndef __MM_H
++#define __MM_H
++
++
++#include "mm_ext.h"
++
++#define __ERR_MODULE__ MODULE_MM
++
++
++#define MAKE_ALIGNED(addr, align) \
++ (((uint64_t)(addr) + ((align) - 1)) & (~(((uint64_t)align) - 1)))
++
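++/* e.g. MAKE_ALIGNED(0x1003, 0x10) == 0x1010; an already-aligned address
++ * is returned unchanged. */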
++
++/* t_MemBlock data structure defines parameters of the Memory Block */
++typedef struct t_MemBlock
++{
++ struct t_MemBlock *p_Next; /* Pointer to the next memory block */
++
++ uint64_t base; /* Base address of the memory block */
++ uint64_t end; /* End address of the memory block */
++} t_MemBlock;
++
++
++/* t_FreeBlock data structure defines parameters of the Free Block */
++typedef struct t_FreeBlock
++{
++ struct t_FreeBlock *p_Next; /* Pointer to the next free block */
++
++ uint64_t base; /* Base address of the block */
++ uint64_t end; /* End address of the block */
++} t_FreeBlock;
++
++
++/* t_BusyBlock data structure defines parameters of the Busy Block */
++typedef struct t_BusyBlock
++{
++ struct t_BusyBlock *p_Next; /* Pointer to the next busy block */
++
++ uint64_t base; /* Base address of the block */
++ uint64_t end; /* End address of the block */
++ char name[MM_MAX_NAME_LEN]; /* Name of the purpose for which this
++ block of memory was allocated */
++} t_BusyBlock;
++
++
++/* t_MM data structure defines parameters of the MM object */
++typedef struct t_MM
++{
++ t_Handle h_Spinlock;
++
++ t_MemBlock *memBlocks; /* List of memory blocks (Memory list) */
++ t_BusyBlock *busyBlocks; /* List of busy blocks (Busy list) */
++ t_FreeBlock *freeBlocks[MM_MAX_ALIGNMENT + 1];
++ /* Alignment lists of free blocks (Free lists) */
++
++ uint64_t freeMemSize; /* Total size of free memory (in bytes) */
++} t_MM;
++
++
++#endif /* __MM_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/etc/sprint.c b/drivers/net/ethernet/freescale/sdk_fman/etc/sprint.c
+new file mode 100644
+index 00000000..46d2956a
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/etc/sprint.c
+@@ -0,0 +1,81 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/*------------------------------------------------------*/
++/* File: sprint.c */
++/* */
++/* Description: */
++/* Debug routines (externals) */
++/*------------------------------------------------------*/
++#include "string_ext.h"
++#include "stdlib_ext.h"
++#include "stdarg_ext.h"
++#include "sprint_ext.h"
++#include "std_ext.h"
++#include "xx_ext.h"
++
++
++int Sprint(char * buf, const char *fmt, ...)
++{
++ va_list args;
++ int i;
++
++ va_start(args, fmt);
++ i=vsprintf(buf,fmt,args);
++ va_end(args);
++ return i;
++}
++
++int Snprint(char * buf, uint32_t size, const char *fmt, ...)
++{
++ va_list args;
++ int i;
++
++ va_start(args, fmt);
++ i=vsnprintf(buf,size,fmt,args);
++ va_end(args);
++ return i;
++}
++
++#ifndef NCSW_VXWORKS
++int Sscan(const char * buf, const char * fmt, ...)
++{
++ va_list args;
++ int i;
++
++ va_start(args,fmt);
++ i = vsscanf(buf,fmt,args);
++ va_end(args);
++ return i;
++}
++#endif /* NCSW_VXWORKS */
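++
++/*
++ * Usage sketch (illustrative): these are thin wrappers around the
++ * kernel's vsprintf()/vsnprintf()/vsscanf():
++ *
++ *     char buf[32];
++ *     int n = Snprint(buf, sizeof(buf), "port %u", 3);
++ */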
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/fmanv3h_dflags.h b/drivers/net/ethernet/freescale/sdk_fman/fmanv3h_dflags.h
+new file mode 100644
+index 00000000..435b0d2b
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/fmanv3h_dflags.h
+@@ -0,0 +1,57 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __dflags_h
++#define __dflags_h
++
++
++#define NCSW_LINUX
++
++#define T4240
++#define NCSW_PPC_CORE
++
++#define DEBUG_ERRORS 1
++
++#if defined(DEBUG)
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
++
++#define DEBUG_XX_MALLOC
++#define DEBUG_MEM_LEAKS
++
++#else
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
++#endif /* (DEBUG) */
++
++#define REPORT_EVENTS 1
++#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
++
++#endif /* __dflags_h */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/fmanv3l_dflags.h b/drivers/net/ethernet/freescale/sdk_fman/fmanv3l_dflags.h
+new file mode 100644
+index 00000000..789eb879
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/fmanv3l_dflags.h
+@@ -0,0 +1,56 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __dflags_h
++#define __dflags_h
++
++
++#define NCSW_LINUX
++
++#define NCSW_PPC_CORE
++
++#define DEBUG_ERRORS 1
++
++#if defined(DEBUG)
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
++
++#define DEBUG_XX_MALLOC
++#define DEBUG_MEM_LEAKS
++
++#else
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
++#endif /* (DEBUG) */
++
++#define REPORT_EVENTS 1
++#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
++
++#endif /* __dflags_h */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/crc_mac_addr_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/crc_mac_addr_ext.h
+new file mode 100644
+index 00000000..a84d5631
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/crc_mac_addr_ext.h
+@@ -0,0 +1,364 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/*------------------------------------------------------*/
++/* */
++/* File: crc_mac_addr_ext.h */
++/* */
++/* Description: */
++/* Defines a macro that calculates the CRC value of */
++/* an Ethernet MAC address (48-bit address). */
++/*------------------------------------------------------*/
++
++#ifndef __crc_mac_addr_ext_h
++#define __crc_mac_addr_ext_h
++
++#include "std_ext.h"
++
++
++static uint32_t crc_table[256] =
++{
++ 0x00000000,
++ 0x77073096,
++ 0xee0e612c,
++ 0x990951ba,
++ 0x076dc419,
++ 0x706af48f,
++ 0xe963a535,
++ 0x9e6495a3,
++ 0x0edb8832,
++ 0x79dcb8a4,
++ 0xe0d5e91e,
++ 0x97d2d988,
++ 0x09b64c2b,
++ 0x7eb17cbd,
++ 0xe7b82d07,
++ 0x90bf1d91,
++ 0x1db71064,
++ 0x6ab020f2,
++ 0xf3b97148,
++ 0x84be41de,
++ 0x1adad47d,
++ 0x6ddde4eb,
++ 0xf4d4b551,
++ 0x83d385c7,
++ 0x136c9856,
++ 0x646ba8c0,
++ 0xfd62f97a,
++ 0x8a65c9ec,
++ 0x14015c4f,
++ 0x63066cd9,
++ 0xfa0f3d63,
++ 0x8d080df5,
++ 0x3b6e20c8,
++ 0x4c69105e,
++ 0xd56041e4,
++ 0xa2677172,
++ 0x3c03e4d1,
++ 0x4b04d447,
++ 0xd20d85fd,
++ 0xa50ab56b,
++ 0x35b5a8fa,
++ 0x42b2986c,
++ 0xdbbbc9d6,
++ 0xacbcf940,
++ 0x32d86ce3,
++ 0x45df5c75,
++ 0xdcd60dcf,
++ 0xabd13d59,
++ 0x26d930ac,
++ 0x51de003a,
++ 0xc8d75180,
++ 0xbfd06116,
++ 0x21b4f4b5,
++ 0x56b3c423,
++ 0xcfba9599,
++ 0xb8bda50f,
++ 0x2802b89e,
++ 0x5f058808,
++ 0xc60cd9b2,
++ 0xb10be924,
++ 0x2f6f7c87,
++ 0x58684c11,
++ 0xc1611dab,
++ 0xb6662d3d,
++ 0x76dc4190,
++ 0x01db7106,
++ 0x98d220bc,
++ 0xefd5102a,
++ 0x71b18589,
++ 0x06b6b51f,
++ 0x9fbfe4a5,
++ 0xe8b8d433,
++ 0x7807c9a2,
++ 0x0f00f934,
++ 0x9609a88e,
++ 0xe10e9818,
++ 0x7f6a0dbb,
++ 0x086d3d2d,
++ 0x91646c97,
++ 0xe6635c01,
++ 0x6b6b51f4,
++ 0x1c6c6162,
++ 0x856530d8,
++ 0xf262004e,
++ 0x6c0695ed,
++ 0x1b01a57b,
++ 0x8208f4c1,
++ 0xf50fc457,
++ 0x65b0d9c6,
++ 0x12b7e950,
++ 0x8bbeb8ea,
++ 0xfcb9887c,
++ 0x62dd1ddf,
++ 0x15da2d49,
++ 0x8cd37cf3,
++ 0xfbd44c65,
++ 0x4db26158,
++ 0x3ab551ce,
++ 0xa3bc0074,
++ 0xd4bb30e2,
++ 0x4adfa541,
++ 0x3dd895d7,
++ 0xa4d1c46d,
++ 0xd3d6f4fb,
++ 0x4369e96a,
++ 0x346ed9fc,
++ 0xad678846,
++ 0xda60b8d0,
++ 0x44042d73,
++ 0x33031de5,
++ 0xaa0a4c5f,
++ 0xdd0d7cc9,
++ 0x5005713c,
++ 0x270241aa,
++ 0xbe0b1010,
++ 0xc90c2086,
++ 0x5768b525,
++ 0x206f85b3,
++ 0xb966d409,
++ 0xce61e49f,
++ 0x5edef90e,
++ 0x29d9c998,
++ 0xb0d09822,
++ 0xc7d7a8b4,
++ 0x59b33d17,
++ 0x2eb40d81,
++ 0xb7bd5c3b,
++ 0xc0ba6cad,
++ 0xedb88320,
++ 0x9abfb3b6,
++ 0x03b6e20c,
++ 0x74b1d29a,
++ 0xead54739,
++ 0x9dd277af,
++ 0x04db2615,
++ 0x73dc1683,
++ 0xe3630b12,
++ 0x94643b84,
++ 0x0d6d6a3e,
++ 0x7a6a5aa8,
++ 0xe40ecf0b,
++ 0x9309ff9d,
++ 0x0a00ae27,
++ 0x7d079eb1,
++ 0xf00f9344,
++ 0x8708a3d2,
++ 0x1e01f268,
++ 0x6906c2fe,
++ 0xf762575d,
++ 0x806567cb,
++ 0x196c3671,
++ 0x6e6b06e7,
++ 0xfed41b76,
++ 0x89d32be0,
++ 0x10da7a5a,
++ 0x67dd4acc,
++ 0xf9b9df6f,
++ 0x8ebeeff9,
++ 0x17b7be43,
++ 0x60b08ed5,
++ 0xd6d6a3e8,
++ 0xa1d1937e,
++ 0x38d8c2c4,
++ 0x4fdff252,
++ 0xd1bb67f1,
++ 0xa6bc5767,
++ 0x3fb506dd,
++ 0x48b2364b,
++ 0xd80d2bda,
++ 0xaf0a1b4c,
++ 0x36034af6,
++ 0x41047a60,
++ 0xdf60efc3,
++ 0xa867df55,
++ 0x316e8eef,
++ 0x4669be79,
++ 0xcb61b38c,
++ 0xbc66831a,
++ 0x256fd2a0,
++ 0x5268e236,
++ 0xcc0c7795,
++ 0xbb0b4703,
++ 0x220216b9,
++ 0x5505262f,
++ 0xc5ba3bbe,
++ 0xb2bd0b28,
++ 0x2bb45a92,
++ 0x5cb36a04,
++ 0xc2d7ffa7,
++ 0xb5d0cf31,
++ 0x2cd99e8b,
++ 0x5bdeae1d,
++ 0x9b64c2b0,
++ 0xec63f226,
++ 0x756aa39c,
++ 0x026d930a,
++ 0x9c0906a9,
++ 0xeb0e363f,
++ 0x72076785,
++ 0x05005713,
++ 0x95bf4a82,
++ 0xe2b87a14,
++ 0x7bb12bae,
++ 0x0cb61b38,
++ 0x92d28e9b,
++ 0xe5d5be0d,
++ 0x7cdcefb7,
++ 0x0bdbdf21,
++ 0x86d3d2d4,
++ 0xf1d4e242,
++ 0x68ddb3f8,
++ 0x1fda836e,
++ 0x81be16cd,
++ 0xf6b9265b,
++ 0x6fb077e1,
++ 0x18b74777,
++ 0x88085ae6,
++ 0xff0f6a70,
++ 0x66063bca,
++ 0x11010b5c,
++ 0x8f659eff,
++ 0xf862ae69,
++ 0x616bffd3,
++ 0x166ccf45,
++ 0xa00ae278,
++ 0xd70dd2ee,
++ 0x4e048354,
++ 0x3903b3c2,
++ 0xa7672661,
++ 0xd06016f7,
++ 0x4969474d,
++ 0x3e6e77db,
++ 0xaed16a4a,
++ 0xd9d65adc,
++ 0x40df0b66,
++ 0x37d83bf0,
++ 0xa9bcae53,
++ 0xdebb9ec5,
++ 0x47b2cf7f,
++ 0x30b5ffe9,
++ 0xbdbdf21c,
++ 0xcabac28a,
++ 0x53b39330,
++ 0x24b4a3a6,
++ 0xbad03605,
++ 0xcdd70693,
++ 0x54de5729,
++ 0x23d967bf,
++ 0xb3667a2e,
++ 0xc4614ab8,
++ 0x5d681b02,
++ 0x2a6f2b94,
++ 0xb40bbe37,
++ 0xc30c8ea1,
++ 0x5a05df1b,
++ 0x2d02ef8d
++};
++
++
++#define GET_MAC_ADDR_CRC(addr, crc) \
++{ \
++ uint32_t i; \
++ uint8_t data; \
++ \
++ /* CRC calculation */ \
++ crc = 0xffffffff; \
++ for (i=0; i < 6; i++) \
++ { \
++ data = (uint8_t)(addr >> ((5-i)*8)); \
++ crc = crc^data; \
++ crc = crc_table[crc&0xff] ^ (crc>>8); \
++ } \
++}
++
++/* Helpers for getting the mirrored (bit-reversed) value */
++/* of a byte-size number (e.g. 11010011b --> 11001011b). */
++/* Sometimes the mirrored value of the CRC is required.  */
++static __inline__ uint8_t GetMirror(uint8_t n)
++{
++ uint8_t mirror[16] =
++ {
++ 0x00,
++ 0x08,
++ 0x04,
++ 0x0c,
++ 0x02,
++ 0x0a,
++ 0x06,
++ 0x0e,
++ 0x01,
++ 0x09,
++ 0x05,
++ 0x0d,
++ 0x03,
++ 0x0b,
++ 0x07,
++ 0x0f
++ };
++ return ((uint8_t)(((mirror[n & 0x0f] << 4) | (mirror[n >> 4]))));
++}
++
++static __inline__ uint32_t GetMirror32(uint32_t n)
++{
++ return (((uint32_t)GetMirror((uint8_t)(n))<<24) |
++ ((uint32_t)GetMirror((uint8_t)(n>>8))<<16) |
++ ((uint32_t)GetMirror((uint8_t)(n>>16))<<8) |
++ ((uint32_t)GetMirror((uint8_t)(n>>24))));
++}
++
++#define MIRROR GetMirror
++#define MIRROR_32 GetMirror32
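++
++/* Illustrative usage sketch, not part of the original header: compute the
++ * CRC-32 of a 48-bit MAC address held in the low six bytes of a uint64_t,
++ * then mirror it, e.g. to derive a multicast hash bucket. Taking the six
++ * MSBs as the bucket index is an assumption made for the example only. */
++static __inline__ uint32_t ExampleMacAddrHash(uint64_t macAddr)
++{
++    uint32_t crc;
++
++    GET_MAC_ADDR_CRC(macAddr, crc);     /* CRC over the 6 address bytes */
++    crc = MIRROR_32(crc);               /* bit-reversed CRC value */
++    return (crc >> 26);                 /* example: 6 MSBs as bucket index */
++}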
++
++
++#endif /* __crc_mac_addr_ext_h */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/dpaa_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/dpaa_ext.h
+new file mode 100644
+index 00000000..e6d9e932
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/dpaa_ext.h
+@@ -0,0 +1,210 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File dpaa_ext.h
++
++ @Description DPAA Application Programming Interface.
++*//***************************************************************************/
++#ifndef __DPAA_EXT_H
++#define __DPAA_EXT_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++
++
++/**************************************************************************//**
++ @Group DPAA_grp Data Path Acceleration Architecture API
++
++ @Description DPAA API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++/**************************************************************************//**
++ @Description Frame descriptor
++*//***************************************************************************/
++typedef _Packed struct t_DpaaFD {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ volatile uint8_t liodn;
++ volatile uint8_t bpid;
++ volatile uint8_t elion;
++ volatile uint8_t addrh;
++ volatile uint32_t addrl;
++#else
++ volatile uint32_t addrl;
++ volatile uint8_t addrh;
++ volatile uint8_t elion;
++ volatile uint8_t bpid;
++ volatile uint8_t liodn;
++#endif
++ volatile uint32_t length; /**< Frame length */
++ volatile uint32_t status; /**< FD status */
++} _PackedType t_DpaaFD;
++
++/**************************************************************************//**
++ @Description enum for defining frame format
++*//***************************************************************************/
++typedef enum e_DpaaFDFormatType {
++ e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF = 0x0, /**< Simple frame Single buffer; Offset and
++ small length (9b OFFSET, 20b LENGTH) */
++ e_DPAA_FD_FORMAT_TYPE_LONG_SBSF = 0x2, /**< Simple frame, single buffer; big length
++ (29b LENGTH ,No OFFSET) */
++ e_DPAA_FD_FORMAT_TYPE_SHORT_MBSF = 0x4, /**< Simple frame, Scatter Gather table; Offset
++ and small length (9b OFFSET, 20b LENGTH) */
++ e_DPAA_FD_FORMAT_TYPE_LONG_MBSF = 0x6, /**< Simple frame, Scatter Gather table;
++ big length (29b LENGTH ,No OFFSET) */
++ e_DPAA_FD_FORMAT_TYPE_COMPOUND = 0x1, /**< Compound Frame (29b CONGESTION-WEIGHT
++ No LENGTH or OFFSET) */
++ e_DPAA_FD_FORMAT_TYPE_DUMMY
++} e_DpaaFDFormatType;
++
++/**************************************************************************//**
++ @Collection Frame descriptor macros
++*//***************************************************************************/
++#define DPAA_FD_DD_MASK 0xc0000000 /**< FD DD field mask */
++#define DPAA_FD_PID_MASK 0x3f000000 /**< FD PID field mask */
++#define DPAA_FD_ELIODN_MASK 0x0000f000 /**< FD ELIODN field mask */
++#define DPAA_FD_BPID_MASK 0x00ff0000 /**< FD BPID field mask */
++#define DPAA_FD_ADDRH_MASK 0x000000ff /**< FD ADDRH field mask */
++#define DPAA_FD_ADDRL_MASK 0xffffffff /**< FD ADDRL field mask */
++#define DPAA_FD_FORMAT_MASK 0xe0000000 /**< FD FORMAT field mask */
++#define DPAA_FD_OFFSET_MASK 0x1ff00000 /**< FD OFFSET field mask */
++#define DPAA_FD_LENGTH_MASK 0x000fffff /**< FD LENGTH field mask */
++
++#define DPAA_FD_GET_ADDRH(fd) ((t_DpaaFD *)fd)->addrh /**< Macro to get FD ADDRH field */
++#define DPAA_FD_GET_ADDRL(fd) ((t_DpaaFD *)fd)->addrl /**< Macro to get FD ADDRL field */
++#define DPAA_FD_GET_PHYS_ADDR(fd) ((physAddress_t)(((uint64_t)DPAA_FD_GET_ADDRH(fd) << 32) | (uint64_t)DPAA_FD_GET_ADDRL(fd))) /**< Macro to get FD ADDR field */
++#define DPAA_FD_GET_FORMAT(fd) ((((t_DpaaFD *)fd)->length & DPAA_FD_FORMAT_MASK) >> (31-2)) /**< Macro to get FD FORMAT field */
++#define DPAA_FD_GET_OFFSET(fd) ((((t_DpaaFD *)fd)->length & DPAA_FD_OFFSET_MASK) >> (31-11)) /**< Macro to get FD OFFSET field */
++#define DPAA_FD_GET_LENGTH(fd) (((t_DpaaFD *)fd)->length & DPAA_FD_LENGTH_MASK) /**< Macro to get FD LENGTH field */
++#define DPAA_FD_GET_STATUS(fd) ((t_DpaaFD *)fd)->status /**< Macro to get FD STATUS field */
++#define DPAA_FD_GET_ADDR(fd) XX_PhysToVirt(DPAA_FD_GET_PHYS_ADDR(fd)) /**< Macro to get FD ADDR (virtual) */
++
++#define DPAA_FD_SET_ADDRH(fd,val) ((t_DpaaFD *)fd)->addrh = (val) /**< Macro to set FD ADDRH field */
++#define DPAA_FD_SET_ADDRL(fd,val) ((t_DpaaFD *)fd)->addrl = (val) /**< Macro to set FD ADDRL field */
++#define DPAA_FD_SET_ADDR(fd,val) \
++do { \
++ uint64_t physAddr = (uint64_t)(XX_VirtToPhys(val)); \
++ DPAA_FD_SET_ADDRH(fd, ((uint32_t)(physAddr >> 32))); \
++ DPAA_FD_SET_ADDRL(fd, (uint32_t)physAddr); \
++} while (0) /**< Macro to set FD ADDR field */
++#define DPAA_FD_SET_FORMAT(fd,val) (((t_DpaaFD *)fd)->length = ((((t_DpaaFD *)fd)->length & ~DPAA_FD_FORMAT_MASK) | (((val) << (31-2))& DPAA_FD_FORMAT_MASK))) /**< Macro to set FD FORMAT field */
++#define DPAA_FD_SET_OFFSET(fd,val) (((t_DpaaFD *)fd)->length = ((((t_DpaaFD *)fd)->length & ~DPAA_FD_OFFSET_MASK) | (((val) << (31-11))& DPAA_FD_OFFSET_MASK) )) /**< Macro to set FD OFFSET field */
++#define DPAA_FD_SET_LENGTH(fd,val) (((t_DpaaFD *)fd)->length = (((t_DpaaFD *)fd)->length & ~DPAA_FD_LENGTH_MASK) | ((val) & DPAA_FD_LENGTH_MASK)) /**< Macro to set FD LENGTH field */
++#define DPAA_FD_SET_STATUS(fd,val) ((t_DpaaFD *)fd)->status = (val) /**< Macro to set FD STATUS field */
++/* @} */
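++
++/* Illustrative usage sketch, not part of the original header: preparing a
++ * single-buffer frame descriptor for transmission with the macros above.
++ * The buffer pointer and sizes are assumptions made for the example;
++ * XX_VirtToPhys() is supplied by the surrounding NCSW environment. */
++static __inline__ void ExampleBuildTxFd(t_DpaaFD *p_Fd, void *p_Buf,
++                                        uint32_t dataOffset, uint32_t dataLength)
++{
++    DPAA_FD_SET_ADDR(p_Fd, p_Buf);                              /* virt -> phys */
++    DPAA_FD_SET_FORMAT(p_Fd, e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF); /* single buffer */
++    DPAA_FD_SET_OFFSET(p_Fd, dataOffset);                       /* 9-bit offset */
++    DPAA_FD_SET_LENGTH(p_Fd, dataLength);                       /* 20-bit length */
++    DPAA_FD_SET_STATUS(p_Fd, 0);                                /* clear status */
++}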
++
++/**************************************************************************//**
++ @Description Frame Scatter/Gather Table Entry
++*//***************************************************************************/
++typedef _Packed struct t_DpaaSGTE {
++ volatile uint32_t addrh; /**< Buffer Address high */
++ volatile uint32_t addrl; /**< Buffer Address low */
++ volatile uint32_t length; /**< Buffer length */
++ volatile uint32_t offset; /**< SGTE offset */
++} _PackedType t_DpaaSGTE;
++
++#define DPAA_NUM_OF_SG_TABLE_ENTRY 16
++
++/**************************************************************************//**
++ @Description Frame Scatter/Gather Table
++*//***************************************************************************/
++typedef _Packed struct t_DpaaSGT {
++ t_DpaaSGTE tableEntry[DPAA_NUM_OF_SG_TABLE_ENTRY];
++ /**< Structure that holds information about
++ a single S/G entry. */
++} _PackedType t_DpaaSGT;
++
++/**************************************************************************//**
++ @Description Compound Frame Table
++*//***************************************************************************/
++typedef _Packed struct t_DpaaCompTbl {
++ t_DpaaSGTE outputBuffInfo; /**< Structure that holds information about
++ the compound-frame output buffer;
++ NOTE: this may point to a S/G table */
++ t_DpaaSGTE inputBuffInfo; /**< Structure that holds information about
++ the compound-frame input buffer;
++ NOTE: this may point to a S/G table */
++} _PackedType t_DpaaCompTbl;
++
++/**************************************************************************//**
++ @Collection Frame Scatter/Gather Table Entry macros
++*//***************************************************************************/
++#define DPAA_SGTE_ADDRH_MASK 0x000000ff /**< SGTE ADDRH field mask */
++#define DPAA_SGTE_ADDRL_MASK 0xffffffff /**< SGTE ADDRL field mask */
++#define DPAA_SGTE_E_MASK 0x80000000 /**< SGTE Extension field mask */
++#define DPAA_SGTE_F_MASK 0x40000000 /**< SGTE Final field mask */
++#define DPAA_SGTE_LENGTH_MASK 0x3fffffff /**< SGTE LENGTH field mask */
++#define DPAA_SGTE_BPID_MASK 0x00ff0000 /**< SGTE BPID field mask */
++#define DPAA_SGTE_OFFSET_MASK 0x00001fff /**< SGTE OFFSET field mask */
++
++#define DPAA_SGTE_GET_ADDRH(sgte) (((t_DpaaSGTE *)sgte)->addrh & DPAA_SGTE_ADDRH_MASK) /**< Macro to get SGTE ADDRH field */
++#define DPAA_SGTE_GET_ADDRL(sgte) ((t_DpaaSGTE *)sgte)->addrl /**< Macro to get SGTE ADDRL field */
++#define DPAA_SGTE_GET_PHYS_ADDR(sgte) ((physAddress_t)(((uint64_t)DPAA_SGTE_GET_ADDRH(sgte) << 32) | (uint64_t)DPAA_SGTE_GET_ADDRL(sgte))) /**< Macro to get FD ADDR field */
++#define DPAA_SGTE_GET_EXTENSION(sgte) ((((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_E_MASK) >> (31-0)) /**< Macro to get SGTE EXTENSION field */
++#define DPAA_SGTE_GET_FINAL(sgte) ((((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_F_MASK) >> (31-1)) /**< Macro to get SGTE FINAL field */
++#define DPAA_SGTE_GET_LENGTH(sgte) (((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_LENGTH_MASK) /**< Macro to get SGTE LENGTH field */
++#define DPAA_SGTE_GET_BPID(sgte) ((((t_DpaaSGTE *)sgte)->offset & DPAA_SGTE_BPID_MASK) >> (31-15)) /**< Macro to get SGTE BPID field */
++#define DPAA_SGTE_GET_OFFSET(sgte) (((t_DpaaSGTE *)sgte)->offset & DPAA_SGTE_OFFSET_MASK) /**< Macro to get SGTE OFFSET field */
++#define DPAA_SGTE_GET_ADDR(sgte) XX_PhysToVirt(DPAA_SGTE_GET_PHYS_ADDR(sgte))
++
++#define DPAA_SGTE_SET_ADDRH(sgte,val) (((t_DpaaSGTE *)sgte)->addrh = ((((t_DpaaSGTE *)sgte)->addrh & ~DPAA_SGTE_ADDRH_MASK) | ((val) & DPAA_SGTE_ADDRH_MASK))) /**< Macro to set SGTE ADDRH field */
++#define DPAA_SGTE_SET_ADDRL(sgte,val) ((t_DpaaSGTE *)sgte)->addrl = (val) /**< Macro to set SGTE ADDRL field */
++#define DPAA_SGTE_SET_ADDR(sgte,val) \
++do { \
++ uint64_t physAddr = (uint64_t)(XX_VirtToPhys(val)); \
++ DPAA_SGTE_SET_ADDRH(sgte, ((uint32_t)(physAddr >> 32))); \
++ DPAA_SGTE_SET_ADDRL(sgte, (uint32_t)physAddr); \
++} while (0) /**< Macro to set SGTE ADDR field */
++#define DPAA_SGTE_SET_EXTENSION(sgte,val) (((t_DpaaSGTE *)sgte)->length = ((((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_E_MASK) | (((val) << (31-0))& DPAA_SGTE_E_MASK))) /**< Macro to set SGTE EXTENSION field */
++#define DPAA_SGTE_SET_FINAL(sgte,val) (((t_DpaaSGTE *)sgte)->length = ((((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_F_MASK) | (((val) << (31-1))& DPAA_SGTE_F_MASK))) /**< Macro to set SGTE FINAL field */
++#define DPAA_SGTE_SET_LENGTH(sgte,val) (((t_DpaaSGTE *)sgte)->length = (((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_LENGTH_MASK) | ((val) & DPAA_SGTE_LENGTH_MASK)) /**< Macro to set SGTE LENGTH field */
++#define DPAA_SGTE_SET_BPID(sgte,val) (((t_DpaaSGTE *)sgte)->offset = ((((t_DpaaSGTE *)sgte)->offset & ~DPAA_SGTE_BPID_MASK) | (((val) << (31-15))& DPAA_SGTE_BPID_MASK))) /**< Macro to set SGTE BPID field */
++#define DPAA_SGTE_SET_OFFSET(sgte,val) (((t_DpaaSGTE *)sgte)->offset = ((((t_DpaaSGTE *)sgte)->offset & ~DPAA_SGTE_OFFSET_MASK) | (((val) << (31-31))& DPAA_SGTE_OFFSET_MASK) )) /**< Macro to set SGTE OFFSET field */
++/* @} */
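++
++/* Illustrative usage sketch, not part of the original header: filling the
++ * final entry of a scatter/gather table. The buffer pointer and sizes are
++ * assumptions made for the example. */
++static __inline__ void ExampleFillLastSgte(t_DpaaSGTE *p_Sgte, void *p_Buf,
++                                           uint32_t length, uint16_t offset)
++{
++    DPAA_SGTE_SET_ADDR(p_Sgte, p_Buf);      /* virt -> phys, split high/low */
++    DPAA_SGTE_SET_OFFSET(p_Sgte, offset);   /* 13-bit offset into the buffer */
++    DPAA_SGTE_SET_LENGTH(p_Sgte, length);   /* 30-bit length */
++    DPAA_SGTE_SET_EXTENSION(p_Sgte, 0);     /* not a link to another table */
++    DPAA_SGTE_SET_FINAL(p_Sgte, 1);         /* mark as the last entry */
++}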
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++#define DPAA_LIODN_DONT_OVERRIDE (-1)
++
++/** @} */ /* end of DPAA_grp group */
++
++
++#endif /* __DPAA_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_ext.h
+new file mode 100644
+index 00000000..a8a64386
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_ext.h
+@@ -0,0 +1,1731 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File fm_ext.h
++
++ @Description FM Application Programming Interface.
++*//***************************************************************************/
++#ifndef __FM_EXT
++#define __FM_EXT
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "dpaa_ext.h"
++#include "fsl_fman_sp.h"
++
++/**************************************************************************//**
++ @Group FM_grp Frame Manager API
++
++ @Description FM API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_lib_grp FM library
++
++ @Description FM API functions, definitions and enums.
++
++                The FM module is the main driver module and is mandatory for
++                FM driver users. It must be initialized before any other
++                driver module.
++                The FM is a "singleton" module. It is responsible for the
++                initialization and run-time control of the common HW modules:
++                FPM, DMA, common QMI and common BMI. This module must always
++                be initialized when working with any of the FM modules.
++                NOTE - We assume that the FM library will be initialized only by core No. 0!
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description Enum for defining port types
++*//***************************************************************************/
++typedef enum e_FmPortType {
++ e_FM_PORT_TYPE_OH_OFFLINE_PARSING = 0, /**< Offline parsing port */
++ e_FM_PORT_TYPE_RX, /**< 1G Rx port */
++ e_FM_PORT_TYPE_RX_10G, /**< 10G Rx port */
++ e_FM_PORT_TYPE_TX, /**< 1G Tx port */
++ e_FM_PORT_TYPE_TX_10G, /**< 10G Tx port */
++ e_FM_PORT_TYPE_DUMMY
++} e_FmPortType;
++
++/**************************************************************************//**
++ @Collection General FM defines
++*//***************************************************************************/
++#define FM_MAX_NUM_OF_PARTITIONS 64 /**< Maximum number of partitions */
++#define FM_PHYS_ADDRESS_SIZE 6 /**< FM Physical address size */
++/* @} */
++
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(push,1)
++#endif /* defined(__MWERKS__) && ... */
++
++/**************************************************************************//**
++ @Description FM physical Address
++*//***************************************************************************/
++typedef _Packed struct t_FmPhysAddr {
++ volatile uint8_t high; /**< High part of the physical address */
++ volatile uint32_t low; /**< Low part of the physical address */
++} _PackedType t_FmPhysAddr;
++
++/**************************************************************************//**
++ @Description Parse results memory layout
++*//***************************************************************************/
++typedef _Packed struct t_FmPrsResult {
++ volatile uint8_t lpid; /**< Logical port id */
++ volatile uint8_t shimr; /**< Shim header result */
++ volatile uint16_t l2r; /**< Layer 2 result */
++ volatile uint16_t l3r; /**< Layer 3 result */
++ volatile uint8_t l4r; /**< Layer 4 result */
++ volatile uint8_t cplan; /**< Classification plan id */
++ volatile uint16_t nxthdr; /**< Next Header */
++ volatile uint16_t cksum; /**< Running-sum */
++ volatile uint16_t flags_frag_off; /**< Flags & fragment-offset field of the last IP-header */
++    volatile uint8_t     route_type;        /**< Routing type field of an IPv6 routing extension header */
++ volatile uint8_t rhp_ip_valid; /**< Routing Extension Header Present; last bit is IP valid */
++ volatile uint8_t shim_off[2]; /**< Shim offset */
++ volatile uint8_t ip_pid_off; /**< IP PID (last IP-proto) offset */
++ volatile uint8_t eth_off; /**< ETH offset */
++ volatile uint8_t llc_snap_off; /**< LLC_SNAP offset */
++ volatile uint8_t vlan_off[2]; /**< VLAN offset */
++ volatile uint8_t etype_off; /**< ETYPE offset */
++ volatile uint8_t pppoe_off; /**< PPP offset */
++ volatile uint8_t mpls_off[2]; /**< MPLS offset */
++ volatile uint8_t ip_off[2]; /**< IP offset */
++ volatile uint8_t gre_off; /**< GRE offset */
++ volatile uint8_t l4_off; /**< Layer 4 offset */
++ volatile uint8_t nxthdr_off; /**< Parser end point */
++} _PackedType t_FmPrsResult;
++
++/**************************************************************************//**
++ @Collection FM Parser results
++*//***************************************************************************/
++#define FM_PR_L2_VLAN_STACK 0x00000100 /**< Parse Result: VLAN stack */
++#define FM_PR_L2_ETHERNET 0x00008000 /**< Parse Result: Ethernet*/
++#define FM_PR_L2_VLAN 0x00004000 /**< Parse Result: VLAN */
++#define FM_PR_L2_LLC_SNAP 0x00002000 /**< Parse Result: LLC_SNAP */
++#define FM_PR_L2_MPLS 0x00001000 /**< Parse Result: MPLS */
++#define FM_PR_L2_PPPoE 0x00000800 /**< Parse Result: PPPoE */
++/* @} */
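++
++/* Illustrative usage sketch, not part of the original header: testing the
++ * layer-2 parse-result bits of a received frame. Obtaining p_Prs (e.g. via
++ * FM_PORT_GetBufferPrsResult()) is assumed to happen elsewhere. */
++static __inline__ bool ExampleFrameHasVlan(const t_FmPrsResult *p_Prs)
++{
++    return (bool)((p_Prs->l2r & FM_PR_L2_VLAN) != 0);
++}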
++
++/**************************************************************************//**
++ @Collection FM Frame descriptor macros
++*//***************************************************************************/
++#define FM_FD_CMD_FCO 0x80000000 /**< Frame queue Context Override */
++#define FM_FD_CMD_RPD 0x40000000 /**< Read Prepended Data */
++#define FM_FD_CMD_UPD 0x20000000 /**< Update Prepended Data */
++#define FM_FD_CMD_DTC 0x10000000 /**< Do L4 Checksum */
++#define FM_FD_CMD_DCL4C 0x10000000 /**< Didn't calculate L4 Checksum */
++#define FM_FD_CMD_CFQ 0x00ffffff /**< Confirmation Frame Queue */
++
++#define FM_FD_ERR_UNSUPPORTED_FORMAT 0x04000000 /**< Not for Rx-Port! Unsupported Format */
++#define FM_FD_ERR_LENGTH 0x02000000 /**< Not for Rx-Port! Length Error */
++#define FM_FD_ERR_DMA 0x01000000 /**< DMA Data error */
++
++#define FM_FD_IPR 0x00000001 /**< IPR frame (not error) */
++
++#define FM_FD_ERR_IPR_NCSP (0x00100000 | FM_FD_IPR) /**< IPR non-consistent-sp */
++#define FM_FD_ERR_IPR (0x00200000 | FM_FD_IPR) /**< IPR error */
++#define FM_FD_ERR_IPR_TO (0x00300000 | FM_FD_IPR) /**< IPR timeout */
++
++#ifdef FM_CAPWAP_SUPPORT
++#define FM_FD_ERR_CRE 0x00200000
++#define FM_FD_ERR_CHE 0x00100000
++#endif /* FM_CAPWAP_SUPPORT */
++
++#define FM_FD_ERR_PHYSICAL 0x00080000 /**< Rx FIFO overflow, FCS error, code error, running disparity
++ error (SGMII and TBI modes), FIFO parity error. PHY
++ Sequence error, PHY error control character detected. */
++#define FM_FD_ERR_SIZE 0x00040000 /**< Frame too long OR Frame size exceeds max_length_frame */
++#define FM_FD_ERR_CLS_DISCARD 0x00020000 /**< classification discard */
++#define FM_FD_ERR_EXTRACTION 0x00008000 /**< Extract Out of Frame */
++#define FM_FD_ERR_NO_SCHEME 0x00004000 /**< No Scheme Selected */
++#define FM_FD_ERR_KEYSIZE_OVERFLOW 0x00002000 /**< Keysize Overflow */
++#define FM_FD_ERR_COLOR_RED 0x00000800 /**< Frame color is red */
++#define FM_FD_ERR_COLOR_YELLOW 0x00000400 /**< Frame color is yellow */
++#define FM_FD_ERR_ILL_PLCR 0x00000200 /**< Illegal Policer Profile selected */
++#define FM_FD_ERR_PLCR_FRAME_LEN 0x00000100 /**< Policer frame length error */
++#define FM_FD_ERR_PRS_TIMEOUT 0x00000080 /**< Parser Time out Exceed */
++#define FM_FD_ERR_PRS_ILL_INSTRUCT 0x00000040 /**< Invalid Soft Parser instruction */
++#define FM_FD_ERR_PRS_HDR_ERR 0x00000020 /**< Header error was identified during parsing */
++#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED  0x00000008  /**< Frame parsed beyond the first 256 bytes */
++
++#define FM_FD_TX_STATUS_ERR_MASK (FM_FD_ERR_UNSUPPORTED_FORMAT | \
++ FM_FD_ERR_LENGTH | \
++ FM_FD_ERR_DMA) /**< TX Error FD bits */
++
++#define FM_FD_RX_STATUS_ERR_MASK (FM_FD_ERR_UNSUPPORTED_FORMAT | \
++ FM_FD_ERR_LENGTH | \
++ FM_FD_ERR_DMA | \
++ FM_FD_ERR_IPR | \
++ FM_FD_ERR_IPR_TO | \
++ FM_FD_ERR_IPR_NCSP | \
++ FM_FD_ERR_PHYSICAL | \
++ FM_FD_ERR_SIZE | \
++ FM_FD_ERR_CLS_DISCARD | \
++ FM_FD_ERR_COLOR_RED | \
++ FM_FD_ERR_COLOR_YELLOW | \
++ FM_FD_ERR_ILL_PLCR | \
++ FM_FD_ERR_PLCR_FRAME_LEN | \
++ FM_FD_ERR_EXTRACTION | \
++ FM_FD_ERR_NO_SCHEME | \
++ FM_FD_ERR_KEYSIZE_OVERFLOW | \
++ FM_FD_ERR_PRS_TIMEOUT | \
++ FM_FD_ERR_PRS_ILL_INSTRUCT | \
++ FM_FD_ERR_PRS_HDR_ERR | \
++ FM_FD_ERR_BLOCK_LIMIT_EXCEEDED) /**< RX Error FD bits */
++
++#define FM_FD_RX_STATUS_ERR_NON_FM 0x00400000 /**< non Frame-Manager error */
++/* @} */
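++
++/* Illustrative usage sketch, not part of the original header: checking the
++ * status word of a received FD against the RX error masks defined above. */
++static __inline__ bool ExampleRxFdHasError(t_DpaaFD *p_Fd)
++{
++    uint32_t status = DPAA_FD_GET_STATUS(p_Fd);
++
++    return (bool)((status & (FM_FD_RX_STATUS_ERR_MASK |
++                             FM_FD_RX_STATUS_ERR_NON_FM)) != 0);
++}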
++
++/**************************************************************************//**
++ @Description Context A
++*//***************************************************************************/
++typedef _Packed struct t_FmContextA {
++ volatile uint32_t command; /**< ContextA Command */
++ volatile uint8_t res0[4]; /**< ContextA Reserved bits */
++} _PackedType t_FmContextA;
++
++/**************************************************************************//**
++ @Description Context B
++*//***************************************************************************/
++typedef uint32_t t_FmContextB;
++
++/**************************************************************************//**
++ @Collection Special Operation options
++*//***************************************************************************/
++typedef uint32_t fmSpecialOperations_t; /**< typedef for defining Special Operation options */
++
++#define  FM_SP_OP_IPSEC                 0x80000000  /**< activate features related to IPSec (e.g. fix Eth-type) */
++#define  FM_SP_OP_IPSEC_UPDATE_UDP_LEN  0x40000000  /**< update the UDP-Len after Encryption */
++#define  FM_SP_OP_IPSEC_MANIP           0x20000000  /**< handle the IPSec-manip options */
++#define  FM_SP_OP_RPD                   0x10000000  /**< Set the RPD bit */
++#define  FM_SP_OP_DCL4C                 0x08000000  /**< Set the DCL4C bit */
++#define  FM_SP_OP_CHECK_SEC_ERRORS      0x04000000  /**< Check SEC errors */
++#define  FM_SP_OP_CLEAR_RPD             0x02000000  /**< Clear the RPD bit */
++#define  FM_SP_OP_CAPWAP_DTLS_ENC       0x01000000  /**< activate features related to CAPWAP-DTLS post Encryption */
++#define  FM_SP_OP_CAPWAP_DTLS_DEC       0x00800000  /**< activate features related to CAPWAP-DTLS post Decryption */
++#define  FM_SP_OP_IPSEC_NO_ETH_HDR      0x00400000  /**< activate features related to IPSec without Eth hdr */
++/* @} */
++
++/**************************************************************************//**
++ @Collection Context A macros
++*//***************************************************************************/
++#define FM_CONTEXTA_OVERRIDE_MASK 0x80000000
++#define FM_CONTEXTA_ICMD_MASK 0x40000000
++#define FM_CONTEXTA_A1_VALID_MASK 0x20000000
++#define FM_CONTEXTA_MACCMD_MASK 0x00ff0000
++#define FM_CONTEXTA_MACCMD_VALID_MASK 0x00800000
++#define FM_CONTEXTA_MACCMD_SECURED_MASK 0x00100000
++#define FM_CONTEXTA_MACCMD_SC_MASK 0x000f0000
++#define FM_CONTEXTA_A1_MASK 0x0000ffff
++
++#define FM_CONTEXTA_GET_OVERRIDE(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_OVERRIDE_MASK) >> (31-0))
++#define FM_CONTEXTA_GET_ICMD(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_ICMD_MASK) >> (31-1))
++#define FM_CONTEXTA_GET_A1_VALID(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_A1_VALID_MASK) >> (31-2))
++#define FM_CONTEXTA_GET_A1(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_A1_MASK) >> (31-31))
++#define FM_CONTEXTA_GET_MACCMD(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_MASK) >> (31-15))
++#define FM_CONTEXTA_GET_MACCMD_VALID(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_VALID_MASK) >> (31-8))
++#define FM_CONTEXTA_GET_MACCMD_SECURED(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_SECURED_MASK) >> (31-11))
++#define FM_CONTEXTA_GET_MACCMD_SECURE_CHANNEL(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_SC_MASK) >> (31-15))
++
++#define FM_CONTEXTA_SET_OVERRIDE(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_OVERRIDE_MASK) | (((uint32_t)(val) << (31-0)) & FM_CONTEXTA_OVERRIDE_MASK) ))
++#define FM_CONTEXTA_SET_ICMD(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_ICMD_MASK) | (((val) << (31-1)) & FM_CONTEXTA_ICMD_MASK) ))
++#define FM_CONTEXTA_SET_A1_VALID(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_A1_VALID_MASK) | (((val) << (31-2)) & FM_CONTEXTA_A1_VALID_MASK) ))
++#define FM_CONTEXTA_SET_A1(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_A1_MASK) | (((val) << (31-31)) & FM_CONTEXTA_A1_MASK) ))
++#define FM_CONTEXTA_SET_MACCMD(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_MASK) | (((val) << (31-15)) & FM_CONTEXTA_MACCMD_MASK) ))
++#define FM_CONTEXTA_SET_MACCMD_VALID(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_VALID_MASK) | (((val) << (31-8)) & FM_CONTEXTA_MACCMD_VALID_MASK) ))
++#define FM_CONTEXTA_SET_MACCMD_SECURED(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_SECURED_MASK) | (((val) << (31-11)) & FM_CONTEXTA_MACCMD_SECURED_MASK) ))
++#define FM_CONTEXTA_SET_MACCMD_SECURE_CHANNEL(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_SC_MASK) | (((val) << (31-15)) & FM_CONTEXTA_MACCMD_SC_MASK) ))
++/* @} */
++
++/**************************************************************************//**
++ @Collection Context B macros
++*//***************************************************************************/
++#define FM_CONTEXTB_FQID_MASK 0x00ffffff
++
++#define FM_CONTEXTB_GET_FQID(contextB) (*((t_FmContextB *)contextB) & FM_CONTEXTB_FQID_MASK)
++#define FM_CONTEXTB_SET_FQID(contextB,val) (*((t_FmContextB *)contextB) = ((*((t_FmContextB *)contextB) & ~FM_CONTEXTB_FQID_MASK) | ((val) & FM_CONTEXTB_FQID_MASK)))
++/* @} */
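++
++/* Illustrative usage sketch, not part of the original header: raising the
++ * override flag in context A and rewriting the 24-bit FQID in context B.
++ * The new FQID value is an assumption made for the example. */
++static __inline__ void ExampleOverrideFqid(t_FmContextA *p_CtxA,
++                                           t_FmContextB *p_CtxB,
++                                           uint32_t newFqid)
++{
++    FM_CONTEXTA_SET_OVERRIDE(p_CtxA, 1);    /* honour the command field */
++    FM_CONTEXTB_SET_FQID(p_CtxB, newFqid);  /* 24-bit enqueue FQID */
++}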
++
++#if defined(__MWERKS__) && !defined(__GNUC__)
++#pragma pack(pop)
++#endif /* defined(__MWERKS__) && ... */
++
++
++/**************************************************************************//**
++ @Description FM Exceptions
++*//***************************************************************************/
++typedef enum e_FmExceptions {
++ e_FM_EX_DMA_BUS_ERROR = 0, /**< DMA bus error. */
++ e_FM_EX_DMA_READ_ECC, /**< Read Buffer ECC error (Valid for FM rev < 6)*/
++ e_FM_EX_DMA_SYSTEM_WRITE_ECC, /**< Write Buffer ECC error on system side (Valid for FM rev < 6)*/
++ e_FM_EX_DMA_FM_WRITE_ECC, /**< Write Buffer ECC error on FM side (Valid for FM rev < 6)*/
++ e_FM_EX_DMA_SINGLE_PORT_ECC, /**< Single Port ECC error on FM side (Valid for FM rev > 6)*/
++ e_FM_EX_FPM_STALL_ON_TASKS, /**< Stall of tasks on FPM */
++ e_FM_EX_FPM_SINGLE_ECC, /**< Single ECC on FPM. */
++ e_FM_EX_FPM_DOUBLE_ECC, /**< Double ECC error on FPM ram access */
++ e_FM_EX_QMI_SINGLE_ECC, /**< Single ECC on QMI. */
++ e_FM_EX_QMI_DOUBLE_ECC, /**< Double bit ECC occurred on QMI */
++ e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/**< Dequeue from unknown port id */
++ e_FM_EX_BMI_LIST_RAM_ECC, /**< Linked List RAM ECC error */
++ e_FM_EX_BMI_STORAGE_PROFILE_ECC, /**< Storage Profile ECC Error */
++ e_FM_EX_BMI_STATISTICS_RAM_ECC, /**< Statistics Count RAM ECC Error Enable */
++ e_FM_EX_BMI_DISPATCH_RAM_ECC, /**< Dispatch RAM ECC Error Enable */
++    e_FM_EX_IRAM_ECC,                   /**< Double bit ECC occurred on IRAM */
++    e_FM_EX_MURAM_ECC                   /**< Double bit ECC occurred on MURAM */
++} e_FmExceptions;
++
++/**************************************************************************//**
++ @Description Enum for defining port DMA swap mode
++*//***************************************************************************/
++typedef enum e_FmDmaSwapOption {
++ e_FM_DMA_NO_SWP = FMAN_DMA_NO_SWP, /**< No swap, transfer data as is.*/
++    e_FM_DMA_SWP_PPC_LE = FMAN_DMA_SWP_PPC_LE,  /**< The transferred data should be swapped
++                                                     in PowerPC little-endian mode. */
++    e_FM_DMA_SWP_BE = FMAN_DMA_SWP_BE           /**< The transferred data should be swapped
++                                                     in big-endian mode */
++} e_FmDmaSwapOption;
++
++/**************************************************************************//**
++ @Description Enum for defining port DMA cache attributes
++*//***************************************************************************/
++typedef enum e_FmDmaCacheOption {
++ e_FM_DMA_NO_STASH = FMAN_DMA_NO_STASH, /**< Cacheable, no Allocate (No Stashing) */
++ e_FM_DMA_STASH = FMAN_DMA_STASH /**< Cacheable and Allocate (Stashing on) */
++} e_FmDmaCacheOption;
++
++
++/**************************************************************************//**
++ @Group FM_init_grp FM Initialization Unit
++
++ @Description FM Initialization Unit
++
++ Initialization Flow
++                Initialization of the FM module is carried out by the application
++                according to the following sequence:
++                - Calling the configuration routine with basic parameters.
++                - Calling the advanced configuration routines to change the driver's defaults.
++                - Calling the initialization routine.
++                A minimal usage sketch of this flow follows the FM_Free() declaration below.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function t_FmExceptionsCallback
++
++ @Description   Exceptions user callback routine; it will be called upon an
++                exception, passing the exception identification.
++
++ @Param[in] h_App - User's application descriptor.
++ @Param[in] exception - The exception.
++*//***************************************************************************/
++typedef void (t_FmExceptionsCallback)(t_Handle h_App,
++ e_FmExceptions exception);
++
++
++/**************************************************************************//**
++ @Function t_FmBusErrorCallback
++
++ @Description   Bus error user callback routine; it will be called upon a
++                bus error, passing parameters that describe the error and its owner.
++
++ @Param[in] h_App - User's application descriptor.
++ @Param[in] portType - Port type (e_FmPortType)
++ @Param[in] portId - Port id - relative to type.
++ @Param[in] addr - Address that caused the error
++ @Param[in] tnum - Owner of error
++ @Param[in] liodn - Logical IO device number
++*//***************************************************************************/
++typedef void (t_FmBusErrorCallback) (t_Handle h_App,
++ e_FmPortType portType,
++ uint8_t portId,
++ uint64_t addr,
++ uint8_t tnum,
++ uint16_t liodn);
++
++/**************************************************************************//**
++ @Description A structure for defining buffer prefix area content.
++*//***************************************************************************/
++typedef struct t_FmBufferPrefixContent {
++ uint16_t privDataSize; /**< Number of bytes to be left at the beginning
++ of the external buffer; Note that the private-area will
++ start from the base of the buffer address. */
++    bool        passPrsResult;      /**< TRUE to pass the parse result to/from the FM;
++                                         User may use FM_PORT_GetBufferPrsResult() in order to
++                                         get the parse result from a buffer. */
++    bool        passTimeStamp;      /**< TRUE to pass the timeStamp to/from the FM;
++                                         User may use FM_PORT_GetBufferTimeStamp() in order to
++                                         get the time-stamp from a buffer. */
++    bool        passHashResult;     /**< TRUE to pass the KG hash result to/from the FM;
++                                         User may use FM_PORT_GetBufferHashResult() in order to
++                                         get the hash result from a buffer. */
++ bool passAllOtherPCDInfo;/**< Add all other Internal-Context information:
++ AD, hash-result, key, etc. */
++ uint16_t dataAlign; /**< 0 to use driver's default alignment [DEFAULT_FM_SP_bufferPrefixContent_dataAlign],
++ other value for selecting a data alignment (must be a power of 2);
++ if write optimization is used, must be >= 16. */
++ uint8_t manipExtraSpace; /**< Maximum extra size needed (insertion-size minus removal-size);
++ Note that this field impacts the size of the buffer-prefix
++ (i.e. it pushes the data offset);
++ This field is irrelevant if DPAA_VERSION==10 */
++} t_FmBufferPrefixContent;
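++
++/* Illustrative usage sketch, not part of the original header: a typical
++ * buffer-prefix setup reserving 16 private bytes and requesting the parse
++ * result and time-stamp; the concrete values are assumptions made for the
++ * example only. */
++static __inline__ void ExampleSetBufferPrefix(t_FmBufferPrefixContent *p_Prefix)
++{
++    p_Prefix->privDataSize        = 16;     /* private area at buffer base */
++    p_Prefix->passPrsResult       = TRUE;   /* deposit t_FmPrsResult */
++    p_Prefix->passTimeStamp       = TRUE;   /* deposit time-stamp */
++    p_Prefix->passHashResult      = FALSE;
++    p_Prefix->passAllOtherPCDInfo = FALSE;
++    p_Prefix->dataAlign           = 64;     /* power of 2; 0 = driver default */
++    p_Prefix->manipExtraSpace     = 0;      /* no header-manipulation growth */
++}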
++
++/**************************************************************************//**
++ @Description A structure of information about each of the external
++ buffer pools used by a port or storage-profile.
++*//***************************************************************************/
++typedef struct t_FmExtPoolParams {
++ uint8_t id; /**< External buffer pool id */
++ uint16_t size; /**< External buffer pool buffer size */
++} t_FmExtPoolParams;
++
++/**************************************************************************//**
++ @Description A structure for informing the driver about the external
++ buffer pools allocated in the BM and used by a port or a
++ storage-profile.
++*//***************************************************************************/
++typedef struct t_FmExtPools {
++    uint8_t                 numOfPoolsUsed;     /**< Number of pools used by this port */
++    t_FmExtPoolParams       extBufPool[FM_PORT_MAX_NUM_OF_EXT_POOLS];
++                                                /**< Parameters for each pool */
++} t_FmExtPools;
++
++/**************************************************************************//**
++ @Description A structure for defining backup BM Pools.
++*//***************************************************************************/
++typedef struct t_FmBackupBmPools {
++ uint8_t numOfBackupPools; /**< Number of BM backup pools -
++ must be smaller than the total number of
++ pools defined for the specified port.*/
++ uint8_t poolIds[FM_PORT_MAX_NUM_OF_EXT_POOLS];
++ /**< numOfBackupPools pool id's, specifying which
++ pools should be used only as backup. Pool
++ id's specified here must be a subset of the
++ pools used by the specified port.*/
++} t_FmBackupBmPools;
++
++/**************************************************************************//**
++ @Description A structure for defining BM pool depletion criteria
++*//***************************************************************************/
++typedef struct t_FmBufPoolDepletion {
++    bool        poolsGrpModeEnable;     /**< select the mode in which pause frames are sent after
++                                             a number of pools (all together!) are depleted */
++    uint8_t     numOfPools;             /**< the number of depleted pools that triggers
++                                             pause-frame transmission. */
++ bool poolsToConsider[BM_MAX_NUM_OF_POOLS];
++ /**< For each pool, TRUE if it should be considered for
++ depletion (Note - this pool must be used by this port!). */
++    bool        singlePoolModeEnable;   /**< select the mode in which pause frames are sent after
++                                             a single pool is depleted; */
++ bool poolsToConsiderForSingleMode[BM_MAX_NUM_OF_POOLS];
++ /**< For each pool, TRUE if it should be considered for
++ depletion (Note - this pool must be used by this port!) */
++#if (DPAA_VERSION >= 11)
++ bool pfcPrioritiesEn[FM_MAX_NUM_OF_PFC_PRIORITIES];
++ /**< This field is used by the MAC as the Priority Enable Vector in the PFC frame which is transmitted */
++#endif /* (DPAA_VERSION >= 11) */
++} t_FmBufPoolDepletion;
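++
++/* Illustrative usage sketch, not part of the original header: requesting
++ * pause frames when one specific pool runs dry; zeroing the structure
++ * beforehand is assumed to be done by the caller. */
++static __inline__ void ExampleSinglePoolDepletion(t_FmBufPoolDepletion *p_Depl,
++                                                  uint8_t poolId)
++{
++    p_Depl->poolsGrpModeEnable   = FALSE;   /* no pool-group criterion */
++    p_Depl->singlePoolModeEnable = TRUE;    /* pause on a single depleted pool */
++    p_Depl->poolsToConsiderForSingleMode[poolId] = TRUE;
++}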
++
++/**************************************************************************//**
++ @Description A Structure for defining Ucode patch for loading.
++*//***************************************************************************/
++typedef struct t_FmFirmwareParams {
++ uint32_t size; /**< Size of uCode */
++ uint32_t *p_Code; /**< A pointer to the uCode */
++} t_FmFirmwareParams;
++
++/**************************************************************************//**
++ @Description A Structure for defining FM initialization parameters
++*//***************************************************************************/
++typedef struct t_FmParams {
++ uint8_t fmId; /**< Index of the FM */
++ uint8_t guestId; /**< FM Partition Id */
++ uintptr_t baseAddr; /**< A pointer to base of memory mapped FM registers (virtual);
++ this field is optional when the FM runs in "guest-mode"
++ (i.e. guestId != NCSW_MASTER_ID); in that case, the driver will
++ use the memory-map instead of calling the IPC where possible;
++ NOTE that this should include ALL common registers of the FM including
++ the PCD registers area (i.e. until the VSP pages - 880KB). */
++ t_Handle h_FmMuram; /**< A handle of an initialized MURAM object,
++ to be used by the FM. */
++    uint16_t                fmClkFreq;              /**< In MHz;
++                                                         relevant when the FM does not run in "guest-mode". */
++    uint16_t                fmMacClkRatio;          /**< FM MAC clock ratio, for backward compatibility:
++                                                         when fmMacClkRatio = 0, the ratio is 2:1;
++                                                         when fmMacClkRatio = 1, the ratio is 1:1 */
++    t_FmExceptionsCallback  *f_Exception;           /**< An application callback routine to handle exceptions;
++                                                         relevant when the FM does not run in "guest-mode". */
++    t_FmBusErrorCallback    *f_BusError;            /**< An application callback routine to handle bus errors;
++                                                         relevant when the FM does not run in "guest-mode". */
++    t_Handle                h_App;                  /**< A handle to an application-layer object; this handle will
++                                                         be passed by the driver upon calling the above callbacks;
++                                                         relevant when the FM does not run in "guest-mode". */
++    int                     irq;                    /**< FM interrupt source for normal events;
++                                                         relevant when the FM does not run in "guest-mode". */
++    int                     errIrq;                 /**< FM interrupt source for errors;
++                                                         relevant when the FM does not run in "guest-mode". */
++    t_FmFirmwareParams      firmware;               /**< The firmware parameters structure;
++                                                         relevant when the FM does not run in "guest-mode". */
++
++#if (DPAA_VERSION >= 11)
++ uintptr_t vspBaseAddr; /**< A pointer to base of memory mapped FM VSP registers (virtual);
++ i.e. up to 24KB, depending on the specific chip. */
++ uint8_t partVSPBase; /**< The first Virtual-Storage-Profile-id dedicated to this partition.
++ NOTE: this parameter relevant only when working with multiple partitions. */
++ uint8_t partNumOfVSPs; /**< Number of VSPs dedicated to this partition.
++ NOTE: this parameter relevant only when working with multiple partitions. */
++#endif /* (DPAA_VERSION >= 11) */
++} t_FmParams;
++
++
++/**************************************************************************//**
++ @Function FM_Config
++
++ @Description Creates the FM module and returns its handle (descriptor).
++ This descriptor must be passed as first parameter to all other
++ FM function calls.
++
++ No actual initialization or configuration of FM hardware is
++ done by this routine. All FM parameters get default values that
++                may be changed by calling one or more of the advanced configuration routines.
++
++ @Param[in] p_FmParams - A pointer to a data structure of mandatory FM parameters
++
++ @Return A handle to the FM object, or NULL for Failure.
++*//***************************************************************************/
++t_Handle FM_Config(t_FmParams *p_FmParams);
++
++/**************************************************************************//**
++ @Function FM_Init
++
++ @Description Initializes the FM module by defining the software structure
++ and configuring the hardware registers.
++
++ @Param[in] h_Fm - FM module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_Init(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_Free
++
++ @Description Frees all resources that were assigned to FM module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_Fm - FM module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_Free(t_Handle h_Fm);
++
++
++/**************************************************************************//**
++ @Group FM_advanced_init_grp FM Advanced Configuration Unit
++
++ @Description Advanced configuration routines are optional routines that may
++ be called in order to change the default driver settings.
++
++ Note: Advanced configuration routines are not available for guest partition.
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description Enum for selecting DMA debug mode
++*//***************************************************************************/
++typedef enum e_FmDmaDbgCntMode {
++ e_FM_DMA_DBG_NO_CNT = 0, /**< No counting */
++ e_FM_DMA_DBG_CNT_DONE, /**< Count DONE commands */
++ e_FM_DMA_DBG_CNT_COMM_Q_EM, /**< count command queue emergency signals */
++ e_FM_DMA_DBG_CNT_INT_READ_EM, /**< Count Internal Read buffer emergency signal */
++ e_FM_DMA_DBG_CNT_INT_WRITE_EM, /**< Count Internal Write buffer emergency signal */
++ e_FM_DMA_DBG_CNT_FPM_WAIT, /**< Count FPM WAIT signal */
++ e_FM_DMA_DBG_CNT_SIGLE_BIT_ECC, /**< Single bit ECC errors. */
++ e_FM_DMA_DBG_CNT_RAW_WAR_PROT /**< Number of times there was a need for RAW & WAR protection. */
++} e_FmDmaDbgCntMode;
++
++/**************************************************************************//**
++ @Description Enum for selecting DMA Cache Override
++*//***************************************************************************/
++typedef enum e_FmDmaCacheOverride {
++ e_FM_DMA_NO_CACHE_OR = 0, /**< No override of the Cache field */
++ e_FM_DMA_NO_STASH_DATA, /**< Data should not be stashed in system level cache */
++ e_FM_DMA_MAY_STASH_DATA, /**< Data may be stashed in system level cache */
++ e_FM_DMA_STASH_DATA /**< Data should be stashed in system level cache */
++} e_FmDmaCacheOverride;
++
++/**************************************************************************//**
++ @Description Enum for selecting DMA External Bus Priority
++*//***************************************************************************/
++typedef enum e_FmDmaExtBusPri {
++ e_FM_DMA_EXT_BUS_NORMAL = 0, /**< Normal priority */
++ e_FM_DMA_EXT_BUS_EBS, /**< AXI extended bus service priority */
++ e_FM_DMA_EXT_BUS_SOS, /**< AXI sos priority */
++ e_FM_DMA_EXT_BUS_EBS_AND_SOS /**< AXI ebs + sos priority */
++} e_FmDmaExtBusPri;
++
++/**************************************************************************//**
++ @Description Enum for choosing the field that will be output on AID
++*//***************************************************************************/
++typedef enum e_FmDmaAidMode {
++ e_FM_DMA_AID_OUT_PORT_ID = 0, /**< 4 LSB of PORT_ID */
++ e_FM_DMA_AID_OUT_TNUM /**< 4 LSB of TNUM */
++} e_FmDmaAidMode;
++
++/**************************************************************************//**
++ @Description Enum for selecting FPM Catastrophic error behavior
++*//***************************************************************************/
++typedef enum e_FmCatastrophicErr {
++ e_FM_CATASTROPHIC_ERR_STALL_PORT = 0, /**< Port_ID is stalled (only reset can release it) */
++ e_FM_CATASTROPHIC_ERR_STALL_TASK /**< Only erroneous task is stalled */
++} e_FmCatastrophicErr;
++
++/**************************************************************************//**
++ @Description Enum for selecting FPM DMA Error behavior
++*//***************************************************************************/
++typedef enum e_FmDmaErr {
++ e_FM_DMA_ERR_CATASTROPHIC = 0, /**< Dma error is treated as a catastrophic
++ error (e_FmCatastrophicErr)*/
++ e_FM_DMA_ERR_REPORT /**< Dma error is just reported */
++} e_FmDmaErr;
++
++/**************************************************************************//**
++ @Description Enum for selecting DMA Emergency level by BMI emergency signal
++*//***************************************************************************/
++typedef enum e_FmDmaEmergencyLevel {
++ e_FM_DMA_EM_EBS = 0, /**< EBS emergency */
++ e_FM_DMA_EM_SOS /**< SOS emergency */
++} e_FmDmaEmergencyLevel;
++
++/**************************************************************************//**
++ @Collection Enum for selecting DMA Emergency options
++*//***************************************************************************/
++typedef uint32_t fmEmergencyBus_t; /**< DMA emergency options */
++
++#define FM_DMA_MURAM_READ_EMERGENCY 0x00800000 /**< Enable emergency for MURAM1 */
++#define FM_DMA_MURAM_WRITE_EMERGENCY 0x00400000 /**< Enable emergency for MURAM2 */
++#define FM_DMA_EXT_BUS_EMERGENCY 0x00100000 /**< Enable emergency for external bus */
++/* @} */
++
++/**************************************************************************//**
++ @Description A structure for defining DMA emergency level
++*//***************************************************************************/
++typedef struct t_FmDmaEmergency {
++ fmEmergencyBus_t emergencyBusSelect; /**< An OR of the busses where emergency
++ should be enabled */
++ e_FmDmaEmergencyLevel emergencyLevel; /**< EBS/SOS */
++} t_FmDmaEmergency;
++
++/**************************************************************************//*
++ @Description structure for defining FM threshold
++*//***************************************************************************/
++typedef struct t_FmThresholds {
++    uint8_t     dispLimit;      /**< The number of times a frame may
++                                     be passed in the FM before it is assumed to
++                                     be looping. */
++    uint8_t     prsDispTh;      /**< This is the number of packets that may be
++                                     queued in the parser dispatch queue */
++    uint8_t     plcrDispTh;     /**< This is the number of packets that may be
++                                     queued in the policer dispatch queue */
++    uint8_t     kgDispTh;       /**< This is the number of packets that may be
++                                     queued in the keygen dispatch queue */
++    uint8_t     bmiDispTh;      /**< This is the number of packets that may be
++                                     queued in the BMI dispatch queue */
++    uint8_t     qmiEnqDispTh;   /**< This is the number of packets that may be
++                                     queued in the QMI enqueue dispatch queue */
++    uint8_t     qmiDeqDispTh;   /**< This is the number of packets that may be
++                                     queued in the QMI dequeue dispatch queue */
++    uint8_t     fmCtl1DispTh;   /**< This is the number of packets that may be
++                                     queued in the fmCtl1 dispatch queue */
++    uint8_t     fmCtl2DispTh;   /**< This is the number of packets that may be
++                                     queued in the fmCtl2 dispatch queue */
++} t_FmThresholds;
++
++/**************************************************************************//*
++ @Description structure for defining DMA thresholds
++*//***************************************************************************/
++typedef struct t_FmDmaThresholds {
++ uint8_t assertEmergency; /**< When this value is reached,
++ assert emergency (Threshold)*/
++    uint8_t     clearEmergency;         /**< After emergency is asserted, it is held
++                                             until this value is reached (hysteresis) */
++} t_FmDmaThresholds;
++
++/**************************************************************************//**
++ @Function t_FmResetOnInitOverrideCallback
++
++ @Description FMan specific reset on init user callback routine,
++ @Description   FMan-specific reset-on-init user callback routine;
++                it will be used to override the standard FMan reset-on-init procedure.
++
++ @Param[in]     h_Fm - FMan handle
++typedef void (t_FmResetOnInitOverrideCallback)(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_ConfigResetOnInit
++
++ @Description Define whether to reset the FM before initialization.
++ Change the default configuration [DEFAULT_resetOnInit].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] enable When TRUE, FM will be reset before any initialization.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigResetOnInit(t_Handle h_Fm, bool enable);
++
++/**************************************************************************//**
++ @Function FM_ConfigResetOnInitOverrideCallback
++
++ @Description Define a special reset of FM before initialization.
++ Change the default configuration [DEFAULT_resetOnInitOverrideCallback].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] f_ResetOnInitOverride FM specific reset on init user callback routine.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigResetOnInitOverrideCallback(t_Handle h_Fm, t_FmResetOnInitOverrideCallback *f_ResetOnInitOverride);
++
++/**************************************************************************//**
++ @Function FM_ConfigTotalFifoSize
++
++ @Description Define Total FIFO size for the whole FM.
++                Calling this routine changes the total FIFO size in the internal driver
++                database from its default configuration [DEFAULT_totalFifoSize]
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] totalFifoSize The selected new value.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigTotalFifoSize(t_Handle h_Fm, uint32_t totalFifoSize);
++
++/**************************************************************************//**
++ @Function FM_ConfigDmaCacheOverride
++
++ @Description Define cache override mode.
++ Calling this routine changes the cache override mode
++ in the internal driver data base from its default configuration [DEFAULT_cacheOverride]
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] cacheOverride The selected new value.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaCacheOverride(t_Handle h_Fm, e_FmDmaCacheOverride cacheOverride);
++
++/**************************************************************************//**
++ @Function FM_ConfigDmaAidOverride
++
++ @Description Define DMA AID override mode.
++ Calling this routine changes the AID override mode
++ in the internal driver data base from its default configuration [DEFAULT_aidOverride]
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] aidOverride The selected new value.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaAidOverride(t_Handle h_Fm, bool aidOverride);
++
++/**************************************************************************//**
++ @Function FM_ConfigDmaAidMode
++
++ @Description Define DMA AID mode.
++ Calling this routine changes the AID mode in the internal
++ driver data base from its default configuration [DEFAULT_aidMode]
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] aidMode The selected new value.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaAidMode(t_Handle h_Fm, e_FmDmaAidMode aidMode);
++
++/**************************************************************************//**
++ @Function FM_ConfigDmaAxiDbgNumOfBeats
++
++ @Description Define DMA AXI number of beats.
++ Calling this routine changes the AXI number of beats in the internal
++ driver data base from its default configuration [DEFAULT_axiDbgNumOfBeats]
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] axiDbgNumOfBeats The selected new value.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaAxiDbgNumOfBeats(t_Handle h_Fm, uint8_t axiDbgNumOfBeats);
++
++/**************************************************************************//**
++ @Function FM_ConfigDmaCamNumOfEntries
++
++ @Description Define number of CAM entries.
++ Calling this routine changes the number of CAM entries in the internal
++ driver data base from its default configuration [DEFAULT_dmaCamNumOfEntries].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] numOfEntries The selected new value.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaCamNumOfEntries(t_Handle h_Fm, uint8_t numOfEntries);
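++
++/**************************************************************************//*
++ Usage sketch (illustrative only): the advanced DMA configuration calls
++ above all share the same constraints - after FM_Config(), before
++ FM_Init(), master partition only. The numeric value below is an arbitrary
++ example, not a recommended setting.
++
++ t_Error err;
++
++ err = FM_ConfigDmaAidOverride(h_Fm, TRUE);
++ if (err == E_OK)
++     err = FM_ConfigDmaCamNumOfEntries(h_Fm, 32);  // example value
++*//***************************************************************************/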
++
++/**************************************************************************//**
++ @Function FM_ConfigEnableCounters
++
++ @Description Obsolete; always returns E_OK.
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_ConfigEnableCounters(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_ConfigDmaDbgCounter
++
++ @Description Define DMA debug counter.
++ Calling this routine changes the number of the DMA debug counter in the internal
++ driver data base from its default configuration [DEFAULT_dmaDbgCntMode].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] fmDmaDbgCntMode An enum selecting the debug counter mode.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaDbgCounter(t_Handle h_Fm, e_FmDmaDbgCntMode fmDmaDbgCntMode);
++
++/**************************************************************************//**
++ @Function FM_ConfigDmaStopOnBusErr
++
++ @Description Define bus error behavior.
++ Calling this routine changes the bus error behavior definition
++ in the internal driver data base from its default
++ configuration [DEFAULT_dmaStopOnBusError].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] stop TRUE to stop on bus error, FALSE to continue.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ Only if bus error is enabled.
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaStopOnBusErr(t_Handle h_Fm, bool stop);
++
++/**************************************************************************//**
++ @Function FM_ConfigDmaEmergency
++
++ @Description Define DMA emergency.
++ Calling this routine changes the DMA emergency definition
++ in the internal driver data base from its default
++ configuration, where it is disabled.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] p_Emergency An OR mask of all required options.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaEmergency(t_Handle h_Fm, t_FmDmaEmergency *p_Emergency);
++
++/**************************************************************************//**
++ @Function FM_ConfigDmaErr
++
++ @Description Define DMA error treatment.
++ Calling this routine changes the DMA error treatment
++ in the internal driver data base from its default
++ configuration [DEFAULT_dmaErr].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] dmaErr The selected new choice.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaErr(t_Handle h_Fm, e_FmDmaErr dmaErr);
++
++/**************************************************************************//**
++ @Function FM_ConfigCatastrophicErr
++
++ @Description Define FM behavior on catastrophic error.
++ Calling this routine changes the FM behavior on catastrophic
++ error in the internal driver data base from its default
++ [DEFAULT_catastrophicErr].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] catastrophicErr The selected new choice.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigCatastrophicErr(t_Handle h_Fm, e_FmCatastrophicErr catastrophicErr);
++
++/**************************************************************************//**
++ @Function FM_ConfigEnableMuramTestMode
++
++ @Description Enable MURAM test mode.
++ Calling this routine changes the internal driver data base
++ from its default selection of test mode, where it is disabled.
++ This routine is only available on old FM revisions (FMan v2).
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigEnableMuramTestMode(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_ConfigEnableIramTestMode
++
++ @Description Enable IRAM test mode.
++ Calling this routine changes the internal driver data base
++ from its default selection of test mode, where it is disabled.
++ This routine is only available on old FM revisions (FMan v2).
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigEnableIramTestMode(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_ConfigHaltOnExternalActivation
++
++ @Description Define FM behavior on external halt activation.
++ Calling this routine changes the FM behavior on external halt
++ activation in the internal driver data base from its default
++ [DEFAULT_haltOnExternalActivation].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] enable TRUE to enable halt on external halt
++ activation.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigHaltOnExternalActivation(t_Handle h_Fm, bool enable);
++
++/**************************************************************************//**
++ @Function FM_ConfigHaltOnUnrecoverableEccError
++
++ @Description Define FM behavior on unrecoverable ECC error.
++ Calling this routine changes the FM behavior on unrecoverable
++ ECC error in the internal driver data base from its default
++ [DEFAULT_haltOnUnrecoverableEccError].
++ This routine is only available on old FM revisions (FMan v2).
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] enable TRUE to enable halt on unrecoverable Ecc error
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigHaltOnUnrecoverableEccError(t_Handle h_Fm, bool enable);
++
++/**************************************************************************//**
++ @Function FM_ConfigException
++
++ @Description Define FM exceptions.
++ Calling this routine changes the exceptions defaults in the
++ internal driver data base where all exceptions are enabled.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] exception The exception to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigException(t_Handle h_Fm, e_FmExceptions exception, bool enable);
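++
++/**************************************************************************//*
++ Usage sketch (illustrative only): masking a single exception at
++ configuration time. e_FM_EX_BMI_STORAGE_PROFILE_ECC is used purely as an
++ example; any e_FmExceptions value may be passed.
++
++ t_Error err = FM_ConfigException(h_Fm, e_FM_EX_BMI_STORAGE_PROFILE_ECC, FALSE);
++*//***************************************************************************/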
++
++/**************************************************************************//**
++ @Function FM_ConfigExternalEccRamsEnable
++
++ @Description Select external ECC enabling.
++ Calling this routine changes the ECC enabling control in the internal
++ driver data base from its default [DEFAULT_externalEccRamsEnable].
++ When this option is enabled, RAMs ECC enabling is not affected
++ by FM_EnableRamsEcc/FM_DisableRamsEcc, but is controlled through JTAG.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] enable TRUE to enable this option.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigExternalEccRamsEnable(t_Handle h_Fm, bool enable);
++
++/**************************************************************************//**
++ @Function FM_ConfigTnumAgingPeriod
++
++ @Description Define Tnum aging period.
++ Calling this routine changes the Tnum aging of dequeue TNUMs
++ in the QMI in the internal driver data base from its default
++ [DEFAULT_tnumAgingPeriod].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] tnumAgingPeriod Tnum Aging Period in microseconds.
++ Note that the period is recalculated in units of
++ 64 FM clocks. The driver will pick the closest
++ possible period.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++ NOTE: if any MAC is configured for PFC, a value of '0' is NOT
++ allowed.
++*//***************************************************************************/
++t_Error FM_ConfigTnumAgingPeriod(t_Handle h_Fm, uint16_t tnumAgingPeriod);
++
++/**************************************************************************//*
++ @Function FM_ConfigDmaEmergencySmoother
++
++ @Description Define DMA emergency smoother.
++ Calling this routine changes the definition of the minimum
++ amount of DATA beats transferred on the AXI READ and WRITE
++ ports before lowering the emergency level.
++ By default the smoother is disabled.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] emergencyCnt emergency switching counter.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaEmergencySmoother(t_Handle h_Fm, uint32_t emergencyCnt);
++
++/**************************************************************************//*
++ @Function FM_ConfigThresholds
++
++ @Description Calling this routine changes the internal driver data base
++ from its default FM threshold configuration:
++ dispLimit: [DEFAULT_dispLimit]
++ prsDispTh: [DEFAULT_prsDispTh]
++ plcrDispTh: [DEFAULT_plcrDispTh]
++ kgDispTh: [DEFAULT_kgDispTh]
++ bmiDispTh: [DEFAULT_bmiDispTh]
++ qmiEnqDispTh: [DEFAULT_qmiEnqDispTh]
++ qmiDeqDispTh: [DEFAULT_qmiDeqDispTh]
++ fmCtl1DispTh: [DEFAULT_fmCtl1DispTh]
++ fmCtl2DispTh: [DEFAULT_fmCtl2DispTh]
++
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] p_FmThresholds A structure of threshold parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigThresholds(t_Handle h_Fm, t_FmThresholds *p_FmThresholds);
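++
++/**************************************************************************//*
++ Usage sketch (illustrative only; the values are arbitrary examples, not
++ recommended settings, and error handling is omitted). Field names follow
++ the defaults listed above.
++
++ t_FmThresholds thresholds;
++
++ memset(&thresholds, 0, sizeof(thresholds));
++ thresholds.fmCtl1DispTh = 16;  // example value
++ thresholds.fmCtl2DispTh = 16;  // example value
++ FM_ConfigThresholds(h_Fm, &thresholds);
++*//***************************************************************************/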
++
++/**************************************************************************//*
++ @Function FM_ConfigDmaSosEmergencyThreshold
++
++ @Description Calling this routine changes the internal driver data base
++ from its default dma SOS emergency configuration [DEFAULT_dmaSosEmergency]
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] dmaSosEmergency The selected new value.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaSosEmergencyThreshold(t_Handle h_Fm, uint32_t dmaSosEmergency);
++
++/**************************************************************************//*
++ @Function FM_ConfigDmaWriteBufThresholds
++
++ @Description Calling this routine changes the internal driver data base
++ from its default configuration of DMA write buffer threshold
++ assertEmergency: [DEFAULT_dmaWriteIntBufLow]
++ clearEmergency: [DEFAULT_dmaWriteIntBufHigh]
++ This routine is only available on old FM revisions (FMan v2).
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] p_FmDmaThresholds A structure of thresholds to define emergency behavior -
++ When 'assertEmergency' value is reached, emergency is asserted,
++ then it is held until 'clearEmergency' value is reached.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaWriteBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds);
++
++ /**************************************************************************//*
++ @Function FM_ConfigDmaCommQThresholds
++
++ @Description Calling this routine changes the internal driver data base
++ from its default configuration of DMA command queue threshold
++ assertEmergency: [DEFAULT_dmaCommQLow]
++ clearEmergency: [DEFAULT_dmaCommQHigh]
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] p_FmDmaThresholds A structure of thresholds to define emergency behavior -
++ When 'assertEmergency' value is reached, emergency is asserted,
++ then it is held until 'clearEmergency' value is reached.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaCommQThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds);
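++
++/**************************************************************************//*
++ Usage sketch (illustrative only; values are arbitrary): the pair defines a
++ hysteresis - emergency is asserted when 'assertEmergency' is reached and
++ held until 'clearEmergency' is reached.
++
++ t_FmDmaThresholds dmaTh;
++
++ dmaTh.assertEmergency = 28;  // example value
++ dmaTh.clearEmergency  = 48;  // example value
++ FM_ConfigDmaCommQThresholds(h_Fm, &dmaTh);
++*//***************************************************************************/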
++
++/**************************************************************************//*
++ @Function FM_ConfigDmaReadBufThresholds
++
++ @Description Calling this routine changes the internal driver data base
++ from its default configuration of DMA read buffer threshold
++ assertEmergency: [DEFAULT_dmaReadIntBufLow]
++ clearEmergency: [DEFAULT_dmaReadIntBufHigh]
++ This routine is only available on old FM revisions (FMan v2).
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] p_FmDmaThresholds A structure of thresholds to define emergency behavior -
++ When 'assertEmergency' value is reached, emergency is asserted,
++ then it is held until 'clearEmergency' value is reached.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaReadBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds);
++
++/**************************************************************************//*
++ @Function FM_ConfigDmaWatchdog
++
++ @Description Calling this routine changes the internal driver data base
++ from its default watchdog configuration, which is disabled
++ [DEFAULT_dmaWatchdog].
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] watchDogValue The selected new value - in microseconds.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ConfigDmaWatchdog(t_Handle h_Fm, uint32_t watchDogValue);
++
++/** @} */ /* end of FM_advanced_init_grp group */
++/** @} */ /* end of FM_init_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_runtime_control_grp FM Runtime Control Unit
++
++ @Description FM Runtime control unit API functions, definitions and enums.
++ The FM driver provides a set of control routines.
++ These routines may only be called after the module was fully
++ initialized (both configuration and initialization routines were
++ called). They are typically used to get information from hardware
++ (status, counters/statistics, revision etc.), to modify a current
++ state or to force/enable a required action. Run-time control may
++ be called whenever necessary and as many times as needed.
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Collection General FM defines.
++*//***************************************************************************/
++#define FM_MAX_NUM_OF_VALID_PORTS (FM_MAX_NUM_OF_OH_PORTS + \
++ FM_MAX_NUM_OF_1G_RX_PORTS + \
++ FM_MAX_NUM_OF_10G_RX_PORTS + \
++ FM_MAX_NUM_OF_1G_TX_PORTS + \
++ FM_MAX_NUM_OF_10G_TX_PORTS) /**< Number of available FM ports */
++/* @} */
++
++/**************************************************************************//*
++ @Description A Structure for Port bandwidth requirement. Port is identified
++ by type and relative id.
++*//***************************************************************************/
++typedef struct t_FmPortBandwidth {
++ e_FmPortType type; /**< FM port type */
++ uint8_t relativePortId; /**< Type relative port id */
++ uint8_t bandwidth; /**< Bandwidth (in terms of percent) */
++} t_FmPortBandwidth;
++
++/**************************************************************************//*
++ @Description A Structure containing an array of Port bandwidth requirements.
++ The user should state the ports requiring bandwidth in terms of
++ percentage - i.e. all ports' bandwidths in the array must add
++ up to 100.
++*//***************************************************************************/
++typedef struct t_FmPortsBandwidthParams {
++ uint8_t numOfPorts; /**< The number of relevant ports, which is the
++ number of valid entries in the array below */
++ t_FmPortBandwidth portsBandwidths[FM_MAX_NUM_OF_VALID_PORTS];
++ /**< For each port, its bandwidth (all ports'
++ bandwidths must add up to 100) */
++} t_FmPortsBandwidthParams;
++
++/**************************************************************************//**
++ @Description DMA Emergency control on MURAM
++*//***************************************************************************/
++typedef enum e_FmDmaMuramPort {
++ e_FM_DMA_MURAM_PORT_WRITE, /**< MURAM write port */
++ e_FM_DMA_MURAM_PORT_READ /**< MURAM read port */
++} e_FmDmaMuramPort;
++
++/**************************************************************************//**
++ @Description Enum for defining FM counters
++*//***************************************************************************/
++typedef enum e_FmCounters {
++ e_FM_COUNTERS_ENQ_TOTAL_FRAME = 0, /**< QMI total enqueued frames counter */
++ e_FM_COUNTERS_DEQ_TOTAL_FRAME, /**< QMI total dequeued frames counter */
++ e_FM_COUNTERS_DEQ_0, /**< QMI 0 frames from QMan counter */
++ e_FM_COUNTERS_DEQ_1, /**< QMI 1 frames from QMan counter */
++ e_FM_COUNTERS_DEQ_2, /**< QMI 2 frames from QMan counter */
++ e_FM_COUNTERS_DEQ_3, /**< QMI 3 frames from QMan counter */
++ e_FM_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI dequeue from default queue counter */
++ e_FM_COUNTERS_DEQ_FROM_CONTEXT, /**< QMI dequeue from FQ context counter */
++ e_FM_COUNTERS_DEQ_FROM_FD, /**< QMI dequeue from FD command field counter */
++ e_FM_COUNTERS_DEQ_CONFIRM /**< QMI dequeue confirm counter */
++} e_FmCounters;
++
++/**************************************************************************//**
++ @Description A Structure for returning FM revision information
++*//***************************************************************************/
++typedef struct t_FmRevisionInfo {
++ uint8_t majorRev; /**< Major revision */
++ uint8_t minorRev; /**< Minor revision */
++} t_FmRevisionInfo;
++
++/**************************************************************************//**
++ @Description A Structure for returning FM ctrl code revision information
++*//***************************************************************************/
++typedef struct t_FmCtrlCodeRevisionInfo {
++ uint16_t packageRev; /**< Package revision */
++ uint8_t majorRev; /**< Major revision */
++ uint8_t minorRev; /**< Minor revision */
++} t_FmCtrlCodeRevisionInfo;
++
++/**************************************************************************//**
++ @Description A Structure for defining DMA status
++*//***************************************************************************/
++typedef struct t_FmDmaStatus {
++ bool cmqNotEmpty; /**< Command queue is not empty */
++ bool busError; /**< Bus error occurred */
++ bool readBufEccError; /**< Double ECC error on buffer Read (Valid for FM rev < 6)*/
++ bool writeBufEccSysError; /**< Double ECC error on buffer write from system side (Valid for FM rev < 6)*/
++ bool writeBufEccFmError; /**< Double ECC error on buffer write from FM side (Valid for FM rev < 6) */
++ bool singlePortEccError; /**< Single Port ECC error from FM side (Valid for FM rev >= 6)*/
++} t_FmDmaStatus;
++
++/**************************************************************************//**
++ @Description A Structure for obtaining FM controller monitor values
++*//***************************************************************************/
++typedef struct t_FmCtrlMon {
++ uint8_t percentCnt[2]; /**< Percentage value */
++} t_FmCtrlMon;
++
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++/**************************************************************************//**
++ @Function FM_DumpRegs
++
++ @Description Dumps all FM registers
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success;
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FM_DumpRegs(t_Handle h_Fm);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++/**************************************************************************//**
++ @Function FM_SetException
++
++ @Description Calling this routine enables/disables the specified exception.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] exception The exception to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_SetException(t_Handle h_Fm, e_FmExceptions exception, bool enable);
++
++/**************************************************************************//**
++ @Function FM_EnableRamsEcc
++
++ @Description Enables the ECC mechanism for all the different FM RAMs, e.g. IRAM,
++ MURAM, Parser, Keygen, Policer, etc.
++ Note:
++ If FM_ConfigExternalEccRamsEnable was called to enable external
++ setting of ECC, this routine affects IRAM ECC only.
++ This routine is also called by the driver if an ECC exception is
++ enabled.
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_EnableRamsEcc(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_DisableRamsEcc
++
++ @Description Disables the ECC mechanism for all the different FM RAMs, e.g. IRAM,
++ MURAM, Parser, Keygen, Policer, etc.
++ Note:
++ If FM_ConfigExternalEccRamsEnable was called to enable external
++ setting of ECC, this routine affects IRAM ECC only.
++ As opposed to FM_EnableRamsEcc, this routine must be called
++ explicitly to disable all RAMs ECC.
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Config() and before FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_DisableRamsEcc(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_GetRevision
++
++ @Description Returns the FM revision
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[out] p_FmRevisionInfo A structure of revision information parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FM_GetRevision(t_Handle h_Fm, t_FmRevisionInfo *p_FmRevisionInfo);
++
++/**************************************************************************//**
++ @Function FM_GetFmanCtrlCodeRevision
++
++ @Description Returns the Fman controller code revision
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[out] p_RevisionInfo A structure of revision information parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FM_GetFmanCtrlCodeRevision(t_Handle h_Fm, t_FmCtrlCodeRevisionInfo *p_RevisionInfo);
++
++/**************************************************************************//**
++ @Function FM_GetCounter
++
++ @Description Reads one of the FM counters.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] counter The requested counter.
++
++ @Return Counter's current value.
++
++ @Cautions Allowed only following FM_Init().
++ Note that it is the user's responsibility to call this routine only
++ for enabled counters, and there will be no indication if a
++ disabled counter is accessed.
++*//***************************************************************************/
++uint32_t FM_GetCounter(t_Handle h_Fm, e_FmCounters counter);
++
++/**************************************************************************//**
++ @Function FM_ModifyCounter
++
++ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] counter The requested counter.
++ @Param[in] val The requested value to be written into the counter.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ModifyCounter(t_Handle h_Fm, e_FmCounters counter, uint32_t val);
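++
++/**************************************************************************//*
++ Usage sketch (illustrative only): reset a QMI counter to zero, let traffic
++ run, then sample it.
++
++ uint32_t deq;
++
++ FM_ModifyCounter(h_Fm, e_FM_COUNTERS_DEQ_TOTAL_FRAME, 0);  // reset
++ // ... traffic runs ...
++ deq = FM_GetCounter(h_Fm, e_FM_COUNTERS_DEQ_TOTAL_FRAME);
++*//***************************************************************************/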
++
++/**************************************************************************//**
++ @Function FM_Resume
++
++ @Description Releases the FM after a halt FM command or after an unrecoverable ECC error.
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return None.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++void FM_Resume(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_SetDmaEmergency
++
++ @Description Manual emergency set
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] muramPort MURAM direction select.
++ @Param[in] enable TRUE to manually enable emergency, FALSE to disable.
++
++ @Return None.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++void FM_SetDmaEmergency(t_Handle h_Fm, e_FmDmaMuramPort muramPort, bool enable);
++
++/**************************************************************************//**
++ @Function FM_SetDmaExtBusPri
++
++ @Description Set the DMA external bus priority
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] pri External bus priority select
++
++ @Return None.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++void FM_SetDmaExtBusPri(t_Handle h_Fm, e_FmDmaExtBusPri pri);
++
++/**************************************************************************//**
++ @Function FM_GetDmaStatus
++
++ @Description Reads the DMA current status
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[out] p_FmDmaStatus A structure of DMA status parameters.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++void FM_GetDmaStatus(t_Handle h_Fm, t_FmDmaStatus *p_FmDmaStatus);
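++
++/**************************************************************************//*
++ Usage sketch (illustrative only): several t_FmDmaStatus fields are only
++ valid for a given FM revision, so the revision is read first.
++
++ t_FmRevisionInfo rev;
++ t_FmDmaStatus dmaStatus;
++
++ FM_GetRevision(h_Fm, &rev);
++ FM_GetDmaStatus(h_Fm, &dmaStatus);
++ if ((rev.majorRev < 6) && dmaStatus.readBufEccError)
++     ;  // handle double ECC error on buffer read (valid for FM rev < 6)
++*//***************************************************************************/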
++
++/**************************************************************************//**
++ @Function FM_ErrorIsr
++
++ @Description FM interrupt-service-routine for errors.
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success; E_EMPTY if no errors found in register, other
++ error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ErrorIsr(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_EventIsr
++
++ @Description FM interrupt-service-routine for normal events.
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++void FM_EventIsr(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_GetSpecialOperationCoding
++
++ @Description Return a specific coding according to the input mask.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] spOper special operation mask.
++ @Param[out] p_SpOperCoding special operation code.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++t_Error FM_GetSpecialOperationCoding(t_Handle h_Fm,
++ fmSpecialOperations_t spOper,
++ uint8_t *p_SpOperCoding);
++
++/**************************************************************************//**
++ @Function FM_CtrlMonStart
++
++ @Description Start monitoring utilization of all available FM controllers.
++
++ In order to obtain FM controllers utilization the following sequence
++ should be used:
++ -# FM_CtrlMonStart()
++ -# FM_CtrlMonStop()
++ -# FM_CtrlMonGetCounters() - issued for each FM controller
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID).
++*//***************************************************************************/
++t_Error FM_CtrlMonStart(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_CtrlMonStop
++
++ @Description Stop monitoring utilization of all available FM controllers.
++
++ In order to obtain FM controllers utilization the following sequence
++ should be used:
++ -# FM_CtrlMonStart()
++ -# FM_CtrlMonStop()
++ -# FM_CtrlMonGetCounters() - issued for each FM controller
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID).
++*//***************************************************************************/
++t_Error FM_CtrlMonStop(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_CtrlMonGetCounters
++
++ @Description Obtain FM controller utilization parameters.
++
++ In order to obtain FM controllers utilization the following sequence
++ should be used:
++ -# FM_CtrlMonStart()
++ -# FM_CtrlMonStop()
++ -# FM_CtrlMonGetCounters() - issued for each FM controller
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] fmCtrlIndex FM Controller index for which utilization results
++ are requested.
++ @Param[in] p_Mon Pointer to utilization results structure.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID).
++*//***************************************************************************/
++t_Error FM_CtrlMonGetCounters(t_Handle h_Fm, uint8_t fmCtrlIndex, t_FmCtrlMon *p_Mon);
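++
++/**************************************************************************//*
++ Usage sketch (illustrative only) of the start/stop/get sequence described
++ above; NUM_OF_FM_CTRLS is a hypothetical name standing for the number of
++ controllers on the given device.
++
++ uint8_t i;
++ t_FmCtrlMon mon;
++
++ FM_CtrlMonStart(h_Fm);
++ // ... let traffic run over the measurement window ...
++ FM_CtrlMonStop(h_Fm);
++ for (i = 0; i < NUM_OF_FM_CTRLS; i++)
++     if (FM_CtrlMonGetCounters(h_Fm, i, &mon) == E_OK)
++         ;  // mon.percentCnt[] now holds the utilization figures
++*//***************************************************************************/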
++
++
++/**************************************************************************//*
++ @Function FM_ForceIntr
++
++ @Description Causes an interrupt event on the requested source.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] exception An exception to be forced.
++
++ @Return E_OK on success; Error code if the exception is not enabled,
++ or is not able to create interrupt.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_ForceIntr (t_Handle h_Fm, e_FmExceptions exception);
++
++/**************************************************************************//*
++ @Function FM_SetPortsBandwidth
++
++ @Description Sets relative weights between ports when accessing common resources.
++
++ @Param[in] h_Fm A handle to an FM Module.
++ @Param[in] p_PortsBandwidth A structure of ports bandwidths in percentage, i.e.
++ total must equal 100.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_SetPortsBandwidth(t_Handle h_Fm, t_FmPortsBandwidthParams *p_PortsBandwidth);
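++
++/**************************************************************************//*
++ Usage sketch (illustrative only): the listed percentages must sum to 100.
++ The e_FM_PORT_TYPE_* values are assumed to be declared elsewhere in the
++ driver headers; the ids and percentages are made up.
++
++ t_FmPortsBandwidthParams bw;
++
++ memset(&bw, 0, sizeof(bw));
++ bw.numOfPorts = 2;
++ bw.portsBandwidths[0].type           = e_FM_PORT_TYPE_RX;  // assumed value
++ bw.portsBandwidths[0].relativePortId = 0;
++ bw.portsBandwidths[0].bandwidth      = 60;
++ bw.portsBandwidths[1].type           = e_FM_PORT_TYPE_TX;  // assumed value
++ bw.portsBandwidths[1].relativePortId = 0;
++ bw.portsBandwidths[1].bandwidth      = 40;
++ FM_SetPortsBandwidth(h_Fm, &bw);
++*//***************************************************************************/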
++
++/**************************************************************************//*
++ @Function FM_GetMuramHandle
++
++ @Description Gets the corresponding MURAM handle
++
++ @Param[in] h_Fm A handle to an FM Module.
++
++ @Return MURAM handle; NULL otherwise.
++
++ @Cautions Allowed only following FM_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Handle FM_GetMuramHandle(t_Handle h_Fm);
++
++/** @} */ /* end of FM_runtime_control_grp group */
++/** @} */ /* end of FM_lib_grp group */
++/** @} */ /* end of FM_grp group */
++
++
++#ifdef NCSW_BACKWARD_COMPATIBLE_API
++typedef t_FmFirmwareParams t_FmPcdFirmwareParams;
++typedef t_FmBufferPrefixContent t_FmPortBufferPrefixContent;
++typedef t_FmExtPoolParams t_FmPortExtPoolParams;
++typedef t_FmExtPools t_FmPortExtPools;
++typedef t_FmBackupBmPools t_FmPortBackupBmPools;
++typedef t_FmBufPoolDepletion t_FmPortBufPoolDepletion;
++typedef e_FmDmaSwapOption e_FmPortDmaSwapOption;
++typedef e_FmDmaCacheOption e_FmPortDmaCacheOption;
++
++#define FM_CONTEXTA_GET_OVVERIDE FM_CONTEXTA_GET_OVERRIDE
++#define FM_CONTEXTA_SET_OVVERIDE FM_CONTEXTA_SET_OVERRIDE
++
++#define e_FM_EX_BMI_PIPELINE_ECC e_FM_EX_BMI_STORAGE_PROFILE_ECC
++#define e_FM_PORT_DMA_NO_SWP e_FM_DMA_NO_SWP
++#define e_FM_PORT_DMA_SWP_PPC_LE e_FM_DMA_SWP_PPC_LE
++#define e_FM_PORT_DMA_SWP_BE e_FM_DMA_SWP_BE
++#define e_FM_PORT_DMA_NO_STASH e_FM_DMA_NO_STASH
++#define e_FM_PORT_DMA_STASH e_FM_DMA_STASH
++#endif /* NCSW_BACKWARD_COMPATIBLE_API */
++
++
++#endif /* __FM_EXT */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_mac_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_mac_ext.h
+new file mode 100644
+index 00000000..da7e0463
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_mac_ext.h
+@@ -0,0 +1,859 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File fm_mac_ext.h
++
++ @Description FM MAC API functions, definitions and enums
++*//***************************************************************************/
++#ifndef __FM_MAC_EXT_H
++#define __FM_MAC_EXT_H
++
++#include "std_ext.h"
++#include "enet_ext.h"
++
++
++/**************************************************************************//**
++
++ @Group FM_grp Frame Manager API
++
++ @Description FM API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_mac_grp FM MAC
++
++ @Description FM MAC API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++#define FM_MAC_NO_PFC 0xff
++
++
++/**************************************************************************//**
++ @Description FM MAC Exceptions
++*//***************************************************************************/
++typedef enum e_FmMacExceptions {
++ e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO = 0 /**< 10GEC MDIO scan event interrupt */
++ ,e_FM_MAC_EX_10G_MDIO_CMD_CMPL /**< 10GEC MDIO command completion interrupt */
++ ,e_FM_MAC_EX_10G_REM_FAULT /**< 10GEC, mEMAC Remote fault interrupt */
++ ,e_FM_MAC_EX_10G_LOC_FAULT /**< 10GEC, mEMAC Local fault interrupt */
++ ,e_FM_MAC_EX_10G_1TX_ECC_ER /**< 10GEC, mEMAC Transmit frame ECC error interrupt */
++ ,e_FM_MAC_EX_10G_TX_FIFO_UNFL /**< 10GEC, mEMAC Transmit FIFO underflow interrupt */
++ ,e_FM_MAC_EX_10G_TX_FIFO_OVFL /**< 10GEC, mEMAC Transmit FIFO overflow interrupt */
++ ,e_FM_MAC_EX_10G_TX_ER /**< 10GEC Transmit frame error interrupt */
++ ,e_FM_MAC_EX_10G_RX_FIFO_OVFL /**< 10GEC, mEMAC Receive FIFO overflow interrupt */
++ ,e_FM_MAC_EX_10G_RX_ECC_ER /**< 10GEC, mEMAC Receive frame ECC error interrupt */
++ ,e_FM_MAC_EX_10G_RX_JAB_FRM /**< 10GEC Receive jabber frame interrupt */
++ ,e_FM_MAC_EX_10G_RX_OVRSZ_FRM /**< 10GEC Receive oversized frame interrupt */
++ ,e_FM_MAC_EX_10G_RX_RUNT_FRM /**< 10GEC Receive runt frame interrupt */
++ ,e_FM_MAC_EX_10G_RX_FRAG_FRM /**< 10GEC Receive fragment frame interrupt */
++ ,e_FM_MAC_EX_10G_RX_LEN_ER /**< 10GEC Receive payload length error interrupt */
++ ,e_FM_MAC_EX_10G_RX_CRC_ER /**< 10GEC Receive CRC error interrupt */
++ ,e_FM_MAC_EX_10G_RX_ALIGN_ER /**< 10GEC Receive alignment error interrupt */
++ ,e_FM_MAC_EX_1G_BAB_RX /**< dTSEC Babbling receive error */
++ ,e_FM_MAC_EX_1G_RX_CTL /**< dTSEC Receive control (pause frame) interrupt */
++ ,e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET /**< dTSEC Graceful transmit stop complete */
++ ,e_FM_MAC_EX_1G_BAB_TX /**< dTSEC Babbling transmit error */
++ ,e_FM_MAC_EX_1G_TX_CTL /**< dTSEC Transmit control (pause frame) interrupt */
++ ,e_FM_MAC_EX_1G_TX_ERR /**< dTSEC Transmit error */
++ ,e_FM_MAC_EX_1G_LATE_COL /**< dTSEC Late collision */
++ ,e_FM_MAC_EX_1G_COL_RET_LMT /**< dTSEC Collision retry limit */
++ ,e_FM_MAC_EX_1G_TX_FIFO_UNDRN /**< dTSEC Transmit FIFO underrun */
++ ,e_FM_MAC_EX_1G_MAG_PCKT /**< dTSEC Magic Packet detection */
++ ,e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET /**< dTSEC MII management read completion */
++ ,e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET /**< dTSEC MII management write completion */
++ ,e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET /**< dTSEC Graceful receive stop complete */
++ ,e_FM_MAC_EX_1G_TX_DATA_ERR /**< dTSEC Internal data error on transmit */
++ ,e_FM_MAC_EX_1G_RX_DATA_ERR /**< dTSEC Internal data error on receive */
++ ,e_FM_MAC_EX_1G_1588_TS_RX_ERR /**< dTSEC Time-Stamp Receive Error */
++ ,e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL /**< dTSEC MIB counter overflow */
++ ,e_FM_MAC_EX_TS_FIFO_ECC_ERR /**< mEMAC Time-stamp FIFO ECC error interrupt;
++ not supported on T4240/B4860 rev1 chips */
++ ,e_FM_MAC_EX_MAGIC_PACKET_INDICATION = e_FM_MAC_EX_1G_MAG_PCKT
++ /**< mEMAC Magic Packet Indication Interrupt */
++} e_FmMacExceptions;
++
++/**************************************************************************//**
++ @Description FM MAC statistics level
++*//***************************************************************************/
++typedef enum e_FmMacStatisticsLevel {
++ e_FM_MAC_NONE_STATISTICS = 0, /**< No statistics */
++ e_FM_MAC_PARTIAL_STATISTICS, /**< Only error counters are available; Optimized for performance */
++ e_FM_MAC_FULL_STATISTICS /**< All counters available; Not optimized for performance */
++} e_FmMacStatisticsLevel;
++
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Priority Flow Control Parameters
++*//***************************************************************************/
++typedef struct t_FmMacPfcParams {
++ bool pfcEnable; /**< Enable/Disable PFC */
++
++ uint16_t pauseQuanta[FM_MAX_NUM_OF_PFC_PRIORITIES]; /**< Pause quanta per priority to be sent in a pause frame. Each quantum represents 512 bit-times */
++
++ uint16_t pauseThresholdQuanta[FM_MAX_NUM_OF_PFC_PRIORITIES];/**< Pause threshold per priority; when the timer passes this threshold, a PFC frame is sent again if the port is still congested or a BM pool is in depletion */
++
++} t_FmMacPfcParams;
++#endif /* (DPAA_VERSION >= 11) */
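++
++/**************************************************************************//*
++ Usage sketch (illustrative only, DPAA_VERSION >= 11; the quanta values are
++ arbitrary examples, and the routine that consumes the structure is
++ declared elsewhere in the driver):
++
++ t_FmMacPfcParams pfc;
++ int i;
++
++ pfc.pfcEnable = TRUE;
++ for (i = 0; i < FM_MAX_NUM_OF_PFC_PRIORITIES; i++) {
++     pfc.pauseQuanta[i]          = 0xf000;  // example value
++     pfc.pauseThresholdQuanta[i] = 0x8000;  // example value
++ }
++*//***************************************************************************/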
++
++/**************************************************************************//**
++ @Function t_FmMacExceptionCallback
++
++ @Description Fm Mac Exception Callback from FM MAC to the user
++
++ @Param[in] h_App - Handle to the upper layer handler
++
++ @Param[in] exceptions - The exception that occurred
++
++ @Return void.
++*//***************************************************************************/
++typedef void (t_FmMacExceptionCallback)(t_Handle h_App, e_FmMacExceptions exceptions);
++
++
++/**************************************************************************//**
++ @Description FM MAC statistics (RFC 3635)
++*//***************************************************************************/
++typedef struct t_FmMacStatistics {
++/* RMON */
++ uint64_t eStatPkts64; /**< r-10G tr-DT 64 byte frame counter */
++ uint64_t eStatPkts65to127; /**< r-10G 65 to 127 byte frame counter */
++ uint64_t eStatPkts128to255; /**< r-10G 128 to 255 byte frame counter */
++ uint64_t eStatPkts256to511; /**< r-10G 256 to 511 byte frame counter */
++ uint64_t eStatPkts512to1023; /**< r-10G 512 to 1023 byte frame counter */
++ uint64_t eStatPkts1024to1518; /**< r-10G 1024 to 1518 byte frame counter */
++ uint64_t eStatPkts1519to1522; /**< r-10G 1519 to 1522 byte good frame count */
++/* */
++ uint64_t eStatFragments; /**< Total number of packets that were less than 64 octets long with a wrong CRC.*/
++ uint64_t eStatJabbers; /**< Total number of packets longer than valid maximum length octets */
++ uint64_t eStatsDropEvents; /**< Number of dropped packets due to internal errors of the MAC client (during receive). */
++ uint64_t eStatCRCAlignErrors; /**< Incremented when frames of correct length but with CRC error are received.*/
++ uint64_t eStatUndersizePkts; /**< Incremented for frames under 64 bytes with a valid FCS and otherwise well formed;
++ This count does not include range length errors */
++ uint64_t eStatOversizePkts; /**< Incremented for frames which exceed 1518 (non VLAN) or 1522 (VLAN) and contains
++ a valid FCS and otherwise well formed */
++/* Pause */
++ uint64_t teStatPause; /**< Pause MAC Control received */
++ uint64_t reStatPause; /**< Pause MAC Control sent */
++/* MIB II */
++ uint64_t ifInOctets; /**< Total number of bytes received. */
++ uint64_t ifInPkts; /**< Total number of packets received.*/
++ uint64_t ifInUcastPkts; /**< Total number of unicast frames received;
++ NOTE: this counter is not supported on dTSEC MAC */
++ uint64_t ifInMcastPkts; /**< Total number of multicast frames received*/
++ uint64_t ifInBcastPkts; /**< Total number of broadcast frames received */
++ uint64_t ifInDiscards; /**< Frames received, but discarded due to problems within the MAC RX. */
++ uint64_t ifInErrors; /**< Number of frames received with error:
++ - FIFO Overflow Error
++ - CRC Error
++ - Frame Too Long Error
++ - Alignment Error
++ - The dedicated Error Code (0xfe, not a code error) was received */
++ uint64_t ifOutOctets; /**< Total number of bytes sent. */
++ uint64_t ifOutPkts; /**< Total number of packets sent.*/
++ uint64_t ifOutUcastPkts; /**< Total number of unicast frames sent;
++ NOTE: this counter is not supported on dTSEC MAC */
++ uint64_t ifOutMcastPkts; /**< Total number of multicast frames sent */
++ uint64_t ifOutBcastPkts; /**< Total number of broadcast frames sent */
++ uint64_t ifOutDiscards; /**< Frames discarded due to problems within the MAC TX (N/A).*/
++ uint64_t ifOutErrors; /**< Number of frames transmitted with error:
++ - FIFO Overflow Error
++ - FIFO Underflow Error
++ - Other */
++} t_FmMacStatistics;
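++
++/**************************************************************************//*
++ Usage sketch (illustrative only): deriving a receive error ratio from the
++ MIB II counters. FM_MAC_GetStatistics() is assumed to be the retrieval
++ routine declared further down in this header.
++
++ t_FmMacStatistics stats;
++ uint64_t errPerMillion = 0;
++
++ if ((FM_MAC_GetStatistics(h_FmMac, &stats) == E_OK) && stats.ifInPkts)
++     errPerMillion = (stats.ifInErrors * 1000000) / stats.ifInPkts;
++*//***************************************************************************/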
++
++
++/**************************************************************************//**
++ @Group FM_mac_init_grp FM MAC Initialization Unit
++
++ @Description FM MAC Initialization Unit
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description FM MAC config input
++*//***************************************************************************/
++typedef struct t_FmMacParams {
++ uintptr_t baseAddr; /**< Base of memory mapped FM MAC registers */
++ t_EnetAddr addr; /**< MAC address of device; First octet is sent first */
++ uint8_t macId; /**< MAC ID;
++ numbering of dTSEC and 1G-mEMAC:
++ 0 - FM_MAX_NUM_OF_1G_MACS;
++ numbering of 10G-MAC (TGEC) and 10G-mEMAC:
++ 0 - FM_MAX_NUM_OF_10G_MACS */
++ e_EnetMode enetMode; /**< Ethernet operation mode (MAC-PHY interface and speed);
++ Note that the speed should indicate the maximum rate that
++ this MAC should support rather than the actual speed;
++ i.e. user should use the FM_MAC_AdjustLink() routine to
++ provide accurate speed;
++ In case of mEMAC RGMII mode, the MAC is configured to RGMII
++ automatic mode, where actual speed/duplex mode information
++ is provided by PHY automatically in-band; FM_MAC_AdjustLink()
++ function should be used to switch to manual RGMII speed/duplex mode
++ configuration if RGMII PHY doesn't support in-band status signaling;
++ In addition, in mEMAC, in case where user is using the higher MACs
++ (i.e. the MACs that should support 10G), user should pass here
++ speed=10000 even if the interface is not allowing that (e.g. SGMII). */
++ t_Handle h_Fm; /**< A handle to the FM object this port related to */
++ int mdioIrq; /**< MDIO exceptions interrupt source - not valid for all
++ MACs; MUST be set to 'NO_IRQ' for MACs that don't have
++ mdio-irq, or for polling */
++ t_FmMacExceptionCallback *f_Event; /**< MDIO Events Callback Routine */
++ t_FmMacExceptionCallback *f_Exception; /**< Exception Callback Routine */
++ t_Handle h_App; /**< A handle to an application layer object; This handle will
++ be passed by the driver upon calling the above callbacks */
++} t_FmMacParams;
++
++
++/**************************************************************************//**
++ @Function FM_MAC_Config
++
++ @Description Creates descriptor for the FM MAC module.
++
++ The routine returns a handle (descriptor) to the FM MAC object.
++ This descriptor must be passed as first parameter to all other
++ FM MAC function calls.
++
++ No actual initialization or configuration of FM MAC hardware is
++ done by this routine.
++
++ @Param[in] p_FmMacParam - Pointer to data structure of parameters
++
++ @Retval Handle to FM MAC object, or NULL on failure.
++*//***************************************************************************/
++t_Handle FM_MAC_Config(t_FmMacParams *p_FmMacParam);
++
++/**************************************************************************//**
++ @Function FM_MAC_Init
++
++ @Description Initializes the FM MAC module
++
++ @Param[in] h_FmMac - FM module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MAC_Init(t_Handle h_FmMac);
++
++/**************************************************************************//**
++ @Function FM_MAC_Free
++
++ @Description Frees all resources that were assigned to FM MAC module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmMac - FM module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MAC_Free(t_Handle h_FmMac);
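++
++/**************************************************************************//*
++ Usage sketch (illustrative only) of the config/init/free lifecycle; only a
++ subset of t_FmMacParams is filled in and the values are made up
++ (macRegsBase is assumed to be mapped elsewhere). Advanced FM_MAC_Config*()
++ calls belong between FM_MAC_Config() and FM_MAC_Init().
++
++ t_FmMacParams params;
++ t_Handle h_FmMac;
++
++ memset(&params, 0, sizeof(params));
++ params.baseAddr = macRegsBase;  // assumed, mapped elsewhere
++ params.macId    = 0;
++ params.h_Fm     = h_Fm;
++ params.mdioIrq  = NO_IRQ;
++ h_FmMac = FM_MAC_Config(&params);
++ if (h_FmMac) {
++     FM_MAC_ConfigMaxFrameLength(h_FmMac, 1522);  // example value
++     if (FM_MAC_Init(h_FmMac) != E_OK)
++         FM_MAC_Free(h_FmMac);
++ }
++*//***************************************************************************/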
++
++
++/**************************************************************************//**
++ @Group FM_mac_advanced_init_grp FM MAC Advanced Configuration Unit
++
++ @Description Configuration functions used to change default values.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_MAC_ConfigResetOnInit
++
++ @Description   Tell the driver whether or not to reset the FM MAC before
++                 initialization. This changes the default configuration [DEFAULT_resetOnInit].
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in]     enable     When TRUE, the FM MAC will be reset before any initialization.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ConfigResetOnInit(t_Handle h_FmMac, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MAC_ConfigLoopback
++
++ @Description Enable/Disable internal loopback mode
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in] enable TRUE to enable or FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ConfigLoopback(t_Handle h_FmMac, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MAC_ConfigMaxFrameLength
++
++ @Description   Set up the maximum Rx frame length (on 1G MACs this also affects Tx)
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in] newVal MAX Frame length
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ConfigMaxFrameLength(t_Handle h_FmMac, uint16_t newVal);
++
++/**************************************************************************//**
++ @Function FM_MAC_ConfigWan
++
++ @Description   Enable WAN mode in the 10G-MAC
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in] enable TRUE to enable or FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ConfigWan(t_Handle h_FmMac, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MAC_ConfigPadAndCrc
++
++ @Description   Configure PAD and CRC mode
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in] enable TRUE to enable or FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
++ Not supported on 10G-MAC (i.e. CRC & PAD are added automatically
++ by HW); on mEMAC, this routine supports only PAD (i.e. CRC is
++ added automatically by HW).
++*//***************************************************************************/
++t_Error FM_MAC_ConfigPadAndCrc(t_Handle h_FmMac, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MAC_ConfigHalfDuplex
++
++ @Description   Configure half-duplex mode
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in] enable TRUE to enable or FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ConfigHalfDuplex(t_Handle h_FmMac, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MAC_ConfigTbiPhyAddr
++
++ @Description   Configures the address of the internal TBI PHY.
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in] newVal TBI PHY address (1-31).
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ConfigTbiPhyAddr(t_Handle h_FmMac, uint8_t newVal);
++
++/**************************************************************************//**
++ @Function FM_MAC_ConfigLengthCheck
++
++ @Description Configure the frame length checking.
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in] enable TRUE to enable or FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ConfigLengthCheck(t_Handle h_FmMac, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MAC_ConfigException
++
++ @Description Change Exception selection from default
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in] ex Type of the desired exceptions
++ @Param[in] enable TRUE to enable the specified exception, FALSE to disable it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ConfigException(t_Handle h_FmMac, e_FmMacExceptions ex, bool enable);
++
++#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++t_Error FM_MAC_ConfigSkipFman11Workaround (t_Handle h_FmMac);
++#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
++/** @} */ /* end of FM_mac_advanced_init_grp group */
++/** @} */ /* end of FM_mac_init_grp group */
++
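++/*
++ * Illustrative ordering sketch: the advanced configuration routines above
++ * are only legal between FM_MAC_Config() and FM_MAC_Init(); the values
++ * shown are examples.
++ *
++ *     t_Handle h_FmMac = FM_MAC_Config(&macParams);
++ *
++ *     FM_MAC_ConfigResetOnInit(h_FmMac, TRUE);      // reset MAC on init
++ *     FM_MAC_ConfigMaxFrameLength(h_FmMac, 9600);   // jumbo frames
++ *     FM_MAC_ConfigPadAndCrc(h_FmMac, TRUE);
++ *     FM_MAC_Init(h_FmMac);                         // settings frozen here
++ */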
++
++/**************************************************************************//**
++ @Group FM_mac_runtime_control_grp FM MAC Runtime Control Unit
++
++ @Description FM MAC Runtime control unit API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_MAC_Enable
++
++ @Description Enable the MAC
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++ @Param[in] mode Mode of operation (RX, TX, Both)
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_Enable(t_Handle h_FmMac, e_CommMode mode);
++
++/**************************************************************************//**
++ @Function FM_MAC_Disable
++
++ @Description   Disable the MAC
++
++ @Param[in]     h_FmMac    A handle to a FM MAC Module.
++ @Param[in]     mode       Defines which part to disable (RX, TX or both)
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_Disable(t_Handle h_FmMac, e_CommMode mode);
++
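++/*
++ * Illustrative sketch: bring the MAC up in both directions, then quiesce
++ * only the receive side (the e_CommMode constants are assumed from the
++ * generic enet definitions of this SDK).
++ *
++ *     FM_MAC_Enable(h_FmMac, e_COMM_MODE_RX_AND_TX);
++ *     ...
++ *     FM_MAC_Disable(h_FmMac, e_COMM_MODE_RX);
++ */
++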
++/**************************************************************************//**
++ @Function FM_MAC_Resume
++
++ @Description   Re-initialize the MAC after suspend
++
++ @Param[in] h_FmMac A handle to a FM MAC Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_Resume(t_Handle h_FmMac);
++
++/**************************************************************************//**
++ @Function FM_MAC_Enable1588TimeStamp
++
++ @Description Enables the TSU operation.
++
++ @Param[in] h_Fm - Handle to the PTP as returned from the FM_MAC_PtpConfig.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_Enable1588TimeStamp(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_MAC_Disable1588TimeStamp
++
++ @Description Disables the TSU operation.
++
++ @Param[in] h_Fm - Handle to the PTP as returned from the FM_MAC_PtpConfig.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_Disable1588TimeStamp(t_Handle h_Fm);
++
++/**************************************************************************//**
++ @Function FM_MAC_SetTxAutoPauseFrames
++
++ @Description Enable/Disable transmission of Pause-Frames.
++ The routine changes the default configuration [DEFAULT_TX_PAUSE_TIME].
++
++ @Param[in] h_FmMac - A handle to a FM MAC Module.
++ @Param[in] pauseTime - Pause quanta value used with transmitted pause frames.
++                             Each quantum represents 512 bit-times; note that a value
++                             of '0' disables transmission of pause frames.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_SetTxAutoPauseFrames(t_Handle h_FmMac,
++ uint16_t pauseTime);
++
++/**************************************************************************//**
++ @Function FM_MAC_SetTxPauseFrames
++
++ @Description Enable/Disable transmission of Pause-Frames.
++ The routine changes the default configuration:
++ pause-time - [DEFAULT_TX_PAUSE_TIME]
++ threshold-time - [0]
++
++ @Param[in] h_FmMac - A handle to a FM MAC Module.
++ @Param[in] priority - the PFC class of service; use 'FM_MAC_NO_PFC'
++ to indicate legacy pause support (i.e. no PFC).
++ @Param[in] pauseTime - Pause quanta value used with transmitted pause frames.
++                             Each quantum represents 512 bit-times; note that a value
++                             of '0' disables transmission of pause frames.
++ @Param[in]     threshTime  - Pause threshold quanta value used by the MAC to retransmit a pause frame:
++                             if the condition that caused a pause frame to be sent has not cleared by the
++                             time the timer reaches the threshold quanta, the MAC retransmits the pause frame.
++                             Each quantum represents 512 bit-times.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++                For PFC to work properly, the user must configure
++                TNUM-aging in the Tx port; it is also recommended that pre-fetch
++                and rate limiting in the Tx port be disabled.
++                PFC is supported only on the newer mEMAC; i.e. for MACs without
++                PFC support (10G-MAC and dTSEC), the user should pass 'FM_MAC_NO_PFC'
++                in the 'priority' field.
++*//***************************************************************************/
++t_Error FM_MAC_SetTxPauseFrames(t_Handle h_FmMac,
++ uint8_t priority,
++ uint16_t pauseTime,
++ uint16_t threshTime);
++
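++/*
++ * Illustrative sketch: legacy (non-PFC) pause on a MAC without PFC
++ * support versus a PFC pause for class-of-service 3 on mEMAC; the quanta
++ * and threshold values are examples only.
++ *
++ *     // dTSEC/10G-MAC: legacy pause, maximum pause time, no threshold
++ *     FM_MAC_SetTxPauseFrames(h_FmMac, FM_MAC_NO_PFC, 0xFFFF, 0);
++ *
++ *     // mEMAC: PFC pause on priority 3, retransmit at half the quanta
++ *     FM_MAC_SetTxPauseFrames(h_Memac, 3, 0x0400, 0x0200);
++ */
++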
++/**************************************************************************//**
++ @Function FM_MAC_SetRxIgnorePauseFrames
++
++ @Description Enable/Disable ignoring of Pause-Frames.
++
++ @Param[in] h_FmMac - A handle to a FM MAC Module.
++ @Param[in]     en         - Boolean indicating whether to ignore incoming pause
++                             frames.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_SetRxIgnorePauseFrames(t_Handle h_FmMac, bool en);
++
++/**************************************************************************//**
++ @Function FM_MAC_SetWakeOnLan
++
++ @Description Enable/Disable Wake On Lan support
++
++ @Param[in] h_FmMac - A handle to a FM MAC Module.
++ @Param[in]     en         - Boolean indicating whether to enable Wake-on-LAN
++                             support.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_SetWakeOnLan(t_Handle h_FmMac, bool en);
++
++/**************************************************************************//**
++ @Function FM_MAC_ResetCounters
++
++ @Description   Reset all statistics counters
++
++ @Param[in] h_FmMac - A handle to a FM MAC Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ResetCounters(t_Handle h_FmMac);
++
++/**************************************************************************//**
++ @Function FM_MAC_SetException
++
++ @Description Enable/Disable a specific Exception
++
++ @Param[in] h_FmMac - A handle to a FM MAC Module.
++ @Param[in] ex - Type of the desired exceptions
++ @Param[in] enable - TRUE to enable the specified exception, FALSE to disable it.
++
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_SetException(t_Handle h_FmMac, e_FmMacExceptions ex, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MAC_SetStatistics
++
++ @Description Define Statistics level.
++ Where applicable, the routine also enables the MIB counters
++ overflow interrupt in order to keep counters accurate
++ and account for overflows.
++ This routine is relevant only for dTSEC.
++
++ @Param[in] h_FmMac - A handle to a FM MAC Module.
++ @Param[in] statisticsLevel - Full statistics level provides all standard counters but may
++ reduce performance. Partial statistics provides only special
++ event counters (errors etc.). If selected, regular counters (such as
++ byte/packet) will be invalid and will return -1.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_SetStatistics(t_Handle h_FmMac, e_FmMacStatisticsLevel statisticsLevel);
++
++/**************************************************************************//**
++ @Function FM_MAC_GetStatistics
++
++ @Description   Get all statistics counters
++
++ @Param[in] h_FmMac - A handle to a FM MAC Module.
++ @Param[out]    p_Statistics - Structure to be filled with statistics
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions      Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_GetStatistics(t_Handle h_FmMac, t_FmMacStatistics *p_Statistics);
++
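++/*
++ * Illustrative sketch: select full dTSEC statistics after init and read
++ * the counters back at runtime (e_FM_MAC_FULL_STATISTICS is the assumed
++ * spelling of the full-statistics level).
++ *
++ *     t_FmMacStatistics stats;
++ *
++ *     FM_MAC_SetStatistics(h_FmMac, e_FM_MAC_FULL_STATISTICS);
++ *     if (FM_MAC_GetStatistics(h_FmMac, &stats) == E_OK)
++ *         ...                                    // inspect the counters
++ */
++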
++/**************************************************************************//**
++ @Function FM_MAC_ModifyMacAddr
++
++ @Description Replace the main MAC Address
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[in] p_EnetAddr - Ethernet Mac address
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_ModifyMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++
++/**************************************************************************//**
++ @Function FM_MAC_AddHashMacAddr
++
++ @Description   Add an address to the hash table. This is for filtering purposes only.
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[in] p_EnetAddr - Ethernet Mac address
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions      Allowed only following FM_MAC_Init(). It is a filter-only address.
++ @Cautions      Some addresses may need to be filtered out in upper FM blocks.
++*//***************************************************************************/
++t_Error FM_MAC_AddHashMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++
++/**************************************************************************//**
++ @Function FM_MAC_RemoveHashMacAddr
++
++ @Description   Delete an address from the hash table. This is for filtering purposes only.
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[in] p_EnetAddr - Ethernet Mac address
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_RemoveHashMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++
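++/*
++ * Illustrative sketch: join and later leave a multicast group via the
++ * hash filter (the group address is an example).
++ *
++ *     t_EnetAddr mcast = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
++ *
++ *     FM_MAC_AddHashMacAddr(h_FmMac, &mcast);
++ *     ...
++ *     FM_MAC_RemoveHashMacAddr(h_FmMac, &mcast);
++ */
++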
++/**************************************************************************//**
++ @Function FM_MAC_AddExactMatchMacAddr
++
++ @Description   Add a unicast or multicast MAC address for exact-match filtering
++                 (8 entries on dTSEC, 2 on 10G-MAC)
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[in] p_EnetAddr - MAC Address to ADD
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_AddExactMatchMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++
++/**************************************************************************//**
++ @Function FM_MAC_RemovelExactMatchMacAddr
++
++ @Description   Remove a unicast or multicast MAC address.
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[in] p_EnetAddr - MAC Address to remove
++
++ @Return        E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_RemovelExactMatchMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
++
++/**************************************************************************//**
++ @Function FM_MAC_SetPromiscuous
++
++ @Description Enable/Disable MAC Promiscuous mode for ALL mac addresses.
++
++ @Param[in] h_FmMac - A handle to a FM MAC Module.
++ @Param[in] enable - TRUE to enable or FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_SetPromiscuous(t_Handle h_FmMac, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MAC_AdjustLink
++
++ @Description Adjusts the Ethernet link with new speed/duplex setup.
++ This routine is relevant for dTSEC and mEMAC.
++ In case of mEMAC, this routine is also used for manual
++ re-configuration of RGMII speed and duplex mode for
++ RGMII PHYs not supporting in-band status information
++ to MAC.
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[in] speed - Ethernet speed.
++ @Param[in] fullDuplex - TRUE for full-duplex mode;
++ FALSE for half-duplex mode.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MAC_AdjustLink(t_Handle h_FmMac, e_EnetSpeed speed, bool fullDuplex);
++
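++/*
++ * Illustrative sketch: force 100 Mb/s full-duplex on an RGMII PHY that
++ * does not supply in-band status (the e_ENET_SPEED_100 constant is
++ * assumed from the generic enet definitions).
++ *
++ *     FM_MAC_AdjustLink(h_FmMac, e_ENET_SPEED_100, TRUE);
++ */
++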
++/**************************************************************************//**
++ @Function FM_MAC_RestartAutoneg
++
++ @Description Restarts the auto-negotiation process.
++                 When the auto-negotiation process is invoked under traffic,
++                 auto-negotiation between the internal SGMII PHY and the
++                 external PHY does not always complete successfully. Calling this
++                 function restarts the auto-negotiation process so that it
++                 completes successfully. It is recommended to call this function
++                 after issuing an auto-negotiation restart command to the Ethernet PHY.
++ This routine is relevant only for dTSEC.
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MAC_RestartAutoneg(t_Handle h_FmMac);
++
++/**************************************************************************//**
++ @Function FM_MAC_GetId
++
++ @Description Return the MAC ID
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[out] p_MacId - MAC ID of device
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_GetId(t_Handle h_FmMac, uint32_t *p_MacId);
++
++/**************************************************************************//**
++ @Function FM_MAC_GetVesrion
++
++ @Description   Return the MAC HW chip version
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[out]    p_MacVresion - MAC version as defined by the chip
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_GetVesrion(t_Handle h_FmMac, uint32_t *p_MacVresion);
++
++/**************************************************************************//**
++ @Function FM_MAC_MII_WritePhyReg
++
++ @Description   Write data into a PHY register
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[in] phyAddr - Phy Address on the MII bus
++ @Param[in] reg - Register Number.
++ @Param[in] data - Data to write.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_MII_WritePhyReg(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t data);
++
++/**************************************************************************//**
++ @Function FM_MAC_MII_ReadPhyReg
++
++ @Description   Read data from a PHY register
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++ @Param[in] phyAddr - Phy Address on the MII bus
++ @Param[in] reg - Register Number.
++ @Param[out] p_Data - Data from PHY.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_MII_ReadPhyReg(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
++
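++/*
++ * Illustrative sketch: read-modify-write of the IEEE basic control
++ * register (register 0) over MDIO; bit 0x0200 requests an
++ * auto-negotiation restart.
++ *
++ *     uint16_t bmcr;
++ *
++ *     if (FM_MAC_MII_ReadPhyReg(h_FmMac, phyAddr, 0, &bmcr) == E_OK)
++ *         FM_MAC_MII_WritePhyReg(h_FmMac, phyAddr, 0, bmcr | 0x0200);
++ */
++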
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++/**************************************************************************//**
++ @Function FM_MAC_DumpRegs
++
++ @Description Dump internal registers
++
++ @Param[in] h_FmMac - A handle to a FM Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MAC_Init().
++*//***************************************************************************/
++t_Error FM_MAC_DumpRegs(t_Handle h_FmMac);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++/** @} */ /* end of FM_mac_runtime_control_grp group */
++/** @} */ /* end of FM_mac_grp group */
++/** @} */ /* end of FM_grp group */
++
++
++#endif /* __FM_MAC_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_macsec_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_macsec_ext.h
+new file mode 100644
+index 00000000..57925f10
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_macsec_ext.h
+@@ -0,0 +1,1271 @@
++/*
++ * Copyright 2008-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File fm_macsec_ext.h
++
++ @Description FM MACSEC ...
++*//***************************************************************************/
++#ifndef __FM_MACSEC_EXT_H
++#define __FM_MACSEC_EXT_H
++
++#include "std_ext.h"
++
++
++/**************************************************************************//**
++ @Group FM_grp Frame Manager API
++
++ @Description FM API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_MACSEC_grp FM MACSEC
++
++ @Description FM MACSEC API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description MACSEC Exceptions
++*//***************************************************************************/
++typedef enum e_FmMacsecExceptions {
++ e_FM_MACSEC_EX_SINGLE_BIT_ECC, /**< Single bit ECC error */
++ e_FM_MACSEC_EX_MULTI_BIT_ECC /**< Multi bit ECC error */
++} e_FmMacsecExceptions;
++
++
++/**************************************************************************//**
++ @Group FM_MACSEC_init_grp FM-MACSEC Initialization Unit
++
++ @Description FM MACSEC Initialization Unit
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function t_FmMacsecExceptionsCallback
++
++ @Description Exceptions user callback routine, will be called upon an
++ exception passing the exception identification.
++
++ @Param[in] h_App A handle to an application layer object; This handle
++ will be passed by the driver upon calling this callback.
++ @Param[in] exception The exception.
++*//***************************************************************************/
++typedef void (t_FmMacsecExceptionsCallback) ( t_Handle h_App,
++ e_FmMacsecExceptions exception);
++
++
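++/*
++ * Illustrative sketch of a user exception callback matching the
++ * prototype above (the recovery action is a placeholder).
++ *
++ *     static void MyMacsecExceptionCb(t_Handle h_App,
++ *                                     e_FmMacsecExceptions exception)
++ *     {
++ *         if (exception == e_FM_MACSEC_EX_MULTI_BIT_ECC)
++ *             ...          // uncorrectable ECC: report, consider re-init
++ *     }
++ */
++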
++/**************************************************************************//**
++ @Description FM MACSEC config input
++*//***************************************************************************/
++typedef struct t_FmMacsecParams {
++ t_Handle h_Fm; /**< A handle to the FM object related to */
++    bool                        guestMode;          /**< TRUE for a guest partition that does not own the MACSEC hardware */
++ union {
++ struct {
++ uint8_t fmMacId; /**< FM MAC id */
++ } guestParams;
++
++ struct {
++ uintptr_t baseAddr; /**< Base of memory mapped FM MACSEC registers */
++ t_Handle h_FmMac; /**< A handle to the FM MAC object related to */
++ t_FmMacsecExceptionsCallback *f_Exception; /**< Exception Callback Routine */
++ t_Handle h_App; /**< A handle to an application layer object; This handle will
++ be passed by the driver upon calling the above callbacks */
++ } nonGuestParams;
++ };
++} t_FmMacsecParams;
++
++/**************************************************************************//**
++ @Function FM_MACSEC_Config
++
++ @Description Creates descriptor for the FM MACSEC module;
++
++ The routine returns a handle (descriptor) to the FM MACSEC object;
++ This descriptor must be passed as first parameter to all other
++ FM MACSEC function calls;
++
++ No actual initialization or configuration of FM MACSEC hardware is
++ done by this routine.
++
++ @Param[in] p_FmMacsecParam Pointer to data structure of parameters.
++
++ @Retval Handle to FM MACSEC object, or NULL for Failure.
++*//***************************************************************************/
++t_Handle FM_MACSEC_Config(t_FmMacsecParams *p_FmMacsecParam);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_Init
++
++ @Description Initializes the FM MACSEC module.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MACSEC_Init(t_Handle h_FmMacsec);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_Free
++
++ @Description Frees all resources that were assigned to FM MACSEC module;
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MACSEC_Free(t_Handle h_FmMacsec);
++
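++/*
++ * Illustrative sketch: non-guest MACSEC bring-up bound to an existing
++ * FM MAC; the handles, register base and callback are placeholders.
++ *
++ *     t_FmMacsecParams msParams;
++ *     t_Handle         h_FmMacsec;
++ *
++ *     memset(&msParams, 0, sizeof(msParams));
++ *     msParams.h_Fm                       = h_Fm;
++ *     msParams.guestMode                  = FALSE;
++ *     msParams.nonGuestParams.baseAddr    = macsecRegsBase;
++ *     msParams.nonGuestParams.h_FmMac     = h_FmMac;
++ *     msParams.nonGuestParams.f_Exception = MyMacsecExceptionCb;
++ *     msParams.nonGuestParams.h_App       = myAppCtx;
++ *
++ *     h_FmMacsec = FM_MACSEC_Config(&msParams);
++ *     if (h_FmMacsec && FM_MACSEC_Init(h_FmMacsec) == E_OK)
++ *         FM_MACSEC_Enable(h_FmMacsec);
++ */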
++
++/**************************************************************************//**
++ @Group FM_MACSEC_advanced_init_grp FM-MACSEC Advanced Configuration Unit
++
++ @Description Configuration functions used to change default values.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description enum for unknown sci frame treatment
++*//***************************************************************************/
++typedef enum e_FmMacsecUnknownSciFrameTreatment {
++ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_BOTH = 0, /**< Controlled port - Strict mode */
++ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_UNCONTROLLED_DELIVER_OR_DISCARD_CONTROLLED, /**< If C bit clear deliver on controlled port, else discard
++ Controlled port - Check or Disable mode */
++ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED, /**< Controlled port - Strict mode */
++ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DELIVER_OR_DISCARD_UNCONTROLLED_DELIVER_OR_DISCARD_CONTROLLED /**< If C bit set deliver on uncontrolled port and discard on controlled port,
++ else discard on uncontrolled port and deliver on controlled port
++ Controlled port - Check or Disable mode */
++} e_FmMacsecUnknownSciFrameTreatment;
++
++/**************************************************************************//**
++ @Description enum for untag frame treatment
++*//***************************************************************************/
++typedef enum e_FmMacsecUntagFrameTreatment {
++ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED = 0, /**< Controlled port - Strict mode */
++ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DISCARD_BOTH, /**< Controlled port - Strict mode */
++ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DISCARD_UNCONTROLLED_DELIVER_CONTROLLED_UNMODIFIED /**< Controlled port - Strict mode */
++} e_FmMacsecUntagFrameTreatment;
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigUnknownSciFrameTreatment
++
++ @Description Change the treatment for received frames with unknown sci from its default
++ configuration [DEFAULT_unknownSciFrameTreatment].
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in] treatMode The selected mode.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigUnknownSciFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUnknownSciFrameTreatment treatMode);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigInvalidTagsFrameTreatment
++
++ @Description Change the treatment for received frames with invalid tags or
++ a zero value PN or an invalid ICV from its default configuration
++ [DEFAULT_invalidTagsFrameTreatment].
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in] deliverUncontrolled If True deliver on the uncontrolled port, else discard;
++ In both cases discard on the controlled port;
++                                     this provides Strict, Check or Disable mode.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigInvalidTagsFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment
++
++ @Description Change the treatment for received frames with the Encryption bit
++ set and the Changed Text bit clear from its default configuration
++ [DEFAULT_encryptWithNoChangedTextFrameTreatment].
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in] discardUncontrolled If True discard on the uncontrolled port, else deliver;
++ In both cases discard on the controlled port;
++                                     this provides Strict, Check or Disable mode.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment(t_Handle h_FmMacsec, bool discardUncontrolled);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigChangedTextWithNoEncryptFrameTreatment
++
++ @Description Change the treatment for received frames with the Encryption bit
++ clear and the Changed Text bit set from its default configuration
++ [DEFAULT_changedTextWithNoEncryptFrameTreatment].
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in] deliverUncontrolled If True deliver on the uncontrolled port, else discard;
++ In both cases discard on the controlled port;
++                                     this provides Strict, Check or Disable mode.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigChangedTextWithNoEncryptFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigUntagFrameTreatment
++
++ @Description Change the treatment for received frames without the MAC security tag (SecTAG)
++ from its default configuration [DEFAULT_untagFrameTreatment].
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in] treatMode The selected mode.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigUntagFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUntagFrameTreatment treatMode);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigOnlyScbIsSetFrameTreatment
++
++ @Description Change the treatment for received frames with only SCB bit set
++ from its default configuration [DEFAULT_onlyScbIsSetFrameTreatment].
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in] deliverUncontrolled If True deliver on the uncontrolled port, else discard;
++ In both cases discard on the controlled port;
++                                     this provides Strict, Check or Disable mode.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigOnlyScbIsSetFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigPnExhaustionThreshold
++
++ @Description   Provides the ability to configure a PN exhaustion threshold;
++                 when the NextPn crosses this value, an interrupt event
++                 is asserted to warn that the active SA should re-key.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in]     pnExhThr        Threshold value; when it is reached, an interrupt event
++                                 is asserted to request re-keying.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigPnExhaustionThreshold(t_Handle h_FmMacsec, uint32_t pnExhThr);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigKeysUnreadable
++
++ @Description   Turn on privacy mode; all the keys and their hash values can no longer be read;
++                 cannot be cleared except by a hard reset.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigKeysUnreadable(t_Handle h_FmMacsec);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigSectagWithoutSCI
++
++ @Description   Ensure that every generated SecTAG is emitted without the SCI included.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigSectagWithoutSCI(t_Handle h_FmMacsec);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_ConfigException
++
++ @Description Calling this routine changes the internal driver data base
++ from its default selection of exceptions enablement;
++ By default all exceptions are enabled.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in] exception The exception to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_ConfigException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable);
++
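++/*
++ * Illustrative ordering sketch: as with the MAC unit, these FM_MACSEC
++ * configuration routines must sit between FM_MACSEC_Config() and
++ * FM_MACSEC_Init(); the threshold value is an example.
++ *
++ *     t_Handle h_FmMacsec = FM_MACSEC_Config(&msParams);
++ *
++ *     FM_MACSEC_ConfigPnExhaustionThreshold(h_FmMacsec, 0xC0000000);
++ *     FM_MACSEC_ConfigKeysUnreadable(h_FmMacsec); // irreversible until hard reset
++ *     FM_MACSEC_Init(h_FmMacsec);
++ */
++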
++/** @} */ /* end of FM_MACSEC_advanced_init_grp group */
++/** @} */ /* end of FM_MACSEC_init_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_MACSEC_runtime_control_grp FM-MACSEC Runtime Control Data Unit
++
++ @Description FM MACSEC runtime control data unit API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_MACSEC_GetRevision
++
++ @Description Return MACSEC HW chip revision
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[out] p_MacsecRevision MACSEC revision as defined by the chip.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_GetRevision(t_Handle h_FmMacsec, uint32_t *p_MacsecRevision);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_Enable
++
++ @Description This routine should be called after MACSEC is initialized for enabling all
++ MACSEC engines according to their existing configuration.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Init() and when MACSEC is disabled.
++*//***************************************************************************/
++t_Error FM_MACSEC_Enable(t_Handle h_FmMacsec);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_Disable
++
++ @Description This routine may be called when MACSEC is enabled in order to
++                 disable all MACSEC engines; MACSEC then operates in bypass mode.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Init() and when MACSEC is enabled.
++*//***************************************************************************/
++t_Error FM_MACSEC_Disable(t_Handle h_FmMacsec);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SetException
++
++ @Description Calling this routine enables/disables the specified exception.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in] exception The exception to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SetException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable);
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++/**************************************************************************//**
++ @Function FM_MACSEC_DumpRegs
++
++ @Description Dump internal registers.
++
++ @Param[in] h_FmMacsec - FM MACSEC module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_DumpRegs(t_Handle h_FmMacsec);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++#ifdef VERIFICATION_SUPPORT
++/********************* VERIFICATION ONLY ********************************/
++/**************************************************************************//**
++ @Function FM_MACSEC_BackdoorSet
++
++ @Description Set register of the MACSEC memory map
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in]     offset          Register offset.
++ @Param[in]     value           Value to write.
++
++
++ @Return        E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_BackdoorSet(t_Handle h_FmMacsec, uint32_t offset, uint32_t value);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_BackdoorGet
++
++ @Description Read from register of the MACSEC memory map.
++
++ @Param[in] h_FmMacsec FM MACSEC module descriptor.
++ @Param[in]     offset          Register offset.
++
++ @Return Value read
++
++ @Cautions Allowed only following FM_MACSEC_Init().
++*//***************************************************************************/
++uint32_t FM_MACSEC_BackdoorGet(t_Handle h_FmMacsec, uint32_t offset);
++#endif /* VERIFICATION_SUPPORT */
++
++/** @} */ /* end of FM_MACSEC_runtime_control_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_MACSEC_SECY_grp FM-MACSEC SecY
++
++ @Description FM-MACSEC SecY API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++typedef uint8_t macsecSAKey_t[32];
++typedef uint64_t macsecSCI_t;
++typedef uint8_t macsecAN_t;
++
++/**************************************************************************//**
++@Description MACSEC SECY Cipher Suite
++*//***************************************************************************/
++typedef enum e_FmMacsecSecYCipherSuite {
++ e_FM_MACSEC_SECY_GCM_AES_128 = 0, /**< GCM-AES-128 */
++#if (DPAA_VERSION >= 11)
++ e_FM_MACSEC_SECY_GCM_AES_256 /**< GCM-AES-256 */
++#endif /* (DPAA_VERSION >= 11) */
++} e_FmMacsecSecYCipherSuite;
++
++/**************************************************************************//**
++ @Description MACSEC SECY Exceptions
++*//***************************************************************************/
++typedef enum e_FmMacsecSecYExceptions {
++ e_FM_MACSEC_SECY_EX_FRAME_DISCARDED /**< Frame Discarded */
++} e_FmMacsecSecYExceptions;
++
++/**************************************************************************//**
++ @Description MACSEC SECY Events
++*//***************************************************************************/
++typedef enum e_FmMacsecSecYEvents {
++ e_FM_MACSEC_SECY_EV_NEXT_PN /**< Next Packet Number exhaustion threshold reached */
++} e_FmMacsecSecYEvents;
++
++/**************************************************************************//**
++ @Collection MACSEC SECY Frame Discarded Descriptor error
++*//***************************************************************************/
++typedef uint8_t macsecTxScFrameDiscardedErrSelect_t; /**< typedef for defining Frame Discarded Descriptor errors */
++
++#define FM_MACSEC_SECY_TX_SC_FRM_DISCAR_ERR_NEXT_PN_ZERO 0x8000 /**< NextPn == 0 */
++#define FM_MACSEC_SECY_TX_SC_FRM_DISCAR_ERR_SC_DISBALE                0x4000  /**< SC is disabled */
++/* @} */
++
++/**************************************************************************//**
++ @Function t_FmMacsecSecYExceptionsCallback
++
++ @Description Exceptions user callback routine, will be called upon an
++ exception passing the exception identification.
++
++ @Param[in] h_App A handle to an application layer object; This handle
++ will be passed by the driver upon calling this callback.
++ @Param[in] exception The exception.
++*//***************************************************************************/
++typedef void (t_FmMacsecSecYExceptionsCallback) ( t_Handle h_App,
++ e_FmMacsecSecYExceptions exception);
++
++/**************************************************************************//**
++ @Function t_FmMacsecSecYEventsCallback
++
++ @Description Events user callback routine, will be called upon an
++ event passing the event identification.
++
++ @Param[in] h_App A handle to an application layer object; This handle
++ will be passed by the driver upon calling this callback.
++ @Param[in] event The event.
++*//***************************************************************************/
++typedef void (t_FmMacsecSecYEventsCallback) ( t_Handle h_App,
++ e_FmMacsecSecYEvents event);
++
++/**************************************************************************//**
++ @Description RFC2863 MIB
++*//***************************************************************************/
++typedef struct t_MIBStatistics {
++    uint64_t  ifInOctets;              /**< Total number of bytes received */
++    uint64_t  ifInPkts;                /**< Total number of packets received */
++    uint64_t  ifInMcastPkts;           /**< Total number of multicast frames received */
++    uint64_t  ifInBcastPkts;           /**< Total number of broadcast frames received */
++ uint64_t ifInDiscards; /**< Frames received, but discarded due to problems within the MAC RX :
++ - InPktsNoTag,
++ - InPktsLate,
++ - InPktsOverrun */
++ uint64_t ifInErrors; /**< Number of frames received with error:
++ - InPktsBadTag,
++ - InPktsNoSCI,
++ - InPktsNotUsingSA
++ - InPktsNotValid */
++    uint64_t  ifOutOctets;             /**< Total number of bytes sent */
++    uint64_t  ifOutPkts;               /**< Total number of packets sent */
++    uint64_t  ifOutMcastPkts;          /**< Total number of multicast frames sent */
++    uint64_t  ifOutBcastPkts;          /**< Total number of broadcast frames sent */
++    uint64_t  ifOutDiscards;           /**< Frames discarded due to problems within the MAC Tx - N/A! */
++ uint64_t ifOutErrors; /**< Number of frames transmitted with error:
++ - FIFO Overflow Error
++ - FIFO Underflow Error
++ - Other */
++} t_MIBStatistics;
++
++/**************************************************************************//**
++ @Description MACSEC SecY Rx SA Statistics
++*//***************************************************************************/
++typedef struct t_FmMacsecSecYRxSaStatistics {
++    uint32_t       inPktsOK;           /**< The number of frames with resolved SCI that have passed all
++                                             frame validation with validateFrame not set to disable */
++ uint32_t inPktsInvalid; /**< The number of frames with resolved SCI, that have failed frame
++ validation with the validateFrame set to check */
++ uint32_t inPktsNotValid; /**< The number of frames with resolved SCI, discarded on the controlled port,
++ that have failed frame validation with the validateFrame set to strict or the c bit is set */
++ uint32_t inPktsNotUsingSA; /**< The number of frames received with resolved SCI and discarded on disabled or
++ not provisioned SA with validateFrame in the strict mode or the C bit is set */
++ uint32_t inPktsUnusedSA; /**< The number of frames received with resolved SCI on disabled or not provisioned SA
++ with validateFrame not in the strict mode and the C bit is cleared */
++} t_FmMacsecSecYRxSaStatistics;
++
++/**************************************************************************//**
++ @Description MACSEC SecY Tx SA Statistics
++*//***************************************************************************/
++typedef struct t_FmMacsecSecYTxSaStatistics {
++ uint64_t outPktsProtected; /**< The number of frames, that the user of the controlled port requested to
++ be transmitted, which were integrity protected */
++ uint64_t outPktsEncrypted; /**< The number of frames, that the user of the controlled port requested to
++ be transmitted, which were confidentiality protected */
++} t_FmMacsecSecYTxSaStatistics;
++
++/**************************************************************************//**
++ @Description MACSEC SecY Rx SC Statistics
++*//***************************************************************************/
++typedef struct t_FmMacsecSecYRxScStatistics {
++ uint64_t inPktsUnchecked; /**< The number of frames with resolved SCI, delivered to the user of a controlled port,
++ that are not validated with the validateFrame set to disable */
++ uint64_t inPktsDelayed; /**< The number of frames with resolved SCI, delivered to the user of a controlled port,
++ that have their PN smaller than the lowest_PN with the validateFrame set to
++ disable or replayProtect disabled */
++ uint64_t inPktsLate; /**< The number of frames with resolved SCI, discarded on the controlled port,
++ that have their PN smaller than the lowest_PN with the validateFrame set to
++ Check or Strict and replayProtect enabled */
++    uint64_t       inPktsOK;           /**< The number of frames with resolved SCI that have passed all
++                                             frame validation with validateFrame not set to disable */
++ uint64_t inPktsInvalid; /**< The number of frames with resolved SCI, that have failed frame
++ validation with the validateFrame set to check */
++ uint64_t inPktsNotValid; /**< The number of frames with resolved SCI, discarded on the controlled port,
++ that have failed frame validation with the validateFrame set to strict or the c bit is set */
++ uint64_t inPktsNotUsingSA; /**< The number of frames received with resolved SCI and discarded on disabled or
++ not provisioned SA with validateFrame in the strict mode or the C bit is set */
++ uint64_t inPktsUnusedSA; /**< The number of frames received with resolved SCI on disabled or not provisioned SA
++ with validateFrame not in the strict mode and the C bit is cleared */
++} t_FmMacsecSecYRxScStatistics;
++
++/**************************************************************************//**
++ @Description MACSEC SecY Tx SC Statistics
++*//***************************************************************************/
++typedef struct t_FmMacsecSecYTxScStatistics {
++ uint64_t outPktsProtected; /**< The number of frames, that the user of the controlled port requested to
++ be transmitted, which were integrity protected */
++ uint64_t outPktsEncrypted; /**< The number of frames, that the user of the controlled port requested to
++ be transmitted, which were confidentiality protected */
++} t_FmMacsecSecYTxScStatistics;
++
++/**************************************************************************//**
++ @Description MACSEC SecY Statistics
++*//***************************************************************************/
++typedef struct t_FmMacsecSecYStatistics {
++ t_MIBStatistics mibCtrlStatistics; /**< Controlled port MIB statistics */
++ t_MIBStatistics mibNonCtrlStatistics; /**< Uncontrolled port MIB statistics */
++/* Frame verification statistics */
++ uint64_t inPktsUntagged; /**< The number of received packets without the MAC security tag
++ (SecTAG) with validateFrames which is not in the strict mode */
++ uint64_t inPktsNoTag; /**< The number of received packets discarded without the
++ MAC security tag (SecTAG) with validateFrames which is in the strict mode */
++ uint64_t inPktsBadTag; /**< The number of received packets discarded with an invalid
++ SecTAG or a zero value PN or an invalid ICV */
++ uint64_t inPktsUnknownSCI; /**< The number of received packets with unknown SCI with the
++ condition : validateFrames is not in the strict mode and the
++ C bit in the SecTAG is not set */
++ uint64_t inPktsNoSCI; /**< The number of received packets discarded with unknown SCI
++ information with the condition : validateFrames is in the strict mode
++ or the C bit in the SecTAG is set */
++ uint64_t inPktsOverrun; /**< The number of packets discarded because the number of
++ received packets exceeded the cryptographic performance capabilities */
++/* Frame validation statistics */
++ uint64_t inOctetsValidated; /**< The number of octets of plaintext recovered from received frames with
++ resolved SCI that were integrity protected but not encrypted */
++ uint64_t inOctetsDecrypted; /**< The number of octets of plaintext recovered from received frames with
++ resolved SCI that were integrity protected and encrypted */
++/* Frame generation statistics */
++ uint64_t outPktsUntagged; /**< The number of frames, that the user of the controlled port requested to
++ be transmitted, with protectFrame false */
++ uint64_t outPktsTooLong; /**< The number of frames, that the user of the controlled port requested to
++ be transmitted, discarded due to length being larger than Maximum Frame Length (MACSEC_MFL) */
++/* Frame protection statistics */
++ uint64_t outOctetsProtected; /**< The number of octets of User Data in transmitted frames that were
++ integrity protected but not encrypted */
++ uint64_t outOctetsEncrypted; /**< The number of octets of User Data in transmitted frames that were
++ both integrity protected and encrypted */
++} t_FmMacsecSecYStatistics;
++
++
++/**************************************************************************//**
++ @Description MACSEC SecY SC Params
++*//***************************************************************************/
++typedef struct t_FmMacsecSecYSCParams {
++ macsecSCI_t sci; /**< The secure channel identification of the SC */
++ e_FmMacsecSecYCipherSuite cipherSuite; /**< Cipher suite to be used for the SC */
++} t_FmMacsecSecYSCParams;
++
++/**************************************************************************//**
++ @Group FM_MACSEC_SECY_init_grp FM-MACSEC SecY Initialization Unit
++
++ @Description FM-MACSEC SecY Initialization Unit
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description enum for validate frames
++*//***************************************************************************/
++typedef enum e_FmMacsecValidFrameBehavior {
++ e_FM_MACSEC_VALID_FRAME_BEHAVIOR_DISABLE = 0, /**< disable the validation function */
++ e_FM_MACSEC_VALID_FRAME_BEHAVIOR_CHECK, /**< enable the validation function but only for checking
++ without filtering out invalid frames */
++ e_FM_MACSEC_VALID_FRAME_BEHAVIOR_STRICT /**< enable the validation function and also strictly filter
++ out those invalid frames */
++} e_FmMacsecValidFrameBehavior;
++
++/**************************************************************************//**
++ @Description enum for sci insertion
++*//***************************************************************************/
++typedef enum e_FmMacsecSciInsertionMode {
++ e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_SECTAG = 0, /**< explicit sci in the sectag */
++ e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_MAC_SA, /**< MAC SA is overwritten with the SCI */
++ e_FM_MACSEC_SCI_INSERTION_MODE_IMPLICT_PTP /**< implicit point-to-point sci (pre-shared) */
++} e_FmMacsecSciInsertionMode;
++
++/**************************************************************************//**
++ @Description FM MACSEC SecY config input
++*//***************************************************************************/
++typedef struct t_FmMacsecSecYParams {
++ t_Handle h_FmMacsec; /**< A handle to the FM MACSEC object */
++ t_FmMacsecSecYSCParams txScParams; /**< Tx SC Params */
++ uint32_t numReceiveChannels; /**< Number of receive channels dedicated to this SecY */
++ t_FmMacsecSecYExceptionsCallback *f_Exception; /**< Callback routine to be called by the driver upon SecY exception */
++ t_FmMacsecSecYEventsCallback *f_Event; /**< Callback routine to be called by the driver upon SecY event */
++ t_Handle h_App; /**< A handle to an application layer object; This handle will
++ be passed by the driver upon calling the above callbacks */
++} t_FmMacsecSecYParams;
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_Config
++
++ @Description Creates a descriptor for the FM MACSEC SECY module;
++
++ The routine returns a handle (descriptor) to the FM MACSEC SECY object;
++ This descriptor must be passed as first parameter to all other
++ FM MACSEC SECY function calls;
++ No actual initialization or configuration of FM MACSEC SecY hardware is
++ done by this routine.
++
++ @Param[in] p_FmMacsecSecYParam Pointer to data structure of parameters.
++
++ @Return Handle to FM MACSEC SECY object, or NULL for Failure.
++*//***************************************************************************/
++t_Handle FM_MACSEC_SECY_Config(t_FmMacsecSecYParams *p_FmMacsecSecYParam);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_Init
++
++ @Description Initializes the FM MACSEC SECY module.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_Init(t_Handle h_FmMacsecSecY);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_Free
++
++ @Description Frees all resources that were assigned to FM MACSEC SECY module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_Free(t_Handle h_FmMacsecSecY);
++
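++/*
++ * Editorial usage sketch -- not part of the original header. A minimal
++ * Config -> Init -> Free lifecycle for a SecY object, assuming the caller
++ * already owns a configured FM MACSEC handle ('h_FmMacsec') and supplies
++ * its own callbacks; memset() availability is assumed.
++ */
++static t_Handle ExampleSecYBringUp(t_Handle h_FmMacsec,
++ t_FmMacsecSecYSCParams *p_TxScParams,
++ t_FmMacsecSecYExceptionsCallback *f_Exception,
++ t_FmMacsecSecYEventsCallback *f_Event,
++ t_Handle h_App)
++{
++ t_FmMacsecSecYParams params;
++ t_Handle h_FmMacsecSecY;
++
++ memset(&params, 0, sizeof(params));
++ params.h_FmMacsec = h_FmMacsec;
++ params.txScParams = *p_TxScParams;
++ params.numReceiveChannels = 1;
++ params.f_Exception = f_Exception;
++ params.f_Event = f_Event;
++ params.h_App = h_App;
++
++ /* Creates the descriptor only; no hardware is touched yet */
++ h_FmMacsecSecY = FM_MACSEC_SECY_Config(&params);
++ if (!h_FmMacsecSecY)
++ return NULL;
++
++ /* Advanced FM_MACSEC_SECY_ConfigXxx() calls would go here */
++
++ if (FM_MACSEC_SECY_Init(h_FmMacsecSecY) != E_OK) {
++ FM_MACSEC_SECY_Free(h_FmMacsecSecY);
++ return NULL;
++ }
++ return h_FmMacsecSecY;
++}
++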
++/**************************************************************************//**
++ @Group FM_MACSEC_SECY_advanced_init_grp FM-MACSEC SecY Advanced Configuration Unit
++
++ @Description Configuration functions used to change default values.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_ConfigSciInsertionMode
++
++ @Description Calling this routine changes the SCI-insertion-mode in the
++ internal driver data base from its default configuration
++ [DEFAULT_sciInsertionMode]
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] sciInsertionMode SCI insertion mode
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
++
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_ConfigSciInsertionMode(t_Handle h_FmMacsecSecY, e_FmMacsecSciInsertionMode sciInsertionMode);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_ConfigProtectFrames
++
++ @Description Calling this routine changes the protect-frame mode in the
++ internal driver data base from its default configuration
++ [DEFAULT_protectFrames]
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] protectFrames If FALSE, frames are transmitted without modification
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
++
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_ConfigProtectFrames(t_Handle h_FmMacsecSecY, bool protectFrames);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_ConfigReplayWindow
++
++ @Description Calling this routine changes the replay-window settings in the
++ internal driver data base from its default configuration
++ [DEFAULT_replayEnable], [DEFAULT_replayWindow]
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] replayProtect Replay protection function mode
++ @Param[in] replayWindow The size of the replay window
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
++
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_ConfigReplayWindow(t_Handle h_FmMacsecSecY, bool replayProtect, uint32_t replayWindow);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_ConfigValidationMode
++
++ @Description Calling this routine changes the frame-validation-behavior mode
++ in the internal driver data base from its default configuration
++ [DEFAULT_validateFrames]
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] validateFrames Validation function mode
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
++
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_ConfigValidationMode(t_Handle h_FmMacsecSecY, e_FmMacsecValidFrameBehavior validateFrames);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_ConfigConfidentiality
++
++ @Description Calling this routine changes the confidentiality settings in the
++ internal driver data base from its default configuration
++ [DEFAULT_confidentialityEnable], [DEFAULT_confidentialityOffset]
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] confidentialityEnable TRUE - confidentiality protection and integrity protection
++ FALSE - no confidentiality protection, only integrity protection
++ @Param[in] confidentialityOffset The number of initial octets of each MSDU without confidentiality protection
++ common values are 0, 30, and 50
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
++
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_ConfigConfidentiality(t_Handle h_FmMacsecSecY, bool confidentialityEnable, uint16_t confidentialityOffset);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_ConfigPointToPoint
++
++ @Description Configures this SecY to work in point-to-point mode, meaning that
++ it will have only one Rx SC;
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
++ Can be called only once in a system; only the first SecY that calls this
++ routine will be able to operate in point-to-point mode.
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_ConfigPointToPoint(t_Handle h_FmMacsecSecY);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_ConfigException
++
++ @Description Calling this routine changes the internal driver data base
++ from its default selection of exceptions enablement;
++ By default all exceptions are enabled.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] exception The exception to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_ConfigException(t_Handle h_FmMacsecSecY, e_FmMacsecSecYExceptions exception, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_ConfigEvent
++
++ @Description Calling this routine changes the internal driver data base
++ from its default selection of events enablement;
++ By default all events are enabled.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] event The event to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_ConfigEvent(t_Handle h_FmMacsecSecY, e_FmMacsecSecYEvents event, bool enable);
++
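++/*
++ * Editorial sketch -- not part of the original header. The advanced
++ * FM_MACSEC_SECY_ConfigXxx() routines may only be called in the window
++ * between FM_MACSEC_SECY_Config() and FM_MACSEC_SECY_Init(), e.g.:
++ *
++ * h_SecY = FM_MACSEC_SECY_Config(&params);
++ * FM_MACSEC_SECY_ConfigReplayWindow(h_SecY, TRUE, 128);
++ * FM_MACSEC_SECY_ConfigConfidentiality(h_SecY, TRUE, 30);
++ * FM_MACSEC_SECY_ConfigValidationMode(h_SecY,
++ * e_FM_MACSEC_VALID_FRAME_BEHAVIOR_STRICT);
++ * FM_MACSEC_SECY_Init(h_SecY);
++ *
++ * The replay-window size (128) and confidentiality offset (30) are
++ * illustrative values only; error checking is elided.
++ */
++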
++/** @} */ /* end of FM_MACSEC_SECY_advanced_init_grp group */
++/** @} */ /* end of FM_MACSEC_SECY_init_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_MACSEC_SECY_runtime_control_grp FM-MACSEC SecY Runtime Control Unit
++
++ @Description FM MACSEC SECY Runtime control unit API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_CreateRxSc
++
++ @Description Create a receive secure channel.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] p_ScParams Secure channel params.
++
++ @Return Handle to the Rx SC on success; NULL otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Handle FM_MACSEC_SECY_CreateRxSc(t_Handle h_FmMacsecSecY, t_FmMacsecSecYSCParams *p_ScParams);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_DeleteRxSc
++
++ @Description Deleting an initialized secure channel.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSc().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_DeleteRxSc(t_Handle h_FmMacsecSecY, t_Handle h_Sc);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_CreateRxSa
++
++ @Description Create a receive secure association for the secure channel;
++ the SA cannot be used to receive frames until FM_MACSEC_SECY_RxSaEnableReceive is called.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
++ @Param[in] an Association number representing the SA.
++ @Param[in] lowestPn The lowest acceptable PN value for a received frame.
++ @Param[in] key The desired key for this SA.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSc().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_CreateRxSa(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t lowestPn, macsecSAKey_t key);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_DeleteRxSa
++
++ @Description Deleting an initialized secure association.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
++ @Param[in] an Association number representing the SA.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_DeleteRxSa(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_RxSaEnableReceive
++
++ @Description Enabling the SA to receive frames.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
++ @Param[in] an Association number representing the SA.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_RxSaEnableReceive(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an);
++
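++/*
++ * Editorial usage sketch -- not part of the original header. Bringing up
++ * one receive path: an Rx SA is created inactive and only accepts frames
++ * after FM_MACSEC_SECY_RxSaEnableReceive(). The SC parameters, AN, lowest
++ * PN and key are assumed to be agreed with the peer by the caller; the
++ * error code chosen for a NULL SC handle is illustrative.
++ */
++static t_Error ExampleRxBringUp(t_Handle h_FmMacsecSecY,
++ t_FmMacsecSecYSCParams *p_ScParams,
++ macsecAN_t an,
++ uint32_t lowestPn,
++ macsecSAKey_t key,
++ t_Handle *p_RxSc)
++{
++ t_Error err;
++
++ *p_RxSc = FM_MACSEC_SECY_CreateRxSc(h_FmMacsecSecY, p_ScParams);
++ if (!*p_RxSc)
++ return E_INVALID_HANDLE; /* error code choice is illustrative */
++
++ err = FM_MACSEC_SECY_CreateRxSa(h_FmMacsecSecY, *p_RxSc, an, lowestPn, key);
++ if (err != E_OK)
++ return err;
++
++ return FM_MACSEC_SECY_RxSaEnableReceive(h_FmMacsecSecY, *p_RxSc, an);
++}
++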
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_RxSaDisableReceive
++
++ @Description Disabling the SA from receiving frames.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
++ @Param[in] an Association number representing the SA.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_RxSaDisableReceive(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_RxSaUpdateNextPn
++
++ @Description Update the next packet number expected on RX;
++ The value of nextPN shall be set to the greater of its existing value and the
++ supplied updtNextPN (IEEE 802.1AE-2006, 10.7.15).
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
++ @Param[in] an Association number representing the SA.
++ @Param[in] updtNextPN The next PN value for a received frame.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_RxSaUpdateNextPn(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t updtNextPN);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_RxSaUpdateLowestPn
++
++ @Description Update the lowest packet number expected on RX;
++ The value of lowestPN shall be set to the greater of its existing value and the
++ supplied updtLowestPN (IEEE 802.1AE-2006, 10.7.15).
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
++ @Param[in] an Association number representing the SA.
++ @Param[in] updtLowestPN The lowest acceptable PN value for a received frame.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_RxSaUpdateLowestPn(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t updtLowestPN);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_RxSaModifyKey
++
++ @Description Modify the current key of the SA with a new one.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
++ @Param[in] an Association number representing the SA.
++ @Param[in] key New key to replace the current key.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_RxSaModifyKey(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, macsecSAKey_t key);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_CreateTxSa
++
++ @Description Create a transmit secure association for the secure channel;
++ the SA cannot be used to transmit frames until FM_MACSEC_SECY_TxSaSetActive is called;
++ Only one SA can be active at a time.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] an Association number representing the SA.
++ @Param[in] key The desired key for this SA.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_CreateTxSa(t_Handle h_FmMacsecSecY, macsecAN_t an, macsecSAKey_t key);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_DeleteTxSa
++
++ @Description Deleting an initialized secure association.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] an Association number representing the SA.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_DeleteTxSa(t_Handle h_FmMacsecSecY, macsecAN_t an);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_TxSaModifyKey
++
++ @Description Modify the key of the inactive SA with a new one.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] nextActiveAn Association number representing the next SA to be activated.
++ @Param[in] key New key to replace the current key.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_TxSaModifyKey(t_Handle h_FmMacsecSecY, macsecAN_t nextActiveAn, macsecSAKey_t key);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_TxSaSetActive
++
++ @Description Set this SA as the active SA to be used for TX on the SC;
++ only one SA can be active at a time.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] an Association number representing the SA.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_TxSaSetActive(t_Handle h_FmMacsecSecY, macsecAN_t an);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_TxSaGetActive
++
++ @Description Get the active SA that is being used for TX.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[out] p_An The active AN.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_TxSaGetActive(t_Handle h_FmMacsecSecY, macsecAN_t *p_An);
++
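++/*
++ * Editorial usage sketch -- not part of the original header. Tx key
++ * rotation: install the next key on an inactive SA (assumed to have been
++ * created earlier with FM_MACSEC_SECY_CreateTxSa()) and then switch
++ * transmission to it.
++ */
++static t_Error ExampleTxKeyRotation(t_Handle h_FmMacsecSecY,
++ macsecAN_t nextAn,
++ macsecSAKey_t newKey)
++{
++ t_Error err;
++
++ err = FM_MACSEC_SECY_TxSaModifyKey(h_FmMacsecSecY, nextAn, newKey);
++ if (err != E_OK)
++ return err;
++
++ /* From this point frames are protected with the new SA/key */
++ return FM_MACSEC_SECY_TxSaSetActive(h_FmMacsecSecY, nextAn);
++}
++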
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_GetStatistics
++
++ @Description Get all statistics counters.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[out] p_Statistics Structure to be filled with statistics.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_GetStatistics(t_Handle h_FmMacsecSecY, t_FmMacsecSecYStatistics *p_Statistics);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_RxScGetStatistics
++
++ @Description Get all statistics counters.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc Rx Sc handle.
++ @Param[out] p_Statistics Structure to be filled with statistics.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_RxScGetStatistics(t_Handle h_FmMacsecSecY, t_Handle h_Sc, t_FmMacsecSecYRxScStatistics *p_Statistics);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_RxSaGetStatistics
++
++ @Description Get all statistics counters.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc Rx Sc handle.
++ @Param[in] an Association number representing the SA.
++ @Param[out] p_Statistics Structure to be filled with statistics.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_RxSaGetStatistics(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, t_FmMacsecSecYRxSaStatistics *p_Statistics);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_TxScGetStatistics
++
++ @Description Get all statistics counters.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[out] p_Statistics Structure to be filled with statistics.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_TxScGetStatistics(t_Handle h_FmMacsecSecY, t_FmMacsecSecYTxScStatistics *p_Statistics);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_TxSaGetStatistics
++
++ @Description Get all statistics counters.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] an Association number representing the SA.
++ @Param[out] p_Statistics Structure to be filled with statistics.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_TxSaGetStatistics(t_Handle h_FmMacsecSecY, macsecAN_t an, t_FmMacsecSecYTxSaStatistics *p_Statistics);
++
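++/*
++ * Editorial usage sketch -- not part of the original header. Polling the
++ * SecY-level counters; XX_Print() is assumed to be the SDK's printf-like
++ * trace helper (any logging primitive may be substituted).
++ */
++static void ExampleDumpSecYStats(t_Handle h_FmMacsecSecY)
++{
++ t_FmMacsecSecYStatistics stats;
++
++ if (FM_MACSEC_SECY_GetStatistics(h_FmMacsecSecY, &stats) != E_OK)
++ return;
++
++ XX_Print("in untagged %llu, in bad tag %llu, out too long %llu\n",
++ (unsigned long long)stats.inPktsUntagged,
++ (unsigned long long)stats.inPktsBadTag,
++ (unsigned long long)stats.outPktsTooLong);
++}
++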
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_SetException
++
++ @Description Calling this routine enables/disables the specified exception.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] exception The exception to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_SetException(t_Handle h_FmMacsecSecY, e_FmMacsecExceptions exception, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_SetEvent
++
++ @Description Calling this routine enables/disables the specified event.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] event The event to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_SetEvent(t_Handle h_FmMacsecSecY, e_FmMacsecSecYEvents event, bool enable);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_GetRxScPhysId
++
++ @Description Returns the physical ID of the Secure Channel.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
++ @Param[out] p_ScPhysId The SC physical ID.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSc().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_GetRxScPhysId(t_Handle h_FmMacsecSecY, t_Handle h_Sc, uint32_t *p_ScPhysId);
++
++/**************************************************************************//**
++ @Function FM_MACSEC_SECY_GetTxScPhysId
++
++ @Description Returns the physical ID of the Secure Channel.
++
++ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
++ @Param[out] p_ScPhysId The SC physical ID.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MACSEC_SECY_Init().
++*//***************************************************************************/
++t_Error FM_MACSEC_SECY_GetTxScPhysId(t_Handle h_FmMacsecSecY, uint32_t *p_ScPhysId);
++
++/** @} */ /* end of FM_MACSEC_SECY_runtime_control_grp group */
++/** @} */ /* end of FM_MACSEC_SECY_grp group */
++/** @} */ /* end of FM_MACSEC_grp group */
++/** @} */ /* end of FM_grp group */
++
++
++#endif /* __FM_MACSEC_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_muram_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_muram_ext.h
+new file mode 100644
+index 00000000..ef62c8ef
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_muram_ext.h
+@@ -0,0 +1,170 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File fm_muram_ext.h
++
++ @Description FM MURAM Application Programming Interface.
++*//***************************************************************************/
++#ifndef __FM_MURAM_EXT
++#define __FM_MURAM_EXT
++
++#include "error_ext.h"
++#include "std_ext.h"
++
++
++/**************************************************************************//**
++
++ @Group FM_grp Frame Manager API
++
++ @Description FM API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_muram_grp FM MURAM
++
++ @Description FM MURAM API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_muram_init_grp FM MURAM Initialization Unit
++
++ @Description FM MURAM initialization API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_MURAM_ConfigAndInit
++
++ @Description Creates a partition in the MURAM.
++
++ The routine returns a handle (descriptor) to the MURAM partition.
++ This descriptor must be passed as first parameter to all other
++ FM-MURAM function calls.
++
++ No actual initialization or configuration of FM_MURAM hardware is
++ done by this routine.
++
++ @Param[in] baseAddress - Pointer to base of memory mapped FM-MURAM.
++ @Param[in] size - Size of the FM-MURAM partition.
++
++ @Return Handle to FM-MURAM object, or NULL for Failure.
++*//***************************************************************************/
++t_Handle FM_MURAM_ConfigAndInit(uintptr_t baseAddress, uint32_t size);
++
++/**************************************************************************//**
++ @Function FM_MURAM_Free
++
++ @Description Frees all resources that were assigned to FM-MURAM module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmMuram - FM-MURAM module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MURAM_Free(t_Handle h_FmMuram);
++
++/** @} */ /* end of FM_muram_init_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_muram_ctrl_grp FM MURAM Control Unit
++
++ @Description FM MURAM control API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_MURAM_AllocMem
++
++ @Description Allocate memory from the FM-MURAM partition.
++
++ @Param[in] h_FmMuram - FM-MURAM module descriptor.
++ @Param[in] size - Size of the memory to be allocated.
++ @Param[in] align - Alignment of the memory.
++
++ @Return Address of the allocated memory; NULL otherwise.
++*//***************************************************************************/
++void * FM_MURAM_AllocMem(t_Handle h_FmMuram, uint32_t size, uint32_t align);
++
++/**************************************************************************//**
++ @Function FM_MURAM_AllocMemForce
++
++ @Description Allocate memory at a specific base address within the FM-MURAM
++ partition.
++
++ @Param[in] h_FmMuram - FM-MURAM module descriptor.
++ @Param[in] base - The desired base address to be allocated.
++ @Param[in] size - Size of the memory to be allocated.
++
++ @Return Address of the allocated memory; NULL otherwise.
++*//***************************************************************************/
++void * FM_MURAM_AllocMemForce(t_Handle h_FmMuram, uint64_t base, uint32_t size);
++
++/**************************************************************************//**
++ @Function FM_MURAM_FreeMem
++
++ @Description Free memory previously allocated from the FM-MURAM partition.
++
++ @Param[in] h_FmMuram - FM-MURAM module descriptor.
++ @Param[in] ptr - A pointer to the allocated memory.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_MURAM_FreeMem(t_Handle h_FmMuram, void *ptr);
++
++/**************************************************************************//**
++ @Function FM_MURAM_GetFreeMemSize
++
++ @Description Returns the size (in bytes) of free MURAM memory.
++
++ @Param[in] h_FmMuram - FM-MURAM module descriptor.
++
++ @Return Free MURAM memory size in bytes.
++*//***************************************************************************/
++uint64_t FM_MURAM_GetFreeMemSize(t_Handle h_FmMuram);
++
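++/*
++ * Editorial usage sketch -- not part of the original header. Carving an
++ * aligned buffer out of a MURAM partition; 'baseAddress' and 'size' are
++ * platform-specific and assumed to describe the memory-mapped MURAM window.
++ */
++static void ExampleMuramUsage(uintptr_t baseAddress, uint32_t size)
++{
++ t_Handle h_FmMuram;
++ void *p_Buf;
++
++ h_FmMuram = FM_MURAM_ConfigAndInit(baseAddress, size);
++ if (!h_FmMuram)
++ return;
++
++ /* 256 bytes, 64-byte aligned (illustrative values) */
++ p_Buf = FM_MURAM_AllocMem(h_FmMuram, 256, 64);
++ if (p_Buf)
++ FM_MURAM_FreeMem(h_FmMuram, p_Buf);
++
++ FM_MURAM_Free(h_FmMuram);
++}
++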
++/** @} */ /* end of FM_muram_ctrl_grp group */
++/** @} */ /* end of FM_muram_grp group */
++/** @} */ /* end of FM_grp group */
++
++
++
++#endif /* __FM_MURAM_EXT */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_pcd_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_pcd_ext.h
+new file mode 100644
+index 00000000..8d1c3d88
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_pcd_ext.h
+@@ -0,0 +1,3974 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File fm_pcd_ext.h
++
++ @Description FM PCD API definitions
++*//***************************************************************************/
++#ifndef __FM_PCD_EXT
++#define __FM_PCD_EXT
++
++#include "std_ext.h"
++#include "net_ext.h"
++#include "list_ext.h"
++#include "fm_ext.h"
++#include "fsl_fman_kg.h"
++
++
++/**************************************************************************//**
++ @Group FM_grp Frame Manager API
++
++ @Description Frame Manager Application Programming Interface
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_PCD_grp FM PCD
++
++ @Description Frame Manager PCD (Parse-Classify-Distribute) API.
++
++ The FM PCD module is responsible for the initialization of all
++ global classifying FM modules. This includes the parser general and
++ common registers, the key generator global and common registers,
++ and the policer global and common registers.
++ In addition, the FM PCD SW module will initialize all required
++ key generator schemes, coarse classification flows, and policer
++ profiles. When the FM module is configured to work with one of these
++ entities, it registers with it using the FM PORT API. The PCD
++ module will manage the PCD resources - i.e. resource management of
++ KeyGen schemes, etc.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Collection General PCD defines
++*//***************************************************************************/
++#define FM_PCD_MAX_NUM_OF_PRIVATE_HDRS 2 /**< Number of units/headers saved for user */
++
++#define FM_PCD_PRS_NUM_OF_HDRS 16 /**< Number of headers supported by HW parser */
++#define FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS (32 - FM_PCD_MAX_NUM_OF_PRIVATE_HDRS)
++ /**< Number of distinction units is limited by
++ register size (32 bits) minus reserved bits
++ for private headers. */
++#define FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS 4 /**< Maximum number of interchangeable headers
++ in a distinction unit */
++#define FM_PCD_KG_NUM_OF_GENERIC_REGS FM_KG_NUM_OF_GENERIC_REGS /**< Total number of generic KeyGen registers */
++#define FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY 35 /**< Max number allowed on any configuration;
++ For HW implementation reasons, in most
++ cases less than this will be allowed; The
++ driver will return an initialization error
++ if resource is unavailable. */
++#define FM_PCD_KG_NUM_OF_EXTRACT_MASKS 4 /**< Total number of masks allowed on KeyGen extractions. */
++#define FM_PCD_KG_NUM_OF_DEFAULT_GROUPS 16 /**< Number of default value logical groups */
++
++#define FM_PCD_PRS_NUM_OF_LABELS 32 /**< Maximum number of SW parser labels */
++#define FM_SW_PRS_MAX_IMAGE_SIZE (FM_PCD_SW_PRS_SIZE /*- FM_PCD_PRS_SW_OFFSET -FM_PCD_PRS_SW_TAIL_SIZE*/-FM_PCD_PRS_SW_PATCHES_SIZE)
++ /**< Maximum size of SW parser code */
++
++#define FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE 128 /**< Maximum size of insertion template for
++ insert manipulation */
++
++#if (DPAA_VERSION >= 11)
++#define FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES 64 /**< Maximum possible entries for frame replicator group */
++#endif /* (DPAA_VERSION >= 11) */
++/* @} */
++
++
++/**************************************************************************//**
++ @Group FM_PCD_init_grp FM PCD Initialization Unit
++
++ @Description Frame Manager PCD Initialization Unit API
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description PCD counters
++*//***************************************************************************/
++typedef enum e_FmPcdCounters {
++ e_FM_PCD_KG_COUNTERS_TOTAL, /**< KeyGen counter */
++ e_FM_PCD_PLCR_COUNTERS_RED, /**< Policer counter - counts the total number of RED packets that exit the Policer. */
++ e_FM_PCD_PLCR_COUNTERS_YELLOW, /**< Policer counter - counts the total number of YELLOW packets that exit the Policer. */
++ e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED, /**< Policer counter - counts the number of packets that changed color to RED by the Policer;
++ This is a subset of e_FM_PCD_PLCR_COUNTERS_RED packet count, indicating active color changes. */
++ e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW, /**< Policer counter - counts the number of packets that changed color to YELLOW by the Policer;
++ This is a subset of e_FM_PCD_PLCR_COUNTERS_YELLOW packet count, indicating active color changes. */
++ e_FM_PCD_PLCR_COUNTERS_TOTAL, /**< Policer counter - counts the total number of packets passed in the Policer. */
++ e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH, /**< Policer counter - counts the number of packets with length mismatch. */
++ e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH, /**< Parser counter - counts the number of times the parser block is dispatched. */
++ e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L2 parse result is returned (including errors). */
++ e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L3 parse result is returned (including errors). */
++ e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L4 parse result is returned (including errors). */
++ e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times SHIM parse result is returned (including errors). */
++ e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L2 parse result is returned with errors. */
++ e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L3 parse result is returned with errors. */
++ e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L4 parse result is returned with errors. */
++ e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times SHIM parse result is returned with errors. */
++ e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES, /**< Parser counter - counts the number of cycles spent executing soft parser instruction (including stall cycles). */
++ e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES, /**< Parser counter - counts the number of cycles stalled waiting for parser internal memory reads while executing soft parser instruction. */
++ e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES, /**< Parser counter - counts the number of cycles spent executing hard parser (including stall cycles). */
++ e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES, /**< MURAM counter - counts the number of cycles while performing FMan Memory read. */
++ e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES, /**< MURAM counter - counts the number of cycles stalled while performing FMan Memory read. */
++ e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES, /**< MURAM counter - counts the number of cycles while performing FMan Memory write. */
++ e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES, /**< MURAM counter - counts the number of cycles stalled while performing FMan Memory write. */
++ e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES /**< FPM counter - counts the number of cycles stalled while performing a FPM Command. */
++} e_FmPcdCounters;
++
++/**************************************************************************//**
++ @Description PCD interrupts
++*//***************************************************************************/
++typedef enum e_FmPcdExceptions {
++ e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC, /**< KeyGen double-bit ECC error is detected on internal memory read access. */
++ e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, /**< KeyGen scheme configuration error indicating a key size larger than 56 bytes. */
++ e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC, /**< Policer double-bit ECC error has been detected on PRAM read access. */
++ e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR, /**< Policer access to a non-initialized profile has been detected. */
++ e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE, /**< Policer RAM self-initialization complete */
++ e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE, /**< Policer atomic action complete */
++ e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC, /**< Parser double-bit ECC error */
++ e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC /**< Parser single-bit ECC error */
++} e_FmPcdExceptions;
++
++
++/**************************************************************************//**
++ @Description Exceptions user callback routine; will be called upon an
++ exception, passing the exception identification.
++
++ @Param[in] h_App - User's application descriptor.
++ @Param[in] exception - The exception.
++ *//***************************************************************************/
++typedef void (t_FmPcdExceptionCallback) (t_Handle h_App, e_FmPcdExceptions exception);
++
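++/*
++ * Editorial sketch -- not part of the original header. A trivial callback
++ * matching the t_FmPcdExceptionCallback prototype above; XX_Print() is
++ * assumed to be the SDK's printf-like trace helper.
++ */
++static void ExamplePcdExceptionCb(t_Handle h_App, e_FmPcdExceptions exception)
++{
++ (void)h_App; /* application context unused in this sketch */
++ XX_Print("FM-PCD exception %d raised\n", (int)exception);
++}
++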
++/**************************************************************************//**
++ @Description Exceptions user callback routine; will be called upon an exception,
++ passing the exception identification.
++
++ @Param[in] h_App - User's application descriptor.
++ @Param[in] exception - The exception.
++ @Param[in] index - id of the relevant source (may be scheme or profile id).
++ *//***************************************************************************/
++typedef void (t_FmPcdIdExceptionCallback) ( t_Handle h_App,
++ e_FmPcdExceptions exception,
++ uint16_t index);
++
++/**************************************************************************//**
++ @Description A callback for enqueuing frame onto a QM queue.
++
++ @Param[in] h_QmArg - Application's handle passed to QM module on enqueue.
++ @Param[in] p_Fd - Frame descriptor for the frame.
++
++ @Return E_OK on success; Error code otherwise.
++ *//***************************************************************************/
++typedef t_Error (t_FmPcdQmEnqueueCallback) (t_Handle h_QmArg, void *p_Fd);
++
++/**************************************************************************//**
++ @Description Host-Command parameters structure.
++
++ When using Host command for PCD functionalities, a dedicated port
++ must be used. If this routine is called for a PCD in a single partition
++ environment, or it is the master partition in a multi-partition
++ environment, the port will be initialized by the PCD driver
++ initialization routine.
++ *//***************************************************************************/
++typedef struct t_FmPcdHcParams {
++ uintptr_t portBaseAddr; /**< Virtual Address of Host-Command Port memory mapped registers.*/
++ uint8_t portId; /**< Port Id (0-6 relative to Host-Command/Offline-Parsing ports);
++ NOTE: When configuring Host Command port for
++ FMANv3 devices (DPAA_VERSION 11 and higher),
++ portId=0 MUST be used. */
++ uint16_t liodnBase; /**< LIODN base for this port, to be used together with LIODN offset
++ (irrelevant for P4080 revision 1.0) */
++ uint32_t errFqid; /**< Host-Command Port error queue Id. */
++ uint32_t confFqid; /**< Host-Command Port confirmation queue Id. */
++ uint32_t qmChannel; /**< QM channel dedicated to this Host-Command port;
++ will be used by the FM for dequeue. */
++ t_FmPcdQmEnqueueCallback *f_QmEnqueue; /**< Callback routine for enqueuing a frame to the QM */
++ t_Handle h_QmArg; /**< Application's handle passed to QM module on enqueue */
++} t_FmPcdHcParams;
++
++/**************************************************************************//**
++ @Description The main structure for PCD initialization
++ *//***************************************************************************/
++typedef struct t_FmPcdParams {
++ bool prsSupport; /**< TRUE if Parser will be used for any of the FM ports. */
++ bool ccSupport; /**< TRUE if Coarse Classification will be used for any
++ of the FM ports. */
++ bool kgSupport; /**< TRUE if KeyGen will be used for any of the FM ports. */
++ bool plcrSupport; /**< TRUE if Policer will be used for any of the FM ports. */
++ t_Handle h_Fm; /**< A handle to the FM module. */
++ uint8_t numOfSchemes; /**< Number of schemes dedicated to this partition.
++ This parameter is relevant if 'kgSupport'=TRUE. */
++ bool useHostCommand; /**< Optional for single partition; mandatory for multi-partition */
++ t_FmPcdHcParams hc; /**< Host Command parameters, relevant only if 'useHostCommand'=TRUE;
++ Relevant when FM does not run in "guest-mode". */
++
++ t_FmPcdExceptionCallback *f_Exception; /**< Callback routine for general PCD exceptions;
++ Relevant when FM does not run in "guest-mode". */
++ t_FmPcdIdExceptionCallback *f_ExceptionId; /**< Callback routine for specific KeyGen scheme or
++ Policer profile exceptions;
++ Relevant when FM does not run in "guest-mode". */
++ t_Handle h_App; /**< A handle to an application layer object; This handle will
++ be passed by the driver upon calling the above callbacks;
++ Relevant when FM does not run in "guest-mode". */
++ uint8_t partPlcrProfilesBase; /**< The first policer-profile-id dedicated to this partition.
++ This parameter is relevant if 'plcrSupport'=TRUE.
++ NOTE: this parameter is relevant only when working with multiple partitions. */
++ uint16_t partNumOfPlcrProfiles; /**< Number of policer-profiles dedicated to this partition.
++ This parameter is relevant if 'plcrSupport'=TRUE.
++ NOTE: this parameter is relevant only when working with multiple partitions. */
++} t_FmPcdParams;
++
++
++/**************************************************************************//**
++ @Function FM_PCD_Config
++
++ @Description Basic configuration of the PCD module.
++ Creates a descriptor for the FM PCD module.
++
++ @Param[in] p_FmPcdParams A structure of parameters for the initialization of PCD.
++
++ @Return A handle to the initialized module.
++*//***************************************************************************/
++t_Handle FM_PCD_Config(t_FmPcdParams *p_FmPcdParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_Init
++
++ @Description Initialization of the PCD module.
++
++ @Param[in] h_FmPcd - FM PCD module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_PCD_Init(t_Handle h_FmPcd);
++
++/**************************************************************************//**
++ @Function FM_PCD_Free
++
++ @Description Frees all resources that were assigned to the FM PCD module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmPcd - FM PCD module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_PCD_Free(t_Handle h_FmPcd);
++
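++/*
++ * Editorial usage sketch -- not part of the original header. Minimal PCD
++ * bring-up for a single-partition system with parser and KeyGen support
++ * only; 'h_Fm' is the initialized FM module handle, and memset()
++ * availability is assumed.
++ */
++static t_Handle ExamplePcdBringUp(t_Handle h_Fm,
++ t_FmPcdExceptionCallback *f_Exception,
++ t_Handle h_App)
++{
++ t_FmPcdParams params;
++ t_Handle h_FmPcd;
++
++ memset(&params, 0, sizeof(params));
++ params.h_Fm = h_Fm;
++ params.prsSupport = TRUE;
++ params.kgSupport = TRUE;
++ params.numOfSchemes = 1;
++ params.f_Exception = f_Exception;
++ params.h_App = h_App;
++
++ h_FmPcd = FM_PCD_Config(&params);
++ if (!h_FmPcd)
++ return NULL;
++
++ if (FM_PCD_Init(h_FmPcd) != E_OK) {
++ FM_PCD_Free(h_FmPcd);
++ return NULL;
++ }
++ return h_FmPcd;
++}
++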
++/**************************************************************************//**
++ @Group FM_PCD_advanced_cfg_grp FM PCD Advanced Configuration Unit
++
++ @Description Frame Manager PCD Advanced Configuration API.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_PCD_ConfigException
++
++ @Description Calling this routine changes the internal driver data base
++ from its default selection of exception enablement.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] exception The exception to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_ConfigException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable);
++
++/**************************************************************************//**
++ @Function FM_PCD_ConfigHcFramesDataMemory
++
++ @Description Configures memory-partition-id for FMan-Controller Host-Command
++ frames. Calling this routine changes the internal driver data
++ base from its default configuration [0].
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] memId Memory partition ID.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions This routine may be called only if 'useHostCommand' was TRUE
++ when FM_PCD_Config() routine was called.
++*//***************************************************************************/
++t_Error FM_PCD_ConfigHcFramesDataMemory(t_Handle h_FmPcd, uint8_t memId);
++
++/**************************************************************************//**
++ @Function FM_PCD_ConfigPlcrNumOfSharedProfiles
++
++ @Description Calling this routine changes the internal driver data base
++ from its default number of shared policer profiles
++ [DEFAULT_numOfSharedPlcrProfiles].
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] numOfSharedPlcrProfiles Number of profiles to
++ be shared between ports on this partition
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_PCD_ConfigPlcrNumOfSharedProfiles(t_Handle h_FmPcd, uint16_t numOfSharedPlcrProfiles);
++
++/**************************************************************************//**
++ @Function FM_PCD_ConfigPlcrAutoRefreshMode
++
++ @Description Calling this routine changes the internal driver data base
++ from its default policer auto-refresh configuration;
++ by default, auto-refresh is [DEFAULT_plcrAutoRefresh].
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] enable TRUE to enable, FALSE to disable
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_ConfigPlcrAutoRefreshMode(t_Handle h_FmPcd, bool enable);
++
++/**************************************************************************//**
++ @Function FM_PCD_ConfigPrsMaxCycleLimit
++
++ @Description Calling this routine changes the internal data structure for
++ the maximum parsing time from its default value
++ [DEFAULT_MAX_PRS_CYC_LIM].
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] value 0 to disable the mechanism, or new
++ maximum parsing time.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_ConfigPrsMaxCycleLimit(t_Handle h_FmPcd, uint16_t value);
++
++/** @} */ /* end of FM_PCD_advanced_cfg_grp group */
++/** @} */ /* end of FM_PCD_init_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_PCD_Runtime_grp FM PCD Runtime Unit
++
++ @Description Frame Manager PCD Runtime Unit API
++
++ The runtime control allows creation of PCD infrastructure modules
++ such as Network Environment Characteristics, Classification Plan
++ Groups and Coarse Classification Trees.
++ It also allows on-the-fly initialization, modification and removal
++ of PCD modules such as KeyGen schemes, coarse classification nodes
++ and Policer profiles.
++
++ In order to explain the programming model of the PCD driver interface
++ a few terms should be explained, and will be used below.
++ - Distinction Header - One of the 16 protocols supported by the FM parser,
++ or one of the SHIM headers (1 or 2). May be a header with a special
++ option (see below).
++ - Interchangeable Headers Group - This is a group of headers, any one of
++ which is accepted as a match. For example, if in a specific context the user
++ chooses to treat IPv4 and IPv6 in the same way, they may create an
++ interchangeable headers unit consisting of these two headers.
++ - A Distinction Unit - a Distinction Header or an Interchangeable Headers
++ Group.
++ - Header with special option - applies to Ethernet, MPLS, VLAN, IPv4 and
++ IPv6, includes multicast, broadcast and other protocol specific options.
++ In terms of hardware it relates to the options available in the classification
++ plan.
++ - Network Environment Characteristics - a set of Distinction Units that define
++ the total recognizable header selection for a certain environment. This is
++ NOT the list of all headers that will ever appear in a flow, but rather
++ everything that needs distinction in a flow, where distinction is made by KeyGen
++ schemes and coarse classification action descriptors.
++
++                Initialization of the PCD runtime modules is done in stages. The first stage after
++                initializing the PCD module itself is to establish a Network Flows Environment
++                Definition. The application may choose to establish one or more such environments.
++                Later, when needed, the application will have to state, for some of its modules,
++                to which single environment each of them belongs.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description A structure for SW parser labels
++ *//***************************************************************************/
++typedef struct t_FmPcdPrsLabelParams {
++ uint32_t instructionOffset; /**< SW parser label instruction offset (2 bytes
++ resolution), relative to Parser RAM. */
++    e_NetHeaderType         hdr;                /**< The existence of this header will invoke
++                                                     the SW parser code; use HEADER_TYPE_NONE
++                                                     to indicate that the SW parser is to run
++                                                     independently of the existence of any protocol
++                                                     (i.e. before the HW parser). */
++    uint8_t                 indexPerHdr;        /**< Normally 0; if there is more than one SW parser
++                                                     attachment for the same header, use this
++                                                     index to distinguish between them. */
++} t_FmPcdPrsLabelParams;
++
++/**************************************************************************//**
++ @Description A structure for SW parser
++ *//***************************************************************************/
++typedef struct t_FmPcdPrsSwParams {
++ bool override; /**< FALSE to invoke a check that nothing else
++ was loaded to this address, including
++ internal patches.
++ TRUE to override any existing code.*/
++ uint32_t size; /**< SW parser code size */
++ uint16_t base; /**< SW parser base (in instruction counts!
++ must be larger than 0x20)*/
++ uint8_t *p_Code; /**< SW parser code */
++ uint32_t swPrsDataParams[FM_PCD_PRS_NUM_OF_HDRS];
++ /**< SW parser data (parameters) */
++ uint8_t numOfLabels; /**< Number of labels for SW parser. */
++ t_FmPcdPrsLabelParams labelsTable[FM_PCD_PRS_NUM_OF_LABELS];
++ /**< SW parser labels table, containing
++ numOfLabels entries */
++} t_FmPcdPrsSwParams;
++
++
++/**************************************************************************//**
++ @Function FM_PCD_Enable
++
++ @Description This routine should be called after PCD is initialized for enabling all
++ PCD engines according to their existing configuration.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
++*//***************************************************************************/
++t_Error FM_PCD_Enable(t_Handle h_FmPcd);
++
++/**************************************************************************//**
++ @Function FM_PCD_Disable
++
++ @Description This routine may be called when PCD is enabled in order to
++ disable all PCD engines. It may be called
++ only when none of the ports in the system are using the PCD.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init() and when PCD is enabled.
++*//***************************************************************************/
++t_Error FM_PCD_Disable(t_Handle h_FmPcd);
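++
++/* Illustrative usage sketch (editor's addition, not part of the original
++ * header): brackets a PCD reconfiguration with FM_PCD_Disable()/FM_PCD_Enable().
++ * 'h_FmPcd' is assumed to be a handle previously obtained from FM_PCD_Init()
++ * (declared elsewhere in this header); the reconfigure step is a placeholder. */
++static t_Error reconfigure_pcd_example(t_Handle h_FmPcd)
++{
++    t_Error err;
++
++    /* None of the ports may be using the PCD when disabling it. */
++    err = FM_PCD_Disable(h_FmPcd);
++    if (err != E_OK)
++        return err;
++
++    /* ... apply configuration changes that require PCD to be disabled ... */
++
++    /* Re-enable all PCD engines with their (new) configuration. */
++    return FM_PCD_Enable(h_FmPcd);
++}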
++
++/**************************************************************************//**
++ @Function FM_PCD_GetCounter
++
++ @Description Reads one of the FM PCD counters.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] counter The requested counter.
++
++ @Return Counter's current value.
++
++ @Cautions Allowed only following FM_PCD_Init().
++                Note that it is the user's responsibility to call this routine only
++                for enabled counters; there will be no indication if a
++                disabled counter is accessed.
++*//***************************************************************************/
++uint32_t FM_PCD_GetCounter(t_Handle h_FmPcd, e_FmPcdCounters counter);
++
++/**************************************************************************//**
++@Function FM_PCD_PrsLoadSw
++
++@Description This routine may be called in order to load software parsing code.
++
++
++@Param[in] h_FmPcd FM PCD module descriptor.
++@Param[in] p_SwPrs A pointer to a structure of software
++ parser parameters, including the software
++ parser image.
++
++@Return E_OK on success; Error code otherwise.
++
++@Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_PrsLoadSw(t_Handle h_FmPcd, t_FmPcdPrsSwParams *p_SwPrs);
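++
++/* Illustrative usage sketch (editor's addition): loads a SW parser image for
++ * a single header. The image ('prs_code'/'prs_size') and the h_FmPcd handle
++ * are assumed to exist; 'MY_PRS_BASE' is a hypothetical base chosen by the
++ * caller (in instruction counts, must be larger than 0x20). memset() and the
++ * FALSE macro are assumed available in this build environment. */
++#define MY_PRS_BASE 0x40 /* hypothetical */
++static t_Error load_sw_parser_example(t_Handle h_FmPcd,
++                                      uint8_t *prs_code, uint32_t prs_size,
++                                      e_NetHeaderType hdr)
++{
++    t_FmPcdPrsSwParams swPrs;
++
++    memset(&swPrs, 0, sizeof(swPrs));
++    swPrs.override = FALSE;     /* fail if this range is already in use */
++    swPrs.base     = MY_PRS_BASE;
++    swPrs.size     = prs_size;
++    swPrs.p_Code   = prs_code;
++    swPrs.numOfLabels = 1;
++    swPrs.labelsTable[0].instructionOffset = 0; /* entry point of the image */
++    swPrs.labelsTable[0].hdr               = hdr;
++    swPrs.labelsTable[0].indexPerHdr       = 0;
++
++    /* PCD must be disabled while loading SW parser code. */
++    return FM_PCD_PrsLoadSw(h_FmPcd, &swPrs);
++}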
++
++/**************************************************************************//**
++@Function FM_PCD_SetAdvancedOffloadSupport
++
++@Description This routine must be called in order to support the following features:
++ IP-fragmentation, IP-reassembly, IPsec, Header-manipulation, frame-replicator.
++
++@Param[in] h_FmPcd FM PCD module descriptor.
++
++@Return E_OK on success; Error code otherwise.
++
++@Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_SetAdvancedOffloadSupport(t_Handle h_FmPcd);
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSetDfltValue
++
++ @Description Calling this routine sets a global default value to be used
++                by the KeyGen when the parser does not recognize a required
++                field/header.
++                By default, these values are 0.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] valueId 0,1 - one of 2 global default values.
++ @Param[in] value The requested default value.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_KgSetDfltValue(t_Handle h_FmPcd, uint8_t valueId, uint32_t value);
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSetAdditionalDataAfterParsing
++
++ @Description Calling this routine allows the KeyGen to access data past
++ the parser finishing point.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in]     payloadOffset   The number of bytes beyond the parser location.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_KgSetAdditionalDataAfterParsing(t_Handle h_FmPcd, uint8_t payloadOffset);
++
++/**************************************************************************//**
++ @Function FM_PCD_SetException
++
++ @Description Calling this routine enables/disables PCD interrupts.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] exception The exception to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_SetException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable);
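++
++/* Illustrative usage sketch (editor's addition): enables a single PCD
++ * exception interrupt. The e_FmPcdExceptions values are defined elsewhere in
++ * this header, so the exception to enable is taken as a parameter here; the
++ * TRUE macro is assumed available. */
++static t_Error enable_pcd_exception_example(t_Handle h_FmPcd,
++                                            e_FmPcdExceptions exception)
++{
++    /* TRUE enables the interrupt; FALSE would mask it. */
++    return FM_PCD_SetException(h_FmPcd, exception, TRUE);
++}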
++
++/**************************************************************************//**
++ @Function FM_PCD_ModifyCounter
++
++ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] counter The requested counter.
++ @Param[in] value The requested value to be written into the counter.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_ModifyCounter(t_Handle h_FmPcd, e_FmPcdCounters counter, uint32_t value);
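++
++/* Illustrative usage sketch (editor's addition): reads an enabled PCD counter
++ * and then resets it. The e_FmPcdCounters values are defined elsewhere in
++ * this header, so the counter is taken as a parameter. Note the caveat above:
++ * there is no indication if a disabled counter is accessed. */
++static uint32_t read_and_reset_counter_example(t_Handle h_FmPcd,
++                                               e_FmPcdCounters counter)
++{
++    uint32_t val = FM_PCD_GetCounter(h_FmPcd, counter);
++
++    /* Writing 0 resets the counter. */
++    (void)FM_PCD_ModifyCounter(h_FmPcd, counter, 0);
++    return val;
++}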
++
++/**************************************************************************//**
++ @Function FM_PCD_SetPlcrStatistics
++
++ @Description   This routine may be used to enable/disable the policer statistics
++                counters. By default, statistics gathering is enabled.
++
++ @Param[in] h_FmPcd FM PCD module descriptor
++ @Param[in] enable TRUE to enable, FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_SetPlcrStatistics(t_Handle h_FmPcd, bool enable);
++
++/**************************************************************************//**
++ @Function FM_PCD_SetPrsStatistics
++
++ @Description   Defines whether to gather parser statistics, aggregated across all ports.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] enable TRUE to enable, FALSE to disable.
++
++ @Return None
++
++ @Cautions Allowed only following FM_PCD_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++void FM_PCD_SetPrsStatistics(t_Handle h_FmPcd, bool enable);
++
++/**************************************************************************//**
++ @Function FM_PCD_HcTxConf
++
++ @Description This routine should be called to confirm frames that were
++ received on the HC confirmation queue.
++
++ @Param[in] h_FmPcd A handle to an FM PCD Module.
++ @Param[in] p_Fd Frame descriptor of the received frame.
++
++ @Cautions Allowed only following FM_PCD_Init(). Allowed only if 'useHostCommand'
++ option was selected in the initialization.
++*//***************************************************************************/
++void FM_PCD_HcTxConf(t_Handle h_FmPcd, t_DpaaFD *p_Fd);
++
++/**************************************************************************//*
++ @Function FM_PCD_ForceIntr
++
++ @Description Causes an interrupt event on the requested source.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] exception An exception to be forced.
++
++ @Return        E_OK on success; Error code if the exception is not enabled,
++                or cannot generate an interrupt.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PCD_ForceIntr (t_Handle h_FmPcd, e_FmPcdExceptions exception);
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++/**************************************************************************//**
++ @Function FM_PCD_DumpRegs
++
++ @Description Dumps all PCD registers
++
++ @Param[in] h_FmPcd A handle to an FM PCD Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ NOTE: this routine may be called only for FM in master mode
++                (i.e. 'guestId'=NCSW_MASTER_ID) or in case the registers
++ are mapped.
++*//***************************************************************************/
++t_Error FM_PCD_DumpRegs(t_Handle h_FmPcd);
++
++/**************************************************************************//**
++ @Function FM_PCD_KgDumpRegs
++
++ @Description Dumps all PCD KG registers
++
++ @Param[in] h_FmPcd A handle to an FM PCD Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ NOTE: this routine may be called only for FM in master mode
++                (i.e. 'guestId'=NCSW_MASTER_ID) or in case the registers
++ are mapped.
++*//***************************************************************************/
++t_Error FM_PCD_KgDumpRegs(t_Handle h_FmPcd);
++
++/**************************************************************************//**
++ @Function FM_PCD_PlcrDumpRegs
++
++ @Description Dumps all PCD Policer registers
++
++ @Param[in] h_FmPcd A handle to an FM PCD Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ NOTE: this routine may be called only for FM in master mode
++                (i.e. 'guestId'=NCSW_MASTER_ID) or in case the registers
++ are mapped.
++*//***************************************************************************/
++t_Error FM_PCD_PlcrDumpRegs(t_Handle h_FmPcd);
++
++/**************************************************************************//**
++ @Function FM_PCD_PlcrProfileDumpRegs
++
++ @Description Dumps all PCD Policer profile registers
++
++ @Param[in] h_Profile A handle to a Policer profile.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ NOTE: this routine may be called only for FM in master mode
++                (i.e. 'guestId'=NCSW_MASTER_ID) or in case the registers
++ are mapped.
++*//***************************************************************************/
++t_Error FM_PCD_PlcrProfileDumpRegs(t_Handle h_Profile);
++
++/**************************************************************************//**
++ @Function FM_PCD_PrsDumpRegs
++
++ @Description Dumps all PCD Parser registers
++
++ @Param[in] h_FmPcd A handle to an FM PCD Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ NOTE: this routine may be called only for FM in master mode
++                (i.e. 'guestId'=NCSW_MASTER_ID) or in case the registers
++ are mapped.
++*//***************************************************************************/
++t_Error FM_PCD_PrsDumpRegs(t_Handle h_FmPcd);
++
++/**************************************************************************//**
++ @Function FM_PCD_HcDumpRegs
++
++ @Description Dumps HC Port registers
++
++ @Param[in] h_FmPcd A handle to an FM PCD Module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++ NOTE: this routine may be called only for FM in master mode
++ (i.e. 'guestId'=NCSW_MASTER_ID).
++*//***************************************************************************/
++t_Error FM_PCD_HcDumpRegs(t_Handle h_FmPcd);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++
++
++/**************************************************************************//**
++ @Group         FM_PCD_Runtime_build_grp FM PCD Runtime Building Unit
++
++ @Description Frame Manager PCD Runtime Building API
++
++ This group contains routines for setting, deleting and modifying
++ PCD resources, for defining the total PCD tree.
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Collection Definitions of coarse classification
++ parameters as required by KeyGen (when coarse classification
++ is the next engine after this scheme).
++*//***************************************************************************/
++#define FM_PCD_MAX_NUM_OF_CC_TREES 8
++#define FM_PCD_MAX_NUM_OF_CC_GROUPS 16
++#define FM_PCD_MAX_NUM_OF_CC_UNITS 4
++#define FM_PCD_MAX_NUM_OF_KEYS 256
++#define FM_PCD_MAX_NUM_OF_FLOWS (4*KILOBYTE)
++#define FM_PCD_MAX_SIZE_OF_KEY 56
++#define FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP 16
++#define FM_PCD_LAST_KEY_INDEX 0xffff
++
++#define FM_PCD_MAX_NUM_OF_CC_NODES 255 /* Obsolete, not used - will be removed in the future */
++/* @} */
++
++/**************************************************************************//**
++ @Collection A set of definitions to allow protocol
++ special option description.
++*//***************************************************************************/
++typedef uint32_t protocolOpt_t; /**< A general type to define a protocol option. */
++
++typedef protocolOpt_t ethProtocolOpt_t; /**< Ethernet protocol options. */
++#define ETH_BROADCAST 0x80000000 /**< Ethernet Broadcast. */
++#define ETH_MULTICAST 0x40000000 /**< Ethernet Multicast. */
++
++typedef protocolOpt_t vlanProtocolOpt_t; /**< VLAN protocol options. */
++#define VLAN_STACKED 0x20000000 /**< Stacked VLAN. */
++
++typedef protocolOpt_t mplsProtocolOpt_t; /**< MPLS protocol options. */
++#define MPLS_STACKED 0x10000000 /**< Stacked MPLS. */
++
++typedef protocolOpt_t ipv4ProtocolOpt_t; /**< IPv4 protocol options. */
++#define IPV4_BROADCAST_1 0x08000000 /**< IPv4 Broadcast. */
++#define IPV4_MULTICAST_1 0x04000000 /**< IPv4 Multicast. */
++#define IPV4_UNICAST_2 0x02000000 /**< Tunneled IPv4 - Unicast. */
++#define IPV4_MULTICAST_BROADCAST_2 0x01000000 /**< Tunneled IPv4 - Broadcast/Multicast. */
++
++#define IPV4_FRAG_1 0x00000008 /**< IPV4 reassembly option.
++ IPV4 Reassembly manipulation requires network
++ environment with IPV4 header and IPV4_FRAG_1 option */
++
++typedef protocolOpt_t ipv6ProtocolOpt_t; /**< IPv6 protocol options. */
++#define IPV6_MULTICAST_1 0x00800000 /**< IPv6 Multicast. */
++#define IPV6_UNICAST_2 0x00400000 /**< Tunneled IPv6 - Unicast. */
++#define IPV6_MULTICAST_2 0x00200000 /**< Tunneled IPv6 - Multicast. */
++
++#define IPV6_FRAG_1 0x00000004 /**< IPV6 reassembly option.
++ IPV6 Reassembly manipulation requires network
++ environment with IPV6 header and IPV6_FRAG_1 option;
++                                                         in case a fragment is found, the fragment-extension offset
++                                                         may be found at 'shim2' (in the parser-result). */
++#if (DPAA_VERSION >= 11)
++typedef protocolOpt_t capwapProtocolOpt_t; /**< CAPWAP protocol options. */
++#define CAPWAP_FRAG_1 0x00000008 /**< CAPWAP reassembly option.
++ CAPWAP Reassembly manipulation requires network
++ environment with CAPWAP header and CAPWAP_FRAG_1 option;
++                                                         in case a fragment is found, the fragment-extension offset
++                                                         may be found at 'shim2' (in the parser-result). */
++#endif /* (DPAA_VERSION >= 11) */
++
++
++/* @} */
++
++#define FM_PCD_MANIP_MAX_HDR_SIZE 256
++#define FM_PCD_MANIP_DSCP_TO_VLAN_TRANS 64
++
++/**************************************************************************//**
++ @Collection A set of definitions to support Header Manipulation selection.
++*//***************************************************************************/
++typedef uint32_t hdrManipFlags_t; /**< A general type to define a HMan update command flags. */
++
++typedef hdrManipFlags_t ipv4HdrManipUpdateFlags_t; /**< IPv4 protocol HMan update command flags. */
++
++#define HDR_MANIP_IPV4_TOS 0x80000000 /**< update TOS with the given value ('tos' field
++ of t_FmPcdManipHdrFieldUpdateIpv4) */
++#define HDR_MANIP_IPV4_ID 0x40000000 /**< update IP ID with the given value ('id' field
++ of t_FmPcdManipHdrFieldUpdateIpv4) */
++#define HDR_MANIP_IPV4_TTL 0x20000000 /**< Decrement TTL by 1 */
++#define HDR_MANIP_IPV4_SRC 0x10000000 /**< update IP source address with the given value
++ ('src' field of t_FmPcdManipHdrFieldUpdateIpv4) */
++#define HDR_MANIP_IPV4_DST 0x08000000 /**< update IP destination address with the given value
++ ('dst' field of t_FmPcdManipHdrFieldUpdateIpv4) */
++
++typedef hdrManipFlags_t ipv6HdrManipUpdateFlags_t; /**< IPv6 protocol HMan update command flags. */
++
++#define HDR_MANIP_IPV6_TC       0x80000000      /**< update the Traffic Class field with the given value
++ ('trafficClass' field of t_FmPcdManipHdrFieldUpdateIpv6) */
++#define HDR_MANIP_IPV6_HL 0x40000000 /**< Decrement Hop Limit by 1 */
++#define HDR_MANIP_IPV6_SRC 0x20000000 /**< update IP source address with the given value
++ ('src' field of t_FmPcdManipHdrFieldUpdateIpv6) */
++#define HDR_MANIP_IPV6_DST 0x10000000 /**< update IP destination address with the given value
++ ('dst' field of t_FmPcdManipHdrFieldUpdateIpv6) */
++
++typedef hdrManipFlags_t tcpUdpHdrManipUpdateFlags_t;/**< TCP/UDP protocol HMan update command flags. */
++
++#define HDR_MANIP_TCP_UDP_SRC       0x80000000      /**< update TCP/UDP source port with the given value
++                                                         ('src' field of t_FmPcdManipHdrFieldUpdateTcpUdp) */
++#define HDR_MANIP_TCP_UDP_DST       0x40000000      /**< update TCP/UDP destination port with the given value
++                                                         ('dst' field of t_FmPcdManipHdrFieldUpdateTcpUdp) */
++#define HDR_MANIP_TCP_UDP_CHECKSUM 0x20000000 /**< update TCP/UDP checksum */
++
++/* @} */
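++
++/* Illustrative sketch (editor's addition): the HMan update flags above are
++ * single-bit masks and are meant to be OR'ed together into one flags word.
++ * Whether the combined value goes into a 'validUpdates'-style member of the
++ * field-update parameter structures is an assumption here; those structures
++ * are defined further on in this header. */
++static const ipv4HdrManipUpdateFlags_t exampleIpv4Updates =
++    HDR_MANIP_IPV4_TTL |    /* decrement TTL by 1 */
++    HDR_MANIP_IPV4_DST;     /* rewrite the destination address */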
++
++/**************************************************************************//**
++ @Description A type used for returning the order of the key extraction.
++                Each value in this array represents the index of the extraction
++                command as defined by the user in the initialization extraction array.
++                The valid size of this array is the user-defined number of extractions
++                required (also marked by the second '0' in this array).
++*//***************************************************************************/
++typedef uint8_t t_FmPcdKgKeyOrder [FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY];
++
++/**************************************************************************//**
++ @Description All PCD engines
++*//***************************************************************************/
++typedef enum e_FmPcdEngine {
++ e_FM_PCD_INVALID = 0, /**< Invalid PCD engine */
++ e_FM_PCD_DONE, /**< No PCD Engine indicated */
++ e_FM_PCD_KG, /**< KeyGen */
++ e_FM_PCD_CC, /**< Coarse classifier */
++ e_FM_PCD_PLCR, /**< Policer */
++ e_FM_PCD_PRS, /**< Parser */
++#if (DPAA_VERSION >= 11)
++ e_FM_PCD_FR, /**< Frame-Replicator */
++#endif /* (DPAA_VERSION >= 11) */
++ e_FM_PCD_HASH /**< Hash table */
++} e_FmPcdEngine;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting extraction by header types
++*//***************************************************************************/
++typedef enum e_FmPcdExtractByHdrType {
++ e_FM_PCD_EXTRACT_FROM_HDR, /**< Extract bytes from header */
++ e_FM_PCD_EXTRACT_FROM_FIELD, /**< Extract bytes from header field */
++ e_FM_PCD_EXTRACT_FULL_FIELD /**< Extract a full field */
++} e_FmPcdExtractByHdrType;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting extraction source
++ (when it is not the header)
++*//***************************************************************************/
++typedef enum e_FmPcdExtractFrom {
++ e_FM_PCD_EXTRACT_FROM_FRAME_START, /**< KG & CC: Extract from beginning of frame */
++ e_FM_PCD_EXTRACT_FROM_DFLT_VALUE, /**< KG only: Extract from a default value */
++ e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE, /**< KG & CC: Extract from the point where parsing had finished */
++    e_FM_PCD_EXTRACT_FROM_KEY,                  /**< CC only: Field where the KEY is saved */
++    e_FM_PCD_EXTRACT_FROM_HASH,                 /**< CC only: Field where the HASH is saved */
++    e_FM_PCD_EXTRACT_FROM_PARSE_RESULT,         /**< KG only: Extract from the parser result */
++    e_FM_PCD_EXTRACT_FROM_ENQ_FQID,             /**< KG & CC: Extract from enqueue FQID */
++    e_FM_PCD_EXTRACT_FROM_FLOW_ID               /**< CC only: Field where the Dequeue FQID is saved */
++} e_FmPcdExtractFrom;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting extraction type
++*//***************************************************************************/
++typedef enum e_FmPcdExtractType {
++ e_FM_PCD_EXTRACT_BY_HDR, /**< Extract according to header */
++ e_FM_PCD_EXTRACT_NON_HDR, /**< Extract from data that is not the header */
++ e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO /**< Extract private info as specified by user */
++} e_FmPcdExtractType;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting default extraction value
++*//***************************************************************************/
++typedef enum e_FmPcdKgExtractDfltSelect {
++ e_FM_PCD_KG_DFLT_GBL_0, /**< Default selection is KG register 0 */
++ e_FM_PCD_KG_DFLT_GBL_1, /**< Default selection is KG register 1 */
++ e_FM_PCD_KG_DFLT_PRIVATE_0, /**< Default selection is a per scheme register 0 */
++ e_FM_PCD_KG_DFLT_PRIVATE_1, /**< Default selection is a per scheme register 1 */
++ e_FM_PCD_KG_DFLT_ILLEGAL /**< Illegal selection */
++} e_FmPcdKgExtractDfltSelect;
++
++/**************************************************************************//**
++ @Description Enumeration type defining all default groups - each group shares
++ a default value, one of four user-initialized values.
++*//***************************************************************************/
++typedef enum e_FmPcdKgKnownFieldsDfltTypes {
++ e_FM_PCD_KG_MAC_ADDR, /**< MAC Address */
++ e_FM_PCD_KG_TCI, /**< TCI field */
++ e_FM_PCD_KG_ENET_TYPE, /**< ENET Type */
++ e_FM_PCD_KG_PPP_SESSION_ID, /**< PPP Session id */
++ e_FM_PCD_KG_PPP_PROTOCOL_ID, /**< PPP Protocol id */
++ e_FM_PCD_KG_MPLS_LABEL, /**< MPLS label */
++ e_FM_PCD_KG_IP_ADDR, /**< IP address */
++ e_FM_PCD_KG_PROTOCOL_TYPE, /**< Protocol type */
++ e_FM_PCD_KG_IP_TOS_TC, /**< TOS or TC */
++ e_FM_PCD_KG_IPV6_FLOW_LABEL, /**< IPV6 flow label */
++ e_FM_PCD_KG_IPSEC_SPI, /**< IPSEC SPI */
++ e_FM_PCD_KG_L4_PORT, /**< L4 Port */
++ e_FM_PCD_KG_TCP_FLAG, /**< TCP Flag */
++ e_FM_PCD_KG_GENERIC_FROM_DATA, /**< grouping implemented by SW,
++ any data extraction that is not the full
++ field described above */
++ e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V, /**< grouping implemented by SW,
++ any data extraction without validation */
++ e_FM_PCD_KG_GENERIC_NOT_FROM_DATA /**< grouping implemented by SW,
++ extraction from parser result or
++ direct use of default value */
++} e_FmPcdKgKnownFieldsDfltTypes;
++
++/**************************************************************************//**
++ @Description Enumeration type for defining header index for scenarios with
++ multiple (tunneled) headers
++*//***************************************************************************/
++typedef enum e_FmPcdHdrIndex {
++    e_FM_PCD_HDR_INDEX_NONE = 0,        /**< used when multiple headers are not used; also
++                                             specifies regular (not tunneled) IP. */
++ e_FM_PCD_HDR_INDEX_1, /**< may be used for VLAN, MPLS, tunneled IP */
++ e_FM_PCD_HDR_INDEX_2, /**< may be used for MPLS, tunneled IP */
++ e_FM_PCD_HDR_INDEX_3, /**< may be used for MPLS */
++ e_FM_PCD_HDR_INDEX_LAST = 0xFF /**< may be used for VLAN, MPLS */
++} e_FmPcdHdrIndex;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer profile functional type
++*//***************************************************************************/
++typedef enum e_FmPcdProfileTypeSelection {
++ e_FM_PCD_PLCR_PORT_PRIVATE, /**< Port dedicated profile */
++ e_FM_PCD_PLCR_SHARED /**< Shared profile (shared within partition) */
++} e_FmPcdProfileTypeSelection;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer profile algorithm
++*//***************************************************************************/
++typedef enum e_FmPcdPlcrAlgorithmSelection {
++ e_FM_PCD_PLCR_PASS_THROUGH, /**< Policer pass through */
++ e_FM_PCD_PLCR_RFC_2698, /**< Policer algorithm RFC 2698 */
++ e_FM_PCD_PLCR_RFC_4115 /**< Policer algorithm RFC 4115 */
++} e_FmPcdPlcrAlgorithmSelection;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting a policer profile color mode
++*//***************************************************************************/
++typedef enum e_FmPcdPlcrColorMode {
++ e_FM_PCD_PLCR_COLOR_BLIND, /**< Color blind */
++ e_FM_PCD_PLCR_COLOR_AWARE /**< Color aware */
++} e_FmPcdPlcrColorMode;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting a policer profile color
++*//***************************************************************************/
++typedef enum e_FmPcdPlcrColor {
++ e_FM_PCD_PLCR_GREEN, /**< Green color code */
++ e_FM_PCD_PLCR_YELLOW, /**< Yellow color code */
++ e_FM_PCD_PLCR_RED, /**< Red color code */
++ e_FM_PCD_PLCR_OVERRIDE /**< Color override code */
++} e_FmPcdPlcrColor;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer profile packet frame length selector
++*//***************************************************************************/
++typedef enum e_FmPcdPlcrFrameLengthSelect {
++ e_FM_PCD_PLCR_L2_FRM_LEN, /**< L2 frame length */
++ e_FM_PCD_PLCR_L3_FRM_LEN, /**< L3 frame length */
++ e_FM_PCD_PLCR_L4_FRM_LEN, /**< L4 frame length */
++ e_FM_PCD_PLCR_FULL_FRM_LEN /**< Full frame length */
++} e_FmPcdPlcrFrameLengthSelect;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting roll-back frame
++*//***************************************************************************/
++typedef enum e_FmPcdPlcrRollBackFrameSelect {
++ e_FM_PCD_PLCR_ROLLBACK_L2_FRM_LEN, /**< Roll-back L2 frame length */
++ e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN /**< Roll-back Full frame length */
++} e_FmPcdPlcrRollBackFrameSelect;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer profile packet or byte mode
++*//***************************************************************************/
++typedef enum e_FmPcdPlcrRateMode {
++ e_FM_PCD_PLCR_BYTE_MODE, /**< Byte mode */
++ e_FM_PCD_PLCR_PACKET_MODE /**< Packet mode */
++} e_FmPcdPlcrRateMode;
++
++/**************************************************************************//**
++ @Description Enumeration type for defining action of frame
++*//***************************************************************************/
++typedef enum e_FmPcdDoneAction {
++ e_FM_PCD_ENQ_FRAME = 0, /**< Enqueue frame */
++    e_FM_PCD_DROP_FRAME                 /**< Mark this frame as an error frame and continue
++                                             to the error flow; the 'FM_PORT_FRM_ERR_CLS_DISCARD'
++                                             flag will be set for this frame. */
++} e_FmPcdDoneAction;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer counter
++*//***************************************************************************/
++typedef enum e_FmPcdPlcrProfileCounters {
++ e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER, /**< Green packets counter */
++ e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER, /**< Yellow packets counter */
++ e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER, /**< Red packets counter */
++ e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER, /**< Recolored yellow packets counter */
++ e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER /**< Recolored red packets counter */
++} e_FmPcdPlcrProfileCounters;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the PCD action after extraction
++*//***************************************************************************/
++typedef enum e_FmPcdAction {
++ e_FM_PCD_ACTION_NONE, /**< NONE */
++ e_FM_PCD_ACTION_EXACT_MATCH, /**< Exact match on the selected extraction */
++ e_FM_PCD_ACTION_INDEXED_LOOKUP /**< Indexed lookup on the selected extraction */
++} e_FmPcdAction;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of insert manipulation
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrInsrtType {
++ e_FM_PCD_MANIP_INSRT_GENERIC, /**< Insert according to offset & size */
++ e_FM_PCD_MANIP_INSRT_BY_HDR, /**< Insert according to protocol */
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ e_FM_PCD_MANIP_INSRT_BY_TEMPLATE /**< Insert template to start of frame */
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++} e_FmPcdManipHdrInsrtType;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of remove manipulation
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrRmvType {
++ e_FM_PCD_MANIP_RMV_GENERIC, /**< Remove according to offset & size */
++    e_FM_PCD_MANIP_RMV_BY_HDR                   /**< Remove according to protocol */
++} e_FmPcdManipHdrRmvType;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting specific L2 fields removal
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrRmvSpecificL2 {
++ e_FM_PCD_MANIP_HDR_RMV_ETHERNET, /**< Ethernet/802.3 MAC */
++ e_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS, /**< stacked QTags */
++    e_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS,   /**< Ethernet/802.3 MAC header and MPLS labels, up to
++                                                     the header that follows the MPLS header(s) */
++ e_FM_PCD_MANIP_HDR_RMV_MPLS, /**< Remove MPLS header (Unlimited MPLS labels) */
++ e_FM_PCD_MANIP_HDR_RMV_PPPOE /**< Remove the PPPoE header and PPP protocol field. */
++} e_FmPcdManipHdrRmvSpecificL2;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting specific fields updates
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrFieldUpdateType {
++ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN, /**< VLAN updates */
++ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4, /**< IPV4 updates */
++ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6, /**< IPV6 updates */
++ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP, /**< TCP_UDP updates */
++} e_FmPcdManipHdrFieldUpdateType;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting VLAN updates
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrFieldUpdateVlan {
++    e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI,      /**< Replace VPri of the outermost VLAN tag. */
++ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN /**< DSCP to VLAN priority bits translation */
++} e_FmPcdManipHdrFieldUpdateVlan;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting specific L2 header insertion
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrInsrtSpecificL2 {
++ e_FM_PCD_MANIP_HDR_INSRT_MPLS, /**< Insert MPLS header (Unlimited MPLS labels) */
++ e_FM_PCD_MANIP_HDR_INSRT_PPPOE /**< Insert PPPOE */
++} e_FmPcdManipHdrInsrtSpecificL2;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Enumeration type for selecting QoS mapping mode
++
++                Note: In all cases except 'e_FM_PCD_MANIP_HDR_QOS_MAPPING_NONE',
++                the user should instruct the port to read the hash-result.
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrQosMappingMode {
++ e_FM_PCD_MANIP_HDR_QOS_MAPPING_NONE = 0, /**< No mapping, QoS field will not be changed */
++ e_FM_PCD_MANIP_HDR_QOS_MAPPING_AS_IS, /**< QoS field will be overwritten by the last byte in the hash-result. */
++} e_FmPcdManipHdrQosMappingMode;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting QoS source
++
++                Note: In all cases except 'e_FM_PCD_MANIP_HDR_QOS_SRC_NONE',
++                the user should leave room for the hash-result in the input/output buffer
++                and instruct the port to read/write the hash-result to/from the buffer (RPD should be set)
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrQosSrc {
++ e_FM_PCD_MANIP_HDR_QOS_SRC_NONE = 0, /**< TODO */
++ e_FM_PCD_MANIP_HDR_QOS_SRC_USER_DEFINED, /**< QoS will be taken from the last byte in the hash-result. */
++} e_FmPcdManipHdrQosSrc;
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of header insertion
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrInsrtByHdrType {
++ e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2, /**< Specific L2 fields insertion */
++#if (DPAA_VERSION >= 11)
++ e_FM_PCD_MANIP_INSRT_BY_HDR_IP, /**< IP insertion */
++ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP, /**< UDP insertion */
++ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE, /**< UDP lite insertion */
++ e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP /**< CAPWAP insertion */
++#endif /* (DPAA_VERSION >= 11) */
++} e_FmPcdManipHdrInsrtByHdrType;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting specific customCommand
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrCustomType {
++ e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE, /**< Replace IPv4/IPv6 */
++    e_FM_PCD_MANIP_HDR_CUSTOM_GEN_FIELD_REPLACE,    /**< Generic field replace */
++} e_FmPcdManipHdrCustomType;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting specific customCommand
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrCustomIpReplace {
++ e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV4_BY_IPV6, /**< Replace IPv4 by IPv6 */
++ e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4 /**< Replace IPv6 by IPv4 */
++} e_FmPcdManipHdrCustomIpReplace;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of header removal
++*//***************************************************************************/
++typedef enum e_FmPcdManipHdrRmvByHdrType {
++ e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2 = 0, /**< Specific L2 fields removal */
++#if (DPAA_VERSION >= 11)
++ e_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP, /**< CAPWAP removal */
++#endif /* (DPAA_VERSION >= 11) */
++#if (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START, /**< Locate from data that is not the header */
++#endif /* (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++} e_FmPcdManipHdrRmvByHdrType;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of timeout mode
++*//***************************************************************************/
++typedef enum e_FmPcdManipReassemTimeOutMode {
++ e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES, /**< Limits the time of the reassembly process
++ from the first fragment to the last */
++    e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAG        /**< Limits the time between receiving successive fragments */
++} e_FmPcdManipReassemTimeOutMode;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of WaysNumber mode
++*//***************************************************************************/
++typedef enum e_FmPcdManipReassemWaysNumber {
++ e_FM_PCD_MANIP_ONE_WAY_HASH = 1, /**< One way hash */
++ e_FM_PCD_MANIP_TWO_WAYS_HASH, /**< Two ways hash */
++ e_FM_PCD_MANIP_THREE_WAYS_HASH, /**< Three ways hash */
++ e_FM_PCD_MANIP_FOUR_WAYS_HASH, /**< Four ways hash */
++ e_FM_PCD_MANIP_FIVE_WAYS_HASH, /**< Five ways hash */
++ e_FM_PCD_MANIP_SIX_WAYS_HASH, /**< Six ways hash */
++ e_FM_PCD_MANIP_SEVEN_WAYS_HASH, /**< Seven ways hash */
++ e_FM_PCD_MANIP_EIGHT_WAYS_HASH /**< Eight ways hash */
++} e_FmPcdManipReassemWaysNumber;
++
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of statistics mode
++*//***************************************************************************/
++typedef enum e_FmPcdStatsType {
++ e_FM_PCD_STATS_PER_FLOWID = 0 /**< Flow ID is used as index for getting statistics */
++} e_FmPcdStatsType;
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting manipulation type
++*//***************************************************************************/
++typedef enum e_FmPcdManipType {
++ e_FM_PCD_MANIP_HDR = 0, /**< Header manipulation */
++ e_FM_PCD_MANIP_REASSEM, /**< Reassembly */
++ e_FM_PCD_MANIP_FRAG, /**< Fragmentation */
++ e_FM_PCD_MANIP_SPECIAL_OFFLOAD /**< Special Offloading */
++} e_FmPcdManipType;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of statistics mode
++*//***************************************************************************/
++typedef enum e_FmPcdCcStatsMode {
++ e_FM_PCD_CC_STATS_MODE_NONE = 0, /**< No statistics support */
++ e_FM_PCD_CC_STATS_MODE_FRAME, /**< Frame count statistics */
++ e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME, /**< Byte and frame count statistics */
++#if (DPAA_VERSION >= 11)
++ e_FM_PCD_CC_STATS_MODE_RMON, /**< Byte and frame length range count statistics;
++ This mode is supported only on B4860 device */
++#endif /* (DPAA_VERSION >= 11) */
++} e_FmPcdCcStatsMode;
++
++/**************************************************************************//**
++ @Description Enumeration type for determining the action in case an IP packet
++ is larger than MTU but its DF (Don't Fragment) bit is set.
++*//***************************************************************************/
++typedef enum e_FmPcdManipDontFragAction {
++ e_FM_PCD_MANIP_DISCARD_PACKET = 0, /**< Discard packet */
++ e_FM_PCD_MANIP_ENQ_TO_ERR_Q_OR_DISCARD_PACKET = e_FM_PCD_MANIP_DISCARD_PACKET,
++ /**< Obsolete, cannot enqueue to error queue;
++ In practice, selects to discard packets;
++ Will be removed in the future */
++ e_FM_PCD_MANIP_FRAGMENT_PACKET, /**< Fragment packet and continue normal processing */
++ e_FM_PCD_MANIP_CONTINUE_WITHOUT_FRAG /**< Continue normal processing without fragmenting the packet */
++} e_FmPcdManipDontFragAction;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of special offload manipulation
++*//***************************************************************************/
++typedef enum e_FmPcdManipSpecialOffloadType {
++ e_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC, /**< IPSec offload manipulation */
++#if (DPAA_VERSION >= 11)
++ e_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP /**< CAPWAP offload manipulation */
++#endif /* (DPAA_VERSION >= 11) */
++} e_FmPcdManipSpecialOffloadType;
++
++
++/**************************************************************************//**
++ @Description A Union of protocol dependent special options
++*//***************************************************************************/
++typedef union u_FmPcdHdrProtocolOpt {
++ ethProtocolOpt_t ethOpt; /**< Ethernet options */
++ vlanProtocolOpt_t vlanOpt; /**< VLAN options */
++ mplsProtocolOpt_t mplsOpt; /**< MPLS options */
++ ipv4ProtocolOpt_t ipv4Opt; /**< IPv4 options */
++ ipv6ProtocolOpt_t ipv6Opt; /**< IPv6 options */
++#if (DPAA_VERSION >= 11)
++ capwapProtocolOpt_t capwapOpt; /**< CAPWAP options */
++#endif /* (DPAA_VERSION >= 11) */
++} u_FmPcdHdrProtocolOpt;
++
++/**************************************************************************//**
++ @Description A union holding protocol fields
++
++
++ Fields supported as "full fields":
++ HEADER_TYPE_ETH:
++ NET_HEADER_FIELD_ETH_DA
++ NET_HEADER_FIELD_ETH_SA
++ NET_HEADER_FIELD_ETH_TYPE
++
++ HEADER_TYPE_LLC_SNAP:
++ NET_HEADER_FIELD_LLC_SNAP_TYPE
++
++ HEADER_TYPE_VLAN:
++ NET_HEADER_FIELD_VLAN_TCI
++ (index may apply:
++ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
++ e_FM_PCD_HDR_INDEX_LAST)
++
++ HEADER_TYPE_MPLS:
++ NET_HEADER_FIELD_MPLS_LABEL_STACK
++ (index may apply:
++ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
++ e_FM_PCD_HDR_INDEX_2,
++ e_FM_PCD_HDR_INDEX_LAST)
++
++ HEADER_TYPE_IPv4:
++ NET_HEADER_FIELD_IPv4_SRC_IP
++ NET_HEADER_FIELD_IPv4_DST_IP
++ NET_HEADER_FIELD_IPv4_PROTO
++ NET_HEADER_FIELD_IPv4_TOS
++ (index may apply:
++ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
++ e_FM_PCD_HDR_INDEX_2/e_FM_PCD_HDR_INDEX_LAST)
++
++ HEADER_TYPE_IPv6:
++ NET_HEADER_FIELD_IPv6_SRC_IP
++ NET_HEADER_FIELD_IPv6_DST_IP
++ NET_HEADER_FIELD_IPv6_NEXT_HDR
++ NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC (must come together!)
++ (index may apply:
++ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
++ e_FM_PCD_HDR_INDEX_2/e_FM_PCD_HDR_INDEX_LAST)
++
++                (Note that starting from DPAA 1.1, NET_HEADER_FIELD_IPv6_NEXT_HDR applies to
++                the last next-header indication, meaning the next L4 protocol, which may
++                follow the last IPv6 extension header. On earlier revisions this field
++                applies to the Next-Header field of the main IPv6 header.)
++
++ HEADER_TYPE_IP:
++ NET_HEADER_FIELD_IP_PROTO
++ (index may apply:
++ e_FM_PCD_HDR_INDEX_LAST)
++ NET_HEADER_FIELD_IP_DSCP
++ (index may apply:
++ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1)
++ HEADER_TYPE_GRE:
++ NET_HEADER_FIELD_GRE_TYPE
++
++ HEADER_TYPE_MINENCAP
++ NET_HEADER_FIELD_MINENCAP_SRC_IP
++ NET_HEADER_FIELD_MINENCAP_DST_IP
++ NET_HEADER_FIELD_MINENCAP_TYPE
++
++ HEADER_TYPE_TCP:
++ NET_HEADER_FIELD_TCP_PORT_SRC
++ NET_HEADER_FIELD_TCP_PORT_DST
++ NET_HEADER_FIELD_TCP_FLAGS
++
++ HEADER_TYPE_UDP:
++ NET_HEADER_FIELD_UDP_PORT_SRC
++ NET_HEADER_FIELD_UDP_PORT_DST
++
++ HEADER_TYPE_UDP_LITE:
++ NET_HEADER_FIELD_UDP_LITE_PORT_SRC
++ NET_HEADER_FIELD_UDP_LITE_PORT_DST
++
++ HEADER_TYPE_IPSEC_AH:
++ NET_HEADER_FIELD_IPSEC_AH_SPI
++ NET_HEADER_FIELD_IPSEC_AH_NH
++
++ HEADER_TYPE_IPSEC_ESP:
++ NET_HEADER_FIELD_IPSEC_ESP_SPI
++
++ HEADER_TYPE_SCTP:
++ NET_HEADER_FIELD_SCTP_PORT_SRC
++ NET_HEADER_FIELD_SCTP_PORT_DST
++
++ HEADER_TYPE_DCCP:
++ NET_HEADER_FIELD_DCCP_PORT_SRC
++ NET_HEADER_FIELD_DCCP_PORT_DST
++
++ HEADER_TYPE_PPPoE:
++ NET_HEADER_FIELD_PPPoE_PID
++ NET_HEADER_FIELD_PPPoE_SID
++
++ *****************************************************************
++ Fields supported as "from fields":
++ HEADER_TYPE_ETH (with or without validation):
++ NET_HEADER_FIELD_ETH_TYPE
++
++ HEADER_TYPE_VLAN (with or without validation):
++ NET_HEADER_FIELD_VLAN_TCI
++ (index may apply:
++ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
++ e_FM_PCD_HDR_INDEX_LAST)
++
++ HEADER_TYPE_IPv4 (without validation):
++ NET_HEADER_FIELD_IPv4_PROTO
++ (index may apply:
++ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
++ e_FM_PCD_HDR_INDEX_2/e_FM_PCD_HDR_INDEX_LAST)
++
++ HEADER_TYPE_IPv6 (without validation):
++ NET_HEADER_FIELD_IPv6_NEXT_HDR
++ (index may apply:
++ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
++ e_FM_PCD_HDR_INDEX_2/e_FM_PCD_HDR_INDEX_LAST)
++
++*//***************************************************************************/
++typedef union t_FmPcdFields {
++ headerFieldEth_t eth; /**< Ethernet */
++ headerFieldVlan_t vlan; /**< VLAN */
++ headerFieldLlcSnap_t llcSnap; /**< LLC SNAP */
++ headerFieldPppoe_t pppoe; /**< PPPoE */
++ headerFieldMpls_t mpls; /**< MPLS */
++ headerFieldIp_t ip; /**< IP */
++ headerFieldIpv4_t ipv4; /**< IPv4 */
++ headerFieldIpv6_t ipv6; /**< IPv6 */
++ headerFieldUdp_t udp; /**< UDP */
++ headerFieldUdpLite_t udpLite; /**< UDP Lite */
++ headerFieldTcp_t tcp; /**< TCP */
++ headerFieldSctp_t sctp; /**< SCTP */
++ headerFieldDccp_t dccp; /**< DCCP */
++ headerFieldGre_t gre; /**< GRE */
++ headerFieldMinencap_t minencap; /**< Minimal Encapsulation */
++ headerFieldIpsecAh_t ipsecAh; /**< IPSec AH */
++ headerFieldIpsecEsp_t ipsecEsp; /**< IPSec ESP */
++ headerFieldUdpEncapEsp_t udpEncapEsp; /**< UDP Encapsulation ESP */
++} t_FmPcdFields;
++
++/**************************************************************************//**
++ @Description Parameters for defining header extraction for key generation
++*//***************************************************************************/
++typedef struct t_FmPcdFromHdr {
++ uint8_t size; /**< Size in byte */
++ uint8_t offset; /**< Byte offset */
++} t_FmPcdFromHdr;
++
++/**************************************************************************//**
++ @Description Parameters for defining field extraction for key generation
++*//***************************************************************************/
++typedef struct t_FmPcdFromField {
++ t_FmPcdFields field; /**< Field selection */
++ uint8_t size; /**< Size in byte */
++ uint8_t offset; /**< Byte offset */
++} t_FmPcdFromField;
++
++/**************************************************************************//**
++ @Description Parameters for defining a single network environment unit
++
++ A distinction unit should be defined if it will later be used
++ by one or more PCD engines to distinguish between flows.
++*//***************************************************************************/
++typedef struct t_FmPcdDistinctionUnit {
++ struct {
++ e_NetHeaderType hdr; /**< One of the headers supported by the FM */
++ u_FmPcdHdrProtocolOpt opt; /**< Select only one option ! */
++ } hdrs[FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS];
++} t_FmPcdDistinctionUnit;
++
++/**************************************************************************//**
++ @Description Parameters for defining all different distinction units supported
++ by a specific PCD Network Environment Characteristics module.
++
++                Each unit represents a protocol or a group of protocols that may
++ be used later by the different PCD engines to distinguish
++ between flows.
++*//***************************************************************************/
++typedef struct t_FmPcdNetEnvParams {
++ uint8_t numOfDistinctionUnits; /**< Number of different units to be identified */
++    t_FmPcdDistinctionUnit  units[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /**< An array of numOfDistinctionUnits entries
++                                                                             describing the units to be identified */
++} t_FmPcdNetEnvParams;
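++
++/* Illustrative sketch (editor's addition): builds the network environment
++ * parameters for the IPv4/IPv6 example given in the Runtime Unit description
++ * above - a single distinction unit in which IPv4 and IPv6 are treated as
++ * interchangeable. The filled structure would then be passed to the NetEnv
++ * creation routine declared elsewhere in this header. HEADER_TYPE_IPv4 and
++ * HEADER_TYPE_IPv6 are assumed to be among the e_NetHeaderType values, and
++ * memset() is assumed available. */
++static void build_netenv_example(t_FmPcdNetEnvParams *p_NetEnv)
++{
++    memset(p_NetEnv, 0, sizeof(*p_NetEnv));
++    p_NetEnv->numOfDistinctionUnits = 1;
++    /* Unit 0: match either IPv4 or IPv6; no special protocol options. */
++    p_NetEnv->units[0].hdrs[0].hdr = HEADER_TYPE_IPv4;
++    p_NetEnv->units[0].hdrs[1].hdr = HEADER_TYPE_IPv6;
++}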
++
++/**************************************************************************//**
++ @Description Parameters for defining a single extraction action when
++ creating a key
++*//***************************************************************************/
++typedef struct t_FmPcdExtractEntry {
++ e_FmPcdExtractType type; /**< Extraction type select */
++ union {
++ struct {
++ e_NetHeaderType hdr; /**< Header selection */
++ bool ignoreProtocolValidation;
++ /**< Ignore protocol validation */
++ e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled
++ IP. Otherwise should be cleared. */
++ e_FmPcdExtractByHdrType type; /**< Header extraction type select */
++ union {
++ t_FmPcdFromHdr fromHdr; /**< Extract bytes from header parameters */
++ t_FmPcdFromField fromField; /**< Extract bytes from field parameters */
++                t_FmPcdFields           fullField;      /**< Extract full field parameters */
++ } extractByHdrType;
++        } extractByHdr;                             /**< used when type = e_FM_PCD_EXTRACT_BY_HDR */
++ struct {
++ e_FmPcdExtractFrom src; /**< Non-header extraction source */
++ e_FmPcdAction action; /**< Relevant for CC Only */
++ uint16_t icIndxMask; /**< Relevant only for CC when
++ action = e_FM_PCD_ACTION_INDEXED_LOOKUP;
++ Note that the number of bits that are set within
++ this mask must be log2 of the CC-node 'numOfKeys'.
++ Note that the mask cannot be set on the lower bits. */
++ uint8_t offset; /**< Byte offset */
++ uint8_t size; /**< Size in byte */
++        } extractNonHdr;                            /**< used when type = e_FM_PCD_EXTRACT_NON_HDR */
++ };
++} t_FmPcdExtractEntry;
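++
++/* Illustrative sketch (editor's addition): fills one extraction entry that
++ * takes the full IPv4 source-address field (NET_HEADER_FIELD_IPv4_SRC_IP is
++ * listed below as a supported "full field"). Assigning that define to the
++ * headerFieldIpv4_t union member is an assumption here, as is the FALSE
++ * macro. */
++static void build_extract_entry_example(t_FmPcdExtractEntry *p_Extract)
++{
++    memset(p_Extract, 0, sizeof(*p_Extract));
++    p_Extract->type                                  = e_FM_PCD_EXTRACT_BY_HDR;
++    p_Extract->extractByHdr.hdr                      = HEADER_TYPE_IPv4;
++    p_Extract->extractByHdr.hdrIndex                 = e_FM_PCD_HDR_INDEX_NONE;
++    p_Extract->extractByHdr.ignoreProtocolValidation = FALSE;
++    p_Extract->extractByHdr.type                     = e_FM_PCD_EXTRACT_FULL_FIELD;
++    p_Extract->extractByHdr.extractByHdrType.fullField.ipv4 =
++        NET_HEADER_FIELD_IPv4_SRC_IP;
++}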
++
++/**************************************************************************//**
++ @Description Parameters for defining masks for each extracted field in the key.
++*//***************************************************************************/
++typedef struct t_FmPcdKgExtractMask {
++ uint8_t extractArrayIndex; /**< Index in the extraction array, as initialized by user */
++ uint8_t offset; /**< Byte offset */
++ uint8_t mask; /**< A byte mask (selected bits will be used) */
++} t_FmPcdKgExtractMask;
++
++/**************************************************************************//**
++ @Description Parameters for defining default selection per groups of fields
++*//***************************************************************************/
++typedef struct t_FmPcdKgExtractDflt {
++ e_FmPcdKgKnownFieldsDfltTypes type; /**< Default type select */
++ e_FmPcdKgExtractDfltSelect dfltSelect; /**< Default register select */
++} t_FmPcdKgExtractDflt;
++
++/**************************************************************************//**
++ @Description Parameters for defining key extraction and hashing
++*//***************************************************************************/
++typedef struct t_FmPcdKgKeyExtractAndHashParams {
++ uint32_t privateDflt0; /**< Scheme default register 0 */
++ uint32_t privateDflt1; /**< Scheme default register 1 */
++ uint8_t numOfUsedExtracts; /**< defines the valid size of the following array */
++ t_FmPcdExtractEntry extractArray [FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY]; /**< An array of extractions definition. */
++ uint8_t numOfUsedDflts; /**< defines the valid size of the following array */
++ t_FmPcdKgExtractDflt dflts[FM_PCD_KG_NUM_OF_DEFAULT_GROUPS];
++                                            /**< For each extraction used in this scheme, specify the required
++                                                 default register to be used when the header is not found.
++                                                 Types not specified in this array will get an undefined value. */
++ uint8_t numOfUsedMasks; /**< defines the valid size of the following array */
++ t_FmPcdKgExtractMask masks[FM_PCD_KG_NUM_OF_EXTRACT_MASKS];
++    uint8_t                     hashShift;          /**< Hash result right shift. Selects the 24 bits out of the 64-bit
++                                                         hash result. 0 means use the 24 LSBs; otherwise the
++                                                         24 LSBs after shifting right are used.*/
++ uint32_t hashDistributionNumOfFqids; /**< must be > 1 and a power of 2. Represents the range
++ of queues for the key and hash functionality */
++    uint8_t                     hashDistributionFqidsShift; /**< selects the FQID bits that will be affected by the hash */
++ bool symmetricHash; /**< TRUE to generate the same hash for frames with swapped source and
++ destination fields on all layers; If TRUE, driver will check that for
++ all layers, if SRC extraction is selected, DST extraction must also be
++ selected, and vice versa. */
++} t_FmPcdKgKeyExtractAndHashParams;
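++
++/* Illustrative sketch (editor's addition): key-and-hash parameters that hash
++ * on the IPv4 source address (via build_extract_entry_example() above) and
++ * spread flows over 64 FQIDs; 64 satisfies the "> 1 and a power of 2"
++ * requirement on hashDistributionNumOfFqids. */
++static void build_key_and_hash_example(t_FmPcdKgKeyExtractAndHashParams *p_Kh)
++{
++    memset(p_Kh, 0, sizeof(*p_Kh));
++    p_Kh->numOfUsedExtracts = 1;
++    build_extract_entry_example(&p_Kh->extractArray[0]);
++    p_Kh->hashShift                  = 0;  /* use the 24 LSBs of the hash */
++    p_Kh->hashDistributionNumOfFqids = 64; /* power of 2, > 1 */
++    p_Kh->hashDistributionFqidsShift = 0;
++    p_Kh->symmetricHash              = FALSE;
++}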
++
++/**************************************************************************//**
++ @Description Parameters for defining a single FQID mask (extracted OR).
++*//***************************************************************************/
++typedef struct t_FmPcdKgExtractedOrParams {
++ e_FmPcdExtractType type; /**< Extraction type select */
++ union {
++        struct {                            /**< used when type = e_FM_PCD_EXTRACT_BY_HDR */
++ e_NetHeaderType hdr;
++ e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled
++ IP. Otherwise should be cleared.*/
++ bool ignoreProtocolValidation;
++ /**< continue extraction even if protocol is not recognized */
++ } extractByHdr; /**< Header to extract by */
++        e_FmPcdExtractFrom      src;        /**< used when type = e_FM_PCD_EXTRACT_NON_HDR */
++ };
++ uint8_t extractionOffset; /**< Offset for extraction (in bytes). */
++ e_FmPcdKgExtractDfltSelect dfltValue; /**< Select register from which extraction is taken if
++ field not found */
++ uint8_t mask; /**< Extraction mask (specified bits are used) */
++    uint8_t                     bitOffsetInFqid;    /**< 0-31, Selects which of the 24 FQID bits to affect using
++                                                     the extracted byte; Assume the byte is placed as the 8 MSBs
++                                                     in a 32-bit word where the lower bits
++                                                     are the FQID; i.e. if bitOffsetInFqid=1 then its LSB
++                                                     will affect the FQID's MSB, if bitOffsetInFqid=24 then the
++                                                     extracted byte will affect the 8 LSBs of the FQID,
++                                                     if bitOffsetInFqid=31 then the byte's MSB will affect
++                                                     the FQID's LSB; 0 means no effect on the FQID;
++                                                     Note that one, and only one, of
++                                                     bitOffsetInFqid or bitOffsetInPlcrProfile must be set (i.e.
++                                                     the extracted byte must affect either FQID or Policer profile).*/
++ uint8_t bitOffsetInPlcrProfile;
++                                                /**< 0-15, Selects which of the 8 policer profile id bits to
++                                                     affect using the extracted byte; Assume the byte is placed
++                                                     as the 8 MSBs in a 16-bit word where the lower bits
++                                                     are the policer profile id; i.e. if bitOffsetInPlcrProfile=1
++                                                     then its LSB will affect the profile's MSB, if bitOffsetInPlcrProfile=8
++                                                     then the extracted byte will affect the whole policer profile id,
++                                                     if bitOffsetInPlcrProfile=15 then the byte's MSB will affect
++                                                     the Policer Profile id's LSB;
++                                                     0 means no effect on the policer profile; Note that one, and only one,
++                                                     of bitOffsetInFqid or bitOffsetInPlcrProfile must be set (i.e.
++                                                     the extracted byte must affect either FQID or Policer profile).*/
++} t_FmPcdKgExtractedOrParams;
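++
++/*
++ * Editorial sketch (not part of the original header): the bit placement
++ * described above, modeled in software - the extracted byte is treated
++ * as the 8 MSBs of a 32-bit word, shifted right by bitOffsetInFqid and
++ * OR'ed into the 24-bit FQID. This only illustrates the documented
++ * semantics; the real placement is performed by the KeyGen hardware.
++ */
++static inline uint32_t example_apply_extracted_or(uint32_t fqid24,
++                                                  uint8_t  extractedByte,
++                                                  uint8_t  bitOffsetInFqid)
++{
++    if (bitOffsetInFqid == 0)
++        return fqid24; /* 0 means no effect on the FQID */
++
++    /* bitOffsetInFqid=1 lets the byte's LSB reach the FQID's MSB;
++       bitOffsetInFqid=24 aligns the byte with the 8 LSBs of the FQID. */
++    return fqid24 | ((((uint32_t)extractedByte << 24) >> bitOffsetInFqid)
++                     & 0x00FFFFFF);
++}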
++
++/**************************************************************************//**
++ @Description Parameters for configuring a scheme counter
++*//***************************************************************************/
++typedef struct t_FmPcdKgSchemeCounter {
++ bool update; /**< FALSE to keep the current counter state
++ and continue from that point, TRUE to update/reset
++ the counter when the scheme is written. */
++ uint32_t value; /**< If update=TRUE, this value will be written into the
++ counter. clear this field to reset the counter. */
++} t_FmPcdKgSchemeCounter;
++
++/**************************************************************************//**
++ @Description Parameters for configuring a policer profile for a KeyGen scheme
++ (when policer is the next engine after this scheme).
++*//***************************************************************************/
++typedef struct t_FmPcdKgPlcrProfile {
++ bool sharedProfile; /**< TRUE if this profile is shared between ports
++ (managed by master partition); Must not be TRUE
++ if profile is after Coarse Classification*/
++ bool direct; /**< if TRUE, directRelativeProfileId only selects the profile
++ id, if FALSE fqidOffsetRelativeProfileIdBase is used
++ together with fqidOffsetShift and numOfProfiles
++ parameters, to define a range of profiles from
++ which the KeyGen result will determine the
++ destination policer profile. */
++ union {
++ uint16_t directRelativeProfileId; /**< Used if 'direct' is TRUE, to select policer profile.
++ should indicate the policer profile offset within the
++ port's policer profiles or shared window. */
++ struct {
++ uint8_t fqidOffsetShift; /**< Shift on the KeyGen create FQID offset (i.e. not the
++            uint8_t fqidOffsetShift;            /**< Shift applied to the KeyGen-created FQID offset (i.e. not the
++                                                     final FQID - without the FQID base). */
++            uint8_t fqidOffsetRelativeProfileIdBase;
++                                                /**< The base of the FMan Port's relative policer profile ID;
++                                                     this value will be "OR'ed" with the KeyGen-created FQID
++                                                     offset (i.e. not the final FQID - without the FQID base);
++                                                     the final result should indicate the policer profile offset
++                                                     within the FMan Port's relative policer profiles window
++                                                     (or the SHARED window, depending on 'sharedProfile'). */
++ uint8_t numOfProfiles; /**< Range of profiles starting at base */
++ } indirectProfile; /**< Indirect profile parameters */
++ } profileSelect; /**< Direct/indirect profile selection and parameters */
++} t_FmPcdKgPlcrProfile;
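++
++/*
++ * Editorial sketch (not part of the original header): with 'direct' set
++ * to FALSE, the destination profile is derived from the KeyGen-created
++ * FQID offset as described above - shifted right by fqidOffsetShift and
++ * OR'ed with fqidOffsetRelativeProfileIdBase. Illustration only; the
++ * actual resolution is performed by hardware.
++ */
++static inline uint8_t example_indirect_plcr_profile(uint32_t kgFqidOffset,
++                                                    uint8_t  fqidOffsetShift,
++                                                    uint8_t  profileIdBase)
++{
++    return (uint8_t)(profileIdBase | (kgFqidOffset >> fqidOffsetShift));
++}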
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Parameters for configuring a storage profile for a KeyGen scheme.
++*//***************************************************************************/
++typedef struct t_FmPcdKgStorageProfile {
++ bool direct; /**< If TRUE, directRelativeProfileId only selects the
++ profile id;
++ If FALSE, fqidOffsetRelativeProfileIdBase is used
++ together with fqidOffsetShift and numOfProfiles
++ parameters to define a range of profiles from which
++ the KeyGen result will determine the destination
++ storage profile. */
++ union {
++ uint16_t directRelativeProfileId; /**< Used when 'direct' is TRUE, to select a storage profile;
++ should indicate the storage profile offset within the
++ port's storage profiles window. */
++ struct {
++            uint8_t fqidOffsetShift;            /**< Shift applied to the KeyGen-created FQID offset (i.e. not the
++                                                     final FQID - without the FQID base). */
++            uint8_t fqidOffsetRelativeProfileIdBase;
++                                                /**< The base of the FMan Port's relative Storage-Profile ID;
++                                                     this value will be "OR'ed" with the KeyGen-created FQID
++                                                     offset (i.e. not the final FQID - without the FQID base);
++                                                     the final result should indicate the Storage-Profile offset
++                                                     within the FMan Port's relative Storage-Profiles window. */
++ uint8_t numOfProfiles; /**< Range of profiles starting at base. */
++ } indirectProfile; /**< Indirect profile parameters. */
++ } profileSelect; /**< Direct/indirect profile selection and parameters. */
++} t_FmPcdKgStorageProfile;
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description Parameters for defining CC as the next engine after KeyGen
++*//***************************************************************************/
++typedef struct t_FmPcdKgCc {
++ t_Handle h_CcTree; /**< A handle to a CC Tree */
++ uint8_t grpId; /**< CC group id within the CC tree */
++ bool plcrNext; /**< TRUE if after CC, in case of data frame,
++ policing is required. */
++ bool bypassPlcrProfileGeneration; /**< TRUE to bypass KeyGen policer profile generation;
++ selected profile is the one set at port initialization. */
++ t_FmPcdKgPlcrProfile plcrProfile; /**< Valid only if plcrNext = TRUE and
++ bypassPlcrProfileGeneration = FALSE */
++} t_FmPcdKgCc;
++
++/**************************************************************************//**
++ @Description Parameters for defining initializing a KeyGen scheme
++*//***************************************************************************/
++typedef struct t_FmPcdKgSchemeParams {
++ bool modify; /**< TRUE to change an existing scheme */
++ union
++ {
++        uint8_t  relativeSchemeId;      /**< if modify=FALSE: partition relative scheme id */
++ t_Handle h_Scheme; /**< if modify=TRUE: a handle of the existing scheme */
++ } id;
++ bool alwaysDirect; /**< This scheme is reached only directly, i.e. no need
++ for match vector; KeyGen will ignore it when matching */
++    struct {                                    /**< Relevant only if alwaysDirect = FALSE */
++ t_Handle h_NetEnv; /**< A handle to the Network environment as returned
++ by FM_PCD_NetEnvCharacteristicsSet() */
++ uint8_t numOfDistinctionUnits; /**< Number of NetEnv units listed in unitIds array */
++ uint8_t unitIds[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
++ /**< Indexes as passed to SetNetEnvCharacteristics array*/
++ } netEnvParams;
++ bool useHash; /**< use the KeyGen Hash functionality */
++ t_FmPcdKgKeyExtractAndHashParams keyExtractAndHashParams;
++ /**< used only if useHash = TRUE */
++ bool bypassFqidGeneration; /**< Normally - FALSE, TRUE to avoid FQID update in the IC;
++ In such a case FQID after KeyGen will be the default FQID
++ defined for the relevant port, or the FQID defined by CC
++ in cases where CC was the previous engine. */
++ uint32_t baseFqid; /**< Base FQID; Relevant only if bypassFqidGeneration = FALSE;
++ If hash is used and an even distribution is expected
++ according to hashDistributionNumOfFqids, baseFqid must be aligned to
++ hashDistributionNumOfFqids. */
++ uint8_t numOfUsedExtractedOrs; /**< Number of FQID masks listed in extractedOrs array */
++ t_FmPcdKgExtractedOrParams extractedOrs[FM_PCD_KG_NUM_OF_GENERIC_REGS];
++ /**< FM_PCD_KG_NUM_OF_GENERIC_REGS
++ registers are shared between qidMasks
++ functionality and some of the extraction
++ actions; Normally only some will be used
++ for qidMask. Driver will return error if
++ resource is full at initialization time. */
++
++#if (DPAA_VERSION >= 11)
++    bool                                overrideStorageProfile; /**< TRUE if KeyGen overrides the previously decided storage profile */
++    t_FmPcdKgStorageProfile             storageProfile; /**< Used when overrideStorageProfile = TRUE */
++#endif /* (DPAA_VERSION >= 11) */
++
++ e_FmPcdEngine nextEngine; /**< may be BMI, PLCR or CC */
++ union { /**< depends on nextEngine */
++ e_FmPcdDoneAction doneAction; /**< Used when next engine is BMI (done) */
++ t_FmPcdKgPlcrProfile plcrProfile; /**< Used when next engine is PLCR */
++ t_FmPcdKgCc cc; /**< Used when next engine is CC */
++ } kgNextEngineParams;
++ t_FmPcdKgSchemeCounter schemeCounter; /**< A structure of parameters for updating
++ the scheme counter */
++} t_FmPcdKgSchemeParams;
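++
++/*
++ * Editorial sketch (not part of the original header): a minimal KeyGen
++ * scheme that hashes a single extracted field and spreads traffic over 64
++ * queues, following the constraints documented above (power-of-2 FQID
++ * range, baseFqid aligned to it). FM_PCD_KgSchemeSet() and memset() are
++ * assumed to be available; the extraction entry itself is elided.
++ */
++static t_Handle example_set_hash_scheme(t_Handle h_FmPcd)
++{
++    t_FmPcdKgSchemeParams params;
++
++    memset(&params, 0, sizeof(params));
++    params.modify = FALSE;
++    params.id.relativeSchemeId = 0;  /* partition relative scheme id */
++    params.alwaysDirect = TRUE;      /* netEnvParams not needed, see above */
++    params.useHash = TRUE;
++    params.keyExtractAndHashParams.numOfUsedExtracts = 1;
++    /* fill keyExtractAndHashParams.extractArray[0] (t_FmPcdExtractEntry,
++       declared earlier in this header) with the field to hash on */
++    params.keyExtractAndHashParams.hashShift = 0; /* use the 24 LSBs */
++    params.keyExtractAndHashParams.hashDistributionNumOfFqids = 64;
++    params.bypassFqidGeneration = FALSE;
++    params.baseFqid = 0x1000;        /* aligned to 64, as required */
++    params.nextEngine = e_FM_PCD_DONE;
++    params.kgNextEngineParams.doneAction = e_FM_PCD_ENQ_FRAME;
++
++    return FM_PCD_KgSchemeSet(h_FmPcd, &params); /* assumed FMD call */
++}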
++
++/**************************************************************************//**
++ @Collection Definitions for CC statistics
++*//***************************************************************************/
++#if (DPAA_VERSION >= 11)
++#define FM_PCD_CC_STATS_MAX_NUM_OF_FLR 10 /* Maximal supported number of frame length ranges */
++#define FM_PCD_CC_STATS_FLR_SIZE 2 /* Size in bytes of a frame length range limit */
++#endif /* (DPAA_VERSION >= 11) */
++#define FM_PCD_CC_STATS_COUNTER_SIZE 4 /* Size in bytes of a frame length range counter */
++/* @} */
++
++/**************************************************************************//**
++ @Description Parameters for defining CC as the next engine after a CC node.
++*//***************************************************************************/
++typedef struct t_FmPcdCcNextCcParams {
++ t_Handle h_CcNode; /**< A handle of the next CC node */
++} t_FmPcdCcNextCcParams;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Parameters for defining Frame replicator as the next engine after a CC node.
++*//***************************************************************************/
++typedef struct t_FmPcdCcNextFrParams {
++ t_Handle h_FrmReplic; /**< A handle of the next frame replicator group */
++} t_FmPcdCcNextFrParams;
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description Parameters for defining Policer as the next engine after a CC node.
++*//***************************************************************************/
++typedef struct t_FmPcdCcNextPlcrParams {
++    bool        overrideParams;         /**< TRUE if CC overrides previously decided parameters */
++ bool sharedProfile; /**< Relevant only if overrideParams=TRUE:
++ TRUE if this profile is shared between ports */
++ uint16_t newRelativeProfileId; /**< Relevant only if overrideParams=TRUE:
++ (otherwise profile id is taken from KeyGen);
++ This parameter should indicate the policer
++ profile offset within the port's
++ policer profiles or from SHARED window.*/
++ uint32_t newFqid; /**< Relevant only if overrideParams=TRUE:
++ FQID for enqueuing the frame;
++ In earlier chips if policer next engine is KEYGEN,
++ this parameter can be 0, because the KEYGEN
++ always decides the enqueue FQID.*/
++#if (DPAA_VERSION >= 11)
++ uint8_t newRelativeStorageProfileId;
++ /**< Indicates the relative storage profile offset within
++ the port's storage profiles window;
++ Relevant only if the port was configured with VSP. */
++#endif /* (DPAA_VERSION >= 11) */
++} t_FmPcdCcNextPlcrParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining enqueue as the next action after a CC node.
++*//***************************************************************************/
++typedef struct t_FmPcdCcNextEnqueueParams {
++ e_FmPcdDoneAction action; /**< Action - when next engine is BMI (done) */
++    bool        overrideFqid;           /**< TRUE if CC overrides previously decided fqid and vspid;
++ relevant if action = e_FM_PCD_ENQ_FRAME */
++ uint32_t newFqid; /**< Valid if overrideFqid=TRUE, FQID for enqueuing the frame
++ (otherwise FQID is taken from KeyGen),
++ relevant if action = e_FM_PCD_ENQ_FRAME */
++#if (DPAA_VERSION >= 11)
++ uint8_t newRelativeStorageProfileId;
++ /**< Valid if overrideFqid=TRUE, Indicates the relative virtual
++ storage profile offset within the port's storage profiles
++ window; Relevant only if the port was configured with VSP. */
++#endif /* (DPAA_VERSION >= 11) */
++} t_FmPcdCcNextEnqueueParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining KeyGen as the next engine after a CC node.
++*//***************************************************************************/
++typedef struct t_FmPcdCcNextKgParams {
++    bool        overrideFqid;           /**< TRUE if CC overrides previously decided fqid and vspid;
++                                             Note - this parameter is irrelevant for earlier chips */
++    uint32_t    newFqid;                /**< Valid if overrideFqid=TRUE, FQID for enqueuing the frame
++                                             (otherwise FQID is taken from KeyGen);
++                                             Note - this parameter is irrelevant for earlier chips */
++#if (DPAA_VERSION >= 11)
++ uint8_t newRelativeStorageProfileId;
++ /**< Valid if overrideFqid=TRUE, Indicates the relative virtual
++ storage profile offset within the port's storage profiles
++ window; Relevant only if the port was configured with VSP. */
++#endif /* (DPAA_VERSION >= 11) */
++
++ t_Handle h_DirectScheme; /**< Direct scheme handle to go to. */
++} t_FmPcdCcNextKgParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining the next engine after a CC node.
++*//***************************************************************************/
++typedef struct t_FmPcdCcNextEngineParams {
++ e_FmPcdEngine nextEngine; /**< User has to initialize parameters
++ according to nextEngine definition */
++ union {
++ t_FmPcdCcNextCcParams ccParams; /**< Parameters in case next engine is CC */
++ t_FmPcdCcNextPlcrParams plcrParams; /**< Parameters in case next engine is PLCR */
++ t_FmPcdCcNextEnqueueParams enqueueParams; /**< Parameters in case next engine is BMI */
++ t_FmPcdCcNextKgParams kgParams; /**< Parameters in case next engine is KG */
++#if (DPAA_VERSION >= 11)
++ t_FmPcdCcNextFrParams frParams; /**< Parameters in case next engine is FR */
++#endif /* (DPAA_VERSION >= 11) */
++ } params; /**< union used for all the next-engine parameters options */
++
++ t_Handle h_Manip; /**< Handle to Manipulation object.
++ Relevant if next engine is of type result
++ (e_FM_PCD_PLCR, e_FM_PCD_KG, e_FM_PCD_DONE) */
++
++ bool statisticsEn; /**< If TRUE, statistics counters are incremented
++ for each frame passing through this
++ Coarse Classification entry. */
++} t_FmPcdCcNextEngineParams;
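++
++/*
++ * Editorial sketch (not part of the original header): a typical
++ * "terminate and enqueue" next-engine descriptor, reusable for CC keys
++ * and for the miss entry. e_FM_PCD_DONE and e_FM_PCD_ENQ_FRAME are the
++ * values referenced in the descriptions above; memset() is assumed.
++ */
++static void example_fill_enqueue_action(t_FmPcdCcNextEngineParams *p_Next,
++                                        uint32_t fqid)
++{
++    memset(p_Next, 0, sizeof(*p_Next));
++    p_Next->nextEngine = e_FM_PCD_DONE;
++    p_Next->params.enqueueParams.action = e_FM_PCD_ENQ_FRAME;
++    p_Next->params.enqueueParams.overrideFqid = TRUE;
++    p_Next->params.enqueueParams.newFqid = fqid;
++    p_Next->statisticsEn = TRUE; /* count frames hitting this entry */
++}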
++
++/**************************************************************************//**
++ @Description Parameters for defining a single CC key
++*//***************************************************************************/
++typedef struct t_FmPcdCcKeyParams {
++ uint8_t *p_Key; /**< Relevant only if 'action' = e_FM_PCD_ACTION_EXACT_MATCH;
++ pointer to the key of the size defined in keySize */
++    uint8_t                     *p_Mask;            /**< Relevant only if 'action' = e_FM_PCD_ACTION_EXACT_MATCH;
++                                                         pointer to the mask per key, of the size defined
++                                                         in keySize. p_Key and p_Mask (if defined) have to be
++                                                         of the same size, as defined in keySize;
++                                                         NOTE that if this value is equal for all entries within
++                                                         this table, the driver will automatically use a global mask
++                                                         (i.e. one common mask for all entries) instead of a private
++                                                         one; that is done in order to spare some memory and for
++                                                         better performance. */
++ t_FmPcdCcNextEngineParams ccNextEngineParams;
++                                                    /**< Parameters for the next engine for the key defined
++                                                         in p_Key */
++} t_FmPcdCcKeyParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining CC keys parameters
++ The driver supports two methods for CC node allocation: dynamic and static.
++ Static mode was created in order to prevent runtime alloc/free
++ of FMan memory (MURAM), which may cause fragmentation; in this mode,
++ the driver automatically allocates the memory according to
++ 'maxNumOfKeys' parameter. The driver calculates the maximal memory
++ size that may be used for this CC-Node taking into consideration
++ 'maskSupport' and 'statisticsMode' parameters.
++ When 'action' = e_FM_PCD_ACTION_INDEXED_LOOKUP in the extraction
++ parameters of this node, 'maxNumOfKeys' must be equal to 'numOfKeys'.
++ In dynamic mode, 'maxNumOfKeys' must be zero. At initialization,
++ all required structures are allocated according to 'numOfKeys'
++ parameter. During runtime modification, these structures are
++ re-allocated according to the updated number of keys.
++
++ Please note that 'action' and 'icIndxMask' mentioned in the
++ specific parameter explanations are passed in the extraction
++ parameters of the node (fields of extractCcParams.extractNonHdr).
++*//***************************************************************************/
++typedef struct t_KeysParams {
++ uint16_t maxNumOfKeys; /**< Maximum number of keys that will (ever) be used in this CC-Node;
++ A value of zero may be used for dynamic memory allocation. */
++ bool maskSupport; /**< This parameter is relevant only if a node is initialized with
++ 'action' = e_FM_PCD_ACTION_EXACT_MATCH and maxNumOfKeys > 0;
++ Should be TRUE to reserve table memory for key masks, even if
++ initial keys do not contain masks, or if the node was initialized
++                                                 as 'empty' (without keys); this will allow the user to add keys with
++                                                 masks at runtime.
++                                                 NOTE that if the user wants to use only global masks (i.e. one common mask
++                                                 for all the entries within this table), this parameter should be set to 'FALSE'. */
++ e_FmPcdCcStatsMode statisticsMode; /**< Determines the supported statistics mode for all node's keys.
++ To enable statistics gathering, statistics should be enabled per
++ every key, using 'statisticsEn' in next engine parameters structure
++ of that key;
++ If 'maxNumOfKeys' is set, all required structures will be
++ preallocated for all keys. */
++#if (DPAA_VERSION >= 11)
++ uint16_t frameLengthRanges[FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
++ /**< Relevant only for 'RMON' statistics mode
++ (this feature is supported only on B4860 device);
++ Holds a list of programmable thresholds - for each received frame,
++ its length in bytes is examined against these range thresholds and
++ the appropriate counter is incremented by 1 - for example, to belong
++ to range i, the following should hold:
++ range i-1 threshold < frame length <= range i threshold
++                                                 Each range threshold must be larger than its preceding range
++                                                 threshold, and the last range threshold must be 0xFFFF. */
++#endif /* (DPAA_VERSION >= 11) */
++ uint16_t numOfKeys; /**< Number of initial keys;
++ Note that in case of 'action' = e_FM_PCD_ACTION_INDEXED_LOOKUP,
++                                                 this field should be 2 raised to the power of the number of
++                                                 bits that are set in 'icIndxMask'. */
++ uint8_t keySize; /**< Size of key - for extraction of type FULL_FIELD, 'keySize' has
++ to be the standard size of the selected key; For other extraction
++ types, 'keySize' has to be as size of extraction; When 'action' =
++ e_FM_PCD_ACTION_INDEXED_LOOKUP, 'keySize' must be 2. */
++ t_FmPcdCcKeyParams keyParams[FM_PCD_MAX_NUM_OF_KEYS];
++ /**< An array with 'numOfKeys' entries, each entry specifies the
++ corresponding key parameters;
++ When 'action' = e_FM_PCD_ACTION_EXACT_MATCH, this value must not
++ exceed 255 (FM_PCD_MAX_NUM_OF_KEYS-1) as the last entry is saved
++ for the 'miss' entry. */
++ t_FmPcdCcNextEngineParams ccNextEngineParamsForMiss;
++ /**< Parameters for defining the next engine when a key is not matched;
++ Not relevant if action = e_FM_PCD_ACTION_INDEXED_LOOKUP. */
++} t_KeysParams;
++
++
++/**************************************************************************//**
++ @Description Parameters for defining a CC node
++*//***************************************************************************/
++typedef struct t_FmPcdCcNodeParams {
++ t_FmPcdExtractEntry extractCcParams; /**< Extraction parameters */
++ t_KeysParams keysParams; /**< Keys definition matching the selected extraction */
++} t_FmPcdCcNodeParams;
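++
++/*
++ * Editorial sketch (not part of the original header): a small exact-match
++ * CC node in static mode (maxNumOfKeys > 0). The extraction setup in
++ * extractCcParams is elided, example_fill_enqueue_action() is the sketch
++ * shown earlier, and FM_PCD_MatchTableSet() is the FMD call assumed to
++ * create the node from these parameters.
++ */
++static t_Handle example_set_match_node(t_Handle h_FmPcd,
++                                       uint8_t *p_Key0, uint8_t keySize)
++{
++    t_FmPcdCcNodeParams node;
++
++    memset(&node, 0, sizeof(node));
++    /* fill node.extractCcParams (t_FmPcdExtractEntry) as required */
++    node.keysParams.maxNumOfKeys = 8;      /* static MURAM allocation */
++    node.keysParams.maskSupport = FALSE;   /* global masks only */
++    node.keysParams.statisticsMode = e_FM_PCD_CC_STATS_MODE_NONE;
++    node.keysParams.numOfKeys = 1;
++    node.keysParams.keySize = keySize;
++    node.keysParams.keyParams[0].p_Key = p_Key0;
++    example_fill_enqueue_action(&node.keysParams.keyParams[0].ccNextEngineParams,
++                                0x2000);
++    example_fill_enqueue_action(&node.keysParams.ccNextEngineParamsForMiss,
++                                0x2001);
++
++    return FM_PCD_MatchTableSet(h_FmPcd, &node); /* assumed FMD call */
++}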
++
++/**************************************************************************//**
++ @Description Parameters for defining a hash table
++*//***************************************************************************/
++typedef struct t_FmPcdHashTableParams {
++ uint16_t maxNumOfKeys; /**< Maximum Number Of Keys that will (ever) be used in this Hash-table */
++ e_FmPcdCcStatsMode statisticsMode; /**< If not e_FM_PCD_CC_STATS_MODE_NONE, the required structures for the
++ requested statistics mode will be allocated according to maxNumOfKeys. */
++ uint8_t kgHashShift; /**< KG-Hash-shift as it was configured in the KG-scheme
++ that leads to this hash-table. */
++ uint16_t hashResMask; /**< Mask that will be used on the hash-result;
++ The number-of-sets for this hash will be calculated
++ as (2^(number of bits set in 'hashResMask'));
++ The 4 lower bits must be cleared. */
++ uint8_t hashShift; /**< Byte offset from the beginning of the KeyGen hash result to the
++ 2-bytes to be used as hash index. */
++ uint8_t matchKeySize; /**< Size of the exact match keys held by the hash buckets */
++
++ t_FmPcdCcNextEngineParams ccNextEngineParamsForMiss; /**< Parameters for defining the next engine when a key is not matched */
++
++} t_FmPcdHashTableParams;
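++
++/*
++ * Editorial sketch (not part of the original header): a hash table with
++ * 16 sets - hashResMask has 4 bits set, so the number of sets is 2^4,
++ * and its 4 lower bits are kept clear as required above.
++ * FM_PCD_HashTableSet() is the FMD call assumed to create the table;
++ * example_fill_enqueue_action() is the sketch shown earlier.
++ */
++static t_Handle example_set_hash_table(t_Handle h_FmPcd, uint8_t keySize)
++{
++    t_FmPcdHashTableParams tbl;
++
++    memset(&tbl, 0, sizeof(tbl));
++    tbl.maxNumOfKeys = 256;
++    tbl.statisticsMode = e_FM_PCD_CC_STATS_MODE_NONE;
++    tbl.kgHashShift = 0;       /* must match the KG scheme leading here */
++    tbl.hashResMask = 0x00F0;  /* 4 bits set -> 16 sets; bits 3..0 clear */
++    tbl.hashShift = 0;         /* take the 2 index bytes from offset 0 */
++    tbl.matchKeySize = keySize;
++    example_fill_enqueue_action(&tbl.ccNextEngineParamsForMiss, 0x2002);
++
++    return FM_PCD_HashTableSet(h_FmPcd, &tbl); /* assumed FMD call */
++}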
++
++/**************************************************************************//**
++ @Description Parameters for defining a CC tree group.
++
++ This structure defines a CC group in terms of NetEnv units
++ and the action to be taken in each case. The unitIds list must
++ be given in order from low to high indices.
++
++ t_FmPcdCcNextEngineParams is a list of 2^numOfDistinctionUnits
++ structures where each defines the next action to be taken for
++                  each combination of units. For example:
++ numOfDistinctionUnits = 2
++ unitIds = {1,3}
++ p_NextEnginePerEntriesInGrp[0] = t_FmPcdCcNextEngineParams for the case that
++ unit 1 - not found; unit 3 - not found;
++ p_NextEnginePerEntriesInGrp[1] = t_FmPcdCcNextEngineParams for the case that
++ unit 1 - not found; unit 3 - found;
++ p_NextEnginePerEntriesInGrp[2] = t_FmPcdCcNextEngineParams for the case that
++ unit 1 - found; unit 3 - not found;
++ p_NextEnginePerEntriesInGrp[3] = t_FmPcdCcNextEngineParams for the case that
++ unit 1 - found; unit 3 - found;
++*//***************************************************************************/
++typedef struct t_FmPcdCcGrpParams {
++ uint8_t numOfDistinctionUnits; /**< Up to 4 */
++ uint8_t unitIds[FM_PCD_MAX_NUM_OF_CC_UNITS];
++ /**< Indices of the units as defined in
++ FM_PCD_NetEnvCharacteristicsSet() */
++ t_FmPcdCcNextEngineParams nextEnginePerEntriesInGrp[FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP];
++ /**< Maximum entries per group is 16 */
++} t_FmPcdCcGrpParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining CC tree groups
++*//***************************************************************************/
++typedef struct t_FmPcdCcTreeParams {
++ t_Handle h_NetEnv; /**< A handle to the Network environment as returned
++ by FM_PCD_NetEnvCharacteristicsSet() */
++ uint8_t numOfGrps; /**< Number of CC groups within the CC tree */
++ t_FmPcdCcGrpParams ccGrpParams[FM_PCD_MAX_NUM_OF_CC_GROUPS];
++ /**< Parameters for each group. */
++} t_FmPcdCcTreeParams;
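++
++/*
++ * Editorial sketch (not part of the original header): the index into
++ * nextEnginePerEntriesInGrp follows the worked example above - the first
++ * listed unit supplies the MSB of the entry index and the last listed
++ * unit supplies the LSB. This only illustrates the documented indexing.
++ */
++static inline uint8_t example_cc_grp_entry_index(const bool *unitFound,
++                                                 uint8_t numOfDistinctionUnits)
++{
++    uint8_t i, index = 0;
++
++    for (i = 0; i < numOfDistinctionUnits; i++)
++        index = (uint8_t)((index << 1) | (unitFound[i] ? 1 : 0));
++
++    return index; /* 0 .. (2^numOfDistinctionUnits - 1) */
++}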
++
++
++/**************************************************************************//**
++ @Description CC key statistics structure
++*//***************************************************************************/
++typedef struct t_FmPcdCcKeyStatistics {
++ uint32_t byteCount; /**< This counter reflects byte count of frames that
++ were matched by this key. */
++ uint32_t frameCount; /**< This counter reflects count of frames that
++ were matched by this key. */
++#if (DPAA_VERSION >= 11)
++ uint32_t frameLengthRangeCount[FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
++ /**< These counters reflect how many frames matched
++ this key in 'RMON' statistics mode:
++ Each counter holds the number of frames of a
++ specific frames length range, according to the
++ ranges provided at initialization. */
++#endif /* (DPAA_VERSION >= 11) */
++} t_FmPcdCcKeyStatistics;
++
++/**************************************************************************//**
++ @Description Parameters for defining policer byte rate
++*//***************************************************************************/
++typedef struct t_FmPcdPlcrByteRateModeParams {
++ e_FmPcdPlcrFrameLengthSelect frameLengthSelection; /**< Frame length selection */
++    e_FmPcdPlcrRollBackFrameSelect  rollBackFrameSelection; /**< Relevant only when frameLengthSelection is
++                                                                 e_FM_PCD_PLCR_L2_FRM_LEN or e_FM_PCD_PLCR_FULL_FRM_LEN */
++} t_FmPcdPlcrByteRateModeParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining the policer profile (based on
++ RFC-2698 or RFC-4115 attributes).
++*//***************************************************************************/
++typedef struct t_FmPcdPlcrNonPassthroughAlgParams {
++ e_FmPcdPlcrRateMode rateMode; /**< Byte mode or Packet mode */
++    t_FmPcdPlcrByteRateModeParams   byteModeParams;         /**< Valid for Byte mode; ignored in Packet mode */
++ uint32_t committedInfoRate; /**< KBits/Second or Packets/Second */
++ uint32_t committedBurstSize; /**< Bytes/Packets */
++ uint32_t peakOrExcessInfoRate; /**< KBits/Second or Packets/Second */
++ uint32_t peakOrExcessBurstSize; /**< Bytes/Packets */
++} t_FmPcdPlcrNonPassthroughAlgParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining the next engine after policer
++*//***************************************************************************/
++typedef union u_FmPcdPlcrNextEngineParams {
++ e_FmPcdDoneAction action; /**< Action - when next engine is BMI (done) */
++ t_Handle h_Profile; /**< Policer profile handle - used when next engine
++ is Policer, must be a SHARED profile */
++ t_Handle h_DirectScheme; /**< Direct scheme select - when next engine is KeyGen */
++} u_FmPcdPlcrNextEngineParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining the policer profile entry
++*//***************************************************************************/
++typedef struct t_FmPcdPlcrProfileParams {
++ bool modify; /**< TRUE to change an existing profile */
++ union {
++ struct {
++ e_FmPcdProfileTypeSelection profileType; /**< Type of policer profile */
++ t_Handle h_FmPort; /**< Relevant for per-port profiles only */
++ uint16_t relativeProfileId; /**< Profile id - relative to shared group or to port */
++ } newParams; /**< use it when modify = FALSE */
++ t_Handle h_Profile; /**< A handle to a profile - use it when modify=TRUE */
++ } id;
++ e_FmPcdPlcrAlgorithmSelection algSelection; /**< Profile Algorithm PASS_THROUGH, RFC_2698, RFC_4115 */
++ e_FmPcdPlcrColorMode colorMode; /**< COLOR_BLIND, COLOR_AWARE */
++
++ union {
++ e_FmPcdPlcrColor dfltColor; /**< For Color-Blind Pass-Through mode; the policer will re-color
++ any incoming packet with the default value. */
++ e_FmPcdPlcrColor override; /**< For Color-Aware modes; the profile response to a
++ pre-color value of 2'b11. */
++ } color;
++
++ t_FmPcdPlcrNonPassthroughAlgParams nonPassthroughAlgParams; /**< RFC2698 or RFC4115 parameters */
++
++ e_FmPcdEngine nextEngineOnGreen; /**< Next engine for green-colored frames */
++ u_FmPcdPlcrNextEngineParams paramsOnGreen; /**< Next engine parameters for green-colored frames */
++
++ e_FmPcdEngine nextEngineOnYellow; /**< Next engine for yellow-colored frames */
++ u_FmPcdPlcrNextEngineParams paramsOnYellow; /**< Next engine parameters for yellow-colored frames */
++
++ e_FmPcdEngine nextEngineOnRed; /**< Next engine for red-colored frames */
++ u_FmPcdPlcrNextEngineParams paramsOnRed; /**< Next engine parameters for red-colored frames */
++
++ bool trapProfileOnFlowA; /**< Obsolete - do not use */
++ bool trapProfileOnFlowB; /**< Obsolete - do not use */
++ bool trapProfileOnFlowC; /**< Obsolete - do not use */
++} t_FmPcdPlcrProfileParams;
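++
++/*
++ * Editorial sketch (not part of the original header): a color-aware
++ * RFC-2698 byte-mode profile that enqueues green and yellow traffic and
++ * drops red. The e_FM_PCD_PLCR_* and e_FM_PCD_DROP_FRAME enum values and
++ * FM_PCD_PlcrProfileSet() are assumed from the FMD API; the rates are
++ * illustrative only.
++ */
++static t_Handle example_set_plcr_profile(t_Handle h_FmPcd, t_Handle h_FmPort)
++{
++    t_FmPcdPlcrProfileParams prof;
++
++    memset(&prof, 0, sizeof(prof));
++    prof.modify = FALSE;
++    prof.id.newParams.profileType = e_FM_PCD_PLCR_PORT_PRIVATE;
++    prof.id.newParams.h_FmPort = h_FmPort;
++    prof.id.newParams.relativeProfileId = 0;
++    prof.algSelection = e_FM_PCD_PLCR_RFC_2698;
++    prof.colorMode = e_FM_PCD_PLCR_COLOR_AWARE;
++    prof.nonPassthroughAlgParams.rateMode = e_FM_PCD_PLCR_BYTE_MODE;
++    prof.nonPassthroughAlgParams.byteModeParams.frameLengthSelection =
++        e_FM_PCD_PLCR_FULL_FRM_LEN;
++    prof.nonPassthroughAlgParams.committedInfoRate = 100000;     /* KBits/s */
++    prof.nonPassthroughAlgParams.committedBurstSize = 64000;     /* Bytes */
++    prof.nonPassthroughAlgParams.peakOrExcessInfoRate = 200000;  /* KBits/s */
++    prof.nonPassthroughAlgParams.peakOrExcessBurstSize = 128000; /* Bytes */
++    prof.nextEngineOnGreen = e_FM_PCD_DONE;
++    prof.paramsOnGreen.action = e_FM_PCD_ENQ_FRAME;
++    prof.nextEngineOnYellow = e_FM_PCD_DONE;
++    prof.paramsOnYellow.action = e_FM_PCD_ENQ_FRAME;
++    prof.nextEngineOnRed = e_FM_PCD_DONE;
++    prof.paramsOnRed.action = e_FM_PCD_DROP_FRAME;
++
++    return FM_PCD_PlcrProfileSet(h_FmPcd, &prof); /* assumed FMD call */
++}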
++
++/**************************************************************************//**
++ @Description Parameters for selecting a location for requested manipulation
++*//***************************************************************************/
++typedef struct t_FmManipHdrInfo {
++ e_NetHeaderType hdr; /**< Header selection */
++ e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled IP. Otherwise should be cleared. */
++ bool byField; /**< TRUE if the location of manipulation is according to some field in the specific header*/
++ t_FmPcdFields fullField; /**< Relevant only when byField = TRUE: Extract field */
++} t_FmManipHdrInfo;
++
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++/**************************************************************************//**
++ @Description Parameters for defining an insertion manipulation
++ of type e_FM_PCD_MANIP_INSRT_TO_START_OF_FRAME_TEMPLATE
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrInsrtByTemplateParams {
++ uint8_t size; /**< Size of insert template to the start of the frame. */
++ uint8_t hdrTemplate[FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE];
++ /**< Array of the insertion template. */
++
++    bool     modifyOuterIp;                     /**< TRUE if the user wants to modify some fields in the outer IP header. */
++ struct {
++ uint16_t ipOuterOffset; /**< Offset of outer IP in the insert template, relevant if modifyOuterIp = TRUE.*/
++        uint16_t ipOuterId;                     /**< Value of dscpEcn in the outer IP header, relevant if modifyOuterIp = TRUE.
++                                                     In IPv4, dscpEcn is only one byte - it has to be right-adjusted. */
++ bool udpPresent; /**< TRUE if UDP is present in the insert template, relevant if modifyOuterIp = TRUE.*/
++ uint8_t udpOffset; /**< Offset in the insert template of UDP, relevant if modifyOuterIp = TRUE and udpPresent=TRUE.*/
++        uint8_t  ipIdentGenId;                  /**< Used by FMan-CTRL to calculate the IP-identification field, relevant if modifyOuterIp = TRUE.*/
++ bool recalculateLength; /**< TRUE if recalculate length has to be performed due to the engines in the path which can change the frame later, relevant if modifyOuterIp = TRUE.*/
++ struct {
++ uint8_t blockSize; /**< The CAAM block-size; Used by FMan-CTRL to calculate the IP Total Length field.*/
++ uint8_t extraBytesAddedAlignedToBlockSize; /**< Used by FMan-CTRL to calculate the IP Total Length field and UDP length*/
++ uint8_t extraBytesAddedNotAlignedToBlockSize;/**< Used by FMan-CTRL to calculate the IP Total Length field and UDP length.*/
++ } recalculateLengthParams; /**< Recalculate length parameters - relevant if modifyOuterIp = TRUE and recalculateLength = TRUE */
++ } modifyOuterIpParams; /**< Outer IP modification parameters - ignored if modifyOuterIp is FALSE */
++
++ bool modifyOuterVlan; /**< TRUE if user wants to modify VPri field in the outer VLAN header*/
++ struct {
++        uint8_t  vpri;                          /**< Value of VPri, relevant if modifyOuterVlan = TRUE;
++                                                     VPri is only 3 bits - it has to be right-adjusted. */
++ } modifyOuterVlanParams;
++} t_FmPcdManipHdrInsrtByTemplateParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining CAPWAP fragmentation
++*//***************************************************************************/
++typedef struct t_CapwapFragmentationParams {
++ uint16_t sizeForFragmentation; /**< if length of the frame is greater than this value, CAPWAP fragmentation will be executed.*/
++ bool headerOptionsCompr; /**< TRUE - first fragment include the CAPWAP header options field,
++ and all other fragments exclude the CAPWAP options field,
++ FALSE - all fragments include CAPWAP header options field. */
++} t_CapwapFragmentationParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining CAPWAP reassembly
++*//***************************************************************************/
++typedef struct t_CapwapReassemblyParams {
++ uint16_t maxNumFramesInProcess; /**< Number of frames which can be reassembled concurrently; must be power of 2.
++ In case numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 4 - 512,
++ In case numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 8 - 2048 */
++    bool                            haltOnDuplicationFrag;  /**< If TRUE, the reassembly process will be halted upon a duplicated fragment,
++ and all processed fragments will be enqueued with error indication;
++ If FALSE, only duplicated fragments will be enqueued with error indication. */
++
++ e_FmPcdManipReassemTimeOutMode timeOutMode; /**< Expiration delay initialized by the reassembly process */
++ uint32_t fqidForTimeOutFrames; /**< FQID in which time out frames will enqueue during Time Out Process */
++ uint32_t timeoutRoutineRequestTime;
++ /**< Represents the time interval in microseconds between consecutive
++                                                    /**< Represents the time interval in microseconds between consecutive
++                                                         timeout routine requests; it has to be a power of 2. */
++ uint32_t timeoutThresholdForReassmProcess;
++ /**< Time interval (microseconds) for marking frames in process as too old;
++ Frames in process are those for which at least one fragment was received
++ but not all fragments. */
++
++ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry;/**< Number of frames per hash entry (needed for the reassembly process) */
++} t_CapwapReassemblyParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining fragmentation/reassembly manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipFragOrReasmParams {
++ bool frag; /**< TRUE if using the structure for fragmentation,
++ otherwise this structure is used for reassembly */
++ uint8_t sgBpid; /**< Scatter/Gather buffer pool id;
++ Same LIODN number is used for these buffers as for
++ the received frames buffers, so buffers of this pool
++ need to be allocated in the same memory area as the
++ received buffers. If the received buffers arrive
++ from different sources, the Scatter/Gather BP id
++ should be mutual to all these sources. */
++ e_NetHeaderType hdr; /**< Header selection */
++ union {
++ t_CapwapFragmentationParams capwapFragParams; /**< Structure for CAPWAP fragmentation,
++ relevant if 'frag' = TRUE, 'hdr' = HEADER_TYPE_CAPWAP */
++ t_CapwapReassemblyParams capwapReasmParams; /**< Structure for CAPWAP reassembly,
++ relevant if 'frag' = FALSE, 'hdr' = HEADER_TYPE_CAPWAP */
++ } u;
++} t_FmPcdManipFragOrReasmParams;
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++
++
++/**************************************************************************//**
++ @Description Parameters for defining header removal by header type
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrRmvByHdrParams {
++ e_FmPcdManipHdrRmvByHdrType type; /**< Selection of header removal location */
++ union {
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ struct {
++ bool include; /**< If FALSE, remove until the specified header (not including the header);
++ If TRUE, remove also the specified header. */
++ t_FmManipHdrInfo hdrInfo;
++ } fromStartByHdr; /**< Relevant when type = e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START */
++#endif /* (DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT) */
++#if (DPAA_VERSION >= 11)
++ t_FmManipHdrInfo hdrInfo; /**< Relevant when type = e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START */
++#endif /* (DPAA_VERSION >= 11) */
++ e_FmPcdManipHdrRmvSpecificL2 specificL2; /**< Relevant when type = e_FM_PCD_MANIP_BY_HDR_SPECIFIC_L2;
++ Defines which L2 headers to remove. */
++ } u;
++} t_FmPcdManipHdrRmvByHdrParams;
++
++/**************************************************************************//**
++ @Description Parameters for configuring IP fragmentation manipulation
++
++ Restrictions:
++ - IP Fragmentation output fragments must not be forwarded to application directly.
++ - Maximum number of fragments per frame is 16.
++ - Fragmentation of IP fragments is not supported.
++ - IPv4 packets containing header Option fields are fragmented by copying all option
++ fields to each fragment, regardless of the copy bit value.
++ - Transmit confirmation is not supported.
++ - Fragmentation after SEC can't handle S/G frames.
++ - Fragmentation nodes must be set as the last PCD action (i.e. the
++ corresponding CC node key must have next engine set to e_FM_PCD_DONE).
++ - Only BMan buffers shall be used for frames to be fragmented.
++                 - IPF does not support VSP. Therefore, on the same port where we have IPF
++                   we cannot support VSP (NOTE: this restriction is relevant only for
++                   FMAN v3 devices).
++*//***************************************************************************/
++typedef struct t_FmPcdManipFragIpParams {
++ uint16_t sizeForFragmentation; /**< If length of the frame is greater than this value,
++ IP fragmentation will be executed.*/
++#if (DPAA_VERSION == 10)
++ uint8_t scratchBpid; /**< Absolute buffer pool id according to BM configuration.*/
++#endif /* (DPAA_VERSION == 10) */
++ bool sgBpidEn; /**< Enable a dedicated buffer pool id for the Scatter/Gather buffer allocation;
++ If disabled, the Scatter/Gather buffer will be allocated from the same pool as the
++ received frame's buffer. */
++ uint8_t sgBpid; /**< Scatter/Gather buffer pool id;
++                                                         This parameter is relevant when 'sgBpidEn=TRUE';
++ Same LIODN number is used for these buffers as for the received frames buffers, so buffers
++ of this pool need to be allocated in the same memory area as the received buffers.
++ If the received buffers arrive from different sources, the Scatter/Gather BP id should be
++ mutual to all these sources. */
++ e_FmPcdManipDontFragAction dontFragAction; /**< Don't Fragment Action - If an IP packet is larger
++ than MTU and its DF bit is set, then this field will
++ determine the action to be taken.*/
++} t_FmPcdManipFragIpParams;
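++
++/*
++ * Editorial sketch (not part of the original header): IP fragmentation
++ * parameters for a 1500-byte MTU, using a dedicated Scatter/Gather pool.
++ * The e_FM_PCD_MANIP_DISCARD_PACKET enum value is assumed; typically this
++ * structure is wrapped in a manipulation node and attached as the last
++ * PCD action, per the restrictions above.
++ */
++static void example_fill_ip_frag(t_FmPcdManipFragIpParams *p_Frag)
++{
++    memset(p_Frag, 0, sizeof(*p_Frag));
++    p_Frag->sizeForFragmentation = 1500; /* fragment frames longer than MTU */
++    p_Frag->sgBpidEn = TRUE;
++    p_Frag->sgBpid = 7;                  /* illustrative pool id; see the LIODN note above */
++    p_Frag->dontFragAction = e_FM_PCD_MANIP_DISCARD_PACKET; /* assumed enum value */
++}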
++
++/**************************************************************************//**
++ @Description Parameters for configuring IP reassembly manipulation.
++
++ This is a common structure for both IPv4 and IPv6 reassembly
++ manipulation. For reassembly of both IPv4 and IPv6, make sure to
++ set the 'hdr' field in t_FmPcdManipReassemParams to HEADER_TYPE_IPv6.
++
++ Restrictions:
++ - Application must define at least one scheme to catch the reassembled frames.
++ - Maximum number of fragments per frame is 16.
++ - Reassembly of IPv4 fragments containing Option fields is supported.
++
++*//***************************************************************************/
++typedef struct t_FmPcdManipReassemIpParams {
++ uint8_t relativeSchemeId[2]; /**< Partition relative scheme id:
++ relativeSchemeId[0] - Relative scheme ID for IPV4 Reassembly manipulation;
++ relativeSchemeId[1] - Relative scheme ID for IPV6 Reassembly manipulation;
++ NOTE: The following comment is relevant only for FMAN v2 devices:
++                                                 Relative scheme IDs for IPv4/IPv6 Reassembly manipulation must be smaller than
++                                                 the user scheme IDs to ensure that the reassembly schemes will be matched first;
++                                                 remaining schemes, if defined, should have higher relative scheme IDs. */
++#if (DPAA_VERSION >= 11)
++ uint32_t nonConsistentSpFqid; /**< In case that other fragments of the frame corresponds to different storage
++ profile than the opening fragment (Non-Consistent-SP state)
++ then one of two possible scenarios occurs:
++ if 'nonConsistentSpFqid != 0', the reassembled frame will be enqueued to
++ this fqid, otherwise a 'Non Consistent SP' bit will be set in the FD[status].*/
++#else
++ uint8_t sgBpid; /**< Buffer pool id for the S/G frame created by the reassembly process */
++#endif /* (DPAA_VERSION >= 11) */
++ uint8_t dataMemId; /**< Memory partition ID for the IPR's external tables structure */
++ uint16_t dataLiodnOffset; /**< LIODN offset for access the IPR's external tables structure. */
++ uint16_t minFragSize[2]; /**< Minimum fragment size:
++ minFragSize[0] - for ipv4, minFragSize[1] - for ipv6 */
++ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry[2];
++ /**< Number of frames per hash entry needed for reassembly process:
++ numOfFramesPerHashEntry[0] - for ipv4 (max value is e_FM_PCD_MANIP_EIGHT_WAYS_HASH);
++ numOfFramesPerHashEntry[1] - for ipv6 (max value is e_FM_PCD_MANIP_SIX_WAYS_HASH). */
++ uint16_t maxNumFramesInProcess; /**< Number of frames which can be processed by Reassembly in the same time;
++ Must be power of 2;
++ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 4 - 512;
++ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 8 - 2048. */
++ e_FmPcdManipReassemTimeOutMode timeOutMode; /**< Expiration delay initialized by Reassembly process */
++ uint32_t fqidForTimeOutFrames; /**< FQID in which time out frames will enqueue during Time Out Process;
++ Recommended value for this field is 0; in this way timed-out frames will be discarded */
++ uint32_t timeoutThresholdForReassmProcess;
++                                            /**< Represents the time interval in microseconds after which
++                                                 an open frame (at least one fragment was processed, but not
++                                                 all fragments) is considered too old. */
++} t_FmPcdManipReassemIpParams;
++
++/**************************************************************************//**
++ @Description structure for defining IPSEC manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipSpecialOffloadIPSecParams {
++ bool decryption; /**< TRUE if being used in decryption direction;
++ FALSE if being used in encryption direction. */
++ bool ecnCopy; /**< TRUE to copy the ECN bits from inner/outer to outer/inner
++ (direction depends on the 'decryption' field). */
++ bool dscpCopy; /**< TRUE to copy the DSCP bits from inner/outer to outer/inner
++ (direction depends on the 'decryption' field). */
++ bool variableIpHdrLen; /**< TRUE for supporting variable IP header length in decryption. */
++ bool variableIpVersion; /**< TRUE for supporting both IP version on the same SA in encryption */
++    uint8_t outerIPHdrLen;  /**< If 'variableIpVersion == TRUE' then this field must be set to a non-zero value;
++                                 It specifies the length of the outer IP header that was configured in the
++                                 corresponding SA. */
++    uint16_t arwSize;       /**< If non-zero, an ARW check will be performed for this SA;
++                                 The value must be a multiple of 16 */
++    uintptr_t arwAddr;      /**< If arwSize is non-zero, this field must be set to a non-zero value;
++                                 MUST be allocated from the MURAM of the FMan that the post-SEC op-port belongs to;
++                                 Must be 4B aligned. Required MURAM size is 'NEXT_POWER_OF_2(arwSize+32)/8+4' Bytes */
++} t_FmPcdManipSpecialOffloadIPSecParams;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Parameters for configuring CAPWAP fragmentation manipulation
++
++ Restrictions:
++ - Maximum number of fragments per frame is 16.
++ - Transmit confirmation is not supported.
++ - Fragmentation nodes must be set as the last PCD action (i.e. the
++ corresponding CC node key must have next engine set to e_FM_PCD_DONE).
++ - Only BMan buffers shall be used for frames to be fragmented.
++ - NOTE: The following comment is relevant only for FMAN v3 devices: IPF
++ does not support VSP. Therefore, on the same port where we have IPF we
++ cannot support VSP.
++*//***************************************************************************/
++typedef struct t_FmPcdManipFragCapwapParams {
++ uint16_t sizeForFragmentation; /**< If length of the frame is greater than this value,
++ CAPWAP fragmentation will be executed.*/
++ bool sgBpidEn; /**< Enable a dedicated buffer pool id for the Scatter/Gather buffer allocation;
++ If disabled, the Scatter/Gather buffer will be allocated from the same pool as the
++ received frame's buffer. */
++ uint8_t sgBpid; /**< Scatter/Gather buffer pool id;
++                                                         This parameter is relevant when 'sgBpidEn=TRUE';
++ Same LIODN number is used for these buffers as for the received frames buffers, so buffers
++ of this pool need to be allocated in the same memory area as the received buffers.
++ If the received buffers arrive from different sources, the Scatter/Gather BP id should be
++ mutual to all these sources. */
++ bool compressModeEn; /**< CAPWAP Header Options Compress Enable mode;
++ When this mode is enabled then only the first fragment include the CAPWAP header options
++ field (if user provides it in the input frame) and all other fragments exclude the CAPWAP
++ options field (CAPWAP header is updated accordingly).*/
++} t_FmPcdManipFragCapwapParams;
++
++/**************************************************************************//**
++ @Description Parameters for configuring CAPWAP reassembly manipulation.
++
++ Restrictions:
++ - Application must define one scheme to catch the reassembled frames.
++ - Maximum number of fragments per frame is 16.
++
++*//***************************************************************************/
++typedef struct t_FmPcdManipReassemCapwapParams {
++ uint8_t relativeSchemeId; /**< Partition relative scheme id;
++                                            NOTE: this id must be smaller than the user scheme IDs to ensure that the reassembly scheme will be matched first;
++                                            remaining schemes, if defined, should have higher relative scheme IDs. */
++ uint8_t dataMemId; /**< Memory partition ID for the IPR's external tables structure */
++ uint16_t dataLiodnOffset; /**< LIODN offset for access the IPR's external tables structure. */
++ uint16_t maxReassembledFrameLength;/**< The maximum CAPWAP reassembled frame length in bytes;
++                                            If maxReassembledFrameLength == 0, any successfully reassembled frame length is
++                                            considered valid;
++                                            if maxReassembledFrameLength > 0, a successfully reassembled frame whose length
++                                            exceeds this value is considered an error frame (FD status[CRE] bit is set). */
++ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry;
++ /**< Number of frames per hash entry needed for reassembly process */
++ uint16_t maxNumFramesInProcess; /**< Number of frames which can be processed by reassembly in the same time;
++ Must be power of 2;
++ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 4 - 512;
++ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 8 - 2048. */
++ e_FmPcdManipReassemTimeOutMode timeOutMode; /**< Expiration delay initialized by Reassembly process */
++ uint32_t fqidForTimeOutFrames; /**< FQID in which time out frames will enqueue during Time Out Process;
++ Recommended value for this field is 0; in this way timed-out frames will be discarded */
++ uint32_t timeoutThresholdForReassmProcess;
++                                            /**< Represents the time interval in microseconds after which
++                                                 an open frame (at least one fragment was processed, but not
++                                                 all fragments) is considered too old. */
++} t_FmPcdManipReassemCapwapParams;
++
++/**************************************************************************//**
++ @Description structure for defining CAPWAP manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipSpecialOffloadCapwapParams {
++ bool dtls; /**< TRUE if continue to SEC DTLS encryption */
++ e_FmPcdManipHdrQosSrc qosSrc; /**< TODO */
++} t_FmPcdManipSpecialOffloadCapwapParams;
++
++#endif /* (DPAA_VERSION >= 11) */
++
++
++/**************************************************************************//**
++ @Description Parameters for defining special offload manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipSpecialOffloadParams {
++ e_FmPcdManipSpecialOffloadType type; /**< Type of special offload manipulation */
++ union
++ {
++ t_FmPcdManipSpecialOffloadIPSecParams ipsec; /**< Parameters for IPSec; Relevant when
++ type = e_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC */
++#if (DPAA_VERSION >= 11)
++ t_FmPcdManipSpecialOffloadCapwapParams capwap; /**< Parameters for CAPWAP; Relevant when
++ type = e_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP */
++#endif /* (DPAA_VERSION >= 11) */
++ } u;
++} t_FmPcdManipSpecialOffloadParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining insertion manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrInsrt {
++ uint8_t size; /**< size of inserted section */
++ uint8_t *p_Data; /**< data to be inserted */
++} t_FmPcdManipHdrInsrt;
++
++
++/**************************************************************************//**
++ @Description Parameters for defining generic removal manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrRmvGenericParams {
++ uint8_t offset; /**< Offset from beginning of header to the start
++ location of the removal */
++ uint8_t size; /**< Size of removed section */
++} t_FmPcdManipHdrRmvGenericParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining generic insertion manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrInsrtGenericParams {
++ uint8_t offset; /**< Offset from beginning of header to the start
++ location of the insertion */
++ uint8_t size; /**< Size of inserted section */
++ bool replace; /**< TRUE to override (replace) existing data at
++ 'offset', FALSE to insert */
++ uint8_t *p_Data; /**< Pointer to data to be inserted */
++} t_FmPcdManipHdrInsrtGenericParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation VLAN DSCP To Vpri translation
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrFieldUpdateVlanDscpToVpri {
++ uint8_t dscpToVpriTable[FM_PCD_MANIP_DSCP_TO_VLAN_TRANS];
++ /**< A table of VPri values for each DSCP value;
++ The index is the DSCP value (0-0x3F) and the
++ value is the corresponding VPRI (0-15). */
++    uint8_t                                     vpriDefVal;  /**< 0-7, Relevant only if updateType =
++ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN,
++ this field is the Q Tag default value if the
++ IP header is not found. */
++} t_FmPcdManipHdrFieldUpdateVlanDscpToVpri;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation VLAN fields updates
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrFieldUpdateVlan {
++ e_FmPcdManipHdrFieldUpdateVlan updateType; /**< Selects VLAN update type */
++ union {
++        uint8_t                                     vpri;       /**< 0-7, Relevant only if updateType =
++ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_PRI, this
++ is the new VLAN pri. */
++ t_FmPcdManipHdrFieldUpdateVlanDscpToVpri dscpToVpri; /**< Parameters structure, Relevant only if updateType
++ = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN. */
++ } u;
++} t_FmPcdManipHdrFieldUpdateVlan;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation IPV4 fields updates
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrFieldUpdateIpv4 {
++ ipv4HdrManipUpdateFlags_t validUpdates; /**< ORed flag, selecting the required updates */
++ uint8_t tos; /**< 8 bit New TOS; Relevant if validUpdates contains
++ HDR_MANIP_IPV4_TOS */
++ uint16_t id; /**< 16 bit New IP ID; Relevant only if validUpdates
++ contains HDR_MANIP_IPV4_ID */
++ uint32_t src; /**< 32 bit New IP SRC; Relevant only if validUpdates
++ contains HDR_MANIP_IPV4_SRC */
++ uint32_t dst; /**< 32 bit New IP DST; Relevant only if validUpdates
++ contains HDR_MANIP_IPV4_DST */
++} t_FmPcdManipHdrFieldUpdateIpv4;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation IPV6 fields updates
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrFieldUpdateIpv6 {
++ ipv6HdrManipUpdateFlags_t validUpdates; /**< ORed flag, selecting the required updates */
++ uint8_t trafficClass; /**< 8 bit New Traffic Class; Relevant if validUpdates contains
++ HDR_MANIP_IPV6_TC */
++ uint8_t src[NET_HEADER_FIELD_IPv6_ADDR_SIZE];
++ /**< 16 byte new IP SRC; Relevant only if validUpdates
++ contains HDR_MANIP_IPV6_SRC */
++ uint8_t dst[NET_HEADER_FIELD_IPv6_ADDR_SIZE];
++ /**< 16 byte new IP DST; Relevant only if validUpdates
++ contains HDR_MANIP_IPV6_DST */
++} t_FmPcdManipHdrFieldUpdateIpv6;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation TCP/UDP fields updates
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrFieldUpdateTcpUdp {
++ tcpUdpHdrManipUpdateFlags_t validUpdates; /**< ORed flag, selecting the required updates */
++ uint16_t src; /**< 16 bit New TCP/UDP SRC; Relevant only if validUpdates
++ contains HDR_MANIP_TCP_UDP_SRC */
++ uint16_t dst; /**< 16 bit New TCP/UDP DST; Relevant only if validUpdates
++ contains HDR_MANIP_TCP_UDP_DST */
++} t_FmPcdManipHdrFieldUpdateTcpUdp;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation fields updates
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrFieldUpdateParams {
++ e_FmPcdManipHdrFieldUpdateType type; /**< Type of header field update manipulation */
++ union {
++ t_FmPcdManipHdrFieldUpdateVlan vlan; /**< Parameters for VLAN update. Relevant when
++ type = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN */
++ t_FmPcdManipHdrFieldUpdateIpv4 ipv4; /**< Parameters for IPv4 update. Relevant when
++ type = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4 */
++ t_FmPcdManipHdrFieldUpdateIpv6 ipv6; /**< Parameters for IPv6 update. Relevant when
++ type = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6 */
++ t_FmPcdManipHdrFieldUpdateTcpUdp tcpUdp; /**< Parameters for TCP/UDP update. Relevant when
++ type = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP */
++ } u;
++} t_FmPcdManipHdrFieldUpdateParams;
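++
++/*
++ * Editorial sketch (not part of the original header): an IPv4 field-update
++ * manipulation that rewrites the TOS byte and the source address. The
++ * HDR_MANIP_IPV4_* flags and the type enum value are the ones referenced
++ * in the descriptions above; wrapping this structure into a manipulation
++ * node is elided, and the values are illustrative.
++ */
++static void example_fill_ipv4_update(t_FmPcdManipHdrFieldUpdateParams *p_Upd)
++{
++    memset(p_Upd, 0, sizeof(*p_Upd));
++    p_Upd->type = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4;
++    p_Upd->u.ipv4.validUpdates = HDR_MANIP_IPV4_TOS | HDR_MANIP_IPV4_SRC;
++    p_Upd->u.ipv4.tos = 0xB8;          /* e.g. DSCP EF */
++    p_Upd->u.ipv4.src = 0xC0A80001;    /* 192.168.0.1, illustrative */
++}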
++
++
++
++/**************************************************************************//**
++ @Description Parameters for defining custom header manipulation for generic field replacement
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrCustomGenFieldReplace {
++ uint8_t srcOffset; /**< Location of new data - Offset from
++                                                         Parse Result (srcOffset >= 16, srcOffset + size <= 32) */
++ uint8_t dstOffset; /**< Location of data to be overwritten - Offset from
++ start of frame (dstOffset + size <= 256). */
++ uint8_t size; /**< The number of bytes (<=16) to be replaced */
++ uint8_t mask; /**< Optional 1 byte mask. Set to select bits for
++ replacement (1 - bit will be replaced);
++ Clear to use field as is. */
++ uint8_t maskOffset; /**< Relevant if mask != 0;
++                                                         Mask offset within the replaced "size" */
++} t_FmPcdManipHdrCustomGenFieldReplace;
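++
++/*
++ * Editorial sketch (not part of the original header): the effect of the
++ * optional mask, modeled byte-wise - within the replaced 'size', only the
++ * byte at 'maskOffset' is merged bit-by-bit (a set mask bit means the new
++ * bit is taken). This only illustrates the documented semantics.
++ */
++static inline uint8_t example_masked_replace_byte(uint8_t oldByte,
++                                                  uint8_t newByte,
++                                                  uint8_t mask)
++{
++    if (mask == 0)
++        return newByte; /* mask clear: use the new field as is */
++
++    return (uint8_t)((newByte & mask) | (oldByte & (uint8_t)~mask));
++}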
++
++/**************************************************************************//**
++ @Description Parameters for defining custom header manipulation for IP replacement
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrCustomIpHdrReplace {
++ e_FmPcdManipHdrCustomIpReplace replaceType; /**< Selects replace update type */
++ bool decTtlHl; /**< Decrement TTL (IPV4) or Hop limit (IPV6) by 1 */
++ bool updateIpv4Id; /**< Relevant when replaceType =
++ e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4 */
++ uint16_t id; /**< 16 bit New IP ID; Relevant only if
++ updateIpv4Id = TRUE */
++ uint8_t hdrSize; /**< The size of the new IP header */
++ uint8_t hdr[FM_PCD_MANIP_MAX_HDR_SIZE];
++ /**< The new IP header */
++} t_FmPcdManipHdrCustomIpHdrReplace;
++
++/**************************************************************************//**
++ @Description Parameters for defining custom header manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrCustomParams {
++ e_FmPcdManipHdrCustomType type; /**< Type of header field update manipulation */
++ union {
++        t_FmPcdManipHdrCustomIpHdrReplace              ipHdrReplace;      /**< Parameters for IP header replacement */
++        t_FmPcdManipHdrCustomGenFieldReplace           genFieldReplace;   /**< Parameters for generic field replacement */
++ } u;
++} t_FmPcdManipHdrCustomParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining specific L2 insertion manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrInsrtSpecificL2Params {
++ e_FmPcdManipHdrInsrtSpecificL2 specificL2; /**< Selects which L2 headers to insert */
++ bool update; /**< TRUE to update MPLS header */
++ uint8_t size; /**< size of inserted section */
++ uint8_t *p_Data; /**< data to be inserted */
++} t_FmPcdManipHdrInsrtSpecificL2Params;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Parameters for defining IP insertion manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrInsrtIpParams {
++ bool calcL4Checksum; /**< Calculate L4 checksum. */
++ e_FmPcdManipHdrQosMappingMode mappingMode; /**< TODO */
++ uint8_t lastPidOffset; /**< the offset of the last Protocol within
++ the inserted header */
++ uint16_t id; /**< 16 bit New IP ID */
++ bool dontFragOverwrite;
++ /**< IPv4 only. DF is overwritten with the hash-result next-to-last byte.
++ * This byte is configured to be overwritten when RPD is set. */
++ uint8_t lastDstOffset;
++ /**< IPv6 only. if routing extension exist, user should set the offset of the destination address
++ * in order to calculate UDP checksum pseudo header;
++ * Otherwise set it to '0'. */
++ t_FmPcdManipHdrInsrt insrt; /**< size and data to be inserted. */
++} t_FmPcdManipHdrInsrtIpParams;
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description Parameters for defining header insertion manipulation by header type
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrInsrtByHdrParams {
++ e_FmPcdManipHdrInsrtByHdrType type; /**< Selects manipulation type */
++ union {
++
++ t_FmPcdManipHdrInsrtSpecificL2Params specificL2Params;
++ /**< Used when type = e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2:
++ Selects which L2 headers to insert */
++#if (DPAA_VERSION >= 11)
++ t_FmPcdManipHdrInsrtIpParams ipParams; /**< Used when type = e_FM_PCD_MANIP_INSRT_BY_HDR_IP */
++ t_FmPcdManipHdrInsrt insrt; /**< Used when type is one of e_FM_PCD_MANIP_INSRT_BY_HDR_UDP,
++ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE, or
++ e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP */
++#endif /* (DPAA_VERSION >= 11) */
++ } u;
++} t_FmPcdManipHdrInsrtByHdrParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining header insertion manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrInsrtParams {
++ e_FmPcdManipHdrInsrtType type; /**< Type of insertion manipulation */
++ union {
++ t_FmPcdManipHdrInsrtByHdrParams byHdr; /**< Parameters for defining header insertion manipulation by header type,
++ relevant if 'type' = e_FM_PCD_MANIP_INSRT_BY_HDR */
++ t_FmPcdManipHdrInsrtGenericParams generic; /**< Parameters for defining generic header insertion manipulation,
++ relevant if 'type' = e_FM_PCD_MANIP_INSRT_GENERIC */
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ t_FmPcdManipHdrInsrtByTemplateParams byTemplate; /**< Parameters for defining header insertion manipulation by template,
++ relevant if 'type' = e_FM_PCD_MANIP_INSRT_BY_TEMPLATE */
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++ } u;
++} t_FmPcdManipHdrInsrtParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining header removal manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrRmvParams {
++ e_FmPcdManipHdrRmvType type; /**< Type of header removal manipulation */
++ union {
++ t_FmPcdManipHdrRmvByHdrParams byHdr; /**< Parameters for defining header removal manipulation by header type,
++ relevant if type = e_FM_PCD_MANIP_RMV_BY_HDR */
++ t_FmPcdManipHdrRmvGenericParams generic; /**< Parameters for defining generic header removal manipulation,
++ relevant if type = e_FM_PCD_MANIP_RMV_GENERIC */
++ } u;
++} t_FmPcdManipHdrRmvParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation node
++*//***************************************************************************/
++typedef struct t_FmPcdManipHdrParams {
++ bool rmv; /**< TRUE, to define removal manipulation */
++ t_FmPcdManipHdrRmvParams rmvParams; /**< Parameters for removal manipulation, relevant if 'rmv' = TRUE */
++
++ bool insrt; /**< TRUE, to define insertion manipulation */
++ t_FmPcdManipHdrInsrtParams insrtParams; /**< Parameters for insertion manipulation, relevant if 'insrt' = TRUE */
++
++ bool fieldUpdate; /**< TRUE, to define field update manipulation */
++ t_FmPcdManipHdrFieldUpdateParams fieldUpdateParams; /**< Parameters for field update manipulation, relevant if 'fieldUpdate' = TRUE */
++
++ bool custom; /**< TRUE, to define custom manipulation */
++ t_FmPcdManipHdrCustomParams customParams; /**< Parameters for custom manipulation, relevant if 'custom' = TRUE */
++
++ bool dontParseAfterManip;/**< TRUE to de-activate the parser after the manipulation defined in this node.
++ Restrictions:
++ 1. MUST be set if the next engine after the CC is not another CC node
++ (but rather Policer or Keygen), and this is the last (no h_NextManip) in a chain
++ of manipulation nodes. This includes single nodes (i.e. no h_NextManip and
++ also never pointed as h_NextManip of other manipulation nodes)
++ 2. MUST be set if the next engine after the CC is another CC node, and
++ this is NOT the last manipulation node (i.e. it has h_NextManip).*/
++} t_FmPcdManipHdrParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining fragmentation manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipFragParams {
++ e_NetHeaderType hdr; /**< Header selection */
++ union {
++#if (DPAA_VERSION >= 11)
++ t_FmPcdManipFragCapwapParams capwapFrag; /**< Parameters for defining CAPWAP fragmentation,
++ relevant if 'hdr' = HEADER_TYPE_CAPWAP */
++#endif /* (DPAA_VERSION >= 11) */
++ t_FmPcdManipFragIpParams ipFrag; /**< Parameters for defining IP fragmentation,
++                                                               relevant if 'hdr' = HEADER_TYPE_IPv4 or HEADER_TYPE_IPv6 */
++ } u;
++} t_FmPcdManipFragParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining reassembly manipulation
++*//***************************************************************************/
++typedef struct t_FmPcdManipReassemParams {
++ e_NetHeaderType hdr; /**< Header selection */
++ union {
++#if (DPAA_VERSION >= 11)
++ t_FmPcdManipReassemCapwapParams capwapReassem; /**< Parameters for defining CAPWAP reassembly,
++ relevant if 'hdr' = HEADER_TYPE_CAPWAP */
++#endif /* (DPAA_VERSION >= 11) */
++
++ t_FmPcdManipReassemIpParams ipReassem; /**< Parameters for defining IP reassembly,
++                                                               relevant if 'hdr' = HEADER_TYPE_IPv4 or HEADER_TYPE_IPv6 */
++ } u;
++} t_FmPcdManipReassemParams;
++
++/**************************************************************************//**
++ @Description Parameters for defining a manipulation node
++*//***************************************************************************/
++typedef struct t_FmPcdManipParams {
++ e_FmPcdManipType type; /**< Selects type of manipulation node */
++ union{
++ t_FmPcdManipHdrParams hdr; /**< Parameters for defining header manipulation node */
++ t_FmPcdManipReassemParams reassem; /**< Parameters for defining reassembly manipulation node */
++ t_FmPcdManipFragParams frag; /**< Parameters for defining fragmentation manipulation node */
++ t_FmPcdManipSpecialOffloadParams specialOffload; /**< Parameters for defining special offload manipulation node */
++ } u;
++
++ t_Handle h_NextManip; /**< Supported for Header Manipulation only;
++ Handle to another (previously defined) manipulation node;
++ Allows concatenation of manipulation actions;
++ This parameter is optional and may be NULL. */
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ bool fragOrReasm; /**< TRUE, if defined fragmentation/reassembly manipulation */
++ t_FmPcdManipFragOrReasmParams fragOrReasmParams; /**< Parameters for fragmentation/reassembly manipulation,
++ relevant if fragOrReasm = TRUE */
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++} t_FmPcdManipParams;
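++
++/* Usage sketch (illustrative only, not compiled): building a header
++ * manipulation node that applies a field update, using FM_PCD_ManipNodeSet()
++ * declared later in this file. The e_FM_PCD_MANIP_HDR selector is assumed
++ * from the e_FmPcdManipType definition earlier in this header. */
++#if 0
++static t_Handle example_create_hdr_manip(t_Handle h_FmPcd,
++                                         t_FmPcdManipHdrFieldUpdateParams *p_Update)
++{
++    t_FmPcdManipParams manipParams;
++
++    memset(&manipParams, 0, sizeof(manipParams));
++    manipParams.type                      = e_FM_PCD_MANIP_HDR; /* assumed selector */
++    manipParams.u.hdr.fieldUpdate         = TRUE;
++    manipParams.u.hdr.fieldUpdateParams   = *p_Update;
++    manipParams.u.hdr.dontParseAfterManip = TRUE; /* single node, next engine not CC */
++    manipParams.h_NextManip               = NULL; /* no chaining in this sketch */
++
++    return FM_PCD_ManipNodeSet(h_FmPcd, &manipParams);
++}
++#endif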
++
++/**************************************************************************//**
++ @Description Structure for retrieving IP reassembly statistics
++*//***************************************************************************/
++typedef struct t_FmPcdManipReassemIpStats {
++ /* common counters for both IPv4 and IPv6 */
++ uint32_t timeout; /**< Counts the number of timeout occurrences */
++ uint32_t rfdPoolBusy; /**< Counts the number of failed attempts to allocate
++ a Reassembly Frame Descriptor */
++ uint32_t internalBufferBusy; /**< Counts the number of times an internal buffer busy occurred */
++ uint32_t externalBufferBusy; /**< Counts the number of times external buffer busy occurred */
++ uint32_t sgFragments; /**< Counts the number of Scatter/Gather fragments */
++ uint32_t dmaSemaphoreDepletion; /**< Counts the number of failed attempts to allocate a DMA semaphore */
++#if (DPAA_VERSION >= 11)
++ uint32_t nonConsistentSp; /**< Counts the number of Non Consistent Storage Profile events for
++ successfully reassembled frames */
++#endif /* (DPAA_VERSION >= 11) */
++ struct {
++ uint32_t successfullyReassembled; /**< Counts the number of successfully reassembled frames */
++ uint32_t validFragments; /**< Counts the total number of valid fragments that
++ have been processed for all frames */
++ uint32_t processedFragments; /**< Counts the number of processed fragments
++ (valid and error fragments) for all frames */
++ uint32_t malformedFragments; /**< Counts the number of malformed fragments processed for all frames */
++ uint32_t discardedFragments; /**< Counts the number of fragments discarded by the reassembly process */
++ uint32_t autoLearnBusy; /**< Counts the number of times a busy condition occurs when attempting
++ to access an IP-Reassembly Automatic Learning Hash set */
++ uint32_t moreThan16Fragments; /**< Counts the fragment occurrences in which the number of fragments-per-frame
++ exceeds 16 */
++ } specificHdrStatistics[2]; /**< slot '0' is for IPv4, slot '1' is for IPv6 */
++} t_FmPcdManipReassemIpStats;
++
++/**************************************************************************//**
++ @Description Structure for retrieving IP fragmentation statistics
++*//***************************************************************************/
++typedef struct t_FmPcdManipFragIpStats {
++ uint32_t totalFrames; /**< Number of frames that passed through the manipulation node */
++ uint32_t fragmentedFrames; /**< Number of frames that were fragmented */
++ uint32_t generatedFragments; /**< Number of fragments that were generated */
++} t_FmPcdManipFragIpStats;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Structure for retrieving CAPWAP reassembly statistics
++*//***************************************************************************/
++typedef struct t_FmPcdManipReassemCapwapStats {
++ uint32_t timeout; /**< Counts the number of timeout occurrences */
++ uint32_t rfdPoolBusy; /**< Counts the number of failed attempts to allocate
++ a Reassembly Frame Descriptor */
++ uint32_t internalBufferBusy; /**< Counts the number of times an internal buffer busy occurred */
++ uint32_t externalBufferBusy; /**< Counts the number of times external buffer busy occurred */
++ uint32_t sgFragments; /**< Counts the number of Scatter/Gather fragments */
++ uint32_t dmaSemaphoreDepletion; /**< Counts the number of failed attempts to allocate a DMA semaphore */
++ uint32_t successfullyReassembled; /**< Counts the number of successfully reassembled frames */
++ uint32_t validFragments; /**< Counts the total number of valid fragments that
++ have been processed for all frames */
++ uint32_t processedFragments; /**< Counts the number of processed fragments
++ (valid and error fragments) for all frames */
++ uint32_t malformedFragments; /**< Counts the number of malformed fragments processed for all frames */
++ uint32_t autoLearnBusy; /**< Counts the number of times a busy condition occurs when attempting
++                                                 to access a Reassembly Automatic Learning Hash set */
++ uint32_t discardedFragments; /**< Counts the number of fragments discarded by the reassembly process */
++ uint32_t moreThan16Fragments; /**< Counts the fragment occurrences in which the number of fragments-per-frame
++ exceeds 16 */
++    uint32_t        exceedMaxReassemblyFrameLen;/**< Counts the number of times that a successfully reassembled frame
++                                                     length exceeds the MaxReassembledFrameLength value */
++} t_FmPcdManipReassemCapwapStats;
++
++/**************************************************************************//**
++ @Description Structure for retrieving CAPWAP fragmentation statistics
++*//***************************************************************************/
++typedef struct t_FmPcdManipFragCapwapStats {
++ uint32_t totalFrames; /**< Number of frames that passed through the manipulation node */
++ uint32_t fragmentedFrames; /**< Number of frames that were fragmented */
++ uint32_t generatedFragments; /**< Number of fragments that were generated */
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ uint8_t sgAllocationFailure; /**< Number of allocation failure of s/g buffers */
++#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */
++} t_FmPcdManipFragCapwapStats;
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description Structure for retrieving reassembly statistics
++*//***************************************************************************/
++typedef struct t_FmPcdManipReassemStats {
++ union {
++ t_FmPcdManipReassemIpStats ipReassem; /**< Structure for IP reassembly statistics */
++#if (DPAA_VERSION >= 11)
++ t_FmPcdManipReassemCapwapStats capwapReassem; /**< Structure for CAPWAP reassembly statistics */
++#endif /* (DPAA_VERSION >= 11) */
++ } u;
++} t_FmPcdManipReassemStats;
++
++/**************************************************************************//**
++ @Description Structure for retrieving fragmentation statistics
++*//***************************************************************************/
++typedef struct t_FmPcdManipFragStats {
++ union {
++ t_FmPcdManipFragIpStats ipFrag; /**< Structure for IP fragmentation statistics */
++#if (DPAA_VERSION >= 11)
++ t_FmPcdManipFragCapwapStats capwapFrag; /**< Structure for CAPWAP fragmentation statistics */
++#endif /* (DPAA_VERSION >= 11) */
++ } u;
++} t_FmPcdManipFragStats;
++
++/**************************************************************************//**
++ @Description Structure for selecting manipulation statistics
++*//***************************************************************************/
++typedef struct t_FmPcdManipStats {
++ union {
++ t_FmPcdManipReassemStats reassem; /**< Structure for reassembly statistics */
++ t_FmPcdManipFragStats frag; /**< Structure for fragmentation statistics */
++ } u;
++} t_FmPcdManipStats;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Parameters for defining frame replicator group and its members
++*//***************************************************************************/
++typedef struct t_FmPcdFrmReplicGroupParams {
++ uint8_t maxNumOfEntries; /**< Maximal number of members in the group;
++ Must be at least 2. */
++ uint8_t numOfEntries; /**< Number of members in the group;
++ Must be at least 1. */
++ t_FmPcdCcNextEngineParams nextEngineParams[FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES];
++ /**< Array of members' parameters */
++} t_FmPcdFrmReplicGroupParams;
++#endif /* (DPAA_VERSION >= 11) */
++
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++/**************************************************************************//**
++ @Description   Structure for defining a statistics node
++*//***************************************************************************/
++typedef struct t_FmPcdStatsParams {
++ e_FmPcdStatsType type; /**< type of statistics node */
++} t_FmPcdStatsParams;
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++
++/**************************************************************************//**
++ @Function FM_PCD_NetEnvCharacteristicsSet
++
++ @Description Define a set of Network Environment Characteristics.
++
++ When setting an environment it is important to understand its
++ application. It is not meant to describe the flows that will run
++                on the ports using this environment, but what the user intends to do
++ with the PCD mechanisms in order to parse-classify-distribute those
++ frames.
++                By specifying a distinction unit, the user indicates that it will use that option
++ for distinction between frames at either a KeyGen scheme or a coarse
++ classification action descriptor. Using interchangeable headers to define a
++ unit means that the user is indifferent to which of the interchangeable
++ headers is present in the frame, and wants the distinction to be based
++ on the presence of either one of them.
++
++ Depending on context, there are limitations to the use of environments. A
++ port using the PCD functionality is bound to an environment. Some or even
++ all ports may share an environment but also an environment per port is
++ possible. When initializing a scheme, a classification plan group (see below),
++ or a coarse classification tree, one of the initialized environments must be
++ stated and related to. When a port is bound to a scheme, a classification
++ plan group, or a coarse classification tree, it MUST be bound to the same
++ environment.
++
++                The different PCD modules may rely (for flow definition) ONLY on
++                distinction units as defined by their environment. When initializing a
++                scheme, for example, it may not select IPV4 as a match for
++                recognizing flows unless it was defined in the related environment. In
++ fact, to guide the user through the configuration of the PCD, each module's
++ characterization in terms of flows is not done using protocol names, but using
++ environment indexes.
++
++                In terms of HW implementation, the list of distinction units sets the LCV vectors,
++                which are later used for the match vector, classification plan vectors, and coarse
++                classification indexing.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] p_NetEnvParams A structure of parameters for the initialization of
++ the network environment.
++
++ @Return A handle to the initialized object on success; NULL code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Handle FM_PCD_NetEnvCharacteristicsSet(t_Handle h_FmPcd, t_FmPcdNetEnvParams *p_NetEnvParams);
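++
++/* Usage sketch (illustrative only, not compiled): defining one distinction
++ * unit that treats IPv4 and IPv6 as interchangeable. The layout of
++ * t_FmPcdNetEnvParams / t_FmPcdDistinctionUnit (numOfDistinctionUnits,
++ * units[].hdrs[].hdr) is assumed from their definitions earlier in this
++ * header. */
++#if 0
++static t_Handle example_net_env(t_Handle h_FmPcd)
++{
++    t_FmPcdNetEnvParams netEnvParams;
++
++    memset(&netEnvParams, 0, sizeof(netEnvParams));
++    netEnvParams.numOfDistinctionUnits = 1;
++    netEnvParams.units[0].hdrs[0].hdr  = HEADER_TYPE_IPv4; /* either IPv4 ... */
++    netEnvParams.units[0].hdrs[1].hdr  = HEADER_TYPE_IPv6; /* ... or IPv6     */
++
++    return FM_PCD_NetEnvCharacteristicsSet(h_FmPcd, &netEnvParams);
++}
++#endif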
++
++/**************************************************************************//**
++ @Function FM_PCD_NetEnvCharacteristicsDelete
++
++ @Description Deletes a set of Network Environment Characteristics.
++
++ @Param[in] h_NetEnv A handle to the Network environment.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_PCD_NetEnvCharacteristicsDelete(t_Handle h_NetEnv);
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSchemeSet
++
++ @Description Initializing or modifying and enabling a scheme for the KeyGen.
++ This routine should be called for adding or modifying a scheme.
++                When a scheme needs modifying, the API requires that it be
++                rewritten. In such a case 'modify' should be TRUE. If the
++                routine is called for a valid scheme and 'modify' is FALSE,
++                it will return an error.
++
++ @Param[in] h_FmPcd If this is a new scheme - A handle to an FM PCD Module.
++ Otherwise NULL (ignored by driver).
++ @Param[in,out] p_SchemeParams A structure of parameters for defining the scheme
++
++ @Return A handle to the initialized scheme on success; NULL code otherwise.
++ When used as "modify" (rather than for setting a new scheme),
++ p_SchemeParams->id.h_Scheme will return NULL if action fails due to scheme
++ BUSY state.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Handle FM_PCD_KgSchemeSet(t_Handle h_FmPcd,
++ t_FmPcdKgSchemeParams *p_SchemeParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSchemeDelete
++
++ @Description Deleting an initialized scheme.
++
++ @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSchemeSet()
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init() & FM_PCD_KgSchemeSet().
++*//***************************************************************************/
++t_Error FM_PCD_KgSchemeDelete(t_Handle h_Scheme);
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSchemeGetCounter
++
++ @Description Reads scheme packet counter.
++
++ @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSchemeSet().
++
++ @Return Counter's current value.
++
++ @Cautions Allowed only following FM_PCD_Init() & FM_PCD_KgSchemeSet().
++*//***************************************************************************/
++uint32_t FM_PCD_KgSchemeGetCounter(t_Handle h_Scheme);
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSchemeSetCounter
++
++ @Description Writes scheme packet counter.
++
++ @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSchemeSet().
++ @Param[in] value New scheme counter value - typically '0' for
++ resetting the counter.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init() & FM_PCD_KgSchemeSet().
++*//***************************************************************************/
++t_Error FM_PCD_KgSchemeSetCounter(t_Handle h_Scheme, uint32_t value);
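++
++/* Usage sketch (illustrative only, not compiled): sampling and resetting a
++ * scheme packet counter with the two routines above. */
++#if 0
++static uint32_t example_sample_and_reset_scheme_counter(t_Handle h_Scheme)
++{
++    uint32_t packets = FM_PCD_KgSchemeGetCounter(h_Scheme);
++
++    /* '0' is the typical value for resetting the counter (see above) */
++    if (FM_PCD_KgSchemeSetCounter(h_Scheme, 0) != E_OK)
++        return 0;
++
++    return packets;
++}
++#endif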
++
++/**************************************************************************//**
++ @Function FM_PCD_PlcrProfileSet
++
++ @Description Sets a profile entry in the policer profile table.
++ The routine overrides any existing value.
++
++ @Param[in] h_FmPcd A handle to an FM PCD Module.
++ @Param[in] p_Profile A structure of parameters for defining a
++ policer profile entry.
++
++ @Return A handle to the initialized object on success; NULL code otherwise.
++ When used as "modify" (rather than for setting a new profile),
++ p_Profile->id.h_Profile will return NULL if action fails due to profile
++ BUSY state.
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Handle FM_PCD_PlcrProfileSet(t_Handle h_FmPcd,
++ t_FmPcdPlcrProfileParams *p_Profile);
++
++/**************************************************************************//**
++ @Function FM_PCD_PlcrProfileDelete
++
++ @Description Delete a profile entry in the policer profile table.
++                The routine sets the entry to invalid.
++
++ @Param[in] h_Profile A handle to the profile.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Error FM_PCD_PlcrProfileDelete(t_Handle h_Profile);
++
++/**************************************************************************//**
++ @Function FM_PCD_PlcrProfileGetCounter
++
++ @Description   Reads the value of the selected counter of a policer
++                profile entry.
++
++ @Param[in] h_Profile A handle to the profile.
++ @Param[in] counter Counter selector.
++
++ @Return specific counter value.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++uint32_t FM_PCD_PlcrProfileGetCounter(t_Handle h_Profile,
++ e_FmPcdPlcrProfileCounters counter);
++
++/**************************************************************************//**
++ @Function FM_PCD_PlcrProfileSetCounter
++
++ @Description   Writes a value to the selected counter of a policer
++                profile entry. The routine overrides any existing value.
++
++ @Param[in] h_Profile A handle to the profile.
++ @Param[in] counter Counter selector.
++ @Param[in] value value to set counter with.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Error FM_PCD_PlcrProfileSetCounter(t_Handle h_Profile,
++ e_FmPcdPlcrProfileCounters counter,
++ uint32_t value);
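++
++/* Usage sketch (illustrative only, not compiled): reading a selected policer
++ * profile counter and clearing it, using the two routines above. The caller
++ * supplies the counter selector. */
++#if 0
++static uint32_t example_read_and_clear_profile_counter(t_Handle h_Profile,
++                                                       e_FmPcdPlcrProfileCounters counter)
++{
++    uint32_t value = FM_PCD_PlcrProfileGetCounter(h_Profile, counter);
++
++    if (FM_PCD_PlcrProfileSetCounter(h_Profile, counter, 0) != E_OK)
++        return 0;
++
++    return value;
++}
++#endif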
++
++/**************************************************************************//**
++ @Function FM_PCD_CcRootBuild
++
++ @Description This routine must be called to define a complete coarse
++ classification tree. This is the way to define coarse
++ classification to a certain flow - the KeyGen schemes
++ may point only to trees defined in this way.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] p_Params A structure of parameters to define the tree.
++
++ @Return A handle to the initialized object on success; NULL code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Handle FM_PCD_CcRootBuild (t_Handle h_FmPcd,
++ t_FmPcdCcTreeParams *p_Params);
++
++/**************************************************************************//**
++ @Function FM_PCD_CcRootDelete
++
++ @Description   Deleting a built tree.
++
++ @Param[in] h_CcTree A handle to a CC tree.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Error FM_PCD_CcRootDelete(t_Handle h_CcTree);
++
++/**************************************************************************//**
++ @Function FM_PCD_CcRootModifyNextEngine
++
++ @Description Modify the Next Engine Parameters in the entry of the tree.
++
++ @Param[in] h_CcTree A handle to the tree
++ @Param[in] grpId A Group index in the tree
++ @Param[in] index Entry index in the group defined by grpId
++ @Param[in] p_FmPcdCcNextEngineParams Pointer to new next engine parameters
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions      Allowed only following FM_PCD_CcRootBuild().
++*//***************************************************************************/
++t_Error FM_PCD_CcRootModifyNextEngine(t_Handle h_CcTree,
++ uint8_t grpId,
++ uint8_t index,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableSet
++
++ @Description This routine should be called for each CC (coarse classification)
++ node. The whole CC tree should be built bottom up so that each
++ node points to already defined nodes.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] p_Param A structure of parameters defining the CC node
++
++ @Return A handle to the initialized object on success; NULL code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_Param);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableDelete
++
++ @Description   Deleting a built node.
++
++ @Param[in] h_CcNode A handle to a CC node.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableDelete(t_Handle h_CcNode);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableModifyMissNextEngine
++
++ @Description Modify the Next Engine Parameters of the Miss key case of the node.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in] p_FmPcdCcNextEngineParams Parameters for defining next engine
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet();
++ Not relevant in the case the node is of type 'INDEXED_LOOKUP'.
++ When configuring nextEngine = e_FM_PCD_CC, note that
++ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
++ from the currently changed table.
++
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableModifyMissNextEngine(t_Handle h_CcNode,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableRemoveKey
++
++ @Description Remove the key (including next engine parameters of this key)
++ defined by the index of the relevant node.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in] keyIndex Key index for removing
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
++ node and the nodes that lead to it.
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableRemoveKey(t_Handle h_CcNode, uint16_t keyIndex);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableAddKey
++
++ @Description   Add the key (including next engine parameters of this key) in the
++                index defined by the keyIndex. Note that 'FM_PCD_LAST_KEY_INDEX'
++                may be used by users that don't care about the position of the
++                key in the table - in that case, the key will be automatically
++                added by the driver in the last available entry.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in] keyIndex Key index for adding.
++ @Param[in] keySize Key size of added key
++ @Param[in]     p_KeyParams     A pointer to the parameters that include the
++                                new key with Next Engine Parameters
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
++ node and the nodes that lead to it.
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableAddKey(t_Handle h_CcNode,
++ uint16_t keyIndex,
++ uint8_t keySize,
++ t_FmPcdCcKeyParams *p_KeyParams);
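++
++/* Usage sketch (illustrative only, not compiled): appending a key in the last
++ * available entry via FM_PCD_LAST_KEY_INDEX. The t_FmPcdCcKeyParams members
++ * (p_Key, p_Mask, ccNextEngineParams) are assumed from the structure defined
++ * earlier in this header. */
++#if 0
++static t_Error example_append_key(t_Handle h_CcNode,
++                                  uint8_t *p_Key,
++                                  uint8_t *p_Mask,
++                                  uint8_t keySize,
++                                  t_FmPcdCcNextEngineParams *p_NextEngine)
++{
++    t_FmPcdCcKeyParams keyParams;
++
++    memset(&keyParams, 0, sizeof(keyParams));
++    keyParams.p_Key              = p_Key;
++    keyParams.p_Mask             = p_Mask; /* may be NULL when no masking */
++    keyParams.ccNextEngineParams = *p_NextEngine;
++
++    return FM_PCD_MatchTableAddKey(h_CcNode, FM_PCD_LAST_KEY_INDEX,
++                                   keySize, &keyParams);
++}
++#endif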
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableModifyNextEngine
++
++ @Description Modify the Next Engine Parameters in the relevant key entry of the node.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in] keyIndex Key index for Next Engine modifications
++ @Param[in] p_FmPcdCcNextEngineParams Parameters for defining next engine
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++ When configuring nextEngine = e_FM_PCD_CC, note that
++ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
++ from the currently changed table.
++
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableModifyNextEngine(t_Handle h_CcNode,
++ uint16_t keyIndex,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableModifyKeyAndNextEngine
++
++ @Description Modify the key and Next Engine Parameters of this key in the
++ index defined by the keyIndex.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in]     keyIndex            Key index for modifying
++ @Param[in]     keySize             Key size of the modified key
++ @Param[in]     p_KeyParams         A pointer to the parameters that include the
++                                    modified key and modified Next Engine Parameters
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
++ node and the nodes that lead to it.
++ When configuring nextEngine = e_FM_PCD_CC, note that
++ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
++ from the currently changed table.
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableModifyKeyAndNextEngine(t_Handle h_CcNode,
++ uint16_t keyIndex,
++ uint8_t keySize,
++ t_FmPcdCcKeyParams *p_KeyParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableModifyKey
++
++ @Description Modify the key in the index defined by the keyIndex.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in]     keyIndex        Key index for modifying
++ @Param[in]     keySize         Key size of the modified key
++ @Param[in] p_Key A pointer to the new key
++ @Param[in] p_Mask A pointer to the new mask if relevant,
++ otherwise pointer to NULL
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
++ node and the nodes that lead to it.
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode,
++ uint16_t keyIndex,
++ uint8_t keySize,
++ uint8_t *p_Key,
++ uint8_t *p_Mask);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableFindNRemoveKey
++
++ @Description Remove the key (including next engine parameters of this key)
++ defined by the key and mask. Note that this routine will search
++ the node to locate the index of the required key (& mask) to remove.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in] keySize Key size of the one to remove.
++ @Param[in] p_Key A pointer to the requested key to remove.
++ @Param[in] p_Mask A pointer to the mask if relevant,
++ otherwise pointer to NULL
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
++ node and the nodes that lead to it.
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableFindNRemoveKey(t_Handle h_CcNode,
++ uint8_t keySize,
++ uint8_t *p_Key,
++ uint8_t *p_Mask);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableFindNModifyNextEngine
++
++ @Description Modify the Next Engine Parameters in the relevant key entry of
++ the node. Note that this routine will search the node to locate
++ the index of the required key (& mask) to modify.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in] keySize Key size of the one to modify.
++ @Param[in] p_Key A pointer to the requested key to modify.
++ @Param[in] p_Mask A pointer to the mask if relevant,
++ otherwise pointer to NULL
++ @Param[in] p_FmPcdCcNextEngineParams Parameters for defining next engine
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++ When configuring nextEngine = e_FM_PCD_CC, note that
++ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
++ from the currently changed table.
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableFindNModifyNextEngine(t_Handle h_CcNode,
++ uint8_t keySize,
++ uint8_t *p_Key,
++ uint8_t *p_Mask,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableFindNModifyKeyAndNextEngine
++
++ @Description   Modify the key and Next Engine Parameters of the requested
++                key. Note that this routine will search
++ the node to locate the index of the required key (& mask) to modify.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in] keySize Key size of the one to modify.
++ @Param[in] p_Key A pointer to the requested key to modify.
++ @Param[in] p_Mask A pointer to the mask if relevant,
++ otherwise pointer to NULL
++ @Param[in]     p_KeyParams         A pointer to the parameters that include the
++                                    modified key and modified Next Engine Parameters
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
++ node and the nodes that lead to it.
++ When configuring nextEngine = e_FM_PCD_CC, note that
++ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
++ from the currently changed table.
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableFindNModifyKeyAndNextEngine(t_Handle h_CcNode,
++ uint8_t keySize,
++ uint8_t *p_Key,
++ uint8_t *p_Mask,
++ t_FmPcdCcKeyParams *p_KeyParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableFindNModifyKey
++
++ @Description   Modify the requested key. Note that
++ this routine will search the node to locate the index of the
++ required key (& mask) to modify.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in] keySize Key size of the one to modify.
++ @Param[in] p_Key A pointer to the requested key to modify.
++ @Param[in] p_Mask A pointer to the mask if relevant,
++ otherwise pointer to NULL
++ @Param[in] p_NewKey A pointer to the new key
++ @Param[in] p_NewMask A pointer to the new mask if relevant,
++ otherwise pointer to NULL
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
++ node and the nodes that lead to it.
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableFindNModifyKey(t_Handle h_CcNode,
++ uint8_t keySize,
++ uint8_t *p_Key,
++ uint8_t *p_Mask,
++ uint8_t *p_NewKey,
++ uint8_t *p_NewMask);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableGetKeyCounter
++
++ @Description   This routine may be used to get the counter of a specific key in a CC
++                Node; This counter reflects how many frames that matched
++                this key have passed.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in]     keyIndex        Key index of the requested key
++
++ @Return The specific key counter.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++*//***************************************************************************/
++uint32_t FM_PCD_MatchTableGetKeyCounter(t_Handle h_CcNode, uint16_t keyIndex);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableGetKeyStatistics
++
++ @Description This routine may be used to get statistics counters of specific key
++ in a CC Node.
++
++ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
++ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
++                these counters reflect how many frames that matched this key have
++                passed; The total frames count will be returned in the counter
++ of the first range (as only one frame length range was defined).
++ If 'e_FM_PCD_CC_STATS_MODE_RMON' was set for this node, the total
++ frame count will be separated to frame length counters, based on
++ provided frame length ranges.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in]     keyIndex        Key index of the requested key
++ @Param[out] p_KeyStatistics Key statistics counters
++
++ @Return        E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableGetKeyStatistics(t_Handle h_CcNode,
++ uint16_t keyIndex,
++ t_FmPcdCcKeyStatistics *p_KeyStatistics);
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableGetMissStatistics
++
++ @Description This routine may be used to get statistics counters of miss entry
++ in a CC Node.
++
++ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
++ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
++ these counters reflect how many frames were not matched to any
++ existing key and therefore passed through the miss entry; The
++ total frames count will be returned in the counter of the
++ first range (as only one frame length range was defined).
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[out] p_MissStatistics Statistics counters for 'miss'
++
++ @Return        E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableGetMissStatistics(t_Handle h_CcNode,
++ t_FmPcdCcKeyStatistics *p_MissStatistics);
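++
++/* Usage sketch (illustrative only, not compiled): polling the 'miss' frame
++ * count of a node. The 'frameCount' member of t_FmPcdCcKeyStatistics is
++ * assumed from the structure defined earlier in this header. */
++#if 0
++static uint32_t example_read_miss_frames(t_Handle h_CcNode)
++{
++    t_FmPcdCcKeyStatistics missStats;
++
++    if (FM_PCD_MatchTableGetMissStatistics(h_CcNode, &missStats) != E_OK)
++        return 0;
++
++    return missStats.frameCount; /* assumed member name */
++}
++#endif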
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableFindNGetKeyStatistics
++
++ @Description This routine may be used to get statistics counters of specific key
++ in a CC Node.
++
++ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
++ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
++                these counters reflect how many frames that matched this key have
++                passed; The total frames count will be returned in the counter
++ of the first range (as only one frame length range was defined).
++ If 'e_FM_PCD_CC_STATS_MODE_RMON' was set for this node, the total
++ frame count will be separated to frame length counters, based on
++ provided frame length ranges.
++ Note that this routine will search the node to locate the index
++ of the required key based on received key parameters.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in] keySize Size of the requested key
++ @Param[in] p_Key A pointer to the requested key
++ @Param[in] p_Mask A pointer to the mask if relevant,
++ otherwise pointer to NULL
++ @Param[out] p_KeyStatistics Key statistics counters
++
++ @Return        E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableFindNGetKeyStatistics(t_Handle h_CcNode,
++ uint8_t keySize,
++ uint8_t *p_Key,
++ uint8_t *p_Mask,
++ t_FmPcdCcKeyStatistics *p_KeyStatistics);
++
++/**************************************************************************//*
++ @Function FM_PCD_MatchTableGetNextEngine
++
++ @Description Gets NextEngine of the relevant keyIndex.
++
++ @Param[in] h_CcNode A handle to the node.
++ @Param[in] keyIndex keyIndex in the relevant node.
++ @Param[out] p_FmPcdCcNextEngineParams here updated nextEngine parameters for
++ the relevant keyIndex of the CC Node
++ received as parameter to this function
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableGetNextEngine(t_Handle h_CcNode,
++ uint16_t keyIndex,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++
++/**************************************************************************//*
++ @Function FM_PCD_MatchTableGetIndexedHashBucket
++
++ @Description This routine simulates KeyGen operation on the provided key and
++ calculates to which hash bucket it will be mapped.
++
++ @Param[in] h_CcNode A handle to the node.
++ @Param[in] kgKeySize Key size as it was configured in the KG
++ scheme that leads to this hash.
++ @Param[in] p_KgKey Pointer to the key; must be like the key
++                                        that the KG generates, i.e. the same
++                                        extraction and with mask if one exists.
++ @Param[in] kgHashShift Hash-shift as it was configured in the KG
++ scheme that leads to this hash.
++ @Param[out] p_CcNodeBucketHandle Pointer to the bucket of the provided key.
++ @Param[out] p_BucketIndex Index to the bucket of the provided key
++ @Param[out] p_LastIndex Pointer to last index in the bucket of the
++ provided key.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet()
++*//***************************************************************************/
++t_Error FM_PCD_MatchTableGetIndexedHashBucket(t_Handle h_CcNode,
++ uint8_t kgKeySize,
++ uint8_t *p_KgKey,
++ uint8_t kgHashShift,
++ t_Handle *p_CcNodeBucketHandle,
++ uint8_t *p_BucketIndex,
++ uint16_t *p_LastIndex);
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableSet
++
++ @Description This routine initializes a hash table structure.
++ KeyGen hash result determines the hash bucket.
++ Next, KeyGen key is compared against all keys of this
++ bucket (exact match).
++                The number of sets (number of buckets) of the hash equals the
++                number of 1-s in 'hashResMask' in the provided parameters.
++ Number of hash table ways is then calculated by dividing
++ 'maxNumOfKeys' equally between the hash sets. This is the maximal
++ number of keys that a hash bucket may hold.
++ The hash table is initialized empty and keys may be
++ added to it following the initialization. Keys masks are not
++ supported in current hash table implementation.
++ The initialized hash table can be integrated as a node in a
++ CC tree.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] p_Param A structure of parameters defining the hash table
++
++ @Return A handle to the initialized object on success; NULL code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param);
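++
++/* Usage sketch (illustrative only, not compiled): creating a hash table with
++ * 64 buckets (six 1-s in 'hashResMask', per the sizing rule above) and up to
++ * 1024 keys, i.e. 16 ways per bucket. The t_FmPcdHashTableParams members
++ * used here (maxNumOfKeys, hashResMask, matchKeySize,
++ * ccNextEngineParamsForMiss) are assumed from the structure defined earlier
++ * in this header. */
++#if 0
++static t_Handle example_hash_table(t_Handle h_FmPcd,
++                                   t_FmPcdCcNextEngineParams *p_MissParams)
++{
++    t_FmPcdHashTableParams hashParams;
++
++    memset(&hashParams, 0, sizeof(hashParams));
++    hashParams.maxNumOfKeys              = 1024;   /* 1024 / 64 = 16 ways */
++    hashParams.hashResMask               = 0x3f0;  /* six set bits -> 64 buckets */
++    hashParams.matchKeySize              = 6;      /* e.g. a MAC address */
++    hashParams.ccNextEngineParamsForMiss = *p_MissParams;
++
++    return FM_PCD_HashTableSet(h_FmPcd, &hashParams);
++}
++#endif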
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableDelete
++
++ @Description   This routine deletes the provided hash table and releases all
++                its allocated resources.
++
++ @Param[in] h_HashTbl A handle to a hash table
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl);
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableAddKey
++
++ @Description This routine adds the provided key (including next engine
++ parameters of this key) to the hash table.
++ The key is added as the last key of the bucket that it is
++ mapped to.
++
++ @Param[in] h_HashTbl A handle to a hash table
++ @Param[in] keySize Key size of added key
++ @Param[in]     p_KeyParams     A pointer to the parameters that include the
++                                new key with next engine parameters; The pointer
++ to the key mask must be NULL, as masks are not
++ supported in hash table implementation.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++t_Error FM_PCD_HashTableAddKey(t_Handle h_HashTbl,
++ uint8_t keySize,
++ t_FmPcdCcKeyParams *p_KeyParams);
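++
++/* Usage sketch (illustrative only, not compiled): adding a key to a hash
++ * table. Per the note above, the key mask pointer must be NULL. The
++ * t_FmPcdCcKeyParams members are assumed from the structure defined earlier
++ * in this header. */
++#if 0
++static t_Error example_hash_add(t_Handle h_HashTbl,
++                                uint8_t *p_Key,
++                                uint8_t keySize,
++                                t_FmPcdCcNextEngineParams *p_NextEngine)
++{
++    t_FmPcdCcKeyParams keyParams;
++
++    memset(&keyParams, 0, sizeof(keyParams));
++    keyParams.p_Key              = p_Key;
++    keyParams.p_Mask             = NULL; /* masks are not supported here */
++    keyParams.ccNextEngineParams = *p_NextEngine;
++
++    return FM_PCD_HashTableAddKey(h_HashTbl, keySize, &keyParams);
++}
++#endif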
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableRemoveKey
++
++ @Description This routine removes the requested key (including next engine
++ parameters of this key) from the hash table.
++
++ @Param[in] h_HashTbl A handle to a hash table
++ @Param[in] keySize Key size of the one to remove.
++ @Param[in] p_Key A pointer to the requested key to remove.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++t_Error FM_PCD_HashTableRemoveKey(t_Handle h_HashTbl,
++ uint8_t keySize,
++ uint8_t *p_Key);
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableModifyNextEngine
++
++ @Description This routine modifies the next engine for the provided key. The
++ key should be previously added to the hash table.
++
++ @Param[in] h_HashTbl A handle to a hash table
++ @Param[in] keySize Key size of the key to modify.
++ @Param[in] p_Key A pointer to the requested key to modify.
++ @Param[in] p_FmPcdCcNextEngineParams A structure for defining new next engine
++ parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++ When configuring nextEngine = e_FM_PCD_CC, note that
++ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
++ from the currently changed table.
++*//***************************************************************************/
++t_Error FM_PCD_HashTableModifyNextEngine(t_Handle h_HashTbl,
++ uint8_t keySize,
++ uint8_t *p_Key,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableModifyMissNextEngine
++
++ @Description This routine modifies the next engine on key match miss.
++
++ @Param[in] h_HashTbl A handle to a hash table
++ @Param[in] p_FmPcdCcNextEngineParams A structure for defining new next engine
++ parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++ When configuring nextEngine = e_FM_PCD_CC, note that
++ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
++ from the currently changed table.
++*//***************************************************************************/
++t_Error FM_PCD_HashTableModifyMissNextEngine(t_Handle h_HashTbl,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++
++/**************************************************************************//*
++ @Function FM_PCD_HashTableGetMissNextEngine
++
++ @Description Gets NextEngine in case of key match miss.
++
++ @Param[in] h_HashTbl A handle to a hash table
++ @Param[out] p_FmPcdCcNextEngineParams Next engine parameters for the specified
++ hash table.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++t_Error FM_PCD_HashTableGetMissNextEngine(t_Handle h_HashTbl,
++ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableFindNGetKeyStatistics
++
++ @Description This routine may be used to get statistics counters of specific key
++ in a hash table.
++
++ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
++ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
++                these counters reflect how many frames that matched this key have
++                passed; The total frames count will be returned in the counter
++ of the first range (as only one frame length range was defined).
++ If 'e_FM_PCD_CC_STATS_MODE_RMON' was set for this node, the total
++ frame count will be separated to frame length counters, based on
++ provided frame length ranges.
++ Note that this routine will identify the bucket of this key in
++ the hash table and will search the bucket to locate the index
++ of the required key based on received key parameters.
++
++ @Param[in] h_HashTbl A handle to a hash table
++ @Param[in] keySize Size of the requested key
++ @Param[in] p_Key A pointer to the requested key
++ @Param[out] p_KeyStatistics Key statistics counters
++
++ @Return        E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++t_Error FM_PCD_HashTableFindNGetKeyStatistics(t_Handle h_HashTbl,
++ uint8_t keySize,
++ uint8_t *p_Key,
++ t_FmPcdCcKeyStatistics *p_KeyStatistics);
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableGetMissStatistics
++
++ @Description   This routine may be used to get statistics counters of the 'miss'
++                entry of a hash table.
++
++ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
++ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
++ these counters reflect how many frames were not matched to any
++ existing key and therefore passed through the miss entry;
++
++ @Param[in] h_HashTbl A handle to a hash table
++ @Param[out] p_MissStatistics Statistics counters for 'miss'
++
++ @Return        E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++t_Error FM_PCD_HashTableGetMissStatistics(t_Handle h_HashTbl,
++ t_FmPcdCcKeyStatistics *p_MissStatistics);
++
++/**************************************************************************//**
++ @Function FM_PCD_ManipNodeSet
++
++ @Description This routine should be called for defining a manipulation
++ node. A manipulation node must be defined before the CC node
++ that precedes it.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] p_FmPcdManipParams A structure of parameters defining the manipulation
++
++ @Return A handle to the initialized object on success; NULL code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Handle FM_PCD_ManipNodeSet(t_Handle h_FmPcd, t_FmPcdManipParams *p_FmPcdManipParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_ManipNodeDelete
++
++ @Description Delete an existing manipulation node.
++
++ @Param[in] h_ManipNode A handle to a manipulation node.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_ManipNodeSet().
++*//***************************************************************************/
++t_Error FM_PCD_ManipNodeDelete(t_Handle h_ManipNode);
++
++/**************************************************************************//**
++ @Function FM_PCD_ManipGetStatistics
++
++ @Description Retrieve the manipulation statistics.
++
++ @Param[in] h_ManipNode A handle to a manipulation node.
++ @Param[out] p_FmPcdManipStats A structure for retrieving the manipulation statistics
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_ManipNodeSet().
++*//***************************************************************************/
++t_Error FM_PCD_ManipGetStatistics(t_Handle h_ManipNode, t_FmPcdManipStats *p_FmPcdManipStats);
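++
++/* Usage sketch (illustrative only, not compiled): reading IPv4 reassembly
++ * counters from a reassembly manipulation node, using the statistics
++ * structures defined above (slot '0' of specificHdrStatistics is IPv4). */
++#if 0
++static void example_read_ip_reassem_stats(t_Handle h_ManipNode)
++{
++    t_FmPcdManipStats stats;
++
++    if (FM_PCD_ManipGetStatistics(h_ManipNode, &stats) != E_OK)
++        return;
++
++    /* common counters */
++    (void)stats.u.reassem.u.ipReassem.timeout;
++    /* IPv4-specific counters (slot '0') */
++    (void)stats.u.reassem.u.ipReassem.specificHdrStatistics[0].successfullyReassembled;
++    (void)stats.u.reassem.u.ipReassem.specificHdrStatistics[0].discardedFragments;
++}
++#endif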
++
++/**************************************************************************//**
++ @Function FM_PCD_ManipNodeReplace
++
++ @Description   Change an existing manipulation node according to a new requirement.
++
++ @Param[in] h_ManipNode A handle to a manipulation node.
++ @Param[in]     p_ManipParams   A structure of parameters defining the change requirement
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_ManipNodeSet().
++*//***************************************************************************/
++t_Error FM_PCD_ManipNodeReplace(t_Handle h_ManipNode, t_FmPcdManipParams *p_ManipParams);
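++
++/* Usage sketch (illustrative only; 'handle_error()' is hypothetical):
++ * a typical manipulation-node life cycle. The contents of 'manipParams'
++ * are application-specific; see t_FmPcdManipParams for the fields.
++ *
++ *     t_FmPcdManipParams manipParams;
++ *     t_FmPcdManipStats  manipStats;
++ *     t_Handle           h_Manip;
++ *
++ *     memset(&manipParams, 0, sizeof(manipParams));
++ *     // ... fill manipParams according to the required manipulation ...
++ *     h_Manip = FM_PCD_ManipNodeSet(h_FmPcd, &manipParams);
++ *     if (!h_Manip)
++ *         handle_error();
++ *     FM_PCD_ManipGetStatistics(h_Manip, &manipStats);
++ *     FM_PCD_ManipNodeDelete(h_Manip);
++ */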
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Function FM_PCD_FrmReplicSetGroup
++
++ @Description Initialize a Frame Replicator group.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] p_FrmReplicGroupParam A structure of parameters for the initialization of
++ the frame replicator group.
++
++ @Return A handle to the initialized object on success; NULL code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Handle FM_PCD_FrmReplicSetGroup(t_Handle h_FmPcd, t_FmPcdFrmReplicGroupParams *p_FrmReplicGroupParam);
++
++/**************************************************************************//**
++ @Function FM_PCD_FrmReplicDeleteGroup
++
++ @Description Delete a Frame Replicator group.
++
++ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup().
++*//***************************************************************************/
++t_Error FM_PCD_FrmReplicDeleteGroup(t_Handle h_FrmReplicGroup);
++
++/**************************************************************************//**
++ @Function FM_PCD_FrmReplicAddMember
++
++ @Description Add a member at the index defined by memberIndex.
++
++ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
++ @Param[in] memberIndex member index for adding.
++ @Param[in] p_MemberParams A pointer to the new member parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup() of this group.
++*//***************************************************************************/
++t_Error FM_PCD_FrmReplicAddMember(t_Handle h_FrmReplicGroup,
++ uint16_t memberIndex,
++ t_FmPcdCcNextEngineParams *p_MemberParams);
++
++/**************************************************************************//**
++ @Function FM_PCD_FrmReplicRemoveMember
++
++ @Description Remove the member defined by the index from the relevant group.
++
++ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
++ @Param[in] memberIndex member index for removing.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup() of this group.
++*//***************************************************************************/
++t_Error FM_PCD_FrmReplicRemoveMember(t_Handle h_FrmReplicGroup,
++ uint16_t memberIndex);
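++
++/* Usage sketch (illustrative only; 'handle_error()' and the member index
++ * are hypothetical): a frame replicator group life cycle.
++ *
++ *     t_FmPcdFrmReplicGroupParams groupParams;
++ *     t_FmPcdCcNextEngineParams   memberParams;
++ *     t_Handle                    h_Group;
++ *
++ *     memset(&groupParams, 0, sizeof(groupParams));
++ *     // ... fill groupParams (maximum members, initial members, ...) ...
++ *     h_Group = FM_PCD_FrmReplicSetGroup(h_FmPcd, &groupParams);
++ *     if (!h_Group)
++ *         handle_error();
++ *     memset(&memberParams, 0, sizeof(memberParams));
++ *     // ... fill memberParams with the member's next-engine settings ...
++ *     FM_PCD_FrmReplicAddMember(h_Group, 1, &memberParams);
++ *     FM_PCD_FrmReplicRemoveMember(h_Group, 1);
++ *     FM_PCD_FrmReplicDeleteGroup(h_Group);
++ */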
++#endif /* (DPAA_VERSION >= 11) */
++
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++/**************************************************************************//**
++ @Function FM_PCD_StatisticsSetNode
++
++ @Description This routine should be called for defining a statistics node.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] p_FmPcdstatsParams A structure of parameters defining the statistics
++
++ @Return A handle to the initialized object on success; NULL code otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++t_Handle FM_PCD_StatisticsSetNode(t_Handle h_FmPcd, t_FmPcdStatsParams *p_FmPcdstatsParams);
++#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++
++/** @} */ /* end of FM_PCD_Runtime_build_grp group */
++/** @} */ /* end of FM_PCD_Runtime_grp group */
++/** @} */ /* end of FM_PCD_grp group */
++/** @} */ /* end of FM_grp group */
++
++
++#ifdef NCSW_BACKWARD_COMPATIBLE_API
++#define FM_PCD_MAX_NUM_OF_INTERCHANGABLE_HDRS FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS
++#define e_FM_PCD_MANIP_ONE_WAYS_HASH e_FM_PCD_MANIP_ONE_WAY_HASH
++#define e_FM_PCD_MANIP_TOW_WAYS_HASH e_FM_PCD_MANIP_TWO_WAYS_HASH
++
++#define e_FM_PCD_MANIP_FRAGMENT_PACKECT e_FM_PCD_MANIP_FRAGMENT_PACKET /* Feb13 */
++
++#define FM_PCD_SetNetEnvCharacteristics(_pcd, _params) \
++ FM_PCD_NetEnvCharacteristicsSet(_pcd, _params)
++#define FM_PCD_KgSetScheme(_pcd, _params) FM_PCD_KgSchemeSet(_pcd, _params)
++#define FM_PCD_CcBuildTree(_pcd, _params) FM_PCD_CcRootBuild(_pcd, _params)
++#define FM_PCD_CcSetNode(_pcd, _params) FM_PCD_MatchTableSet(_pcd, _params)
++#define FM_PCD_PlcrSetProfile(_pcd, _params) FM_PCD_PlcrProfileSet(_pcd, _params)
++#define FM_PCD_ManipSetNode(_pcd, _params) FM_PCD_ManipNodeSet(_pcd, _params)
++
++#define FM_PCD_DeleteNetEnvCharacteristics(_pcd, ...) \
++ FM_PCD_NetEnvCharacteristicsDelete(__VA_ARGS__)
++#define FM_PCD_KgDeleteScheme(_pcd, ...) \
++ FM_PCD_KgSchemeDelete(__VA_ARGS__)
++#define FM_PCD_KgGetSchemeCounter(_pcd, ...) \
++ FM_PCD_KgSchemeGetCounter(__VA_ARGS__)
++#define FM_PCD_KgSetSchemeCounter(_pcd, ...) \
++ FM_PCD_KgSchemeSetCounter(__VA_ARGS__)
++#define FM_PCD_PlcrDeleteProfile(_pcd, ...) \
++ FM_PCD_PlcrProfileDelete(__VA_ARGS__)
++#define FM_PCD_PlcrGetProfileCounter(_pcd, ...) \
++ FM_PCD_PlcrProfileGetCounter(__VA_ARGS__)
++#define FM_PCD_PlcrSetProfileCounter(_pcd, ...) \
++ FM_PCD_PlcrProfileSetCounter(__VA_ARGS__)
++#define FM_PCD_CcDeleteTree(_pcd, ...) \
++ FM_PCD_CcRootDelete(__VA_ARGS__)
++#define FM_PCD_CcTreeModifyNextEngine(_pcd, ...) \
++ FM_PCD_CcRootModifyNextEngine(__VA_ARGS__)
++#define FM_PCD_CcDeleteNode(_pcd, ...) \
++ FM_PCD_MatchTableDelete(__VA_ARGS__)
++#define FM_PCD_CcNodeModifyMissNextEngine(_pcd, ...) \
++ FM_PCD_MatchTableModifyMissNextEngine(__VA_ARGS__)
++#define FM_PCD_CcNodeRemoveKey(_pcd, ...) \
++ FM_PCD_MatchTableRemoveKey(__VA_ARGS__)
++#define FM_PCD_CcNodeAddKey(_pcd, ...) \
++ FM_PCD_MatchTableAddKey(__VA_ARGS__)
++#define FM_PCD_CcNodeModifyNextEngine(_pcd, ...) \
++ FM_PCD_MatchTableModifyNextEngine(__VA_ARGS__)
++#define FM_PCD_CcNodeModifyKeyAndNextEngine(_pcd, ...) \
++ FM_PCD_MatchTableModifyKeyAndNextEngine(__VA_ARGS__)
++#define FM_PCD_CcNodeModifyKey(_pcd, ...) \
++ FM_PCD_MatchTableModifyKey(__VA_ARGS__)
++#define FM_PCD_CcNodeFindNRemoveKey(_pcd, ...) \
++ FM_PCD_MatchTableFindNRemoveKey(__VA_ARGS__)
++#define FM_PCD_CcNodeFindNModifyNextEngine(_pcd, ...) \
++ FM_PCD_MatchTableFindNModifyNextEngine(__VA_ARGS__)
++#define FM_PCD_CcNodeFindNModifyKeyAndNextEngine(_pcd, ...) \
++ FM_PCD_MatchTableFindNModifyKeyAndNextEngine(__VA_ARGS__)
++#define FM_PCD_CcNodeFindNModifyKey(_pcd, ...) \
++ FM_PCD_MatchTableFindNModifyKey(__VA_ARGS__)
++#define FM_PCD_CcIndexedHashNodeGetBucket(_pcd, ...) \
++ FM_PCD_MatchTableGetIndexedHashBucket(__VA_ARGS__)
++#define FM_PCD_CcNodeGetNextEngine(_pcd, ...) \
++ FM_PCD_MatchTableGetNextEngine(__VA_ARGS__)
++#define FM_PCD_CcNodeGetKeyCounter(_pcd, ...) \
++ FM_PCD_MatchTableGetKeyCounter(__VA_ARGS__)
++#define FM_PCD_ManipDeleteNode(_pcd, ...) \
++ FM_PCD_ManipNodeDelete(__VA_ARGS__)
++#endif /* NCSW_BACKWARD_COMPATIBLE_API */
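++
++/* Migration sketch (illustrative only; 'h_NetEnv' and 'netEnvParams' are
++ * hypothetical): with NCSW_BACKWARD_COMPATIBLE_API defined, a legacy call
++ * such as
++ *
++ *     h_NetEnv = FM_PCD_SetNetEnvCharacteristics(h_FmPcd, &netEnvParams);
++ *
++ * expands to the current API
++ *
++ *     h_NetEnv = FM_PCD_NetEnvCharacteristicsSet(h_FmPcd, &netEnvParams);
++ *
++ * New code should call the current names directly.
++ */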
++
++
++#endif /* __FM_PCD_EXT */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_port_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_port_ext.h
+new file mode 100644
+index 00000000..08a5aa59
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_port_ext.h
+@@ -0,0 +1,2608 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File fm_port_ext.h
++
++ @Description FM-Port Application Programming Interface.
++*//***************************************************************************/
++#ifndef __FM_PORT_EXT
++#define __FM_PORT_EXT
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "fm_pcd_ext.h"
++#include "fm_ext.h"
++#include "net_ext.h"
++
++
++/**************************************************************************//**
++
++ @Group FM_grp Frame Manager API
++
++ @Description FM API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_PORT_grp FM Port
++
++ @Description FM Port API
++
++ The FM uses a general module called "port" to represent a Tx port
++ (MAC), an Rx port (MAC) or Offline Parsing port.
++ The number of ports in an FM varies between SOCs.
++ The SW driver manages these ports as sub-modules of the FM, i.e.
++ after an FM is initialized, its ports may be initialized and
++ operated upon.
++
++ The port is initialized aware of its type, but other functions on
++ a port may be indifferent to its type. When necessary, the driver
++ verifies coherence and returns an error if applicable.
++
++ On initialization, the user specifies the port type and its index
++ (relative to the port's type) - always starting at 0.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description An enum for defining port PCD modes.
++ This enum defines the superset of supported PCD engines - i.e. not
++ all engines have to be used, but all have to be enabled. The real
++ flow of a specific frame depends on the PCD configuration and the
++ frame headers and payload.
++ Note: the first engine and the first engine after the parser (if
++ present) must be listed in order; this order is important as it
++ defines the flow of the port. For the remaining engines (the ones
++ that follow), the order no longer matters, as it is defined by the
++ PCD graph itself.
++*//***************************************************************************/
++typedef enum e_FmPortPcdSupport {
++ e_FM_PORT_PCD_SUPPORT_NONE = 0 /**< BMI to BMI, PCD is not used */
++ , e_FM_PORT_PCD_SUPPORT_PRS_ONLY /**< Use only Parser */
++ , e_FM_PORT_PCD_SUPPORT_PLCR_ONLY /**< Use only Policer */
++ , e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR /**< Use Parser and Policer */
++ , e_FM_PORT_PCD_SUPPORT_PRS_AND_KG /**< Use Parser and Keygen */
++ , e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC /**< Use Parser, Keygen and Coarse Classification */
++ , e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR
++ /**< Use all PCD engines */
++ , e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR /**< Use Parser, Keygen and Policer */
++ , e_FM_PORT_PCD_SUPPORT_PRS_AND_CC /**< Use Parser and Coarse Classification */
++ , e_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR /**< Use Parser and Coarse Classification and Policer */
++ , e_FM_PORT_PCD_SUPPORT_CC_ONLY /**< Use only Coarse Classification */
++#ifdef FM_CAPWAP_SUPPORT
++ , e_FM_PORT_PCD_SUPPORT_CC_AND_KG /**< Use Coarse Classification,and Keygen */
++ , e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR /**< Use Coarse Classification, Keygen and Policer */
++#endif /* FM_CAPWAP_SUPPORT */
++} e_FmPortPcdSupport;
++
++/**************************************************************************//**
++ @Description Port interrupts
++*//***************************************************************************/
++typedef enum e_FmPortExceptions {
++ e_FM_PORT_EXCEPTION_IM_BUSY /**< Independent-Mode Rx-BUSY */
++} e_FmPortExceptions;
++
++
++/**************************************************************************//**
++ @Collection General FM Port defines
++*//***************************************************************************/
++#define FM_PORT_PRS_RESULT_NUM_OF_WORDS 8 /**< Number of 4 bytes words in parser result */
++/* @} */
++
++/**************************************************************************//**
++ @Collection FM Frame error
++*//***************************************************************************/
++typedef uint32_t fmPortFrameErrSelect_t; /**< typedef for defining Frame Descriptor errors */
++
++#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT FM_FD_ERR_UNSUPPORTED_FORMAT /**< Not for Rx-Port! Unsupported Format */
++#define FM_PORT_FRM_ERR_LENGTH FM_FD_ERR_LENGTH /**< Not for Rx-Port! Length Error */
++#define FM_PORT_FRM_ERR_DMA FM_FD_ERR_DMA /**< DMA Data error */
++#define FM_PORT_FRM_ERR_NON_FM FM_FD_RX_STATUS_ERR_NON_FM /**< non Frame-Manager error; probably comes from a SEC that
++ was chained to the FM */
++
++#define FM_PORT_FRM_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR) /**< IPR error */
++#define FM_PORT_FRM_ERR_IPR_NCSP (FM_FD_ERR_IPR_NCSP & ~FM_FD_IPR) /**< IPR non-consistent-sp */
++
++#define FM_PORT_FRM_ERR_IPFE 0 /**< Obsolete; will be removed in the future */
++
++#ifdef FM_CAPWAP_SUPPORT
++#define FM_PORT_FRM_ERR_CRE FM_FD_ERR_CRE
++#define FM_PORT_FRM_ERR_CHE FM_FD_ERR_CHE
++#endif /* FM_CAPWAP_SUPPORT */
++
++#define FM_PORT_FRM_ERR_PHYSICAL FM_FD_ERR_PHYSICAL /**< Rx FIFO overflow, FCS error, code error, running disparity
++ error (SGMII and TBI modes), FIFO parity error. PHY
++ Sequence error, PHY error control character detected. */
++#define FM_PORT_FRM_ERR_SIZE FM_FD_ERR_SIZE /**< Frame too long OR Frame size exceeds max_length_frame */
++#define FM_PORT_FRM_ERR_CLS_DISCARD FM_FD_ERR_CLS_DISCARD /**< indicates a classifier "drop" operation */
++#define FM_PORT_FRM_ERR_EXTRACTION FM_FD_ERR_EXTRACTION /**< Extract Out of Frame */
++#define FM_PORT_FRM_ERR_NO_SCHEME FM_FD_ERR_NO_SCHEME /**< No Scheme Selected */
++#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW FM_FD_ERR_KEYSIZE_OVERFLOW /**< Keysize Overflow */
++#define FM_PORT_FRM_ERR_COLOR_RED FM_FD_ERR_COLOR_RED /**< Frame color is red */
++#define FM_PORT_FRM_ERR_COLOR_YELLOW FM_FD_ERR_COLOR_YELLOW /**< Frame color is yellow */
++#define FM_PORT_FRM_ERR_ILL_PLCR FM_FD_ERR_ILL_PLCR /**< Illegal Policer Profile selected */
++#define FM_PORT_FRM_ERR_PLCR_FRAME_LEN FM_FD_ERR_PLCR_FRAME_LEN /**< Policer frame length error */
++#define FM_PORT_FRM_ERR_PRS_TIMEOUT FM_FD_ERR_PRS_TIMEOUT /**< Parser Time out Exceed */
++#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT FM_FD_ERR_PRS_ILL_INSTRUCT /**< Invalid Soft Parser instruction */
++#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR /**< Header error was identified during parsing */
++#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED /**< Frame parsed beyond the first 256 bytes */
++#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001 /**< FPM Frame Processing Timeout Exceeded */
++/* @} */
++
++
++
++/**************************************************************************//**
++ @Group FM_PORT_init_grp FM Port Initialization Unit
++
++ @Description FM Port Initialization Unit
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description Exceptions user callback routine; will be called upon an
++ exception, passing the exception identification.
++
++ @Param[in] h_App - User's application descriptor.
++ @Param[in] exception - The exception.
++ *//***************************************************************************/
++typedef void (t_FmPortExceptionCallback) (t_Handle h_App, e_FmPortExceptions exception);
++
++/**************************************************************************//**
++ @Description User callback function called by driver with received data.
++
++ User provides this function. Driver invokes it.
++
++ @Param[in] h_App Application's handle originally specified to
++ the API Config function
++ @Param[in] p_Data A pointer to data received
++ @Param[in] length length of received data
++ @Param[in] status receive status and errors
++ @Param[in] position position of buffer in frame
++ @Param[in] h_BufContext A handle of the user associated with this buffer
++
++ @Retval e_RX_STORE_RESPONSE_CONTINUE - order the driver to continue Rx
++ operation for all ready data.
++ @Retval e_RX_STORE_RESPONSE_PAUSE - order the driver to stop Rx operation.
++*//***************************************************************************/
++typedef e_RxStoreResponse (t_FmPortImRxStoreCallback) (t_Handle h_App,
++ uint8_t *p_Data,
++ uint16_t length,
++ uint16_t status,
++ uint8_t position,
++ t_Handle h_BufContext);
++
++/**************************************************************************//**
++ @Description User callback function called by driver when transmit completed.
++
++ User provides this function. Driver invokes it.
++
++ @Param[in] h_App Application's handle originally specified to
++ the API Config function
++ @Param[in] p_Data A pointer to data received
++ @Param[in] status transmit status and errors
++ @Param[in] lastBuffer is last buffer in frame
++ @Param[in] h_BufContext A handle of the user associated with this buffer
++ *//***************************************************************************/
++typedef void (t_FmPortImTxConfCallback) (t_Handle h_App,
++ uint8_t *p_Data,
++ uint16_t status,
++ t_Handle h_BufContext);
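++
++/* Implementation sketch (illustrative only): minimal Independent-Mode
++ * callbacks. 'app_consume_frame()' and 'app_release_tx_buf()' stand in for
++ * application-specific processing and are hypothetical.
++ *
++ *     static e_RxStoreResponse appRxStore(t_Handle h_App, uint8_t *p_Data,
++ *                                         uint16_t length, uint16_t status,
++ *                                         uint8_t position, t_Handle h_BufContext)
++ *     {
++ *         app_consume_frame(h_App, p_Data, length, status, position);
++ *         return e_RX_STORE_RESPONSE_CONTINUE;  // keep receiving
++ *     }
++ *
++ *     static void appTxConf(t_Handle h_App, uint8_t *p_Data,
++ *                           uint16_t status, t_Handle h_BufContext)
++ *     {
++ *         app_release_tx_buf(h_App, p_Data, h_BufContext);
++ *     }
++ */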
++
++/**************************************************************************//**
++ @Description A structure for additional Rx port parameters
++*//***************************************************************************/
++typedef struct t_FmPortRxParams {
++ uint32_t errFqid; /**< Error Queue Id. */
++ uint32_t dfltFqid; /**< Default Queue Id. */
++ uint16_t liodnOffset; /**< Port's LIODN offset. */
++ t_FmExtPools extBufPools; /**< Which external buffer pools are used
++ (up to FM_PORT_MAX_NUM_OF_EXT_POOLS), and their sizes. */
++} t_FmPortRxParams;
++
++/**************************************************************************//**
++ @Description A structure for additional non-Rx port parameters
++*//***************************************************************************/
++typedef struct t_FmPortNonRxParams {
++ uint32_t errFqid; /**< Error Queue Id. */
++ uint32_t dfltFqid; /**< For Tx - Default Confirmation queue,
++ 0 means no Tx confirmation for processed
++ frames. For OP port - default Rx queue. */
++ uint32_t qmChannel; /**< QM-channel dedicated to this port; will be used
++ by the FM for dequeue. */
++} t_FmPortNonRxParams;
++
++/**************************************************************************//**
++ @Description A structure for additional Rx port parameters
++*//***************************************************************************/
++typedef struct t_FmPortImRxTxParams {
++ t_Handle h_FmMuram; /**< A handle of the FM-MURAM partition */
++ uint16_t liodnOffset; /**< For Rx ports only. Port's LIODN Offset. */
++ uint8_t dataMemId; /**< Memory partition ID for data buffers */
++ uint32_t dataMemAttributes; /**< Memory attributes for data buffers */
++ t_BufferPoolInfo rxPoolParams; /**< For Rx ports only. */
++ t_FmPortImRxStoreCallback *f_RxStore; /**< For Rx ports only. */
++ t_FmPortImTxConfCallback *f_TxConf; /**< For Tx ports only. */
++} t_FmPortImRxTxParams;
++
++/**************************************************************************//**
++ @Description A union for additional parameters depending on port type
++*//***************************************************************************/
++typedef union u_FmPortSpecificParams {
++ t_FmPortImRxTxParams imRxTxParams; /**< Rx/Tx Independent-Mode port parameter structure */
++ t_FmPortRxParams rxParams; /**< Rx port parameters structure */
++ t_FmPortNonRxParams nonRxParams; /**< Non-Rx port parameters structure */
++} u_FmPortSpecificParams;
++
++/**************************************************************************//**
++ @Description A structure representing FM initialization parameters
++*//***************************************************************************/
++typedef struct t_FmPortParams {
++ uintptr_t baseAddr; /**< Virtual Address of memory mapped FM Port registers.*/
++ t_Handle h_Fm; /**< A handle to the FM object this port related to */
++ e_FmPortType portType; /**< Port type */
++ uint8_t portId; /**< Port Id - relative to type;
++ NOTE: When configuring Offline Parsing port for
++ FMANv3 devices (DPAA_VERSION 11 and higher),
++ it is highly recommended NOT to use portId=0 due to lack
++ of HW resources on portId=0. */
++ bool independentModeEnable;
++ /**< This port is Independent-Mode - Used for Rx/Tx ports only! */
++ uint16_t liodnBase; /**< Irrelevant for P4080 rev 1. LIODN base for this port, to be
++ used together with LIODN offset. */
++ u_FmPortSpecificParams specificParams; /**< Additional parameters depending on port
++ type. */
++
++ t_FmPortExceptionCallback *f_Exception; /**< Relevant for IM only. Callback routine to be called on BUSY exception */
++ t_Handle h_App; /**< A handle to an application layer object; This handle will
++ be passed by the driver upon calling the above callbacks */
++} t_FmPortParams;
++
++
++/**************************************************************************//**
++ @Function FM_PORT_Config
++
++ @Description Creates a descriptor for the FM PORT module.
++
++ The routine returns a handle (descriptor) to the FM PORT object.
++ This descriptor must be passed as first parameter to all other
++ FM PORT function calls.
++
++ No actual initialization or configuration of FM hardware is
++ done by this routine.
++
++ @Param[in] p_FmPortParams - Pointer to data structure of parameters
++
++ @Retval Handle to FM object, or NULL for Failure.
++*//***************************************************************************/
++t_Handle FM_PORT_Config(t_FmPortParams *p_FmPortParams);
++
++/**************************************************************************//**
++ @Function FM_PORT_Init
++
++ @Description Initializes the FM PORT module by defining the software structure
++ and configuring the hardware registers.
++
++ @Param[in] h_FmPort - FM PORT module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_PORT_Init(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_Free
++
++ @Description Frees all resources that were assigned to FM PORT module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmPort - FM PORT module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_PORT_Free(t_Handle h_FmPort);
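++
++/* Usage sketch (illustrative only; 'portBaseAddr', 'h_Fm', the FQIDs and
++ * 'handle_error()' are hypothetical): bring-up of an Rx port. Port type
++ * values are defined in fm_ext.h.
++ *
++ *     t_FmPortParams portParams;
++ *     t_Handle       h_FmPort;
++ *
++ *     memset(&portParams, 0, sizeof(portParams));
++ *     portParams.baseAddr = portBaseAddr;       // mapped port registers
++ *     portParams.h_Fm     = h_Fm;               // initialized FM object
++ *     portParams.portType = e_FM_PORT_TYPE_RX;  // see fm_ext.h
++ *     portParams.portId   = 0;
++ *     portParams.specificParams.rxParams.errFqid     = 0x100;
++ *     portParams.specificParams.rxParams.dfltFqid    = 0x101;
++ *     portParams.specificParams.rxParams.liodnOffset = 0;
++ *     // ... fill specificParams.rxParams.extBufPools ...
++ *
++ *     h_FmPort = FM_PORT_Config(&portParams);
++ *     if (!h_FmPort || (FM_PORT_Init(h_FmPort) != E_OK))
++ *         handle_error();
++ *     // ... use the port; when done:
++ *     FM_PORT_Free(h_FmPort);
++ */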
++
++
++/**************************************************************************//**
++ @Group FM_PORT_advanced_init_grp FM Port Advanced Configuration Unit
++
++ @Description Configuration functions used to change default values.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description enum for defining QM frame dequeue
++*//***************************************************************************/
++typedef enum e_FmPortDeqType {
++ e_FM_PORT_DEQ_TYPE1, /**< Dequeue from the SP channel - with priority precedence,
++ and Intra-Class Scheduling respected. */
++ e_FM_PORT_DEQ_TYPE2, /**< Dequeue from the SP channel - with active FQ precedence,
++ and Intra-Class Scheduling respected. */
++ e_FM_PORT_DEQ_TYPE3 /**< Dequeue from the SP channel - with active FQ precedence,
++ and override Intra-Class Scheduling */
++} e_FmPortDeqType;
++
++/**************************************************************************//**
++ @Description enum for defining QM frame dequeue prefetch options
++*//***************************************************************************/
++typedef enum e_FmPortDeqPrefetchOption {
++ e_FM_PORT_DEQ_NO_PREFETCH, /**< QMI performs a dequeue action for a single frame
++ only when a dedicated portId TNUM is waiting. */
++ e_FM_PORT_DEQ_PARTIAL_PREFETCH, /**< QMI performs a dequeue action for 3 frames when
++ one dedicated portId TNUM is waiting. */
++ e_FM_PORT_DEQ_FULL_PREFETCH /**< QMI performs a dequeue action for 3 frames when
++ no dedicated portId TNUMs are waiting. */
++
++} e_FmPortDeqPrefetchOption;
++
++/**************************************************************************//**
++ @Description enum for defining port default color
++*//***************************************************************************/
++typedef enum e_FmPortColor {
++ e_FM_PORT_COLOR_GREEN, /**< Default port color is green */
++ e_FM_PORT_COLOR_YELLOW, /**< Default port color is yellow */
++ e_FM_PORT_COLOR_RED, /**< Default port color is red */
++ e_FM_PORT_COLOR_OVERRIDE /**< Ignore color */
++} e_FmPortColor;
++
++/**************************************************************************//**
++ @Description An enum for defining Dual Tx rate limiting scale-down
++*//***************************************************************************/
++typedef enum e_FmPortDualRateLimiterScaleDown {
++ e_FM_PORT_DUAL_RATE_LIMITER_NONE = 0, /**< Use only single rate limiter */
++ e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_2, /**< Divide high rate limiter by 2 */
++ e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_4, /**< Divide high rate limiter by 4 */
++ e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8 /**< Divide high rate limiter by 8 */
++} e_FmPortDualRateLimiterScaleDown;
++
++
++/**************************************************************************//**
++ @Description A structure for defining FM port resources
++*//***************************************************************************/
++typedef struct t_FmPortRsrc {
++ uint32_t num; /**< Committed required resource */
++ uint32_t extra; /**< Extra (not committed) required resource */
++} t_FmPortRsrc;
++
++/**************************************************************************//**
++ @Description A structure for defining observed pool depletion
++*//***************************************************************************/
++typedef struct t_FmPortObservedBufPoolDepletion {
++ t_FmBufPoolDepletion poolDepletionParams;/**< parameters to define pool depletion */
++ t_FmExtPools poolsParams; /**< Which external buffer pools are observed
++ (up to FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS),
++ and their sizes. */
++} t_FmPortObservedBufPoolDepletion;
++
++/**************************************************************************//**
++ @Description A structure for defining Tx rate limiting
++*//***************************************************************************/
++typedef struct t_FmPortRateLimit {
++ uint16_t maxBurstSize; /**< in KBytes for Tx ports, in frames
++ for OP ports. (note that
++ for early chips burst size is
++ rounded up to a multiple of 1000 frames).*/
++ uint32_t rateLimit; /**< in Kb/sec for Tx ports, in frame/sec for
++ OP ports. Rate limit refers to
++ data rate (rather than line rate). */
++ e_FmPortDualRateLimiterScaleDown rateLimitDivider; /**< For OP ports only. Not-valid
++ for some earlier chip revisions */
++} t_FmPortRateLimit;
++
++/**************************************************************************//**
++ @Description A structure for defining the parameters of
++ the Rx port performance counters
++*//***************************************************************************/
++typedef struct t_FmPortPerformanceCnt {
++ uint8_t taskCompVal; /**< Task compare value */
++ uint8_t queueCompVal; /**< Rx queue/Tx confirm queue compare
++ value (unused for H/O) */
++ uint8_t dmaCompVal; /**< Dma compare value */
++ uint32_t fifoCompVal; /**< Fifo compare value (in bytes) */
++} t_FmPortPerformanceCnt;
++
++
++/**************************************************************************//**
++ @Description A structure for defining the sizes of the Deep Sleep
++ Auto Response tables
++*//***************************************************************************/
++typedef struct t_FmPortDsarTablesSizes
++{
++ uint16_t maxNumOfArpEntries;
++ uint16_t maxNumOfEchoIpv4Entries;
++ uint16_t maxNumOfNdpEntries;
++ uint16_t maxNumOfEchoIpv6Entries;
++ uint16_t maxNumOfSnmpIPV4Entries;
++ uint16_t maxNumOfSnmpIPV6Entries;
++ uint16_t maxNumOfSnmpOidEntries;
++ uint16_t maxNumOfSnmpOidChar; /* total number of characters needed for the SNMP table */
++
++ uint16_t maxNumOfIpProtFiltering;
++ uint16_t maxNumOfTcpPortFiltering;
++ uint16_t maxNumOfUdpPortFiltering;
++} t_FmPortDsarTablesSizes;
++
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDsarSupport
++
++ @Description This function allocates the amount of MURAM needed for
++ the given maximum number of Deep Sleep Auto Response entries.
++ It calculates all the MURAM needed for auto response,
++ including the necessary common structures.
++
++
++ @Param[in] h_FmPortRx A handle to a FM Port module.
++ @Param[in] params A pointer to a structure containing the maximum
++ sizes of the auto response tables
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDsarSupport(t_Handle h_FmPortRx, t_FmPortDsarTablesSizes *params);
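++
++/* Usage sketch (illustrative only; the table sizes below are arbitrary
++ * example values and 'handle_error()' is hypothetical): real sizes depend
++ * on the application's Deep Sleep Auto Response needs.
++ *
++ *     t_FmPortDsarTablesSizes dsarSizes;
++ *
++ *     memset(&dsarSizes, 0, sizeof(dsarSizes));
++ *     dsarSizes.maxNumOfArpEntries      = 8;
++ *     dsarSizes.maxNumOfEchoIpv4Entries = 8;
++ *     dsarSizes.maxNumOfNdpEntries      = 8;
++ *     dsarSizes.maxNumOfEchoIpv6Entries = 8;
++ *
++ *     if (FM_PORT_ConfigDsarSupport(h_FmPortRx, &dsarSizes) != E_OK)
++ *         handle_error();
++ */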
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigNumOfOpenDmas
++
++ @Description Calling this routine changes the max number of open DMAs
++ available for this port. It changes this parameter in the
++ internal driver data base from its default configuration
++ [OP: 1]
++ [1G-RX, 1G-TX: 1 (+1)]
++ [10G-RX, 10G-TX: 8 (+8)]
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_OpenDmas A pointer to a structure of parameters defining
++ the open DMA allocation.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_OpenDmas);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigNumOfTasks
++
++ @Description Calling this routine changes the max number of tasks
++ available for this port. It changes this parameter in the
++ internal driver data base from its default configuration
++ [OP: 1]
++ [1G-RX, 1G-TX: 3 (+2)]
++ [10G-RX, 10G-TX: 16 (+8)]
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_NumOfTasks A pointer to a structure of parameters defining
++ the tasks allocation.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigSizeOfFifo
++
++ @Description Calling this routine changes the max FIFO size configured for this port.
++
++ This function changes the internal driver data base from its
++ default configuration. Please refer to the driver's User Guide for
++ information on default FIFO sizes in the various devices.
++ [OP: 2KB]
++ [1G-RX, 1G-TX: 11KB]
++ [10G-RX, 10G-TX: 12KB]
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_SizeOfFifo A pointer to a structure of parameters defining
++ the FIFO allocation.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo);
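++
++/* Usage sketch (illustrative only; the numbers below are arbitrary
++ * examples, not recommended values): tuning port resources between
++ * FM_PORT_Config() and FM_PORT_Init().
++ *
++ *     t_FmPortRsrc numOfTasks = { .num = 4, .extra = 2 };
++ *     t_FmPortRsrc openDmas   = { .num = 2, .extra = 1 };
++ *     t_FmPortRsrc sizeOfFifo = { .num = 8 * 1024, .extra = 0 };
++ *
++ *     FM_PORT_ConfigNumOfTasks(h_FmPort, &numOfTasks);
++ *     FM_PORT_ConfigNumOfOpenDmas(h_FmPort, &openDmas);
++ *     FM_PORT_ConfigSizeOfFifo(h_FmPort, &sizeOfFifo);
++ */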
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDeqHighPriority
++
++ @Description Calling this routine changes the dequeue priority in the
++ internal driver data base from its default configuration
++ 1G: [DEFAULT_PORT_deqHighPriority_1G]
++ 10G: [DEFAULT_PORT_deqHighPriority_10G]
++
++ May be used for Non-Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] highPri TRUE to select high priority, FALSE for normal operation.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDeqHighPriority(t_Handle h_FmPort, bool highPri);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDeqType
++
++ @Description Calling this routine changes the dequeue type parameter in the
++ internal driver data base from its default configuration
++ [DEFAULT_PORT_deqType].
++
++ May be used for Non-Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] deqType According to QM definition.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDeqType(t_Handle h_FmPort, e_FmPortDeqType deqType);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDeqPrefetchOption
++
++ @Description Calling this routine changes the dequeue prefetch option parameter in the
++ internal driver data base from its default configuration
++ [DEFAULT_PORT_deqPrefetchOption]
++ Note: Available for some chips only
++
++ May be used for Non-Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] deqPrefetchOption New option
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDeqPrefetchOption(t_Handle h_FmPort, e_FmPortDeqPrefetchOption deqPrefetchOption);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDeqByteCnt
++
++ @Description Calling this routine changes the dequeue byte count parameter in
++ the internal driver data base from its default configuration
++ 1G:[DEFAULT_PORT_deqByteCnt_1G].
++ 10G:[DEFAULT_PORT_deqByteCnt_10G].
++
++ May be used for Non-Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] deqByteCnt New byte count
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDeqByteCnt(t_Handle h_FmPort, uint16_t deqByteCnt);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigBufferPrefixContent
++
++ @Description Defines the structure, size and content of the application buffer.
++ The prefix precedes the frame data in the buffer.
++ In Tx ports, if 'passPrsResult' is set, the application
++ should place the parse result at its offset in the prefix.
++ In Rx ports, the FM reserves the first 'privDataSize' bytes,
++ then, depending on 'passPrsResult' and 'passTimeStamp',
++ copies the parse result and timestamp, and then the packet
++ itself (in this order), into the application buffer at the
++ corresponding offsets.
++ Calling this routine changes the buffer margins definitions
++ in the internal driver data base from its default
++ configuration: Data size: [DEFAULT_PORT_bufferPrefixContent_privDataSize]
++ Pass Parser result: [DEFAULT_PORT_bufferPrefixContent_passPrsResult].
++ Pass timestamp: [DEFAULT_PORT_bufferPrefixContent_passTimeStamp].
++
++ May be used for all ports
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in,out] p_FmBufferPrefixContent A structure of parameters describing the
++ structure of the buffer.
++ Out parameter: Start margin - offset
++ of data from start of external buffer.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigBufferPrefixContent(t_Handle h_FmPort,
++ t_FmBufferPrefixContent *p_FmBufferPrefixContent);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigCheksumLastBytesIgnore
++
++ @Description Calling this routine changes the number of last bytes to ignore
++ in the checksum calculation, in the internal driver data base,
++ from its default configuration [DEFAULT_PORT_cheksumLastBytesIgnore]
++
++ May be used by Tx & Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] cheksumLastBytesIgnore New value
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigCheksumLastBytesIgnore(t_Handle h_FmPort, uint8_t cheksumLastBytesIgnore);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigCutBytesFromEnd
++
++ @Description Calling this routine changes the number of bytes to cut from a
++ frame's end parameter in the internal driver data base
++ from its default configuration [DEFAULT_PORT_cutBytesFromEnd]
++ Note that if the result of (frame length before chop - cutBytesFromEnd) is
++ less than 14 bytes, the chop operation is not executed.
++
++ May be used for Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] cutBytesFromEnd New value
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigCutBytesFromEnd(t_Handle h_FmPort, uint8_t cutBytesFromEnd);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigPoolDepletion
++
++ @Description Calling this routine enables pause frame generation depending on the
++ depletion status of BM pools. It also defines the conditions to activate
++ this functionality. By default, this functionality is disabled.
++
++ May be used for Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_BufPoolDepletion A structure of pool depletion parameters
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigPoolDepletion(t_Handle h_FmPort, t_FmBufPoolDepletion *p_BufPoolDepletion);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigObservedPoolDepletion
++
++ @Description Calling this routine enables a mechanism to stop port enqueue
++ depending on the depletion status of selected BM pools.
++ It also defines the conditions to activate
++ this functionality. By default, this functionality is disabled.
++
++ Note: Available for some chips only
++
++ May be used for OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_FmPortObservedBufPoolDepletion A structure of parameters for pool depletion.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigObservedPoolDepletion(t_Handle h_FmPort,
++ t_FmPortObservedBufPoolDepletion *p_FmPortObservedBufPoolDepletion);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigExtBufPools
++
++ @Description This routine should be called for OP ports
++ that internally use BM buffer pools. In such cases, e.g. for fragmentation and
++ re-assembly, the FM needs new BM buffers. By calling this routine the user
++ specifies the BM buffer pools that should be used.
++
++ Note: Available for some chips only
++
++ May be used for OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_FmExtPools A structure of parameters for the external pools.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigExtBufPools(t_Handle h_FmPort, t_FmExtPools *p_FmExtPools);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigBackupPools
++
++ @Description Calling this routine allows the configuration of some of the BM pools
++ defined for this port as backup pools.
++ A pool configured to be a backup pool will be used only if all other
++ enabled non-backup pools are depleted.
++
++ May be used for Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_FmPortBackupBmPools An array of pool id's. All pools specified here will
++ be defined as backup pools.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigBackupPools(t_Handle h_FmPort, t_FmBackupBmPools *p_FmPortBackupBmPools);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigFrmDiscardOverride
++
++ @Description Calling this routine changes the error frames destination parameter
++ in the internal driver data base from its default configuration:
++ override = [DEFAULT_PORT_frmDiscardOverride]
++
++ May be used for Rx and OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] override TRUE to override discarding of error frames and
++ enqueueing them to error queue.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigFrmDiscardOverride(t_Handle h_FmPort, bool override);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigErrorsToDiscard
++
++ @Description Calling this routine changes the behaviour-on-error parameter
++ in the internal driver data base from its default configuration:
++ [DEFAULT_PORT_errorsToDiscard].
++ If a requested error was previously defined as "ErrorsToEnqueue", its
++ definition will change and the frame will be discarded.
++ Errors that were defined neither as "ErrorsToEnqueue" nor as
++ "ErrorsToDiscard" will be forwarded to the CPU.
++
++ May be used for Rx and OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] errs A list of errors to discard
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigErrorsToDiscard(t_Handle h_FmPort, fmPortFrameErrSelect_t errs);
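++
++/* Usage sketch (illustrative only; 'handle_error()' is hypothetical):
++ * discard oversized and physical-error frames on an Rx port, between
++ * FM_PORT_Config() and FM_PORT_Init().
++ *
++ *     fmPortFrameErrSelect_t errs = FM_PORT_FRM_ERR_SIZE |
++ *                                   FM_PORT_FRM_ERR_PHYSICAL;
++ *
++ *     if (FM_PORT_ConfigErrorsToDiscard(h_FmPort, errs) != E_OK)
++ *         handle_error();
++ */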
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDmaSwapData
++
++ @Description Calling this routine changes the DMA swap data parameter
++ in the internal driver data base from its default
++ configuration [DEFAULT_PORT_dmaSwapData]
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] swapData New selection
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDmaSwapData(t_Handle h_FmPort, e_FmDmaSwapOption swapData);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDmaIcCacheAttr
++
++ @Description Calling this routine changes the internal context cache
++ attribute parameter in the internal driver data base
++ from its default configuration [DEFAULT_PORT_dmaIntContextCacheAttr]
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] intContextCacheAttr New selection
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDmaIcCacheAttr(t_Handle h_FmPort, e_FmDmaCacheOption intContextCacheAttr);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDmaHdrAttr
++
++ @Description Calling this routine changes the header cache
++ attribute parameter in the internal driver data base
++ from its default configuration [DEFAULT_PORT_dmaHeaderCacheAttr]
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] headerCacheAttr New selection
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDmaHdrAttr(t_Handle h_FmPort, e_FmDmaCacheOption headerCacheAttr);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDmaScatterGatherAttr
++
++ @Description Calling this routine changes the scatter gather cache
++ attribute parameter in the internal driver data base
++ from its default configuration [DEFAULT_PORT_dmaScatterGatherCacheAttr]
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] scatterGatherCacheAttr New selection
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDmaScatterGatherAttr(t_Handle h_FmPort, e_FmDmaCacheOption scatterGatherCacheAttr);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDmaWriteOptimize
++
++ @Description Calling this routine changes the write optimization
++ parameter in the internal driver data base
++ from its default configuration: By default optimize = [DEFAULT_PORT_dmaWriteOptimize].
++ Note:
++
++ 1. For head optimization, data alignment must be >= 16 (supported by default).
++
++ 2. For tail optimization, the optimization is performed by extending the write transaction
++ of the frame payload at the tail as needed to achieve optimal bus transfers, so that the last write
++ is extended to a 16/64-byte aligned block (chip dependent).
++
++ Relevant for non-Tx port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] optimize TRUE to enable optimization, FALSE for normal operation
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDmaWriteOptimize(t_Handle h_FmPort, bool optimize);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigNoScatherGather
++
++ @Description Calling this routine changes the noScatherGather parameter in the internal driver data base
++ from its default configuration.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] noScatherGather (TRUE - frame is discarded if it cannot be stored in a single buffer,
++ FALSE - frame can be stored in scatter gather (S/G) format).
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigNoScatherGather(t_Handle h_FmPort, bool noScatherGather);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDfltColor
++
++ @Description Calling this routine changes the internal default color parameter
++ in the internal driver data base
++ from its default configuration [DEFAULT_PORT_color]
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] color New selection
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDfltColor(t_Handle h_FmPort, e_FmPortColor color);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigSyncReq
++
++ @Description Calling this routine changes the synchronization attribute parameter
++ in the internal driver data base from its default configuration:
++ syncReq = [DEFAULT_PORT_syncReq]
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] syncReq TRUE to request synchronization, FALSE otherwise.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigSyncReq(t_Handle h_FmPort, bool syncReq);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigForwardReuseIntContext
++
++ @Description This routine is relevant for Rx ports that are routed to an OP port.
++ It changes the internal context reuse option in the internal
++ driver data base from its default configuration:
++ reuse = [DEFAULT_PORT_forwardIntContextReuse]
++
++ May be used for Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] reuse TRUE to reuse internal context on frames
++ forwarded to OP port.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigForwardReuseIntContext(t_Handle h_FmPort, bool reuse);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigDontReleaseTxBufToBM
++
++ @Description This routine should be called if no Tx confirmation
++ is done, and yet buffers should not be released to the BM.
++ Normally, buffers are returned using the Tx confirmation
++ process. When Tx confirmation is not used (defFqid=0),
++ buffers are typically released to the BM. This routine
++ may be called to avoid this behavior and not release the
++ buffers.
++
++ May be used for Tx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigDontReleaseTxBufToBM(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigIMMaxRxBufLength
++
++ @Description Changes the maximum receive buffer length from its default
++ configuration: the data buffer size rounded down to the
++ closest power of 2.
++
++ The maximum receive buffer length directly affects the structure
++ of received frames (single- or multi-buffered) and the performance
++ of both the FM and the driver.
++
++ The selection between single- or multi-buffered frames should be
++ done according to the characteristics of the specific application.
++ The recommended mode is to use a single data buffer per packet,
++ as this mode provides the best performance. However, the user can
++ select to use multiple data buffers per packet.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] newVal Maximum receive buffer length (in bytes).
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++ This routine is to be used only if Independent-Mode is enabled.
++*//***************************************************************************/
++t_Error FM_PORT_ConfigIMMaxRxBufLength(t_Handle h_FmPort, uint16_t newVal);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigIMRxBdRingLength
++
++ @Description Changes the receive BD ring length from its default
++ configuration:[DEFAULT_PORT_rxBdRingLength]
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] newVal The desired BD ring length.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++ This routine is to be used only if Independent-Mode is enabled.
++*//***************************************************************************/
++t_Error FM_PORT_ConfigIMRxBdRingLength(t_Handle h_FmPort, uint16_t newVal);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigIMTxBdRingLength
++
++ @Description Changes the transmit BD ring length from its default
++ configuration:[DEFAULT_PORT_txBdRingLength]
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] newVal The desired BD ring length.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++ This routine is to be used only if Independent-Mode is enabled.
++*//***************************************************************************/
++t_Error FM_PORT_ConfigIMTxBdRingLength(t_Handle h_FmPort, uint16_t newVal);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigIMFmanCtrlExternalStructsMemory
++
++ @Description Configures memory partition and attributes for FMan-Controller
++ data structures (e.g. BD rings).
++ Calling this routine changes the internal driver data base
++ from its default configuration
++ [DEFAULT_PORT_ImfwExtStructsMemId, DEFAULT_PORT_ImfwExtStructsMemAttr].
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] memId Memory partition ID.
++ @Param[in] memAttributes Memory attributes mask (a combination of MEMORY_ATTR_x flags).
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_PORT_ConfigIMFmanCtrlExternalStructsMemory(t_Handle h_FmPort,
++ uint8_t memId,
++ uint32_t memAttributes);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigIMPolling
++
++ @Description Changes the Rx flow from interrupt driven (default) to polling.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++ This routine is to be used only if Independent-Mode is enabled.
++*//***************************************************************************/
++t_Error FM_PORT_ConfigIMPolling(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigMaxFrameLength
++
++ @Description Changes the definition of the max size of frame that should be
++ transmitted/received on this port from its default value [DEFAULT_PORT_maxFrameLength].
++ This parameter is used for confirmation of the minimum Fifo
++ size calculations and only for Tx ports or ports working in
++ independent mode. This should be larger than the maximum possible
++ MTU that will be used for this port (i.e. its MAC).
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] length Max size of frame
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++ This routine is to be used only if Independent-Mode is enabled.
++*//***************************************************************************/
++t_Error FM_PORT_ConfigMaxFrameLength(t_Handle h_FmPort, uint16_t length);
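++
++/* Illustrative sketch (hypothetical handle and value): raising the maximum
++ * frame length, e.g. for jumbo frames; per the description above, it should
++ * exceed the largest MTU used on this port's MAC and must be called before
++ * FM_PORT_Init().
++ *
++ *     t_Error err = FM_PORT_ConfigMaxFrameLength(h_FmPort, 9600);
++ *     if (err != E_OK)
++ *         return err;
++ */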
++
++/**************************************************************************//*
++ @Function FM_PORT_ConfigTxFifoMinFillLevel
++
++ @Description Calling this routine changes the fifo minimum
++ fill level parameter in the internal driver data base
++ from its default configuration [DEFAULT_PORT_txFifoMinFillLevel]
++
++ May be used for Tx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] minFillLevel New value
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigTxFifoMinFillLevel(t_Handle h_FmPort, uint32_t minFillLevel);
++
++/**************************************************************************//*
++ @Function FM_PORT_ConfigFifoDeqPipelineDepth
++
++ @Description Calling this routine changes the fifo dequeue
++                pipeline depth parameter in the internal driver data base
++                from its default configuration: 1G ports: [DEFAULT_PORT_fifoDeqPipelineDepth_1G],
++ 10G port: [DEFAULT_PORT_fifoDeqPipelineDepth_10G],
++ OP port: [DEFAULT_PORT_fifoDeqPipelineDepth_OH]
++
++ May be used for Tx/OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] deqPipelineDepth New value
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigFifoDeqPipelineDepth(t_Handle h_FmPort, uint8_t deqPipelineDepth);
++
++/**************************************************************************//*
++ @Function FM_PORT_ConfigTxFifoLowComfLevel
++
++ @Description Calling this routine changes the fifo low comfort level
++                parameter in the internal driver data base
++ from its default configuration [DEFAULT_PORT_txFifoLowComfLevel]
++
++ May be used for Tx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] fifoLowComfLevel New value
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigTxFifoLowComfLevel(t_Handle h_FmPort, uint32_t fifoLowComfLevel);
++
++/**************************************************************************//*
++ @Function FM_PORT_ConfigRxFifoThreshold
++
++ @Description Calling this routine changes the threshold of the FIFO
++ fill level parameter in the internal driver data base
++ from its default configuration [DEFAULT_PORT_rxFifoThreshold]
++
++ If the total number of buffers which are
++ currently in use and associated with the
++                specific RX port exceeds this threshold, the
++ BMI will signal the MAC to send a pause frame
++ over the link.
++
++ May be used for Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] fifoThreshold New value
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigRxFifoThreshold(t_Handle h_FmPort, uint32_t fifoThreshold);
++
++/**************************************************************************//*
++ @Function FM_PORT_ConfigRxFifoPriElevationLevel
++
++ @Description Calling this routine changes the priority elevation level
++ parameter in the internal driver data base from its default
++ configuration [DEFAULT_PORT_rxFifoPriElevationLevel]
++
++ If the total number of buffers which are currently in use and
++                associated with the specific RX port exceeds the amount specified
++ in priElevationLevel, BMI will signal the main FM's DMA to
++ elevate the FM priority on the system bus.
++
++ May be used for Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] priElevationLevel New value
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigRxFifoPriElevationLevel(t_Handle h_FmPort, uint32_t priElevationLevel);
++
++#ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
++/**************************************************************************//*
++ @Function FM_PORT_ConfigBCBWorkaround
++
++ @Description Configures BCB errata workaround.
++
++ When BCB errata is applicable, the workaround is always
++                performed by the FM Controller. Thus, this function doesn't
++                actually enable the errata workaround, but rather allows the
++                driver to perform the adjustments required by the workaround's
++                execution in the FM controller.
++
++ Applying BCB workaround also configures FM_PORT_FRM_ERR_PHYSICAL
++ errors to be discarded. Thus FM_PORT_FRM_ERR_PHYSICAL can't be
++ set by FM_PORT_SetErrorsRoute() function.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigBCBWorkaround(t_Handle h_FmPort);
++#endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//*
++ @Function FM_PORT_ConfigInternalBuffOffset
++
++ @Description Configures internal buffer offset.
++
++ May be used for Rx and OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] val New value
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ConfigInternalBuffOffset(t_Handle h_FmPort, uint8_t val);
++#endif /* (DPAA_VERSION >= 11) */
++
++/** @} */ /* end of FM_PORT_advanced_init_grp group */
++/** @} */ /* end of FM_PORT_init_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_PORT_runtime_control_grp FM Port Runtime Control Unit
++
++ @Description FM Port Runtime control unit API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description enum for defining FM Port counters
++*//***************************************************************************/
++typedef enum e_FmPortCounters {
++ e_FM_PORT_COUNTERS_CYCLE, /**< BMI performance counter */
++ e_FM_PORT_COUNTERS_TASK_UTIL, /**< BMI performance counter */
++ e_FM_PORT_COUNTERS_QUEUE_UTIL, /**< BMI performance counter */
++ e_FM_PORT_COUNTERS_DMA_UTIL, /**< BMI performance counter */
++ e_FM_PORT_COUNTERS_FIFO_UTIL, /**< BMI performance counter */
++ e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION, /**< BMI Rx only performance counter */
++ e_FM_PORT_COUNTERS_FRAME, /**< BMI statistics counter */
++ e_FM_PORT_COUNTERS_DISCARD_FRAME, /**< BMI statistics counter */
++ e_FM_PORT_COUNTERS_DEALLOC_BUF, /**< BMI deallocate buffer statistics counter */
++ e_FM_PORT_COUNTERS_RX_BAD_FRAME, /**< BMI Rx only statistics counter */
++ e_FM_PORT_COUNTERS_RX_LARGE_FRAME, /**< BMI Rx only statistics counter */
++ e_FM_PORT_COUNTERS_RX_FILTER_FRAME, /**< BMI Rx & OP only statistics counter */
++ e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR, /**< BMI Rx, OP & HC only statistics counter */
++ e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD, /**< BMI Rx, OP & HC statistics counter */
++ e_FM_PORT_COUNTERS_PREPARE_TO_ENQUEUE_COUNTER, /**< BMI Rx, OP & HC only statistics counter */
++ e_FM_PORT_COUNTERS_WRED_DISCARD, /**< BMI OP & HC only statistics counter */
++ e_FM_PORT_COUNTERS_LENGTH_ERR, /**< BMI non-Rx statistics counter */
++ e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT, /**< BMI non-Rx statistics counter */
++ e_FM_PORT_COUNTERS_DEQ_TOTAL, /**< QMI total QM dequeues counter */
++ e_FM_PORT_COUNTERS_ENQ_TOTAL, /**< QMI total QM enqueues counter */
++ e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI counter */
++ e_FM_PORT_COUNTERS_DEQ_CONFIRM /**< QMI counter */
++} e_FmPortCounters;
++
++typedef struct t_FmPortBmiStats {
++ uint32_t cntCycle;
++ uint32_t cntTaskUtil;
++ uint32_t cntQueueUtil;
++ uint32_t cntDmaUtil;
++ uint32_t cntFifoUtil;
++ uint32_t cntRxPauseActivation;
++ uint32_t cntFrame;
++ uint32_t cntDiscardFrame;
++ uint32_t cntDeallocBuf;
++ uint32_t cntRxBadFrame;
++ uint32_t cntRxLargeFrame;
++ uint32_t cntRxFilterFrame;
++ uint32_t cntRxListDmaErr;
++ uint32_t cntRxOutOfBuffersDiscard;
++ uint32_t cntWredDiscard;
++ uint32_t cntLengthErr;
++ uint32_t cntUnsupportedFormat;
++} t_FmPortBmiStats;
++
++/**************************************************************************//**
++ @Description   A structure for defining the congestion groups relevant
++                to this port.
++*//***************************************************************************/
++typedef struct t_FmPortCongestionGrps {
++ uint16_t numOfCongestionGrpsToConsider; /**< The number of required CGs
++ to define the size of the following array */
++ uint8_t congestionGrpsToConsider[FM_PORT_NUM_OF_CONGESTION_GRPS];
++ /**< An array of CG indexes;
++ Note that the size of the array should be
++ 'numOfCongestionGrpsToConsider'. */
++#if (DPAA_VERSION >= 11)
++ bool pfcPrioritiesEn[FM_PORT_NUM_OF_CONGESTION_GRPS][FM_MAX_NUM_OF_PFC_PRIORITIES];
++ /**< a matrix that represents the map between the CG ids
++                                defined in 'congestionGrpsToConsider' to the priorities
++                                mapping array. */
++#endif /* (DPAA_VERSION >= 11) */
++} t_FmPortCongestionGrps;
++
++/**************************************************************************//**
++ @Description Structure for Deep Sleep Auto Response ARP Entry
++*//***************************************************************************/
++typedef struct t_FmPortDsarArpEntry
++{
++ uint32_t ipAddress;
++ uint8_t mac[6];
++ bool isVlan;
++ uint16_t vid;
++} t_FmPortDsarArpEntry;
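++
++/* Illustrative sketch (hypothetical addresses): a single DSAR ARP table
++ * entry for a VLAN-tagged interface.
++ *
++ *     t_FmPortDsarArpEntry arpEntry = {
++ *         .ipAddress = 0xC0A80102,                        // 192.168.1.2
++ *         .mac       = { 0x00, 0x04, 0x9F, 0x01, 0x02, 0x03 },
++ *         .isVlan    = TRUE,
++ *         .vid       = 100
++ *     };
++ */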
++
++/**************************************************************************//**
++ @Description Structure for Deep Sleep Auto Response ARP info
++*//***************************************************************************/
++typedef struct t_FmPortDsarArpInfo
++{
++ uint8_t tableSize;
++ t_FmPortDsarArpEntry *p_AutoResTable;
++    bool        enableConflictDetection;      /* when TRUE, Conflict Detection is performed and the host is woken if needed */
++} t_FmPortDsarArpInfo;
++
++/**************************************************************************//**
++ @Description Structure for Deep Sleep Auto Response NDP Entry
++*//***************************************************************************/
++typedef struct t_FmPortDsarNdpEntry
++{
++ uint32_t ipAddress[4];
++ uint8_t mac[6];
++ bool isVlan;
++ uint16_t vid;
++} t_FmPortDsarNdpEntry;
++
++/**************************************************************************//**
++ @Description Structure for Deep Sleep Auto Response NDP info
++*//***************************************************************************/
++typedef struct t_FmPortDsarNdpInfo
++{
++ uint32_t multicastGroup;
++
++ uint8_t tableSizeAssigned;
++    t_FmPortDsarNdpEntry  *p_AutoResTableAssigned;  /* This list refers to solicitation IP addresses.
++                                                       Note that all IP addresses must be from the same multicast group.
++                                                       This is checked, and if not, the operation will fail. */
++ uint8_t tableSizeTmp;
++    t_FmPortDsarNdpEntry  *p_AutoResTableTmp;       /* This list refers to temp IP addresses.
++                                                       Note that all temp IP addresses must be from the same multicast group.
++                                                       This is checked, and if not, the operation will fail. */
++
++    bool                  enableConflictDetection;  /* when TRUE, Conflict Detection is performed and the host is woken if needed */
++
++} t_FmPortDsarNdpInfo;
++
++/**************************************************************************//**
++ @Description Structure for Deep Sleep Auto Response ICMPV4 info
++*//***************************************************************************/
++typedef struct t_FmPortDsarEchoIpv4Info
++{
++ uint8_t tableSize;
++ t_FmPortDsarArpEntry *p_AutoResTable;
++} t_FmPortDsarEchoIpv4Info;
++
++/**************************************************************************//**
++ @Description Structure for Deep Sleep Auto Response ICMPV6 info
++*//***************************************************************************/
++typedef struct t_FmPortDsarEchoIpv6Info
++{
++ uint8_t tableSize;
++ t_FmPortDsarNdpEntry *p_AutoResTable;
++} t_FmPortDsarEchoIpv6Info;
++
++/**************************************************************************//**
++@Description Deep Sleep Auto Response SNMP OIDs table entry
++
++*//***************************************************************************/
++typedef struct {
++ uint16_t oidSize;
++ uint8_t *oidVal; /* only the oid string */
++ uint16_t resSize;
++ uint8_t *resVal; /* resVal will be the entire reply,
++ i.e. "Type|Length|Value" */
++} t_FmPortDsarOidsEntry;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response SNMP IPv4 Addresses Table Entry
++ Refer to the FMan Controller spec for more details.
++*//***************************************************************************/
++typedef struct
++{
++ uint32_t ipv4Addr; /*!< 32 bit IPv4 Address. */
++ bool isVlan;
++ uint16_t vid; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
++ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
++} t_FmPortDsarSnmpIpv4AddrTblEntry;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response SNMP IPv6 Addresses Table Entry
++ Refer to the FMan Controller spec for more details.
++*//***************************************************************************/
++typedef struct
++{
++ uint32_t ipv6Addr[4]; /*!< 4 * 32 bit IPv6 Address. */
++ bool isVlan;
++ uint16_t vid; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
++ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
++} t_FmPortDsarSnmpIpv6AddrTblEntry;
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response SNMP Descriptor
++
++*//***************************************************************************/
++typedef struct
++{
++ uint16_t control; /**< Control bits [0-15]. */
++ uint16_t maxSnmpMsgLength; /**< Maximal allowed SNMP message length. */
++ uint16_t numOfIpv4Addresses; /**< Number of entries in IPv4 addresses table. */
++ uint16_t numOfIpv6Addresses; /**< Number of entries in IPv6 addresses table. */
++ t_FmPortDsarSnmpIpv4AddrTblEntry *p_Ipv4AddrTbl; /**< Pointer to IPv4 addresses table. */
++ t_FmPortDsarSnmpIpv6AddrTblEntry *p_Ipv6AddrTbl; /**< Pointer to IPv6 addresses table. */
++ uint8_t *p_RdOnlyCommunityStr; /**< Pointer to the Read Only Community String. */
++ uint8_t *p_RdWrCommunityStr; /**< Pointer to the Read Write Community String. */
++ t_FmPortDsarOidsEntry *p_OidsTbl; /**< Pointer to OIDs table. */
++ uint32_t oidsTblSize; /**< Number of entries in OIDs table. */
++} t_FmPortDsarSnmpInfo;
++
++/**************************************************************************//**
++ @Description Structure for Deep Sleep Auto Response filtering Entry
++*//***************************************************************************/
++typedef struct t_FmPortDsarFilteringEntry
++{
++ uint16_t srcPort;
++ uint16_t dstPort;
++ uint16_t srcPortMask;
++ uint16_t dstPortMask;
++} t_FmPortDsarFilteringEntry;
++
++/**************************************************************************//**
++ @Description Structure for Deep Sleep Auto Response filtering info
++*//***************************************************************************/
++typedef struct t_FmPortDsarFilteringInfo
++{
++ /* IP protocol filtering parameters */
++ uint8_t ipProtTableSize;
++ uint8_t *p_IpProtTablePtr;
++    bool     ipProtPassOnHit;          /* when TRUE, a miss in the table will cause the packet to be dropped;
++                                          a hit will pass the packet to the UDP/TCP filters if needed, and if
++                                          not, to the classification tree. If the classification tree passes
++                                          the packet to a queue, it will cause a wake interrupt.
++                                          When FALSE, it is the other way around. */
++ /* UDP port filtering parameters */
++ uint8_t udpPortsTableSize;
++ t_FmPortDsarFilteringEntry *p_UdpPortsTablePtr;
++    bool     udpPortPassOnHit;         /* when TRUE, a miss in the table will cause the packet to be dropped;
++                                          a hit will pass the packet to the classification tree.
++                                          If the classification tree passes the packet to a queue, it
++                                          will cause a wake interrupt.
++                                          When FALSE, it is the other way around. */
++ /* TCP port filtering parameters */
++ uint16_t tcpFlagsMask;
++ uint8_t tcpPortsTableSize;
++ t_FmPortDsarFilteringEntry *p_TcpPortsTablePtr;
++    bool     tcpPortPassOnHit;         /* when TRUE, a miss in the table will cause the packet to be dropped;
++                                          a hit will pass the packet to the classification tree.
++                                          If the classification tree passes the packet to a queue, it
++                                          will cause a wake interrupt.
++                                          When FALSE, it is the other way around. */
++} t_FmPortDsarFilteringInfo;
++
++/**************************************************************************//**
++ @Description Structure for Deep Sleep Auto Response parameters
++*//***************************************************************************/
++typedef struct t_FmPortDsarParams
++{
++ t_Handle h_FmPortTx;
++ t_FmPortDsarArpInfo *p_AutoResArpInfo;
++ t_FmPortDsarEchoIpv4Info *p_AutoResEchoIpv4Info;
++ t_FmPortDsarNdpInfo *p_AutoResNdpInfo;
++ t_FmPortDsarEchoIpv6Info *p_AutoResEchoIpv6Info;
++ t_FmPortDsarSnmpInfo *p_AutoResSnmpInfo;
++ t_FmPortDsarFilteringInfo *p_AutoResFilteringInfo;
++} t_FmPortDsarParams;
++
++/**************************************************************************//**
++ @Function FM_PORT_EnterDsar
++
++ @Description Enter Deep Sleep Auto Response mode.
++                This function writes the appropriate values into the relevant
++                tables in the MURAM.
++
++ @Param[in] h_FmPortRx - FM PORT module descriptor
++ @Param[in] params - Auto Response parameters
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_EnterDsar(t_Handle h_FmPortRx, t_FmPortDsarParams *params);
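++
++/* Illustrative sketch (hypothetical handles; 'arpEntry' as in the DSAR ARP
++ * entry example above): entering Deep Sleep Auto Response with autonomous
++ * ARP replies only; all other auto-response pointers are left NULL.
++ *
++ *     t_FmPortDsarArpInfo arpInfo = {
++ *         .tableSize               = 1,
++ *         .p_AutoResTable          = &arpEntry,
++ *         .enableConflictDetection = FALSE
++ *     };
++ *     t_FmPortDsarParams dsarParams;
++ *     memset(&dsarParams, 0, sizeof(dsarParams));
++ *     dsarParams.h_FmPortTx       = h_FmPortTx;
++ *     dsarParams.p_AutoResArpInfo = &arpInfo;
++ *     err = FM_PORT_EnterDsar(h_FmPortRx, &dsarParams);
++ */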
++
++/**************************************************************************//**
++ @Function FM_PORT_EnterDsarFinal
++
++ @Description Enter Deep Sleep Auto Response mode.
++ This function sets the Tx port in independent mode as needed
++                and redirects the receive flow to go through the
++                Dsar Fman-ctrl code.
++
++ @Param[in] h_DsarRxPort - FM Rx PORT module descriptor
++ @Param[in] h_DsarTxPort - FM Tx PORT module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_EnterDsarFinal(t_Handle h_DsarRxPort, t_Handle h_DsarTxPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_ExitDsar
++
++ @Description Exit Deep Sleep Auto Response mode.
++                This function reverses the AR mode and puts the ports back
++                into their original wake mode.
++
++ @Param[in] h_FmPortRx - FM PORT Rx module descriptor
++ @Param[in] h_FmPortTx - FM PORT Tx module descriptor
++
++ @Return        None.
++
++ @Cautions Allowed only following FM_PORT_EnterDsar().
++*//***************************************************************************/
++void FM_PORT_ExitDsar(t_Handle h_FmPortRx, t_Handle h_FmPortTx);
++
++/**************************************************************************//**
++ @Function FM_PORT_IsInDsar
++
++ @Description   This function returns TRUE if the port was set as Auto Response
++                and FALSE if not. Once AR mode is exited, it returns FALSE
++                until AR mode is re-enabled.
++
++ @Param[in] h_FmPort - FM PORT module descriptor
++
++ @Return        TRUE if the port is in Auto Response mode; FALSE otherwise.
++*//***************************************************************************/
++bool FM_PORT_IsInDsar(t_Handle h_FmPort);
++
++typedef struct t_FmPortDsarStats
++{
++ uint32_t arpArCnt;
++ uint32_t echoIcmpv4ArCnt;
++ uint32_t ndpArCnt;
++ uint32_t echoIcmpv6ArCnt;
++ uint32_t snmpGetCnt;
++ uint32_t snmpGetNextCnt;
++} t_FmPortDsarStats;
++
++/**************************************************************************//**
++ @Function FM_PORT_GetDsarStats
++
++ @Description Return statistics for Deep Sleep Auto Response
++
++ @Param[in] h_FmPortRx - FM PORT module descriptor
++ @Param[out] stats - structure containing the statistics counters
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_PORT_GetDsarStats(t_Handle h_FmPortRx, t_FmPortDsarStats *stats);
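++
++/* Illustrative sketch (hypothetical handle; the kernel's pr_info() is used
++ * only for illustration): reading the DSAR counters after wake-up to see
++ * how many requests were answered autonomously.
++ *
++ *     t_FmPortDsarStats dsarStats;
++ *     if (FM_PORT_GetDsarStats(h_FmPortRx, &dsarStats) == E_OK)
++ *         pr_info("DSAR: %u ARP, %u ICMPv4 echo replies\n",
++ *                 dsarStats.arpArCnt, dsarStats.echoIcmpv4ArCnt);
++ */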
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++/**************************************************************************//**
++ @Function FM_PORT_DumpRegs
++
++ @Description Dump all regs.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmPort - FM PORT module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_DumpRegs(t_Handle h_FmPort);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++/**************************************************************************//**
++ @Function FM_PORT_GetBufferDataOffset
++
++ @Description Relevant for Rx ports.
++ Returns the data offset from the beginning of the data buffer
++
++ @Param[in] h_FmPort - FM PORT module descriptor
++
++ @Return data offset.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++uint32_t FM_PORT_GetBufferDataOffset(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_GetBufferICInfo
++
++ @Description   Returns a pointer to the Internal Context within the data buffer
++
++ @Param[in] h_FmPort - FM PORT module descriptor
++ @Param[in] p_Data - A pointer to the data buffer.
++
++ @Return Internal context info pointer on success, NULL if 'allOtherInfo' was not
++ configured for this port.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++uint8_t * FM_PORT_GetBufferICInfo(t_Handle h_FmPort, char *p_Data);
++
++/**************************************************************************//**
++ @Function FM_PORT_GetBufferPrsResult
++
++ @Description Returns the pointer to the parse result in the data buffer.
++ In Rx ports this is relevant after reception, if parse
++ result is configured to be part of the data passed to the
++ application. For non Rx ports it may be used to get the pointer
++ of the area in the buffer where parse result should be
++ initialized - if so configured.
++ See FM_PORT_ConfigBufferPrefixContent for data buffer prefix
++ configuration.
++
++ @Param[in] h_FmPort - FM PORT module descriptor
++ @Param[in] p_Data - A pointer to the data buffer.
++
++ @Return Parse result pointer on success, NULL if parse result was not
++ configured for this port.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_FmPrsResult * FM_PORT_GetBufferPrsResult(t_Handle h_FmPort, char *p_Data);
++
++/**************************************************************************//**
++ @Function FM_PORT_GetBufferTimeStamp
++
++ @Description Returns the time stamp in the data buffer.
++ Relevant for Rx ports for getting the buffer time stamp.
++ See FM_PORT_ConfigBufferPrefixContent for data buffer prefix
++ configuration.
++
++ @Param[in] h_FmPort - FM PORT module descriptor
++ @Param[in] p_Data - A pointer to the data buffer.
++
++ @Return        A pointer to the time stamp on success, NULL otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++uint64_t * FM_PORT_GetBufferTimeStamp(t_Handle h_FmPort, char *p_Data);
++
++/**************************************************************************//**
++ @Function FM_PORT_GetBufferHashResult
++
++ @Description Given a data buffer, on the condition that hash result was defined
++ as a part of the buffer content (see FM_PORT_ConfigBufferPrefixContent)
++ this routine will return the pointer to the hash result location in the
++ buffer prefix.
++
++ @Param[in] h_FmPort - FM PORT module descriptor
++ @Param[in] p_Data - A pointer to the data buffer.
++
++ @Return A pointer to the hash result on success, NULL otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++uint8_t * FM_PORT_GetBufferHashResult(t_Handle h_FmPort, char *p_Data);
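++
++/* Illustrative sketch ('p_Data' is a hypothetical pointer to a received data
++ * buffer): locating the prefix fields that were configured through
++ * FM_PORT_ConfigBufferPrefixContent(); each accessor returns NULL if the
++ * corresponding field was not configured for this port.
++ *
++ *     uint32_t       dataOffset  = FM_PORT_GetBufferDataOffset(h_FmPort);
++ *     t_FmPrsResult *p_PrsRes    = FM_PORT_GetBufferPrsResult(h_FmPort, p_Data);
++ *     uint64_t      *p_TimeStamp = FM_PORT_GetBufferTimeStamp(h_FmPort, p_Data);
++ */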
++
++/**************************************************************************//**
++ @Function FM_PORT_Disable
++
++ @Description   Gracefully disable an FM port. The port will not start new
++                tasks, and the routine returns after all tasks associated with
++                the port are terminated.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++                This is a blocking routine; it returns after the port is
++                gracefully stopped, i.e. the port will not accept new frames,
++                but it will finish all frames or tasks which had already begun.
++*//***************************************************************************/
++t_Error FM_PORT_Disable(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_Enable
++
++ @Description A runtime routine provided to allow disable/enable of port.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_Enable(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_SetRateLimit
++
++ @Description Calling this routine enables rate limit algorithm.
++ By default, this functionality is disabled.
++ Note that rate-limit mechanism uses the FM time stamp.
++ The selected rate limit specified here would be
++ rounded DOWN to the nearest 16M.
++
++ May be used for Tx and OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_RateLimit A structure of rate limit parameters
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++                If rate limit is set on a port that needs to send PFC frames,
++                it might violate the stop transmit timing.
++*//***************************************************************************/
++t_Error FM_PORT_SetRateLimit(t_Handle h_FmPort, t_FmPortRateLimit *p_RateLimit);
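++
++/* Illustrative sketch: rate-limiting a Tx port. The 'rateLimit' and
++ * 'maxBurstSize' field names are assumed from the t_FmPortRateLimit
++ * structure defined earlier in this header; the values are arbitrary (per
++ * the description above, the selected rate is rounded DOWN to the nearest
++ * 16M).
++ *
++ *     t_FmPortRateLimit rateLimit;
++ *     memset(&rateLimit, 0, sizeof(rateLimit));
++ *     rateLimit.rateLimit    = 1000000;   // Kb/sec on Tx ports
++ *     rateLimit.maxBurstSize = 64;        // KBytes on Tx ports
++ *     err = FM_PORT_SetRateLimit(h_FmPort, &rateLimit);
++ */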
++
++/**************************************************************************//**
++ @Function FM_PORT_DeleteRateLimit
++
++ @Description Calling this routine disables and clears rate limit
++ initialization.
++
++ May be used for Tx and OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_DeleteRateLimit(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_SetPfcPrioritiesMappingToQmanWQ
++
++ @Description Calling this routine maps each PFC received priority to the transmit WQ.
++ This WQ will be blocked upon receiving a PFC frame with this priority.
++
++ May be used for Tx ports only.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] prio PFC priority (0-7).
++ @Param[in] wq Work Queue (0-7).
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_SetPfcPrioritiesMappingToQmanWQ(t_Handle h_FmPort, uint8_t prio, uint8_t wq);
++
++/**************************************************************************//**
++ @Function FM_PORT_SetStatisticsCounters
++
++ @Description Calling this routine enables/disables port's statistics counters.
++ By default, counters are enabled.
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] enable TRUE to enable, FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_SetStatisticsCounters(t_Handle h_FmPort, bool enable);
++
++/**************************************************************************//**
++ @Function FM_PORT_SetFrameQueueCounters
++
++ @Description Calling this routine enables/disables port's enqueue/dequeue counters.
++ By default, counters are enabled.
++
++ May be used for all ports
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] enable TRUE to enable, FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_SetFrameQueueCounters(t_Handle h_FmPort, bool enable);
++
++/**************************************************************************//**
++ @Function FM_PORT_AnalyzePerformanceParams
++
++ @Description   The user may call this routine so that the driver will analyze
++                whether the basic performance parameters are correct and may
++                suggest improvements; the basic parameters are FIFO sizes, number
++ of DMAs and number of TNUMs for the port.
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_AnalyzePerformanceParams(t_Handle h_FmPort);
++
++
++/**************************************************************************//**
++ @Function FM_PORT_SetAllocBufCounter
++
++ @Description Calling this routine enables/disables BM pool allocate
++ buffer counters.
++ By default, counters are enabled.
++
++ May be used for Rx ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] poolId BM pool id.
++ @Param[in] enable TRUE to enable, FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_SetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, bool enable);
++
++/**************************************************************************//**
++ @Function FM_PORT_GetBmiCounters
++
++ @Description Read port's BMI stat counters and place them into
++ a designated structure of counters.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[out] p_BmiStats counters structure
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_GetBmiCounters(t_Handle h_FmPort, t_FmPortBmiStats *p_BmiStats);
++
++/**************************************************************************//**
++ @Function FM_PORT_GetCounter
++
++ @Description Reads one of the FM PORT counters.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] fmPortCounter The requested counter.
++
++ @Return Counter's current value.
++
++ @Cautions Allowed only following FM_PORT_Init().
++ Note that it is user's responsibility to call this routine only
++ for enabled counters, and there will be no indication if a
++ disabled counter is accessed.
++*//***************************************************************************/
++uint32_t FM_PORT_GetCounter(t_Handle h_FmPort, e_FmPortCounters fmPortCounter);
++
++/**************************************************************************//**
++ @Function FM_PORT_ModifyCounter
++
++ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] fmPortCounter The requested counter.
++ @Param[in] value The requested value to be written into the counter.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ModifyCounter(t_Handle h_FmPort, e_FmPortCounters fmPortCounter, uint32_t value);
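++
++/* Illustrative sketch (hypothetical handle; the counter must be enabled,
++ * see FM_PORT_SetStatisticsCounters()): sampling and then resetting the
++ * frame counter.
++ *
++ *     uint32_t frames = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME);
++ *     FM_PORT_ModifyCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME, 0);  // reset
++ */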
++
++/**************************************************************************//**
++ @Function FM_PORT_GetAllocBufCounter
++
++ @Description Reads one of the FM PORT buffer counters.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] poolId The requested pool.
++
++ @Return Counter's current value.
++
++ @Cautions Allowed only following FM_PORT_Init().
++ Note that it is user's responsibility to call this routine only
++ for enabled counters, and there will be no indication if a
++ disabled counter is accessed.
++*//***************************************************************************/
++uint32_t FM_PORT_GetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId);
++
++/**************************************************************************//**
++ @Function FM_PORT_ModifyAllocBufCounter
++
++ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] poolId The requested pool.
++ @Param[in] value The requested value to be written into the counter.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ModifyAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, uint32_t value);
++
++/**************************************************************************//**
++ @Function FM_PORT_AddCongestionGrps
++
++ @Description   This routine affects the corresponding Tx port.
++ It should be called in order to enable pause
++ frame transmission in case of congestion in one or more
++ of the congestion groups relevant to this port.
++ Each call to this routine may add one or more congestion
++ groups to be considered relevant to this port.
++
++ May be used for Rx, or RX+OP ports only (depending on chip)
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_CongestionGrps A pointer to an array of congestion groups
++ id's to consider.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_AddCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps);
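++
++/* Illustrative sketch (hypothetical CG id): declaring congestion group 5 as
++ * relevant to this port, so that congestion in it triggers pause-frame
++ * transmission on the corresponding Tx port.
++ *
++ *     t_FmPortCongestionGrps cgrps;
++ *     memset(&cgrps, 0, sizeof(cgrps));
++ *     cgrps.numOfCongestionGrpsToConsider = 1;
++ *     cgrps.congestionGrpsToConsider[0]   = 5;
++ *     err = FM_PORT_AddCongestionGrps(h_FmPort, &cgrps);
++ */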
++
++/**************************************************************************//**
++ @Function FM_PORT_RemoveCongestionGrps
++
++ @Description   This routine affects the corresponding Tx port. It should be
++ called when congestion groups were
++ defined for this port and are no longer relevant, or pause
++ frames transmitting is not required on their behalf.
++ Each call to this routine may remove one or more congestion
++ groups to be considered relevant to this port.
++
++ May be used for Rx, or RX+OP ports only (depending on chip)
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_CongestionGrps A pointer to an array of congestion groups
++ id's to consider.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_RemoveCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps);
++
++/**************************************************************************//**
++ @Function FM_PORT_IsStalled
++
++ @Description A routine for checking whether the specified port is stalled.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return        TRUE if port is stalled, FALSE otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++bool FM_PORT_IsStalled(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_ReleaseStalled
++
++ @Description This routine may be called in case the port was stalled and may
++ now be released.
++ Note that this routine is available only on older FMan revisions
++ (FMan v2, DPAA v1.0 only).
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_ReleaseStalled(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_SetRxL4ChecksumVerify
++
++ @Description This routine is relevant for Rx ports (1G and 10G). The routine
++                sets/clears the L3/L4 checksum verification (on the RX side).
++                Note that this takes effect only if the hw-parser is enabled!
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] l4Checksum boolean indicates whether to do L3/L4 checksum
++ on frames or not.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_SetRxL4ChecksumVerify(t_Handle h_FmPort, bool l4Checksum);
++
++/**************************************************************************//**
++ @Function FM_PORT_SetErrorsRoute
++
++ @Description Errors selected for this routine will cause a frame with that error
++ to be enqueued to error queue.
++                Errors not selected for this routine will cause a frame with that error
++                to be enqueued to one of the other port queues.
++ By default all errors are defined to be enqueued to error queue.
++ Errors that were configured to be discarded (at initialization)
++ may not be selected here.
++
++ May be used for Rx and OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] errs A list of errors to enqueue to error queue
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_SetErrorsRoute(t_Handle h_FmPort, fmPortFrameErrSelect_t errs);
++
++/**************************************************************************//**
++ @Function FM_PORT_SetIMExceptions
++
++ @Description Calling this routine enables/disables FM PORT interrupts.
++
++ @Param[in] h_FmPort FM PORT module descriptor.
++ @Param[in] exception The exception to be selected.
++ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++ This routine should NOT be called from guest-partition
++ (i.e. guestId != NCSW_MASTER_ID)
++*//***************************************************************************/
++t_Error FM_PORT_SetIMExceptions(t_Handle h_FmPort, e_FmPortExceptions exception, bool enable);
++
++/**************************************************************************//*
++ @Function FM_PORT_SetPerformanceCounters
++
++ @Description Calling this routine enables/disables port's performance counters.
++ By default, counters are enabled.
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] enable TRUE to enable, FALSE to disable.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_SetPerformanceCounters(t_Handle h_FmPort, bool enable);
++
++/**************************************************************************//*
++ @Function FM_PORT_SetPerformanceCountersParams
++
++ @Description Calling this routine defines port's performance
++ counters parameters.
++
++ May be used for all port types
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_FmPortPerformanceCnt A pointer to a structure of performance
++ counters parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_SetPerformanceCountersParams(t_Handle h_FmPort, t_FmPortPerformanceCnt *p_FmPortPerformanceCnt);
++
++/**************************************************************************//**
++ @Group FM_PORT_pcd_runtime_control_grp FM Port PCD Runtime Control Unit
++
++ @Description FM Port PCD Runtime control unit API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description A structure defining the KG scheme after the parser.
++ This is relevant only to change scheme selection mode - from
++ direct to indirect and vice versa, or when the scheme is selected directly,
++ to select the scheme id.
++
++*//***************************************************************************/
++typedef struct t_FmPcdKgSchemeSelect {
++ bool direct; /**< TRUE to use 'h_Scheme' directly, FALSE to use LCV. */
++ t_Handle h_DirectScheme; /**< Scheme handle, selects the scheme after parser;
++ Relevant only when 'direct' is TRUE. */
++} t_FmPcdKgSchemeSelect;
++
++/**************************************************************************//**
++ @Description A structure of scheme parameters
++*//***************************************************************************/
++typedef struct t_FmPcdPortSchemesParams {
++ uint8_t numOfSchemes; /**< Number of schemes for port to be bound to. */
++ t_Handle h_Schemes[FM_PCD_KG_NUM_OF_SCHEMES]; /**< Array of 'numOfSchemes' schemes for the
++ port to be bound to */
++} t_FmPcdPortSchemesParams;
++
++/**************************************************************************//**
++ @Description Union for defining port protocol parameters for parser
++*//***************************************************************************/
++typedef union u_FmPcdHdrPrsOpts {
++ /* MPLS */
++ struct {
++ bool labelInterpretationEnable; /**< When this bit is set, the last MPLS label will be
++ interpreted as described in HW spec table. When the bit
++ is cleared, the parser will advance to MPLS next parse */
++        e_NetHeaderType nextParse;                  /**< must be equal to or higher than IPv4 */
++ } mplsPrsOptions;
++ /* VLAN */
++ struct {
++ uint16_t tagProtocolId1; /**< User defined Tag Protocol Identifier, to be recognized
++ on VLAN TAG on top of 0x8100 and 0x88A8 */
++ uint16_t tagProtocolId2; /**< User defined Tag Protocol Identifier, to be recognized
++ on VLAN TAG on top of 0x8100 and 0x88A8 */
++ } vlanPrsOptions;
++ /* PPP */
++ struct{
++ bool enableMTUCheck; /**< Check validity of MTU according to RFC2516 */
++ } pppoePrsOptions;
++
++ /* IPV6 */
++ struct{
++ bool routingHdrEnable; /**< TRUE to enable routing header, otherwise ignore */
++ } ipv6PrsOptions;
++
++ /* UDP */
++ struct{
++ bool padIgnoreChecksum; /**< TRUE to ignore pad in checksum */
++ } udpPrsOptions;
++
++ /* TCP */
++ struct {
++ bool padIgnoreChecksum; /**< TRUE to ignore pad in checksum */
++ } tcpPrsOptions;
++} u_FmPcdHdrPrsOpts;
++
++/**************************************************************************//**
++ @Description A structure for defining each header for the parser
++*//***************************************************************************/
++typedef struct t_FmPcdPrsAdditionalHdrParams {
++ e_NetHeaderType hdr; /**< Selected header; use HEADER_TYPE_NONE
++ to indicate that sw parser is to run first
++ (before HW parser, and independent of the
++ existence of any protocol), in this case,
++ swPrsEnable must be set, and all other
++ parameters are irrelevant. */
++ bool errDisable; /**< TRUE to disable error indication */
++ bool swPrsEnable; /**< Enable jump to SW parser when this
++ header is recognized by the HW parser. */
++ uint8_t indexPerHdr; /**< Normally 0, if more than one sw parser
++ attachments exists for the same header,
++ (in the main sw parser code) use this
++ index to distinguish between them. */
++ bool usePrsOpts; /**< TRUE to use parser options. */
++ u_FmPcdHdrPrsOpts prsOpts; /**< A union according to header type,
++ defining the parser options selected.*/
++} t_FmPcdPrsAdditionalHdrParams;
++
++/**************************************************************************//**
++ @Description struct for defining port PCD parameters
++*//***************************************************************************/
++typedef struct t_FmPortPcdPrsParams {
++ uint8_t prsResultPrivateInfo; /**< The private info provides a method of inserting
++ port information into the parser result. This information
++ may be extracted by Keygen and be used for frames
++ distribution when a per-port distinction is required,
++ it may also be used as a port logical id for analyzing
++ incoming frames. */
++ uint8_t parsingOffset; /**< Number of bytes from beginning of packet to start parsing */
++ e_NetHeaderType firstPrsHdr; /**< The type of the first header expected at 'parsingOffset' */
++ bool includeInPrsStatistics; /**< TRUE to include this port in the parser statistics;
++ NOTE: this field is not valid when the FM is in "guest" mode
++ and IPC is not available. */
++ uint8_t numOfHdrsWithAdditionalParams; /**< Normally 0, some headers may get
++ special parameters */
++ t_FmPcdPrsAdditionalHdrParams additionalParams[FM_PCD_PRS_NUM_OF_HDRS];
++ /**< 'numOfHdrsWithAdditionalParams' structures
++ of additional parameters
++ for each header that requires them */
++ bool setVlanTpid1; /**< TRUE to configure user selection of Ethertype to
++ indicate a VLAN tag (in addition to the TPID values
++ 0x8100 and 0x88A8). */
++ uint16_t vlanTpid1; /**< extra tag to use if setVlanTpid1=TRUE. */
++ bool setVlanTpid2; /**< TRUE to configure user selection of Ethertype to
++ indicate a VLAN tag (in addition to the TPID values
++ 0x8100 and 0x88A8). */
++    uint16_t                    vlanTpid2;          /**< extra tag to use if setVlanTpid2=TRUE. */
++} t_FmPortPcdPrsParams;
++
++/**************************************************************************//**
++ @Description   struct for defining coarse classification parameters
++*//***************************************************************************/
++typedef struct t_FmPortPcdCcParams {
++ t_Handle h_CcTree; /**< A handle to a CC tree */
++} t_FmPortPcdCcParams;
++
++/**************************************************************************//**
++ @Description struct for defining keygen parameters
++*//***************************************************************************/
++typedef struct t_FmPortPcdKgParams {
++ uint8_t numOfSchemes; /**< Number of schemes for port to be bound to. */
++ t_Handle h_Schemes[FM_PCD_KG_NUM_OF_SCHEMES];
++ /**< Array of 'numOfSchemes' schemes handles for the
++ port to be bound to */
++ bool directScheme; /**< TRUE for going from parser to a specific scheme,
++ regardless of parser result */
++ t_Handle h_DirectScheme; /**< relevant only if direct == TRUE, Scheme handle,
++ as returned by FM_PCD_KgSetScheme */
++} t_FmPortPcdKgParams;
++
++/**************************************************************************//**
++ @Description struct for defining policer parameters
++*//***************************************************************************/
++typedef struct t_FmPortPcdPlcrParams {
++ t_Handle h_Profile; /**< Selected profile handle */
++} t_FmPortPcdPlcrParams;
++
++/**************************************************************************//**
++ @Description struct for defining port PCD parameters
++*//***************************************************************************/
++typedef struct t_FmPortPcdParams {
++ e_FmPortPcdSupport pcdSupport; /**< Relevant for Rx and offline ports only.
++ Describes the active PCD engines for this port. */
++    t_Handle                h_NetEnv;           /**< A handle to the Network Environment; unused in PLCR-only mode */
++ t_FmPortPcdPrsParams *p_PrsParams; /**< Parser parameters for this port */
++ t_FmPortPcdCcParams *p_CcParams; /**< Coarse classification parameters for this port */
++ t_FmPortPcdKgParams *p_KgParams; /**< Keygen parameters for this port */
++ t_FmPortPcdPlcrParams *p_PlcrParams; /**< Policer parameters for this port; Relevant for one of
++ following cases:
++ e_FM_PORT_PCD_SUPPORT_PLCR_ONLY or
++ e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR were selected,
++                                                     or if any flow uses a KG scheme where the
++                                                     policer profile is not generated
++                                                     ('bypassPlcrProfileGeneration' selected). */
++ t_Handle h_IpReassemblyManip; /**< IP Reassembly manipulation */
++#if (DPAA_VERSION >= 11)
++ t_Handle h_CapwapReassemblyManip;/**< CAPWAP Reassembly manipulation */
++#endif /* (DPAA_VERSION >= 11) */
++} t_FmPortPcdParams;
++
++/**************************************************************************//**
++ @Description A structure for defining the Parser starting point
++*//***************************************************************************/
++typedef struct t_FmPcdPrsStart {
++ uint8_t parsingOffset; /**< Number of bytes from beginning of packet to
++ start parsing */
++    e_NetHeaderType     firstPrsHdr;        /**< The type of the first header expected at
++ 'parsingOffset' */
++} t_FmPcdPrsStart;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description struct for defining external buffer margins
++*//***************************************************************************/
++typedef struct t_FmPortVSPAllocParams {
++ uint8_t numOfProfiles; /**< Number of Virtual Storage Profiles; must be a power of 2 */
++    uint8_t     dfltRelativeId;    /**< The default Virtual-Storage-Profile-id dedicated to the Rx/OP port;
++                                        the same default Virtual-Storage-Profile-id will be used for the
++                                        coupled Tx port if this function is called for an Rx port */
++ t_Handle h_FmTxPort; /**< Handle to coupled Tx Port; not relevant for OP port. */
++} t_FmPortVSPAllocParams;
++#endif /* (DPAA_VERSION >= 11) */
++
++
++/**************************************************************************//**
++ @Function FM_PORT_SetPCD
++
++ @Description Calling this routine defines the port's PCD configuration.
++ It changes it from its default configuration which is PCD
++ disabled (BMI to BMI) and configures it according to the passed
++ parameters.
++
++ May be used for Rx and OP ports only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_FmPortPcd A Structure of parameters defining the port's PCD
++ configuration.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_SetPCD(t_Handle h_FmPort, t_FmPortPcdParams *p_FmPortPcd);
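++
++/* Illustrative sketch (hypothetical handles 'h_NetEnv' and 'h_Scheme';
++ * 'e_FM_PORT_PCD_SUPPORT_PRS_AND_KG' and 'HEADER_TYPE_ETH' are assumed from
++ * their definitions elsewhere in these headers): enabling a
++ * parse-and-distribute (parser + KeyGen) flow on an Rx port.
++ *
++ *     t_FmPortPcdPrsParams prsParams;
++ *     t_FmPortPcdKgParams  kgParams;
++ *     t_FmPortPcdParams    pcdParams;
++ *     memset(&prsParams, 0, sizeof(prsParams));
++ *     memset(&kgParams,  0, sizeof(kgParams));
++ *     memset(&pcdParams, 0, sizeof(pcdParams));
++ *     prsParams.parsingOffset = 0;
++ *     prsParams.firstPrsHdr   = HEADER_TYPE_ETH;
++ *     kgParams.numOfSchemes   = 1;
++ *     kgParams.h_Schemes[0]   = h_Scheme;
++ *     pcdParams.pcdSupport    = e_FM_PORT_PCD_SUPPORT_PRS_AND_KG;
++ *     pcdParams.h_NetEnv      = h_NetEnv;
++ *     pcdParams.p_PrsParams   = &prsParams;
++ *     pcdParams.p_KgParams    = &kgParams;
++ *     err = FM_PORT_SetPCD(h_FmPort, &pcdParams);
++ */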
++
++/**************************************************************************//**
++ @Function FM_PORT_DeletePCD
++
++ @Description Calling this routine releases the port's PCD configuration.
++ The port returns to its default configuration which is PCD
++ disabled (BMI to BMI) and all PCD configuration is removed.
++
++ May be used for Rx and OP ports which are
++ in PCD mode only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_DeletePCD(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_AttachPCD
++
++ @Description This routine may be called after FM_PORT_DetachPCD was called,
++ to return to the originally configured PCD support flow.
++                These two routines allow PCD configuration changes that
++                require that the PCD not be used while the changes take place.
++
++ May be used for Rx and OP ports which are
++ in PCD mode only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++t_Error FM_PORT_AttachPCD(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_DetachPCD
++
++ @Description Calling this routine detaches the port from its PCD functionality.
++ The port returns to its default flow which is BMI to BMI.
++
++ May be used for Rx and OP ports which are
++ in PCD mode only
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_AttachPCD().
++*//***************************************************************************/
++t_Error FM_PORT_DetachPCD(t_Handle h_FmPort);
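++
++/* Illustrative sketch: the detach/attach bracket around a PCD change on a
++ * port already running with PCD ('h_FmPort' is hypothetical).
++ *
++ *     err = FM_PORT_DetachPCD(h_FmPort);
++ *     ... modify KG schemes / CC trees while the PCD is not in use ...
++ *     err = FM_PORT_AttachPCD(h_FmPort);
++ */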
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdPlcrAllocProfiles
++
++ @Description This routine may be called only for ports that use the Policer in
++ order to allocate private policer profiles.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] numOfProfiles The number of required policer profiles
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init() and FM_PCD_Init(),
++ and before FM_PORT_SetPCD().
++*//***************************************************************************/
++t_Error FM_PORT_PcdPlcrAllocProfiles(t_Handle h_FmPort, uint16_t numOfProfiles);
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdPlcrFreeProfiles
++
++ @Description This routine should be called for freeing private policer profiles.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init() and FM_PCD_Init(),
++ and before FM_PORT_SetPCD().
++*//***************************************************************************/
++t_Error FM_PORT_PcdPlcrFreeProfiles(t_Handle h_FmPort);
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Function FM_PORT_VSPAlloc
++
++ @Description This routine allocates VSPs per port and forces the port to work
++ in VSP mode. Note that the port is initialized by default with the
++ physical-storage-profile only.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_Params A structure of parameters for allocating VSPs per port
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init(), and before FM_PORT_SetPCD()
++ and also before FM_PORT_Enable(); i.e. the port should be disabled.
++*//***************************************************************************/
++t_Error FM_PORT_VSPAlloc(t_Handle h_FmPort, t_FmPortVSPAllocParams *p_Params);
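++
++/*
++ * Usage sketch (illustrative only): allocating VSPs on a disabled Rx port,
++ * before FM_PORT_SetPCD() and FM_PORT_Enable() are called. 'h_RxPort' and
++ * 'h_TxPort' are placeholder handles obtained elsewhere from the driver.
++ */
++#if 0
++static t_Error example_vsp_alloc(t_Handle h_RxPort, t_Handle h_TxPort)
++{
++    t_FmPortVSPAllocParams vspParams;
++
++    memset(&vspParams, 0, sizeof(vspParams));
++    vspParams.numOfProfiles  = 8;        /* must be a power of 2 */
++    vspParams.dfltRelativeId = 0;        /* default profile for this port */
++    vspParams.h_FmTxPort     = h_TxPort; /* coupled Tx port; NULL for OP ports */
++
++    return FM_PORT_VSPAlloc(h_RxPort, &vspParams);
++}
++#endif /* 0 - example only */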
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdKgModifyInitialScheme
++
++ @Description This routine may be called only for ports that use the keygen in
++ order to change the initial scheme a frame should be routed to.
++ The change may be of a scheme id (in case of direct mode),
++ from direct to indirect, or from indirect to direct - specifying the scheme id.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_FmPcdKgScheme A structure of parameters for defining whether
++ a scheme is direct/indirect, and if direct - scheme id.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD().
++*//***************************************************************************/
++t_Error FM_PORT_PcdKgModifyInitialScheme (t_Handle h_FmPort, t_FmPcdKgSchemeSelect *p_FmPcdKgScheme);
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdPlcrModifyInitialProfile
++
++ @Description This routine may be called for ports with flows
++ e_FM_PORT_PCD_SUPPORT_PLCR_ONLY or e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR
++ only, to change the initial Policer profile a frame should be
++ routed to. The change may be of a profile and/or absolute/direct
++ mode selection.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] h_Profile Policer profile handle
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD().
++*//***************************************************************************/
++t_Error FM_PORT_PcdPlcrModifyInitialProfile (t_Handle h_FmPort, t_Handle h_Profile);
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdCcModifyTree
++
++ @Description This routine may be called for ports that use a coarse classification tree
++ if the user wishes to replace the tree. The routine may not be called while the port
++ receives packets using the PCD functionalities; therefore the port must first be detached
++ from the PCD, only then may the routine be called, and then the port may be attached to the PCD again.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] h_CcTree A handle to a CC tree that was already built,
++ as returned by the tree-build routine.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init(), FM_PORT_SetPCD() and FM_PORT_DetachPCD()
++*//***************************************************************************/
++t_Error FM_PORT_PcdCcModifyTree (t_Handle h_FmPort, t_Handle h_CcTree);
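++
++/*
++ * Usage sketch (illustrative only; 'h_NewCcTree' stands for a tree that was
++ * already built): the detach / modify / attach sequence described above for
++ * replacing the coarse-classification tree of a running port.
++ */
++#if 0
++static t_Error example_replace_cc_tree(t_Handle h_FmPort, t_Handle h_NewCcTree)
++{
++    t_Error err;
++
++    /* Stop PCD traffic on the port before touching the tree */
++    err = FM_PORT_DetachPCD(h_FmPort);
++    if (err != E_OK)
++        return err;
++
++    /* Replace the tree while the port is detached from the PCD */
++    err = FM_PORT_PcdCcModifyTree(h_FmPort, h_NewCcTree);
++    if (err != E_OK)
++        return err;
++
++    /* Resume the originally configured PCD flow */
++    return FM_PORT_AttachPCD(h_FmPort);
++}
++#endif /* 0 - example only */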
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdKgBindSchemes
++
++ @Description This routine may be called for adding more schemes for the
++ port to be bound to. The selected schemes are not created here;
++ this specific port merely starts using them.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_PortScheme A structure defining the list of schemes to be added.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD().
++*//***************************************************************************/
++t_Error FM_PORT_PcdKgBindSchemes (t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme);
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdKgUnbindSchemes
++
++ @Description This routine may be called for removing schemes from the list
++ the port is bound to. The selected schemes are not removed or invalidated;
++ this specific port just stops using them.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_PortScheme A structure defining the list of schemes to be removed.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD().
++*//***************************************************************************/
++t_Error FM_PORT_PcdKgUnbindSchemes (t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme);
++
++/**************************************************************************//**
++ @Function FM_PORT_GetIPv4OptionsCount
++
++ @Description Retrieves the port's IPv4-options counter.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[out] p_Ipv4OptionsCount will hold the counter value
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init()
++*//***************************************************************************/
++t_Error FM_PORT_GetIPv4OptionsCount(t_Handle h_FmPort, uint32_t *p_Ipv4OptionsCount);
++
++/** @} */ /* end of FM_PORT_pcd_runtime_control_grp group */
++/** @} */ /* end of FM_PORT_runtime_control_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_PORT_runtime_data_grp FM Port Runtime Data-path Unit
++
++ @Description FM Port Runtime data unit API functions, definitions and enums.
++ This API is valid only if working in Independent-Mode.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_PORT_ImTx
++
++ @Description Tx function, called to transmit a data buffer on the port.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_Data A pointer to an LCP data buffer.
++ @Param[in] length Size of data for transmission.
++ @Param[in] lastBuffer Buffer position - TRUE for the last buffer
++ of a frame, including a single buffer frame
++ @Param[in] h_BufContext A user handle associated with this buffer
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++ NOTE - This routine can be used only when working in
++ Independent Mode.
++*//***************************************************************************/
++t_Error FM_PORT_ImTx( t_Handle h_FmPort,
++ uint8_t *p_Data,
++ uint16_t length,
++ bool lastBuffer,
++ t_Handle h_BufContext);
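++
++/*
++ * Usage sketch (illustrative only): transmitting a single-buffer frame in
++ * Independent Mode. 'h_BufCtx' is an arbitrary user handle that is returned
++ * with the buffer upon Tx confirmation.
++ */
++#if 0
++static t_Error example_im_tx(t_Handle h_FmPort, uint8_t *p_Frame,
++                             uint16_t frameLen, t_Handle h_BufCtx)
++{
++    /* lastBuffer = TRUE: this is the only (hence last) buffer of the frame */
++    t_Error err = FM_PORT_ImTx(h_FmPort, p_Frame, frameLen, TRUE, h_BufCtx);
++
++    if (err == E_OK)
++        FM_PORT_ImTxConf(h_FmPort); /* optional: confirm transmission on demand */
++    return err;
++}
++#endif /* 0 - example only */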
++
++/**************************************************************************//**
++ @Function FM_PORT_ImTxConf
++
++ @Description An optional Tx confirmation routine that may be called to verify
++ transmission of all frames. The confirmation procedure is
++ performed automatically on the next buffer transmission,
++ but calling this routine invokes it on demand.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Cautions Allowed only following FM_PORT_Init().
++ NOTE - This routine can be used only when working in
++ Independent Mode.
++*//***************************************************************************/
++void FM_PORT_ImTxConf(t_Handle h_FmPort);
++
++/**************************************************************************//**
++ @Function FM_PORT_ImRx
++
++ @Description Rx function, may be called to poll for received buffers.
++ Normally, Rx process is invoked by the driver on Rx interrupt.
++ Alternatively, this routine may be called on demand.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++ NOTE - This routine can be used only when working in
++ Independent Mode.
++*//***************************************************************************/
++t_Error FM_PORT_ImRx(t_Handle h_FmPort);
++
++/** @} */ /* end of FM_PORT_runtime_data_grp group */
++/** @} */ /* end of FM_PORT_grp group */
++/** @} */ /* end of FM_grp group */
++
++
++
++#ifdef NCSW_BACKWARD_COMPATIBLE_API
++#define FM_PORT_ConfigTxFifoDeqPipelineDepth FM_PORT_ConfigFifoDeqPipelineDepth
++#endif /* NCSW_BACKWARD_COMPATIBLE_API */
++
++
++#endif /* __FM_PORT_EXT */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_rtc_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_rtc_ext.h
+new file mode 100644
+index 00000000..72078ac4
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_rtc_ext.h
+@@ -0,0 +1,619 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File fm_rtc_ext.h
++
++ @Description External definitions and API for FM RTC IEEE1588 Timer Module.
++
++ @Cautions None.
++*//***************************************************************************/
++
++#ifndef __FM_RTC_EXT_H__
++#define __FM_RTC_EXT_H__
++
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "fsl_fman_rtc.h"
++
++/**************************************************************************//**
++
++ @Group FM_grp Frame Manager API
++
++ @Description FM API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group fm_rtc_grp FM RTC
++
++ @Description FM RTC functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group fm_rtc_init_grp FM RTC Initialization Unit
++
++ @Description FM RTC initialization API.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description FM RTC Alarm Polarity Options.
++*//***************************************************************************/
++typedef enum e_FmRtcAlarmPolarity
++{
++ e_FM_RTC_ALARM_POLARITY_ACTIVE_HIGH = E_FMAN_RTC_ALARM_POLARITY_ACTIVE_HIGH, /**< Active-high output polarity */
++ e_FM_RTC_ALARM_POLARITY_ACTIVE_LOW = E_FMAN_RTC_ALARM_POLARITY_ACTIVE_LOW /**< Active-low output polarity */
++} e_FmRtcAlarmPolarity;
++
++/**************************************************************************//**
++ @Description FM RTC Trigger Polarity Options.
++*//***************************************************************************/
++typedef enum e_FmRtcTriggerPolarity
++{
++ e_FM_RTC_TRIGGER_ON_RISING_EDGE = E_FMAN_RTC_TRIGGER_ON_RISING_EDGE, /**< Trigger on rising edge */
++ e_FM_RTC_TRIGGER_ON_FALLING_EDGE = E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE /**< Trigger on falling edge */
++} e_FmRtcTriggerPolarity;
++
++/**************************************************************************//**
++ @Description IEEE1588 Timer Module FM RTC Optional Clock Sources.
++*//***************************************************************************/
++typedef enum e_FmSrcClock
++{
++ e_FM_RTC_SOURCE_CLOCK_EXTERNAL = E_FMAN_RTC_SOURCE_CLOCK_EXTERNAL, /**< External high-precision timer reference clock */
++ e_FM_RTC_SOURCE_CLOCK_SYSTEM = E_FMAN_RTC_SOURCE_CLOCK_SYSTEM, /**< MAC system clock */
++ e_FM_RTC_SOURCE_CLOCK_OSCILATOR = E_FMAN_RTC_SOURCE_CLOCK_OSCILATOR /**< RTC clock oscillator */
++}e_FmSrcClk;
++
++/**************************************************************************//**
++ @Description FM RTC configuration parameters structure.
++
++ This structure should be passed to FM_RTC_Config().
++*//***************************************************************************/
++typedef struct t_FmRtcParams
++{
++ t_Handle h_Fm; /**< FM Handle*/
++ uintptr_t baseAddress; /**< Base address of FM RTC registers */
++ t_Handle h_App; /**< A handle to an application-layer object; this handle
++ will be passed by the driver when user callbacks are invoked */
++} t_FmRtcParams;
++
++
++/**************************************************************************//**
++ @Function FM_RTC_Config
++
++ @Description Configures the FM RTC module according to user's parameters.
++
++ The driver assigns default values to some FM RTC parameters.
++ These parameters can be overwritten using the advanced
++ configuration routines.
++
++ @Param[in] p_FmRtcParam - FM RTC configuration parameters.
++
++ @Return Handle to the new FM RTC object; NULL pointer on failure.
++
++ @Cautions None
++*//***************************************************************************/
++t_Handle FM_RTC_Config(t_FmRtcParams *p_FmRtcParam);
++
++/**************************************************************************//**
++ @Function FM_RTC_Init
++
++ @Description Initializes the FM RTC driver and hardware.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_Init(t_Handle h_FmRtc);
++
++/**************************************************************************//**
++ @Function FM_RTC_Free
++
++ @Description Frees the FM RTC object and all allocated resources.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_Free(t_Handle h_FmRtc);
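++
++/*
++ * Usage sketch (illustrative only; 'h_Fm' and 'rtcBaseAddr' come from the
++ * platform integration code): the Config/Init/Free lifecycle of the RTC driver.
++ */
++#if 0
++static t_Handle example_rtc_bringup(t_Handle h_Fm, uintptr_t rtcBaseAddr)
++{
++    t_FmRtcParams rtcParams;
++    t_Handle      h_FmRtc;
++
++    memset(&rtcParams, 0, sizeof(rtcParams));
++    rtcParams.h_Fm        = h_Fm;
++    rtcParams.baseAddress = rtcBaseAddr;
++    rtcParams.h_App       = NULL; /* no application context in this sketch */
++
++    h_FmRtc = FM_RTC_Config(&rtcParams);
++    if (!h_FmRtc)
++        return NULL;
++
++    /* advanced FM_RTC_Config*() calls, if any, go here */
++
++    if (FM_RTC_Init(h_FmRtc) != E_OK) {
++        FM_RTC_Free(h_FmRtc);
++        return NULL;
++    }
++    return h_FmRtc;
++}
++#endif /* 0 - example only */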
++
++
++/**************************************************************************//**
++ @Group fm_rtc_adv_config_grp FM RTC Advanced Configuration Unit
++
++ @Description FM RTC advanced configuration functions.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_RTC_ConfigPeriod
++
++ @Description Configures the period of the timestamp if different from the
++ default [DEFAULT_clockPeriod].
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] period - Period in nano-seconds.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_ConfigPeriod(t_Handle h_FmRtc, uint32_t period);
++
++/**************************************************************************//**
++ @Function FM_RTC_ConfigSourceClock
++
++ @Description Configures the source clock of the RTC.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] srcClk - Source clock selection.
++ @Param[in] freqInMhz - the source-clock frequency (in MHz).
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_ConfigSourceClock(t_Handle h_FmRtc,
++ e_FmSrcClk srcClk,
++ uint32_t freqInMhz);
++
++/**************************************************************************//**
++ @Function FM_RTC_ConfigPulseRealignment
++
++ @Description Configures the RTC for automatic FIPER pulse realignment in
++ response to timer adjustments [DEFAULT_pulseRealign].
++
++ When enabled, the periodic (FIPER) pulses are realigned
++ automatically whenever the timer is adjusted, so that they
++ remain aligned with the adjusted time.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] enable - TRUE to enable automatic realignment.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_ConfigPulseRealignment(t_Handle h_FmRtc, bool enable);
++
++/**************************************************************************//**
++ @Function FM_RTC_ConfigFrequencyBypass
++
++ @Description Configures the RTC to bypass the frequency compensation
++ mechanism. [DEFAULT_bypass]
++
++ In this mode, the RTC clock is identical to the source clock.
++ This feature can be useful when the system contains an external
++ RTC with inherent frequency compensation.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] enabled - TRUE to bypass frequency compensation;
++ FALSE otherwise.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_ConfigFrequencyBypass(t_Handle h_FmRtc, bool enabled);
++
++/**************************************************************************//**
++ @Function FM_RTC_ConfigInvertedInputClockPhase
++
++ @Description Configures the RTC to invert the source clock phase on input.
++ [DEFAULT_invertInputClkPhase]
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] inverted - TRUE to invert the source clock phase on input.
++ FALSE otherwise.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_ConfigInvertedInputClockPhase(t_Handle h_FmRtc, bool inverted);
++
++/**************************************************************************//**
++ @Function FM_RTC_ConfigInvertedOutputClockPhase
++
++ @Description Configures the RTC to invert the output clock phase.
++ [DEFAULT_invertOutputClkPhase]
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] inverted - TRUE to invert the output clock phase.
++ FALSE otherwise.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_ConfigInvertedOutputClockPhase(t_Handle h_FmRtc, bool inverted);
++
++/**************************************************************************//**
++ @Function FM_RTC_ConfigOutputClockDivisor
++
++ @Description Configures the divisor for generating the output clock from
++ the RTC clock. [DEFAULT_outputClockDivisor]
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] divisor - Divisor for generation of the output clock.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_ConfigOutputClockDivisor(t_Handle h_FmRtc, uint16_t divisor);
++
++/**************************************************************************//**
++ @Function FM_RTC_ConfigAlarmPolarity
++
++ @Description Configures the polarity (active-high/active-low) of a specific
++ alarm signal. [DEFAULT_alarmPolarity]
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] alarmId - Alarm ID.
++ @Param[in] alarmPolarity - Alarm polarity.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_ConfigAlarmPolarity(t_Handle h_FmRtc,
++ uint8_t alarmId,
++ e_FmRtcAlarmPolarity alarmPolarity);
++
++/**************************************************************************//**
++ @Function FM_RTC_ConfigExternalTriggerPolarity
++
++ @Description Configures the polarity (rising/falling edge) of a specific
++ external trigger signal. [DEFAULT_triggerPolarity]
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] triggerId - Trigger ID.
++ @Param[in] triggerPolarity - Trigger polarity.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
++*//***************************************************************************/
++t_Error FM_RTC_ConfigExternalTriggerPolarity(t_Handle h_FmRtc,
++ uint8_t triggerId,
++ e_FmRtcTriggerPolarity triggerPolarity);
++
++/** @} */ /* end of fm_rtc_adv_config_grp */
++/** @} */ /* end of fm_rtc_init_grp */
++
++
++/**************************************************************************//**
++ @Group fm_rtc_control_grp FM RTC Control Unit
++
++ @Description FM RTC runtime control API.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function t_FmRtcExceptionsCallback
++
++ @Description User exceptions callback routine, used by the RTC's different mechanisms.
++
++ @Param[in] h_App - User's application descriptor.
++ @Param[in] id - Source id.
++*//***************************************************************************/
++typedef void (t_FmRtcExceptionsCallback) ( t_Handle h_App, uint8_t id);
++
++/**************************************************************************//**
++ @Description FM RTC alarm parameters.
++*//***************************************************************************/
++typedef struct t_FmRtcAlarmParams {
++ uint8_t alarmId; /**< 0 or 1 */
++ uint64_t alarmTime; /**< In nanoseconds, the time when the alarm
++ should go off - must be a multiple of
++ the RTC period */
++ t_FmRtcExceptionsCallback *f_AlarmCallback; /**< This routine will be called when RTC
++ reaches alarmTime */
++ bool clearOnExpiration; /**< TRUE to turn off the alarm once expired. */
++} t_FmRtcAlarmParams;
++
++/**************************************************************************//**
++ @Description FM RTC Periodic Pulse parameters.
++*//***************************************************************************/
++typedef struct t_FmRtcPeriodicPulseParams {
++ uint8_t periodicPulseId; /**< 0 or 1 */
++ uint64_t periodicPulsePeriod; /**< In Nanoseconds. Must be
++ a multiple of the RTC period */
++ t_FmRtcExceptionsCallback *f_PeriodicPulseCallback; /**< This routine will be called every
++ periodicPulsePeriod. */
++} t_FmRtcPeriodicPulseParams;
++
++/**************************************************************************//**
++ @Description FM RTC External Trigger parameters.
++*//***************************************************************************/
++typedef struct t_FmRtcExternalTriggerParams {
++ uint8_t externalTriggerId; /**< 0 or 1 */
++ bool usePulseAsInput; /**< Use the pulse interrupt instead of
++ an external signal */
++ t_FmRtcExceptionsCallback *f_ExternalTriggerCallback; /**< This routine will be called upon each
++ external trigger event. */
++} t_FmRtcExternalTriggerParams;
++
++
++/**************************************************************************//**
++ @Function FM_RTC_Enable
++
++ @Description Enables the RTC (the time count is started).
++
++ The user can choose to resume the time count from the
++ previous point, or to restart it.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] resetClock - TRUE to restart the time count from zero.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_Enable(t_Handle h_FmRtc, bool resetClock);
++
++/**************************************************************************//**
++ @Function FM_RTC_Disable
++
++ @Description Disables the RTC (time count is stopped).
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_Disable(t_Handle h_FmRtc);
++
++/**************************************************************************//**
++ @Function FM_RTC_SetClockOffset
++
++ @Description Sets the clock offset (usually relative to another clock).
++
++ The user can pass a negative offset value.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] offset - New clock offset (in nanoseconds).
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_SetClockOffset(t_Handle h_FmRtc, int64_t offset);
++
++/**************************************************************************//**
++ @Function FM_RTC_SetAlarm
++
++ @Description Schedules an alarm event to a given RTC time.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] p_FmRtcAlarmParams - Alarm parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++ Must be called only prior to FM_RTC_Enable().
++*//***************************************************************************/
++t_Error FM_RTC_SetAlarm(t_Handle h_FmRtc, t_FmRtcAlarmParams *p_FmRtcAlarmParams);
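++
++/*
++ * Usage sketch (illustrative only; 'example_alarm_cb' is a placeholder
++ * callback): arming a one-shot alarm one second ahead, before the clock
++ * is enabled, as required by the caution above.
++ */
++#if 0
++static void example_alarm_cb(t_Handle h_App, uint8_t id)
++{
++    /* alarm 'id' has fired */
++}
++
++static t_Error example_set_alarm(t_Handle h_FmRtc)
++{
++    t_FmRtcAlarmParams alarm;
++    t_Error            err;
++
++    memset(&alarm, 0, sizeof(alarm));
++    alarm.alarmId           = 0;
++    alarm.alarmTime         = 1000000000ULL; /* 1 s; must be a multiple of the RTC period */
++    alarm.f_AlarmCallback   = example_alarm_cb;
++    alarm.clearOnExpiration = TRUE;          /* one-shot alarm */
++
++    err = FM_RTC_SetAlarm(h_FmRtc, &alarm);
++    if (err != E_OK)
++        return err;
++
++    return FM_RTC_Enable(h_FmRtc, TRUE); /* start counting from zero */
++}
++#endif /* 0 - example only */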
++
++/**************************************************************************//**
++ @Function FM_RTC_SetPeriodicPulse
++
++ @Description Sets a periodic pulse.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] p_FmRtcPeriodicPulseParams - Periodic pulse parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++ Must be called only prior to FM_RTC_Enable().
++*//***************************************************************************/
++t_Error FM_RTC_SetPeriodicPulse(t_Handle h_FmRtc, t_FmRtcPeriodicPulseParams *p_FmRtcPeriodicPulseParams);
++
++/**************************************************************************//**
++ @Function FM_RTC_ClearPeriodicPulse
++
++ @Description Clears a periodic pulse.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] periodicPulseId - Periodic pulse id.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_ClearPeriodicPulse(t_Handle h_FmRtc, uint8_t periodicPulseId);
++
++/**************************************************************************//**
++ @Function FM_RTC_SetExternalTrigger
++
++ @Description Sets an external trigger indication and defines a callback
++ routine to be called on such an event.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] p_FmRtcExternalTriggerParams - External Trigger parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_SetExternalTrigger(t_Handle h_FmRtc, t_FmRtcExternalTriggerParams *p_FmRtcExternalTriggerParams);
++
++/**************************************************************************//**
++ @Function FM_RTC_ClearExternalTrigger
++
++ @Description Clears external trigger indication.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] id - External Trigger id.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_ClearExternalTrigger(t_Handle h_FmRtc, uint8_t id);
++
++/**************************************************************************//**
++ @Function FM_RTC_GetExternalTriggerTimeStamp
++
++ @Description Reads the External Trigger TimeStamp.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] triggerId - External Trigger id.
++ @Param[out] p_TimeStamp - External Trigger timestamp (in nanoseconds).
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_GetExternalTriggerTimeStamp(t_Handle h_FmRtc,
++ uint8_t triggerId,
++ uint64_t *p_TimeStamp);
++
++/**************************************************************************//**
++ @Function FM_RTC_GetCurrentTime
++
++ @Description Returns the current RTC time.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[out] p_Ts - returned time stamp (in nanoseconds).
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_GetCurrentTime(t_Handle h_FmRtc, uint64_t *p_Ts);
++
++/**************************************************************************//**
++ @Function FM_RTC_SetCurrentTime
++
++ @Description Sets the current RTC time.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] ts - The new time stamp (in nanoseconds).
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_SetCurrentTime(t_Handle h_FmRtc, uint64_t ts);
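++
++/*
++ * Usage sketch (illustrative only): stepping the clock by a signed delta,
++ * read-modify-write style, as a time-synchronization servo might do.
++ */
++#if 0
++static t_Error example_step_clock(t_Handle h_FmRtc, int64_t deltaNs)
++{
++    uint64_t now;
++    t_Error  err;
++
++    err = FM_RTC_GetCurrentTime(h_FmRtc, &now);
++    if (err != E_OK)
++        return err;
++
++    /* step the counter by deltaNs; small corrections could instead use
++     * FM_RTC_SetClockOffset() or FM_RTC_SetFreqCompensation() */
++    return FM_RTC_SetCurrentTime(h_FmRtc, now + (uint64_t)deltaNs);
++}
++#endif /* 0 - example only */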
++
++/**************************************************************************//**
++ @Function FM_RTC_GetFreqCompensation
++
++ @Description Retrieves the frequency compensation value
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[out] p_Compensation - A pointer to the returned value of compensation.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_GetFreqCompensation(t_Handle h_FmRtc, uint32_t *p_Compensation);
++
++/**************************************************************************//**
++ @Function FM_RTC_SetFreqCompensation
++
++ @Description Sets a new frequency compensation value.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] freqCompensation - The new frequency compensation value to set.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_SetFreqCompensation(t_Handle h_FmRtc, uint32_t freqCompensation);
++
++#ifdef CONFIG_PTP_1588_CLOCK_DPAA
++/**************************************************************************//**
++ @Function FM_RTC_EnableInterrupt
++
++ @Description Enables interrupts of the FM RTC.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] events - Interrupt events.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_RTC_EnableInterrupt(t_Handle h_FmRtc, uint32_t events);
++
++/**************************************************************************//**
++ @Function FM_RTC_DisableInterrupt
++
++ @Description Disables interrupts of the FM RTC.
++
++ @Param[in] h_FmRtc - Handle to FM RTC object.
++ @Param[in] events - Interrupt events.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_RTC_DisableInterrupt(t_Handle h_FmRtc, uint32_t events);
++#endif /* CONFIG_PTP_1588_CLOCK_DPAA */
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++/**************************************************************************//**
++ @Function FM_RTC_DumpRegs
++
++ @Description Dumps all FM RTC registers.
++
++ @Param[in] h_FmRtc A handle to an FM RTC Module.
++
++ @Return E_OK on success;
++
++ @Cautions Allowed only following FM_RTC_Init().
++*//***************************************************************************/
++t_Error FM_RTC_DumpRegs(t_Handle h_FmRtc);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++/** @} */ /* end of fm_rtc_control_grp */
++/** @} */ /* end of fm_rtc_grp */
++/** @} */ /* end of FM_grp group */
++
++
++#endif /* __FM_RTC_EXT_H__ */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_vsp_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_vsp_ext.h
+new file mode 100644
+index 00000000..f9aed036
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_vsp_ext.h
+@@ -0,0 +1,411 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File fm_vsp_ext.h
++
++ @Description FM Virtual Storage-Profile ...
++*//***************************************************************************/
++#ifndef __FM_VSP_EXT_H
++#define __FM_VSP_EXT_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "debug_ext.h"
++
++#include "fm_ext.h"
++
++
++/**************************************************************************//**
++
++ @Group FM_grp Frame Manager API
++
++ @Description FM API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_VSP_grp FM Virtual-Storage-Profile
++
++ @Description FM Virtual-Storage-Profile API
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_VSP_init_grp FM VSP Initialization Unit
++
++ @Description FM VSP initialization API.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description Virtual Storage Profile
++*//***************************************************************************/
++typedef struct t_FmVspParams {
++ t_Handle h_Fm; /**< A handle to the FM object this VSP is related to */
++ t_FmExtPools extBufPools; /**< Which external buffer pools are used
++ (up to FM_PORT_MAX_NUM_OF_EXT_POOLS), and their sizes.
++ This parameter is associated with Rx/OP ports */
++ uint16_t liodnOffset; /**< VSP's LIODN offset */
++ struct {
++ e_FmPortType portType; /**< Port type */
++ uint8_t portId; /**< Port Id - relative to type */
++ } portParams;
++ uint8_t relativeProfileId; /**< VSP Id - relative to VSP's range
++ defined in relevant FM object */
++} t_FmVspParams;
++
++
++/**************************************************************************//**
++ @Function FM_VSP_Config
++
++ @Description Creates descriptor for the FM VSP module.
++
++ The routine returns a handle (descriptor) to the FM VSP object.
++ This descriptor must be passed as first parameter to all other
++ FM VSP function calls.
++
++ No actual initialization or configuration of FM hardware is
++ done by this routine.
++
++ @Param[in] p_FmVspParams Pointer to a data structure of parameters.
++
++ @Return Handle to the new FM VSP object; NULL pointer on failure.
++*//***************************************************************************/
++t_Handle FM_VSP_Config(t_FmVspParams *p_FmVspParams);
++
++/**************************************************************************//**
++ @Function FM_VSP_Init
++
++ @Description Initializes the FM VSP module
++
++ @Param[in] h_FmVsp - FM VSP module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_VSP_Init(t_Handle h_FmVsp);
++
++/**************************************************************************//**
++ @Function FM_VSP_Free
++
++ @Description Frees all resources that were assigned to FM VSP module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmVsp - FM VSP module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error FM_VSP_Free(t_Handle h_FmVsp);
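++
++/*
++ * Usage sketch (illustrative only; the port identity and profile id are
++ * placeholders, and the buffer-pool setup is omitted): the Config/Init
++ * lifecycle of one VSP.
++ */
++#if 0
++static t_Handle example_vsp_create(t_Handle h_Fm, e_FmPortType portType,
++                                   uint8_t portId, uint8_t relativeId)
++{
++    t_FmVspParams vspParams;
++    t_Handle      h_FmVsp;
++
++    memset(&vspParams, 0, sizeof(vspParams));
++    vspParams.h_Fm                = h_Fm;
++    vspParams.portParams.portType = portType;
++    vspParams.portParams.portId   = portId;
++    vspParams.relativeProfileId   = relativeId;
++    /* vspParams.extBufPools and vspParams.liodnOffset are set here
++     * according to the platform's buffer-pool layout (omitted) */
++
++    h_FmVsp = FM_VSP_Config(&vspParams);
++    if (!h_FmVsp)
++        return NULL;
++
++    /* advanced FM_VSP_Config*() calls, if any, go here */
++
++    if (FM_VSP_Init(h_FmVsp) != E_OK) {
++        FM_VSP_Free(h_FmVsp);
++        return NULL;
++    }
++    return h_FmVsp;
++}
++#endif /* 0 - example only */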
++
++
++/**************************************************************************//**
++ @Group FM_VSP_adv_config_grp FM VSP Advanced Configuration Unit
++
++ @Description FM VSP advanced configuration functions.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigBufferPrefixContent
++
++ @Description Defines the structure, size and content of the application buffer.
++
++ On Rx, the FM will reserve the first 'privDataSize' bytes of
++ the buffer prefix for private application data; then, depending
++ on 'passPrsResult' and 'passTimeStamp', it will copy the parse
++ result and the time stamp, followed by the packet itself (in
++ this order), into the application buffer at their respective
++ offsets. In VSPs defined for Tx ports, if 'passPrsResult' is
++ set, the application should write the parse result at its
++ offset in the buffer prefix.
++
++ Calling this routine changes the buffer margins definitions
++ in the internal driver data base from its default
++ configuration: Data size: [DEFAULT_FM_SP_bufferPrefixContent_privDataSize]
++ Pass Parser result: [DEFAULT_FM_SP_bufferPrefixContent_passPrsResult].
++ Pass timestamp: [DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp].
++
++ @Param[in] h_FmVsp A handle to a FM VSP module.
++ @Param[in,out] p_FmBufferPrefixContent A structure of parameters describing the
++ structure of the buffer.
++ Out parameter: Start margin - offset
++ of data from start of external buffer.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++t_Error FM_VSP_ConfigBufferPrefixContent(t_Handle h_FmVsp,
++ t_FmBufferPrefixContent *p_FmBufferPrefixContent);
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigDmaSwapData
++
++ @Description Calling this routine changes the DMA swap data parameter
++ in the internal driver data base from its default
++ configuration [DEFAULT_FM_SP_dmaSwapData]
++
++ @Param[in] h_FmVsp A handle to a FM VSP module.
++ @Param[in] swapData New selection
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++t_Error FM_VSP_ConfigDmaSwapData(t_Handle h_FmVsp, e_FmDmaSwapOption swapData);
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigDmaIcCacheAttr
++
++ @Description Calling this routine changes the internal context cache
++ attribute parameter in the internal driver data base
++ from its default configuration [DEFAULT_FM_SP_dmaIntContextCacheAttr]
++
++ @Param[in] h_FmVsp A handle to a FM VSP module.
++ @Param[in] intContextCacheAttr New selection
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++t_Error FM_VSP_ConfigDmaIcCacheAttr(t_Handle h_FmVsp,
++ e_FmDmaCacheOption intContextCacheAttr);
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigDmaHdrAttr
++
++ @Description Calling this routine changes the header cache
++ attribute parameter in the internal driver data base
++ from its default configuration [DEFAULT_FM_SP_dmaHeaderCacheAttr]
++
++ @Param[in] h_FmVsp A handle to a FM VSP module.
++ @Param[in] headerCacheAttr New selection
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++t_Error FM_VSP_ConfigDmaHdrAttr(t_Handle h_FmVsp, e_FmDmaCacheOption headerCacheAttr);
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigDmaScatterGatherAttr
++
++ @Description Calling this routine changes the scatter gather cache
++ attribute parameter in the internal driver data base
++ from its default configuration [DEFAULT_FM_SP_dmaScatterGatherCacheAttr]
++
++ @Param[in] h_FmVsp A handle to a FM VSP module.
++ @Param[in] scatterGatherCacheAttr New selection
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++t_Error FM_VSP_ConfigDmaScatterGatherAttr(t_Handle h_FmVsp,
++ e_FmDmaCacheOption scatterGatherCacheAttr);
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigDmaWriteOptimize
++
++ @Description Calling this routine changes the write optimization
++ parameter in the internal driver data base
++ from its default configuration: optimize = [DEFAULT_FM_SP_dmaWriteOptimize]
++
++ @Param[in] h_FmVsp A handle to a FM VSP module.
++ @Param[in] optimize TRUE to enable optimization, FALSE for normal operation
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++t_Error FM_VSP_ConfigDmaWriteOptimize(t_Handle h_FmVsp, bool optimize);
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigNoScatherGather
++
++ @Description Calling this routine changes whether scatter/gather (S/G) frames
++ may be received, in the internal driver data base,
++ from its default configuration: noScatherGather = [DEFAULT_FM_SP_noScatherGather]
++
++ @Param[in] h_FmVsp A handle to a FM VSP module.
++ @Param[in] noScatherGather TRUE to operate without scatter/gather capability.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++t_Error FM_VSP_ConfigNoScatherGather(t_Handle h_FmVsp, bool noScatherGather);
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigPoolDepletion
++
++ @Description Calling this routine enables pause frame generation depending on the
++ depletion status of BM pools. It also defines the conditions to activate
++ this functionality. By default, this functionality is disabled.
++
++ @Param[in] h_FmVsp A handle to a FM VSP module.
++ @Param[in] p_BufPoolDepletion A structure of pool depletion parameters
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++t_Error FM_VSP_ConfigPoolDepletion(t_Handle h_FmVsp, t_FmBufPoolDepletion *p_BufPoolDepletion);
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigBackupPools
++
++ @Description Calling this routine allows the configuration of some of the BM pools
++ defined for this port as backup pools.
++ A pool configured to be a backup pool will be used only if all other
++ enabled non-backup pools are depleted.
++
++ @Param[in] h_FmVsp A handle to a FM VSP module.
++ @Param[in] p_BackupBmPools An array of pool id's. All pools specified here will
++ be defined as backup pools.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++t_Error FM_VSP_ConfigBackupPools(t_Handle h_FmVsp, t_FmBackupBmPools *p_BackupBmPools);
++
++/** @} */ /* end of FM_VSP_adv_config_grp group */
++/** @} */ /* end of FM_VSP_init_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_VSP_control_grp FM VSP Control Unit
++
++ @Description FM VSP runtime control API.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function FM_VSP_GetBufferDataOffset
++
++ @Description Relevant for Rx ports.
++ Returns the data offset from the beginning of the data buffer.
++
++ @Param[in] h_FmVsp - FM VSP module descriptor
++
++ @Return The data offset.
++
++ @Cautions Allowed only following FM_VSP_Init().
++*//***************************************************************************/
++uint32_t FM_VSP_GetBufferDataOffset(t_Handle h_FmVsp);
++
++/**************************************************************************//**
++ @Function FM_VSP_GetBufferICInfo
++
++ @Description Returns a pointer to the internal context information in the data buffer.
++
++ @Param[in] h_FmVsp - FM VSP module descriptor
++ @Param[in] p_Data - A pointer to the data buffer.
++
++ @Return Internal context info pointer on success, NULL if 'allOtherInfo' was not
++ configured for this port.
++
++ @Cautions Allowed only following FM_VSP_Init().
++*//***************************************************************************/
++uint8_t * FM_VSP_GetBufferICInfo(t_Handle h_FmVsp, char *p_Data);
++
++/**************************************************************************//**
++ @Function FM_VSP_GetBufferPrsResult
++
++ @Description Returns the pointer to the parse result in the data buffer.
++ In Rx ports this is relevant after reception, if parse
++ result is configured to be part of the data passed to the
++ application. For non Rx ports it may be used to get the pointer
++ of the area in the buffer where parse result should be
++ initialized - if so configured.
++ See FM_VSP_ConfigBufferPrefixContent for data buffer prefix
++ configuration.
++
++ @Param[in] h_FmVsp - FM VSP module descriptor
++ @Param[in] p_Data - A pointer to the data buffer.
++
++ @Return Parse result pointer on success, NULL if parse result was not
++ configured for this port.
++
++ @Cautions Allowed only following FM_VSP_Init().
++*//***************************************************************************/
++t_FmPrsResult * FM_VSP_GetBufferPrsResult(t_Handle h_FmVsp, char *p_Data);
++
++/**************************************************************************//**
++ @Function FM_VSP_GetBufferTimeStamp
++
++ @Description Returns the time stamp in the data buffer.
++ Relevant for Rx ports for getting the buffer time stamp.
++ See FM_VSP_ConfigBufferPrefixContent for data buffer prefix
++ configuration.
++
++ @Param[in] h_FmVsp - FM VSP module descriptor
++ @Param[in] p_Data - A pointer to the data buffer.
++
++ @Return A pointer to the time stamp on success, NULL otherwise.
++
++ @Cautions Allowed only following FM_VSP_Init().
++*//***************************************************************************/
++uint64_t * FM_VSP_GetBufferTimeStamp(t_Handle h_FmVsp, char *p_Data);
++
++/**************************************************************************//**
++ @Function FM_VSP_GetBufferHashResult
++
++ @Description Given a data buffer, on the condition that hash result was defined
++ as a part of the buffer content (see FM_VSP_ConfigBufferPrefixContent)
++ this routine will return the pointer to the hash result location in the
++ buffer prefix.
++
++ @Param[in] h_FmVsp - FM VSP module descriptor
++ @Param[in] p_Data - A pointer to the data buffer.
++
++ @Return A pointer to the hash result on success, NULL otherwise.
++
++ @Cautions Allowed only following FM_VSP_Init().
++*//***************************************************************************/
++uint8_t * FM_VSP_GetBufferHashResult(t_Handle h_FmVsp, char *p_Data);
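++
++/*
++ * Usage sketch (illustrative only): picking the prefix fields out of a
++ * received buffer, assuming parse result and time stamp were enabled via
++ * FM_VSP_ConfigBufferPrefixContent().
++ */
++#if 0
++static void example_inspect_rx_buffer(t_Handle h_FmVsp, char *p_Data)
++{
++    uint32_t       dataOffset = FM_VSP_GetBufferDataOffset(h_FmVsp);
++    t_FmPrsResult *p_Prs      = FM_VSP_GetBufferPrsResult(h_FmVsp, p_Data);
++    uint64_t      *p_Ts       = FM_VSP_GetBufferTimeStamp(h_FmVsp, p_Data);
++
++    /* the frame itself starts at p_Data + dataOffset; p_Prs and p_Ts are
++     * NULL if the corresponding field was not configured for this profile */
++    (void)dataOffset; (void)p_Prs; (void)p_Ts;
++}
++#endif /* 0 - example only */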
++
++
++/** @} */ /* end of FM_VSP_control_grp group */
++/** @} */ /* end of FM_VSP_grp group */
++/** @} */ /* end of FM_grp group */
++
++
++#endif /* __FM_VSP_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/mii_acc_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/mii_acc_ext.h
+new file mode 100644
+index 00000000..f635d3c2
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/mii_acc_ext.h
+@@ -0,0 +1,76 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++
++#ifndef __MII_ACC_EXT_H
++#define __MII_ACC_EXT_H
++
++
++/**************************************************************************//**
++ @Function MII_ReadPhyReg
++
++ @Description This routine is called to read a specified PHY
++ register value.
++
++ @Param[in] h_MiiAccess - Handle to MII configuration access registers
++ @Param[in] phyAddr - PHY address (0-31).
++ @Param[in] reg - PHY register to read
++ @Param[out] p_Data - Receives the register value.
++
++ @Return Always zero (success).
++*//***************************************************************************/
++int MII_ReadPhyReg(t_Handle h_MiiAccess,
++ uint8_t phyAddr,
++ uint8_t reg,
++ uint16_t *p_Data);
++
++/**************************************************************************//**
++ @Function MII_WritePhyReg
++
++ @Description This routine is called to write data to a specified PHY
++ register.
++
++ @Param[in] h_MiiAccess - Handle to MII configuration access registers
++ @Param[in] phyAddr - PHY address (0-31).
++ @Param[in] reg - PHY register to write
++ @Param[in] data - Data to write in register.
++
++ @Return Always zero (success).
++*//***************************************************************************/
++int MII_WritePhyReg(t_Handle h_MiiAccess,
++ uint8_t phyAddr,
++ uint8_t reg,
++ uint16_t data);
++
++
++#endif /* __MII_ACC_EXT_H */
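A minimal sketch of driving this accessor pair, not part of the patch: the register number 0 (BMCR) and the 0x8000 soft-reset bit follow the standard IEEE 802.3 clause-22 layout, while the PHY address and function name are hypothetical. Both routines always return zero, so the return values may be ignored.

    static void phy_soft_reset(t_Handle h_MiiAccess)
    {
        uint16_t bmcr;

        MII_ReadPhyReg(h_MiiAccess, 1 /* phyAddr */, 0 /* BMCR */, &bmcr);
        bmcr |= 0x8000;                /* BMCR soft-reset bit (IEEE 802.3) */
        MII_WritePhyReg(h_MiiAccess, 1 /* phyAddr */, 0 /* BMCR */, bmcr);
    }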
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/core_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/core_ext.h
+new file mode 100644
+index 00000000..ec89a6dd
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/core_ext.h
+@@ -0,0 +1,90 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File core_ext.h
++
++ @Description Generic interface to basic core operations.
++
++ The system integrator must ensure that this interface is
++ mapped to a specific core implementation, by including the
++ appropriate header file.
++*//***************************************************************************/
++#ifndef __CORE_EXT_H
++#define __CORE_EXT_H
++
++#ifdef CONFIG_FMAN_ARM
++#include "arm_ext.h"
++#include <linux/smp.h>
++#else
++#ifdef NCSW_PPC_CORE
++#include "ppc_ext.h"
++#elif defined(NCSW_VXWORKS)
++#include "core_vxw_ext.h"
++#else
++#error "Core is not defined!"
++#endif /* NCSW_PPC_CORE */
++
++#if (!defined(CORE_IS_LITTLE_ENDIAN) && !defined(CORE_IS_BIG_ENDIAN))
++#error "Must define core as little-endian or big-endian!"
++#endif /* (!defined(CORE_IS_LITTLE_ENDIAN) && ... */
++
++#ifndef CORE_CACHELINE_SIZE
++#error "Must define the core cache-line size!"
++#endif /* !CORE_CACHELINE_SIZE */
++
++#endif /* CONFIG_FMAN_ARM */
++
++
++/**************************************************************************//**
++ @Function CORE_GetId
++
++ @Description Returns the core ID in the system.
++
++ @Return Core ID.
++*//***************************************************************************/
++uint32_t CORE_GetId(void);
++
++/**************************************************************************//**
++ @Function CORE_MemoryBarrier
++
++ @Description This routine will cause the core to stop executing any commands
++ until all previous memory read/write commands are completely out
++ of the core's pipeline.
++
++ @Return None.
++*//***************************************************************************/
++void CORE_MemoryBarrier(void);
++#define fsl_mem_core_barrier() CORE_MemoryBarrier()
++
++#endif /* __CORE_EXT_H */
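Since CORE_MemoryBarrier() (or its fsl_mem_core_barrier() alias) is the only ordering primitive this interface exposes, a typical use is fencing descriptor writes before signalling hardware. A minimal sketch with hypothetical names, not part of the patch:

    /* Publish a two-word descriptor: the barrier ensures the payload
     * write has left the core's pipeline before the ready flag is set. */
    static void publish_desc(volatile uint32_t *p_Desc, uint32_t data)
    {
        p_Desc[1] = data;        /* payload word              */
        fsl_mem_core_barrier();  /* drain prior writes        */
        p_Desc[0] = 1;           /* ready flag, observed last */
    }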
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/cores/arm_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/cores/arm_ext.h
+new file mode 100644
+index 00000000..e63444a7
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/cores/arm_ext.h
+@@ -0,0 +1,55 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File arm_ext.h
++
++ @Description Core API for ARM cores
++
++ These routines must be implemented by each specific ARM
++ core driver.
++*//***************************************************************************/
++#ifndef __ARM_EXT_H
++#define __ARM_EXT_H
++
++#include "part_ext.h"
++
++
++#define CORE_IS_LITTLE_ENDIAN
++
++static __inline__ void CORE_MemoryBarrier(void)
++{
++ mb();
++}
++
++#endif /* __ARM_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/cores/e500v2_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/cores/e500v2_ext.h
+new file mode 100644
+index 00000000..e79b1ddf
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/cores/e500v2_ext.h
+@@ -0,0 +1,476 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File e500v2_ext.h
++
++ @Description E500 external definitions and prototypes.
++ This file is not included by the E500
++ source file itself, since that is an assembly file; it
++ exists only to expose the prototypes for inclusion
++ by user code and other modules.
++*//***************************************************************************/
++
++#ifndef __E500V2_EXT_H
++#define __E500V2_EXT_H
++
++#include "std_ext.h"
++
++
++/* Layer 1 Cache Manipulations
++ *==============================
++ * These routines should not be called directly by the user.
++ */
++void L1DCache_Invalidate (void);
++void L1ICache_Invalidate(void);
++void L1DCache_Enable(void);
++void L1ICache_Enable(void);
++void L1DCache_Disable(void);
++void L1ICache_Disable(void);
++void L1DCache_Flush(void);
++void L1ICache_Flush(void);
++uint32_t L1ICache_IsEnabled(void);
++uint32_t L1DCache_IsEnabled(void);
++/*
++ *
++ */
++uint32_t L1DCache_LineLock(uint32_t addr);
++uint32_t L1ICache_LineLock(uint32_t addr);
++void L1Cache_BroadCastEnable(void);
++void L1Cache_BroadCastDisable(void);
++
++
++#define CORE_DCacheEnable E500_DCacheEnable
++#define CORE_ICacheEnable E500_ICacheEnable
++#define CORE_DCacheDisable E500_DCacheDisable
++#define CORE_ICacheDisable E500_ICacheDisable
++#define CORE_GetId E500_GetId
++#define CORE_TestAndSet E500_TestAndSet
++#define CORE_MemoryBarrier E500_MemoryBarrier
++#define CORE_InstructionSync E500_InstructionSync
++
++#define CORE_SetDozeMode E500_SetDozeMode
++#define CORE_SetNapMode E500_SetNapMode
++#define CORE_SetSleepMode E500_SetSleepMode
++#define CORE_SetJogMode E500_SetJogMode
++#define CORE_SetDeepSleepMode E500_SetDeepSleepMode
++
++#define CORE_RecoverDozeMode E500_RecoverDozeMode
++#define CORE_RecoverNapMode E500_RecoverNapMode
++#define CORE_RecoverSleepMode E500_RecoverSleepMode
++#define CORE_RecoverJogMode E500_RecoverJogMode
++
++void E500_SetDozeMode(void);
++void E500_SetNapMode(void);
++void E500_SetSleepMode(void);
++void E500_SetJogMode(void);
++t_Error E500_SetDeepSleepMode(uint32_t bptrAddress);
++
++void E500_RecoverDozeMode(void);
++void E500_RecoverNapMode(void);
++void E500_RecoverSleepMode(void);
++void E500_RecoverJogMode(void);
++
++
++/**************************************************************************//**
++ @Group E500_id E500 Application Programming Interface
++
++ @Description E500 API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group E500_init_grp E500 Initialization Unit
++
++ @Description E500 initialization unit API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++
++/**************************************************************************//**
++ @Function E500_DCacheEnable
++
++ @Description Enables the data cache for memory pages that are
++ not cache inhibited.
++
++ @Return None.
++*//***************************************************************************/
++void E500_DCacheEnable(void);
++
++/**************************************************************************//**
++ @Function E500_ICacheEnable
++
++ @Description Enables the instruction cache for memory pages that are
++ not cache inhibited.
++
++ @Return None.
++*//***************************************************************************/
++void E500_ICacheEnable(void);
++
++/**************************************************************************//**
++ @Function E500_DCacheDisable
++
++ @Description Disables the data cache.
++
++ @Return None.
++*//***************************************************************************/
++void E500_DCacheDisable(void);
++
++/**************************************************************************//**
++ @Function E500_ICacheDisable
++
++ @Description Disables the instruction cache.
++
++ @Return None.
++*//***************************************************************************/
++void E500_ICacheDisable(void);
++
++/**************************************************************************//**
++ @Function E500_DCacheFlush
++
++ @Description Flushes the data cache.
++
++ @Return None.
++*//***************************************************************************/
++void E500_DCacheFlush(void);
++
++/**************************************************************************//**
++ @Function E500_ICacheFlush
++
++ @Description Flushes the instruction cache.
++
++ @Return None.
++*//***************************************************************************/
++void E500_ICacheFlush(void);
++
++/**************************************************************************//**
++ @Function E500_DCacheSetStashId
++
++ @Description Sets the stash ID for the data cache.
++
++ @Param[in] stashId - The stash ID to be set.
++
++ @Return None.
++*//***************************************************************************/
++void E500_DCacheSetStashId(uint8_t stashId);
++
++/**************************************************************************//**
++ @Description E500mc L2 Cache Operation Mode
++*//***************************************************************************/
++typedef enum e_E500mcL2CacheMode
++{
++ e_L2_CACHE_MODE_DATA_ONLY = 0x00000001, /**< Cache data only */
++ e_L2_CACHE_MODE_INST_ONLY = 0x00000002, /**< Cache instructions only */
++ e_L2_CACHE_MODE_DATA_AND_INST = 0x00000003 /**< Cache data and instructions */
++} e_E500mcL2CacheMode;
++
++#if defined(CORE_E500MC) || defined(CORE_E5500)
++/**************************************************************************//**
++ @Function E500_L2CacheEnable
++
++ @Description Enables the cache for memory pages that are not cache inhibited.
++
++ @Param[in] mode - L2 cache mode: data only, instruction only, or instruction and data.
++
++ @Return None.
++
++ @Cautions This routine must be called only ONCE for both caches, i.e. it is
++ not possible to call this routine for the i-cache and then call it
++ again for the d-cache; the second call would override the first one.
++*//***************************************************************************/
++void E500_L2CacheEnable(e_E500mcL2CacheMode mode);
++
++/**************************************************************************//**
++ @Function E500_L2CacheDisable
++
++ @Description Disables the cache (data, instruction, or both).
++
++ @Return None.
++
++*//***************************************************************************/
++void E500_L2CacheDisable(void);
++
++/**************************************************************************//**
++ @Function E500_L2CacheFlush
++
++ @Description Flushes the cache.
++
++ @Return None.
++*//***************************************************************************/
++void E500_L2CacheFlush(void);
++
++/**************************************************************************//**
++ @Function E500_L2SetStashId
++
++ @Description Sets the stash ID for the L2 cache.
++
++ @Param[in] stashId - The stash ID to be set.
++
++ @Return None.
++*//***************************************************************************/
++void E500_L2SetStashId(uint8_t stashId);
++#endif /* defined(CORE_E500MC) || defined(CORE_E5500) */
++
++#ifdef CORE_E6500
++/**************************************************************************//**
++ @Function E6500_L2CacheEnable
++
++ @Description Enables the cache for memory pages that are not cache inhibited.
++
++ @Param[in] clusterBase - Cluster base address (the E6500 L2 supports the data & instruction mode only, so no mode parameter is taken).
++
++ @Return None.
++
++ @Cautions This routine must be called only ONCE for both caches, i.e. it is
++ not possible to call this routine for the i-cache and then call it
++ again for the d-cache; the second call would override the first one.
++*//***************************************************************************/
++void E6500_L2CacheEnable(uintptr_t clusterBase);
++
++/**************************************************************************//**
++ @Function E6500_L2CacheDisable
++
++ @Description Disables the cache (data, instruction, or both).
++
++ @Return None.
++
++*//***************************************************************************/
++void E6500_L2CacheDisable(uintptr_t clusterBase);
++
++/**************************************************************************//**
++ @Function E6500_L2CacheFlush
++
++ @Description Flushes the cache.
++
++ @Return None.
++*//***************************************************************************/
++void E6500_L2CacheFlush(uintptr_t clusterBase);
++
++/**************************************************************************//**
++ @Function E6500_L2SetStashId
++
++ @Description Sets the stash ID for the cluster's L2 cache.
++
++ @Param[in] stashId - The stash ID to be set.
++
++ @Return None.
++*//***************************************************************************/
++void E6500_L2SetStashId(uintptr_t clusterBase, uint8_t stashId);
++
++/**************************************************************************//**
++ @Function E6500_GetCcsrBase
++
++ @Description Obtains the SoC CCSR base address.
++
++ @Param[in] None.
++
++ @Return Physical CCSR base address.
++*//***************************************************************************/
++physAddress_t E6500_GetCcsrBase(void);
++#endif /* CORE_E6500 */
++
++/**************************************************************************//**
++ @Function E500_AddressBusStreamingEnable
++
++ @Description Enables address bus streaming on the CCB.
++
++ This setting, along with the ECM streaming configuration
++ parameters, enables address bus streaming on the CCB.
++
++ @Return None.
++*//***************************************************************************/
++void E500_AddressBusStreamingEnable(void);
++
++/**************************************************************************//**
++ @Function E500_AddressBusStreamingDisable
++
++ @Description Disables address bus streaming on the CCB.
++
++ @Return None.
++*//***************************************************************************/
++void E500_AddressBusStreamingDisable(void);
++
++/**************************************************************************//**
++ @Function E500_AddressBroadcastEnable
++
++ @Description Enables address broadcast.
++
++ The e500 broadcasts cache management instructions (dcbst, dcblc
++ (CT = 1), icblc (CT = 1), dcbf, dcbi, mbar, msync, tlbsync, icbi)
++ based on ABE. ABE must be set to allow management of external
++ L2 caches.
++
++ @Return None.
++*//***************************************************************************/
++void E500_AddressBroadcastEnable(void);
++
++/**************************************************************************//**
++ @Function E500_AddressBroadcastDisable
++
++ @Description Disables address broadcast.
++
++ The e500 broadcasts cache management instructions (dcbst, dcblc
++ (CT = 1), icblc (CT = 1), dcbf, dcbi, mbar, msync, tlbsync, icbi)
++ based on ABE. ABE must be set to allow management of external
++ L2 caches.
++
++ @Return None.
++*//***************************************************************************/
++void E500_AddressBroadcastDisable(void);
++
++/**************************************************************************//**
++ @Function E500_IsTaskletSupported
++
++ @Description Checks if tasklets are supported by the e500 interrupt handler.
++
++ @Retval TRUE - Tasklets are supported.
++ @Retval FALSE - Tasklets are not supported.
++*//***************************************************************************/
++bool E500_IsTaskletSupported(void);
++
++void E500_EnableTimeBase(void);
++void E500_DisableTimeBase(void);
++
++uint64_t E500_GetTimeBaseTime(void);
++
++void E500_GenericIntrInit(void);
++
++t_Error E500_SetIntr(int ppcIntrSrc,
++ void (* Isr)(t_Handle handle),
++ t_Handle handle);
++
++t_Error E500_ClearIntr(int ppcIntrSrc);
++
++/**************************************************************************//**
++ @Function E500_GenericIntrHandler
++
++ @Description This is the general e500 interrupt handler.
++
++ It is called by the main assembly interrupt handler
++ when an exception occurs and no other function has been
++ assigned to this exception.
++
++ @Param[in] intrEntry - The exception interrupt vector entry.
++*//***************************************************************************/
++void E500_GenericIntrHandler(uint32_t intrEntry);
++
++/**************************************************************************//**
++ @Function CriticalIntr
++
++ @Description This is the specific e500 critical interrupt handler.
++
++ It is called by the main assembly interrupt handler
++ when a critical interrupt occurs.
++
++ @Param[in] intrEntry - The exception interrupt vector entry.
++*//***************************************************************************/
++void CriticalIntr(uint32_t intrEntry);
++
++
++/**************************************************************************//**
++ @Function E500_GetId
++
++ @Description Returns the core ID in the system.
++
++ @Return Core ID.
++*//***************************************************************************/
++uint32_t E500_GetId(void);
++
++/**************************************************************************//**
++ @Function E500_TestAndSet
++
++ @Description This routine tries to atomically test-and-set an integer
++ in memory to a non-zero value.
++
++ The memory will be set only if it is tested as zero, in which
++ case the routine returns the new non-zero value; otherwise the
++ routine returns zero.
++
++ @Param[in] p - pointer to a volatile int in memory, on which test-and-set
++ operation should be made.
++
++ @Retval Zero - Operation failed - memory was already set.
++ @Retval Non-zero - Operation succeeded - memory has been set.
++*//***************************************************************************/
++int E500_TestAndSet(volatile int *p);
++
++/**************************************************************************//**
++ @Function E500_MemoryBarrier
++
++ @Description This routine will cause the core to stop executing any commands
++ until all previous memory read/write commands are completely out
++ of the core's pipeline.
++
++ @Return None.
++*//***************************************************************************/
++static __inline__ void E500_MemoryBarrier(void)
++{
++#ifndef CORE_E500V2
++ __asm__ ("mbar 1");
++#else /* CORE_E500V2 */
++ /**** ERRATA WORK AROUND START ****/
++ /* ERRATA num: CPU1 */
++ /* Description: "mbar MO = 1" instruction fails to order caching-inhibited
++ guarded loads and stores. */
++
++ /* "msync" instruction is used instead */
++
++ __asm__ ("msync");
++
++ /**** ERRATA WORK AROUND END ****/
++#endif /* CORE_E500V2 */
++}
++
++/**************************************************************************//**
++ @Function E500_InstructionSync
++
++ @Description This routine will cause the core to wait for previous instructions
++ (including any interrupts they generate) to complete before the
++ synchronization command executes, which purges all instructions
++ from the processor's pipeline and refetches the next instruction.
++
++ @Return None.
++*//***************************************************************************/
++static __inline__ void E500_InstructionSync(void)
++{
++ __asm__ ("isync");
++}
++
++
++/** @} */ /* end of E500_init_grp group */
++/** @} */ /* end of E500_grp group */
++
++
++#endif /* __E500V2_EXT_H */
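A common way to use the test-and-set primitive declared above is as a tiny spinlock; E500_TestAndSet() returns non-zero only for the caller that flipped the word from zero, so spinning until it succeeds yields mutual exclusion. A sketch with hypothetical names, not part of the patch:

    static volatile int lockWord; /* zero = free */

    static void lock_acquire(void)
    {
        while (!E500_TestAndSet(&lockWord))
            ;                    /* busy-wait: someone else holds the lock */
        E500_MemoryBarrier();    /* order the critical section after acquisition */
    }

    static void lock_release(void)
    {
        E500_MemoryBarrier();    /* drain critical-section stores first */
        lockWord = 0;
    }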
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/cores/ppc_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/cores/ppc_ext.h
+new file mode 100644
+index 00000000..9344b3a1
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/cores/ppc_ext.h
+@@ -0,0 +1,141 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File ppc_ext.h
++
++ @Description Core API for PowerPC cores
++
++ These routines must be implemented by each specific PowerPC
++ core driver.
++*//***************************************************************************/
++#ifndef __PPC_EXT_H
++#define __PPC_EXT_H
++
++#include "part_ext.h"
++
++
++#define CORE_IS_BIG_ENDIAN
++
++#if defined(CORE_E300) || defined(CORE_E500V2)
++#define CORE_CACHELINE_SIZE 32
++#elif defined(CORE_E500MC) || defined(CORE_E5500) || defined(CORE_E6500)
++#define CORE_CACHELINE_SIZE 64
++#else
++#error "Core not defined!"
++#endif /* defined(CORE_E300) || ... */
++
++
++/**************************************************************************//**
++ @Function CORE_TestAndSet
++
++ @Description This routine tries to atomically test-and-set an integer
++ in memory to a non-zero value.
++
++ The memory will be set only if it is tested as zero, in which
++ case the routine returns the new non-zero value; otherwise the
++ routine returns zero.
++
++ @Param[in] p - pointer to a volatile int in memory, on which test-and-set
++ operation should be made.
++
++ @Retval Zero - Operation failed - memory was already set.
++ @Retval Non-zero - Operation succeeded - memory has been set.
++*//***************************************************************************/
++int CORE_TestAndSet(volatile int *p);
++
++/**************************************************************************//**
++ @Function CORE_InstructionSync
++
++ @Description This routine will cause the core to wait for previous instructions
++ (including any interrupts they generate) to complete before the
++ synchronization command executes, which purges all instructions
++ from the processor's pipeline and refetches the next instruction.
++
++ @Return None.
++*//***************************************************************************/
++void CORE_InstructionSync(void);
++
++/**************************************************************************//**
++ @Function CORE_DCacheEnable
++
++ @Description Enables the data cache for memory pages that are
++ not cache inhibited.
++
++ @Return None.
++*//***************************************************************************/
++void CORE_DCacheEnable(void);
++
++/**************************************************************************//**
++ @Function CORE_ICacheEnable
++
++ @Description Enables the instruction cache for memory pages that are
++ not cache inhibited.
++
++ @Return None.
++*//***************************************************************************/
++void CORE_ICacheEnable(void);
++
++/**************************************************************************//**
++ @Function CORE_DCacheDisable
++
++ @Description Disables the data cache.
++
++ @Return None.
++*//***************************************************************************/
++void CORE_DCacheDisable(void);
++
++/**************************************************************************//**
++ @Function CORE_ICacheDisable
++
++ @Description Disables the instruction cache.
++
++ @Return None.
++*//***************************************************************************/
++void CORE_ICacheDisable(void);
++
++
++
++#if defined(CORE_E300)
++#include "e300_ext.h"
++#elif defined(CORE_E500V2) || defined(CORE_E500MC) || defined(CORE_E5500) || defined(CORE_E6500)
++#include "e500v2_ext.h"
++#if !defined(NCSW_LINUX)
++#include "e500v2_asm_ext.h"
++#endif
++#else
++#error "Core not defined!"
++#endif
++
++
++#endif /* __PPC_EXT_H */
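Because CORE_CACHELINE_SIZE resolves per core family above (32 bytes for e300/e500v2, 64 for e500mc/e5500/e6500), it is the natural constant for sizing shared structures so instances never straddle a line. A sketch with a hypothetical descriptor layout, not part of the patch:

    /* 16 bytes of fields, padded so each descriptor owns whole cache
     * lines on any of the supported cores. */
    typedef struct t_DmaDesc {
        uint32_t status;
        uint32_t length;
        uint64_t bufAddr;
        uint8_t  pad[CORE_CACHELINE_SIZE - 16];
    } t_DmaDesc;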
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/ddr_std_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/ddr_std_ext.h
+new file mode 100644
+index 00000000..8bb343fc
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/ddr_std_ext.h
+@@ -0,0 +1,77 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DDR_STD_EXT_H
++#define __DDR_STD_EXT_H
++
++
++/**************************************************************************//**
++ @Group ddr_Generic_Resources
++
++ @Description DDR generic functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++
++/**************************************************************************//**
++ @Description SPD maximum size
++*//***************************************************************************/
++#define SPD_MAX_SIZE 256
++
++/**************************************************************************//**
++ @Description DDR type selection.
++*//***************************************************************************/
++typedef enum e_DdrType
++{
++ e_DDR_DDR1,
++ e_DDR_DDR2,
++ e_DDR_DDR3,
++ e_DDR_DDR3L,
++ e_DDR_DDR4
++} e_DdrType;
++
++/**************************************************************************//**
++ @Description DDR bus width mode.
++*//***************************************************************************/
++typedef enum e_DdrMode
++{
++ e_DDR_BUS_WIDTH_32BIT,
++ e_DDR_BUS_WIDTH_64BIT
++} e_DdrMode;
++
++/** @} */ /* end of ddr_Generic_Resources group */
++
++
++
++#endif /* __DDR_STD_EXT_H */
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/debug_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/debug_ext.h
+new file mode 100644
+index 00000000..57db0a14
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/debug_ext.h
+@@ -0,0 +1,233 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File debug_ext.h
++
++ @Description Debug mode definitions.
++*//***************************************************************************/
++
++#ifndef __DEBUG_EXT_H
++#define __DEBUG_EXT_H
++
++#include "std_ext.h"
++#include "xx_ext.h"
++#include "memcpy_ext.h"
++#if (DEBUG_ERRORS > 0)
++#include "sprint_ext.h"
++#include "string_ext.h"
++#endif /* DEBUG_ERRORS > 0 */
++
++
++#if (DEBUG_ERRORS > 0)
++
++/* Internally used macros */
++
++#define DUMP_Print XX_Print
++#define DUMP_MAX_LEVELS 6
++#define DUMP_IDX_LEN 6
++#define DUMP_MAX_STR 64
++
++
++#define _CREATE_DUMP_SUBSTR(phrase) \
++ dumpTmpLevel = 0; dumpSubStr[0] = '\0'; \
++ snprintf(dumpTmpStr, DUMP_MAX_STR, "%s", #phrase); \
++ p_DumpToken = strtok(dumpTmpStr, (dumpIsArr[0] ? "[" : ".")); \
++ while ((p_DumpToken != NULL) && (dumpTmpLevel < DUMP_MAX_LEVELS)) \
++ { \
++ strlcat(dumpSubStr, p_DumpToken, DUMP_MAX_STR); \
++ if (dumpIsArr[dumpTmpLevel]) \
++ { \
++ strlcat(dumpSubStr, dumpIdxStr[dumpTmpLevel], DUMP_MAX_STR); \
++ p_DumpToken = strtok(NULL, "."); \
++ } \
++ if ((p_DumpToken != NULL) && \
++ ((p_DumpToken = strtok(NULL, (dumpIsArr[++dumpTmpLevel] ? "[" : "."))) != NULL)) \
++ strlcat(dumpSubStr, ".", DUMP_MAX_STR); \
++ }
++
++
++/**************************************************************************//**
++ @Group gen_id General Drivers Utilities
++
++ @Description External routines.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group dump_id Memory and Registers Dump Mechanism
++
++ @Description Macros for dumping memory mapped structures.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description Declaration of dump mechanism variables.
++
++ This macro must be declared at the beginning of each routine
++ which uses the dump mechanism macros, before the routine's code
++ starts.
++*//***************************************************************************/
++#define DECLARE_DUMP \
++ char dumpIdxStr[DUMP_MAX_LEVELS + 1][DUMP_IDX_LEN] = { "", }; \
++ char dumpSubStr[DUMP_MAX_STR] = ""; \
++ char dumpTmpStr[DUMP_MAX_STR] = ""; \
++ char *p_DumpToken = NULL; \
++ int dumpArrIdx = 0, dumpArrSize = 0, dumpLevel = 0, dumpTmpLevel = 0; \
++ uint8_t dumpIsArr[DUMP_MAX_LEVELS + 1] = { 0 }; \
++ /* Prevent warnings if not all used */ \
++ UNUSED(dumpIdxStr[0][0]); \
++ UNUSED(dumpSubStr[0]); \
++ UNUSED(dumpTmpStr[0]); \
++ UNUSED(p_DumpToken); \
++ UNUSED(dumpArrIdx); \
++ UNUSED(dumpArrSize); \
++ UNUSED(dumpLevel); \
++ UNUSED(dumpTmpLevel); \
++ UNUSED(dumpIsArr[0]);
++
++
++/**************************************************************************//**
++ @Description Prints a title for a subsequent dumped structure or memory.
++
++ The inputs for this macro are the structure/memory title and
++ its base address.
++*//***************************************************************************/
++#define DUMP_TITLE(addr, msg) \
++ DUMP_Print("\r\n"); DUMP_Print msg; \
++ if (addr) \
++ DUMP_Print(" (%p)", (addr)); \
++ DUMP_Print("\r\n---------------------------------------------------------\r\n");
++
++/**************************************************************************//**
++ @Description Prints a subtitle for a subsequent dumped sub-structure (optional).
++
++ The inputs for this macro are the sub-structure subtitle.
++ A separating line with this subtitle will be printed.
++*//***************************************************************************/
++#define DUMP_SUBTITLE(subtitle) \
++ DUMP_Print("----------- "); DUMP_Print subtitle; DUMP_Print("\r\n")
++
++
++/**************************************************************************//**
++ @Description Dumps a memory region in 4-bytes aligned format.
++
++ The inputs for this macro are the base address and size
++ (in bytes) of the memory region.
++*//***************************************************************************/
++#define DUMP_MEMORY(addr, size) \
++ MemDisp((uint8_t *)(addr), (int)(size))
++
++
++/**************************************************************************//**
++ @Description Declares a dump loop, for dumping a sub-structure array.
++
++ The inputs for this macro are:
++ - idx: an index variable, for indexing the sub-structure items
++ inside the loop. This variable must be declared separately
++ in the beginning of the routine.
++ - cnt: the number of times to repeat the loop. This number should
++ equal the number of items in the sub-structures array.
++
++ Note that the body of the loop must be written inside braces.
++*//***************************************************************************/
++#define DUMP_SUBSTRUCT_ARRAY(idx, cnt) \
++ for (idx=0, dumpIsArr[dumpLevel++] = 1; \
++ (idx < cnt) && (dumpLevel > 0) && snprintf(dumpIdxStr[dumpLevel-1], DUMP_IDX_LEN, "[%d]", idx); \
++ idx++, ((idx < cnt) || (dumpIsArr[--dumpLevel] = 0)))
++
++
++/**************************************************************************//**
++ @Description Dumps a structure's member variable.
++
++ The input for this macro is the full reference for the member
++ variable, where the structure is referenced using a pointer.
++
++ Note that a member array must be dumped using the DUMP_ARR macro,
++ rather than using this macro.
++
++ If the member variable is part of a sub-structure hierarchy,
++ the full hierarchy (including array indexing) must be specified.
++
++ Examples: p_Struct->member
++ p_Struct->sub.member
++ p_Struct->sub[i].member
++*//***************************************************************************/
++#define DUMP_VAR(st, phrase) \
++ do { \
++ void *addr = (void *)&((st)->phrase); \
++ physAddress_t physAddr = XX_VirtToPhys(addr); \
++ _CREATE_DUMP_SUBSTR(phrase); \
++ DUMP_Print("0x%010llX: 0x%08x%8s\t%s\r\n", \
++ physAddr, GET_UINT32(*(uint32_t*)addr), "", dumpSubStr); \
++ } while (0)
++
++
++/**************************************************************************//**
++ @Description Dumps a structure's members array.
++
++ The input for this macro is the full reference for the members
++ array, where the structure is referenced using a pointer.
++
++ If the members array is part of a sub-structure hierarchy,
++ the full hierarchy (including array indexing) must be specified.
++
++ Examples: p_Struct->array
++ p_Struct->sub.array
++ p_Struct->sub[i].array
++*//***************************************************************************/
++#define DUMP_ARR(st, phrase) \
++ do { \
++ physAddress_t physAddr; \
++ _CREATE_DUMP_SUBSTR(phrase); \
++ dumpArrSize = ARRAY_SIZE((st)->phrase); \
++ for (dumpArrIdx=0; dumpArrIdx < dumpArrSize; dumpArrIdx++) { \
++ physAddr = XX_VirtToPhys((void *)&((st)->phrase[dumpArrIdx])); \
++ DUMP_Print("0x%010llX: 0x%08x%8s\t%s[%d]\r\n", \
++ physAddr, GET_UINT32((st)->phrase[dumpArrIdx]), "", dumpSubStr, dumpArrIdx); \
++ } \
++ } while (0)
++
++
++
++#endif /* DEBUG_ERRORS > 0 */
++
++
++/** @} */ /* end of dump_id group */
++/** @} */ /* end of gen_id group */
++
++
++#endif /* __DEBUG_EXT_H */
++
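A usage sketch for the dump mechanism above (available only when DEBUG_ERRORS > 0), not part of the patch: DECLARE_DUMP must open the routine, DUMP_TITLE labels the block, and DUMP_VAR/DUMP_ARR print individual members. The register-block layout here is hypothetical.

    typedef struct t_MyRegs {
        uint32_t cfg;     /* configuration register      */
        uint32_t stat[4]; /* per-queue status registers  */
    } t_MyRegs;

    static void my_regs_dump(t_MyRegs *p_Regs)
    {
        DECLARE_DUMP;

        DUMP_TITLE(p_Regs, ("MyRegs"));
        DUMP_VAR(p_Regs, cfg);
        DUMP_ARR(p_Regs, stat);
    }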
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/endian_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/endian_ext.h
+new file mode 100644
+index 00000000..5cdec668
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/endian_ext.h
+@@ -0,0 +1,447 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++
++ @File endian_ext.h
++
++ @Description Big/little endian swapping routines.
++*//***************************************************************************/
++
++#ifndef __ENDIAN_EXT_H
++#define __ENDIAN_EXT_H
++
++#include "std_ext.h"
++
++
++/**************************************************************************//**
++ @Group gen_id General Drivers Utilities
++
++ @Description General usage API. This API is intended for usage by both the
++ internal modules and the user's application.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group endian_id Big/Little-Endian Conversion
++
++ @Description Routines and macros for Big/Little-Endian conversion and
++ general byte swapping.
++
++ All routines and macros expect unsigned values as
++ parameters, but generate the correct result for
++ signed values as well. Therefore, signed/unsigned casting is allowed.
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Collection Byte-Swap Macros
++
++ Macros for swapping byte order.
++
++ @Cautions The parameters of these macros are evaluated multiple times.
++ For calculated expressions or expressions that contain function
++ calls it is recommended to use the byte-swap routines.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description Swaps the byte order of a given 16-bit value.
++
++ @Param[in] val - The 16-bit value to swap.
++
++ @Return The byte-swapped value.
++
++ @Cautions The given value is evaluated multiple times by this macro.
++ For calculated expressions or expressions that contain function
++ calls it is recommended to use the SwapUint16() routine.
++
++ @hideinitializer
++*//***************************************************************************/
++#define SWAP_UINT16(val) \
++ ((uint16_t)((((val) & 0x00FF) << 8) | (((val) & 0xFF00) >> 8)))
++
++/**************************************************************************//**
++ @Description Swaps the byte order of a given 32-bit value.
++
++ @Param[in] val - The 32-bit value to swap.
++
++ @Return The byte-swapped value.
++
++ @Cautions The given value is evaluated multiple times by this macro.
++ For calculated expressions or expressions that contain function
++ calls it is recommended to use the SwapUint32() routine.
++
++ @hideinitializer
++*//***************************************************************************/
++#define SWAP_UINT32(val) \
++ ((uint32_t)((((val) & 0x000000FF) << 24) | \
++ (((val) & 0x0000FF00) << 8) | \
++ (((val) & 0x00FF0000) >> 8) | \
++ (((val) & 0xFF000000) >> 24)))
++
++/**************************************************************************//**
++ @Description Swaps the byte order of a given 64-bit value.
++
++ @Param[in] val - The 64-bit value to swap.
++
++ @Return The byte-swapped value.
++
++ @Cautions The given value is evaluated multiple times by this macro.
++ For calculated expressions or expressions that contain function
++ calls it is recommended to use the SwapUint64() routine.
++
++ @hideinitializer
++*//***************************************************************************/
++#define SWAP_UINT64(val) \
++ ((uint64_t)((((val) & 0x00000000000000FFULL) << 56) | \
++ (((val) & 0x000000000000FF00ULL) << 40) | \
++ (((val) & 0x0000000000FF0000ULL) << 24) | \
++ (((val) & 0x00000000FF000000ULL) << 8) | \
++ (((val) & 0x000000FF00000000ULL) >> 8) | \
++ (((val) & 0x0000FF0000000000ULL) >> 24) | \
++ (((val) & 0x00FF000000000000ULL) >> 40) | \
++ (((val) & 0xFF00000000000000ULL) >> 56)))
++
++/* @} */
++
++/**************************************************************************//**
++ @Collection Byte-Swap Routines
++
++ Routines for swapping the byte order of a given parameter and
++ returning the swapped value.
++
++ These inline routines are safer than the byte-swap macros,
++ because they evaluate the parameter expression only once.
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function SwapUint16
++
++ @Description Returns the byte-swapped value of a given 16-bit value.
++
++ @Param[in] val - The 16-bit value.
++
++ @Return The byte-swapped value of the parameter.
++*//***************************************************************************/
++static __inline__ uint16_t SwapUint16(uint16_t val)
++{
++ return (uint16_t)(((val & 0x00FF) << 8) |
++ ((val & 0xFF00) >> 8));
++}
++
++/**************************************************************************//**
++ @Function SwapUint32
++
++ @Description Returns the byte-swapped value of a given 32-bit value.
++
++ @Param[in] val - The 32-bit value.
++
++ @Return The byte-swapped value of the parameter.
++*//***************************************************************************/
++static __inline__ uint32_t SwapUint32(uint32_t val)
++{
++ return (uint32_t)(((val & 0x000000FF) << 24) |
++ ((val & 0x0000FF00) << 8) |
++ ((val & 0x00FF0000) >> 8) |
++ ((val & 0xFF000000) >> 24));
++}
++
++/**************************************************************************//**
++ @Function SwapUint64
++
++ @Description Returns the byte-swapped value of a given 64-bit value.
++
++ @Param[in] val - The 64-bit value.
++
++ @Return The byte-swapped value of the parameter.
++*//***************************************************************************/
++static __inline__ uint64_t SwapUint64(uint64_t val)
++{
++ return (uint64_t)(((val & 0x00000000000000FFULL) << 56) |
++ ((val & 0x000000000000FF00ULL) << 40) |
++ ((val & 0x0000000000FF0000ULL) << 24) |
++ ((val & 0x00000000FF000000ULL) << 8) |
++ ((val & 0x000000FF00000000ULL) >> 8) |
++ ((val & 0x0000FF0000000000ULL) >> 24) |
++ ((val & 0x00FF000000000000ULL) >> 40) |
++ ((val & 0xFF00000000000000ULL) >> 56));
++}
++
++/* @} */
++
++/**************************************************************************//**
++ @Collection In-place Byte-Swap-And-Set Routines
++
++ Routines for swapping the byte order of a given variable and
++ setting the swapped value back to the same variable.
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function SwapUint16P
++
++ @Description Swaps the byte order of a given 16-bit variable.
++
++ @Param[in] p_Val - Pointer to the 16-bit variable.
++
++ @Return None.
++*//***************************************************************************/
++static __inline__ void SwapUint16P(uint16_t *p_Val)
++{
++ *p_Val = SwapUint16(*p_Val);
++}
++
++/**************************************************************************//**
++ @Function SwapUint32P
++
++ @Description Swaps the byte order of a given 32-bit variable.
++
++ @Param[in] p_Val - Pointer to the 32-bit variable.
++
++ @Return None.
++*//***************************************************************************/
++static __inline__ void SwapUint32P(uint32_t *p_Val)
++{
++ *p_Val = SwapUint32(*p_Val);
++}
++
++/**************************************************************************//**
++ @Function SwapUint64P
++
++ @Description Swaps the byte order of a given 64-bit variable.
++
++ @Param[in] p_Val - Pointer to the 64-bit variable.
++
++ @Return None.
++*//***************************************************************************/
++static __inline__ void SwapUint64P(uint64_t *p_Val)
++{
++ *p_Val = SwapUint64(*p_Val);
++}
++
++/* @} */
++
++
++/**************************************************************************//**
++ @Collection Little-Endian Conversion Macros
++
++ These macros convert given parameters to or from Little-Endian
++ format. Use these macros when you want to read or write a specific
++ Little-Endian value in memory, without knowing the CPU
++ byte order in advance.
++
++ These macros use the byte-swap routines. For conversion of
++ constants in initialization structures, you may use the CONST
++ versions of these macros (see below), which use the
++ byte-swap macros instead.
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description Converts a given 16-bit value from CPU byte order to
++ Little-Endian byte order.
++
++ @Param[in] val - The 16-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define CPU_TO_LE16(val) SwapUint16(val)
++
++/**************************************************************************//**
++ @Description Converts a given 32-bit value from CPU byte order to
++ Little-Endian byte order.
++
++ @Param[in] val - The 32-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define CPU_TO_LE32(val) SwapUint32(val)
++
++/**************************************************************************//**
++ @Description Converts a given 64-bit value from CPU byte order to
++ Little-Endian byte order.
++
++ @Param[in] val - The 64-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define CPU_TO_LE64(val) SwapUint64(val)
++
++
++/**************************************************************************//**
++ @Description Converts a given 16-bit value from Little-Endian byte order to
++ CPU byte order.
++
++ @Param[in] val - The 16-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define LE16_TO_CPU(val) CPU_TO_LE16(val)
++
++/**************************************************************************//**
++ @Description Converts a given 32-bit value from Little-Endian byte order to
++ CPU byte order.
++
++ @Param[in] val - The 32-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define LE32_TO_CPU(val) CPU_TO_LE32(val)
++
++/**************************************************************************//**
++ @Description Converts a given 64-bit value from Little-Endian byte order to
++ CPU byte order.
++
++ @Param[in] val - The 64-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define LE64_TO_CPU(val) CPU_TO_LE64(val)
++
++/* @} */
++
++/**************************************************************************//**
++ @Collection Little-Endian Constant Conversion Macros
++
++ These macros convert given constants to or from Little-Endian
++ format. Use these macros when you want to read or write a specific
++ Little-Endian constant in memory, without knowing the
++ CPU byte order in advance.
++
++ These macros use the byte-swap macros, and can therefore be used for
++ conversion of constants in initialization structures.
++
++ @Cautions The parameters of these macros are evaluated multiple times.
++ For non-constant expressions, use the non-CONST macro versions.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description Converts a given 16-bit constant from CPU byte order to
++ Little-Endian byte order.
++
++ @Param[in] val - The 16-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define CONST_CPU_TO_LE16(val) SWAP_UINT16(val)
++
++/**************************************************************************//**
++ @Description Converts a given 32-bit constant from CPU byte order to
++ Little-Endian byte order.
++
++ @Param[in] val - The 32-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define CONST_CPU_TO_LE32(val) SWAP_UINT32(val)
++
++/**************************************************************************//**
++ @Description Converts a given 64-bit constant from CPU byte order to
++ Little-Endian byte order.
++
++ @Param[in] val - The 64-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define CONST_CPU_TO_LE64(val) SWAP_UINT64(val)
++
++
++/**************************************************************************//**
++ @Description Converts a given 16-bit constant from Little-Endian byte order
++ to CPU byte order.
++
++ @Param[in] val - The 16-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define CONST_LE16_TO_CPU(val) CONST_CPU_TO_LE16(val)
++
++/**************************************************************************//**
++ @Description Converts a given 32-bit constant from Little-Endian byte order
++ to CPU byte order.
++
++ @Param[in] val - The 32-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define CONST_LE32_TO_CPU(val) CONST_CPU_TO_LE32(val)
++
++/**************************************************************************//**
++ @Description Converts a given 64-bit constant from Little-Endian byte order
++ to CPU byte order.
++
++ @Param[in] val - The 64-bit value to convert.
++
++ @Return The converted value.
++
++ @hideinitializer
++*//***************************************************************************/
++#define CONST_LE64_TO_CPU(val) CONST_CPU_TO_LE64(val)
++
++/* @} */
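++
++/* A usage sketch for the CONST variants (illustrative only; the structure
++ * and field names are hypothetical). Because these expand to pure macro
++ * expressions, they are legal in static initializers, where the non-CONST
++ * (routine-based) versions would not be:
++ *
++ * static t_SomeHwDescriptor desc = {
++ * .leMagic = CONST_CPU_TO_LE32(0xCAFEBABE),
++ * .leFlags = CONST_CPU_TO_LE16(0x0001),
++ * };
++ */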
++
++
++/** @} */ /* end of endian_id group */
++/** @} */ /* end of gen_id group */
++
++
++#endif /* __ENDIAN_EXT_H */
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/enet_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/enet_ext.h
+new file mode 100644
+index 00000000..ef3bee55
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/enet_ext.h
+@@ -0,0 +1,205 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File enet_ext.h
++
++ @Description Ethernet generic definitions and enums.
++*//***************************************************************************/
++
++#ifndef __ENET_EXT_H
++#define __ENET_EXT_H
++
++#include "fsl_enet.h"
++
++#define ENET_NUM_OCTETS_PER_ADDRESS 6 /**< Number of octets (8-bit bytes) in an Ethernet address */
++#define ENET_GROUP_ADDR 0x01 /**< Group address mask for Ethernet addresses */
++
++
++/**************************************************************************//**
++ @Description Ethernet Address
++*//***************************************************************************/
++typedef uint8_t t_EnetAddr[ENET_NUM_OCTETS_PER_ADDRESS];
++
++/**************************************************************************//**
++ @Description Ethernet Address Type.
++*//***************************************************************************/
++typedef enum e_EnetAddrType
++{
++ e_ENET_ADDR_TYPE_INDIVIDUAL, /**< Individual (unicast) address */
++ e_ENET_ADDR_TYPE_GROUP, /**< Group (multicast) address */
++ e_ENET_ADDR_TYPE_BROADCAST /**< Broadcast address */
++} e_EnetAddrType;
++
++/**************************************************************************//**
++ @Description Ethernet MAC-PHY Interface
++*//***************************************************************************/
++typedef enum e_EnetInterface
++{
++ e_ENET_IF_MII = E_ENET_IF_MII, /**< MII interface */
++ e_ENET_IF_RMII = E_ENET_IF_RMII, /**< RMII interface */
++ e_ENET_IF_SMII = E_ENET_IF_SMII, /**< SMII interface */
++ e_ENET_IF_GMII = E_ENET_IF_GMII, /**< GMII interface */
++ e_ENET_IF_RGMII = E_ENET_IF_RGMII, /**< RGMII interface */
++ e_ENET_IF_TBI = E_ENET_IF_TBI, /**< TBI interface */
++ e_ENET_IF_RTBI = E_ENET_IF_RTBI, /**< RTBI interface */
++ e_ENET_IF_SGMII = E_ENET_IF_SGMII, /**< SGMII interface */
++ e_ENET_IF_XGMII = E_ENET_IF_XGMII, /**< XGMII interface */
++ e_ENET_IF_QSGMII= E_ENET_IF_QSGMII, /**< QSGMII interface */
++ e_ENET_IF_XFI = E_ENET_IF_XFI /**< XFI interface */
++} e_EnetInterface;
++
++#define ENET_IF_SGMII_BASEX 0x80000000 /**< SGMII/QSGMII interface with 1000BaseX
++ auto-negotiation between MAC and phy
++ or backplane;
++ Note: 1000BaseX auto-negotiation relates
++ only to the interface between MAC and phy/backplane;
++ the SGMII phy can still synchronize with the far-end
++ phy at 10 Mbps, 100 Mbps or 1000 Mbps */
++
++/**************************************************************************//**
++ @Description Ethernet Duplex Mode
++*//***************************************************************************/
++typedef enum e_EnetDuplexMode
++{
++ e_ENET_HALF_DUPLEX, /**< Half-Duplex mode */
++ e_ENET_FULL_DUPLEX /**< Full-Duplex mode */
++} e_EnetDuplexMode;
++
++/**************************************************************************//**
++ @Description Ethernet Speed (nominal data rate)
++*//***************************************************************************/
++typedef enum e_EnetSpeed
++{
++ e_ENET_SPEED_10 = E_ENET_SPEED_10, /**< 10 Mbps */
++ e_ENET_SPEED_100 = E_ENET_SPEED_100, /**< 100 Mbps */
++ e_ENET_SPEED_1000 = E_ENET_SPEED_1000, /**< 1000 Mbps = 1 Gbps */
++ e_ENET_SPEED_2500 = E_ENET_SPEED_2500, /**< 2500 Mbps = 2.5 Gbps */
++ e_ENET_SPEED_10000 = E_ENET_SPEED_10000 /**< 10000 Mbps = 10 Gbps */
++} e_EnetSpeed;
++
++/**************************************************************************//**
++ @Description Ethernet mode (combination of MAC-PHY interface and speed)
++*//***************************************************************************/
++typedef enum e_EnetMode
++{
++ e_ENET_MODE_INVALID = 0, /**< Invalid Ethernet mode */
++ e_ENET_MODE_MII_10 = (e_ENET_IF_MII | e_ENET_SPEED_10), /**< 10 Mbps MII */
++ e_ENET_MODE_MII_100 = (e_ENET_IF_MII | e_ENET_SPEED_100), /**< 100 Mbps MII */
++ e_ENET_MODE_RMII_10 = (e_ENET_IF_RMII | e_ENET_SPEED_10), /**< 10 Mbps RMII */
++ e_ENET_MODE_RMII_100 = (e_ENET_IF_RMII | e_ENET_SPEED_100), /**< 100 Mbps RMII */
++ e_ENET_MODE_SMII_10 = (e_ENET_IF_SMII | e_ENET_SPEED_10), /**< 10 Mbps SMII */
++ e_ENET_MODE_SMII_100 = (e_ENET_IF_SMII | e_ENET_SPEED_100), /**< 100 Mbps SMII */
++ e_ENET_MODE_GMII_1000 = (e_ENET_IF_GMII | e_ENET_SPEED_1000), /**< 1000 Mbps GMII */
++ e_ENET_MODE_RGMII_10 = (e_ENET_IF_RGMII | e_ENET_SPEED_10), /**< 10 Mbps RGMII */
++ e_ENET_MODE_RGMII_100 = (e_ENET_IF_RGMII | e_ENET_SPEED_100), /**< 100 Mbps RGMII */
++ e_ENET_MODE_RGMII_1000 = (e_ENET_IF_RGMII | e_ENET_SPEED_1000), /**< 1000 Mbps RGMII */
++ e_ENET_MODE_TBI_1000 = (e_ENET_IF_TBI | e_ENET_SPEED_1000), /**< 1000 Mbps TBI */
++ e_ENET_MODE_RTBI_1000 = (e_ENET_IF_RTBI | e_ENET_SPEED_1000), /**< 1000 Mbps RTBI */
++ e_ENET_MODE_SGMII_10 = (e_ENET_IF_SGMII | e_ENET_SPEED_10),
++ /**< 10 Mbps SGMII with auto-negotiation between MAC and
++ SGMII phy according to Cisco SGMII specification */
++ e_ENET_MODE_SGMII_100 = (e_ENET_IF_SGMII | e_ENET_SPEED_100),
++ /**< 100 Mbps SGMII with auto-negotiation between MAC and
++ SGMII phy according to Cisco SGMII specification */
++ e_ENET_MODE_SGMII_1000 = (e_ENET_IF_SGMII | e_ENET_SPEED_1000),
++ /**< 1000 Mbps SGMII with auto-negotiation between MAC and
++ SGMII phy according to Cisco SGMII specification */
++ e_ENET_MODE_SGMII_2500 = (e_ENET_IF_SGMII | e_ENET_SPEED_2500), /**< 2500 Mbps SGMII */
++ e_ENET_MODE_SGMII_BASEX_10 = (ENET_IF_SGMII_BASEX | e_ENET_IF_SGMII | e_ENET_SPEED_10),
++ /**< 10 Mbps SGMII with 1000BaseX auto-negotiation between
++ MAC and SGMII phy or backplane */
++ e_ENET_MODE_SGMII_BASEX_100 = (ENET_IF_SGMII_BASEX | e_ENET_IF_SGMII | e_ENET_SPEED_100),
++ /**< 100 Mbps SGMII with 1000BaseX auto-negotiation between
++ MAC and SGMII phy or backplane */
++ e_ENET_MODE_SGMII_BASEX_1000 = (ENET_IF_SGMII_BASEX | e_ENET_IF_SGMII | e_ENET_SPEED_1000),
++ /**< 1000 Mbps SGMII with 1000BaseX auto-negotiation between
++ MAC and SGMII phy or backplane */
++ e_ENET_MODE_QSGMII_1000 = (e_ENET_IF_QSGMII| e_ENET_SPEED_1000),
++ /**< 1000 Mbps QSGMII with auto-negotiation between MAC and
++ QSGMII phy according to Cisco QSGMII specification */
++ e_ENET_MODE_QSGMII_BASEX_1000 = (ENET_IF_SGMII_BASEX | e_ENET_IF_QSGMII| e_ENET_SPEED_1000),
++ /**< 1000 Mbps QSGMII with 1000BaseX auto-negotiation between
++ MAC and QSGMII phy or backplane */
++ e_ENET_MODE_XGMII_10000 = (e_ENET_IF_XGMII | e_ENET_SPEED_10000), /**< 10000 Mbps XGMII */
++ e_ENET_MODE_XFI_10000 = (e_ENET_IF_XFI | e_ENET_SPEED_10000) /**< 10000 Mbps XFI */
++} e_EnetMode;
++
++
++#define IS_ENET_MODE_VALID(mode) \
++ (((mode) == e_ENET_MODE_MII_10 ) || \
++ ((mode) == e_ENET_MODE_MII_100 ) || \
++ ((mode) == e_ENET_MODE_RMII_10 ) || \
++ ((mode) == e_ENET_MODE_RMII_100 ) || \
++ ((mode) == e_ENET_MODE_SMII_10 ) || \
++ ((mode) == e_ENET_MODE_SMII_100 ) || \
++ ((mode) == e_ENET_MODE_GMII_1000 ) || \
++ ((mode) == e_ENET_MODE_RGMII_10 ) || \
++ ((mode) == e_ENET_MODE_RGMII_100 ) || \
++ ((mode) == e_ENET_MODE_RGMII_1000 ) || \
++ ((mode) == e_ENET_MODE_TBI_1000 ) || \
++ ((mode) == e_ENET_MODE_RTBI_1000 ) || \
++ ((mode) == e_ENET_MODE_SGMII_10 ) || \
++ ((mode) == e_ENET_MODE_SGMII_100 ) || \
++ ((mode) == e_ENET_MODE_SGMII_1000 ) || \
++ ((mode) == e_ENET_MODE_SGMII_BASEX_10 ) || \
++ ((mode) == e_ENET_MODE_SGMII_BASEX_100 ) || \
++ ((mode) == e_ENET_MODE_SGMII_BASEX_1000 ) || \
++ ((mode) == e_ENET_MODE_XGMII_10000) || \
++ ((mode) == e_ENET_MODE_QSGMII_1000) || \
++ ((mode) == e_ENET_MODE_QSGMII_BASEX_1000) || \
++ ((mode) == e_ENET_MODE_XFI_10000))
++
++
++#define MAKE_ENET_MODE(_interface, _speed) (e_EnetMode)((_interface) | (_speed))
++
++#define ENET_INTERFACE_FROM_MODE(mode) (e_EnetInterface)((mode) & 0x0FFF0000)
++#define ENET_SPEED_FROM_MODE(mode) (e_EnetSpeed)((mode) & 0x0000FFFF)
++
++#define ENET_ADDR_TO_UINT64(_enetAddr) \
++ (uint64_t)(((uint64_t)(_enetAddr)[0] << 40) | \
++ ((uint64_t)(_enetAddr)[1] << 32) | \
++ ((uint64_t)(_enetAddr)[2] << 24) | \
++ ((uint64_t)(_enetAddr)[3] << 16) | \
++ ((uint64_t)(_enetAddr)[4] << 8) | \
++ ((uint64_t)(_enetAddr)[5]))
++
++#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enetAddr) \
++ do { \
++ int i; \
++ for (i=0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) \
++ (_enetAddr)[i] = (uint8_t)((_addr64) >> ((5-i)*8)); \
++ } while (0)
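++
++/* A short usage sketch (illustrative only; the values are made up). A MAC
++ * address can round-trip through a 64-bit scalar, and an Ethernet mode is
++ * just the bitwise OR of an interface and a speed:
++ *
++ * t_EnetAddr addr;
++ * MAKE_ENET_ADDR_FROM_UINT64(0x0000AABBCCDDEEFFULL, addr);
++ * uint64_t addr64 = ENET_ADDR_TO_UINT64(addr); // 0x0000AABBCCDDEEFF
++ *
++ * e_EnetMode mode = MAKE_ENET_MODE(e_ENET_IF_SGMII, e_ENET_SPEED_1000);
++ * // mode == e_ENET_MODE_SGMII_1000; IS_ENET_MODE_VALID(mode) is true
++ */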
++
++
++#endif /* __ENET_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/error_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/error_ext.h
+new file mode 100644
+index 00000000..2a5ad67b
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/error_ext.h
+@@ -0,0 +1,529 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File error_ext.h
++
++ @Description Error definitions.
++*//***************************************************************************/
++
++#ifndef __ERROR_EXT_H
++#define __ERROR_EXT_H
++
++#if !defined(NCSW_LINUX)
++#include <errno.h>
++#endif
++
++#include "std_ext.h"
++#include "xx_ext.h"
++#include "core_ext.h"
++
++
++
++
++/**************************************************************************//**
++ @Group gen_id General Drivers Utilities
++
++ @Description External routines.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group gen_error_id Errors, Events and Debug
++
++ @Description External routines.
++
++ @{
++*//***************************************************************************/
++
++/******************************************************************************
++The scheme below describes the bit layout of error codes (bit 0 is the
++most-significant bit):
++
++ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
++| Reserved (should be zero) | Module ID |
++
++ 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
++| Error Type |
++******************************************************************************/
++
++#define ERROR_CODE(_err) ((((uint32_t)_err) & 0x0000FFFF) | __ERR_MODULE__)
++
++#define GET_ERROR_TYPE(_errcode) ((_errcode) & 0x0000FFFF)
++ /**< Extract error type (#e_ErrorType) from
++ error code (#t_Error) */
++
++#define GET_ERROR_MODULE(_errcode) ((_errcode) & 0x00FF0000)
++ /**< Extract module code from error code (#t_Error) */
++
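++/* A worked example of the composition above (illustrative; it assumes a
++ * module whose __ERR_MODULE__ is 0x00040000). ERROR_CODE() merges the error
++ * type with the module field, and the GET_* macros take the code apart again:
++ *
++ * t_Error err = ERROR_CODE(E_INVALID_VALUE);
++ * // GET_ERROR_TYPE(err) == E_INVALID_VALUE (mask 0x0000FFFF)
++ * // GET_ERROR_MODULE(err) == 0x00040000 (mask 0x00FF0000)
++ */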
++
++/**************************************************************************//**
++ @Description Error Type Enumeration
++*//***************************************************************************/
++typedef enum e_ErrorType /* Comments / Associated Message Strings */
++{ /* ------------------------------------------------------------ */
++ E_OK = 0 /* Never use "RETURN_ERROR" with E_OK; Use "return E_OK;" */
++ ,E_WRITE_FAILED = EIO /**< Write access failed on memory/device. */
++ /* String: none, or device name. */
++ ,E_NO_DEVICE = ENXIO /**< The associated device is not initialized. */
++ /* String: none. */
++ ,E_NOT_AVAILABLE = EAGAIN
++ /**< Resource is unavailable. */
++ /* String: none, unless the operation is not the main goal
++ of the function (in this case add resource description). */
++ ,E_NO_MEMORY = ENOMEM /**< External memory allocation failed. */
++ /* String: description of item for which allocation failed. */
++ ,E_INVALID_ADDRESS = EFAULT
++ /**< Invalid address. */
++ /* String: description of the specific violation. */
++ ,E_BUSY = EBUSY /**< Resource or module is busy. */
++ /* String: none, unless the operation is not the main goal
++ of the function (in this case add resource description). */
++ ,E_ALREADY_EXISTS = EEXIST
++ /**< Requested resource or item already exists. */
++ /* Use when resource duplication or sharing are not allowed.
++ String: none, unless the operation is not the main goal
++ of the function (in this case add item description). */
++ ,E_INVALID_OPERATION = ENODEV
++ /**< The operation/command is invalid (unrecognized). */
++ /* String: none. */
++ ,E_INVALID_VALUE = EDOM /**< Invalid value. */
++ /* Use for non-enumeration parameters, and
++ only when other error types are not suitable.
++ String: parameter description + "(should be <attribute>)",
++ e.g: "Maximum Rx buffer length (should be divisible by 8)",
++ "Channel number (should be even)". */
++ ,E_NOT_IN_RANGE = ERANGE/**< Parameter value is out of range. */
++ /* Don't use this error for enumeration parameters.
++ String: parameter description + "(should be %d-%d)",
++ e.g: "Number of pad characters (should be 0-15)". */
++ ,E_NOT_SUPPORTED = ENOSYS
++ /**< The function is not supported or not implemented. */
++ /* String: none. */
++ ,E_INVALID_STATE /**< The operation is not allowed in current module state. */
++ /* String: none. */
++ ,E_INVALID_HANDLE /**< Invalid handle of module or object. */
++ /* String: none, unless the function takes in more than one
++ handle (in this case add the handle description) */
++ ,E_INVALID_ID /**< Invalid module ID (usually enumeration or index). */
++ /* String: none, unless the function takes in more than one
++ ID (in this case add the ID description) */
++ ,E_NULL_POINTER /**< Unexpected NULL pointer. */
++ /* String: pointer description. */
++ ,E_INVALID_SELECTION /**< Invalid selection or mode. */
++ /* Use for enumeration values, only when other error types
++ are not suitable.
++ String: parameter description. */
++ ,E_INVALID_COMM_MODE /**< Invalid communication mode. */
++ /* String: none, unless the function takes in more than one
++ communication mode indications (in this case add
++ parameter description). */
++ ,E_INVALID_MEMORY_TYPE /**< Invalid memory type. */
++ /* String: none, unless the function takes in more than one
++ memory types (in this case add memory description,
++ e.g: "Data memory", "Buffer descriptors memory"). */
++ ,E_INVALID_CLOCK /**< Invalid clock. */
++ /* String: none, unless the function takes in more than one
++ clocks (in this case add clock description,
++ e.g: "Rx clock", "Tx clock"). */
++ ,E_CONFLICT /**< Some setting conflicts with another setting. */
++ /* String: description of the conflicting settings. */
++ ,E_NOT_ALIGNED /**< Non-aligned address. */
++ /* String: parameter description + "(should be %d-bytes aligned)",
++ e.g: "Rx data buffer (should be 32-bytes aligned)". */
++ ,E_NOT_FOUND /**< Requested resource or item was not found. */
++ /* Use only when the resource/item is uniquely identified.
++ String: none, unless the operation is not the main goal
++ of the function (in this case add item description). */
++ ,E_FULL /**< Resource is full. */
++ /* String: none, unless the operation is not the main goal
++ of the function (in this case add resource description). */
++ ,E_EMPTY /**< Resource is empty. */
++ /* String: none, unless the operation is not the main goal
++ of the function (in this case add resource description). */
++ ,E_ALREADY_FREE /**< Specified resource or item is already free or deleted. */
++ /* String: none, unless the operation is not the main goal
++ of the function (in this case add item description). */
++ ,E_READ_FAILED /**< Read access failed on memory/device. */
++ /* String: none, or device name. */
++ ,E_INVALID_FRAME /**< Invalid frame object (NULL handle or missing buffers). */
++ /* String: none. */
++ ,E_SEND_FAILED /**< Send operation failed on device. */
++ /* String: none, or device name. */
++ ,E_RECEIVE_FAILED /**< Receive operation failed on device. */
++ /* String: none, or device name. */
++ ,E_TIMEOUT/* = ETIMEDOUT*/ /**< The operation timed out. */
++ /* String: none. */
++
++ ,E_DUMMY_LAST /* NEVER USED */
++
++} e_ErrorType;
++
++/**************************************************************************//**
++ @Description Event Type Enumeration
++*//***************************************************************************/
++typedef enum e_Event /* Comments / Associated Flags and Message Strings */
++{ /* ------------------------------------------------------------ */
++ EV_NO_EVENT = 0 /**< No event; Never used. */
++
++ ,EV_RX_DISCARD /**< Received packet discarded (by the driver, and only for
++ complete packets);
++ Flags: error flags in case of error, zero otherwise. */
++ /* String: reason for discard, e.g: "Error in frame",
++ "Disordered frame", "Incomplete frame", "No frame object". */
++ ,EV_RX_ERROR /**< Receive error (by hardware/firmware);
++ Flags: usually status flags from the buffer descriptor. */
++ /* String: none. */
++ ,EV_TX_ERROR /**< Transmit error (by hardware/firmware);
++ Flags: usually status flags from the buffer descriptor. */
++ /* String: none. */
++ ,EV_NO_BUFFERS /**< System ran out of buffer objects;
++ Flags: zero. */
++ /* String: none. */
++ ,EV_NO_MB_FRAMES /**< System ran out of multi-buffer frame objects;
++ Flags: zero. */
++ /* String: none. */
++ ,EV_NO_SB_FRAMES /**< System ran out of single-buffer frame objects;
++ Flags: zero. */
++ /* String: none. */
++ ,EV_TX_QUEUE_FULL /**< Transmit queue is full;
++ Flags: zero. */
++ /* String: none. */
++ ,EV_RX_QUEUE_FULL /**< Receive queue is full;
++ Flags: zero. */
++ /* String: none. */
++ ,EV_INTR_QUEUE_FULL /**< Interrupt queue overflow;
++ Flags: zero. */
++ /* String: none. */
++ ,EV_NO_DATA_BUFFER /**< Data buffer allocation (from higher layer) failed;
++ Flags: zero. */
++ /* String: none. */
++ ,EV_OBJ_POOL_EMPTY /**< Objects pool is empty;
++ Flags: zero. */
++ /* String: object description (name). */
++ ,EV_BUS_ERROR /**< Illegal access on bus;
++ Flags: the address (if available) or bus identifier */
++ /* String: bus/address/module description. */
++ ,EV_PTP_TXTS_QUEUE_FULL /**< PTP Tx timestamps queue is full;
++ Flags: zero. */
++ /* String: none. */
++ ,EV_PTP_RXTS_QUEUE_FULL /**< PTP Rx timestamps queue is full;
++ Flags: zero. */
++ /* String: none. */
++ ,EV_DUMMY_LAST
++
++} e_Event;
++
++
++/**************************************************************************//**
++ @Collection Debug Levels for Errors and Events
++
++ The level description refers to errors only.
++ For events, classification is done by the user.
++
++ The TRACE, INFO and WARNING levels are allowed only when using
++ the DBG macro, and are not allowed when using the error macros
++ (RETURN_ERROR or REPORT_ERROR).
++ @{
++*//***************************************************************************/
++#define REPORT_LEVEL_CRITICAL 1 /**< Crasher: Incorrect flow, NULL pointers/handles. */
++#define REPORT_LEVEL_MAJOR 2 /**< Cannot proceed: Invalid operation, parameters or
++ configuration. */
++#define REPORT_LEVEL_MINOR 3 /**< Recoverable problem: a repeating call with the same
++ parameters may be successful. */
++#define REPORT_LEVEL_WARNING 4 /**< Something is not exactly right, yet it is not an error. */
++#define REPORT_LEVEL_INFO 5 /**< Messages which may be of interest to user/programmer. */
++#define REPORT_LEVEL_TRACE 6 /**< Program flow messages. */
++
++#define EVENT_DISABLED 0xFF /**< Disabled event (not reported at all) */
++
++/* @} */
++
++
++
++#define NO_MSG ("")
++
++#ifndef DEBUG_GLOBAL_LEVEL
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
++#endif /* DEBUG_GLOBAL_LEVEL */
++
++#ifndef ERROR_GLOBAL_LEVEL
++#define ERROR_GLOBAL_LEVEL DEBUG_GLOBAL_LEVEL
++#endif /* ERROR_GLOBAL_LEVEL */
++
++#ifndef EVENT_GLOBAL_LEVEL
++#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
++#endif /* EVENT_GLOBAL_LEVEL */
++
++#ifdef EVENT_LOCAL_LEVEL
++#define EVENT_DYNAMIC_LEVEL EVENT_LOCAL_LEVEL
++#else
++#define EVENT_DYNAMIC_LEVEL EVENT_GLOBAL_LEVEL
++#endif /* EVENT_LOCAL_LEVEL */
++
++
++#ifndef DEBUG_DYNAMIC_LEVEL
++#define DEBUG_USING_STATIC_LEVEL
++
++#ifdef DEBUG_STATIC_LEVEL
++#define DEBUG_DYNAMIC_LEVEL DEBUG_STATIC_LEVEL
++#else
++#define DEBUG_DYNAMIC_LEVEL DEBUG_GLOBAL_LEVEL
++#endif /* DEBUG_STATIC_LEVEL */
++
++#else /* DEBUG_DYNAMIC_LEVEL */
++#ifdef DEBUG_STATIC_LEVEL
++#error "Please use either DEBUG_STATIC_LEVEL or DEBUG_DYNAMIC_LEVEL (not both)"
++#else
++int DEBUG_DYNAMIC_LEVEL = DEBUG_GLOBAL_LEVEL;
++#endif /* DEBUG_STATIC_LEVEL */
++#endif /* !DEBUG_DYNAMIC_LEVEL */
++
++
++#ifndef ERROR_DYNAMIC_LEVEL
++
++#ifdef ERROR_STATIC_LEVEL
++#define ERROR_DYNAMIC_LEVEL ERROR_STATIC_LEVEL
++#else
++#define ERROR_DYNAMIC_LEVEL ERROR_GLOBAL_LEVEL
++#endif /* ERROR_STATIC_LEVEL */
++
++#else /* ERROR_DYNAMIC_LEVEL */
++#ifdef ERROR_STATIC_LEVEL
++#error "Please use either ERROR_STATIC_LEVEL or ERROR_DYNAMIC_LEVEL (not both)"
++#else
++int ERROR_DYNAMIC_LEVEL = ERROR_GLOBAL_LEVEL;
++#endif /* ERROR_STATIC_LEVEL */
++#endif /* !ERROR_DYNAMIC_LEVEL */
++
++#define PRINT_FORMAT "[CPU%02d, %s:%d %s]"
++#define PRINT_FMT_PARAMS raw_smp_processor_id(), __FILE__, __LINE__, __FUNCTION__
++
++#if (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0))
++/* No debug/error/event messages at all */
++#define DBG(_level, _vmsg)
++
++#define REPORT_ERROR(_level, _err, _vmsg)
++
++#define RETURN_ERROR(_level, _err, _vmsg) \
++ return ERROR_CODE(_err)
++
++#if (REPORT_EVENTS > 0)
++
++#define REPORT_EVENT(_ev, _appId, _flg, _vmsg) \
++ do { \
++ if (_ev##_LEVEL <= EVENT_DYNAMIC_LEVEL) { \
++ XX_EventById((uint32_t)(_ev), (t_Handle)(_appId), (uint16_t)(_flg), NO_MSG); \
++ } \
++ } while (0)
++
++#else
++
++#define REPORT_EVENT(_ev, _appId, _flg, _vmsg)
++
++#endif /* (REPORT_EVENTS > 0) */
++
++
++#else /* DEBUG_ERRORS > 0 */
++
++extern const char *dbgLevelStrings[];
++extern const char *moduleStrings[];
++#if (REPORT_EVENTS > 0)
++extern const char *eventStrings[];
++#endif /* (REPORT_EVENTS > 0) */
++
++char * ErrTypeStrings (e_ErrorType err);
++
++
++#if ((defined(DEBUG_USING_STATIC_LEVEL)) && (DEBUG_DYNAMIC_LEVEL < REPORT_LEVEL_WARNING))
++/* No need for DBG macro - debug level is higher anyway */
++#define DBG(_level, _vmsg)
++#else
++#define DBG(_level, _vmsg) \
++ do { \
++ if (REPORT_LEVEL_##_level <= DEBUG_DYNAMIC_LEVEL) { \
++ XX_Print("> %s (%s) " PRINT_FORMAT ": ", \
++ dbgLevelStrings[REPORT_LEVEL_##_level - 1], \
++ moduleStrings[__ERR_MODULE__ >> 16], \
++ PRINT_FMT_PARAMS); \
++ XX_Print _vmsg; \
++ XX_Print("\r\n"); \
++ } \
++ } while (0)
++#endif /* (defined(DEBUG_USING_STATIC_LEVEL) && (DEBUG_DYNAMIC_LEVEL < WARNING)) */
++
++
++#define REPORT_ERROR(_level, _err, _vmsg) \
++ do { \
++ if (REPORT_LEVEL_##_level <= ERROR_DYNAMIC_LEVEL) { \
++ XX_Print("! %s %s Error " PRINT_FORMAT ": %s; ", \
++ dbgLevelStrings[REPORT_LEVEL_##_level - 1], \
++ moduleStrings[__ERR_MODULE__ >> 16], \
++ PRINT_FMT_PARAMS, \
++ ErrTypeStrings((e_ErrorType)GET_ERROR_TYPE(_err))); \
++ XX_Print _vmsg; \
++ XX_Print("\r\n"); \
++ } \
++ } while (0)
++
++
++#define RETURN_ERROR(_level, _err, _vmsg) \
++ do { \
++ REPORT_ERROR(_level, (_err), _vmsg); \
++ return ERROR_CODE(_err); \
++ } while (0)
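++
++/* A typical call site, as a sketch (illustrative only; the function and
++ * parameter names are hypothetical). Note that _vmsg is a parenthesized
++ * printf-style argument list, because the macros expand it as
++ * "XX_Print _vmsg":
++ *
++ * t_Error MyModule_SetLength(t_Handle h_Mod, uint16_t len)
++ * {
++ * if (len % 8)
++ * RETURN_ERROR(MAJOR, E_INVALID_VALUE,
++ * ("Rx buffer length (should be divisible by 8)"));
++ * DBG(TRACE, ("length set to %u", len));
++ * return E_OK;
++ * }
++ */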
++
++
++#if (REPORT_EVENTS > 0)
++
++#define REPORT_EVENT(_ev, _appId, _flg, _vmsg) \
++ do { \
++ if (_ev##_LEVEL <= EVENT_DYNAMIC_LEVEL) { \
++ XX_Print("~ %s %s Event " PRINT_FORMAT ": %s (flags: 0x%04x); ", \
++ dbgLevelStrings[_ev##_LEVEL - 1], \
++ moduleStrings[__ERR_MODULE__ >> 16], \
++ PRINT_FMT_PARAMS, \
++ eventStrings[((_ev) - EV_NO_EVENT - 1)], \
++ (uint16_t)(_flg)); \
++ XX_Print _vmsg; \
++ XX_Print("\r\n"); \
++ XX_EventById((uint32_t)(_ev), (t_Handle)(_appId), (uint16_t)(_flg), NO_MSG); \
++ } \
++ } while (0)
++
++#else /* not REPORT_EVENTS */
++
++#define REPORT_EVENT(_ev, _appId, _flg, _vmsg)
++
++#endif /* (REPORT_EVENTS > 0) */
++
++#endif /* (DEBUG_ERRORS > 0) */
++
++
++/**************************************************************************//**
++ @Function ASSERT_COND
++
++ @Description Assertion macro.
++
++ @Param[in] _cond - The condition being checked, in positive form;
++ Failure of the condition triggers the assert.
++*//***************************************************************************/
++#ifdef DISABLE_ASSERTIONS
++#define ASSERT_COND(_cond)
++#else
++#define ASSERT_COND(_cond) \
++ do { \
++ if (!(_cond)) { \
++ XX_Print("*** ASSERT_COND failed " PRINT_FORMAT "\r\n", \
++ PRINT_FMT_PARAMS); \
++ XX_Exit(1); \
++ } \
++ } while (0)
++#endif /* DISABLE_ASSERTIONS */
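++
++/* Sketch of intended use (illustrative; the field names are hypothetical):
++ * assert internal invariants that should never fail in a correct build, e.g.
++ *
++ * ASSERT_COND(p_Fm->numOfPorts <= MAX_PORTS);
++ *
++ * With DISABLE_ASSERTIONS defined, the check compiles away entirely.
++ */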
++
++
++#ifdef DISABLE_INIT_PARAMETERS_CHECK
++
++#define CHECK_INIT_PARAMETERS(handle, f_check)
++#define CHECK_INIT_PARAMETERS_RETURN_VALUE(handle, f_check, retval)
++
++#else
++
++#define CHECK_INIT_PARAMETERS(handle, f_check) \
++ do { \
++ t_Error err = f_check(handle); \
++ if (err != E_OK) { \
++ RETURN_ERROR(MAJOR, err, NO_MSG); \
++ } \
++ } while (0)
++
++#define CHECK_INIT_PARAMETERS_RETURN_VALUE(handle, f_check, retval) \
++ do { \
++ t_Error err = f_check(handle); \
++ if (err != E_OK) { \
++ REPORT_ERROR(MAJOR, err, NO_MSG); \
++ return (retval); \
++ } \
++ } while (0)
++
++#endif /* DISABLE_INIT_PARAMETERS_CHECK */
++
++#ifdef DISABLE_SANITY_CHECKS
++
++#define SANITY_CHECK_RETURN_ERROR(_cond, _err)
++#define SANITY_CHECK_RETURN_VALUE(_cond, _err, retval)
++#define SANITY_CHECK_RETURN(_cond, _err)
++#define SANITY_CHECK_EXIT(_cond, _err)
++
++#else /* DISABLE_SANITY_CHECKS */
++
++#define SANITY_CHECK_RETURN_ERROR(_cond, _err) \
++ do { \
++ if (!(_cond)) { \
++ RETURN_ERROR(CRITICAL, (_err), NO_MSG); \
++ } \
++ } while (0)
++
++#define SANITY_CHECK_RETURN_VALUE(_cond, _err, retval) \
++ do { \
++ if (!(_cond)) { \
++ REPORT_ERROR(CRITICAL, (_err), NO_MSG); \
++ return (retval); \
++ } \
++ } while (0)
++
++#define SANITY_CHECK_RETURN(_cond, _err) \
++ do { \
++ if (!(_cond)) { \
++ REPORT_ERROR(CRITICAL, (_err), NO_MSG); \
++ return; \
++ } \
++ } while (0)
++
++#define SANITY_CHECK_EXIT(_cond, _err) \
++ do { \
++ if (!(_cond)) { \
++ REPORT_ERROR(CRITICAL, (_err), NO_MSG); \
++ XX_Exit(1); \
++ } \
++ } while (0)
++
++#endif /* DISABLE_SANITY_CHECKS */
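++
++/* A common entry-point pattern, as a sketch (illustrative; the device and
++ * handle names are hypothetical). Each variant matches one return style:
++ *
++ * t_Error MyDev_Enable(t_Handle h_Dev)
++ * {
++ * SANITY_CHECK_RETURN_ERROR(h_Dev, E_INVALID_HANDLE);
++ * ...
++ * return E_OK;
++ * }
++ *
++ * uint32_t MyDev_GetCounter(t_Handle h_Dev)
++ * {
++ * SANITY_CHECK_RETURN_VALUE(h_Dev, E_INVALID_HANDLE, 0);
++ * ...
++ * }
++ */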
++
++/** @} */ /* end of Debug/error Utils group */
++
++/** @} */ /* end of General Utils group */
++
++#endif /* __ERROR_EXT_H */
++
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/etc/list_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/list_ext.h
+new file mode 100644
+index 00000000..ee6b9f29
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/list_ext.h
+@@ -0,0 +1,358 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++
++ @File list_ext.h
++
++ @Description External prototypes for list.c
++*//***************************************************************************/
++
++#ifndef __LIST_EXT_H
++#define __LIST_EXT_H
++
++
++#include "std_ext.h"
++
++
++/**************************************************************************//**
++ @Group etc_id Utility Library Application Programming Interface
++
++ @Description External routines.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group list_id List
++
++ @Description List module functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description List structure.
++*//***************************************************************************/
++typedef struct List
++{
++ struct List *p_Next; /**< A pointer to the next list object */
++ struct List *p_Prev; /**< A pointer to the previous list object */
++} t_List;
++
++
++/**************************************************************************//**
++ @Function LIST_FIRST/LIST_LAST/LIST_NEXT/LIST_PREV
++
++ @Description Macro to get first/last/next/previous entry in a list.
++
++ @Param[in] p_List - A pointer to a list.
++*//***************************************************************************/
++#define LIST_FIRST(p_List) (p_List)->p_Next
++#define LIST_LAST(p_List) (p_List)->p_Prev
++#define LIST_NEXT LIST_FIRST
++#define LIST_PREV LIST_LAST
++
++
++/**************************************************************************//**
++ @Function LIST_INIT
++
++ @Description Macro for initialization of a list struct.
++
++ @Param[in] lst - The t_List object to initialize.
++*//***************************************************************************/
++#define LIST_INIT(lst) {&(lst), &(lst)}
++
++
++/**************************************************************************//**
++ @Function LIST
++
++ @Description Macro to declare a list.
++
++ @Param[in] listName - The list object name.
++*//***************************************************************************/
++#define LIST(listName) t_List listName = LIST_INIT(listName)
++
++
++/**************************************************************************//**
++ @Function INIT_LIST
++
++ @Description Macro to initialize a list pointer.
++
++ @Param[in] p_List - The list pointer.
++*//***************************************************************************/
++#define INIT_LIST(p_List) LIST_FIRST(p_List) = LIST_LAST(p_List) = (p_List)
++
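++/* Declaration sketch (illustrative; the list names are hypothetical). The
++ * three forms cover definition, static initialization, and runtime
++ * initialization through a pointer:
++ *
++ * LIST(txList); // define and initialize in one go
++ * static t_List rxList = LIST_INIT(rxList); // static initializer form
++ * INIT_LIST(p_SomeList); // runtime init via a pointer
++ */
++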
++
++/**************************************************************************//**
++ @Function LIST_OBJECT
++
++ @Description Macro to get the struct (object) for this entry.
++
++ @Param[in] p_List - A pointer to the t_List member embedded in the struct.
++ @Param[in] type - The type of the struct (object) this list is embedded in.
++ @Param[in] member - The name of the t_List object within the struct.
++
++ @Return The structure pointer for this entry.
++*//***************************************************************************/
++#define MEMBER_OFFSET(type, member) (PTR_TO_UINT(&((type *)0)->member))
++#define LIST_OBJECT(p_List, type, member) \
++ ((type *)((char *)(p_List)-MEMBER_OFFSET(type, member)))
++
++
++/**************************************************************************//**
++ @Function LIST_FOR_EACH
++
++ @Description Macro to iterate over a list.
++
++ @Param[in] p_Pos - A pointer to a list to use as a loop counter.
++ @Param[in] p_Head - A pointer to the head for your list pointer.
++
++ @Cautions You can't delete items with this routine.
++ For deletion use LIST_FOR_EACH_SAFE().
++*//***************************************************************************/
++#define LIST_FOR_EACH(p_Pos, p_Head) \
++ for (p_Pos = LIST_FIRST(p_Head); p_Pos != (p_Head); p_Pos = LIST_NEXT(p_Pos))
++
++
++/**************************************************************************//**
++ @Function LIST_FOR_EACH_SAFE
++
++ @Description Macro to iterate over a list safe against removal of list entry.
++
++ @Param[in] p_Pos - A pointer to a list to use as a loop counter.
++ @Param[in] p_Tmp - Another pointer to a list to use as temporary storage.
++ @Param[in] p_Head - A pointer to the head for your list pointer.
++*//***************************************************************************/
++#define LIST_FOR_EACH_SAFE(p_Pos, p_Tmp, p_Head) \
++ for (p_Pos = LIST_FIRST(p_Head), p_Tmp = LIST_FIRST(p_Pos); \
++ p_Pos != (p_Head); \
++ p_Pos = p_Tmp, p_Tmp = LIST_NEXT(p_Pos))
++
++
++/**************************************************************************//**
++ @Function LIST_FOR_EACH_OBJECT_SAFE
++
++ @Description Macro to iterate over list of given type safely.
++
++ @Param[in] p_Pos - A pointer to a list to use as a loop counter.
++ @Param[in] p_Tmp - Another pointer to a list to use as temporary storage.
++ @Param[in] type - The type of the struct this is embedded in.
++ @Param[in] p_Head - A pointer to the head for your list pointer.
++ @Param[in] member - The name of the list_struct within the struct.
++
++ @Cautions None - unlike LIST_FOR_EACH_OBJECT(), this macro is safe
++ against removal of the current entry during iteration.
++*//***************************************************************************/
++#define LIST_FOR_EACH_OBJECT_SAFE(p_Pos, p_Tmp, p_Head, type, member) \
++ for (p_Pos = LIST_OBJECT(LIST_FIRST(p_Head), type, member), \
++ p_Tmp = LIST_OBJECT(LIST_FIRST(&p_Pos->member), type, member); \
++ &p_Pos->member != (p_Head); \
++ p_Pos = p_Tmp, \
++ p_Tmp = LIST_OBJECT(LIST_FIRST(&p_Pos->member), type, member))
++
++/**************************************************************************//**
++ @Function LIST_FOR_EACH_OBJECT
++
++ @Description Macro to iterate over list of given type.
++
++ @Param[in] p_Pos - A pointer to a list to use as a loop counter.
++ @Param[in] type - The type of the struct this is embedded in.
++ @Param[in] p_Head - A pointer to the head for your list pointer.
++ @Param[in] member - The name of the list_struct within the struct.
++
++ @Cautions You can't delete items with this routine.
++ For deletion use LIST_FOR_EACH_OBJECT_SAFE().
++*//***************************************************************************/
++#define LIST_FOR_EACH_OBJECT(p_Pos, type, p_Head, member) \
++ for (p_Pos = LIST_OBJECT(LIST_FIRST(p_Head), type, member); \
++ &p_Pos->member != (p_Head); \
++ p_Pos = LIST_OBJECT(LIST_FIRST(&(p_Pos->member)), type, member))
++
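++/* An iteration sketch (illustrative; t_MyItem, its fields and myHead are
++ * hypothetical). The list node is embedded in the object, and LIST_OBJECT()
++ * recovers the containing struct:
++ *
++ * typedef struct { int id; t_List node; } t_MyItem;
++ *
++ * t_MyItem *p_Item, *p_Tmp;
++ * LIST_FOR_EACH_OBJECT(p_Item, t_MyItem, &myHead, node)
++ * XX_Print("id %d\r\n", p_Item->id);
++ *
++ * // Use the _SAFE variant when entries may be deleted while iterating:
++ * LIST_FOR_EACH_OBJECT_SAFE(p_Item, p_Tmp, &myHead, t_MyItem, node)
++ * LIST_DelAndInit(&p_Item->node);
++ */
++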
++
++/**************************************************************************//**
++ @Function LIST_Add
++
++ @Description Add a new entry to a list.
++
++ Insert a new entry after the specified head.
++ This is good for implementing stacks.
++
++ @Param[in] p_New - A pointer to a new list entry to be added.
++ @Param[in] p_Head - A pointer to a list head to add it after.
++
++ @Return none.
++*//***************************************************************************/
++static __inline__ void LIST_Add(t_List *p_New, t_List *p_Head)
++{
++ LIST_PREV(LIST_NEXT(p_Head)) = p_New;
++ LIST_NEXT(p_New) = LIST_NEXT(p_Head);
++ LIST_PREV(p_New) = p_Head;
++ LIST_NEXT(p_Head) = p_New;
++}
++
++
++/**************************************************************************//**
++ @Function LIST_AddToTail
++
++ @Description Add a new entry to a list.
++
++ Insert a new entry before the specified head.
++ This is useful for implementing queues.
++
++ @Param[in] p_New - A pointer to a new list entry to be added.
++ @Param[in] p_Head - A pointer to a list head to add it before.
++
++ @Return none.
++*//***************************************************************************/
++static __inline__ void LIST_AddToTail(t_List *p_New, t_List *p_Head)
++{
++ LIST_NEXT(LIST_PREV(p_Head)) = p_New;
++ LIST_PREV(p_New) = LIST_PREV(p_Head);
++ LIST_NEXT(p_New) = p_Head;
++ LIST_PREV(p_Head) = p_New;
++}
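++
++/* Usage sketch (illustrative; "item" and the heads are hypothetical): a
++ * stack and a queue differ only in the insertion macro used:
++ *
++ * LIST_Add(&item.node, &stackHead); // LIFO - newest entry first
++ * LIST_AddToTail(&item.node, &queueHead); // FIFO - newest entry last
++ */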
++
++
++/**************************************************************************//**
++ @Function LIST_Del
++
++ @Description Deletes entry from a list.
++
++ @Param[in] p_Entry - A pointer to the element to delete from the list.
++
++ @Return none.
++
++ @Cautions LIST_IsEmpty() on the entry does not return true after this;
++ the entry is left in an undefined state.
++*//***************************************************************************/
++static __inline__ void LIST_Del(t_List *p_Entry)
++{
++ LIST_PREV(LIST_NEXT(p_Entry)) = LIST_PREV(p_Entry);
++ LIST_NEXT(LIST_PREV(p_Entry)) = LIST_NEXT(p_Entry);
++}
++
++
++/**************************************************************************//**
++ @Function LIST_DelAndInit
++
++ @Description Deletes entry from list and reinitialize it.
++
++ @Param[in] p_Entry - A pointer to the element to delete from the list.
++
++ @Return none.
++*//***************************************************************************/
++static __inline__ void LIST_DelAndInit(t_List *p_Entry)
++{
++ LIST_Del(p_Entry);
++ INIT_LIST(p_Entry);
++}
++
++
++/**************************************************************************//**
++ @Function LIST_Move
++
++ @Description Delete from one list and add as another's head.
++
++ @Param[in] p_Entry - A pointer to the list entry to move.
++ @Param[in] p_Head - A pointer to the list head that will precede our entry.
++
++ @Return none.
++*//***************************************************************************/
++static __inline__ void LIST_Move(t_List *p_Entry, t_List *p_Head)
++{
++ LIST_Del(p_Entry);
++ LIST_Add(p_Entry, p_Head);
++}
++
++
++/**************************************************************************//**
++ @Function LIST_MoveToTail
++
++ @Description Delete from one list and add as another's tail.
++
++ @Param[in] p_Entry - A pointer to the entry to move.
++ @Param[in] p_Head - A pointer to the list head that will follow our entry.
++
++ @Return none.
++*//***************************************************************************/
++static __inline__ void LIST_MoveToTail(t_List *p_Entry, t_List *p_Head)
++{
++ LIST_Del(p_Entry);
++ LIST_AddToTail(p_Entry, p_Head);
++}
++
++
++/**************************************************************************//**
++ @Function LIST_IsEmpty
++
++ @Description Tests whether a list is empty.
++
++ @Param[in] p_List - A pointer to the list to test.
++
++ @Return 1 if the list is empty, 0 otherwise.
++*//***************************************************************************/
++static __inline__ int LIST_IsEmpty(t_List *p_List)
++{
++ return (LIST_FIRST(p_List) == p_List);
++}
++
++
++/**************************************************************************//**
++ @Function LIST_Append
++
++ @Description Join two lists.
++
++ @Param[in] p_NewList - A pointer to the new list to add.
++ @Param[in] p_Head - A pointer to the place to add it in the first list.
++
++ @Return none.
++*//***************************************************************************/
++void LIST_Append(t_List *p_NewList, t_List *p_Head);
++
++
++/**************************************************************************//**
++ @Function LIST_NumOfObjs
++
++ @Description Counts the number of objects in the list.
++
++ @Param[in] p_List - A pointer to the list whose objects are to be counted.
++
++ @Return Number of objects in the list.
++*//***************************************************************************/
++int LIST_NumOfObjs(t_List *p_List);
++
++/** @} */ /* end of list_id group */
++/** @} */ /* end of etc_id group */
++
++
++#endif /* __LIST_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/etc/mem_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/mem_ext.h
+new file mode 100644
+index 00000000..d0565d41
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/mem_ext.h
+@@ -0,0 +1,318 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++
++ @File mem_ext.h
++
++ @Description External prototypes for the memory manager object
++*//***************************************************************************/
++
++#ifndef __MEM_EXT_H
++#define __MEM_EXT_H
++
++#include "std_ext.h"
++#include "part_ext.h"
++
++
++/**************************************************************************//**
++ @Group etc_id Utility Library Application Programming Interface
++
++ @Description External routines.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group mem_id Slab Memory Manager
++
++ @Description Slab Memory Manager module functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/* Each block is of the following structure:
++ *
++ *
++ * +-----------+----------+---------------------------+-----------+-----------+
++ * | Alignment | Prefix | Data | Postfix | Alignment |
++ * | field | field | field | field | Padding |
++ * | | | | | |
++ * +-----------+----------+---------------------------+-----------+-----------+
++ * In addition, optional padding may reside at the very beginning of the
++ * segment, to ensure that the first block's data field is aligned as requested.
++ */
++
++
++#define MEM_MAX_NAME_LENGTH 8
++
++/**************************************************************************//*
++ @Description Memory Segment structure
++*//***************************************************************************/
++
++typedef struct
++{
++ char name[MEM_MAX_NAME_LENGTH];
++ /* The segment's name */
++ uint8_t **p_Bases; /* Base addresses of the segments */
++ uint8_t **p_BlocksStack; /* Array of pointers to blocks */
++ t_Handle h_Spinlock;
++ uint16_t dataSize; /* Size of each data block */
++ uint16_t prefixSize; /* How many bytes to reserve before the data */
++ uint16_t postfixSize; /* How many bytes to reserve after the data */
++ uint16_t alignment; /* Requested alignment for the data field */
++ int allocOwner; /* Memory allocation owner */
++ uint32_t getFailures; /* Number of times get failed */
++ uint32_t num; /* Number of blocks in segment */
++ uint32_t current; /* Current block */
++ bool consecutiveMem; /* Allocate data blocks in contiguous memory */
++#ifdef DEBUG_MEM_LEAKS
++ void *p_MemDbg; /* MEM debug database (MEM leaks detection) */
++ uint32_t blockOffset;
++ uint32_t blockSize;
++#endif /* DEBUG_MEM_LEAKS */
++} t_MemorySegment;
++
++
++
++/**************************************************************************//**
++ @Function MEM_Init
++
++ @Description Create a new memory segment.
++
++ @Param[in] name - Name of memory partition.
++ @Param[in] p_Handle - Handle to new segment is returned through here.
++ @Param[in] num - Number of blocks in new segment.
++ @Param[in] dataSize - Size of blocks in segment.
++ @Param[in] prefixSize - How many bytes to allocate before the data.
++ @Param[in] postfixSize - How many bytes to allocate after the data.
++ @Param[in] alignment - Requested alignment for data field (in bytes).
++
++ @Return E_OK - success, E_NO_MEMORY - out of memory.
++*//***************************************************************************/
++t_Error MEM_Init(char name[],
++ t_Handle *p_Handle,
++ uint32_t num,
++ uint16_t dataSize,
++ uint16_t prefixSize,
++ uint16_t postfixSize,
++ uint16_t alignment);
++
++/**************************************************************************//**
++ @Function MEM_InitSmart
++
++ @Description Create a new memory segment.
++
++ @Param[in] name - Name of memory partition.
++ @Param[in] p_Handle - Handle to new segment is returned through here.
++ @Param[in] num - Number of blocks in new segment.
++ @Param[in] dataSize - Size of blocks in segment.
++ @Param[in] prefixSize - How many bytes to allocate before the data.
++ @Param[in] postfixSize - How many bytes to allocate after the data.
++ @Param[in] alignment - Requested alignment for data field (in bytes).
++ @Param[in] memPartitionId - Memory partition ID for allocation.
++ @Param[in] consecutiveMem - Whether to allocate the memory blocks
++ contiguously or not.
++
++ @Return E_OK - success, E_NO_MEMORY - out of memory.
++*//***************************************************************************/
++t_Error MEM_InitSmart(char name[],
++ t_Handle *p_Handle,
++ uint32_t num,
++ uint16_t dataSize,
++ uint16_t prefixSize,
++ uint16_t postfixSize,
++ uint16_t alignment,
++ uint8_t memPartitionId,
++ bool consecutiveMem);
++
++/**************************************************************************//**
++ @Function MEM_InitByAddress
++
++ @Description Create a new memory segment with a specified base address.
++
++ @Param[in] name - Name of memory partition.
++ @Param[in] p_Handle - Handle to new segment is returned through here.
++ @Param[in] num - Number of blocks in new segment.
++ @Param[in] dataSize - Size of blocks in segment.
++ @Param[in] prefixSize - How many bytes to allocate before the data.
++ @Param[in] postfixSize - How many bytes to allocate after the data.
++ @Param[in] alignment - Requested alignment for data field (in bytes).
++ @Param[in] address - The required base address.
++
++ @Return E_OK - success, E_NO_MEMORY - out of memory.
++ *//***************************************************************************/
++t_Error MEM_InitByAddress(char name[],
++ t_Handle *p_Handle,
++ uint32_t num,
++ uint16_t dataSize,
++ uint16_t prefixSize,
++ uint16_t postfixSize,
++ uint16_t alignment,
++ uint8_t *address);
++
++/**************************************************************************//**
++ @Function MEM_Free
++
++ @Description Free a specific memory segment.
++
++ @Param[in] h_Mem - Handle to memory segment.
++
++ @Return None.
++*//***************************************************************************/
++void MEM_Free(t_Handle h_Mem);
++
++/**************************************************************************//**
++ @Function MEM_Get
++
++ @Description Get a block of memory from a segment.
++
++ @Param[in] h_Mem - Handle to memory segment.
++
++ @Return Pointer to a new memory block on success, NULL otherwise.
++*//***************************************************************************/
++void * MEM_Get(t_Handle h_Mem);
++
++/**************************************************************************//**
++ @Function MEM_GetN
++
++ @Description Get up to N blocks of memory from a segment.
++
++ The blocks are assumed to be of a fixed size (one size per segment).
++
++ @Param[in] h_Mem - Handle to memory segment.
++ @Param[in] num - Number of blocks to allocate.
++ @Param[out] array - Array of at least num pointers to which the addresses
++ of the allocated blocks are written.
++
++ @Return The number of blocks actually allocated.
++
++ @Cautions Interrupts are disabled for the duration of the allocation loop.
++ Although this loop is very short for each block (several machine
++ instructions), you should not allocate a very large number
++ of blocks via this routine.
++*//***************************************************************************/
++uint16_t MEM_GetN(t_Handle h_Mem, uint32_t num, void *array[]);
++
++/**************************************************************************//**
++ @Function MEM_Put
++
++ @Description Put a block of memory back to a segment.
++
++ @Param[in] h_Mem - Handle to memory segment.
++ @Param[in] p_Block - The block to return.
++
++ @Return E_OK on success; error code otherwise.
++*//***************************************************************************/
++t_Error MEM_Put(t_Handle h_Mem, void *p_Block);
++
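++/* A life-cycle sketch (illustrative; the segment name and sizes are made up):
++ *
++ * t_Handle h_Seg;
++ * if (MEM_Init("bufs", &h_Seg, 64, 256, 0, 0, 16) != E_OK)
++ * return; // hypothetical error path
++ *
++ * void *p_Blk = MEM_Get(h_Seg); // NULL when the segment is exhausted
++ * if (p_Blk)
++ * MEM_Put(h_Seg, p_Blk); // return the block to the pool
++ *
++ * MEM_Free(h_Seg); // destroy the segment
++ */
++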
++/**************************************************************************//**
++ @Function MEM_ComputePartitionSize
++
++ @Description Calculates a tight upper bound on the size of a partition
++ with the given attributes.
++
++ The returned value is suitable if one wants to use MEM_InitByAddress().
++
++ @Param[in] num - The number of blocks in the segment.
++ @Param[in] dataSize - Size of each data block.
++ @Param[in] prefixSize - The prefix size.
++ @Param[in] postfixSize - The postfix size.
++ @Param[in] alignment - The requested alignment value (in bytes).
++
++ @Return The memory block size a segment with the given attributes needs.
++*//***************************************************************************/
++uint32_t MEM_ComputePartitionSize(uint32_t num,
++ uint16_t dataSize,
++ uint16_t prefixSize,
++ uint16_t postfixSize,
++ uint16_t alignment);
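++
++/* Illustrative sketch (editor's addition): sizing a buffer for
++ * MEM_InitByAddress() with MEM_ComputePartitionSize(), as suggested by
++ * the description above. XX_Malloc() and the segment parameters are
++ * assumptions for the example; error handling is schematic.
++ *
++ * uint32_t partSize = MEM_ComputePartitionSize(64, 256, 0, 0, 16);
++ * uint8_t *base = XX_Malloc(partSize);
++ * t_Handle h_Mem;
++ *
++ * if (MEM_InitByAddress("rx_bufs", &h_Mem, 64, 256, 0, 0, 16, base) != E_OK)
++ * return E_NO_MEMORY;
++ */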
++
++#ifdef DEBUG_MEM_LEAKS
++#if !((defined(__MWERKS__) || defined(__GNUC__)) && (__dest_os == __ppc_eabi))
++#error "Memory-Leaks-Debug option is supported only for freescale CodeWarrior"
++#endif /* !(defined(__MWERKS__) && ... */
++
++/**************************************************************************//**
++ @Function MEM_CheckLeaks
++
++ @Description Report MEM object leaks.
++
++ This routine is automatically called by the MEM_Free() routine,
++ but it can also be invoked while the MEM object is alive.
++
++ @Param[in] h_Mem - Handle to memory segment.
++
++ @Return None.
++*//***************************************************************************/
++void MEM_CheckLeaks(t_Handle h_Mem);
++
++#else /* not DEBUG_MEM_LEAKS */
++#define MEM_CheckLeaks(h_Mem)
++#endif /* not DEBUG_MEM_LEAKS */
++
++/**************************************************************************//**
++ @Description Get base of MEM
++*//***************************************************************************/
++#define MEM_GetBase(h_Mem) ((t_MemorySegment *)(h_Mem))->p_Bases[0]
++
++/**************************************************************************//**
++ @Description Get size of MEM block
++*//***************************************************************************/
++#define MEM_GetSize(h_Mem) ((t_MemorySegment *)(h_Mem))->dataSize
++
++/**************************************************************************//**
++ @Description Get prefix size of MEM block
++*//***************************************************************************/
++#define MEM_GetPrefixSize(h_Mem) ((t_MemorySegment *)(h_Mem))->prefixSize
++
++/**************************************************************************//**
++ @Description Get postfix size of MEM block
++*//***************************************************************************/
++#define MEM_GetPostfixSize(h_Mem) ((t_MemorySegment *)(h_Mem))->postfixSize
++
++/**************************************************************************//**
++ @Description Get alignment of MEM block (in bytes)
++*//***************************************************************************/
++#define MEM_GetAlignment(h_Mem) ((t_MemorySegment *)(h_Mem))->alignment
++
++/**************************************************************************//**
++ @Description Get the number of blocks in the segment
++*//***************************************************************************/
++#define MEM_GetNumOfBlocks(h_Mem) ((t_MemorySegment *)(h_Mem))->num
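++
++/* Illustrative sketch (editor's addition): querying segment attributes
++ * through the accessor macros above; h_Mem is assumed to be a valid
++ * segment handle.
++ *
++ * uint16_t blkSize = MEM_GetSize(h_Mem);
++ * uint32_t nBlocks = MEM_GetNumOfBlocks(h_Mem);
++ */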
++
++/** @} */ /* end of MEM group */
++/** @} */ /* end of etc_id group */
++
++
++#endif /* __MEM_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/etc/memcpy_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/memcpy_ext.h
+new file mode 100644
+index 00000000..1b3a2fac
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/memcpy_ext.h
+@@ -0,0 +1,208 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++
++ @File memcpy_ext.h
++
++ @Description Efficient functions for copying and setting blocks of memory.
++*//***************************************************************************/
++
++#ifndef __MEMCPY_EXT_H
++#define __MEMCPY_EXT_H
++
++#include "std_ext.h"
++
++
++/**************************************************************************//**
++ @Group etc_id Utility Library Application Programming Interface
++
++ @Description External routines.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group mem_cpy Memory Copy
++
++ @Description Memory Copy module functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function MemCpy32
++
++ @Description Copies one memory buffer into another in 4-byte chunks,
++ which should be more efficient than copying byte by byte.
++
++ For large buffers (over 60 bytes) this function is about 4 times
++ more efficient than the trivial memory copy. For short buffers
++ it is reduced to the trivial copy and may be a bit worse.
++
++ @Param[in] pDst - The address of the destination buffer.
++ @Param[in] pSrc - The address of the source buffer.
++ @Param[in] size - The number of bytes that will be copied from pSrc to pDst.
++
++ @Return pDst (the address of the destination buffer).
++
++ @Cautions There is no parameter or boundary checking! It is up to the user
++ to supply non-null parameters as source & destination and size
++ that actually fits into the destination buffer.
++*//***************************************************************************/
++void * MemCpy32(void* pDst,void* pSrc, uint32_t size);
++void * IO2IOCpy32(void* pDst,void* pSrc, uint32_t size);
++void * IO2MemCpy32(void* pDst,void* pSrc, uint32_t size);
++void * Mem2IOCpy32(void* pDst,void* pSrc, uint32_t size);
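++
++/* Illustrative sketch (editor's addition): MemCpy32() with a size that
++ * fits the destination buffer, per the caution above; the buffers are
++ * assumptions for the example.
++ *
++ * uint8_t src[128], dst[128];
++ *
++ * MemCpy32(dst, src, sizeof(dst)); // no bounds checking is performed
++ */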
++
++/**************************************************************************//**
++ @Function MemCpy64
++
++ @Description Copies one memory buffer into another in 8-byte chunks,
++ which should be more efficient than copying byte by byte.
++
++ For large buffers (over 60 bytes) this function is about 8 times
++ more efficient than the trivial memory copy. For short buffers
++ it is reduced to the trivial copy and may be a bit worse.
++
++ Some testing suggests that MemCpy32() performs better than
++ MemCpy64() over small buffers. On average they break even at
++ 100-byte buffers. For larger buffers, MemCpy64() is
++ superior.
++
++ @Param[in] pDst - The address of the destination buffer.
++ @Param[in] pSrc - The address of the source buffer.
++ @Param[in] size - The number of bytes that will be copied from pSrc to pDst.
++
++ @Return pDst (the address of the destination buffer).
++
++ @Cautions There is no parameter or boundary checking! It is up to the user
++ to supply non-null source and destination parameters, and a
++ size that actually fits into the destination buffer.
++
++ Do not use under Linux.
++*//***************************************************************************/
++void * MemCpy64(void* pDst,void* pSrc, uint32_t size);
++
++/**************************************************************************//**
++ @Function MemSet32
++
++ @Description Sets all bytes of a memory buffer to a specific value, in
++ 4-byte chunks.
++
++ @Param[in] pDst - The address of the destination buffer.
++ @Param[in] val - Value to set destination bytes to.
++ @Param[in] size - The number of bytes that will be set to val.
++
++ @Return pDst (the address of the destination buffer).
++
++ @Cautions There is no parameter or boundary checking! It is up to the user
++ to supply a non-null destination parameter and a size
++ that actually fits into the destination buffer.
++*//***************************************************************************/
++void * MemSet32(void* pDst, uint8_t val, uint32_t size);
++void * IOMemSet32(void* pDst, uint8_t val, uint32_t size);
++
++/**************************************************************************//**
++ @Function MemSet64
++
++ @Description Sets all bytes of a memory buffer to a specific value, in
++ 8-byte chunks.
++
++ @Param[in] pDst - The address of the destination buffer.
++ @Param[in] val - Value to set destination bytes to.
++ @Param[in] size - The number of bytes that will be set to val.
++
++ @Return pDst (the address of the destination buffer).
++
++ @Cautions There is no parameter or boundary checking! It is up to the user
++ to supply a non-null destination parameter and a size
++ that actually fits into the destination buffer.
++*//***************************************************************************/
++void * MemSet64(void* pDst, uint8_t val, uint32_t size);
++
++/**************************************************************************//**
++ @Function MemDisp
++
++ @Description Displays a block of memory in chunks of 32 bits.
++
++ @Param[in] addr - The address of the memory to display.
++ @Param[in] size - The number of bytes that will be displayed.
++
++ @Return None.
++
++ @Cautions There is no parameter or boundary checking! It is up to the user
++ to supply a non-null address and a size that does not
++ exceed the buffer being displayed.
++*//***************************************************************************/
++void MemDisp(uint8_t *addr, int size);
++
++/**************************************************************************//**
++ @Function MemCpy8
++
++ @Description Trivially copies one memory buffer into another, byte by byte.
++
++ @Param[in] pDst - The address of the destination buffer.
++ @Param[in] pSrc - The address of the source buffer.
++ @Param[in] size - The number of bytes that will be copied from pSrc to pDst.
++
++ @Return pDst (the address of the destination buffer).
++
++ @Cautions There is no parameter or boundary checking! It is up to the user
++ to supply non-null parameters as source & destination and size
++ that actually fits into the destination buffer.
++*//***************************************************************************/
++void * MemCpy8(void* pDst,void* pSrc, uint32_t size);
++
++/**************************************************************************//**
++ @Function MemSet8
++
++ @Description Sets all bytes of a memory buffer to a specific value byte by byte.
++
++ @Param[in] pDst - The address of the destination buffer.
++ @Param[in] c - Value to set destination bytes to.
++ @Param[in] size - The number of bytes that will be set to c.
++
++ @Return pDst (the address of the destination buffer).
++
++ @Cautions There is no parameter or boundary checking! It is up to the user
++ to supply a non-null destination parameter and a size
++ that actually fits into the destination buffer.
++*//***************************************************************************/
++void * MemSet8(void* pDst, int c, uint32_t size);
++
++/** @} */ /* end of mem_cpy group */
++/** @} */ /* end of etc_id group */
++
++
++#endif /* __MEMCPY_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/etc/mm_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/mm_ext.h
+new file mode 100644
+index 00000000..fa7c85e3
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/mm_ext.h
+@@ -0,0 +1,310 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File mm_ext.h
++
++ @Description Memory Manager Application Programming Interface
++*//***************************************************************************/
++#ifndef __MM_EXT
++#define __MM_EXT
++
++#include "std_ext.h"
++
++#define MM_MAX_ALIGNMENT 20 /* Alignments from 2 to 128 are available,
++ where the maximum alignment is defined
++ as MM_MAX_ALIGNMENT power of 2 */
++
++#define MM_MAX_NAME_LEN 32
++
++/**************************************************************************//**
++ @Group etc_id Utility Library Application Programming Interface
++
++ @Description External routines.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group mm_grp Flexible Memory Manager
++
++ @Description Flexible Memory Manager module functions, definitions and enums.
++ (All of the following functions, definitions and enums can be found in mm_ext.h)
++
++ @{
++*//***************************************************************************/
++
++
++/**************************************************************************//**
++ @Function MM_Init
++
++ @Description Initializes a new MM object.
++
++ It initializes a new memory block, consisting of the base
++ address and size of the available memory, by calling the
++ MemBlock_Init routine. It also initializes a new free block
++ for each alignment by calling the FreeBlock_Init routine;
++ each such free block spans almost all of the memory, from
++ the first address with the required alignment above the base
++ address to the end of the memory.
++ The handle to the new MM object is returned via the h_MM
++ argument (passed by reference).
++
++ @Param[out] h_MM - Handle to the new MM object is returned through here.
++ @Param[in] base - Base address of the MM.
++ @Param[in] size - Size of the MM.
++
++ @Return E_OK on success; E_NOMEMORY if the new MM object or a new free block cannot be initialized.
++*//***************************************************************************/
++t_Error MM_Init(t_Handle *h_MM, uint64_t base, uint64_t size);
++
++/**************************************************************************//**
++ @Function MM_Get
++
++ @Description Allocates a block of memory according to the given size and the alignment.
++
++ The alignment argument tells from which
++ free list to allocate a block of memory. 2^alignment
++ indicates the alignment that the base address of the
++ allocated block should have. So, only the values 1, 2, 4,
++ 8, 16, 32 and 64 are available for the alignment argument.
++ The routine passes through the specific free list of free
++ blocks and seeks the first block that has enough of the
++ required memory (best fit).
++ After the block is found and the data is allocated, it calls
++ the internal MM_CutFree routine to update all free lists so
++ that they do not include the just-allocated block. Of course,
++ each free list contains free blocks with the same alignment.
++ It also creates a busy block that holds
++ information about the allocated block.
++
++ @Param[in] h_MM - Handle to the MM object.
++ @Param[in] size - Size of the requested block.
++ @Param[in] alignment - Index as a power of two defines a required
++ alignment (in bytes); Should be 1, 2, 4, 8, 16, 32 or 64
++ @Param[in] name - The name that specifies an allocated block.
++
++ @Return Base address of the allocated block; ILLEGAL_BASE if a block cannot be allocated.
++*//***************************************************************************/
++uint64_t MM_Get(t_Handle h_MM, uint64_t size, uint64_t alignment, char *name);
++
++/**************************************************************************//**
++ @Function MM_GetBase
++
++ @Description Gets the base address of the given MM object.
++
++ @Param[in] h_MM - Handle to the MM object.
++
++ @Return base address of the block.
++*//***************************************************************************/
++uint64_t MM_GetBase(t_Handle h_MM);
++
++/**************************************************************************//**
++ @Function MM_GetForce
++
++ @Description Force memory allocation.
++
++ It means to allocate a block of memory of the given
++ size from the given base address.
++ The routine checks whether the required block can be
++ allocated (that is, it is free) and then calls the internal
++ MM_CutFree routine to update all free lists so that they do
++ not include that block.
++
++ @Param[in] h_MM - Handle to the MM object.
++ @Param[in] base - Base address of the requested block.
++ @Param[in] size - Size of the requested block.
++ @Param[in] name - Name that specifies an allocated block.
++
++ @Return Base address of the allocated block; ILLEGAL_BASE if a block cannot be allocated.
++*//***************************************************************************/
++uint64_t MM_GetForce(t_Handle h_MM, uint64_t base, uint64_t size, char *name);
++
++/**************************************************************************//**
++ @Function MM_GetForceMin
++
++ @Description Allocates a block of memory according to the given size, the alignment and minimum base address.
++
++ The alignment argument tells from which
++ free list to allocate a block of memory. 2^alignment
++ indicates the alignment that the base address of the
++ allocated block should have. So, only the values 1, 2, 4,
++ 8, 16, 32 and 64 are available for the alignment argument.
++ The minimum base address forces the location of the block
++ to be from the given address onward.
++ The routine passes through the specific free list of free
++ blocks and seeks the first block whose base address is equal
++ to or smaller than the required minimum address and whose end
++ address is larger than the required base + size - i.e. a
++ block that may contain the required block.
++ After the block is found and the data is allocated, it calls
++ the internal MM_CutFree routine to update all free lists so
++ that they do not include the just-allocated block. Of course,
++ each free list contains free blocks with the same alignment.
++ It also creates a busy block that holds
++ information about the allocated block.
++
++ @Param[in] h_MM - Handle to the MM object.
++ @Param[in] size - Size of the requested block.
++ @Param[in] alignment - Index as a power of two defines a required
++ alignment (in bytes); Should be 1, 2, 4, 8, 16, 32 or 64
++ @Param[in] min - The minimum base address of the block.
++ @Param[in] name - Name that specifies an allocated block.
++
++ @Return Base address of the allocated block; ILLEGAL_BASE if a block cannot be allocated.
++*//***************************************************************************/
++uint64_t MM_GetForceMin(t_Handle h_MM,
++ uint64_t size,
++ uint64_t alignment,
++ uint64_t min,
++ char *name);
++
++/**************************************************************************//**
++ @Function MM_Put
++
++ @Description Puts a block of memory of the given base address back to the memory.
++
++ It checks whether there is a busy block with the
++ given base address. If not, it returns 0, which
++ means the block cannot be freed. Otherwise, it gets the
++ parameters of the busy block and, after updating the lists
++ of free blocks, removes that busy block from the list by
++ calling the MM_CutBusy routine.
++ After that it calls the MM_AddFree routine to add a new
++ free block to the free lists.
++
++ @Param[in] h_MM - Handle to the MM object.
++ @Param[in] base - Base address of the block to free.
++
++ @Return The number of bytes released; 0 on failure.
++*//***************************************************************************/
++uint64_t MM_Put(t_Handle h_MM, uint64_t base);
++
++/**************************************************************************//**
++ @Function MM_PutForce
++
++ @Description Releases a block of memory of the required size from the required base address.
++
++ First, it calls the MM_CutBusy routine
++ to cut a free block from the busy list. Then it calls the
++ MM_AddFree routine to add the free block to the free lists.
++
++ @Param[in] h_MM - Handle to the MM object.
++ @Param[in] base - Base address of the block to free.
++ @Param[in] size - Size of a block to free.
++
++ @Return The number of bytes released, 0 on failure.
++*//***************************************************************************/
++uint64_t MM_PutForce(t_Handle h_MM, uint64_t base, uint64_t size);
++
++/**************************************************************************//**
++ @Function MM_Add
++
++ @Description Adds a new memory block for memory allocation.
++
++ When a new memory block is initialized and added to the
++ memory list, it calls to MM_AddFree routine to add the
++ new free block to the free lists.
++
++ @Param[in] h_MM - Handle to the MM object.
++ @Param[in] base - Base address of the memory block.
++ @Param[in] size - Size of the memory block.
++
++ @Return E_OK on success, otherwise returns an error code.
++*//***************************************************************************/
++t_Error MM_Add(t_Handle h_MM, uint64_t base, uint64_t size);
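++
++/* Illustrative lifecycle sketch (editor's addition): create an MM object
++ * over one region, extend it with a second region via MM_Add(), allocate
++ * an aligned block and release it. The addresses, sizes and names are
++ * assumptions for the example; error handling is schematic.
++ *
++ * t_Handle h_MM;
++ * uint64_t blk;
++ *
++ * if (MM_Init(&h_MM, 0x10000000, 0x100000) != E_OK)
++ * return E_NOMEMORY;
++ * MM_Add(h_MM, 0x20000000, 0x100000); // second memory block
++ *
++ * blk = MM_Get(h_MM, 0x1000, 64, "my_block");
++ * if (blk != ILLEGAL_BASE)
++ * MM_Put(h_MM, blk); // returns the number of bytes released
++ * MM_Free(h_MM); // declared below
++ */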
++
++/**************************************************************************//**
++ @Function MM_Dump
++
++ @Description Prints the contents of the free and busy lists.
++
++ @Param[in] h_MM - Handle to the MM object.
++*//***************************************************************************/
++void MM_Dump(t_Handle h_MM);
++
++/**************************************************************************//**
++ @Function MM_Free
++
++ @Description Releases memory allocated for MM object.
++
++ @Param[in] h_MM - Handle of the MM object.
++*//***************************************************************************/
++void MM_Free(t_Handle h_MM);
++
++/**************************************************************************//**
++ @Function MM_GetMemBlock
++
++ @Description Returns base address of the memory block specified by the index.
++
++ If index is 0, the base address of the first memory
++ block is returned; if index is 1, the base address of
++ the second memory block is returned, and so on.
++ Note that those memory blocks are allocated by the
++ application before MM_Init or MM_Add and have to
++ be released by the application before or after invoking
++ the MM_Free routine.
++
++ @Param[in] h_MM - Handle to the MM object.
++ @Param[in] index - Index of the memory block.
++
++ @Return A valid base address, or ILLEGAL_BASE if no memory block corresponds to the given index.
++*//***************************************************************************/
++uint64_t MM_GetMemBlock(t_Handle h_MM, int index);
++
++/**************************************************************************//**
++ @Function MM_InRange
++
++ @Description Checks if a specific address is in the memory range of the passed MM object.
++
++ @Param[in] h_MM - Handle to the MM object.
++ @Param[in] addr - The address to be checked.
++
++ @Return TRUE if the address is in the address range of the block, FALSE otherwise.
++*//***************************************************************************/
++bool MM_InRange(t_Handle h_MM, uint64_t addr);
++
++/**************************************************************************//**
++ @Function MM_GetFreeMemSize
++
++ @Description Returns the size (in bytes) of free memory.
++
++ @Param[in] h_MM - Handle to the MM object.
++
++ @Return Free memory size in bytes.
++*//***************************************************************************/
++uint64_t MM_GetFreeMemSize(t_Handle h_MM);
++
++
++/** @} */ /* end of mm_grp group */
++/** @} */ /* end of etc_id group */
++
++#endif /* __MM_EXT */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/etc/sprint_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/sprint_ext.h
+new file mode 100644
+index 00000000..52f7a9dc
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/sprint_ext.h
+@@ -0,0 +1,118 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File sprint_ext.h
++
++ @Description Debug routines (externals).
++
++*//***************************************************************************/
++
++#ifndef __SPRINT_EXT_H
++#define __SPRINT_EXT_H
++
++
++#if defined(NCSW_LINUX) && defined(__KERNEL__)
++#include <linux/kernel.h>
++
++#elif defined(NCSW_VXWORKS)
++#include "private/stdioP.h"
++
++#else
++#include <stdio.h>
++#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
++
++#include "std_ext.h"
++
++
++/**************************************************************************//**
++ @Group etc_id Utility Library Application Programming Interface
++
++ @Description External routines.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group sprint_id Sprint
++
++ @Description Sprint & Sscan module functions,definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Function Sprint
++
++ @Description Format a string and place it in a buffer.
++
++ @Param[in] buff - The buffer to place the result into.
++ @Param[in] str - The format string to use.
++ @Param[in] ... - Arguments for the format string.
++
++ @Return Number of bytes formatted.
++*//***************************************************************************/
++int Sprint(char *buff, const char *str, ...);
++
++/**************************************************************************//**
++ @Function Snprint
++
++ @Description Format a string and place it in a buffer.
++
++ @Param[in] buf - The buffer to place the result into.
++ @Param[in] size - The size of the buffer, including the trailing null space.
++ @Param[in] fmt - The format string to use.
++ @Param[in] ... - Arguments for the format string.
++
++ @Return Number of bytes formatted.
++*//***************************************************************************/
++int Snprint(char * buf, uint32_t size, const char *fmt, ...);
++
++/**************************************************************************//**
++ @Function Sscan
++
++ @Description Unformat a buffer into a list of arguments.
++
++ @Param[in] buf - input buffer.
++ @Param[in] fmt - formatting of buffer.
++ @Param[out] ... - resulting arguments.
++
++ @Return Number of bytes unformatted.
++*//***************************************************************************/
++int Sscan(const char * buf, const char * fmt, ...);
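++
++/* Illustrative sketch (editor's addition): a Snprint()/Sscan() round
++ * trip, analogous to snprintf()/sscanf(); the buffer size is an
++ * assumption for the example.
++ *
++ * char buf[32];
++ * int id = 0;
++ *
++ * Snprint(buf, sizeof(buf), "id=%d", 42);
++ * Sscan(buf, "id=%d", &id); // id == 42 on success
++ */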
++
++/** @} */ /* end of sprint_id group */
++/** @} */ /* end of etc_id group */
++
++
++#endif /* __SPRINT_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/arch/ppc_access.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/arch/ppc_access.h
+new file mode 100644
+index 00000000..c7b9b46f
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/arch/ppc_access.h
+@@ -0,0 +1,37 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef FL_E500_MACROS_H
++#define FL_E500_MACROS_H
++
++#endif /* FL_E500_MACROS_H */
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/general.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/general.h
+new file mode 100644
+index 00000000..b3f516fb
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/general.h
+@@ -0,0 +1,52 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __GENERAL_H
++#define __GENERAL_H
++
++#include "std_ext.h"
++#if !defined(NCSW_LINUX)
++#include "errno.h"
++#endif
++
++
++extern uint32_t get_mac_addr_crc(uint64_t _addr);
++
++#ifndef CONFIG_FMAN_ARM
++#define iowrite32be(val, addr) WRITE_UINT32(*addr, val)
++#define ioread32be(addr) GET_UINT32(*addr)
++#endif
++
++#define ether_crc(len, addr) get_mac_addr_crc(*(uint64_t *)(addr)>>16)
++
++
++#endif /* __GENERAL_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fman_common.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fman_common.h
+new file mode 100755
+index 00000000..8b194e99
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fman_common.h
+@@ -0,0 +1,78 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef __FMAN_COMMON_H
++#define __FMAN_COMMON_H
++
++/**************************************************************************//**
++ @Description NIA Description
++*//***************************************************************************/
++#define NIA_ORDER_RESTOR 0x00800000
++#define NIA_ENG_FM_CTL 0x00000000
++#define NIA_ENG_PRS 0x00440000
++#define NIA_ENG_KG 0x00480000
++#define NIA_ENG_PLCR 0x004C0000
++#define NIA_ENG_BMI 0x00500000
++#define NIA_ENG_QMI_ENQ 0x00540000
++#define NIA_ENG_QMI_DEQ 0x00580000
++#define NIA_ENG_MASK 0x007C0000
++
++#define NIA_FM_CTL_AC_CC 0x00000006
++#define NIA_FM_CTL_AC_HC 0x0000000C
++#define NIA_FM_CTL_AC_IND_MODE_TX 0x00000008
++#define NIA_FM_CTL_AC_IND_MODE_RX 0x0000000A
++#define NIA_FM_CTL_AC_FRAG 0x0000000e
++#define NIA_FM_CTL_AC_PRE_FETCH 0x00000010
++#define NIA_FM_CTL_AC_POST_FETCH_PCD 0x00000012
++#define NIA_FM_CTL_AC_POST_FETCH_PCD_UDP_LEN 0x00000018
++#define NIA_FM_CTL_AC_POST_FETCH_NO_PCD 0x00000012
++#define NIA_FM_CTL_AC_FRAG_CHECK 0x00000014
++#define NIA_FM_CTL_AC_PRE_CC 0x00000020
++
++
++#define NIA_BMI_AC_ENQ_FRAME 0x00000002
++#define NIA_BMI_AC_TX_RELEASE 0x000002C0
++#define NIA_BMI_AC_RELEASE 0x000000C0
++#define NIA_BMI_AC_DISCARD 0x000000C1
++#define NIA_BMI_AC_TX 0x00000274
++#define NIA_BMI_AC_FETCH 0x00000208
++#define NIA_BMI_AC_MASK 0x000003FF
++
++#define NIA_KG_DIRECT 0x00000100
++#define NIA_KG_CC_EN 0x00000200
++#define NIA_PLCR_ABSOLUTE 0x00008000
++
++#define NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA 0x00000202
++#define NIA_BMI_AC_FETCH_ALL_FRAME 0x0000020c
++
++#endif /* __FMAN_COMMON_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_enet.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_enet.h
+new file mode 100644
+index 00000000..caa87fc6
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_enet.h
+@@ -0,0 +1,273 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_ENET_H
++#define __FSL_ENET_H
++
++/**
++ @Description Ethernet MAC-PHY Interface
++*/
++
++enum enet_interface {
++ E_ENET_IF_MII = 0x00010000, /**< MII interface */
++ E_ENET_IF_RMII = 0x00020000, /**< RMII interface */
++ E_ENET_IF_SMII = 0x00030000, /**< SMII interface */
++ E_ENET_IF_GMII = 0x00040000, /**< GMII interface */
++ E_ENET_IF_RGMII = 0x00050000, /**< RGMII interface */
++ E_ENET_IF_TBI = 0x00060000, /**< TBI interface */
++ E_ENET_IF_RTBI = 0x00070000, /**< RTBI interface */
++ E_ENET_IF_SGMII = 0x00080000, /**< SGMII interface */
++ E_ENET_IF_XGMII = 0x00090000, /**< XGMII interface */
++ E_ENET_IF_QSGMII = 0x000a0000, /**< QSGMII interface */
++ E_ENET_IF_XFI = 0x000b0000 /**< XFI interface */
++};
++
++/**
++ @Description Ethernet Speed (nominal data rate)
++*/
++enum enet_speed {
++ E_ENET_SPEED_10 = 10, /**< 10 Mbps */
++ E_ENET_SPEED_100 = 100, /**< 100 Mbps */
++ E_ENET_SPEED_1000 = 1000, /**< 1000 Mbps = 1 Gbps */
++ E_ENET_SPEED_2500 = 2500, /**< 2500 Mbps = 2.5 Gbps */
++ E_ENET_SPEED_10000 = 10000 /**< 10000 Mbps = 10 Gbps */
++};
++
++enum mac_type {
++ E_MAC_DTSEC,
++ E_MAC_TGEC,
++ E_MAC_MEMAC
++};
++
++/**************************************************************************//**
++ @Description Enum for inter-module interrupts registration
++*//***************************************************************************/
++enum fman_event_modules {
++ E_FMAN_MOD_PRS, /**< Parser event */
++ E_FMAN_MOD_KG, /**< Keygen event */
++ E_FMAN_MOD_PLCR, /**< Policer event */
++ E_FMAN_MOD_10G_MAC, /**< 10G MAC event */
++ E_FMAN_MOD_1G_MAC, /**< 1G MAC event */
++ E_FMAN_MOD_TMR, /**< Timer event */
++ E_FMAN_MOD_FMAN_CTRL, /**< FMAN Controller Timer event */
++ E_FMAN_MOD_MACSEC,
++ E_FMAN_MOD_DUMMY_LAST
++};
++
++/**************************************************************************//**
++ @Description Enum for interrupts types
++*//***************************************************************************/
++enum fman_intr_type {
++ E_FMAN_INTR_TYPE_ERR,
++ E_FMAN_INTR_TYPE_NORMAL
++};
++
++/**************************************************************************//**
++ @Description enum for defining MAC types
++*//***************************************************************************/
++enum fman_mac_type {
++ E_FMAN_MAC_10G = 0, /**< 10G MAC */
++ E_FMAN_MAC_1G /**< 1G MAC */
++};
++
++enum fman_mac_exceptions {
++ E_FMAN_MAC_EX_10G_MDIO_SCAN_EVENTMDIO = 0,
++ /**< 10GEC MDIO scan event interrupt */
++ E_FMAN_MAC_EX_10G_MDIO_CMD_CMPL,
++ /**< 10GEC MDIO command completion interrupt */
++ E_FMAN_MAC_EX_10G_REM_FAULT,
++ /**< 10GEC, mEMAC Remote fault interrupt */
++ E_FMAN_MAC_EX_10G_LOC_FAULT,
++ /**< 10GEC, mEMAC Local fault interrupt */
++ E_FMAN_MAC_EX_10G_1TX_ECC_ER,
++ /**< 10GEC, mEMAC Transmit frame ECC error interrupt */
++ E_FMAN_MAC_EX_10G_TX_FIFO_UNFL,
++ /**< 10GEC, mEMAC Transmit FIFO underflow interrupt */
++ E_FMAN_MAC_EX_10G_TX_FIFO_OVFL,
++ /**< 10GEC, mEMAC Transmit FIFO overflow interrupt */
++ E_FMAN_MAC_EX_10G_TX_ER,
++ /**< 10GEC Transmit frame error interrupt */
++ E_FMAN_MAC_EX_10G_RX_FIFO_OVFL,
++ /**< 10GEC, mEMAC Receive FIFO overflow interrupt */
++ E_FMAN_MAC_EX_10G_RX_ECC_ER,
++ /**< 10GEC, mEMAC Receive frame ECC error interrupt */
++ E_FMAN_MAC_EX_10G_RX_JAB_FRM,
++ /**< 10GEC Receive jabber frame interrupt */
++ E_FMAN_MAC_EX_10G_RX_OVRSZ_FRM,
++ /**< 10GEC Receive oversized frame interrupt */
++ E_FMAN_MAC_EX_10G_RX_RUNT_FRM,
++ /**< 10GEC Receive runt frame interrupt */
++ E_FMAN_MAC_EX_10G_RX_FRAG_FRM,
++ /**< 10GEC Receive fragment frame interrupt */
++ E_FMAN_MAC_EX_10G_RX_LEN_ER,
++ /**< 10GEC Receive payload length error interrupt */
++ E_FMAN_MAC_EX_10G_RX_CRC_ER,
++ /**< 10GEC Receive CRC error interrupt */
++ E_FMAN_MAC_EX_10G_RX_ALIGN_ER,
++ /**< 10GEC Receive alignment error interrupt */
++ E_FMAN_MAC_EX_1G_BAB_RX,
++ /**< dTSEC Babbling receive error */
++ E_FMAN_MAC_EX_1G_RX_CTL,
++ /**< dTSEC Receive control (pause frame) interrupt */
++ E_FMAN_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET,
++ /**< dTSEC Graceful transmit stop complete */
++ E_FMAN_MAC_EX_1G_BAB_TX,
++ /**< dTSEC Babbling transmit error */
++ E_FMAN_MAC_EX_1G_TX_CTL,
++ /**< dTSEC Transmit control (pause frame) interrupt */
++ E_FMAN_MAC_EX_1G_TX_ERR,
++ /**< dTSEC Transmit error */
++ E_FMAN_MAC_EX_1G_LATE_COL,
++ /**< dTSEC Late collision */
++ E_FMAN_MAC_EX_1G_COL_RET_LMT,
++ /**< dTSEC Collision retry limit */
++ E_FMAN_MAC_EX_1G_TX_FIFO_UNDRN,
++ /**< dTSEC Transmit FIFO underrun */
++ E_FMAN_MAC_EX_1G_MAG_PCKT,
++ /**< dTSEC Magic Packet detection */
++ E_FMAN_MAC_EX_1G_MII_MNG_RD_COMPLET,
++ /**< dTSEC MII management read completion */
++ E_FMAN_MAC_EX_1G_MII_MNG_WR_COMPLET,
++ /**< dTSEC MII management write completion */
++ E_FMAN_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET,
++ /**< dTSEC Graceful receive stop complete */
++ E_FMAN_MAC_EX_1G_TX_DATA_ERR,
++ /**< dTSEC Internal data error on transmit */
++ E_FMAN_MAC_EX_1G_RX_DATA_ERR,
++ /**< dTSEC Internal data error on receive */
++ E_FMAN_MAC_EX_1G_1588_TS_RX_ERR,
++ /**< dTSEC Time-Stamp Receive Error */
++ E_FMAN_MAC_EX_1G_RX_MIB_CNT_OVFL,
++ /**< dTSEC MIB counter overflow */
++ E_FMAN_MAC_EX_TS_FIFO_ECC_ERR,
++ /**< mEMAC Time-stamp FIFO ECC error interrupt;
++ not supported on T4240/B4860 rev1 chips */
++};
++
++#define ENET_IF_SGMII_BASEX 0x80000000
++ /**< SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
++ and phy or backplane;
++ Note: 1000BaseX auto-negotiation relates only to interface between MAC
++ and phy/backplane, SGMII phy can still synchronize with far-end phy at
++ 10Mbps, 100Mbps or 1000Mbps */
++
++enum enet_mode {
++ E_ENET_MODE_INVALID = 0,
++ /**< Invalid Ethernet mode */
++ E_ENET_MODE_MII_10 = (E_ENET_IF_MII | E_ENET_SPEED_10),
++ /**< 10 Mbps MII */
++ E_ENET_MODE_MII_100 = (E_ENET_IF_MII | E_ENET_SPEED_100),
++ /**< 100 Mbps MII */
++ E_ENET_MODE_RMII_10 = (E_ENET_IF_RMII | E_ENET_SPEED_10),
++ /**< 10 Mbps RMII */
++ E_ENET_MODE_RMII_100 = (E_ENET_IF_RMII | E_ENET_SPEED_100),
++ /**< 100 Mbps RMII */
++ E_ENET_MODE_SMII_10 = (E_ENET_IF_SMII | E_ENET_SPEED_10),
++ /**< 10 Mbps SMII */
++ E_ENET_MODE_SMII_100 = (E_ENET_IF_SMII | E_ENET_SPEED_100),
++ /**< 100 Mbps SMII */
++ E_ENET_MODE_GMII_1000 = (E_ENET_IF_GMII | E_ENET_SPEED_1000),
++ /**< 1000 Mbps GMII */
++ E_ENET_MODE_RGMII_10 = (E_ENET_IF_RGMII | E_ENET_SPEED_10),
++ /**< 10 Mbps RGMII */
++ E_ENET_MODE_RGMII_100 = (E_ENET_IF_RGMII | E_ENET_SPEED_100),
++ /**< 100 Mbps RGMII */
++ E_ENET_MODE_RGMII_1000 = (E_ENET_IF_RGMII | E_ENET_SPEED_1000),
++ /**< 1000 Mbps RGMII */
++ E_ENET_MODE_TBI_1000 = (E_ENET_IF_TBI | E_ENET_SPEED_1000),
++ /**< 1000 Mbps TBI */
++ E_ENET_MODE_RTBI_1000 = (E_ENET_IF_RTBI | E_ENET_SPEED_1000),
++ /**< 1000 Mbps RTBI */
++ E_ENET_MODE_SGMII_10 = (E_ENET_IF_SGMII | E_ENET_SPEED_10),
++ /**< 10 Mbps SGMII with auto-negotiation between MAC and
++ SGMII phy according to Cisco SGMII specification */
++ E_ENET_MODE_SGMII_100 = (E_ENET_IF_SGMII | E_ENET_SPEED_100),
++ /**< 100 Mbps SGMII with auto-negotiation between MAC and
++ SGMII phy according to Cisco SGMII specification */
++ E_ENET_MODE_SGMII_1000 = (E_ENET_IF_SGMII | E_ENET_SPEED_1000),
++ /**< 1000 Mbps SGMII with auto-negotiation between MAC and
++ SGMII phy according to Cisco SGMII specification */
++ E_ENET_MODE_SGMII_BASEX_10 = (ENET_IF_SGMII_BASEX | E_ENET_IF_SGMII
++ | E_ENET_SPEED_10),
++ /**< 10 Mbps SGMII with 1000BaseX auto-negotiation between
++ MAC and SGMII phy or backplane */
++ E_ENET_MODE_SGMII_BASEX_100 = (ENET_IF_SGMII_BASEX | E_ENET_IF_SGMII
++ | E_ENET_SPEED_100),
++ /**< 100 Mbps SGMII with 1000BaseX auto-negotiation between
++ MAC and SGMII phy or backplane */
++ E_ENET_MODE_SGMII_BASEX_1000 = (ENET_IF_SGMII_BASEX | E_ENET_IF_SGMII
++ | E_ENET_SPEED_1000),
++ /**< 1000 Mbps SGMII with 1000BaseX auto-negotiation between
++ MAC and SGMII phy or backplane */
++ E_ENET_MODE_QSGMII_1000 = (E_ENET_IF_QSGMII | E_ENET_SPEED_1000),
++ /**< 1000 Mbps QSGMII with auto-negotiation between MAC and
++ QSGMII phy according to Cisco QSGMII specification */
++ E_ENET_MODE_QSGMII_BASEX_1000 = (ENET_IF_SGMII_BASEX | E_ENET_IF_QSGMII
++ | E_ENET_SPEED_1000),
++ /**< 1000 Mbps QSGMII with 1000BaseX auto-negotiation between
++ MAC and QSGMII phy or backplane */
++ E_ENET_MODE_XGMII_10000 = (E_ENET_IF_XGMII | E_ENET_SPEED_10000),
++ /**< 10000 Mbps XGMII */
++ E_ENET_MODE_XFI_10000 = (E_ENET_IF_XFI | E_ENET_SPEED_10000)
++ /**< 10000 Mbps XFI */
++};
++
++enum fmam_mac_statistics_level {
++ E_FMAN_MAC_NONE_STATISTICS, /**< No statistics */
++ E_FMAN_MAC_PARTIAL_STATISTICS, /**< Only error counters are available;
++ Optimized for performance */
++ E_FMAN_MAC_FULL_STATISTICS /**< All counters available; Not
++ optimized for performance */
++};
++
++#define _MAKE_ENET_MODE(_interface, _speed) (enum enet_mode)((_interface) \
++ | (_speed))
++
++#define _ENET_INTERFACE_FROM_MODE(mode) (enum enet_interface) \
++ ((mode) & 0x0FFF0000)
++#define _ENET_SPEED_FROM_MODE(mode) (enum enet_speed)((mode) & 0x0000FFFF)
++#define _ENET_ADDR_TO_UINT64(_enet_addr) \
++ (uint64_t)(((uint64_t)(_enet_addr)[0] << 40) | \
++ ((uint64_t)(_enet_addr)[1] << 32) | \
++ ((uint64_t)(_enet_addr)[2] << 24) | \
++ ((uint64_t)(_enet_addr)[3] << 16) | \
++ ((uint64_t)(_enet_addr)[4] << 8) | \
++ ((uint64_t)(_enet_addr)[5]))
++
++#define _MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \
++ do { \
++ int i; \
++ for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) \
++ (_enet_addr)[i] = (uint8_t)((_addr64) >> ((5-i)*8));\
++ } while (0)
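++
++/* Illustrative sketch (editor's addition): packing a MAC address into a
++ * uint64_t and back with the helper macros above. ENET_NUM_OCTETS_PER_ADDRESS
++ * is assumed to be the usual 6 and to be defined elsewhere in the SDK.
++ *
++ * uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
++ * uint64_t addr64 = _ENET_ADDR_TO_UINT64(mac); // 0x00049f010203
++ *
++ * _MAKE_ENET_ADDR_FROM_UINT64(addr64, mac); // inverse operation
++ */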
++
++#endif /* __FSL_ENET_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman.h
+new file mode 100755
+index 00000000..96a63fa7
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman.h
+@@ -0,0 +1,825 @@
++/*
++ * Copyright 2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_H
++#define __FSL_FMAN_H
++
++#include "common/general.h"
++
++struct fman_ext_pool_params {
++ uint8_t id; /**< External buffer pool id */
++ uint16_t size; /**< External buffer pool buffer size */
++};
++
++struct fman_ext_pools {
++ uint8_t num_pools_used; /**< Number of pools use by this port */
++ struct fman_ext_pool_params *ext_buf_pool;
++ /**< Parameters for each port */
++};
++
++struct fman_backup_bm_pools {
++ uint8_t num_backup_pools; /**< Number of BM backup pools -
++ must be smaller than the total number
++ of pools defined for the specified
++ port.*/
++ uint8_t *pool_ids; /**< numOfBackupPools pool id's,
++ specifying which pools should be used
++ only as backup. Pool id's specified
++ here must be a subset of the pools
++ used by the specified port.*/
++};
++
++/**************************************************************************//**
++ @Description A structure for defining BM pool depletion criteria
++*//***************************************************************************/
++struct fman_buf_pool_depletion {
++ bool buf_pool_depletion_enabled;
++ bool pools_grp_mode_enable; /**< select mode in which pause frames
++ will be sent after a number of pools
++ (all together!) are depleted */
++ uint8_t num_pools; /**< the number of depleted pools that
++ will invoke pause frame transmission.
++ */
++ bool *pools_to_consider; /**< For each pool, TRUE if it should be
++ considered for depletion (Note - this
++ pool must be used by this port!). */
++ bool single_pool_mode_enable; /**< select mode in which pause frames
++ will be sent after a single pool
++ is depleted; */
++ bool *pools_to_consider_for_single_mode;
++ /**< For each pool, TRUE if it should be
++ considered for depletion (Note - this
++ pool must be used by this port!) */
++ bool has_pfc_priorities;
++ bool *pfc_priorities_en; /**< This field is used by the MAC as
++ the Priority Enable Vector in the PFC
++ frame which is transmitted */
++};
++
++/**************************************************************************//**
++ @Description Enum for defining port DMA swap mode
++*//***************************************************************************/
++enum fman_dma_swap_option {
++ FMAN_DMA_NO_SWP, /**< No swap, transfer data as is.*/
++ FMAN_DMA_SWP_PPC_LE, /**< The transferred data should be swapped
++ in PowerPC Little Endian mode. */
++ FMAN_DMA_SWP_BE /**< The transferred data should be swapped
++ in Big Endian mode */
++};
++
++/**************************************************************************//**
++ @Description Enum for defining port DMA cache attributes
++*//***************************************************************************/
++enum fman_dma_cache_option {
++ FMAN_DMA_NO_STASH = 0, /**< Cacheable, no Allocate (No Stashing) */
++ FMAN_DMA_STASH = 1 /**< Cacheable and Allocate (Stashing on) */
++};
++
++typedef struct t_FmPrsResult fm_prs_result_t;
++typedef enum e_EnetMode enet_mode_t;
++typedef t_Handle handle_t;
++
++struct fman_revision_info {
++ uint8_t majorRev; /**< Major revision */
++ uint8_t minorRev; /**< Minor revision */
++};
++
++/* sizes */
++#define CAPWAP_FRAG_EXTRA_SPACE 32
++#define OFFSET_UNITS 16
++#define MAX_INT_OFFSET 240
++#define MAX_IC_SIZE 256
++#define MAX_EXT_OFFSET 496
++#define MAX_EXT_BUFFER_OFFSET 511
++
++/**************************************************************************
++ @Description Memory Mapped Registers
++***************************************************************************/
++#define FMAN_LIODN_TBL 64 /* size of LIODN table */
++
++struct fman_fpm_regs {
++ uint32_t fmfp_tnc; /**< FPM TNUM Control 0x00 */
++ uint32_t fmfp_prc; /**< FPM Port_ID FmCtl Association 0x04 */
++ uint32_t fmfp_brkc; /**< FPM Breakpoint Control 0x08 */
++ uint32_t fmfp_mxd; /**< FPM Flush Control 0x0c */
++ uint32_t fmfp_dist1; /**< FPM Dispatch Thresholds1 0x10 */
++ uint32_t fmfp_dist2; /**< FPM Dispatch Thresholds2 0x14 */
++ uint32_t fm_epi; /**< FM Error Pending Interrupts 0x18 */
++ uint32_t fm_rie; /**< FM Error Interrupt Enable 0x1c */
++ uint32_t fmfp_fcev[4]; /**< FPM FMan-Controller Event 1-4 0x20-0x2f */
++ uint32_t res0030[4]; /**< res 0x30 - 0x3f */
++	uint32_t fmfp_cee[4]; /**< FPM FMan-Controller Event 1-4 0x40-0x4f */
++ uint32_t res0050[4]; /**< res 0x50-0x5f */
++ uint32_t fmfp_tsc1; /**< FPM TimeStamp Control1 0x60 */
++ uint32_t fmfp_tsc2; /**< FPM TimeStamp Control2 0x64 */
++ uint32_t fmfp_tsp; /**< FPM Time Stamp 0x68 */
++ uint32_t fmfp_tsf; /**< FPM Time Stamp Fraction 0x6c */
++ uint32_t fm_rcr; /**< FM Rams Control 0x70 */
++ uint32_t fmfp_extc; /**< FPM External Requests Control 0x74 */
++ uint32_t fmfp_ext1; /**< FPM External Requests Config1 0x78 */
++ uint32_t fmfp_ext2; /**< FPM External Requests Config2 0x7c */
++ uint32_t fmfp_drd[16]; /**< FPM Data_Ram Data 0-15 0x80 - 0xbf */
++ uint32_t fmfp_dra; /**< FPM Data Ram Access 0xc0 */
++ uint32_t fm_ip_rev_1; /**< FM IP Block Revision 1 0xc4 */
++ uint32_t fm_ip_rev_2; /**< FM IP Block Revision 2 0xc8 */
++ uint32_t fm_rstc; /**< FM Reset Command 0xcc */
++ uint32_t fm_cld; /**< FM Classifier Debug 0xd0 */
++ uint32_t fm_npi; /**< FM Normal Pending Interrupts 0xd4 */
++ uint32_t fmfp_exte; /**< FPM External Requests Enable 0xd8 */
++ uint32_t fmfp_ee; /**< FPM Event & Mask 0xdc */
++ uint32_t fmfp_cev[4]; /**< FPM CPU Event 1-4 0xe0-0xef */
++ uint32_t res00f0[4]; /**< res 0xf0-0xff */
++ uint32_t fmfp_ps[64]; /**< FPM Port Status 0x100-0x1ff */
++ uint32_t fmfp_clfabc; /**< FPM CLFABC 0x200 */
++ uint32_t fmfp_clfcc; /**< FPM CLFCC 0x204 */
++ uint32_t fmfp_clfaval; /**< FPM CLFAVAL 0x208 */
++ uint32_t fmfp_clfbval; /**< FPM CLFBVAL 0x20c */
++ uint32_t fmfp_clfcval; /**< FPM CLFCVAL 0x210 */
++ uint32_t fmfp_clfamsk; /**< FPM CLFAMSK 0x214 */
++ uint32_t fmfp_clfbmsk; /**< FPM CLFBMSK 0x218 */
++ uint32_t fmfp_clfcmsk; /**< FPM CLFCMSK 0x21c */
++ uint32_t fmfp_clfamc; /**< FPM CLFAMC 0x220 */
++ uint32_t fmfp_clfbmc; /**< FPM CLFBMC 0x224 */
++ uint32_t fmfp_clfcmc; /**< FPM CLFCMC 0x228 */
++ uint32_t fmfp_decceh; /**< FPM DECCEH 0x22c */
++ uint32_t res0230[116]; /**< res 0x230 - 0x3ff */
++ uint32_t fmfp_ts[128]; /**< 0x400: FPM Task Status 0x400 - 0x5ff */
++ uint32_t res0600[0x400 - 384];
++};
++
++struct fman_bmi_regs {
++ uint32_t fmbm_init; /**< BMI Initialization 0x00 */
++ uint32_t fmbm_cfg1; /**< BMI Configuration 1 0x04 */
++ uint32_t fmbm_cfg2; /**< BMI Configuration 2 0x08 */
++ uint32_t res000c[5]; /**< 0x0c - 0x1f */
++ uint32_t fmbm_ievr; /**< Interrupt Event Register 0x20 */
++ uint32_t fmbm_ier; /**< Interrupt Enable Register 0x24 */
++ uint32_t fmbm_ifr; /**< Interrupt Force Register 0x28 */
++ uint32_t res002c[5]; /**< 0x2c - 0x3f */
++ uint32_t fmbm_arb[8]; /**< BMI Arbitration 0x40 - 0x5f */
++ uint32_t res0060[12]; /**<0x60 - 0x8f */
++ uint32_t fmbm_dtc[3]; /**< Debug Trap Counter 0x90 - 0x9b */
++ uint32_t res009c; /**< 0x9c */
++ uint32_t fmbm_dcv[3][4]; /**< Debug Compare val 0xa0-0xcf */
++ uint32_t fmbm_dcm[3][4]; /**< Debug Compare Mask 0xd0-0xff */
++ uint32_t fmbm_gde; /**< BMI Global Debug Enable 0x100 */
++ uint32_t fmbm_pp[63]; /**< BMI Port Parameters 0x104 - 0x1ff */
++ uint32_t res0200; /**< 0x200 */
++ uint32_t fmbm_pfs[63]; /**< BMI Port FIFO Size 0x204 - 0x2ff */
++ uint32_t res0300; /**< 0x300 */
++ uint32_t fmbm_spliodn[63]; /**< Port Partition ID 0x304 - 0x3ff */
++};
++
++struct fman_qmi_regs {
++ uint32_t fmqm_gc; /**< General Configuration Register 0x00 */
++ uint32_t res0004; /**< 0x04 */
++ uint32_t fmqm_eie; /**< Error Interrupt Event Register 0x08 */
++ uint32_t fmqm_eien; /**< Error Interrupt Enable Register 0x0c */
++ uint32_t fmqm_eif; /**< Error Interrupt Force Register 0x10 */
++ uint32_t fmqm_ie; /**< Interrupt Event Register 0x14 */
++ uint32_t fmqm_ien; /**< Interrupt Enable Register 0x18 */
++ uint32_t fmqm_if; /**< Interrupt Force Register 0x1c */
++ uint32_t fmqm_gs; /**< Global Status Register 0x20 */
++ uint32_t fmqm_ts; /**< Task Status Register 0x24 */
++ uint32_t fmqm_etfc; /**< Enqueue Total Frame Counter 0x28 */
++ uint32_t fmqm_dtfc; /**< Dequeue Total Frame Counter 0x2c */
++ uint32_t fmqm_dc0; /**< Dequeue Counter 0 0x30 */
++ uint32_t fmqm_dc1; /**< Dequeue Counter 1 0x34 */
++ uint32_t fmqm_dc2; /**< Dequeue Counter 2 0x38 */
++ uint32_t fmqm_dc3; /**< Dequeue Counter 3 0x3c */
++ uint32_t fmqm_dfdc; /**< Dequeue FQID from Default Counter 0x40 */
++ uint32_t fmqm_dfcc; /**< Dequeue FQID from Context Counter 0x44 */
++ uint32_t fmqm_dffc; /**< Dequeue FQID from FD Counter 0x48 */
++ uint32_t fmqm_dcc; /**< Dequeue Confirm Counter 0x4c */
++ uint32_t res0050[7]; /**< 0x50 - 0x6b */
++ uint32_t fmqm_tapc; /**< Tnum Aging Period Control 0x6c */
++ uint32_t fmqm_dmcvc; /**< Dequeue MAC Command Valid Counter 0x70 */
++ uint32_t fmqm_difdcc; /**< Dequeue Invalid FD Command Counter 0x74 */
++ uint32_t fmqm_da1v; /**< Dequeue A1 Valid Counter 0x78 */
++ uint32_t res007c; /**< 0x7c */
++ uint32_t fmqm_dtc; /**< 0x80 Debug Trap Counter 0x80 */
++ uint32_t fmqm_efddd; /**< 0x84 Enqueue Frame desc Dynamic dbg 0x84 */
++ uint32_t res0088[2]; /**< 0x88 - 0x8f */
++ struct {
++ uint32_t fmqm_dtcfg1; /**< 0x90 dbg trap cfg 1 Register 0x00 */
++ uint32_t fmqm_dtval1; /**< Debug Trap Value 1 Register 0x04 */
++ uint32_t fmqm_dtm1; /**< Debug Trap Mask 1 Register 0x08 */
++ uint32_t fmqm_dtc1; /**< Debug Trap Counter 1 Register 0x0c */
++ uint32_t fmqm_dtcfg2; /**< dbg Trap cfg 2 Register 0x10 */
++ uint32_t fmqm_dtval2; /**< Debug Trap Value 2 Register 0x14 */
++ uint32_t fmqm_dtm2; /**< Debug Trap Mask 2 Register 0x18 */
++ uint32_t res001c; /**< 0x1c */
++ } dbg_traps[3]; /**< 0x90 - 0xef */
++ uint8_t res00f0[0x400 - 0xf0]; /**< 0xf0 - 0x3ff */
++};
++
++struct fman_dma_regs {
++ uint32_t fmdmsr; /**< FM DMA status register 0x00 */
++ uint32_t fmdmmr; /**< FM DMA mode register 0x04 */
++ uint32_t fmdmtr; /**< FM DMA bus threshold register 0x08 */
++ uint32_t fmdmhy; /**< FM DMA bus hysteresis register 0x0c */
++ uint32_t fmdmsetr; /**< FM DMA SOS emergency Threshold Register 0x10 */
++ uint32_t fmdmtah; /**< FM DMA transfer bus address high reg 0x14 */
++ uint32_t fmdmtal; /**< FM DMA transfer bus address low reg 0x18 */
++ uint32_t fmdmtcid; /**< FM DMA transfer bus communication ID reg 0x1c */
++ uint32_t fmdmra; /**< FM DMA bus internal ram address register 0x20 */
++ uint32_t fmdmrd; /**< FM DMA bus internal ram data register 0x24 */
++ uint32_t fmdmwcr; /**< FM DMA CAM watchdog counter value 0x28 */
++ uint32_t fmdmebcr; /**< FM DMA CAM base in MURAM register 0x2c */
++ uint32_t fmdmccqdr; /**< FM DMA CAM and CMD Queue Debug reg 0x30 */
++ uint32_t fmdmccqvr1; /**< FM DMA CAM and CMD Queue Value reg #1 0x34 */
++ uint32_t fmdmccqvr2; /**< FM DMA CAM and CMD Queue Value reg #2 0x38 */
++ uint32_t fmdmcqvr3; /**< FM DMA CMD Queue Value register #3 0x3c */
++ uint32_t fmdmcqvr4; /**< FM DMA CMD Queue Value register #4 0x40 */
++ uint32_t fmdmcqvr5; /**< FM DMA CMD Queue Value register #5 0x44 */
++ uint32_t fmdmsefrc; /**< FM DMA Semaphore Entry Full Reject Cntr 0x48 */
++ uint32_t fmdmsqfrc; /**< FM DMA Semaphore Queue Full Reject Cntr 0x4c */
++ uint32_t fmdmssrc; /**< FM DMA Semaphore SYNC Reject Counter 0x50 */
++ uint32_t fmdmdcr; /**< FM DMA Debug Counter 0x54 */
++ uint32_t fmdmemsr; /**< FM DMA Emergency Smoother Register 0x58 */
++ uint32_t res005c; /**< 0x5c */
++ uint32_t fmdmplr[FMAN_LIODN_TBL / 2]; /**< DMA LIODN regs 0x60-0xdf */
++ uint32_t res00e0[0x400 - 56];
++};
++
++struct fman_rg {
++ struct fman_fpm_regs *fpm_rg;
++ struct fman_dma_regs *dma_rg;
++ struct fman_bmi_regs *bmi_rg;
++ struct fman_qmi_regs *qmi_rg;
++};
++
++enum fman_dma_cache_override {
++ E_FMAN_DMA_NO_CACHE_OR = 0, /**< No override of the Cache field */
++ E_FMAN_DMA_NO_STASH_DATA, /**< No data stashing in system level cache */
++ E_FMAN_DMA_MAY_STASH_DATA, /**< Stashing allowed in sys level cache */
++ E_FMAN_DMA_STASH_DATA /**< Stashing performed in system level cache */
++};
++
++enum fman_dma_aid_mode {
++ E_FMAN_DMA_AID_OUT_PORT_ID = 0, /**< 4 LSB of PORT_ID */
++ E_FMAN_DMA_AID_OUT_TNUM /**< 4 LSB of TNUM */
++};
++
++enum fman_dma_dbg_cnt_mode {
++ E_FMAN_DMA_DBG_NO_CNT = 0, /**< No counting */
++ E_FMAN_DMA_DBG_CNT_DONE, /**< Count DONE commands */
++ E_FMAN_DMA_DBG_CNT_COMM_Q_EM, /**< command Q emergency signal */
++ E_FMAN_DMA_DBG_CNT_INT_READ_EM, /**< Read buf emergency signal */
++ E_FMAN_DMA_DBG_CNT_INT_WRITE_EM, /**< Write buf emergency signal */
++ E_FMAN_DMA_DBG_CNT_FPM_WAIT, /**< FPM WAIT signal */
++ E_FMAN_DMA_DBG_CNT_SIGLE_BIT_ECC, /**< Single bit ECC errors */
++ E_FMAN_DMA_DBG_CNT_RAW_WAR_PROT /**< RAW & WAR protection counter */
++};
++
++enum fman_dma_emergency_level {
++ E_FMAN_DMA_EM_EBS = 0, /**< EBS emergency */
++ E_FMAN_DMA_EM_SOS /**< SOS emergency */
++};
++
++enum fman_catastrophic_err {
++ E_FMAN_CATAST_ERR_STALL_PORT = 0, /**< Port_ID stalled reset required */
++ E_FMAN_CATAST_ERR_STALL_TASK /**< Only erroneous task is stalled */
++};
++
++enum fman_dma_err {
++ E_FMAN_DMA_ERR_CATASTROPHIC = 0, /**< Catastrophic DMA error */
++ E_FMAN_DMA_ERR_REPORT /**< Reported DMA error */
++};
++
++struct fman_cfg {
++ uint16_t liodn_bs_pr_port[FMAN_LIODN_TBL];/* base per port */
++ bool en_counters;
++ uint8_t disp_limit_tsh;
++ uint8_t prs_disp_tsh;
++ uint8_t plcr_disp_tsh;
++ uint8_t kg_disp_tsh;
++ uint8_t bmi_disp_tsh;
++ uint8_t qmi_enq_disp_tsh;
++ uint8_t qmi_deq_disp_tsh;
++ uint8_t fm_ctl1_disp_tsh;
++ uint8_t fm_ctl2_disp_tsh;
++ enum fman_dma_cache_override dma_cache_override;
++ enum fman_dma_aid_mode dma_aid_mode;
++ bool dma_aid_override;
++ uint8_t dma_axi_dbg_num_of_beats;
++ uint8_t dma_cam_num_of_entries;
++ uint32_t dma_watchdog;
++ uint8_t dma_comm_qtsh_asrt_emer;
++ uint8_t dma_write_buf_tsh_asrt_emer;
++ uint8_t dma_read_buf_tsh_asrt_emer;
++ uint8_t dma_comm_qtsh_clr_emer;
++ uint8_t dma_write_buf_tsh_clr_emer;
++ uint8_t dma_read_buf_tsh_clr_emer;
++ uint32_t dma_sos_emergency;
++ enum fman_dma_dbg_cnt_mode dma_dbg_cnt_mode;
++ bool dma_stop_on_bus_error;
++ bool dma_en_emergency;
++ uint32_t dma_emergency_bus_select;
++ enum fman_dma_emergency_level dma_emergency_level;
++ bool dma_en_emergency_smoother;
++ uint32_t dma_emergency_switch_counter;
++ bool halt_on_external_activ;
++ bool halt_on_unrecov_ecc_err;
++ enum fman_catastrophic_err catastrophic_err;
++ enum fman_dma_err dma_err;
++ bool en_muram_test_mode;
++ bool en_iram_test_mode;
++ bool external_ecc_rams_enable;
++ uint16_t tnum_aging_period;
++ uint32_t exceptions;
++ uint16_t clk_freq;
++ bool pedantic_dma;
++ uint32_t cam_base_addr;
++ uint32_t fifo_base_addr;
++ uint32_t total_fifo_size;
++ uint8_t total_num_of_tasks;
++ bool qmi_deq_option_support;
++ uint32_t qmi_def_tnums_thresh;
++ bool fman_partition_array;
++ uint8_t num_of_fman_ctrl_evnt_regs;
++};
++
++/**************************************************************************//**
++ @Description Exceptions
++*//***************************************************************************/
++#define FMAN_EX_DMA_BUS_ERROR 0x80000000
++#define FMAN_EX_DMA_READ_ECC 0x40000000
++#define FMAN_EX_DMA_SYSTEM_WRITE_ECC 0x20000000
++#define FMAN_EX_DMA_FM_WRITE_ECC 0x10000000
++#define FMAN_EX_FPM_STALL_ON_TASKS 0x08000000
++#define FMAN_EX_FPM_SINGLE_ECC 0x04000000
++#define FMAN_EX_FPM_DOUBLE_ECC 0x02000000
++#define FMAN_EX_QMI_SINGLE_ECC 0x01000000
++#define FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000
++#define FMAN_EX_QMI_DOUBLE_ECC 0x00400000
++#define FMAN_EX_BMI_LIST_RAM_ECC 0x00200000
++#define FMAN_EX_BMI_PIPELINE_ECC 0x00100000
++#define FMAN_EX_BMI_STATISTICS_RAM_ECC 0x00080000
++#define FMAN_EX_IRAM_ECC 0x00040000
++#define FMAN_EX_NURAM_ECC 0x00020000
++#define FMAN_EX_BMI_DISPATCH_RAM_ECC 0x00010000
++
++enum fman_exceptions {
++ E_FMAN_EX_DMA_BUS_ERROR = 0, /**< DMA bus error. */
++ E_FMAN_EX_DMA_READ_ECC, /**< Read Buffer ECC error */
++ E_FMAN_EX_DMA_SYSTEM_WRITE_ECC, /**< Write Buffer ECC err on sys side */
++ E_FMAN_EX_DMA_FM_WRITE_ECC, /**< Write Buffer ECC error on FM side */
++ E_FMAN_EX_FPM_STALL_ON_TASKS, /**< Stall of tasks on FPM */
++ E_FMAN_EX_FPM_SINGLE_ECC, /**< Single ECC on FPM. */
++ E_FMAN_EX_FPM_DOUBLE_ECC, /**< Double ECC error on FPM ram access */
++ E_FMAN_EX_QMI_SINGLE_ECC, /**< Single ECC on QMI. */
++ E_FMAN_EX_QMI_DOUBLE_ECC, /**< Double bit ECC occurred on QMI */
++ E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/**< DeQ from unknown port id */
++ E_FMAN_EX_BMI_LIST_RAM_ECC, /**< Linked List RAM ECC error */
++ E_FMAN_EX_BMI_STORAGE_PROFILE_ECC, /**< storage profile */
++ E_FMAN_EX_BMI_STATISTICS_RAM_ECC, /**< Statistics RAM ECC Err Enable */
++ E_FMAN_EX_BMI_DISPATCH_RAM_ECC, /**< Dispatch RAM ECC Error Enable */
++ E_FMAN_EX_IRAM_ECC, /**< Double bit ECC occurred on IRAM*/
++ E_FMAN_EX_MURAM_ECC /**< Double bit ECC occurred on MURAM*/
++};
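++
++/*
++ * Each exception above maps to one of the FMAN_EX_* bits and can be
++ * enabled or disabled at runtime through fman_set_exception()
++ * (prototype below), e.g.:
++ *
++ *	fman_set_exception(&fman_rg, E_FMAN_EX_DMA_BUS_ERROR, TRUE);
++ */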
++
++enum fman_counters {
++ E_FMAN_COUNTERS_ENQ_TOTAL_FRAME = 0, /**< QMI tot enQ frames counter */
++ E_FMAN_COUNTERS_DEQ_TOTAL_FRAME, /**< QMI tot deQ frames counter */
++ E_FMAN_COUNTERS_DEQ_0, /**< QMI 0 frames from QMan counter */
++ E_FMAN_COUNTERS_DEQ_1, /**< QMI 1 frames from QMan counter */
++ E_FMAN_COUNTERS_DEQ_2, /**< QMI 2 frames from QMan counter */
++ E_FMAN_COUNTERS_DEQ_3, /**< QMI 3 frames from QMan counter */
++ E_FMAN_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI deQ from dflt queue cntr */
++ E_FMAN_COUNTERS_DEQ_FROM_CONTEXT, /**< QMI deQ from FQ context cntr */
++ E_FMAN_COUNTERS_DEQ_FROM_FD, /**< QMI deQ from FD command field cntr */
++ E_FMAN_COUNTERS_DEQ_CONFIRM, /**< QMI dequeue confirm counter */
++ E_FMAN_COUNTERS_SEMAPHOR_ENTRY_FULL_REJECT, /**< DMA full entry cntr */
++ E_FMAN_COUNTERS_SEMAPHOR_QUEUE_FULL_REJECT, /**< DMA full CAM Q cntr */
++ E_FMAN_COUNTERS_SEMAPHOR_SYNC_REJECT /**< DMA sync counter */
++};
++
++#define FPM_PRT_FM_CTL1 0x00000001
++#define FPM_PRT_FM_CTL2 0x00000002
++
++/**************************************************************************//**
++ @Description DMA definitions
++*//***************************************************************************/
++
++/* masks */
++#define DMA_MODE_AID_OR 0x20000000
++#define DMA_MODE_SBER 0x10000000
++#define DMA_MODE_BER 0x00200000
++#define DMA_MODE_EB 0x00100000
++#define DMA_MODE_ECC 0x00000020
++#define DMA_MODE_PRIVILEGE_PROT 0x00001000
++#define DMA_MODE_SECURE_PROT 0x00000800
++#define DMA_MODE_EMER_READ 0x00080000
++#define DMA_MODE_EMER_WRITE 0x00040000
++#define DMA_MODE_CACHE_OR_MASK 0xC0000000
++#define DMA_MODE_CEN_MASK 0x0000E000
++#define DMA_MODE_DBG_MASK 0x00000380
++#define DMA_MODE_AXI_DBG_MASK 0x0F000000
++
++#define DMA_EMSR_EMSTR_MASK 0x0000FFFF
++
++#define DMA_TRANSFER_PORTID_MASK 0xFF000000
++#define DMA_TRANSFER_TNUM_MASK 0x00FF0000
++#define DMA_TRANSFER_LIODN_MASK 0x00000FFF
++
++#define DMA_HIGH_LIODN_MASK 0x0FFF0000
++#define DMA_LOW_LIODN_MASK 0x00000FFF
++
++#define DMA_STATUS_CMD_QUEUE_NOT_EMPTY 0x10000000
++#define DMA_STATUS_BUS_ERR 0x08000000
++#define DMA_STATUS_READ_ECC 0x04000000
++#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000
++#define DMA_STATUS_FM_WRITE_ECC 0x01000000
++#define DMA_STATUS_SYSTEM_DPEXT_ECC 0x00800000
++#define DMA_STATUS_FM_DPEXT_ECC 0x00400000
++#define DMA_STATUS_SYSTEM_DPDAT_ECC 0x00200000
++#define DMA_STATUS_FM_DPDAT_ECC 0x00100000
++#define DMA_STATUS_FM_SPDAT_ECC 0x00080000
++
++#define FM_LIODN_BASE_MASK 0x00000FFF
++
++/* shifts */
++#define DMA_MODE_CACHE_OR_SHIFT 30
++#define DMA_MODE_BUS_PRI_SHIFT 16
++#define DMA_MODE_AXI_DBG_SHIFT 24
++#define DMA_MODE_CEN_SHIFT 13
++#define DMA_MODE_BUS_PROT_SHIFT 10
++#define DMA_MODE_DBG_SHIFT 7
++#define DMA_MODE_EMER_LVL_SHIFT 6
++#define DMA_MODE_AID_MODE_SHIFT 4
++#define DMA_MODE_MAX_AXI_DBG_NUM_OF_BEATS 16
++#define DMA_MODE_MAX_CAM_NUM_OF_ENTRIES 32
++
++#define DMA_THRESH_COMMQ_SHIFT 24
++#define DMA_THRESH_READ_INT_BUF_SHIFT 16
++
++#define DMA_LIODN_SHIFT 16
++
++#define DMA_TRANSFER_PORTID_SHIFT 24
++#define DMA_TRANSFER_TNUM_SHIFT 16
++
++/* sizes */
++#define DMA_MAX_WATCHDOG 0xffffffff
++
++/* others */
++#define DMA_CAM_SIZEOF_ENTRY 0x40
++#define DMA_CAM_ALIGN 0x1000
++#define DMA_CAM_UNITS 8
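++
++/*
++ * The mask/shift pairs above compose multi-field registers. For
++ * instance, a bus-threshold value for fmdmtr would plausibly be
++ * assembled as follows (illustrative only; "commq_tsh" and
++ * "read_buf_tsh" are assumed caller values):
++ *
++ *	tr = (commq_tsh << DMA_THRESH_COMMQ_SHIFT) |
++ *	     (read_buf_tsh << DMA_THRESH_READ_INT_BUF_SHIFT);
++ */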
++
++/**************************************************************************//**
++ @Description General defines
++*//***************************************************************************/
++
++#define FM_DEBUG_STATUS_REGISTER_OFFSET 0x000d1084UL
++#define FM_UCODE_DEBUG_INSTRUCTION 0x6ffff805UL
++
++/**************************************************************************//**
++ @Description FPM defines
++*//***************************************************************************/
++
++/* masks */
++#define FPM_EV_MASK_DOUBLE_ECC 0x80000000
++#define FPM_EV_MASK_STALL 0x40000000
++#define FPM_EV_MASK_SINGLE_ECC 0x20000000
++#define FPM_EV_MASK_RELEASE_FM 0x00010000
++#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000
++#define FPM_EV_MASK_STALL_EN 0x00004000
++#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000
++#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008
++#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004
++
++#define FPM_RAM_RAMS_ECC_EN 0x80000000
++#define FPM_RAM_IRAM_ECC_EN 0x40000000
++#define FPM_RAM_MURAM_ECC 0x00008000
++#define FPM_RAM_IRAM_ECC 0x00004000
++#define FPM_RAM_MURAM_TEST_ECC 0x20000000
++#define FPM_RAM_IRAM_TEST_ECC 0x10000000
++#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000
++
++#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
++#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
++
++#define FPM_REV1_MAJOR_MASK 0x0000FF00
++#define FPM_REV1_MINOR_MASK 0x000000FF
++
++#define FPM_REV2_INTEG_MASK 0x00FF0000
++#define FPM_REV2_ERR_MASK 0x0000FF00
++#define FPM_REV2_CFG_MASK 0x000000FF
++
++#define FPM_TS_FRACTION_MASK 0x0000FFFF
++#define FPM_TS_CTL_EN 0x80000000
++
++#define FPM_PRC_REALSE_STALLED 0x00800000
++
++#define FPM_PS_STALLED 0x00800000
++#define FPM_PS_FM_CTL1_SEL 0x80000000
++#define FPM_PS_FM_CTL2_SEL 0x40000000
++#define FPM_PS_FM_CTL_SEL_MASK (FPM_PS_FM_CTL1_SEL | FPM_PS_FM_CTL2_SEL)
++
++#define FPM_RSTC_FM_RESET 0x80000000
++#define FPM_RSTC_10G0_RESET 0x04000000
++#define FPM_RSTC_1G0_RESET 0x40000000
++#define FPM_RSTC_1G1_RESET 0x20000000
++#define FPM_RSTC_1G2_RESET 0x10000000
++#define FPM_RSTC_1G3_RESET 0x08000000
++#define FPM_RSTC_1G4_RESET 0x02000000
++
++
++#define FPM_DISP_LIMIT_MASK 0x1F000000
++#define FPM_THR1_PRS_MASK 0xFF000000
++#define FPM_THR1_KG_MASK 0x00FF0000
++#define FPM_THR1_PLCR_MASK 0x0000FF00
++#define FPM_THR1_BMI_MASK 0x000000FF
++
++#define FPM_THR2_QMI_ENQ_MASK 0xFF000000
++#define FPM_THR2_QMI_DEQ_MASK 0x000000FF
++#define FPM_THR2_FM_CTL1_MASK 0x00FF0000
++#define FPM_THR2_FM_CTL2_MASK 0x0000FF00
++
++/* shifts */
++#define FPM_DISP_LIMIT_SHIFT 24
++
++#define FPM_THR1_PRS_SHIFT 24
++#define FPM_THR1_KG_SHIFT 16
++#define FPM_THR1_PLCR_SHIFT 8
++#define FPM_THR1_BMI_SHIFT 0
++
++#define FPM_THR2_QMI_ENQ_SHIFT 24
++#define FPM_THR2_QMI_DEQ_SHIFT 0
++#define FPM_THR2_FM_CTL1_SHIFT 16
++#define FPM_THR2_FM_CTL2_SHIFT 8
++
++#define FPM_EV_MASK_CAT_ERR_SHIFT 1
++#define FPM_EV_MASK_DMA_ERR_SHIFT 0
++
++#define FPM_REV1_MAJOR_SHIFT 8
++#define FPM_REV1_MINOR_SHIFT 0
++
++#define FPM_REV2_INTEG_SHIFT 16
++#define FPM_REV2_ERR_SHIFT 8
++#define FPM_REV2_CFG_SHIFT 0
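++
++/*
++ * Example: fman_get_revision() is expected to decode fm_ip_rev_1
++ * using the masks and shifts above (a sketch):
++ *
++ *	*major = (uint8_t)((rev1 & FPM_REV1_MAJOR_MASK) >>
++ *			   FPM_REV1_MAJOR_SHIFT);
++ *	*minor = (uint8_t)((rev1 & FPM_REV1_MINOR_MASK) >>
++ *			   FPM_REV1_MINOR_SHIFT);
++ */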
++
++#define FPM_TS_INT_SHIFT 16
++
++#define FPM_PORT_FM_CTL_PORTID_SHIFT 24
++
++#define FPM_PS_FM_CTL_SEL_SHIFT 30
++#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16
++
++#define FPM_DISP_LIMIT_SHIFT 24
++
++/* Interrupts defines */
++#define FPM_EVENT_FM_CTL_0 0x00008000
++#define FPM_EVENT_FM_CTL 0x0000FF00
++#define FPM_EVENT_FM_CTL_BRK 0x00000080
++
++/* others */
++#define FPM_MAX_DISP_LIMIT 31
++#define FPM_RSTC_FM_RESET 0x80000000
++#define FPM_RSTC_1G0_RESET 0x40000000
++#define FPM_RSTC_1G1_RESET 0x20000000
++#define FPM_RSTC_1G2_RESET 0x10000000
++#define FPM_RSTC_1G3_RESET 0x08000000
++#define FPM_RSTC_10G0_RESET 0x04000000
++#define FPM_RSTC_1G4_RESET 0x02000000
++#define FPM_RSTC_1G5_RESET 0x01000000
++#define FPM_RSTC_1G6_RESET 0x00800000
++#define FPM_RSTC_1G7_RESET 0x00400000
++#define FPM_RSTC_10G1_RESET 0x00200000
++/**************************************************************************//**
++ @Description BMI defines
++*//***************************************************************************/
++/* masks */
++#define BMI_INIT_START 0x80000000
++#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
++#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
++#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
++#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
++#define BMI_NUM_OF_TASKS_MASK 0x3F000000
++#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000
++#define BMI_NUM_OF_DMAS_MASK 0x00000F00
++#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F
++#define BMI_FIFO_SIZE_MASK 0x000003FF
++#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000
++#define BMI_CFG2_DMAS_MASK 0x0000003F
++#define BMI_TOTAL_FIFO_SIZE_MASK 0x07FF0000
++#define BMI_TOTAL_NUM_OF_TASKS_MASK 0x007F0000
++
++/* shifts */
++#define BMI_CFG2_TASKS_SHIFT 16
++#define BMI_CFG2_DMAS_SHIFT 0
++#define BMI_CFG1_FIFO_SIZE_SHIFT 16
++#define BMI_FIFO_SIZE_SHIFT 0
++#define BMI_EXTRA_FIFO_SIZE_SHIFT 16
++#define BMI_NUM_OF_TASKS_SHIFT 24
++#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16
++#define BMI_NUM_OF_DMAS_SHIFT 8
++#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0
++
++/* others */
++#define BMI_FIFO_ALIGN 0x100
++#define FMAN_BMI_FIFO_UNITS 0x100
++
++
++/**************************************************************************//**
++ @Description QMI defines
++*//***************************************************************************/
++/* masks */
++#define QMI_CFG_ENQ_EN 0x80000000
++#define QMI_CFG_DEQ_EN 0x40000000
++#define QMI_CFG_EN_COUNTERS 0x10000000
++#define QMI_CFG_SOFT_RESET 0x01000000
++#define QMI_CFG_DEQ_MASK 0x0000003F
++#define QMI_CFG_ENQ_MASK 0x00003F00
++
++#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
++#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
++#define QMI_INTR_EN_SINGLE_ECC 0x80000000
++
++/* shifts */
++#define QMI_CFG_ENQ_SHIFT 8
++#define QMI_TAPC_TAP 22
++
++#define QMI_GS_HALT_NOT_BUSY 0x00000002
++
++/**************************************************************************//**
++ @Description IRAM defines
++*//***************************************************************************/
++/* masks */
++#define IRAM_IADD_AIE 0x80000000
++#define IRAM_READY 0x80000000
++
++uint32_t fman_get_bmi_err_event(struct fman_bmi_regs *bmi_rg);
++uint32_t fman_get_qmi_err_event(struct fman_qmi_regs *qmi_rg);
++uint32_t fman_get_dma_com_id(struct fman_dma_regs *dma_rg);
++uint64_t fman_get_dma_addr(struct fman_dma_regs *dma_rg);
++uint32_t fman_get_dma_err_event(struct fman_dma_regs *dma_rg);
++uint32_t fman_get_fpm_err_event(struct fman_fpm_regs *fpm_rg);
++uint32_t fman_get_muram_err_event(struct fman_fpm_regs *fpm_rg);
++uint32_t fman_get_iram_err_event(struct fman_fpm_regs *fpm_rg);
++uint32_t fman_get_qmi_event(struct fman_qmi_regs *qmi_rg);
++uint32_t fman_get_fpm_error_interrupts(struct fman_fpm_regs *fpm_rg);
++uint32_t fman_get_ctrl_intr(struct fman_fpm_regs *fpm_rg,
++ uint8_t event_reg_id);
++uint8_t fman_get_qmi_deq_th(struct fman_qmi_regs *qmi_rg);
++uint8_t fman_get_qmi_enq_th(struct fman_qmi_regs *qmi_rg);
++uint16_t fman_get_size_of_fifo(struct fman_bmi_regs *bmi_rg, uint8_t port_id);
++uint32_t fman_get_total_fifo_size(struct fman_bmi_regs *bmi_rg);
++uint16_t fman_get_size_of_extra_fifo(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id);
++uint8_t fman_get_num_of_tasks(struct fman_bmi_regs *bmi_rg, uint8_t port_id);
++uint8_t fman_get_num_extra_tasks(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id);
++uint8_t fman_get_num_of_dmas(struct fman_bmi_regs *bmi_rg, uint8_t port_id);
++uint8_t fman_get_num_extra_dmas(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id);
++uint32_t fman_get_normal_pending(struct fman_fpm_regs *fpm_rg);
++uint32_t fman_get_controller_event(struct fman_fpm_regs *fpm_rg,
++ uint8_t reg_id);
++uint32_t fman_get_error_pending(struct fman_fpm_regs *fpm_rg);
++void fman_get_revision(struct fman_fpm_regs *fpm_rg, uint8_t *major,
++ uint8_t *minor);
++uint32_t fman_get_counter(struct fman_rg *fman_rg,
++ enum fman_counters reg_name);
++uint32_t fman_get_dma_status(struct fman_dma_regs *dma_rg);
++
++
++int fman_set_erratum_10gmac_a004_wa(struct fman_fpm_regs *fpm_rg);
++void fman_set_ctrl_intr(struct fman_fpm_regs *fpm_rg, uint8_t event_reg_id,
++ uint32_t enable_events);
++void fman_set_num_of_riscs_per_port(struct fman_fpm_regs *fpm_rg,
++ uint8_t port_id,
++ uint8_t num_fman_ctrls,
++ uint32_t or_fman_ctrl);
++void fman_set_order_restoration_per_port(struct fman_fpm_regs *fpm_rg,
++ uint8_t port_id,
++ bool independent_mode,
++ bool is_rx_port);
++void fman_set_qmi_enq_th(struct fman_qmi_regs *qmi_rg, uint8_t val);
++void fman_set_qmi_deq_th(struct fman_qmi_regs *qmi_rg, uint8_t val);
++void fman_set_liodn_per_port(struct fman_rg *fman_rg,
++ uint8_t port_id,
++ uint16_t liodn_base,
++ uint16_t liodn_offset);
++void fman_set_size_of_fifo(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id,
++ uint32_t size_of_fifo,
++ uint32_t extra_size_of_fifo);
++void fman_set_num_of_tasks(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id,
++ uint8_t num_of_tasks,
++ uint8_t num_of_extra_tasks);
++void fman_set_num_of_open_dmas(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id,
++ uint8_t num_of_open_dmas,
++ uint8_t num_of_extra_open_dmas,
++ uint8_t total_num_of_dmas);
++void fman_set_ports_bandwidth(struct fman_bmi_regs *bmi_rg, uint8_t *weights);
++int fman_set_exception(struct fman_rg *fman_rg,
++ enum fman_exceptions exception,
++ bool enable);
++void fman_set_dma_emergency(struct fman_dma_regs *dma_rg, bool is_write,
++ bool enable);
++void fman_set_dma_ext_bus_pri(struct fman_dma_regs *dma_rg, uint32_t pri);
++void fman_set_congestion_group_pfc_priority(uint32_t *cpg_rg,
++ uint32_t congestion_group_id,
++ uint8_t piority_bit_map,
++ uint32_t reg_num);
++
++
++void fman_defconfig(struct fman_cfg *cfg, bool is_master);
++void fman_regconfig(struct fman_rg *fman_rg, struct fman_cfg *cfg);
++int fman_fpm_init(struct fman_fpm_regs *fpm_rg, struct fman_cfg *cfg);
++int fman_bmi_init(struct fman_bmi_regs *bmi_rg, struct fman_cfg *cfg);
++int fman_qmi_init(struct fman_qmi_regs *qmi_rg, struct fman_cfg *cfg);
++int fman_dma_init(struct fman_dma_regs *dma_rg, struct fman_cfg *cfg);
++void fman_free_resources(struct fman_rg *fman_rg);
++int fman_enable(struct fman_rg *fman_rg, struct fman_cfg *cfg);
++void fman_reset(struct fman_fpm_regs *fpm_rg);
++void fman_resume(struct fman_fpm_regs *fpm_rg);
++
++
++void fman_enable_time_stamp(struct fman_fpm_regs *fpm_rg,
++ uint8_t count1ubit,
++ uint16_t fm_clk_freq);
++void fman_enable_rams_ecc(struct fman_fpm_regs *fpm_rg);
++void fman_qmi_disable_dispatch_limit(struct fman_fpm_regs *fpm_rg);
++void fman_disable_rams_ecc(struct fman_fpm_regs *fpm_rg);
++void fman_resume_stalled_port(struct fman_fpm_regs *fpm_rg, uint8_t port_id);
++int fman_reset_mac(struct fman_fpm_regs *fpm_rg, uint8_t macId, bool is_10g);
++bool fman_is_port_stalled(struct fman_fpm_regs *fpm_rg, uint8_t port_id);
++bool fman_rams_ecc_is_external_ctl(struct fman_fpm_regs *fpm_rg);
++bool fman_is_qmi_halt_not_busy_state(struct fman_qmi_regs *qmi_rg);
++int fman_modify_counter(struct fman_rg *fman_rg,
++ enum fman_counters reg_name,
++ uint32_t val);
++void fman_force_intr(struct fman_rg *fman_rg,
++ enum fman_exceptions exception);
++void fman_set_vsp_window(struct fman_bmi_regs *bmi_rg,
++ uint8_t port_id,
++ uint8_t base_storage_profile,
++ uint8_t log2_num_of_profiles);
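++
++/*
++ * Example: the expected top-level initialization flow built from the
++ * routines above (a sketch; error checking and the setup of the "rg"
++ * register-group pointers are omitted):
++ *
++ *	struct fman_cfg cfg;
++ *
++ *	fman_defconfig(&cfg, TRUE);         // master-partition defaults
++ *	fman_fpm_init(rg.fpm_rg, &cfg);
++ *	fman_bmi_init(rg.bmi_rg, &cfg);
++ *	fman_qmi_init(rg.qmi_rg, &cfg);
++ *	fman_dma_init(rg.dma_rg, &cfg);
++ *	fman_enable(&rg, &cfg);
++ */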
++
++/**************************************************************************//**
++ @Description default values
++*//***************************************************************************/
++#define DEFAULT_CATASTROPHIC_ERR E_FMAN_CATAST_ERR_STALL_PORT
++#define DEFAULT_DMA_ERR E_FMAN_DMA_ERR_CATASTROPHIC
++#define DEFAULT_HALT_ON_EXTERNAL_ACTIVATION FALSE /* do not change! if changed, must be disabled for rev1 ! */
++#define DEFAULT_HALT_ON_UNRECOVERABLE_ECC_ERROR FALSE /* do not change! if changed, must be disabled for rev1 ! */
++#define DEFAULT_EXTERNAL_ECC_RAMS_ENABLE FALSE
++#define DEFAULT_AID_OVERRIDE FALSE
++#define DEFAULT_AID_MODE E_FMAN_DMA_AID_OUT_TNUM
++#define DEFAULT_DMA_COMM_Q_LOW 0x2A
++#define DEFAULT_DMA_COMM_Q_HIGH 0x3F
++#define DEFAULT_CACHE_OVERRIDE E_FMAN_DMA_NO_CACHE_OR
++#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64
++#define DEFAULT_DMA_DBG_CNT_MODE E_FMAN_DMA_DBG_NO_CNT
++#define DEFAULT_DMA_EN_EMERGENCY FALSE
++#define DEFAULT_DMA_SOS_EMERGENCY 0
++#define DEFAULT_DMA_WATCHDOG 0 /* disabled */
++#define DEFAULT_DMA_EN_EMERGENCY_SMOOTHER FALSE
++#define DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER 0
++#define DEFAULT_DISP_LIMIT 0
++#define DEFAULT_PRS_DISP_TH 16
++#define DEFAULT_PLCR_DISP_TH 16
++#define DEFAULT_KG_DISP_TH 16
++#define DEFAULT_BMI_DISP_TH 16
++#define DEFAULT_QMI_ENQ_DISP_TH 16
++#define DEFAULT_QMI_DEQ_DISP_TH 16
++#define DEFAULT_FM_CTL1_DISP_TH 16
++#define DEFAULT_FM_CTL2_DISP_TH 16
++#define DEFAULT_TNUM_AGING_PERIOD 4
++
++
++#endif /* __FSL_FMAN_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h
+new file mode 100644
+index 00000000..6004e478
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h
+@@ -0,0 +1,1096 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_DTSEC_H
++#define __FSL_FMAN_DTSEC_H
++
++#include "common/general.h"
++#include "fsl_enet.h"
++
++/**
++ * DOC: dTSEC Init sequence
++ *
++ * To prepare dTSEC block for transfer use the following call sequence:
++ *
++ * - fman_dtsec_defconfig() - This step is optional yet recommended. It is
++ * used to obtain the default dTSEC configuration parameters.
++ *
++ * - Change dtsec configuration in &dtsec_cfg. This structure will be used
++ * to customize the dTSEC behavior.
++ *
++ * - fman_dtsec_init() - Applies the configuration on dTSEC hardware. Note that
++ * dTSEC is initialized while both Tx and Rx are disabled.
++ *
++ * - fman_dtsec_set_mac_address() - Set the station address (mac address).
++ * This is used by dTSEC to match against received packets.
++ *
++ * - fman_dtsec_adjust_link() - Set the link speed and duplex parameters
++ * after the PHY establishes the link.
++ *
++ * - dtsec_enable_tx() and dtsec_enable_rx() to enable transmission and
++ * reception.
++ */
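++
++/*
++ * Example: a minimal bring-up sketch following the sequence above.
++ * The register base "regs", the MAC address value and the elided
++ * ("...") parameters are illustrative assumptions:
++ *
++ *	struct dtsec_cfg cfg;
++ *	uint8_t mac[MAC_ADDRLEN] = {0x00, 0x04, 0x9f, 0x01, 0x02, 0x03};
++ *
++ *	fman_dtsec_defconfig(&cfg);          // obtain defaults
++ *	cfg.maximum_frame = 0x600;           // customize before init
++ *	fman_dtsec_init(regs, &cfg, ...);    // Tx/Rx still disabled
++ *	fman_dtsec_set_mac_address(regs, mac);
++ *	fman_dtsec_adjust_link(regs, ...);   // once the PHY reports link
++ */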
++
++/**
++ * DOC: dTSEC Graceful stop
++ *
++ * To temporarily stop dTSEC activity, use fman_dtsec_stop_tx() and
++ * fman_dtsec_stop_rx(). Note that these functions request dTSEC graceful stop
++ * but return before this stop is complete. To query for graceful stop
++ * completion use fman_dtsec_get_event() and check DTSEC_IEVENT_GTSC and
++ * DTSEC_IEVENT_GRSC bits. Alternatively the dTSEC interrupt mask can be set to
++ * enable graceful stop interrupts.
++ *
++ * To resume operation after graceful stop use fman_dtsec_start_tx() and
++ * fman_dtsec_start_rx().
++ */
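++
++/*
++ * Example: a polling-based graceful Rx stop (a sketch; the exact
++ * function signatures are abridged here):
++ *
++ *	fman_dtsec_stop_rx(regs);            // request graceful stop
++ *	while (!(fman_dtsec_get_event(regs, ...) & DTSEC_IEVENT_GRSC))
++ *		;                            // wait for stop completion
++ *	// ... safely access receive registers ...
++ *	fman_dtsec_start_rx(regs);           // resume reception
++ */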
++
++/**
++ * DOC: dTSEC interrupt handling
++ *
++ * This code does not provide an interrupt handler for dTSEC. Instead this
++ * handler should be implemented and registered to the operating system by the
++ * caller. Some primitives for accessing the event status and mask registers
++ * are provided.
++ *
++ * See "dTSEC Events" section for a list of events that dTSEC can generate.
++ */
++
++/**
++ * DOC: dTSEC Events
++ *
++ * Interrupt events cause dTSEC event bits to be set. Software may poll the
++ * event register at any time to check for pending interrupts. If an event
++ * occurs and its corresponding enable bit is set in the interrupt mask
++ * register, the event also causes a hardware interrupt at the PIC.
++ *
++ * To poll for event status use the fman_dtsec_get_event() function.
++ * To configure the interrupt mask use fman_dtsec_enable_interrupt() and
++ * fman_dtsec_disable_interrupt() functions.
++ * After servicing a dTSEC interrupt use fman_dtsec_ack_event() to reset the
++ * serviced event bit.
++ *
++ * The following events may be signaled by dTSEC hardware:
++ *
++ * %DTSEC_IEVENT_BABR - Babbling receive error. This bit indicates that
++ * a frame was received with length in excess of the MAC's maximum frame length
++ * register.
++ *
++ * %DTSEC_IEVENT_RXC - Receive control (pause frame) interrupt. A pause
++ * control frame was received while Rx pause frame handling is enabled.
++ * Also see fman_dtsec_handle_rx_pause().
++ *
++ * %DTSEC_IEVENT_MSRO - MIB counter overflow. The count for one of the MIB
++ * counters has exceeded the size of its register.
++ *
++ * %DTSEC_IEVENT_GTSC - Graceful transmit stop complete. Graceful stop is now
++ * complete. The transmitter is in a stopped state, in which only pause frames
++ * can be transmitted.
++ * Also see fman_dtsec_stop_tx().
++ *
++ * %DTSEC_IEVENT_BABT - Babbling transmit error. The transmitted frame length
++ * has exceeded the value in the MAC's Maximum Frame Length register.
++ *
++ * %DTSEC_IEVENT_TXC - Transmit control (pause frame) interrupt. This bit
++ * indicates that a control frame was transmitted.
++ *
++ * %DTSEC_IEVENT_TXE - Transmit error. This bit indicates that an error
++ * occurred on the transmit channel. This bit is set whenever any transmit
++ * error occurs which causes the dTSEC to discard all or part of a frame
++ * (LC, CRL, XFUN).
++ *
++ * %DTSEC_IEVENT_LC - Late collision. This bit indicates that a collision
++ * occurred beyond the collision window (slot time) in half-duplex mode.
++ * The frame is truncated with a bad CRC and the remainder of the frame
++ * is discarded.
++ *
++ * %DTSEC_IEVENT_CRL - Collision retry limit. This bit indicates that the number
++ * of successive transmission collisions has exceeded the MAC's half-duplex
++ * register's retransmission maximum count. The frame is discarded without
++ * being transmitted and transmission of the next frame commences. This only
++ * occurs while in half-duplex mode.
++ * The number of retransmit attempts can be set in
++ * &dtsec_halfdup_cfg.@retransmit before calling fman_dtsec_init().
++ *
++ * %DTSEC_IEVENT_XFUN - Transmit FIFO underrun. This bit indicates that the
++ * transmit FIFO became empty before the complete frame was transmitted.
++ * The frame is truncated with a bad CRC and the remainder of the frame is
++ * discarded.
++ *
++ * %DTSEC_IEVENT_MAG - TBD
++ *
++ * %DTSEC_IEVENT_MMRD - MII management read completion.
++ *
++ * %DTSEC_IEVENT_MMWR - MII management write completion.
++ *
++ * %DTSEC_IEVENT_GRSC - Graceful receive stop complete. It allows the user to
++ * know if the system has completed the stop and it is safe to write to receive
++ * registers (status, control or configuration registers) that are used by the
++ * system during normal operation.
++ *
++ * %DTSEC_IEVENT_TDPE - Internal data error on transmit. This bit indicates
++ * that the dTSEC has detected a parity error on its stored transmit data, which
++ * is likely to compromise the validity of recently transferred frames.
++ *
++ * %DTSEC_IEVENT_RDPE - Internal data error on receive. This bit indicates that
++ * the dTSEC has detected a parity error on its stored receive data, which is
++ * likely to compromise the validity of recently transferred frames.
++ */
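++
++/*
++ * Example: skeleton of a caller-registered handler built from the
++ * primitives above (a sketch; signatures are abridged, handle_*()
++ * are placeholders, and IEVENT bits are assumed to share the IMASK
++ * bit positions):
++ *
++ *	uint32_t ev = fman_dtsec_get_event(regs, DTSEC_EVENTS_MASK);
++ *
++ *	if (ev & DTSEC_IMASK_TXEEN)
++ *		handle_tx_error();
++ *	if (ev & DTSEC_IMASK_GTSCEN)
++ *		handle_graceful_tx_stop_done();
++ *	fman_dtsec_ack_event(regs, ev);      // clear serviced bits
++ */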
++/* Interrupt Mask Register (IMASK) */
++#define DTSEC_IMASK_BREN 0x80000000
++#define DTSEC_IMASK_RXCEN 0x40000000
++#define DTSEC_IMASK_MSROEN 0x04000000
++#define DTSEC_IMASK_GTSCEN 0x02000000
++#define DTSEC_IMASK_BTEN 0x01000000
++#define DTSEC_IMASK_TXCEN 0x00800000
++#define DTSEC_IMASK_TXEEN 0x00400000
++#define DTSEC_IMASK_LCEN 0x00040000
++#define DTSEC_IMASK_CRLEN 0x00020000
++#define DTSEC_IMASK_XFUNEN 0x00010000
++#define DTSEC_IMASK_ABRTEN 0x00008000
++#define DTSEC_IMASK_IFERREN 0x00004000
++#define DTSEC_IMASK_MAGEN 0x00000800
++#define DTSEC_IMASK_MMRDEN 0x00000400
++#define DTSEC_IMASK_MMWREN 0x00000200
++#define DTSEC_IMASK_GRSCEN 0x00000100
++#define DTSEC_IMASK_TDPEEN 0x00000002
++#define DTSEC_IMASK_RDPEEN 0x00000001
++
++#define DTSEC_EVENTS_MASK \
++ ((uint32_t)(DTSEC_IMASK_BREN | \
++ DTSEC_IMASK_RXCEN | \
++ DTSEC_IMASK_BTEN | \
++ DTSEC_IMASK_TXCEN | \
++ DTSEC_IMASK_TXEEN | \
++ DTSEC_IMASK_ABRTEN | \
++ DTSEC_IMASK_LCEN | \
++ DTSEC_IMASK_CRLEN | \
++ DTSEC_IMASK_XFUNEN | \
++ DTSEC_IMASK_IFERREN | \
++ DTSEC_IMASK_MAGEN | \
++ DTSEC_IMASK_TDPEEN | \
++ DTSEC_IMASK_RDPEEN))
++
++/* dtsec timestamp event bits */
++#define TMR_PEMASK_TSREEN 0x00010000
++#define TMR_PEVENT_TSRE 0x00010000
++
++/* Group address bit indication */
++#define MAC_GROUP_ADDRESS 0x0000010000000000ULL
++/* size in bytes of L2 address */
++#define MAC_ADDRLEN 6
++
++#define DEFAULT_HALFDUP_ON FALSE
++#define DEFAULT_HALFDUP_RETRANSMIT 0xf
++#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
++#define DEFAULT_HALFDUP_EXCESS_DEFER TRUE
++#define DEFAULT_HALFDUP_NO_BACKOFF FALSE
++#define DEFAULT_HALFDUP_BP_NO_BACKOFF FALSE
++#define DEFAULT_HALFDUP_ALT_BACKOFF_VAL 0x0A
++#define DEFAULT_HALFDUP_ALT_BACKOFF_EN FALSE
++#define DEFAULT_RX_DROP_BCAST FALSE
++#define DEFAULT_RX_SHORT_FRM TRUE
++#define DEFAULT_RX_LEN_CHECK FALSE
++#define DEFAULT_TX_PAD_CRC TRUE
++#define DEFAULT_TX_CRC FALSE
++#define DEFAULT_RX_CTRL_ACC FALSE
++#define DEFAULT_TX_PAUSE_TIME 0xf000
++#define DEFAULT_TBIPA 5
++#define DEFAULT_RX_PREPEND 0
++#define DEFAULT_PTP_TSU_EN TRUE
++#define DEFAULT_PTP_EXCEPTION_EN TRUE
++#define DEFAULT_PREAMBLE_LEN 7
++#define DEFAULT_RX_PREAMBLE FALSE
++#define DEFAULT_TX_PREAMBLE FALSE
++#define DEFAULT_LOOPBACK FALSE
++#define DEFAULT_RX_TIME_STAMP_EN FALSE
++#define DEFAULT_TX_TIME_STAMP_EN FALSE
++#define DEFAULT_RX_FLOW TRUE
++#define DEFAULT_TX_FLOW TRUE
++#define DEFAULT_RX_GROUP_HASH_EXD FALSE
++#define DEFAULT_TX_PAUSE_TIME_EXTD 0
++#define DEFAULT_RX_PROMISC FALSE
++#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
++#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
++#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
++#define DEFAULT_BACK_TO_BACK_IPG 0x60
++#define DEFAULT_MAXIMUM_FRAME 0x600
++#define DEFAULT_TBI_PHY_ADDR 5
++#define DEFAULT_WAKE_ON_LAN FALSE
++
++/* register related defines (bits, field offsets..) */
++#define DTSEC_ID1_ID 0xffff0000
++#define DTSEC_ID1_REV_MJ 0x0000FF00
++#define DTSEC_ID1_REV_MN 0x000000ff
++
++#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
++#define DTSEC_ID2_INT_NORMAL_OFF 0x00020000
++
++#define DTSEC_ECNTRL_CLRCNT 0x00004000
++#define DTSEC_ECNTRL_AUTOZ 0x00002000
++#define DTSEC_ECNTRL_STEN 0x00001000
++#define DTSEC_ECNTRL_CFG_RO 0x80000000
++#define DTSEC_ECNTRL_GMIIM 0x00000040
++#define DTSEC_ECNTRL_TBIM 0x00000020
++#define DTSEC_ECNTRL_SGMIIM 0x00000002
++#define DTSEC_ECNTRL_RPM 0x00000010
++#define DTSEC_ECNTRL_R100M 0x00000008
++#define DTSEC_ECNTRL_RMM 0x00000004
++#define DTSEC_ECNTRL_QSGMIIM 0x00000001
++
++#define DTSEC_TCTRL_THDF 0x00000800
++#define DTSEC_TCTRL_TTSE 0x00000040
++#define DTSEC_TCTRL_GTS 0x00000020
++#define DTSEC_TCTRL_TFC_PAUSE 0x00000010
++
++/* PTV offsets */
++#define PTV_PTE_OFST 16
++
++#define RCTRL_CFA 0x00008000
++#define RCTRL_GHTX 0x00000400
++#define RCTRL_RTSE 0x00000040
++#define RCTRL_GRS 0x00000020
++#define RCTRL_BC_REJ 0x00000010
++#define RCTRL_MPROM 0x00000008
++#define RCTRL_RSF 0x00000004
++#define RCTRL_UPROM 0x00000001
++#define RCTRL_PROM (RCTRL_UPROM | RCTRL_MPROM)
++
++#define TMR_CTL_ESFDP 0x00000800
++#define TMR_CTL_ESFDE 0x00000400
++
++#define MACCFG1_SOFT_RESET 0x80000000
++#define MACCFG1_LOOPBACK 0x00000100
++#define MACCFG1_RX_FLOW 0x00000020
++#define MACCFG1_TX_FLOW 0x00000010
++#define MACCFG1_TX_EN 0x00000001
++#define MACCFG1_RX_EN 0x00000004
++#define MACCFG1_RESET_RxMC 0x00080000
++#define MACCFG1_RESET_TxMC 0x00040000
++#define MACCFG1_RESET_RxFUN 0x00020000
++#define MACCFG1_RESET_TxFUN 0x00010000
++
++#define MACCFG2_NIBBLE_MODE 0x00000100
++#define MACCFG2_BYTE_MODE 0x00000200
++#define MACCFG2_PRE_AM_Rx_EN 0x00000080
++#define MACCFG2_PRE_AM_Tx_EN 0x00000040
++#define MACCFG2_LENGTH_CHECK 0x00000010
++#define MACCFG2_MAGIC_PACKET_EN 0x00000008
++#define MACCFG2_PAD_CRC_EN 0x00000004
++#define MACCFG2_CRC_EN 0x00000002
++#define MACCFG2_FULL_DUPLEX 0x00000001
++
++#define PREAMBLE_LENGTH_SHIFT 12
++
++#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
++#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
++#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
++
++#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
++#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
++#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
++#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
++
++#define HAFDUP_ALT_BEB 0x00080000
++#define HAFDUP_BP_NO_BACKOFF 0x00040000
++#define HAFDUP_NO_BACKOFF 0x00020000
++#define HAFDUP_EXCESS_DEFER 0x00010000
++#define HAFDUP_COLLISION_WINDOW 0x000003ff
++
++#define HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT 20
++#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
++#define HAFDUP_RETRANSMISSION_MAX 0x0000f000
++
++#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
++
++/* CAR1/2 bits */
++#define DTSEC_CAR1_TR64 0x80000000
++#define DTSEC_CAR1_TR127 0x40000000
++#define DTSEC_CAR1_TR255 0x20000000
++#define DTSEC_CAR1_TR511 0x10000000
++#define DTSEC_CAR1_TRK1 0x08000000
++#define DTSEC_CAR1_TRMAX 0x04000000
++#define DTSEC_CAR1_TRMGV 0x02000000
++
++#define DTSEC_CAR1_RBYT 0x00010000
++#define DTSEC_CAR1_RPKT 0x00008000
++#define DTSEC_CAR1_RFCS 0x00004000
++#define DTSEC_CAR1_RMCA 0x00002000
++#define DTSEC_CAR1_RBCA 0x00001000
++#define DTSEC_CAR1_RXCF 0x00000800
++#define DTSEC_CAR1_RXPF 0x00000400
++#define DTSEC_CAR1_RXUO 0x00000200
++#define DTSEC_CAR1_RALN 0x00000100
++#define DTSEC_CAR1_RFLR 0x00000080
++#define DTSEC_CAR1_RCDE 0x00000040
++#define DTSEC_CAR1_RCSE 0x00000020
++#define DTSEC_CAR1_RUND 0x00000010
++#define DTSEC_CAR1_ROVR 0x00000008
++#define DTSEC_CAR1_RFRG 0x00000004
++#define DTSEC_CAR1_RJBR 0x00000002
++#define DTSEC_CAR1_RDRP 0x00000001
++
++#define DTSEC_CAR2_TJBR 0x00080000
++#define DTSEC_CAR2_TFCS 0x00040000
++#define DTSEC_CAR2_TXCF 0x00020000
++#define DTSEC_CAR2_TOVR 0x00010000
++#define DTSEC_CAR2_TUND 0x00008000
++#define DTSEC_CAR2_TFRG 0x00004000
++#define DTSEC_CAR2_TBYT 0x00002000
++#define DTSEC_CAR2_TPKT 0x00001000
++#define DTSEC_CAR2_TMCA 0x00000800
++#define DTSEC_CAR2_TBCA 0x00000400
++#define DTSEC_CAR2_TXPF 0x00000200
++#define DTSEC_CAR2_TDFR 0x00000100
++#define DTSEC_CAR2_TEDF 0x00000080
++#define DTSEC_CAR2_TSCL 0x00000040
++#define DTSEC_CAR2_TMCL 0x00000020
++#define DTSEC_CAR2_TLCL 0x00000010
++#define DTSEC_CAR2_TXCL 0x00000008
++#define DTSEC_CAR2_TNCL 0x00000004
++#define DTSEC_CAR2_TDRP 0x00000001
++
++#define CAM1_ERRORS_ONLY \
++ (DTSEC_CAR1_RXPF | DTSEC_CAR1_RALN | DTSEC_CAR1_RFLR \
++ | DTSEC_CAR1_RCDE | DTSEC_CAR1_RCSE | DTSEC_CAR1_RUND \
++ | DTSEC_CAR1_ROVR | DTSEC_CAR1_RFRG | DTSEC_CAR1_RJBR \
++ | DTSEC_CAR1_RDRP)
++
++#define CAM2_ERRORS_ONLY (DTSEC_CAR2_TFCS | DTSEC_CAR2_TXPF | DTSEC_CAR2_TDRP)
++
++/*
++ * Group of dTSEC specific counters relating to the standard RMON MIB Group 1
++ * (or Ethernet) statistics.
++ */
++#define CAM1_MIB_GRP_1 \
++ (DTSEC_CAR1_RDRP | DTSEC_CAR1_RBYT | DTSEC_CAR1_RPKT | DTSEC_CAR1_RMCA\
++ | DTSEC_CAR1_RBCA | DTSEC_CAR1_RALN | DTSEC_CAR1_RUND | DTSEC_CAR1_ROVR\
++ | DTSEC_CAR1_RFRG | DTSEC_CAR1_RJBR \
++ | DTSEC_CAR1_TR64 | DTSEC_CAR1_TR127 | DTSEC_CAR1_TR255 \
++ | DTSEC_CAR1_TR511 | DTSEC_CAR1_TRMAX)
++
++#define CAM2_MIB_GRP_1 (DTSEC_CAR2_TNCL | DTSEC_CAR2_TDRP)
++
++/* memory map */
++
++struct dtsec_regs {
++ /* dTSEC General Control and Status Registers */
++ uint32_t tsec_id; /* 0x000 ETSEC_ID register */
++ uint32_t tsec_id2; /* 0x004 ETSEC_ID2 register */
++ uint32_t ievent; /* 0x008 Interrupt event register */
++ uint32_t imask; /* 0x00C Interrupt mask register */
++ uint32_t reserved0010[1];
++ uint32_t ecntrl; /* 0x014 E control register */
++ uint32_t ptv; /* 0x018 Pause time value register */
++ uint32_t tbipa; /* 0x01C TBI PHY address register */
++ uint32_t tmr_ctrl; /* 0x020 Time-stamp Control register */
++ uint32_t tmr_pevent; /* 0x024 Time-stamp event register */
++ uint32_t tmr_pemask; /* 0x028 Timer event mask register */
++ uint32_t reserved002c[5];
++ uint32_t tctrl; /* 0x040 Transmit control register */
++ uint32_t reserved0044[3];
++ uint32_t rctrl; /* 0x050 Receive control register */
++ uint32_t reserved0054[11];
++ uint32_t igaddr[8]; /* 0x080-0x09C Individual/group address */
++ uint32_t gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */
++ uint32_t reserved00c0[16];
++ uint32_t maccfg1; /* 0x100 MAC configuration #1 */
++ uint32_t maccfg2; /* 0x104 MAC configuration #2 */
++ uint32_t ipgifg; /* 0x108 IPG/IFG */
++ uint32_t hafdup; /* 0x10C Half-duplex */
++ uint32_t maxfrm; /* 0x110 Maximum frame */
++ uint32_t reserved0114[10];
++ uint32_t ifstat; /* 0x13C Interface status */
++	uint32_t macstnaddr1; /* 0x140 Station Address, part 1 */
++	uint32_t macstnaddr2; /* 0x144 Station Address, part 2 */
++ struct {
++ uint32_t exact_match1; /* octets 1-4 */
++ uint32_t exact_match2; /* octets 5-6 */
++ } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */
++ uint32_t reserved01c0[16];
++ uint32_t tr64; /* 0x200 transmit and receive 64 byte frame counter */
++ uint32_t tr127; /* 0x204 transmit and receive 65 to 127 byte frame
++ * counter */
++ uint32_t tr255; /* 0x208 transmit and receive 128 to 255 byte frame
++ * counter */
++ uint32_t tr511; /* 0x20C transmit and receive 256 to 511 byte frame
++ * counter */
++ uint32_t tr1k; /* 0x210 transmit and receive 512 to 1023 byte frame
++ * counter */
++ uint32_t trmax; /* 0x214 transmit and receive 1024 to 1518 byte frame
++ * counter */
++ uint32_t trmgv; /* 0x218 transmit and receive 1519 to 1522 byte good
++ * VLAN frame count */
++ uint32_t rbyt; /* 0x21C receive byte counter */
++ uint32_t rpkt; /* 0x220 receive packet counter */
++ uint32_t rfcs; /* 0x224 receive FCS error counter */
++ uint32_t rmca; /* 0x228 RMCA receive multicast packet counter */
++ uint32_t rbca; /* 0x22C receive broadcast packet counter */
++ uint32_t rxcf; /* 0x230 receive control frame packet counter */
++ uint32_t rxpf; /* 0x234 receive pause frame packet counter */
++ uint32_t rxuo; /* 0x238 receive unknown OP code counter */
++ uint32_t raln; /* 0x23C receive alignment error counter */
++ uint32_t rflr; /* 0x240 receive frame length error counter */
++ uint32_t rcde; /* 0x244 receive code error counter */
++ uint32_t rcse; /* 0x248 receive carrier sense error counter */
++ uint32_t rund; /* 0x24C receive undersize packet counter */
++ uint32_t rovr; /* 0x250 receive oversize packet counter */
++ uint32_t rfrg; /* 0x254 receive fragments counter */
++ uint32_t rjbr; /* 0x258 receive jabber counter */
++ uint32_t rdrp; /* 0x25C receive drop */
++ uint32_t tbyt; /* 0x260 transmit byte counter */
++ uint32_t tpkt; /* 0x264 transmit packet counter */
++ uint32_t tmca; /* 0x268 transmit multicast packet counter */
++ uint32_t tbca; /* 0x26C transmit broadcast packet counter */
++ uint32_t txpf; /* 0x270 transmit pause control frame counter */
++ uint32_t tdfr; /* 0x274 transmit deferral packet counter */
++ uint32_t tedf; /* 0x278 transmit excessive deferral packet counter */
++ uint32_t tscl; /* 0x27C transmit single collision packet counter */
++ uint32_t tmcl; /* 0x280 transmit multiple collision packet counter */
++ uint32_t tlcl; /* 0x284 transmit late collision packet counter */
++ uint32_t txcl; /* 0x288 transmit excessive collision packet counter */
++ uint32_t tncl; /* 0x28C transmit total collision counter */
++ uint32_t reserved0290[1];
++ uint32_t tdrp; /* 0x294 transmit drop frame counter */
++ uint32_t tjbr; /* 0x298 transmit jabber frame counter */
++ uint32_t tfcs; /* 0x29C transmit FCS error counter */
++ uint32_t txcf; /* 0x2A0 transmit control frame counter */
++ uint32_t tovr; /* 0x2A4 transmit oversize frame counter */
++ uint32_t tund; /* 0x2A8 transmit undersize frame counter */
++ uint32_t tfrg; /* 0x2AC transmit fragments frame counter */
++	uint32_t car1; /* 0x2B0 carry register one */
++	uint32_t car2; /* 0x2B4 carry register two */
++ uint32_t cam1; /* 0x2B8 carry register one mask register */
++ uint32_t cam2; /* 0x2BC carry register two mask register */
++ uint32_t reserved02c0[848];
++};
++
++/**
++ * struct dtsec_mib_grp_1_counters - MIB counter overflows
++ *
++ * @tr64: Transmit and Receive 64 byte frame count. Increment for each
++ * good or bad frame, of any type, transmitted or received, which
++ * is 64 bytes in length.
++ * @tr127: Transmit and Receive 65 to 127 byte frame count. Increments for
++ * each good or bad frame of any type, transmitted or received,
++ * which is 65-127 bytes in length.
++ * @tr255: Transmit and Receive 128 to 255 byte frame count. Increments
++ * for each good or bad frame, of any type, transmitted or
++ * received, which is 128-255 bytes in length.
++ * @tr511: Transmit and Receive 256 to 511 byte frame count. Increments
++ * for each good or bad frame, of any type, transmitted or
++ * received, which is 256-511 bytes in length.
++ * @tr1k: Transmit and Receive 512 to 1023 byte frame count. Increments
++ * for each good or bad frame, of any type, transmitted or
++ * received, which is 512-1023 bytes in length.
++ * @trmax: Transmit and Receive 1024 to 1518 byte frame count. Increments
++ * for each good or bad frame, of any type, transmitted or
++ * received, which is 1024-1518 bytes in length.
++ * @rfrg: Receive fragments count. Increments for each received frame
++ * which is less than 64 bytes in length and contains an invalid
++ * FCS. This includes integral and non-integral lengths.
++ * @rjbr: Receive jabber count. Increments for received frames which
++ * exceed 1518 (non VLAN) or 1522 (VLAN) bytes and contain an
++ * invalid FCS. This includes alignment errors.
++ * @rdrp: Receive dropped packets count. Increments for received frames
++ * which are streamed to the system but are later dropped due to lack
++ * of system resources. Does not increment for frames rejected due
++ * to address filtering.
++ * @raln: Receive alignment error count. Increments for each received
++ * frame from 64 to 1518 (non VLAN) or 1522 (VLAN) which contains
++ * an invalid FCS and is not an integral number of bytes.
++ * @rund: Receive undersize packet count. Increments each time a frame is
++ * received which is less than 64 bytes in length and contains a
++ * valid FCS and is otherwise well formed. This count does not
++ * include range length errors.
++ * @rovr: Receive oversize packet count. Increments each time a frame is
++ * received which exceeded 1518 (non VLAN) or 1522 (VLAN) and
++ * contains a valid FCS and is otherwise well formed.
++ * @rbyt: Receive byte count. Increments by the byte count of frames
++ * received, including those in bad packets, excluding preamble and
++ * SFD but including FCS bytes.
++ * @rpkt: Receive packet count. Increments for each received frame
++ * (including bad packets, all unicast, broadcast, and multicast
++ * packets).
++ * @rmca: Receive multicast packet count. Increments for each multicast
++ * frame with valid CRC and of lengths 64 to 1518 (non VLAN) or
++ * 1522 (VLAN), excluding broadcast frames. This count does not
++ * include range/length errors.
++ * @rbca: Receive broadcast packet count. Increments for each broadcast
++ * frame with valid CRC and of lengths 64 to 1518 (non VLAN) or
++ * 1522 (VLAN), excluding multicast frames. Does not include
++ * range/length errors.
++ * @tdrp: Transmit drop frame count. Increments each time a memory error
++ * or an underrun has occurred.
++ * @tncl: Transmit total collision counter. Increments by the number of
++ * collisions experienced during the transmission of a frame. Does
++ * not increment for aborted frames.
++ *
++ * The structure contains a group of dTSEC HW specific counters relating to the
++ * standard RMON MIB Group 1 (or Ethernet statistics) counters. This structure
++ * counts only the carry events of the corresponding HW counters.
++ *
++ * tr64 to trmax notes: Frame sizes specified are considered excluding preamble
++ * and SFD but including FCS bytes.
++ */
++struct dtsec_mib_grp_1_counters {
++ uint64_t rdrp;
++ uint64_t tdrp;
++ uint64_t rbyt;
++ uint64_t rpkt;
++ uint64_t rbca;
++ uint64_t rmca;
++ uint64_t raln;
++ uint64_t rund;
++ uint64_t rovr;
++ uint64_t rfrg;
++ uint64_t rjbr;
++ uint64_t tncl;
++ uint64_t tr64;
++ uint64_t tr127;
++ uint64_t tr255;
++ uint64_t tr511;
++ uint64_t tr1k;
++ uint64_t trmax;
++};
++
++enum dtsec_stat_counters {
++ E_DTSEC_STAT_TR64,
++ E_DTSEC_STAT_TR127,
++ E_DTSEC_STAT_TR255,
++ E_DTSEC_STAT_TR511,
++ E_DTSEC_STAT_TR1K,
++ E_DTSEC_STAT_TRMAX,
++ E_DTSEC_STAT_TRMGV,
++ E_DTSEC_STAT_RBYT,
++ E_DTSEC_STAT_RPKT,
++ E_DTSEC_STAT_RMCA,
++ E_DTSEC_STAT_RBCA,
++ E_DTSEC_STAT_RXPF,
++ E_DTSEC_STAT_RALN,
++ E_DTSEC_STAT_RFLR,
++ E_DTSEC_STAT_RCDE,
++ E_DTSEC_STAT_RCSE,
++ E_DTSEC_STAT_RUND,
++ E_DTSEC_STAT_ROVR,
++ E_DTSEC_STAT_RFRG,
++ E_DTSEC_STAT_RJBR,
++ E_DTSEC_STAT_RDRP,
++ E_DTSEC_STAT_TFCS,
++ E_DTSEC_STAT_TBYT,
++ E_DTSEC_STAT_TPKT,
++ E_DTSEC_STAT_TMCA,
++ E_DTSEC_STAT_TBCA,
++ E_DTSEC_STAT_TXPF,
++ E_DTSEC_STAT_TNCL,
++ E_DTSEC_STAT_TDRP
++};
++
++enum dtsec_stat_level {
++ /* No statistics */
++ E_MAC_STAT_NONE = 0,
++ /* Only RMON MIB group 1 (ether stats). Optimized for performance */
++ E_MAC_STAT_MIB_GRP1,
++ /* Only error counters are available. Optimized for performance */
++ E_MAC_STAT_PARTIAL,
++ /* All counters available. Not optimized for performance */
++ E_MAC_STAT_FULL
++};
++
++
++/**
++ * struct dtsec_cfg - dTSEC configuration
++ *
++ * @halfdup_on: Transmit half-duplex flow control, under software
++ * control for 10/100-Mbps half-duplex media. If set,
++ * back pressure is applied to media by raising carrier.
++ * @halfdup_retransmit: Number of retransmission attempts following a collision.
++ * If this is exceeded dTSEC aborts transmission due to
++ * excessive collisions. The standard specifies the
++ * attempt limit to be 15.
++ * @halfdup_coll_window: The number of bytes of the frame during which
++ * collisions may occur. The default value of 55
++ * corresponds to the frame byte at the end of the
++ * standard 512-bit slot time window. If collisions are
++ * detected after this byte, the late collision event is
++ * asserted and transmission of current frame is aborted.
++ * @rx_drop_bcast: Discard broadcast frames. If set, all broadcast frames
++ * will be discarded by dTSEC.
++ * @rx_short_frm: Accept short frames. If set, dTSEC will accept frames
++ * of length 14..63 bytes.
++ * @rx_len_check: Length check for received frames. If set, the MAC
++ * checks the frame's length field on receive to ensure it
++ * matches the actual data field length. This only works
++ * for received frames with length field less than 1500.
++ * No check is performed for larger frames.
++ * @tx_pad_crc: Pad and append CRC. If set, the MAC pads all
++ * transmitted short frames and appends a CRC to every
++ * frame regardless of padding requirement.
++ * @tx_crc: Transmission CRC enable. If set, the MAC appends a CRC
++ * to all frames. If frames presented to the MAC have a
++ * valid length and contain a valid CRC, @tx_crc should be
++ * reset.
++ * This field is ignored if @tx_pad_crc is set.
++ * @rx_ctrl_acc: Control frame accept. If set, this overrides 802.3
++ * standard control frame behavior, and all Ethernet frames
++ * that have an ethertype of 0x8808 are treated as normal
++ * Ethernet frames and passed up to the packet interface on
++ * a DA match. Received pause control frames are passed to
++ * the packet interface only if Rx flow control is also
++ * disabled. See fman_dtsec_handle_rx_pause() function.
++ * @tx_pause_time: Transmit pause time value. This pause value is used as
++ * part of the pause frame to be sent when a transmit pause
++ * frame is initiated. If set to 0 this disables
++ * transmission of pause frames.
++ * @rx_preamble: Receive preamble enable. If set, the MAC recovers the
++ * received Ethernet 7-byte preamble and passes it to the
++ * packet interface at the start of each received frame.
++ * This field should be reset for internal MAC loop-back
++ * mode.
++ * @tx_preamble: User defined preamble enable for transmitted frames.
++ *			If set, a user-defined preamble must be passed to the MAC
++ * and it is transmitted instead of the standard preamble.
++ * @preamble_len: Length, in bytes, of the preamble field preceding each
++ * Ethernet start-of-frame delimiter byte. The default
++ * value of 0x7 should be used in order to guarantee
++ * reliable operation with IEEE 802.3 compliant hardware.
++ * @rx_prepend: Packet alignment padding length. The specified number
++ * of bytes (1-31) of zero padding are inserted before the
++ * start of each received frame. For Ethernet, where
++ * optional preamble extraction is enabled, the padding
++ * appears before the preamble, otherwise the padding
++ * precedes the layer 2 header.
++ *
++ * This structure contains basic dTSEC configuration and must be passed to
++ * fman_dtsec_init() function. A default set of configuration values can be
++ * obtained by calling fman_dtsec_defconfig().
++ */
++struct dtsec_cfg {
++ bool halfdup_on;
++ bool halfdup_alt_backoff_en;
++ bool halfdup_excess_defer;
++ bool halfdup_no_backoff;
++ bool halfdup_bp_no_backoff;
++ uint8_t halfdup_alt_backoff_val;
++ uint16_t halfdup_retransmit;
++ uint16_t halfdup_coll_window;
++ bool rx_drop_bcast;
++ bool rx_short_frm;
++ bool rx_len_check;
++ bool tx_pad_crc;
++ bool tx_crc;
++ bool rx_ctrl_acc;
++ unsigned short tx_pause_time;
++ unsigned short tbipa;
++ bool ptp_tsu_en;
++ bool ptp_exception_en;
++ bool rx_preamble;
++ bool tx_preamble;
++ unsigned char preamble_len;
++ unsigned char rx_prepend;
++ bool loopback;
++ bool rx_time_stamp_en;
++ bool tx_time_stamp_en;
++ bool rx_flow;
++ bool tx_flow;
++ bool rx_group_hash_exd;
++ bool rx_promisc;
++ uint8_t tbi_phy_addr;
++ uint16_t tx_pause_time_extd;
++ uint16_t maximum_frame;
++ uint32_t non_back_to_back_ipg1;
++ uint32_t non_back_to_back_ipg2;
++ uint32_t min_ifg_enforcement;
++ uint32_t back_to_back_ipg;
++ bool wake_on_lan;
++};
++
++
++/**
++ * fman_dtsec_defconfig() - Get default dTSEC configuration
++ * @cfg: pointer to configuration structure.
++ *
++ * Call this function to obtain a default set of configuration values for
++ * initializing dTSEC. The user can overwrite any of the values before calling
++ * fman_dtsec_init(), if specific configuration needs to be applied.
++ */
++void fman_dtsec_defconfig(struct dtsec_cfg *cfg);
++
++/**
++ * fman_dtsec_init() - Init dTSEC hardware block
++ * @regs: Pointer to dTSEC register block
++ * @cfg: dTSEC configuration data
++ * @iface_mode: dTSEC interface mode, the type of MAC - PHY interface.
++ * @iface_speed: 1G or 10G
++ * @macaddr: MAC station address to be assigned to the device
++ * @fm_rev_maj: major rev number
++ * @fm_rev_min: minor rev number
++ * @exception_mask: initial exceptions mask
++ *
++ * This function initializes dTSEC and applies basic configuration.
++ *
++ * dTSEC initialization sequence:
++ * Before enabling Rx/Tx call dtsec_set_address() to set MAC address,
++ * fman_dtsec_adjust_link() to configure interface speed and duplex and finally
++ * dtsec_enable_tx()/dtsec_enable_rx() to start transmission and reception.
++ *
++ * Returns: 0 if successful, an error code otherwise.
++ */
++int fman_dtsec_init(struct dtsec_regs *regs, struct dtsec_cfg *cfg,
++ enum enet_interface iface_mode,
++ enum enet_speed iface_speed,
++ uint8_t *macaddr, uint8_t fm_rev_maj,
++ uint8_t fm_rev_min,
++ uint32_t exception_mask);
++
++/**
++ * fman_dtsec_enable() - Enable dTSEC Rx and Tx
++ * @regs: Pointer to dTSEC register block
++ * @apply_rx: enable rx side
++ * @apply_tx: enable tx side
++ *
++ * This function resets Tx and Rx graceful stop bit and enables dTSEC Tx and Rx.
++ */
++void fman_dtsec_enable(struct dtsec_regs *regs, bool apply_rx, bool apply_tx);
++
++/**
++ * fman_dtsec_disable() - Disable dTSEC Tx and Rx
++ * @regs: Pointer to dTSEC register block
++ * @apply_rx: disable rx side
++ * @apply_tx: disable tx side
++ *
++ * This function disables Tx and Rx in dTSEC.
++ */
++void fman_dtsec_disable(struct dtsec_regs *regs, bool apply_rx, bool apply_tx);
++
++/**
++ * fman_dtsec_get_revision() - Get dTSEC hardware revision
++ * @regs: Pointer to dTSEC register block
++ *
++ * Returns dtsec_id content
++ *
++ * Call this function to obtain the dTSEC hardware version.
++ */
++uint32_t fman_dtsec_get_revision(struct dtsec_regs *regs);
++
++/**
++ * fman_dtsec_set_mac_address() - Set MAC station address
++ * @regs: Pointer to dTSEC register block
++ * @macaddr: MAC address array
++ *
++ * This function sets MAC station address. To enable unicast reception call
++ * this after fman_dtsec_init(). While promiscuous mode is disabled, dTSEC will
++ * match the destination address of received unicast frames against this
++ * address.
++ */
++void fman_dtsec_set_mac_address(struct dtsec_regs *regs, uint8_t *macaddr);
++
++/**
++ * fman_dtsec_get_mac_address() - Query MAC station address
++ * @regs: Pointer to dTSEC register block
++ * @macaddr: MAC address array
++ */
++void fman_dtsec_get_mac_address(struct dtsec_regs *regs, uint8_t *macaddr);
++
++/**
++ * fman_dtsec_set_uc_promisc() - Sets unicast promiscuous mode
++ * @regs: Pointer to dTSEC register block
++ * @enable: Enable unicast promiscuous mode
++ *
++ * Use this function to enable/disable dTSEC L2 address filtering. If the
++ * address filtering is disabled, all unicast packets are accepted.
++ * To set dTSEC in promiscuous mode call both fman_dtsec_set_uc_promisc() and
++ * fman_dtsec_set_mc_promisc() to disable filtering for both unicast and
++ * multicast addresses.
++ */
++void fman_dtsec_set_uc_promisc(struct dtsec_regs *regs, bool enable);
++
++/**
++ * fman_dtsec_set_wol() - Enable/Disable wake on lan
++ * (magic packet support)
++ * @regs: Pointer to dTSEC register block
++ * @en: Enable Wake On Lan support in dTSEC
++ *
++ */
++void fman_dtsec_set_wol(struct dtsec_regs *regs, bool en);
++
++/**
++ * fman_dtsec_adjust_link() - Adjust dTSEC speed/duplex settings
++ * @regs: Pointer to dTSEC register block
++ * @iface_mode: dTSEC interface mode
++ * @speed: Link speed
++ * @full_dx: True for full-duplex, false for half-duplex.
++ *
++ * This function configures the MAC to operate at the desired speed and duplex.
++ * Use it to configure dTSEC after fman_dtsec_init() and whenever the link
++ * speed changes (for instance following PHY auto-negotiation).
++ *
++ * Returns: 0 if successful, an error code otherwise.
++ */
++int fman_dtsec_adjust_link(struct dtsec_regs *regs,
++ enum enet_interface iface_mode,
++ enum enet_speed speed, bool full_dx);
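++
++/*
++ * Illustrative bring-up sketch (not part of the driver): follows the
++ * initialization sequence documented at fman_dtsec_init(). The FMan
++ * revision numbers and the interface mode/speed values are hypothetical
++ * placeholders a caller would obtain from platform code.
++ */
++static inline int example_dtsec_bring_up(struct dtsec_regs *regs,
++				enum enet_interface mode,
++				enum enet_speed speed,
++				uint8_t *macaddr)
++{
++	struct dtsec_cfg cfg;
++	int err;
++
++	fman_dtsec_defconfig(&cfg);	/* start from the default config */
++	err = fman_dtsec_init(regs, &cfg, mode, speed, macaddr,
++			6, 0, 0);	/* placeholder rev 6.0, no exceptions */
++	if (err != 0)
++		return err;
++	fman_dtsec_set_mac_address(regs, macaddr);
++	err = fman_dtsec_adjust_link(regs, mode, speed, true);
++	if (err != 0)
++		return err;
++	fman_dtsec_enable(regs, true, true);	/* start Rx and Tx */
++	return 0;
++}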
++
++/**
++ * fman_dtsec_set_tbi_phy_addr() - Updates TBI address field
++ * @regs: Pointer to dTSEC register block
++ * @addr:	Valid PHY address in the range of 1 to 31. 0 is reserved.
++ *
++ * In SGMII mode, the dTSEC's TBIPA field must contain a valid TBI PHY address
++ * so that the associated TBI PHY (i.e. the link) may be initialized.
++ *
++ * Returns: 0 if successful, an error code otherwise.
++ */
++int fman_dtsec_set_tbi_phy_addr(struct dtsec_regs *regs,
++ uint8_t addr);
++
++/**
++ * fman_dtsec_set_max_frame_len() - Set max frame length
++ * @regs: Pointer to dTSEC register block
++ * @length: Max frame length.
++ *
++ * Sets the maximum frame length for received and transmitted frames. Frames
++ * that exceed this length are truncated.
++ */
++void fman_dtsec_set_max_frame_len(struct dtsec_regs *regs, uint16_t length);
++
++/**
++ * fman_dtsec_get_max_frame_len() - Query max frame length
++ * @regs: Pointer to dTSEC register block
++ *
++ * Returns: the current value of the maximum frame length.
++ */
++uint16_t fman_dtsec_get_max_frame_len(struct dtsec_regs *regs);
++
++/**
++ * fman_dtsec_handle_rx_pause() - Configure pause frame handling
++ * @regs: Pointer to dTSEC register block
++ * @en: Enable pause frame handling in dTSEC
++ *
++ * If enabled, dTSEC will handle pause frames internally. This must be disabled
++ * if dTSEC is set in half-duplex mode.
++ * If pause frame handling is disabled and &dtsec_cfg.rx_ctrl_acc is set, pause
++ * frames will be transferred to the packet interface just like regular Ethernet
++ * frames.
++ */
++void fman_dtsec_handle_rx_pause(struct dtsec_regs *regs, bool en);
++
++/**
++ * fman_dtsec_set_tx_pause_frames() - Configure Tx pause time
++ * @regs: Pointer to dTSEC register block
++ * @time: Time value included in pause frames
++ *
++ * Call this function to set the time value used in transmitted pause frames.
++ * If time is 0, transmission of pause frames is disabled.
++ */
++void fman_dtsec_set_tx_pause_frames(struct dtsec_regs *regs, uint16_t time);
++
++/**
++ * fman_dtsec_ack_event() - Acknowledge handled events
++ * @regs: Pointer to dTSEC register block
++ * @ev_mask: Events to acknowledge
++ *
++ * After handling events signaled by dTSEC in either polling or interrupt mode,
++ * call this function to reset the associated status bits in dTSEC event
++ * register.
++ */
++void fman_dtsec_ack_event(struct dtsec_regs *regs, uint32_t ev_mask);
++
++/**
++ * fman_dtsec_get_event() - Returns currently asserted events
++ * @regs: Pointer to dTSEC register block
++ * @ev_mask: Mask of relevant events
++ *
++ * Call this function to obtain a bit-mask of events that are currently asserted
++ * in dTSEC, taken from IEVENT register.
++ *
++ * Returns: a bit-mask of events asserted in dTSEC.
++ */
++uint32_t fman_dtsec_get_event(struct dtsec_regs *regs, uint32_t ev_mask);
++
++/**
++ * fman_dtsec_get_interrupt_mask() - Returns a bit-mask of enabled interrupts
++ * @regs: Pointer to dTSEC register block
++ *
++ * Call this function to obtain a bit-mask of enabled interrupts
++ * in dTSEC, taken from IMASK register.
++ *
++ * Returns: a bit-mask of enabled interrupts in dTSEC.
++ */
++uint32_t fman_dtsec_get_interrupt_mask(struct dtsec_regs *regs);
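++
++/*
++ * Illustrative polling sketch (not part of the driver): read the events
++ * that are both asserted and unmasked, handle them, then acknowledge so
++ * the corresponding IEVENT status bits are cleared.
++ */
++static inline void example_dtsec_poll_events(struct dtsec_regs *regs)
++{
++	uint32_t mask = fman_dtsec_get_interrupt_mask(regs);
++	uint32_t events = fman_dtsec_get_event(regs, mask);
++
++	if (events != 0) {
++		/* ... handle the asserted events here ... */
++		fman_dtsec_ack_event(regs, events);
++	}
++}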
++
++void fman_dtsec_clear_addr_in_paddr(struct dtsec_regs *regs,
++ uint8_t paddr_num);
++
++void fman_dtsec_add_addr_in_paddr(struct dtsec_regs *regs,
++ uint64_t addr,
++ uint8_t paddr_num);
++
++void fman_dtsec_enable_tmr_interrupt (struct dtsec_regs *regs);
++
++void fman_dtsec_disable_tmr_interrupt(struct dtsec_regs *regs);
++
++/**
++ * fman_dtsec_disable_interrupt() - Disables interrupts for the specified events
++ * @regs: Pointer to dTSEC register block
++ * @ev_mask: Mask of relevant events
++ *
++ * Call this function to disable interrupts in dTSEC for the specified events.
++ * To enable interrupts use fman_dtsec_enable_interrupt().
++ */
++void fman_dtsec_disable_interrupt(struct dtsec_regs *regs, uint32_t ev_mask);
++
++/**
++ * fman_dtsec_enable_interrupt() - Enable interrupts for the specified events
++ * @regs: Pointer to dTSEC register block
++ * @ev_mask: Mask of relevant events
++ *
++ * Call this function to enable interrupts in dTSEC for the specified events.
++ * To disable interrupts use fman_dtsec_disable_interrupt().
++ */
++void fman_dtsec_enable_interrupt(struct dtsec_regs *regs, uint32_t ev_mask);
++
++/**
++ * fman_dtsec_set_ts() - Enables dTSEC timestamps
++ * @regs: Pointer to dTSEC register block
++ * @en: true to enable timestamps, false to disable them
++ *
++ * Call this function to enable/disable dTSEC timestamps. This affects both
++ * Tx and Rx.
++ */
++void fman_dtsec_set_ts(struct dtsec_regs *regs, bool en);
++
++/**
++ * fman_dtsec_set_bucket() - Enables/disables a filter bucket
++ * @regs: Pointer to dTSEC register block
++ * @bucket: Bucket index
++ * @enable: true/false to enable/disable this bucket
++ *
++ * This function enables or disables the specified bucket. Enabling a bucket
++ * associated with an address configures dTSEC to accept received packets
++ * with that destination address.
++ * Multiple addresses may be associated with the same bucket. Disabling a
++ * bucket will affect all addresses associated with that bucket. A bucket that
++ * is enabled requires further filtering and verification in the upper layers.
++ */
++void fman_dtsec_set_bucket(struct dtsec_regs *regs, int bucket, bool enable);
++
++/**
++ * fman_dtsec_set_hash_table() - Insert a CRC code into the filter table
++ * @regs: Pointer to dTSEC register block
++ * @crc: CRC code to insert
++ * @mcast: true if this is a multicast address
++ * @ghtx: true if we are in GHTX mode
++ *
++ * This function inserts a CRC code into the hash filter table.
++ */
++void fman_dtsec_set_hash_table(struct dtsec_regs *regs, uint32_t crc,
++ bool mcast, bool ghtx);
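++
++/*
++ * Illustrative sketch (not part of the driver): accept frames sent to a
++ * multicast address by inserting the address CRC into the hash filter
++ * table. Computing the CRC-32 of the 6-byte MAC address is left to the
++ * caller, since no helper for it is declared in this header.
++ */
++static inline void example_dtsec_add_mcast(struct dtsec_regs *regs,
++				uint32_t addr_crc, bool ghtx)
++{
++	fman_dtsec_set_hash_table(regs, addr_crc, true, ghtx);
++}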
++
++/**
++ * fman_dtsec_reset_filter_table() - Resets the address filtering table
++ * @regs: Pointer to dTSEC register block
++ * @mcast: Reset multicast entries
++ * @ucast: Reset unicast entries
++ *
++ * Resets all entries in L2 address filter table. After calling this function
++ * all buckets enabled using fman_dtsec_set_bucket() will be disabled.
++ * If dtsec_init_filter_table() was called with @unicast_hash set to false,
++ * @ucast argument is ignored.
++ * This affects neither the primary address nor the 15 additional addresses
++ * configured using dtsec_set_address() or dtsec_set_match_address().
++ */
++void fman_dtsec_reset_filter_table(struct dtsec_regs *regs, bool mcast,
++ bool ucast);
++
++/**
++ * fman_dtsec_set_mc_promisc() - Set multicast promiscuous mode
++ * @regs: Pointer to dTSEC register block
++ * @enable: Enable multicast promiscuous mode
++ *
++ * Call this to enable/disable L2 address filtering for multicast packets.
++ */
++void fman_dtsec_set_mc_promisc(struct dtsec_regs *regs, bool enable);
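++
++/*
++ * Illustrative helper (not part of the driver): as noted in the
++ * fman_dtsec_set_uc_promisc() description, full promiscuous mode
++ * requires disabling both unicast and multicast address filtering.
++ */
++static inline void example_dtsec_set_promisc(struct dtsec_regs *regs,
++				bool enable)
++{
++	fman_dtsec_set_uc_promisc(regs, enable);
++	fman_dtsec_set_mc_promisc(regs, enable);
++}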
++
++/* statistics APIs */
++
++/**
++ * fman_dtsec_set_stat_level() - Enable a group of MIB statistics counters
++ * @regs: Pointer to dTSEC register block
++ * @level:	Specifies which group of dTSEC MIB HW counters to enable:
++ *		a specific counters group, _all_ to enable all the existing
++ *		counters, or _none_ to disable all the counters.
++ *
++ * Enables the MIB statistics hw counters and sets up the carry interrupt
++ * masks for the counters corresponding to the @level input parameter.
++ *
++ * Returns: an error code if an invalid @level value is given.
++ */
++int fman_dtsec_set_stat_level(struct dtsec_regs *regs,
++ enum dtsec_stat_level level);
++
++/**
++ * fman_dtsec_reset_stat() - Completely resets all dTSEC HW counters
++ * @regs: Pointer to dTSEC register block
++ */
++void fman_dtsec_reset_stat(struct dtsec_regs *regs);
++
++/**
++ * fman_dtsec_get_clear_carry_regs() - Read and clear carry bits (CAR1-2 registers)
++ * @regs: Pointer to dTSEC register block
++ * @car1: car1 register value
++ * @car2: car2 register value
++ *
++ * When set, the carry bits signal that an overflow occurred on the
++ * corresponding counters.
++ * Note that the carry bits (CAR1-2 registers) will assert the
++ * %DTSEC_IEVENT_MSRO interrupt if unmasked (via CAM1-2 regs).
++ *
++ * Returns: true if an overflow occurred, false otherwise.
++ */
++bool fman_dtsec_get_clear_carry_regs(struct dtsec_regs *regs,
++ uint32_t *car1, uint32_t *car2);
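++
++/*
++ * Illustrative sketch (not part of the driver): a MIB carry handler
++ * reads and clears CAR1/CAR2; each set bit marks a HW counter that
++ * wrapped, so software can fold the overflow into its own 64-bit
++ * counters (the accumulation itself is omitted here).
++ */
++static inline void example_dtsec_handle_carry(struct dtsec_regs *regs)
++{
++	uint32_t car1, car2;
++
++	if (fman_dtsec_get_clear_carry_regs(regs, &car1, &car2)) {
++		/* ... add 1 << 32 to the sw counters flagged in car1/car2 ... */
++	}
++}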
++
++uint32_t fman_dtsec_check_and_clear_tmr_event(struct dtsec_regs *regs);
++
++uint32_t fman_dtsec_get_stat_counter(struct dtsec_regs *regs,
++ enum dtsec_stat_counters reg_name);
++
++void fman_dtsec_start_tx(struct dtsec_regs *regs);
++void fman_dtsec_start_rx(struct dtsec_regs *regs);
++void fman_dtsec_stop_tx(struct dtsec_regs *regs);
++void fman_dtsec_stop_rx(struct dtsec_regs *regs);
++uint32_t fman_dtsec_get_rctrl(struct dtsec_regs *regs);
++
++
++#endif /* __FSL_FMAN_DTSEC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h
+new file mode 100644
+index 00000000..0dda09c3
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h
+@@ -0,0 +1,107 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_DTSEC_MII_ACC_H
++#define __FSL_FMAN_DTSEC_MII_ACC_H
++
++#include "common/general.h"
++
++
++/* MII Management Configuration Register */
++#define MIIMCFG_RESET_MGMT 0x80000000
++#define MIIMCFG_MGNTCLK_MASK 0x00000007
++#define MIIMCFG_MGNTCLK_SHIFT 0
++
++/* MII Management Command Register */
++#define MIIMCOM_SCAN_CYCLE 0x00000002
++#define MIIMCOM_READ_CYCLE 0x00000001
++
++/* MII Management Address Register */
++#define MIIMADD_PHY_ADDR_SHIFT 8
++#define MIIMADD_PHY_ADDR_MASK 0x00001f00
++
++#define MIIMADD_REG_ADDR_SHIFT 0
++#define MIIMADD_REG_ADDR_MASK 0x0000001f
++
++/* MII Management Indicator Register */
++#define MIIMIND_BUSY 0x00000001
++
++
++/* PHY Control Register */
++#define PHY_CR_PHY_RESET 0x8000
++#define PHY_CR_LOOPBACK 0x4000
++#define PHY_CR_SPEED0 0x2000
++#define PHY_CR_ANE 0x1000
++#define PHY_CR_RESET_AN 0x0200
++#define PHY_CR_FULLDUPLEX 0x0100
++#define PHY_CR_SPEED1 0x0040
++
++#define PHY_TBICON_SRESET 0x8000
++#define PHY_TBICON_SPEED2 0x0020
++#define PHY_TBICON_CLK_SEL 0x0020
++#define PHY_TBIANA_SGMII 0x4001
++#define PHY_TBIANA_1000X 0x01a0
++/* register map */
++
++/* MII Configuration Control Memory Map Registers */
++struct dtsec_mii_reg {
++ uint32_t reserved1[72];
++ uint32_t miimcfg; /* MII Mgmt:configuration */
++ uint32_t miimcom; /* MII Mgmt:command */
++ uint32_t miimadd; /* MII Mgmt:address */
++ uint32_t miimcon; /* MII Mgmt:control 3 */
++ uint32_t miimstat; /* MII Mgmt:status */
++ uint32_t miimind; /* MII Mgmt:indicators */
++};
++
++/* dTSEC MII API */
++
++/* functions to access the mii registers for phy configuration.
++ * this functionality may not be available for all dtsecs in the system.
++ * consult the reference manual for details */
++void fman_dtsec_mii_reset(struct dtsec_mii_reg *regs);
++/* frequency is in MHz.
++ * note that dtsec clock is 1/2 of fman clock */
++void fman_dtsec_mii_init(struct dtsec_mii_reg *regs, uint16_t dtsec_freq);
++int fman_dtsec_mii_write_reg(struct dtsec_mii_reg *regs,
++ uint8_t addr,
++ uint8_t reg,
++ uint16_t data,
++ uint16_t dtsec_freq);
++
++int fman_dtsec_mii_read_reg(struct dtsec_mii_reg *regs,
++ uint8_t addr,
++ uint8_t reg,
++ uint16_t *data,
++ uint16_t dtsec_freq);
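++
++/*
++ * Illustrative sketch (not part of the driver): restart PHY
++ * auto-negotiation with a read-modify-write of the IEEE control
++ * register (register 0). phy_addr and dtsec_freq are platform-specific
++ * placeholders.
++ */
++static inline int example_dtsec_restart_aneg(struct dtsec_mii_reg *regs,
++				uint8_t phy_addr, uint16_t dtsec_freq)
++{
++	uint16_t cr;
++	int err;
++
++	err = fman_dtsec_mii_read_reg(regs, phy_addr, 0, &cr, dtsec_freq);
++	if (err != 0)
++		return err;
++	cr |= PHY_CR_RESET_AN;
++	return fman_dtsec_mii_write_reg(regs, phy_addr, 0, cr, dtsec_freq);
++}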
++
++#endif /* __FSL_FMAN_DTSEC_MII_ACC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_kg.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_kg.h
+new file mode 100644
+index 00000000..010e4b70
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_kg.h
+@@ -0,0 +1,514 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_KG_H
++#define __FSL_FMAN_KG_H
++
++#include "common/general.h"
++
++#define FM_KG_NUM_OF_GENERIC_REGS 8 /**< Num of generic KeyGen regs */
++#define FMAN_MAX_NUM_OF_HW_PORTS 64
++/**< Total num of masks allowed on KG extractions */
++#define FM_KG_EXTRACT_MASKS_NUM 4
++#define FM_KG_NUM_CLS_PLAN_ENTR 8 /**< Num of class. plan regs */
++#define FM_KG_CLS_PLAN_GRPS_NUM 32 /**< Max num of class. groups */
++
++struct fman_kg_regs {
++ uint32_t fmkg_gcr;
++ uint32_t res004;
++ uint32_t res008;
++ uint32_t fmkg_eer;
++ uint32_t fmkg_eeer;
++ uint32_t res014;
++ uint32_t res018;
++ uint32_t fmkg_seer;
++ uint32_t fmkg_seeer;
++ uint32_t fmkg_gsr;
++ uint32_t fmkg_tpc;
++ uint32_t fmkg_serc;
++ uint32_t res030[4];
++ uint32_t fmkg_fdor;
++ uint32_t fmkg_gdv0r;
++ uint32_t fmkg_gdv1r;
++ uint32_t res04c[6];
++ uint32_t fmkg_feer;
++ uint32_t res068[38];
++ uint32_t fmkg_indirect[63];
++ uint32_t fmkg_ar;
++};
++
++struct fman_kg_scheme_regs {
++ uint32_t kgse_mode; /**< MODE */
++ uint32_t kgse_ekfc; /**< Extract Known Fields Command */
++ uint32_t kgse_ekdv; /**< Extract Known Default Value */
++ uint32_t kgse_bmch; /**< Bit Mask Command High */
++ uint32_t kgse_bmcl; /**< Bit Mask Command Low */
++ uint32_t kgse_fqb; /**< Frame Queue Base */
++ uint32_t kgse_hc; /**< Hash Command */
++ uint32_t kgse_ppc; /**< Policer Profile Command */
++ uint32_t kgse_gec[FM_KG_NUM_OF_GENERIC_REGS];
++ /**< Generic Extract Command */
++ uint32_t kgse_spc; /**< KeyGen Scheme Entry Statistic Packet Counter */
++ uint32_t kgse_dv0; /**< KeyGen Scheme Entry Default Value 0 */
++ uint32_t kgse_dv1; /**< KeyGen Scheme Entry Default Value 1 */
++ uint32_t kgse_ccbs; /**< KeyGen Scheme Entry Coarse Classification Bit*/
++ uint32_t kgse_mv; /**< KeyGen Scheme Entry Match vector */
++ uint32_t kgse_om; /**< KeyGen Scheme Entry Operation Mode bits */
++ uint32_t kgse_vsp; /**< KeyGen Scheme Entry Virtual Storage Profile */
++};
++
++struct fman_kg_pe_regs{
++ uint32_t fmkg_pe_sp;
++ uint32_t fmkg_pe_cpp;
++};
++
++struct fman_kg_cp_regs {
++ uint32_t kgcpe[FM_KG_NUM_CLS_PLAN_ENTR];
++};
++
++
++#define FM_KG_KGAR_GO 0x80000000
++#define FM_KG_KGAR_READ 0x40000000
++#define FM_KG_KGAR_WRITE 0x00000000
++#define FM_KG_KGAR_SEL_SCHEME_ENTRY 0x00000000
++#define FM_KG_KGAR_SCM_WSEL_UPDATE_CNT 0x00008000
++
++#define KG_SCH_PP_SHIFT_HIGH 0x80000000
++#define KG_SCH_PP_NO_GEN 0x10000000
++#define KG_SCH_PP_SHIFT_LOW 0x0000F000
++#define KG_SCH_MODE_NIA_PLCR 0x40000000
++#define KG_SCH_GEN_EXTRACT_TYPE 0x00008000
++#define KG_SCH_BITMASK_MASK 0x000000FF
++#define KG_SCH_GEN_VALID 0x80000000
++#define KG_SCH_GEN_MASK 0x00FF0000
++#define FM_PCD_KG_KGAR_ERR 0x20000000
++#define FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY 0x01000000
++#define FM_PCD_KG_KGAR_SEL_PORT_ENTRY 0x02000000
++#define FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP 0x00008000
++#define FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP 0x00004000
++#define FM_PCD_KG_KGAR_WSEL_MASK 0x0000FF00
++#define KG_SCH_HASH_CONFIG_NO_FQID 0x80000000
++#define KG_SCH_HASH_CONFIG_SYM 0x40000000
++
++#define FM_EX_KG_DOUBLE_ECC 0x80000000
++#define FM_EX_KG_KEYSIZE_OVERFLOW 0x40000000
++
++/* ECC capture register */
++#define KG_FMKG_SERC_CAP 0x80000000
++#define KG_FMKG_SERC_CET 0x40000000
++#define KG_FMKG_SERC_CNT_MSK 0x00FF0000
++#define KG_FMKG_SERC_CNT_SHIFT 16
++#define KG_FMKG_SERC_ADDR_MSK 0x000003FF
++
++/* Masks */
++#define FM_KG_KGGCR_EN 0x80000000
++#define KG_SCH_GEN_VALID 0x80000000
++#define KG_SCH_GEN_EXTRACT_TYPE 0x00008000
++#define KG_ERR_TYPE_DOUBLE 0x40000000
++#define KG_ERR_ADDR_MASK 0x00000FFF
++#define KG_SCH_MODE_EN 0x80000000
++
++/* shifts */
++#define FM_KG_KGAR_NUM_SHIFT 16
++#define FM_KG_PE_CPP_MASK_SHIFT 16
++#define FM_KG_KGAR_WSEL_SHIFT 8
++
++#define FM_KG_SCH_GEN_HT_INVALID 0
++
++#define FM_KG_MASK_SEL_GEN_BASE 0x20
++
++#define KG_GET_MASK_SEL_SHIFT(shift, i) \
++switch (i) \
++{ \
++ case 0: (shift) = 26; break; \
++ case 1: (shift) = 20; break; \
++ case 2: (shift) = 10; break; \
++ case 3: (shift) = 4; break; \
++ default: (shift) = 0; \
++}
++
++#define KG_GET_MASK_OFFSET_SHIFT(shift, i) \
++switch (i) \
++{ \
++ case 0: (shift) = 16; break; \
++ case 1: (shift) = 0; break; \
++ case 2: (shift) = 28; break; \
++ case 3: (shift) = 24; break; \
++ default: (shift) = 0; \
++}
++
++#define KG_GET_MASK_SHIFT(shift, i) \
++switch (i) \
++{ \
++ case 0: shift = 24; break; \
++ case 1: shift = 16; break; \
++ case 2: shift = 8; break; \
++ case 3: shift = 0; break; \
++ default: shift = 0; \
++}
++
++/* Port entry CPP register */
++#define FMAN_KG_PE_CPP_MASK_SHIFT 16
++
++/* Scheme registers */
++#define FMAN_KG_SCH_MODE_EN 0x80000000
++#define FMAN_KG_SCH_MODE_NIA_PLCR 0x40000000
++#define FMAN_KG_SCH_MODE_CCOBASE_SHIFT 24
++
++#define FMAN_KG_SCH_DEF_MAC_ADDR_SHIFT 30
++#define FMAN_KG_SCH_DEF_VLAN_TCI_SHIFT 28
++#define FMAN_KG_SCH_DEF_ETYPE_SHIFT 26
++#define FMAN_KG_SCH_DEF_PPP_SID_SHIFT 24
++#define FMAN_KG_SCH_DEF_PPP_PID_SHIFT 22
++#define FMAN_KG_SCH_DEF_MPLS_SHIFT 20
++#define FMAN_KG_SCH_DEF_IP_ADDR_SHIFT 18
++#define FMAN_KG_SCH_DEF_PTYPE_SHIFT 16
++#define FMAN_KG_SCH_DEF_IP_TOS_TC_SHIFT 14
++#define FMAN_KG_SCH_DEF_IPv6_FL_SHIFT 12
++#define FMAN_KG_SCH_DEF_IPSEC_SPI_SHIFT 10
++#define FMAN_KG_SCH_DEF_L4_PORT_SHIFT 8
++#define FMAN_KG_SCH_DEF_TCP_FLG_SHIFT 6
++
++#define FMAN_KG_SCH_GEN_VALID 0x80000000
++#define FMAN_KG_SCH_GEN_SIZE_MAX 16
++#define FMAN_KG_SCH_GEN_OR 0x00008000
++
++#define FMAN_KG_SCH_GEN_DEF_SHIFT 29
++#define FMAN_KG_SCH_GEN_SIZE_SHIFT 24
++#define FMAN_KG_SCH_GEN_MASK_SHIFT 16
++#define FMAN_KG_SCH_GEN_HT_SHIFT 8
++
++#define FMAN_KG_SCH_HASH_HSHIFT_SHIFT 24
++#define FMAN_KG_SCH_HASH_HSHIFT_MAX 0x28
++#define FMAN_KG_SCH_HASH_SYM 0x40000000
++#define FMAN_KG_SCH_HASH_NO_FQID_GEN 0x80000000
++
++#define FMAN_KG_SCH_PP_SH_SHIFT 27
++#define FMAN_KG_SCH_PP_SL_SHIFT 12
++#define FMAN_KG_SCH_PP_SH_MASK 0x80000000
++#define FMAN_KG_SCH_PP_SL_MASK 0x0000F000
++#define FMAN_KG_SCH_PP_SHIFT_MAX 0x17
++#define FMAN_KG_SCH_PP_MASK_SHIFT 16
++#define FMAN_KG_SCH_PP_NO_GEN 0x10000000
++
++enum fman_kg_gen_extract_src {
++ E_FMAN_KG_GEN_EXTRACT_ETH,
++ E_FMAN_KG_GEN_EXTRACT_ETYPE,
++ E_FMAN_KG_GEN_EXTRACT_SNAP,
++ E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_1,
++ E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_N,
++ E_FMAN_KG_GEN_EXTRACT_PPPoE,
++ E_FMAN_KG_GEN_EXTRACT_MPLS_1,
++ E_FMAN_KG_GEN_EXTRACT_MPLS_2,
++ E_FMAN_KG_GEN_EXTRACT_MPLS_3,
++ E_FMAN_KG_GEN_EXTRACT_MPLS_N,
++ E_FMAN_KG_GEN_EXTRACT_IPv4_1,
++ E_FMAN_KG_GEN_EXTRACT_IPv6_1,
++ E_FMAN_KG_GEN_EXTRACT_IPv4_2,
++ E_FMAN_KG_GEN_EXTRACT_IPv6_2,
++ E_FMAN_KG_GEN_EXTRACT_MINENCAP,
++ E_FMAN_KG_GEN_EXTRACT_IP_PID,
++ E_FMAN_KG_GEN_EXTRACT_GRE,
++ E_FMAN_KG_GEN_EXTRACT_TCP,
++ E_FMAN_KG_GEN_EXTRACT_UDP,
++ E_FMAN_KG_GEN_EXTRACT_SCTP,
++ E_FMAN_KG_GEN_EXTRACT_DCCP,
++ E_FMAN_KG_GEN_EXTRACT_IPSEC_AH,
++ E_FMAN_KG_GEN_EXTRACT_IPSEC_ESP,
++ E_FMAN_KG_GEN_EXTRACT_SHIM_1,
++ E_FMAN_KG_GEN_EXTRACT_SHIM_2,
++ E_FMAN_KG_GEN_EXTRACT_FROM_DFLT,
++ E_FMAN_KG_GEN_EXTRACT_FROM_FRAME_START,
++ E_FMAN_KG_GEN_EXTRACT_FROM_PARSE_RESULT,
++ E_FMAN_KG_GEN_EXTRACT_FROM_END_OF_PARSE,
++ E_FMAN_KG_GEN_EXTRACT_FROM_FQID
++};
++
++struct fman_kg_ex_ecc_attr
++{
++ bool valid;
++ bool double_ecc;
++ uint16_t addr;
++ uint8_t single_ecc_count;
++};
++
++enum fman_kg_def_select
++{
++ E_FMAN_KG_DEF_GLOBAL_0,
++ E_FMAN_KG_DEF_GLOBAL_1,
++ E_FMAN_KG_DEF_SCHEME_0,
++ E_FMAN_KG_DEF_SCHEME_1
++};
++
++struct fman_kg_extract_def
++{
++ enum fman_kg_def_select mac_addr;
++ enum fman_kg_def_select vlan_tci;
++ enum fman_kg_def_select etype;
++ enum fman_kg_def_select ppp_sid;
++ enum fman_kg_def_select ppp_pid;
++ enum fman_kg_def_select mpls;
++ enum fman_kg_def_select ip_addr;
++ enum fman_kg_def_select ptype;
++ enum fman_kg_def_select ip_tos_tc;
++ enum fman_kg_def_select ipv6_fl;
++ enum fman_kg_def_select ipsec_spi;
++ enum fman_kg_def_select l4_port;
++ enum fman_kg_def_select tcp_flg;
++};
++
++enum fman_kg_gen_extract_type
++{
++ E_FMAN_KG_HASH_EXTRACT,
++ E_FMAN_KG_OR_EXTRACT
++};
++
++struct fman_kg_gen_extract_params
++{
++ /* Hash or Or-ed extract */
++ enum fman_kg_gen_extract_type type;
++ enum fman_kg_gen_extract_src src;
++ bool no_validation;
++ /* Extraction offset from the header location specified above */
++ uint8_t offset;
++ /* Size of extraction for FMAN_KG_HASH_EXTRACT,
++ * hash result shift for FMAN_KG_OR_EXTRACT */
++ uint8_t extract;
++ uint8_t mask;
++	/* Default value to use when the header specified
++	 * by fman_kg_gen_extract_src is not present */
++ enum fman_kg_def_select def_val;
++};
++
++struct fman_kg_extract_mask
++{
++ /**< Indication if mask is on known field extraction or
++ * on general extraction; TRUE for known field */
++ bool is_known;
++ /**< One of FMAN_KG_EXTRACT_xxx defines for known fields mask and
++ * generic register index for generic extracts mask */
++ uint32_t field_or_gen_idx;
++ /**< Byte offset from start of the extracted data specified
++ * by field_or_gen_idx */
++ uint8_t offset;
++ /**< Byte mask (selected bits will be used) */
++ uint8_t mask;
++};
++
++struct fman_kg_extract_params
++{
++ /* Or-ed mask of FMAN_KG_EXTRACT_xxx defines */
++ uint32_t known_fields;
++ struct fman_kg_extract_def known_fields_def;
++ /* Number of entries in gen_extract */
++ uint8_t gen_extract_num;
++ struct fman_kg_gen_extract_params gen_extract[FM_KG_NUM_OF_GENERIC_REGS];
++ /* Number of entries in masks */
++ uint8_t masks_num;
++ struct fman_kg_extract_mask masks[FM_KG_EXTRACT_MASKS_NUM];
++ uint32_t def_scheme_0;
++ uint32_t def_scheme_1;
++};
++
++struct fman_kg_hash_params
++{
++ bool use_hash;
++ uint8_t shift_r;
++ uint32_t mask; /**< 24-bit mask */
++ bool sym; /**< Symmetric hash for src and dest pairs */
++};
++
++struct fman_kg_pp_params
++{
++ uint8_t base;
++ uint8_t shift;
++ uint8_t mask;
++ bool bypass_pp_gen;
++};
++
++struct fman_kg_cc_params
++{
++ uint8_t base_offset;
++ uint32_t qlcv_bits_sel;
++};
++
++enum fman_pcd_engine
++{
++	E_FMAN_PCD_INVALID = 0, /**< Invalid PCD engine indicated */
++ E_FMAN_PCD_DONE, /**< No PCD Engine indicated */
++ E_FMAN_PCD_KG, /**< Keygen indicated */
++ E_FMAN_PCD_CC, /**< Coarse classification indicated */
++ E_FMAN_PCD_PLCR, /**< Policer indicated */
++ E_FMAN_PCD_PRS /**< Parser indicated */
++};
++
++struct fman_kg_cls_plan_params
++{
++ uint8_t entries_mask;
++ uint32_t mask_vector[FM_KG_NUM_CLS_PLAN_ENTR];
++};
++
++struct fman_kg_scheme_params
++{
++ uint32_t match_vector;
++ struct fman_kg_extract_params extract_params;
++ struct fman_kg_hash_params hash_params;
++ uint32_t base_fqid;
++	/* TODO: how to handle features supported per FM version? */
++ bool bypass_fqid_gen;
++ struct fman_kg_pp_params policer_params;
++ struct fman_kg_cc_params cc_params;
++ bool update_counter;
++ /**< counter_value: Set scheme counter to the specified value;
++ * relevant only when update_counter = TRUE. */
++ uint32_t counter_value;
++ enum fman_pcd_engine next_engine;
++ /**< Next engine action code */
++ uint32_t next_engine_action;
++};
++
++
++
++int fman_kg_write_ar_wait(struct fman_kg_regs *regs, uint32_t fmkg_ar);
++void fman_kg_write_sp(struct fman_kg_regs *regs, uint32_t sp, bool add);
++void fman_kg_write_cpp(struct fman_kg_regs *regs, uint32_t cpp);
++void fman_kg_get_event(struct fman_kg_regs *regs,
++ uint32_t *event,
++ uint32_t *scheme_idx);
++void fman_kg_init(struct fman_kg_regs *regs,
++ uint32_t exceptions,
++ uint32_t dflt_nia);
++void fman_kg_enable_scheme_interrupts(struct fman_kg_regs *regs);
++void fman_kg_enable(struct fman_kg_regs *regs);
++void fman_kg_disable(struct fman_kg_regs *regs);
++int fman_kg_write_bind_cls_plans(struct fman_kg_regs *regs,
++ uint8_t hwport_id,
++ uint32_t bind_cls_plans);
++int fman_kg_build_bind_cls_plans(uint8_t grp_base,
++ uint8_t grp_mask,
++ uint32_t *bind_cls_plans);
++int fman_kg_write_bind_schemes(struct fman_kg_regs *regs,
++ uint8_t hwport_id,
++ uint32_t schemes);
++int fman_kg_write_cls_plan(struct fman_kg_regs *regs,
++ uint8_t grp_id,
++ uint8_t entries_mask,
++ uint8_t hwport_id,
++ struct fman_kg_cp_regs *cls_plan_regs);
++int fman_kg_build_cls_plan(struct fman_kg_cls_plan_params *params,
++ struct fman_kg_cp_regs *cls_plan_regs);
++uint32_t fman_kg_get_schemes_total_counter(struct fman_kg_regs *regs);
++int fman_kg_set_scheme_counter(struct fman_kg_regs *regs,
++ uint8_t scheme_id,
++ uint8_t hwport_id,
++ uint32_t counter);
++int fman_kg_get_scheme_counter(struct fman_kg_regs *regs,
++ uint8_t scheme_id,
++ uint8_t hwport_id,
++ uint32_t *counter);
++int fman_kg_delete_scheme(struct fman_kg_regs *regs,
++ uint8_t scheme_id,
++ uint8_t hwport_id);
++int fman_kg_write_scheme(struct fman_kg_regs *regs,
++ uint8_t scheme_id,
++ uint8_t hwport_id,
++ struct fman_kg_scheme_regs *scheme_regs,
++ bool update_counter);
++int fman_kg_build_scheme(struct fman_kg_scheme_params *params,
++ struct fman_kg_scheme_regs *scheme_regs);
++void fman_kg_get_capture(struct fman_kg_regs *regs,
++ struct fman_kg_ex_ecc_attr *ecc_attr,
++ bool clear);
++void fman_kg_get_exception(struct fman_kg_regs *regs,
++ uint32_t *events,
++ uint32_t *scheme_ids,
++ bool clear);
++void fman_kg_set_exception(struct fman_kg_regs *regs,
++ uint32_t exception,
++ bool enable);
++void fman_kg_set_dflt_val(struct fman_kg_regs *regs,
++ uint8_t def_id,
++ uint32_t val);
++void fman_kg_set_data_after_prs(struct fman_kg_regs *regs, uint8_t offset);
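++
++/*
++ * Illustrative sketch (not part of the driver): a scheme is first
++ * translated from its abstract fman_kg_scheme_params description into
++ * register values with fman_kg_build_scheme(), then committed to the
++ * hardware with fman_kg_write_scheme(). The IDs are placeholders the
++ * caller allocates.
++ */
++static inline int example_kg_install_scheme(struct fman_kg_regs *regs,
++				struct fman_kg_scheme_params *params,
++				uint8_t scheme_id, uint8_t hwport_id)
++{
++	struct fman_kg_scheme_regs scheme_regs;
++	int err;
++
++	err = fman_kg_build_scheme(params, &scheme_regs);
++	if (err != 0)
++		return err;
++	return fman_kg_write_scheme(regs, scheme_id, hwport_id,
++				&scheme_regs, params->update_counter);
++}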
++
++
++
++/**************************************************************************//**
++ @Description NIA Description
++*//***************************************************************************/
++#define KG_NIA_ORDER_RESTOR 0x00800000
++#define KG_NIA_ENG_FM_CTL 0x00000000
++#define KG_NIA_ENG_PRS 0x00440000
++#define KG_NIA_ENG_KG 0x00480000
++#define KG_NIA_ENG_PLCR 0x004C0000
++#define KG_NIA_ENG_BMI 0x00500000
++#define KG_NIA_ENG_QMI_ENQ 0x00540000
++#define KG_NIA_ENG_QMI_DEQ 0x00580000
++#define KG_NIA_ENG_MASK 0x007C0000
++
++#define KG_NIA_AC_MASK 0x0003FFFF
++
++#define KG_NIA_INVALID 0xFFFFFFFF
++
++static __inline__ uint32_t fm_kg_build_nia(enum fman_pcd_engine next_engine,
++ uint32_t next_engine_action)
++{
++ uint32_t nia;
++
++ if (next_engine_action & ~KG_NIA_AC_MASK)
++ return KG_NIA_INVALID;
++
++ switch (next_engine) {
++ case E_FMAN_PCD_DONE:
++ nia = KG_NIA_ENG_BMI | next_engine_action;
++ break;
++
++ case E_FMAN_PCD_KG:
++ nia = KG_NIA_ENG_KG | next_engine_action;
++ break;
++
++ case E_FMAN_PCD_CC:
++ nia = KG_NIA_ENG_FM_CTL | next_engine_action;
++ break;
++
++ case E_FMAN_PCD_PLCR:
++ nia = KG_NIA_ENG_PLCR | next_engine_action;
++ break;
++
++ default:
++ nia = KG_NIA_INVALID;
++ }
++
++ return nia;
++}
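++
++/*
++ * Usage sketch (not part of the driver): build a "done" NIA that hands
++ * the frame to BMI with a caller-chosen action code, checking for the
++ * invalid-encoding sentinel.
++ */
++static __inline__ int example_kg_set_done_nia(uint32_t action, uint32_t *nia)
++{
++	uint32_t v = fm_kg_build_nia(E_FMAN_PCD_DONE, action);
++
++	if (v == KG_NIA_INVALID)
++		return -1;	/* action had bits outside KG_NIA_AC_MASK */
++	*nia = v;
++	return 0;
++}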
++
++#endif /* __FSL_FMAN_KG_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac.h
+new file mode 100644
+index 00000000..0dd8286b
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac.h
+@@ -0,0 +1,427 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef __FSL_FMAN_MEMAC_H
++#define __FSL_FMAN_MEMAC_H
++
++#include "common/general.h"
++#include "fsl_enet.h"
++
++
++#define MEMAC_NUM_OF_PADDRS 7 /* Num of additional exact match MAC adr regs */
++
++/* Control and Configuration Register (COMMAND_CONFIG) */
++#define CMD_CFG_MG 0x80000000 /* 00 Magic Packet detection */
++#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
++#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
++#define CMD_CFG_SFD_ANY 0x00200000 /* 10 Disable SFD check */
++#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
++#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
++#define CMD_CFG_SEND_IDLE 0x00010000 /* 15 Force idle generation */
++#define CMD_CFG_CNT_FRM_EN 0x00002000 /* 18 Control frame rx enable */
++#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
++#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
++#define CMD_CFG_LOOPBACK_EN 0x00000400 /* 21 XGMII/GMII loopback enable */
++#define CMD_CFG_TX_ADDR_INS 0x00000200 /* 22 Tx source MAC addr insertion */
++#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
++#define CMD_CFG_PAUSE_FWD 0x00000080 /* 24 Terminate/frwd Pause frames */
++#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
++#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
++#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
++#define CMD_CFG_WAN_MODE 0x00000008 /* 28 WAN mode enable */
++#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
++#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */
++
++/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */
++#define TX_FIFO_SECTIONS_TX_EMPTY_MASK 0xFFFF0000
++#define TX_FIFO_SECTIONS_TX_AVAIL_MASK 0x0000FFFF
++#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G 0x00400000
++#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G 0x00100000
++#define TX_FIFO_SECTIONS_TX_EMPTY_PFC_10G 0x00360000
++#define TX_FIFO_SECTIONS_TX_EMPTY_PFC_1G 0x00040000
++#define TX_FIFO_SECTIONS_TX_AVAIL_10G 0x00000019
++#define TX_FIFO_SECTIONS_TX_AVAIL_1G 0x00000020
++#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G 0x00000060
++
++#define GET_TX_EMPTY_DEFAULT_VALUE(_val) \
++_val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \
++((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \
++ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) : \
++ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));
++
++#define GET_TX_EMPTY_PFC_VALUE(_val) \
++_val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \
++((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \
++ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_PFC_10G) : \
++ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_PFC_1G));
++
++/* Interface Mode Register (IF_MODE) */
++#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
++#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
++#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
++#define IF_MODE_RGMII 0x00000004
++#define IF_MODE_RGMII_AUTO 0x00008000
++#define IF_MODE_RGMII_1000 0x00004000 /* 10 - 1000Mbps RGMII */
++#define IF_MODE_RGMII_100 0x00000000 /* 00 - 100Mbps RGMII */
++#define IF_MODE_RGMII_10 0x00002000 /* 01 - 10Mbps RGMII */
++#define IF_MODE_RGMII_SP_MASK	0x00006000  /* Speed select mask bits */
++#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */
++#define IF_MODE_HD 0x00000040 /* Half duplex operation */
++
++/* Hash table Control Register (HASHTABLE_CTRL) */
++#define HASH_CTRL_MCAST_SHIFT 26
++#define HASH_CTRL_MCAST_EN 0x00000100 /* 23 Mcast frame rx for hash */
++#define HASH_CTRL_ADDR_MASK 0x0000003F /* 26-31 Hash table address code */
++
++#define GROUP_ADDRESS 0x0000010000000000LL /* MAC mcast indication */
++#define HASH_TABLE_SIZE 64 /* Hash tbl size */
++
++/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
++#define MEMAC_TX_IPG_LENGTH_MASK 0x0000003F
++
++/* Statistics Configuration Register (STATN_CONFIG) */
++#define STATS_CFG_CLR 0x00000004 /* 29 Reset all counters */
++#define STATS_CFG_CLR_ON_RD 0x00000002 /* 30 Clear on read */
++#define STATS_CFG_SATURATE 0x00000001 /* 31 Saturate at the maximum val */
++
++/* Interrupt Mask Register (IMASK) */
++#define MEMAC_IMASK_MGI 0x40000000 /* 1 Magic pkt detect indication */
++#define MEMAC_IMASK_TSECC_ER 0x20000000 /* 2 Timestamp FIFO ECC error evnt */
++#define MEMAC_IMASK_TECC_ER 0x02000000 /* 6 Transmit frame ECC error evnt */
++#define MEMAC_IMASK_RECC_ER 0x01000000 /* 7 Receive frame ECC error evnt */
++
++#define MEMAC_ALL_ERRS_IMASK \
++ ((uint32_t)(MEMAC_IMASK_TSECC_ER | \
++ MEMAC_IMASK_TECC_ER | \
++ MEMAC_IMASK_RECC_ER | \
++ MEMAC_IMASK_MGI))
++
++#define MEMAC_IEVNT_PCS 0x80000000 /* PCS (XG). Link sync (G) */
++#define MEMAC_IEVNT_AN 0x40000000 /* Auto-negotiation */
++#define MEMAC_IEVNT_LT 0x20000000 /* Link Training/New page */
++#define MEMAC_IEVNT_MGI 0x00004000 /* Magic pkt detection */
++#define MEMAC_IEVNT_TS_ECC_ER 0x00002000 /* Timestamp FIFO ECC error */
++#define MEMAC_IEVNT_RX_FIFO_OVFL 0x00001000 /* Rx FIFO overflow */
++#define MEMAC_IEVNT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */
++#define MEMAC_IEVNT_TX_FIFO_OVFL 0x00000400 /* Tx FIFO overflow */
++#define MEMAC_IEVNT_TX_ECC_ER 0x00000200 /* Tx frame ECC error */
++#define MEMAC_IEVNT_RX_ECC_ER 0x00000100 /* Rx frame ECC error */
++#define MEMAC_IEVNT_LI_FAULT 0x00000080 /* Link Interruption flt */
++#define MEMAC_IEVNT_RX_EMPTY 0x00000040 /* Rx FIFO empty */
++#define MEMAC_IEVNT_TX_EMPTY 0x00000020 /* Tx FIFO empty */
++#define MEMAC_IEVNT_RX_LOWP 0x00000010 /* Low Power Idle */
++#define MEMAC_IEVNT_PHY_LOS 0x00000004 /* Phy loss of signal */
++#define MEMAC_IEVNT_REM_FAULT 0x00000002 /* Remote fault (XGMII) */
++#define MEMAC_IEVNT_LOC_FAULT 0x00000001 /* Local fault (XGMII) */
++
++enum memac_counters {
++ E_MEMAC_COUNTER_R64,
++ E_MEMAC_COUNTER_R127,
++ E_MEMAC_COUNTER_R255,
++ E_MEMAC_COUNTER_R511,
++ E_MEMAC_COUNTER_R1023,
++ E_MEMAC_COUNTER_R1518,
++ E_MEMAC_COUNTER_R1519X,
++ E_MEMAC_COUNTER_RFRG,
++ E_MEMAC_COUNTER_RJBR,
++ E_MEMAC_COUNTER_RDRP,
++ E_MEMAC_COUNTER_RALN,
++ E_MEMAC_COUNTER_TUND,
++ E_MEMAC_COUNTER_ROVR,
++ E_MEMAC_COUNTER_RXPF,
++ E_MEMAC_COUNTER_TXPF,
++ E_MEMAC_COUNTER_ROCT,
++ E_MEMAC_COUNTER_RMCA,
++ E_MEMAC_COUNTER_RBCA,
++ E_MEMAC_COUNTER_RPKT,
++ E_MEMAC_COUNTER_RUCA,
++ E_MEMAC_COUNTER_RERR,
++ E_MEMAC_COUNTER_TOCT,
++ E_MEMAC_COUNTER_TMCA,
++ E_MEMAC_COUNTER_TBCA,
++ E_MEMAC_COUNTER_TUCA,
++ E_MEMAC_COUNTER_TERR
++};
++
++#define DEFAULT_PAUSE_QUANTA 0xf000
++#define DEFAULT_FRAME_LENGTH 0x600
++#define DEFAULT_TX_IPG_LENGTH 12
++
++/*
++ * memory map
++ */
++
++struct mac_addr {
++ uint32_t mac_addr_l; /* Lower 32 bits of 48-bit MAC address */
++ uint32_t mac_addr_u; /* Upper 16 bits of 48-bit MAC address */
++};
++
++struct memac_regs {
++ /* General Control and Status */
++ uint32_t res0000[2];
++ uint32_t command_config; /* 0x008 Ctrl and cfg */
++ struct mac_addr mac_addr0; /* 0x00C-0x010 MAC_ADDR_0...1 */
++ uint32_t maxfrm; /* 0x014 Max frame length */
++ uint32_t res0018[1];
++ uint32_t rx_fifo_sections; /* Receive FIFO configuration reg */
++ uint32_t tx_fifo_sections; /* Transmit FIFO configuration reg */
++ uint32_t res0024[2];
++ uint32_t hashtable_ctrl; /* 0x02C Hash table control */
++ uint32_t res0030[4];
++ uint32_t ievent; /* 0x040 Interrupt event */
++ uint32_t tx_ipg_length; /* 0x044 Transmitter inter-packet-gap */
++ uint32_t res0048;
++ uint32_t imask; /* 0x04C Interrupt mask */
++ uint32_t res0050;
++ uint32_t pause_quanta[4]; /* 0x054 Pause quanta */
++ uint32_t pause_thresh[4]; /* 0x064 Pause quanta threshold */
++ uint32_t rx_pause_status; /* 0x074 Receive pause status */
++ uint32_t res0078[2];
++ struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS]; /* 0x80-0x0B4 mac padr */
++ uint32_t lpwake_timer; /* 0x0B8 Low Power Wakeup Timer */
++ uint32_t sleep_timer; /* 0x0BC Transmit EEE Low Power Timer */
++ uint32_t res00c0[8];
++ uint32_t statn_config; /* 0x0E0 Statistics configuration */
++ uint32_t res00e4[7];
++ /* Rx Statistics Counter */
++ uint32_t reoct_l;
++ uint32_t reoct_u;
++ uint32_t roct_l;
++ uint32_t roct_u;
++ uint32_t raln_l;
++ uint32_t raln_u;
++ uint32_t rxpf_l;
++ uint32_t rxpf_u;
++ uint32_t rfrm_l;
++ uint32_t rfrm_u;
++ uint32_t rfcs_l;
++ uint32_t rfcs_u;
++ uint32_t rvlan_l;
++ uint32_t rvlan_u;
++ uint32_t rerr_l;
++ uint32_t rerr_u;
++ uint32_t ruca_l;
++ uint32_t ruca_u;
++ uint32_t rmca_l;
++ uint32_t rmca_u;
++ uint32_t rbca_l;
++ uint32_t rbca_u;
++ uint32_t rdrp_l;
++ uint32_t rdrp_u;
++ uint32_t rpkt_l;
++ uint32_t rpkt_u;
++ uint32_t rund_l;
++ uint32_t rund_u;
++ uint32_t r64_l;
++ uint32_t r64_u;
++ uint32_t r127_l;
++ uint32_t r127_u;
++ uint32_t r255_l;
++ uint32_t r255_u;
++ uint32_t r511_l;
++ uint32_t r511_u;
++ uint32_t r1023_l;
++ uint32_t r1023_u;
++ uint32_t r1518_l;
++ uint32_t r1518_u;
++ uint32_t r1519x_l;
++ uint32_t r1519x_u;
++ uint32_t rovr_l;
++ uint32_t rovr_u;
++ uint32_t rjbr_l;
++ uint32_t rjbr_u;
++ uint32_t rfrg_l;
++ uint32_t rfrg_u;
++ uint32_t rcnp_l;
++ uint32_t rcnp_u;
++ uint32_t rdrntp_l;
++ uint32_t rdrntp_u;
++ uint32_t res01d0[12];
++ /* Tx Statistics Counter */
++ uint32_t teoct_l;
++ uint32_t teoct_u;
++ uint32_t toct_l;
++ uint32_t toct_u;
++ uint32_t res0210[2];
++ uint32_t txpf_l;
++ uint32_t txpf_u;
++ uint32_t tfrm_l;
++ uint32_t tfrm_u;
++ uint32_t tfcs_l;
++ uint32_t tfcs_u;
++ uint32_t tvlan_l;
++ uint32_t tvlan_u;
++ uint32_t terr_l;
++ uint32_t terr_u;
++ uint32_t tuca_l;
++ uint32_t tuca_u;
++ uint32_t tmca_l;
++ uint32_t tmca_u;
++ uint32_t tbca_l;
++ uint32_t tbca_u;
++ uint32_t res0258[2];
++ uint32_t tpkt_l;
++ uint32_t tpkt_u;
++ uint32_t tund_l;
++ uint32_t tund_u;
++ uint32_t t64_l;
++ uint32_t t64_u;
++ uint32_t t127_l;
++ uint32_t t127_u;
++ uint32_t t255_l;
++ uint32_t t255_u;
++ uint32_t t511_l;
++ uint32_t t511_u;
++ uint32_t t1023_l;
++ uint32_t t1023_u;
++ uint32_t t1518_l;
++ uint32_t t1518_u;
++ uint32_t t1519x_l;
++ uint32_t t1519x_u;
++ uint32_t res02a8[6];
++ uint32_t tcnp_l;
++ uint32_t tcnp_u;
++ uint32_t res02c8[14];
++ /* Line Interface Control */
++ uint32_t if_mode; /* 0x300 Interface Mode Control */
++ uint32_t if_status; /* 0x304 Interface Status */
++ uint32_t res0308[14];
++ /* HiGig/2 */
++ uint32_t hg_config; /* 0x340 Control and cfg */
++ uint32_t res0344[3];
++ uint32_t hg_pause_quanta; /* 0x350 Pause quanta */
++ uint32_t res0354[3];
++ uint32_t hg_pause_thresh; /* 0x360 Pause quanta threshold */
++ uint32_t res0364[3];
++ uint32_t hgrx_pause_status; /* 0x370 Receive pause status */
++ uint32_t hg_fifos_status; /* 0x374 fifos status */
++ uint32_t rhm; /* 0x378 rx messages counter */
++ uint32_t thm; /* 0x37C tx messages counter */
++};
++
++struct memac_cfg {
++ bool reset_on_init;
++ bool rx_error_discard;
++ bool pause_ignore;
++ bool pause_forward_enable;
++ bool no_length_check_enable;
++ bool cmd_frame_enable;
++ bool send_idle_enable;
++ bool wan_mode_enable;
++ bool promiscuous_mode_enable;
++ bool tx_addr_ins_enable;
++ bool loopback_enable;
++ bool lgth_check_nostdr;
++ bool time_stamp_enable;
++ bool pad_enable;
++ bool phy_tx_ena_on;
++ bool rx_sfd_any;
++ bool rx_pbl_fwd;
++ bool tx_pbl_fwd;
++ bool debug_mode;
++ bool wake_on_lan;
++ uint16_t max_frame_length;
++ uint16_t pause_quanta;
++ uint32_t tx_ipg_length;
++};
++
++
++/**
++ * fman_memac_defconfig() - Get default MEMAC configuration
++ * @cfg: pointer to configuration structure.
++ *
++ * Call this function to obtain a default set of configuration values for
++ * initializing MEMAC. The user can overwrite any of the values before calling
++ * fman_memac_init(), if specific configuration needs to be applied.
++ */
++void fman_memac_defconfig(struct memac_cfg *cfg);
++
++int fman_memac_init(struct memac_regs *regs,
++ struct memac_cfg *cfg,
++ enum enet_interface enet_interface,
++ enum enet_speed enet_speed,
++ bool slow_10g_if,
++ uint32_t exceptions);
++
++void fman_memac_enable(struct memac_regs *regs, bool apply_rx, bool apply_tx);
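++
++/*
++ * Illustrative bring-up sketch (not part of the driver): obtain the
++ * default configuration, optionally tweak it, then initialize and
++ * enable the MAC. The interface mode/speed values are placeholders a
++ * caller would obtain from platform code.
++ */
++static inline int example_memac_bring_up(struct memac_regs *regs,
++				enum enet_interface iface,
++				enum enet_speed speed)
++{
++	struct memac_cfg cfg;
++	int err;
++
++	fman_memac_defconfig(&cfg);
++	err = fman_memac_init(regs, &cfg, iface, speed,
++			false /* slow_10g_if */, 0 /* no exceptions */);
++	if (err != 0)
++		return err;
++	fman_memac_enable(regs, true, true);	/* start Rx and Tx */
++	return 0;
++}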
++
++void fman_memac_disable(struct memac_regs *regs, bool apply_rx, bool apply_tx);
++
++void fman_memac_set_promiscuous(struct memac_regs *regs, bool val);
++
++void fman_memac_add_addr_in_paddr(struct memac_regs *regs,
++ uint8_t *adr,
++ uint8_t paddr_num);
++
++void fman_memac_clear_addr_in_paddr(struct memac_regs *regs,
++ uint8_t paddr_num);
++
++uint64_t fman_memac_get_counter(struct memac_regs *regs,
++ enum memac_counters reg_name);
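++
++/*
++ * Illustrative sketch (not part of the driver): MEMAC statistics live
++ * in 64-bit lower/upper register pairs; fman_memac_get_counter()
++ * returns the combined value, so callers can aggregate counters
++ * directly. The counter selection below is one arbitrary example.
++ */
++static inline uint64_t example_memac_rx_error_frames(struct memac_regs *regs)
++{
++	return fman_memac_get_counter(regs, E_MEMAC_COUNTER_RERR) +
++		fman_memac_get_counter(regs, E_MEMAC_COUNTER_RALN);
++}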
++
++void fman_memac_set_tx_pause_frames(struct memac_regs *regs,
++ uint8_t priority, uint16_t pauseTime, uint16_t threshTime);
++
++uint16_t fman_memac_get_max_frame_len(struct memac_regs *regs);
++
++void fman_memac_set_exception(struct memac_regs *regs, uint32_t val,
++ bool enable);
++
++void fman_memac_reset_stat(struct memac_regs *regs);
++
++void fman_memac_reset(struct memac_regs *regs);
++
++void fman_memac_reset_filter_table(struct memac_regs *regs);
++
++void fman_memac_set_hash_table_entry(struct memac_regs *regs, uint32_t crc);
++
++void fman_memac_set_hash_table(struct memac_regs *regs, uint32_t val);
++
++void fman_memac_set_rx_ignore_pause_frames(struct memac_regs *regs,
++ bool enable);
++
++void fman_memac_set_wol(struct memac_regs *regs, bool enable);
++
++uint32_t fman_memac_get_event(struct memac_regs *regs, uint32_t ev_mask);
++
++void fman_memac_ack_event(struct memac_regs *regs, uint32_t ev_mask);
++
++uint32_t fman_memac_get_interrupt_mask(struct memac_regs *regs);
++
++void fman_memac_adjust_link(struct memac_regs *regs,
++ enum enet_interface iface_mode,
++ enum enet_speed speed, bool full_dx);
++
++
++
++#endif /*__FSL_FMAN_MEMAC_H*/
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h
+new file mode 100755
+index 00000000..b4304450
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h
+@@ -0,0 +1,78 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_MEMAC_MII_ACC_H
++#define __FSL_FMAN_MEMAC_MII_ACC_H
++
++#include "common/general.h"
++#include "fsl_enet.h"
++/* MII Management Registers */
++#define MDIO_CFG_CLK_DIV_MASK 0x0080ff80
++#define MDIO_CFG_CLK_DIV_SHIFT 7
++#define MDIO_CFG_HOLD_MASK 0x0000001c
++#define MDIO_CFG_ENC45 0x00000040
++#define MDIO_CFG_READ_ERR 0x00000002
++#define MDIO_CFG_BSY 0x00000001
++
++#define MDIO_CTL_PHY_ADDR_SHIFT 5
++#define MDIO_CTL_READ 0x00008000
++
++#define MDIO_DATA_BSY 0x80000000
++
++/*MEMAC Internal PHY Registers - SGMII */
++#define PHY_SGMII_CR_PHY_RESET 0x8000
++#define PHY_SGMII_CR_RESET_AN 0x0200
++#define PHY_SGMII_CR_DEF_VAL 0x1140
++#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001
++#define PHY_SGMII_DEV_ABILITY_1000X 0x01A0
++#define PHY_SGMII_IF_MODE_AN 0x0002
++#define PHY_SGMII_IF_MODE_SGMII 0x0001
++#define PHY_SGMII_IF_MODE_1000X 0x0000
++
++/*----------------------------------------------------*/
++/* MII Configuration Control Memory Map Registers */
++/*----------------------------------------------------*/
++struct memac_mii_access_mem_map {
++ uint32_t mdio_cfg; /* 0x030 */
++ uint32_t mdio_ctrl; /* 0x034 */
++ uint32_t mdio_data; /* 0x038 */
++ uint32_t mdio_addr; /* 0x03c */
++};
++
++int fman_memac_mii_read_phy_reg(struct memac_mii_access_mem_map *mii_regs,
++ uint8_t phy_addr, uint8_t reg, uint16_t *data,
++ enum enet_speed enet_speed);
++int fman_memac_mii_write_phy_reg(struct memac_mii_access_mem_map *mii_regs,
++ uint8_t phy_addr, uint8_t reg, uint16_t data,
++ enum enet_speed enet_speed);
++
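++/*
++ * Usage sketch (illustrative only): reading the standard MII PHYSID1
++ * register (address 2) through the mEMAC MDIO block. `mii_regs` is assumed
++ * to be already mapped by the caller, and E_ENET_SPEED_1000 is assumed to
++ * be one of the enum enet_speed values from fsl_enet.h.
++ */
++static inline int example_read_phy_id1(struct memac_mii_access_mem_map *mii_regs,
++ uint8_t phy_addr, uint16_t *id1)
++{
++ /* register 2 holds the upper half of the IEEE PHY identifier */
++ return fman_memac_mii_read_phy_reg(mii_regs, phy_addr, 2, id1,
++ E_ENET_SPEED_1000);
++}
++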
++#endif /* __FSL_FMAN_MEMAC_MII_ACC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_port.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_port.h
+new file mode 100755
+index 00000000..080a23e9
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_port.h
+@@ -0,0 +1,593 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_PORT_H
++#define __FSL_FMAN_PORT_H
++
++#include "fsl_fman_sp.h"
++
++/** @Collection Registers bit fields */
++
++/** @Description BMI defines */
++#define BMI_EBD_EN 0x80000000
++
++#define BMI_PORT_CFG_EN 0x80000000
++#define BMI_PORT_CFG_FDOVR 0x02000000
++#define BMI_PORT_CFG_IM 0x01000000
++
++#define BMI_PORT_STATUS_BSY 0x80000000
++
++#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
++#define BMI_DMA_ATTR_IC_STASH_ON 0x10000000
++#define BMI_DMA_ATTR_HDR_STASH_ON 0x04000000
++#define BMI_DMA_ATTR_SG_STASH_ON 0x01000000
++#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
++
++#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
++#define BMI_RX_FIFO_THRESHOLD_ETHE 0x80000000
++
++#define BMI_TX_FRAME_END_CS_IGNORE_SHIFT 24
++#define BMI_RX_FRAME_END_CS_IGNORE_SHIFT 24
++#define BMI_RX_FRAME_END_CUT_SHIFT 16
++
++#define BMI_IC_TO_EXT_SHIFT FMAN_SP_IC_TO_EXT_SHIFT
++#define BMI_IC_FROM_INT_SHIFT FMAN_SP_IC_FROM_INT_SHIFT
++
++#define BMI_INT_BUF_MARG_SHIFT 28
++#define BMI_EXT_BUF_MARG_START_SHIFT FMAN_SP_EXT_BUF_MARG_START_SHIFT
++
++#define BMI_CMD_MR_LEAC 0x00200000
++#define BMI_CMD_MR_SLEAC 0x00100000
++#define BMI_CMD_MR_MA 0x00080000
++#define BMI_CMD_MR_DEAS 0x00040000
++#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \
++ BMI_CMD_MR_SLEAC | \
++ BMI_CMD_MR_MA | \
++ BMI_CMD_MR_DEAS)
++#define BMI_CMD_TX_MR_DEF 0
++#define BMI_CMD_OP_MR_DEF (BMI_CMD_MR_DEAS | \
++ BMI_CMD_MR_MA)
++
++#define BMI_CMD_ATTR_ORDER 0x80000000
++#define BMI_CMD_ATTR_SYNC 0x02000000
++#define BMI_CMD_ATTR_COLOR_SHIFT 26
++
++#define BMI_FIFO_PIPELINE_DEPTH_SHIFT 12
++#define BMI_NEXT_ENG_FD_BITS_SHIFT 24
++#define BMI_FRAME_END_CS_IGNORE_SHIFT 24
++
++#define BMI_COUNTERS_EN 0x80000000
++
++#define BMI_EXT_BUF_POOL_VALID FMAN_SP_EXT_BUF_POOL_VALID
++#define BMI_EXT_BUF_POOL_EN_COUNTER FMAN_SP_EXT_BUF_POOL_EN_COUNTER
++#define BMI_EXT_BUF_POOL_BACKUP FMAN_SP_EXT_BUF_POOL_BACKUP
++#define BMI_EXT_BUF_POOL_ID_SHIFT 16
++#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000
++#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16
++
++#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
++#define BMI_TX_FIFO_PIPELINE_DEPTH_SHIFT 12
++
++#define MAX_PERFORMANCE_TASK_COMP 64
++#define MAX_PERFORMANCE_RX_QUEUE_COMP 64
++#define MAX_PERFORMANCE_TX_QUEUE_COMP 8
++#define MAX_PERFORMANCE_DMA_COMP 16
++#define MAX_PERFORMANCE_FIFO_COMP 1024
++
++#define BMI_PERFORMANCE_TASK_COMP_SHIFT 24
++#define BMI_PERFORMANCE_QUEUE_COMP_SHIFT 16
++#define BMI_PERFORMANCE_DMA_COMP_SHIFT 12
++
++#define BMI_RATE_LIMIT_GRAN_TX 16000 /* In Kbps */
++#define BMI_RATE_LIMIT_GRAN_OP 10000 /* In frames */
++#define BMI_RATE_LIMIT_MAX_RATE_IN_GRAN_UNITS 1024
++#define BMI_RATE_LIMIT_MAX_BURST_SIZE 1024 /* In KBytes */
++#define BMI_RATE_LIMIT_MAX_BURST_SHIFT 16
++#define BMI_RATE_LIMIT_HIGH_BURST_SIZE_GRAN 0x80000000
++#define BMI_RATE_LIMIT_SCALE_TSBS_SHIFT 16
++#define BMI_RATE_LIMIT_SCALE_EN 0x80000000
++#define BMI_SG_DISABLE FMAN_SP_SG_DISABLE
++
++/** @Description QMI defines */
++#define QMI_PORT_CFG_EN 0x80000000
++#define QMI_PORT_CFG_EN_COUNTERS 0x10000000
++
++#define QMI_PORT_STATUS_DEQ_TNUM_BSY 0x80000000
++#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000
++
++#define QMI_DEQ_CFG_PRI 0x80000000
++#define QMI_DEQ_CFG_TYPE1 0x10000000
++#define QMI_DEQ_CFG_TYPE2 0x20000000
++#define QMI_DEQ_CFG_TYPE3 0x30000000
++#define QMI_DEQ_CFG_PREFETCH_PARTIAL 0x01000000
++#define QMI_DEQ_CFG_PREFETCH_FULL 0x03000000
++#define QMI_DEQ_CFG_SP_MASK 0xf
++#define QMI_DEQ_CFG_SP_SHIFT 20
++
++
++/** @Description General port defines */
++#define FMAN_PORT_EXT_POOLS_NUM(fm_rev_maj) \
++ (((fm_rev_maj) == 4) ? 4 : 8)
++#define FMAN_PORT_MAX_EXT_POOLS_NUM 8
++#define FMAN_PORT_OBS_EXT_POOLS_NUM 2
++#define FMAN_PORT_CG_MAP_NUM 8
++#define FMAN_PORT_PRS_RESULT_WORDS_NUM 8
++#define FMAN_PORT_BMI_FIFO_UNITS 0x100
++#define FMAN_PORT_IC_OFFSET_UNITS 0x10
++
++
++/** @Collection FM Port Register Map */
++
++/** @Description BMI Rx port register map */
++struct fman_port_rx_bmi_regs {
++ uint32_t fmbm_rcfg; /**< Rx Configuration */
++ uint32_t fmbm_rst; /**< Rx Status */
++ uint32_t fmbm_rda; /**< Rx DMA attributes*/
++ uint32_t fmbm_rfp; /**< Rx FIFO Parameters*/
++ uint32_t fmbm_rfed; /**< Rx Frame End Data*/
++ uint32_t fmbm_ricp; /**< Rx Internal Context Parameters*/
++ uint32_t fmbm_rim; /**< Rx Internal Buffer Margins*/
++ uint32_t fmbm_rebm; /**< Rx External Buffer Margins*/
++ uint32_t fmbm_rfne; /**< Rx Frame Next Engine*/
++ uint32_t fmbm_rfca; /**< Rx Frame Command Attributes.*/
++ uint32_t fmbm_rfpne; /**< Rx Frame Parser Next Engine*/
++ uint32_t fmbm_rpso; /**< Rx Parse Start Offset*/
++ uint32_t fmbm_rpp; /**< Rx Policer Profile */
++ uint32_t fmbm_rccb; /**< Rx Coarse Classification Base */
++ uint32_t fmbm_reth; /**< Rx Excessive Threshold */
++ uint32_t reserved003c[1]; /**< (0x03C 0x03F) */
++ uint32_t fmbm_rprai[FMAN_PORT_PRS_RESULT_WORDS_NUM];
++ /**< Rx Parse Results Array Init*/
++ uint32_t fmbm_rfqid; /**< Rx Frame Queue ID*/
++ uint32_t fmbm_refqid; /**< Rx Error Frame Queue ID*/
++ uint32_t fmbm_rfsdm; /**< Rx Frame Status Discard Mask*/
++ uint32_t fmbm_rfsem; /**< Rx Frame Status Error Mask*/
++ uint32_t fmbm_rfene; /**< Rx Frame Enqueue Next Engine */
++ uint32_t reserved0074[0x2]; /**< (0x074-0x07C) */
++ uint32_t fmbm_rcmne; /**< Rx Frame Continuous Mode Next Engine */
++ uint32_t reserved0080[0x20];/**< (0x080 - 0x0FF) */
++ uint32_t fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
++ /**< Buffer Manager pool Information */
++ uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
++ /**< Allocate Counter */
++ uint32_t reserved0130[8];
++ /**< 0x130/0x140 - 0x15F reserved */
++ uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
++ /**< Congestion Group Map*/
++ uint32_t fmbm_mpd; /**< BM Pool Depletion */
++ uint32_t reserved0184[0x1F]; /**< (0x184 - 0x1FF) */
++ uint32_t fmbm_rstc; /**< Rx Statistics Counters*/
++ uint32_t fmbm_rfrc; /**< Rx Frame Counter*/
++ uint32_t fmbm_rfbc; /**< Rx Bad Frames Counter*/
++ uint32_t fmbm_rlfc; /**< Rx Large Frames Counter*/
++ uint32_t fmbm_rffc; /**< Rx Filter Frames Counter*/
++ uint32_t fmbm_rfdc; /**< Rx Frame Discard Counter*/
++ uint32_t fmbm_rfldec; /**< Rx Frames List DMA Error Counter*/
++ uint32_t fmbm_rodc; /**< Rx Out of Buffers Discard Counter*/
++ uint32_t fmbm_rbdc; /**< Rx Buffers Deallocate Counter*/
++ uint32_t reserved0224[0x17]; /**< (0x224 - 0x27F) */
++ uint32_t fmbm_rpc; /**< Rx Performance Counters*/
++ uint32_t fmbm_rpcp; /**< Rx Performance Count Parameters*/
++ uint32_t fmbm_rccn; /**< Rx Cycle Counter*/
++ uint32_t fmbm_rtuc; /**< Rx Tasks Utilization Counter*/
++ uint32_t fmbm_rrquc; /**< Rx Receive Queue Utilization cntr*/
++ uint32_t fmbm_rduc; /**< Rx DMA Utilization Counter*/
++ uint32_t fmbm_rfuc; /**< Rx FIFO Utilization Counter*/
++ uint32_t fmbm_rpac; /**< Rx Pause Activation Counter*/
++ uint32_t reserved02a0[0x18]; /**< (0x2A0 - 0x2FF) */
++ uint32_t fmbm_rdbg; /**< Rx Debug */
++};
++
++/** @Description BMI Tx port register map */
++struct fman_port_tx_bmi_regs {
++ uint32_t fmbm_tcfg; /**< Tx Configuration */
++ uint32_t fmbm_tst; /**< Tx Status */
++ uint32_t fmbm_tda; /**< Tx DMA attributes */
++ uint32_t fmbm_tfp; /**< Tx FIFO Parameters */
++ uint32_t fmbm_tfed; /**< Tx Frame End Data */
++ uint32_t fmbm_ticp; /**< Tx Internal Context Parameters */
++ uint32_t fmbm_tfdne; /**< Tx Frame Dequeue Next Engine. */
++ uint32_t fmbm_tfca; /**< Tx Frame Command attribute. */
++ uint32_t fmbm_tcfqid; /**< Tx Confirmation Frame Queue ID. */
++ uint32_t fmbm_tefqid; /**< Tx Frame Error Queue ID */
++ uint32_t fmbm_tfene; /**< Tx Frame Enqueue Next Engine */
++ uint32_t fmbm_trlmts; /**< Tx Rate Limiter Scale */
++ uint32_t fmbm_trlmt; /**< Tx Rate Limiter */
++ uint32_t reserved0034[0x0e]; /**< (0x034-0x6c) */
++ uint32_t fmbm_tccb; /**< Tx Coarse Classification base */
++ uint32_t fmbm_tfne; /**< Tx Frame Next Engine */
++ uint32_t fmbm_tpfcm[0x02]; /**< Tx Priority based Flow Control (PFC) Mapping */
++ uint32_t fmbm_tcmne; /**< Tx Frame Continuous Mode Next Engine */
++ uint32_t reserved0080[0x60]; /**< (0x080-0x200) */
++ uint32_t fmbm_tstc; /**< Tx Statistics Counters */
++ uint32_t fmbm_tfrc; /**< Tx Frame Counter */
++ uint32_t fmbm_tfdc; /**< Tx Frames Discard Counter */
++ uint32_t fmbm_tfledc; /**< Tx Frame len error discard cntr */
++ uint32_t fmbm_tfufdc; /**< Tx Frame unsprt frmt discard cntr*/
++ uint32_t fmbm_tbdc; /**< Tx Buffers Deallocate Counter */
++ uint32_t reserved0218[0x1A]; /**< (0x218-0x280) */
++ uint32_t fmbm_tpc; /**< Tx Performance Counters*/
++ uint32_t fmbm_tpcp; /**< Tx Performance Count Parameters*/
++ uint32_t fmbm_tccn; /**< Tx Cycle Counter*/
++ uint32_t fmbm_ttuc; /**< Tx Tasks Utilization Counter*/
++ uint32_t fmbm_ttcquc; /**< Tx Transmit conf Q util Counter*/
++ uint32_t fmbm_tduc; /**< Tx DMA Utilization Counter*/
++ uint32_t fmbm_tfuc; /**< Tx FIFO Utilization Counter*/
++};
++
++/** @Description BMI O/H port register map */
++struct fman_port_oh_bmi_regs {
++ uint32_t fmbm_ocfg; /**< O/H Configuration */
++ uint32_t fmbm_ost; /**< O/H Status */
++ uint32_t fmbm_oda; /**< O/H DMA attributes */
++ uint32_t fmbm_oicp; /**< O/H Internal Context Parameters */
++ uint32_t fmbm_ofdne; /**< O/H Frame Dequeue Next Engine */
++ uint32_t fmbm_ofne; /**< O/H Frame Next Engine */
++ uint32_t fmbm_ofca; /**< O/H Frame Command Attributes. */
++ uint32_t fmbm_ofpne; /**< O/H Frame Parser Next Engine */
++ uint32_t fmbm_opso; /**< O/H Parse Start Offset */
++ uint32_t fmbm_opp; /**< O/H Policer Profile */
++ uint32_t fmbm_occb; /**< O/H Coarse Classification base */
++ uint32_t fmbm_oim; /**< O/H Internal margins*/
++ uint32_t fmbm_ofp; /**< O/H Fifo Parameters*/
++ uint32_t fmbm_ofed; /**< O/H Frame End Data*/
++ uint32_t reserved0030[2]; /**< (0x038 - 0x03F) */
++ uint32_t fmbm_oprai[FMAN_PORT_PRS_RESULT_WORDS_NUM];
++ /**< O/H Parse Results Array Initialization */
++ uint32_t fmbm_ofqid; /**< O/H Frame Queue ID */
++ uint32_t fmbm_oefqid; /**< O/H Error Frame Queue ID */
++ uint32_t fmbm_ofsdm; /**< O/H Frame Status Discard Mask */
++ uint32_t fmbm_ofsem; /**< O/H Frame Status Error Mask */
++ uint32_t fmbm_ofene; /**< O/H Frame Enqueue Next Engine */
++ uint32_t fmbm_orlmts; /**< O/H Rate Limiter Scale */
++ uint32_t fmbm_orlmt; /**< O/H Rate Limiter */
++ uint32_t fmbm_ocmne; /**< O/H Continuous Mode Next Engine */
++ uint32_t reserved0080[0x20]; /**< 0x080 - 0x0FF Reserved */
++ uint32_t fmbm_oebmpi[2]; /**< Buf Mngr Observed Pool Info */
++ uint32_t reserved0108[0x16]; /**< 0x108 - 0x15F Reserved */
++ uint32_t fmbm_ocgm[FMAN_PORT_CG_MAP_NUM]; /**< Observed Congestion Group Map */
++ uint32_t fmbm_ompd; /**< Observed BMan Pool Depletion */
++ uint32_t reserved0184[0x1F]; /**< 0x184 - 0x1FF Reserved */
++ uint32_t fmbm_ostc; /**< O/H Statistics Counters */
++ uint32_t fmbm_ofrc; /**< O/H Frame Counter */
++ uint32_t fmbm_ofdc; /**< O/H Frames Discard Counter */
++ uint32_t fmbm_ofledc; /**< O/H Frames Len Err Discard Cntr */
++ uint32_t fmbm_ofufdc; /**< O/H Frames Unsprtd Discard Cntr */
++ uint32_t fmbm_offc; /**< O/H Filter Frames Counter */
++ uint32_t fmbm_ofwdc; /**< Rx Frames WRED Discard Counter */
++ uint32_t fmbm_ofldec; /**< O/H Frames List DMA Error Cntr */
++ uint32_t fmbm_obdc; /**< O/H Buffers Deallocate Counter */
++ uint32_t reserved0218[0x17]; /**< (0x218 - 0x27F) */
++ uint32_t fmbm_opc; /**< O/H Performance Counters */
++ uint32_t fmbm_opcp; /**< O/H Performance Count Parameters */
++ uint32_t fmbm_occn; /**< O/H Cycle Counter */
++ uint32_t fmbm_otuc; /**< O/H Tasks Utilization Counter */
++ uint32_t fmbm_oduc; /**< O/H DMA Utilization Counter */
++ uint32_t fmbm_ofuc; /**< O/H FIFO Utilization Counter */
++};
++
++/** @Description BMI port register map */
++union fman_port_bmi_regs {
++ struct fman_port_rx_bmi_regs rx;
++ struct fman_port_tx_bmi_regs tx;
++ struct fman_port_oh_bmi_regs oh;
++};
++
++/** @Description QMI port register map */
++struct fman_port_qmi_regs {
++ uint32_t fmqm_pnc; /**< PortID n Configuration Register */
++ uint32_t fmqm_pns; /**< PortID n Status Register */
++ uint32_t fmqm_pnts; /**< PortID n Task Status Register */
++ uint32_t reserved00c[4]; /**< 0xn00C - 0xn01B */
++ uint32_t fmqm_pnen; /**< PortID n Enqueue NIA Register */
++ uint32_t fmqm_pnetfc; /**< PortID n Enq Total Frame Counter */
++ uint32_t reserved024[2]; /**< 0xn024 - 0xn02B */
++ uint32_t fmqm_pndn; /**< PortID n Dequeue NIA Register */
++ uint32_t fmqm_pndc; /**< PortID n Dequeue Config Register */
++ uint32_t fmqm_pndtfc; /**< PortID n Dequeue tot Frame cntr */
++ uint32_t fmqm_pndfdc; /**< PortID n Dequeue FQID Dflt Cntr */
++ uint32_t fmqm_pndcc; /**< PortID n Dequeue Confirm Counter */
++};
++
++
++enum fman_port_dma_swap {
++ E_FMAN_PORT_DMA_NO_SWAP, /**< No swap, transfer data as is */
++ E_FMAN_PORT_DMA_SWAP_LE,
++ /**< The transferred data should be swapped in PPC Little Endian mode */
++ E_FMAN_PORT_DMA_SWAP_BE
++ /**< The transferred data should be swapped in Big Endian mode */
++};
++
++/* Default port color */
++enum fman_port_color {
++ E_FMAN_PORT_COLOR_GREEN, /**< Default port color is green */
++ E_FMAN_PORT_COLOR_YELLOW, /**< Default port color is yellow */
++ E_FMAN_PORT_COLOR_RED, /**< Default port color is red */
++ E_FMAN_PORT_COLOR_OVERRIDE /**< Ignore color */
++};
++
++/* QMI dequeue from the SP channel - types */
++enum fman_port_deq_type {
++ E_FMAN_PORT_DEQ_BY_PRI,
++ /**< Priority precedence and Intra-Class scheduling */
++ E_FMAN_PORT_DEQ_ACTIVE_FQ,
++ /**< Active FQ precedence and Intra-Class scheduling */
++ E_FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
++ /**< Active FQ precedence and override Intra-Class scheduling */
++};
++
++/* QMI dequeue prefetch modes */
++enum fman_port_deq_prefetch {
++ E_FMAN_PORT_DEQ_NO_PREFETCH, /**< No prefetch mode */
++ E_FMAN_PORT_DEQ_PART_PREFETCH, /**< Partial prefetch mode */
++ E_FMAN_PORT_DEQ_FULL_PREFETCH /**< Full prefetch mode */
++};
++
++/* Parameters for defining performance counters behavior */
++struct fman_port_perf_cnt_params {
++ uint8_t task_val; /**< Task compare value */
++ uint8_t queue_val;
++ /**< Rx or Tx conf queue compare value (unused for O/H ports) */
++ uint8_t dma_val; /**< Dma compare value */
++ uint32_t fifo_val; /**< Fifo compare value (in bytes) */
++};
++
++/** @Description FM Port configuration structure, used at init */
++struct fman_port_cfg {
++ struct fman_port_perf_cnt_params perf_cnt_params;
++ /* BMI parameters */
++ enum fman_port_dma_swap dma_swap_data;
++ bool dma_ic_stash_on;
++ bool dma_header_stash_on;
++ bool dma_sg_stash_on;
++ bool dma_write_optimize;
++ uint16_t ic_ext_offset;
++ uint8_t ic_int_offset;
++ uint16_t ic_size;
++ enum fman_port_color color;
++ bool sync_req;
++ bool discard_override;
++ uint8_t checksum_bytes_ignore;
++ uint8_t rx_cut_end_bytes;
++ uint32_t rx_pri_elevation;
++ uint32_t rx_fifo_thr;
++ uint8_t rx_fd_bits;
++ uint8_t int_buf_start_margin;
++ uint16_t ext_buf_start_margin;
++ uint16_t ext_buf_end_margin;
++ uint32_t tx_fifo_min_level;
++ uint32_t tx_fifo_low_comf_level;
++ uint8_t tx_fifo_deq_pipeline_depth;
++ bool stats_counters_enable;
++ bool perf_counters_enable;
++ /* QMI parameters */
++ bool deq_high_pri;
++ enum fman_port_deq_type deq_type;
++ enum fman_port_deq_prefetch deq_prefetch_opt;
++ uint16_t deq_byte_cnt;
++ bool queue_counters_enable;
++ bool no_scatter_gather;
++ int errata_A006675;
++ int errata_A006320;
++ int excessive_threshold_register;
++ int fmbm_rebm_has_sgd;
++ int fmbm_tfne_has_features;
++ int qmi_deq_options_support;
++};
++
++enum fman_port_type {
++ E_FMAN_PORT_TYPE_OP = 0,
++ /**< Offline parsing port, shares IDs with
++ * host command, so must have exclusive IDs */
++ E_FMAN_PORT_TYPE_RX, /**< 1G Rx port */
++ E_FMAN_PORT_TYPE_RX_10G, /**< 10G Rx port */
++ E_FMAN_PORT_TYPE_TX, /**< 1G Tx port */
++ E_FMAN_PORT_TYPE_TX_10G, /**< 10G Tx port */
++ E_FMAN_PORT_TYPE_DUMMY,
++ E_FMAN_PORT_TYPE_HC = E_FMAN_PORT_TYPE_DUMMY
++ /**< Host command port, shares IDs with
++ * offline parsing ports, so must have exclusive IDs */
++};
++
++struct fman_port_params {
++ uint32_t discard_mask;
++ uint32_t err_mask;
++ uint32_t dflt_fqid;
++ uint32_t err_fqid;
++ uint8_t deq_sp;
++ bool dont_release_buf;
++};
++
++/* Port context - used by most API functions */
++struct fman_port {
++ enum fman_port_type type;
++ uint8_t fm_rev_maj;
++ uint8_t fm_rev_min;
++ union fman_port_bmi_regs *bmi_regs;
++ struct fman_port_qmi_regs *qmi_regs;
++ bool im_en;
++ uint8_t ext_pools_num;
++};
++
++/** @Description External buffer pools configuration */
++struct fman_port_bpools {
++ uint8_t count; /**< Num of pools to set up */
++ bool counters_enable; /**< Enable allocate counters */
++ uint8_t grp_bp_depleted_num;
++ /**< Number of depleted pools that, once reached, makes the
++ * BMI signal the MAC to send a pause frame */
++ struct {
++ uint8_t bpid; /**< BM pool ID */
++ uint16_t size;
++ /**< Pool's size - must be in ascending order */
++ bool is_backup;
++ /**< If this is a backup pool */
++ bool grp_bp_depleted;
++ /**< Consider this pool in the multiple-pools depletion criteria*/
++ bool single_bp_depleted;
++ /**< Consider this pool in the single-pool depletion criteria */
++ bool pfc_priorities_en;
++ } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
++};
++
++enum fman_port_rate_limiter_scale_down {
++ E_FMAN_PORT_RATE_DOWN_NONE,
++ E_FMAN_PORT_RATE_DOWN_BY_2,
++ E_FMAN_PORT_RATE_DOWN_BY_4,
++ E_FMAN_PORT_RATE_DOWN_BY_8
++};
++
++/* Rate limiter configuration */
++struct fman_port_rate_limiter {
++ uint8_t count_1micro_bit;
++ bool high_burst_size_gran;
++ /**< Defines burst_size granularity for OP ports; when TRUE,
++ * burst_size below counts in frames, otherwise in 10^3 frames */
++ uint16_t burst_size;
++ /**< Max burst size, in KBytes for Tx port, according to
++ * high_burst_size_gran definition for OP port */
++ uint32_t rate;
++ /**< In Kbps for Tx port, in frames/sec for OP port */
++ enum fman_port_rate_limiter_scale_down rate_factor;
++};
++
++/* BMI statistics counters */
++enum fman_port_stats_counters {
++ E_FMAN_PORT_STATS_CNT_FRAME,
++ /**< Number of processed frames; valid for all ports */
++ E_FMAN_PORT_STATS_CNT_DISCARD,
++ /**< For Rx ports - frames discarded by QMAN, for Tx or O/H ports -
++ * frames discarded due to DMA error; valid for all ports */
++ E_FMAN_PORT_STATS_CNT_DEALLOC_BUF,
++ /**< Number of buffer deallocate operations; valid for all ports */
++ E_FMAN_PORT_STATS_CNT_RX_BAD_FRAME,
++ /**< Number of bad Rx frames, like CRC error, Rx FIFO overflow etc;
++ * valid for Rx ports only */
++ E_FMAN_PORT_STATS_CNT_RX_LARGE_FRAME,
++ /**< Number of Rx oversized frames, that is, frames exceeding the max
++ * frame size configured for the corresponding ETH controller;
++ * valid for Rx ports only */
++ E_FMAN_PORT_STATS_CNT_RX_OUT_OF_BUF,
++ /**< Frames discarded due to lack of external buffers; valid for
++ * Rx ports only */
++ E_FMAN_PORT_STATS_CNT_LEN_ERR,
++ /**< Frames discarded due to frame length error; valid for Tx and
++ * O/H ports only */
++ E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT,
++ /**< Frames discarded due to unsupported FD format; valid for Tx
++ * and O/H ports only */
++ E_FMAN_PORT_STATS_CNT_FILTERED_FRAME,
++ /**< Number of frames filtered out by PCD module; valid for
++ * Rx and OP ports only */
++ E_FMAN_PORT_STATS_CNT_DMA_ERR,
++ /**< Frames rejected by QMAN that were not able to release their
++ * buffers due to DMA error; valid for Rx and O/H ports only */
++ E_FMAN_PORT_STATS_CNT_WRED_DISCARD
++ /**< Frames going through O/H port that were not able to enter the
++ * return queue due to WRED algorithm; valid for O/H ports only */
++};
++
++/* BMI performance counters */
++enum fman_port_perf_counters {
++ E_FMAN_PORT_PERF_CNT_CYCLE, /**< Cycle counter */
++ E_FMAN_PORT_PERF_CNT_TASK_UTIL, /**< Tasks utilization counter */
++ E_FMAN_PORT_PERF_CNT_QUEUE_UTIL,
++ /**< For Rx ports - Rx queue utilization, for Tx ports - Tx conf queue
++ * utilization; not valid for O/H ports */
++ E_FMAN_PORT_PERF_CNT_DMA_UTIL, /**< DMA utilization counter */
++ E_FMAN_PORT_PERF_CNT_FIFO_UTIL, /**< FIFO utilization counter */
++ E_FMAN_PORT_PERF_CNT_RX_PAUSE
++ /**< Number of cycles in which Rx pause activation control is on;
++ * valid for Rx ports only */
++};
++
++/* QMI counters */
++enum fman_port_qmi_counters {
++ E_FMAN_PORT_ENQ_TOTAL, /**< EnQ tot frame cntr */
++ E_FMAN_PORT_DEQ_TOTAL, /**< DeQ tot frame cntr; invalid for Rx ports */
++ E_FMAN_PORT_DEQ_FROM_DFLT,
++ /**< Dequeue from default FQID counter; not valid for Rx ports */
++ E_FMAN_PORT_DEQ_CONFIRM /**< DeQ confirm cntr; invalid for Rx ports */
++};
++
++
++/** @Collection FM Port API */
++void fman_port_defconfig(struct fman_port_cfg *cfg, enum fman_port_type type);
++int fman_port_init(struct fman_port *port,
++ struct fman_port_cfg *cfg,
++ struct fman_port_params *params);
++int fman_port_enable(struct fman_port *port);
++int fman_port_disable(const struct fman_port *port);
++int fman_port_set_bpools(const struct fman_port *port,
++ const struct fman_port_bpools *bp);
++int fman_port_set_rate_limiter(struct fman_port *port,
++ struct fman_port_rate_limiter *rate_limiter);
++int fman_port_delete_rate_limiter(struct fman_port *port);
++int fman_port_set_err_mask(struct fman_port *port, uint32_t err_mask);
++int fman_port_set_discard_mask(struct fman_port *port, uint32_t discard_mask);
++int fman_port_modify_rx_fd_bits(struct fman_port *port,
++ uint8_t rx_fd_bits,
++ bool add);
++int fman_port_set_perf_cnt_params(struct fman_port *port,
++ struct fman_port_perf_cnt_params *params);
++int fman_port_set_stats_cnt_mode(struct fman_port *port, bool enable);
++int fman_port_set_perf_cnt_mode(struct fman_port *port, bool enable);
++int fman_port_set_queue_cnt_mode(struct fman_port *port, bool enable);
++int fman_port_set_bpool_cnt_mode(struct fman_port *port,
++ uint8_t bpid,
++ bool enable);
++uint32_t fman_port_get_stats_counter(struct fman_port *port,
++ enum fman_port_stats_counters counter);
++void fman_port_set_stats_counter(struct fman_port *port,
++ enum fman_port_stats_counters counter,
++ uint32_t value);
++uint32_t fman_port_get_perf_counter(struct fman_port *port,
++ enum fman_port_perf_counters counter);
++void fman_port_set_perf_counter(struct fman_port *port,
++ enum fman_port_perf_counters counter,
++ uint32_t value);
++uint32_t fman_port_get_qmi_counter(struct fman_port *port,
++ enum fman_port_qmi_counters counter);
++void fman_port_set_qmi_counter(struct fman_port *port,
++ enum fman_port_qmi_counters counter,
++ uint32_t value);
++uint32_t fman_port_get_bpool_counter(struct fman_port *port, uint8_t bpid);
++void fman_port_set_bpool_counter(struct fman_port *port,
++ uint8_t bpid,
++ uint32_t value);
++int fman_port_add_congestion_grps(struct fman_port *port,
++ uint32_t grps_map[FMAN_PORT_CG_MAP_NUM]);
++int fman_port_remove_congestion_grps(struct fman_port *port,
++ uint32_t grps_map[FMAN_PORT_CG_MAP_NUM]);
++
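++/*
++ * Bring-up sketch (illustrative, not part of the SDK): the API above
++ * suggests a defconfig -> init -> enable flow. The caller is assumed to
++ * have filled the port context (type, register pointers, FM revision)
++ * beforehand; the FQIDs are placeholders.
++ */
++static inline int example_port_bringup(struct fman_port *port)
++{
++ struct fman_port_cfg cfg;
++ struct fman_port_params params = {
++ .dflt_fqid = 0x100, /* placeholder default FQID */
++ .err_fqid = 0x101, /* placeholder error FQID */
++ };
++ int err;
++
++ fman_port_defconfig(&cfg, port->type);
++ err = fman_port_init(port, &cfg, &params);
++ if (err != 0)
++ return err;
++ return fman_port_enable(port);
++}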
++
++#endif /* __FSL_FMAN_PORT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_prs.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_prs.h
+new file mode 100644
+index 00000000..b18997dc
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_prs.h
+@@ -0,0 +1,102 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_PRS_H
++#define __FSL_FMAN_PRS_H
++
++#include "common/general.h"
++
++#define FM_PCD_EX_PRS_DOUBLE_ECC 0x02000000
++#define FM_PCD_EX_PRS_SINGLE_ECC 0x01000000
++
++#define FM_PCD_PRS_PPSC_ALL_PORTS 0xffff0000
++#define FM_PCD_PRS_RPIMAC_EN 0x00000001
++#define FM_PCD_PRS_PORT_IDLE_STS 0xffff0000
++#define FM_PCD_PRS_SINGLE_ECC 0x00004000
++#define FM_PCD_PRS_DOUBLE_ECC 0x00004000
++#define PRS_MAX_CYCLE_LIMIT 8191
++
++#define DEFAULT_MAX_PRS_CYC_LIM 0
++
++struct fman_prs_regs {
++ uint32_t fmpr_rpclim;
++ uint32_t fmpr_rpimac;
++ uint32_t pmeec;
++ uint32_t res00c[5];
++ uint32_t fmpr_pevr;
++ uint32_t fmpr_pever;
++ uint32_t res028;
++ uint32_t fmpr_perr;
++ uint32_t fmpr_perer;
++ uint32_t res034;
++ uint32_t res038[10];
++ uint32_t fmpr_ppsc;
++ uint32_t res064;
++ uint32_t fmpr_pds;
++ uint32_t fmpr_l2rrs;
++ uint32_t fmpr_l3rrs;
++ uint32_t fmpr_l4rrs;
++ uint32_t fmpr_srrs;
++ uint32_t fmpr_l2rres;
++ uint32_t fmpr_l3rres;
++ uint32_t fmpr_l4rres;
++ uint32_t fmpr_srres;
++ uint32_t fmpr_spcs;
++ uint32_t fmpr_spscs;
++ uint32_t fmpr_hxscs;
++ uint32_t fmpr_mrcs;
++ uint32_t fmpr_mwcs;
++ uint32_t fmpr_mrscs;
++ uint32_t fmpr_mwscs;
++ uint32_t fmpr_fcscs;
++};
++
++struct fman_prs_cfg {
++ uint32_t port_id_stat;
++ uint16_t max_prs_cyc_lim;
++ uint32_t prs_exceptions;
++};
++
++uint32_t fman_prs_get_err_event(struct fman_prs_regs *regs, uint32_t ev_mask);
++uint32_t fman_prs_get_err_ev_mask(struct fman_prs_regs *regs);
++void fman_prs_ack_err_event(struct fman_prs_regs *regs, uint32_t event);
++uint32_t fman_prs_get_expt_event(struct fman_prs_regs *regs, uint32_t ev_mask);
++uint32_t fman_prs_get_expt_ev_mask(struct fman_prs_regs *regs);
++void fman_prs_ack_expt_event(struct fman_prs_regs *regs, uint32_t event);
++void fman_prs_defconfig(struct fman_prs_cfg *cfg);
++int fman_prs_init(struct fman_prs_regs *regs, struct fman_prs_cfg *cfg);
++void fman_prs_enable(struct fman_prs_regs *regs);
++void fman_prs_disable(struct fman_prs_regs *regs);
++int fman_prs_is_enabled(struct fman_prs_regs *regs);
++void fman_prs_set_stst_port_msk(struct fman_prs_regs *regs, uint32_t pid_msk);
++void fman_prs_set_stst(struct fman_prs_regs *regs, bool enable);
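++
++/*
++ * Setup sketch (illustrative, not part of the SDK): the declarations above
++ * suggest a defconfig -> init -> enable sequence. `regs` is assumed to be
++ * an already mapped fman_prs_regs block, and a non-zero return from
++ * fman_prs_init() is assumed to mean failure.
++ */
++static inline int example_parser_setup(struct fman_prs_regs *regs)
++{
++ struct fman_prs_cfg cfg;
++
++ fman_prs_defconfig(&cfg);
++ cfg.max_prs_cyc_lim = PRS_MAX_CYCLE_LIMIT; /* cap parsing cycles */
++
++ if (fman_prs_init(regs, &cfg))
++ return -1;
++ fman_prs_enable(regs);
++ return fman_prs_is_enabled(regs) ? 0 : -1;
++}
++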
++#endif /* __FSL_FMAN_PRS_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_rtc.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_rtc.h
+new file mode 100755
+index 00000000..f6b69a1f
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_rtc.h
+@@ -0,0 +1,449 @@
++/*
++ * Copyright 2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_RTC_H
++#define __FSL_FMAN_RTC_H
++
++#include "common/general.h"
++
++/* FM RTC Registers definitions */
++#define FMAN_RTC_TMR_CTRL_ALMP1 0x80000000
++#define FMAN_RTC_TMR_CTRL_ALMP2 0x40000000
++#define FMAN_RTC_TMR_CTRL_FS 0x10000000
++#define FMAN_RTC_TMR_CTRL_PP1L 0x08000000
++#define FMAN_RTC_TMR_CTRL_PP2L 0x04000000
++#define FMAN_RTC_TMR_CTRL_TCLK_PERIOD_MASK 0x03FF0000
++#define FMAN_RTC_TMR_CTRL_FRD 0x00004000
++#define FMAN_RTC_TMR_CTRL_SLV 0x00002000
++#define FMAN_RTC_TMR_CTRL_ETEP1 0x00000100
++#define FMAN_RTC_TMR_CTRL_COPH 0x00000080
++#define FMAN_RTC_TMR_CTRL_CIPH 0x00000040
++#define FMAN_RTC_TMR_CTRL_TMSR 0x00000020
++#define FMAN_RTC_TMR_CTRL_DBG 0x00000010
++#define FMAN_RTC_TMR_CTRL_BYP 0x00000008
++#define FMAN_RTC_TMR_CTRL_TE 0x00000004
++#define FMAN_RTC_TMR_CTRL_CKSEL_OSC_CLK 0x00000003
++#define FMAN_RTC_TMR_CTRL_CKSEL_MAC_CLK 0x00000001
++#define FMAN_RTC_TMR_CTRL_CKSEL_EXT_CLK 0x00000000
++#define FMAN_RTC_TMR_CTRL_TCLK_PERIOD_SHIFT 16
++
++#define FMAN_RTC_TMR_TEVENT_ETS2 0x02000000
++#define FMAN_RTC_TMR_TEVENT_ETS1 0x01000000
++#define FMAN_RTC_TMR_TEVENT_ALM2 0x00020000
++#define FMAN_RTC_TMR_TEVENT_ALM1 0x00010000
++#define FMAN_RTC_TMR_TEVENT_PP1 0x00000080
++#define FMAN_RTC_TMR_TEVENT_PP2 0x00000040
++#define FMAN_RTC_TMR_TEVENT_PP3 0x00000020
++#define FMAN_RTC_TMR_TEVENT_ALL (FMAN_RTC_TMR_TEVENT_ETS2 |\
++ FMAN_RTC_TMR_TEVENT_ETS1 |\
++ FMAN_RTC_TMR_TEVENT_ALM2 |\
++ FMAN_RTC_TMR_TEVENT_ALM1 |\
++ FMAN_RTC_TMR_TEVENT_PP1 |\
++ FMAN_RTC_TMR_TEVENT_PP2 |\
++ FMAN_RTC_TMR_TEVENT_PP3)
++
++#define FMAN_RTC_TMR_PRSC_OCK_MASK 0x0000FFFF
++
++/**************************************************************************//**
++ @Description FM RTC Alarm Polarity Options.
++*//***************************************************************************/
++enum fman_rtc_alarm_polarity {
++ E_FMAN_RTC_ALARM_POLARITY_ACTIVE_HIGH, /**< Active-high output polarity */
++ E_FMAN_RTC_ALARM_POLARITY_ACTIVE_LOW /**< Active-low output polarity */
++};
++
++/**************************************************************************//**
++ @Description FM RTC Trigger Polarity Options.
++*//***************************************************************************/
++enum fman_rtc_trigger_polarity {
++ E_FMAN_RTC_TRIGGER_ON_RISING_EDGE, /**< Trigger on rising edge */
++ E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE /**< Trigger on falling edge */
++};
++
++/**************************************************************************//**
++ @Description IEEE1588 Timer Module FM RTC Optional Clock Sources.
++*//***************************************************************************/
++enum fman_src_clock {
++ E_FMAN_RTC_SOURCE_CLOCK_EXTERNAL, /**< external high precision timer
++ reference clock */
++ E_FMAN_RTC_SOURCE_CLOCK_SYSTEM, /**< MAC system clock */
++ E_FMAN_RTC_SOURCE_CLOCK_OSCILATOR /**< RTC clock oscillator */
++};
++
++/* RTC default values */
++#define DEFAULT_SRC_CLOCK E_FMAN_RTC_SOURCE_CLOCK_SYSTEM
++#define DEFAULT_INVERT_INPUT_CLK_PHASE FALSE
++#define DEFAULT_INVERT_OUTPUT_CLK_PHASE FALSE
++#define DEFAULT_ALARM_POLARITY E_FMAN_RTC_ALARM_POLARITY_ACTIVE_HIGH
++#define DEFAULT_TRIGGER_POLARITY E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE
++#define DEFAULT_PULSE_REALIGN FALSE
++
++#define FMAN_RTC_MAX_NUM_OF_ALARMS 3
++#define FMAN_RTC_MAX_NUM_OF_PERIODIC_PULSES 4
++#define FMAN_RTC_MAX_NUM_OF_EXT_TRIGGERS 3
++
++/**************************************************************************//**
++ @Description FM RTC timer alarm
++*//***************************************************************************/
++struct t_tmr_alarm{
++ uint32_t tmr_alarm_h; /**< Timer alarm high register */
++ uint32_t tmr_alarm_l; /**< Timer alarm low register */
++};
++
++/**************************************************************************//**
++ @Description FM RTC timer Ex trigger
++*//***************************************************************************/
++struct t_tmr_ext_trigger{
++ uint32_t tmr_etts_h; /**< External trigger time stamp high register */
++ uint32_t tmr_etts_l; /**< External trigger time stamp low register */
++};
++
++struct rtc_regs {
++ uint32_t tmr_id; /* 0x000 Module ID register */
++ uint32_t tmr_id2; /* 0x004 Controller ID register */
++ uint32_t reserved0008[30];
++ uint32_t tmr_ctrl; /* 0x0080 timer control register */
++ uint32_t tmr_tevent; /* 0x0084 timer event register */
++ uint32_t tmr_temask; /* 0x0088 timer event mask register */
++ uint32_t reserved008c[3];
++ uint32_t tmr_cnt_h; /* 0x0098 timer counter high register */
++ uint32_t tmr_cnt_l; /* 0x009c timer counter low register */
++ uint32_t tmr_add; /* 0x00a0 timer drift compensation addend register */
++ uint32_t tmr_acc; /* 0x00a4 timer accumulator register */
++ uint32_t tmr_prsc; /* 0x00a8 timer prescale */
++ uint32_t reserved00ac;
++ uint32_t tmr_off_h; /* 0x00b0 timer offset high */
++ uint32_t tmr_off_l; /* 0x00b4 timer offset low */
++ struct t_tmr_alarm tmr_alarm[FMAN_RTC_MAX_NUM_OF_ALARMS]; /* 0x00b8 timer
++ alarm */
++ uint32_t tmr_fiper[FMAN_RTC_MAX_NUM_OF_PERIODIC_PULSES]; /* 0x00d0 timer
++ fixed period interval */
++ struct t_tmr_ext_trigger tmr_etts[FMAN_RTC_MAX_NUM_OF_EXT_TRIGGERS];
++ /* 0x00e0 time stamp general purpose external */
++ uint32_t reserved00f0[4];
++};
++
++struct rtc_cfg {
++ enum fman_src_clock src_clk;
++ uint32_t ext_src_clk_freq;
++ uint32_t rtc_freq_hz;
++ bool timer_slave_mode;
++ bool invert_input_clk_phase;
++ bool invert_output_clk_phase;
++ uint32_t events_mask;
++ bool bypass; /**< Indicates if frequency compensation
++ is bypassed */
++ bool pulse_realign;
++ enum fman_rtc_alarm_polarity alarm_polarity[FMAN_RTC_MAX_NUM_OF_ALARMS];
++ enum fman_rtc_trigger_polarity trigger_polarity
++ [FMAN_RTC_MAX_NUM_OF_EXT_TRIGGERS];
++};
++
++/**
++ * fman_rtc_defconfig() - Get default RTC configuration
++ * @cfg: pointer to configuration structure.
++ *
++ * Call this function to obtain a default set of configuration values for
++ * initializing RTC. The user can overwrite any of the values before calling
++ * fman_rtc_init(), if specific configuration needs to be applied.
++ */
++void fman_rtc_defconfig(struct rtc_cfg *cfg);
++
++/**
++ * fman_rtc_get_events() - Get the events
++ * @regs: Pointer to RTC register block
++ *
++ * Returns: The events
++ */
++uint32_t fman_rtc_get_events(struct rtc_regs *regs);
++
++/**
++ * fman_rtc_get_interrupt_mask() - Get the events mask
++ * @regs: Pointer to RTC register block
++ *
++ * Returns: The events mask
++ */
++uint32_t fman_rtc_get_interrupt_mask(struct rtc_regs *regs);
++
++
++/**
++ * fman_rtc_set_interrupt_mask() - Set the events mask
++ * @regs: Pointer to RTC register block
++ * @mask: The mask to set
++ */
++void fman_rtc_set_interrupt_mask(struct rtc_regs *regs, uint32_t mask);
++
++/**
++ * fman_rtc_get_event() - Check if specific events occurred
++ * @regs: Pointer to RTC register block
++ * @ev_mask: a mask of the events to check
++ *
++ * Returns: 0 if the events did not occur. Non-zero if at least one occurred
++ */
++uint32_t fman_rtc_get_event(struct rtc_regs *regs, uint32_t ev_mask);
++
++/**
++ * fman_rtc_check_and_clear_event() - Clear all pending events
++ * @regs: Pointer to RTC register block
++ *
++ * Returns: A mask of the events that were cleared
++ */
++uint32_t fman_rtc_check_and_clear_event(struct rtc_regs *regs);
++
++/**
++ * fman_rtc_ack_event() - Clear events
++ * @regs: Pointer to RTC register block
++ * @events: The events to clear
++ */
++void fman_rtc_ack_event(struct rtc_regs *regs, uint32_t events);
++
++/**
++ * fman_rtc_enable_interupt() - Enable event interrupts
++ * @regs: Pointer to RTC register block
++ * @mask: The events whose interrupts to enable
++ */
++void fman_rtc_enable_interupt(struct rtc_regs *regs, uint32_t mask);
++
++/**
++ * fman_rtc_disable_interupt() - Disable event interrupts
++ * @regs: Pointer to RTC register block
++ * @mask: The events to disable
++ */
++void fman_rtc_disable_interupt(struct rtc_regs *regs, uint32_t mask);
++
++/**
++ * fman_rtc_get_timer_ctrl() - Get the control register
++ * @regs: Pointer to RTC register block
++ *
++ * Returns: The control register value
++ */
++uint32_t fman_rtc_get_timer_ctrl(struct rtc_regs *regs);
++
++/**
++ * fman_rtc_set_timer_ctrl() - Set timer control register
++ * @regs: Pointer to RTC register block
++ * @val: The value to set
++ */
++void fman_rtc_set_timer_ctrl(struct rtc_regs *regs, uint32_t val);
++
++/**
++ * fman_rtc_get_frequency_compensation() - Get the frequency compensation
++ * @regs: Pointer to RTC register block
++ *
++ * Returns: The frequency compensation value
++ */
++uint32_t fman_rtc_get_frequency_compensation(struct rtc_regs *regs);
++
++/**
++ * fman_rtc_set_frequency_compensation() - Set frequency compensation
++ * @regs: Pointer to RTC register block
++ * @val: The value to set
++ */
++void fman_rtc_set_frequency_compensation(struct rtc_regs *regs, uint32_t val);
++
++/**
++ * fman_rtc_get_trigger_stamp() - Get a trigger stamp
++ * @regs: Pointer to RTC register block
++ * @id: The id of the trigger stamp
++ *
++ * Returns: The time stamp
++ */
++uint64_t fman_rtc_get_trigger_stamp(struct rtc_regs *regs, int id);
++
++/**
++ * fman_rtc_set_timer_alarm_l() - Set timer alarm low register
++ * @regs: Pointer to RTC register block
++ * @index: The index of alarm to set
++ * @val: The value to set
++ */
++void fman_rtc_set_timer_alarm_l(struct rtc_regs *regs, int index,
++ uint32_t val);
++
++/**
++ * fman_rtc_set_timer_alarm() - Set timer alarm
++ * @regs: Pointer to RTC register block
++ * @index: The index of alarm to set
++ * @val: The value to set
++ */
++void fman_rtc_set_timer_alarm(struct rtc_regs *regs, int index, int64_t val);
++
++/**
++ * fman_rtc_set_timer_fiper() - Set timer fiper
++ * @regs: Pointer to RTC register block
++ * @index: The index of fiper to set
++ * @val: The value to set
++ */
++void fman_rtc_set_timer_fiper(struct rtc_regs *regs, int index, uint32_t val);
++
++/**
++ * fman_rtc_set_timer_offset() - Set timer offset
++ * @regs: Pointer to RTC register block
++ * @val: The value to set
++ */
++void fman_rtc_set_timer_offset(struct rtc_regs *regs, int64_t val);
++
++/**
++ * fman_rtc_get_timer() - Get the timer counter
++ * @regs: Pointer to RTC register block
++ *
++ * Returns: The timer counter
++ */
++static inline uint64_t fman_rtc_get_timer(struct rtc_regs *regs)
++{
++ uint64_t time;
++ /* TMR_CNT_L must be read first to get an accurate value */
++ time = (uint64_t)ioread32be(&regs->tmr_cnt_l);
++ time |= ((uint64_t)ioread32be(&regs->tmr_cnt_h) << 32);
++
++ return time;
++}
++
++/**
++ * fman_rtc_set_timer() - Set timer counter
++ * @regs: Pointer to RTC register block
++ * @val: The value to set
++ */
++static inline void fman_rtc_set_timer(struct rtc_regs *regs, int64_t val)
++{
++ iowrite32be((uint32_t)val, &regs->tmr_cnt_l);
++ iowrite32be((uint32_t)(val >> 32), &regs->tmr_cnt_h);
++}
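++
++/*
++ * Illustrative helper (not part of the flib): step the free-running counter
++ * by a signed delta using the two accessors above. This read-modify-write
++ * is not atomic while the clock runs; real code would likely use the offset
++ * registers (tmr_off_h/l) instead.
++ */
++static inline void example_rtc_step_timer(struct rtc_regs *regs, int64_t delta)
++{
++ fman_rtc_set_timer(regs, (int64_t)fman_rtc_get_timer(regs) + delta);
++}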
++
++/**
++ * fman_rtc_timers_soft_reset() - Soft reset
++ * @regs: Pointer to RTC register block
++ *
++ * Resets all the timer registers and state machines for the 1588 IP and
++ * the attached 1588 client
++ */
++void fman_rtc_timers_soft_reset(struct rtc_regs *regs);
++
++/**
++ * fman_rtc_clear_external_trigger() - Clear an external trigger
++ * @regs: Pointer to RTC register block
++ * @id: The id of the trigger to clear
++ */
++void fman_rtc_clear_external_trigger(struct rtc_regs *regs, int id);
++
++/**
++ * fman_rtc_clear_periodic_pulse() - Clear periodic pulse
++ * @regs: Pointer to RTC register block
++ * @id: The id of the fiper to clear
++ */
++void fman_rtc_clear_periodic_pulse(struct rtc_regs *regs, int id);
++
++/**
++ * fman_rtc_enable() - Enable RTC hardware block
++ * @regs: Pointer to RTC register block
++ * @reset_clock: TRUE to also reset the timer counter when enabling
++ */
++void fman_rtc_enable(struct rtc_regs *regs, bool reset_clock);
++
++/**
++ * fman_rtc_is_enabled() - Is RTC hardware block enabled
++ * @regs: Pointer to RTC register block
++ *
++ * Return: TRUE if enabled
++ */
++bool fman_rtc_is_enabled(struct rtc_regs *regs);
++
++/**
++ * fman_rtc_disable() - Disable RTC hardware block
++ * @regs: Pointer to RTC register block
++ */
++void fman_rtc_disable(struct rtc_regs *regs);
++
++/**
++ * fman_rtc_init() - Init RTC hardware block
++ * @cfg: RTC configuration data
++ * @regs: Pointer to RTC register block
++ * @num_alarms: Number of alarms in RTC
++ * @num_fipers: Number of fipers in RTC
++ * @num_ext_triggers: Number of external triggers in RTC
++ * @init_freq_comp: Whether to program the initial frequency compensation
++ * @freq_compensation: Frequency compensation
++ * @output_clock_divisor: Output clock divisor
++ *
++ * This function initializes RTC and applies basic configuration.
++ */
++void fman_rtc_init(struct rtc_cfg *cfg, struct rtc_regs *regs, int num_alarms,
++ int num_fipers, int num_ext_triggers, bool init_freq_comp,
++ uint32_t freq_compensation, uint32_t output_clock_divisor);
++
++/**
++ * fman_rtc_set_alarm() - Set an alarm
++ * @regs: Pointer to RTC register block
++ * @id: id of alarm
++ * @val: value to write
++ * @enable: should interrupt be enabled
++ */
++void fman_rtc_set_alarm(struct rtc_regs *regs, int id, uint32_t val, bool enable);
++
++/**
++ * fman_rtc_set_periodic_pulse() - Set a periodic pulse
++ * @regs: Pointer to RTC register block
++ * @id: id of fiper
++ * @val: value to write
++ * @enable: should interrupt be enabled
++ */
++void fman_rtc_set_periodic_pulse(struct rtc_regs *regs, int id, uint32_t val,
++ bool enable);
++
++/**
++ * fman_rtc_set_ext_trigger() - Set an external trigger
++ * @regs: Pointer to RTC register block
++ * @id: id of trigger
++ * @enable: should interrupt be enabled
++ * @use_pulse_as_input: use the pulse as input
++ */
++void fman_rtc_set_ext_trigger(struct rtc_regs *regs, int id, bool enable,
++ bool use_pulse_as_input);
++
++struct fm_rtc_alarm_params {
++ uint8_t alarm_id; /**< 0 or 1 */
++ uint64_t alarm_time; /**< In nanoseconds, the time when the
++ alarm should go off - must be a
++ multiple of the RTC period */
++ void (*f_alarm_callback)(void* app, uint8_t id); /**< This routine will
++ be called when the RTC reaches alarm_time */
++ bool clear_on_expiration; /**< TRUE to turn off the alarm once
++ expired.*/
++};
++
++struct fm_rtc_periodic_pulse_params {
++ uint8_t periodic_pulse_id; /**< 0 or 1 */
++ uint64_t periodic_pulse_period; /**< In Nanoseconds. Must be a multiple
++ of the RTC period */
++ void (*f_periodic_pulse_callback)(void* app, uint8_t id); /**< This
++ routine will be called every
++ periodic_pulse_period. */
++};
++
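++/*
++ * Usage sketch (illustrative only): arm alarm 0 one second ahead of the
++ * current count. Assumes the timer was configured to count nanoseconds
++ * (see fm_rtc_alarm_params above) and that alarm index 0 raises the ALM1
++ * event.
++ */
++static inline void example_arm_alarm_1s(struct rtc_regs *regs)
++{
++ uint64_t now = fman_rtc_get_timer(regs);
++
++ fman_rtc_set_timer_alarm(regs, 0, (int64_t)(now + 1000000000ULL));
++ fman_rtc_enable_interupt(regs, FMAN_RTC_TMR_TEVENT_ALM1);
++}
++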
++#endif /* __FSL_FMAN_RTC_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_sp.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_sp.h
+new file mode 100755
+index 00000000..f8ef7d56
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_sp.h
+@@ -0,0 +1,138 @@
++/*
++ * Copyright 2013 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_SP_H
++#define __FSL_FMAN_SP_H
++
++#include "common/general.h"
++#include "fsl_fman.h"
++
++
++struct fm_pcd_storage_profile_regs{
++ uint32_t fm_sp_ebmpi[8];
++ /*offset 0x00 - 0x1c*/
++ /**< Buffer Manager pool Information */
++
++ uint32_t fm_sp_acnt; /*offset 0x20*/
++ uint32_t fm_sp_ebm; /*offset 0x24*/
++ uint32_t fm_sp_da; /*offset 0x28*/
++ uint32_t fm_sp_icp; /*offset 0x2c*/
++ uint32_t fm_sp_mpd; /*offset 0x30*/
++ uint32_t res1[2]; /*offset 0x34 - 0x38*/
++ uint32_t fm_sp_spliodn; /*offset 0x3c*/
++};
++
++/**************************************************************************//**
++ @Description structure for defining internal context copying
++*//***************************************************************************/
++struct fman_sp_int_context_data_copy{
++ uint16_t ext_buf_offset; /**< Offset in External buffer to which
++ internal context is copied to (Rx)
++ or taken from (Tx, Op). */
++ uint8_t int_context_offset; /**< Offset within internal context to copy
++ from (Rx) or to copy to (Tx, Op).*/
++ uint16_t size; /**< Internal offset size to be copied */
++};
++
++/**************************************************************************//**
++ @Description struct for defining external buffer margins
++*//***************************************************************************/
++struct fman_sp_buf_margins{
++ uint16_t start_margins; /**< Number of bytes to be left at the
++ beginning of the external buffer (must be
++ divisible by 16) */
++ uint16_t end_margins; /**< number of bytes to be left at the end of
++ the external buffer(must be divisible by 16)*/
++};
++
++struct fm_storage_profile_params {
++ struct fman_ext_pools fm_ext_pools;
++ struct fman_backup_bm_pools backup_pools;
++ struct fman_sp_int_context_data_copy *int_context;
++ struct fman_sp_buf_margins *buf_margins;
++ enum fman_dma_swap_option dma_swap_data;
++ enum fman_dma_cache_option int_context_cache_attr;
++ enum fman_dma_cache_option header_cache_attr;
++ enum fman_dma_cache_option scatter_gather_cache_attr;
++ bool dma_write_optimize;
++ uint16_t liodn_offset;
++ bool no_scather_gather;
++ struct fman_buf_pool_depletion buf_pool_depletion;
++};
++
++/**************************************************************************//**
++ @Description Registers bit fields
++*//***************************************************************************/
++#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER 0x40000000
++#define FMAN_SP_EXT_BUF_POOL_VALID 0x80000000
++#define FMAN_SP_EXT_BUF_POOL_BACKUP 0x20000000
++#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE 0x00100000
++#define FMAN_SP_SG_DISABLE 0x80000000
++
++/* shifts */
++#define FMAN_SP_EXT_BUF_POOL_ID_SHIFT 16
++#define FMAN_SP_POOL_DEP_NUM_OF_POOLS_SHIFT 16
++#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
++#define FMAN_SP_EXT_BUF_MARG_END_SHIFT 0
++#define FMAN_SP_DMA_ATTR_SWP_SHIFT 30
++#define FMAN_SP_DMA_ATTR_IC_CACHE_SHIFT 28
++#define FMAN_SP_DMA_ATTR_HDR_CACHE_SHIFT 26
++#define FMAN_SP_DMA_ATTR_SG_CACHE_SHIFT 24
++#define FMAN_SP_IC_TO_EXT_SHIFT 16
++#define FMAN_SP_IC_FROM_INT_SHIFT 8
++#define FMAN_SP_IC_SIZE_SHIFT 0
++
++/**************************************************************************//**
++ @Description defaults
++*//***************************************************************************/
++#define DEFAULT_FMAN_SP_DMA_SWAP_DATA FMAN_DMA_NO_SWP
++#define DEFAULT_FMAN_SP_DMA_INT_CONTEXT_CACHE_ATTR FMAN_DMA_NO_STASH
++#define DEFAULT_FMAN_SP_DMA_HEADER_CACHE_ATTR FMAN_DMA_NO_STASH
++#define DEFAULT_FMAN_SP_DMA_SCATTER_GATHER_CACHE_ATTR FMAN_DMA_NO_STASH
++#define DEFAULT_FMAN_SP_DMA_WRITE_OPTIMIZE TRUE
++#define DEFAULT_FMAN_SP_NO_SCATTER_GATHER FALSE
++
++void fman_vsp_defconfig(struct fm_storage_profile_params *cfg);
++
++void fman_vsp_init(struct fm_pcd_storage_profile_regs *regs,
++ uint16_t index, struct fm_storage_profile_params *fm_vsp_params,
++ int port_max_num_of_ext_pools, int bm_max_num_of_pools,
++ int max_num_of_pfc_priorities);
++
++uint32_t fman_vsp_get_statistics(struct fm_pcd_storage_profile_regs *regs,
++ uint16_t index);
++
++void fman_vsp_set_statistics(struct fm_pcd_storage_profile_regs *regs,
++ uint16_t index, uint32_t value);
++
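++/*
++ * Field-packing sketch (illustrative): the FMAN_SP_IC_* shifts above pack
++ * the internal-context copy parameters into one register word (this
++ * appears to correspond to fm_sp_icp). The offsets and size are assumed
++ * to already be in the units the hardware expects.
++ */
++static inline uint32_t example_pack_int_context(uint16_t to_ext,
++ uint8_t from_int, uint16_t size)
++{
++ return ((uint32_t)to_ext << FMAN_SP_IC_TO_EXT_SHIFT) |
++ ((uint32_t)from_int << FMAN_SP_IC_FROM_INT_SHIFT) |
++ ((uint32_t)size << FMAN_SP_IC_SIZE_SHIFT);
++}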
++
++#endif /* __FSL_FMAN_SP_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_tgec.h b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_tgec.h
+new file mode 100644
+index 00000000..a0373141
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_tgec.h
+@@ -0,0 +1,479 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __FSL_FMAN_TGEC_H
++#define __FSL_FMAN_TGEC_H
++
++#include "common/general.h"
++#include "fsl_enet.h"
++
++
++/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
++#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
++
++enum tgec_counters {
++ E_TGEC_COUNTER_R64,
++ E_TGEC_COUNTER_R127,
++ E_TGEC_COUNTER_R255,
++ E_TGEC_COUNTER_R511,
++ E_TGEC_COUNTER_R1023,
++ E_TGEC_COUNTER_R1518,
++ E_TGEC_COUNTER_R1519X,
++ E_TGEC_COUNTER_TRFRG,
++ E_TGEC_COUNTER_TRJBR,
++ E_TGEC_COUNTER_RDRP,
++ E_TGEC_COUNTER_RALN,
++ E_TGEC_COUNTER_TRUND,
++ E_TGEC_COUNTER_TROVR,
++ E_TGEC_COUNTER_RXPF,
++ E_TGEC_COUNTER_TXPF,
++ E_TGEC_COUNTER_ROCT,
++ E_TGEC_COUNTER_RMCA,
++ E_TGEC_COUNTER_RBCA,
++ E_TGEC_COUNTER_RPKT,
++ E_TGEC_COUNTER_RUCA,
++ E_TGEC_COUNTER_RERR,
++ E_TGEC_COUNTER_TOCT,
++ E_TGEC_COUNTER_TMCA,
++ E_TGEC_COUNTER_TBCA,
++ E_TGEC_COUNTER_TUCA,
++ E_TGEC_COUNTER_TERR
++};
++
++/* Command and Configuration Register (COMMAND_CONFIG) */
++#define CMD_CFG_EN_TIMESTAMP 0x00100000
++#define CMD_CFG_TX_ADDR_INS_SEL 0x00080000
++#define CMD_CFG_NO_LEN_CHK 0x00020000
++#define CMD_CFG_SEND_IDLE 0x00010000
++#define CMD_CFG_RX_ER_DISC 0x00004000
++#define CMD_CFG_CMD_FRM_EN 0x00002000
++#define CMD_CFG_STAT_CLR 0x00001000
++#define CMD_CFG_LOOPBACK_EN 0x00000400
++#define CMD_CFG_TX_ADDR_INS 0x00000200
++#define CMD_CFG_PAUSE_IGNORE 0x00000100
++#define CMD_CFG_PAUSE_FWD 0x00000080
++#define CMD_CFG_PROMIS_EN 0x00000010
++#define CMD_CFG_WAN_MODE 0x00000008
++#define CMD_CFG_RX_EN 0x00000002
++#define CMD_CFG_TX_EN 0x00000001
++
++/* Interrupt Mask Register (IMASK) */
++#define TGEC_IMASK_MDIO_SCAN_EVENT 0x00010000
++#define TGEC_IMASK_MDIO_CMD_CMPL 0x00008000
++#define TGEC_IMASK_REM_FAULT 0x00004000
++#define TGEC_IMASK_LOC_FAULT 0x00002000
++#define TGEC_IMASK_TX_ECC_ER 0x00001000
++#define TGEC_IMASK_TX_FIFO_UNFL 0x00000800
++#define TGEC_IMASK_TX_FIFO_OVFL 0x00000400
++#define TGEC_IMASK_TX_ER 0x00000200
++#define TGEC_IMASK_RX_FIFO_OVFL 0x00000100
++#define TGEC_IMASK_RX_ECC_ER 0x00000080
++#define TGEC_IMASK_RX_JAB_FRM 0x00000040
++#define TGEC_IMASK_RX_OVRSZ_FRM 0x00000020
++#define TGEC_IMASK_RX_RUNT_FRM 0x00000010
++#define TGEC_IMASK_RX_FRAG_FRM 0x00000008
++#define TGEC_IMASK_RX_LEN_ER 0x00000004
++#define TGEC_IMASK_RX_CRC_ER 0x00000002
++#define TGEC_IMASK_RX_ALIGN_ER 0x00000001
++
++#define TGEC_EVENTS_MASK \
++ ((uint32_t)(TGEC_IMASK_MDIO_SCAN_EVENT | \
++ TGEC_IMASK_MDIO_CMD_CMPL | \
++ TGEC_IMASK_REM_FAULT | \
++ TGEC_IMASK_LOC_FAULT | \
++ TGEC_IMASK_TX_ECC_ER | \
++ TGEC_IMASK_TX_FIFO_UNFL | \
++ TGEC_IMASK_TX_FIFO_OVFL | \
++ TGEC_IMASK_TX_ER | \
++ TGEC_IMASK_RX_FIFO_OVFL | \
++ TGEC_IMASK_RX_ECC_ER | \
++ TGEC_IMASK_RX_JAB_FRM | \
++ TGEC_IMASK_RX_OVRSZ_FRM | \
++ TGEC_IMASK_RX_RUNT_FRM | \
++ TGEC_IMASK_RX_FRAG_FRM | \
++ TGEC_IMASK_RX_LEN_ER | \
++ TGEC_IMASK_RX_CRC_ER | \
++ TGEC_IMASK_RX_ALIGN_ER))
++
++/* Hashtable Control Register (HASHTABLE_CTRL) */
++#define TGEC_HASH_MCAST_SHIFT 23
++#define TGEC_HASH_MCAST_EN 0x00000200
++#define TGEC_HASH_ADR_MSK 0x000001ff
++
++#define DEFAULT_WAN_MODE_ENABLE FALSE
++#define DEFAULT_PROMISCUOUS_MODE_ENABLE FALSE
++#define DEFAULT_PAUSE_FORWARD_ENABLE FALSE
++#define DEFAULT_PAUSE_IGNORE FALSE
++#define DEFAULT_TX_ADDR_INS_ENABLE FALSE
++#define DEFAULT_LOOPBACK_ENABLE FALSE
++#define DEFAULT_CMD_FRAME_ENABLE FALSE
++#define DEFAULT_RX_ERROR_DISCARD FALSE
++#define DEFAULT_SEND_IDLE_ENABLE FALSE
++#define DEFAULT_NO_LENGTH_CHECK_ENABLE TRUE
++#define DEFAULT_LGTH_CHECK_NOSTDR FALSE
++#define DEFAULT_TIME_STAMP_ENABLE FALSE
++#define DEFAULT_TX_IPG_LENGTH 12
++#define DEFAULT_MAX_FRAME_LENGTH 0x600
++#define DEFAULT_PAUSE_QUANT 0xf000
++
++/*
++ * 10G memory map
++ */
++struct tgec_regs {
++ uint32_t tgec_id; /* 0x000 Controller ID */
++ uint32_t reserved001[1]; /* 0x004 */
++ uint32_t command_config; /* 0x008 Control and configuration */
++ uint32_t mac_addr_0; /* 0x00c Lower 32 bits of the MAC adr */
++ uint32_t mac_addr_1; /* 0x010 Upper 16 bits of the MAC adr */
++ uint32_t maxfrm; /* 0x014 Maximum frame length */
++ uint32_t pause_quant; /* 0x018 Pause quanta */
++ uint32_t rx_fifo_sections; /* 0x01c */
++ uint32_t tx_fifo_sections; /* 0x020 */
++ uint32_t rx_fifo_almost_f_e; /* 0x024 */
++ uint32_t tx_fifo_almost_f_e; /* 0x028 */
++	uint32_t hashtable_ctrl;	/* 0x02c Hash table control */
++ uint32_t mdio_cfg_status; /* 0x030 */
++ uint32_t mdio_command; /* 0x034 */
++ uint32_t mdio_data; /* 0x038 */
++ uint32_t mdio_regaddr; /* 0x03c */
++ uint32_t status; /* 0x040 */
++ uint32_t tx_ipg_len; /* 0x044 Transmitter inter-packet-gap */
++ uint32_t mac_addr_2; /* 0x048 Lower 32 bits of 2nd MAC adr */
++ uint32_t mac_addr_3; /* 0x04c Upper 16 bits of 2nd MAC adr */
++ uint32_t rx_fifo_ptr_rd; /* 0x050 */
++ uint32_t rx_fifo_ptr_wr; /* 0x054 */
++ uint32_t tx_fifo_ptr_rd; /* 0x058 */
++ uint32_t tx_fifo_ptr_wr; /* 0x05c */
++ uint32_t imask; /* 0x060 Interrupt mask */
++ uint32_t ievent; /* 0x064 Interrupt event */
++ uint32_t udp_port; /* 0x068 Defines a UDP Port number */
++ uint32_t type_1588v2; /* 0x06c Type field for 1588v2 */
++ uint32_t reserved070[4]; /* 0x070 */
++	/* 10GE statistics counters */
++ uint32_t tfrm_u; /* 80 aFramesTransmittedOK */
++ uint32_t tfrm_l; /* 84 aFramesTransmittedOK */
++ uint32_t rfrm_u; /* 88 aFramesReceivedOK */
++ uint32_t rfrm_l; /* 8c aFramesReceivedOK */
++ uint32_t rfcs_u; /* 90 aFrameCheckSequenceErrors */
++ uint32_t rfcs_l; /* 94 aFrameCheckSequenceErrors */
++ uint32_t raln_u; /* 98 aAlignmentErrors */
++ uint32_t raln_l; /* 9c aAlignmentErrors */
++ uint32_t txpf_u; /* A0 aPAUSEMACCtrlFramesTransmitted */
++ uint32_t txpf_l; /* A4 aPAUSEMACCtrlFramesTransmitted */
++ uint32_t rxpf_u; /* A8 aPAUSEMACCtrlFramesReceived */
++ uint32_t rxpf_l; /* Ac aPAUSEMACCtrlFramesReceived */
++ uint32_t rlong_u; /* B0 aFrameTooLongErrors */
++ uint32_t rlong_l; /* B4 aFrameTooLongErrors */
++ uint32_t rflr_u; /* B8 aInRangeLengthErrors */
++ uint32_t rflr_l; /* Bc aInRangeLengthErrors */
++ uint32_t tvlan_u; /* C0 VLANTransmittedOK */
++ uint32_t tvlan_l; /* C4 VLANTransmittedOK */
++ uint32_t rvlan_u; /* C8 VLANReceivedOK */
++ uint32_t rvlan_l; /* Cc VLANReceivedOK */
++ uint32_t toct_u; /* D0 ifOutOctets */
++ uint32_t toct_l; /* D4 ifOutOctets */
++ uint32_t roct_u; /* D8 ifInOctets */
++ uint32_t roct_l; /* Dc ifInOctets */
++ uint32_t ruca_u; /* E0 ifInUcastPkts */
++ uint32_t ruca_l; /* E4 ifInUcastPkts */
++ uint32_t rmca_u; /* E8 ifInMulticastPkts */
++ uint32_t rmca_l; /* Ec ifInMulticastPkts */
++ uint32_t rbca_u; /* F0 ifInBroadcastPkts */
++ uint32_t rbca_l; /* F4 ifInBroadcastPkts */
++ uint32_t terr_u; /* F8 ifOutErrors */
++ uint32_t terr_l; /* Fc ifOutErrors */
++	uint32_t reserved100[2];	/* 100-107 */
++ uint32_t tuca_u; /* 108 ifOutUcastPkts */
++ uint32_t tuca_l; /* 10c ifOutUcastPkts */
++ uint32_t tmca_u; /* 110 ifOutMulticastPkts */
++ uint32_t tmca_l; /* 114 ifOutMulticastPkts */
++ uint32_t tbca_u; /* 118 ifOutBroadcastPkts */
++ uint32_t tbca_l; /* 11c ifOutBroadcastPkts */
++ uint32_t rdrp_u; /* 120 etherStatsDropEvents */
++ uint32_t rdrp_l; /* 124 etherStatsDropEvents */
++ uint32_t reoct_u; /* 128 etherStatsOctets */
++ uint32_t reoct_l; /* 12c etherStatsOctets */
++ uint32_t rpkt_u; /* 130 etherStatsPkts */
++ uint32_t rpkt_l; /* 134 etherStatsPkts */
++ uint32_t trund_u; /* 138 etherStatsUndersizePkts */
++ uint32_t trund_l; /* 13c etherStatsUndersizePkts */
++ uint32_t r64_u; /* 140 etherStatsPkts64Octets */
++ uint32_t r64_l; /* 144 etherStatsPkts64Octets */
++ uint32_t r127_u; /* 148 etherStatsPkts65to127Octets */
++ uint32_t r127_l; /* 14c etherStatsPkts65to127Octets */
++ uint32_t r255_u; /* 150 etherStatsPkts128to255Octets */
++ uint32_t r255_l; /* 154 etherStatsPkts128to255Octets */
++ uint32_t r511_u; /* 158 etherStatsPkts256to511Octets */
++ uint32_t r511_l; /* 15c etherStatsPkts256to511Octets */
++ uint32_t r1023_u; /* 160 etherStatsPkts512to1023Octets */
++ uint32_t r1023_l; /* 164 etherStatsPkts512to1023Octets */
++ uint32_t r1518_u; /* 168 etherStatsPkts1024to1518Octets */
++ uint32_t r1518_l; /* 16c etherStatsPkts1024to1518Octets */
++ uint32_t r1519x_u; /* 170 etherStatsPkts1519toX */
++ uint32_t r1519x_l; /* 174 etherStatsPkts1519toX */
++ uint32_t trovr_u; /* 178 etherStatsOversizePkts */
++ uint32_t trovr_l; /* 17c etherStatsOversizePkts */
++ uint32_t trjbr_u; /* 180 etherStatsJabbers */
++ uint32_t trjbr_l; /* 184 etherStatsJabbers */
++ uint32_t trfrg_u; /* 188 etherStatsFragments */
++ uint32_t trfrg_l; /* 18C etherStatsFragments */
++ uint32_t rerr_u; /* 190 ifInErrors */
++ uint32_t rerr_l; /* 194 ifInErrors */
++};
++
++/**
++ * struct tgec_cfg - TGEC configuration
++ *
++ * @rx_error_discard: Receive Erroneous Frame Discard Enable. When set to 1
++ * any frame received with an error is discarded in the
++ * Core and not forwarded to the Client interface.
++ * When set to 0 (Reset value), erroneous Frames are
++ * forwarded to the Client interface with ff_rx_err
++ * asserted.
++ * @pause_ignore: Ignore Pause Frame Quanta. If set to 1 received pause
++ * frames are ignored by the MAC. When set to 0
++ * (Reset value) the transmit process is stopped for the
++ * amount of time specified in the pause quanta received
++ * within a pause frame.
++ * @pause_forward_enable:
++ * Terminate / Forward Pause Frames. If set to 1 pause
++ * frames are forwarded to the user application. When set
++ * to 0 (Reset value) pause frames are terminated and
++ * discarded within the MAC.
++ * @no_length_check_enable:
++ * Payload Length Check Disable. When set to 0
++ * (Reset value), the Core checks the frame's payload
++ * length with the Frame Length/Type field, when set to 1
++ * the payload length check is disabled.
++ * @cmd_frame_enable: Enables reception of all command frames. When set to 1
++ * all Command Frames are accepted, when set to 0
++ * (Reset Value) only Pause Frames are accepted and all
++ * other Command Frames are rejected.
++ * @send_idle_enable: Force Idle Generation. When set to 1, the MAC
++ * permanently sends XGMII Idle sequences even when faults
++ * are received.
++ * @wan_mode_enable: WAN Mode Enable. Sets WAN mode (1) or LAN mode
++ * (0, default) of operation.
++ * @promiscuous_mode_enable:
++ * Enables MAC promiscuous operation. When set to 1, all
++ * frames are received without any MAC address filtering,
++ * when set to 0 (Reset value) Unicast Frames with a
++ * destination address not matching the Core MAC Address
++ * (MAC Address programmed in Registers MAC_ADDR_0 and
++ * MAC_ADDR_1 or the MAC address programmed in Registers
++ * MAC_ADDR_2 and MAC_ADDR_3) are rejected.
++ * @tx_addr_ins_enable: Set Source MAC Address on Transmit. If set to 1 the
++ * MAC overwrites the source MAC address received from the
++ * Client Interface with one of the MAC addresses. If set
++ * to 0 (Reset value), the source MAC address from the
++ * Client Interface is transmitted unmodified to the line.
++ * @loopback_enable: PHY Interface Loopback. When set to 1, the signal
++ * loop_ena is set to '1', when set to 0 (Reset value)
++ * the signal loop_ena is set to 0.
++ * @lgth_check_nostdr: The Core interprets the Length/Type field differently
++ *			depending on the value of this bit.
++ * @time_stamp_enable: This bit selects between enabling and disabling the
++ * IEEE 1588 functionality. 1: IEEE 1588 is enabled
++ * 0: IEEE 1588 is disabled
++ * @max_frame_length: Maximum supported received frame length.
++ * The 10GEC MAC supports reception of any frame size up
++ * to 16,352 bytes (0x3FE0). Typical settings are
++ * 0x05EE (1,518 bytes) for standard frames.
++ * Default setting is 0x0600 (1,536 bytes).
++ * Received frames that exceed this stated maximum
++ * are truncated.
++ * @pause_quant: Pause quanta value used with transmitted pause frames.
++ *			Each quantum represents 512 bit-times.
++ * @tx_ipg_length: Transmit Inter-Packet-Gap (IPG) value. A 6-bit value:
++ * Depending on LAN or WAN mode of operation the value has
++ * the following meaning: - LAN Mode: Number of octets in
++ * steps of 4. Valid values are 8, 12, 16, ... 100. DIC is
++ * fully supported (see 10.6.1 page 49) for any setting. A
++ * default of 12 (reset value) must be set to conform to
++ *			IEEE 802.3ae. Warning: When set to 8, PCS layers may not
++ * be able to perform clock rate compensation. - WAN Mode:
++ * Stretch factor. Valid values are 4..15. The stretch
++ * factor is calculated as (value+1)*8. A default of 12
++ * (reset value) must be set to conform to IEEE 802.3ae
++ * (i.e. 13*8=104). A larger value shrinks the IPG
++ * (increasing bandwidth).
++ *
++ * This structure contains basic TGEC configuration and must be passed to
++ * fman_tgec_init() function. A default set of configuration values can be
++ * obtained by calling fman_tgec_defconfig().
++ */
++struct tgec_cfg {
++ bool rx_error_discard;
++ bool pause_ignore;
++ bool pause_forward_enable;
++ bool no_length_check_enable;
++ bool cmd_frame_enable;
++ bool send_idle_enable;
++ bool wan_mode_enable;
++ bool promiscuous_mode_enable;
++ bool tx_addr_ins_enable;
++ bool loopback_enable;
++ bool lgth_check_nostdr;
++ bool time_stamp_enable;
++ uint16_t max_frame_length;
++ uint16_t pause_quant;
++ uint32_t tx_ipg_length;
++ bool skip_fman11_workaround;
++};
++
++
++void fman_tgec_defconfig(struct tgec_cfg *cfg);
++
++/**
++ * fman_tgec_init() - Init tgec hardware block
++ * @regs: Pointer to tgec register block
++ * @cfg: tgec configuration data
++ * @exception_mask: initial exceptions mask
++ *
++ * This function initializes the tgec controller and applies its
++ * basic configuration.
++ *
++ * Returns: 0 if successful, an error code otherwise.
++ */
++int fman_tgec_init(struct tgec_regs *regs, struct tgec_cfg *cfg,
++ uint32_t exception_mask);
++
++void fman_tgec_enable(struct tgec_regs *regs, bool apply_rx, bool apply_tx);
++
++void fman_tgec_disable(struct tgec_regs *regs, bool apply_rx, bool apply_tx);
++
++uint32_t fman_tgec_get_revision(struct tgec_regs *regs);
++
++void fman_tgec_set_mac_address(struct tgec_regs *regs, uint8_t *macaddr);
++
++void fman_tgec_set_promiscuous(struct tgec_regs *regs, bool val);
++
++/**
++ * fman_tgec_reset_stat() - Completely resets all TGEC HW counters
++ * @regs: Pointer to TGEC register block
++ */
++void fman_tgec_reset_stat(struct tgec_regs *regs);
++
++/**
++ * fman_tgec_get_counter() - Reads TGEC HW counters
++ * @regs: Pointer to TGEC register block
++ * @reg_name: Counter name according to the appropriate enum
++ *
++ * Returns: Required counter value
++ */
++uint64_t fman_tgec_get_counter(struct tgec_regs *regs,
++ enum tgec_counters reg_name);
++
++/**
++ * fman_tgec_set_hash_table() - Sets the Hashtable Control Register
++ * @regs: Pointer to TGEC register block
++ * @value: Value to be written in Hashtable Control Register
++ */
++void fman_tgec_set_hash_table(struct tgec_regs *regs, uint32_t value);
++
++/**
++ * fman_tgec_set_tx_pause_frames() - Sets the Pause Quanta Register
++ * @regs: Pointer to TGEC register block
++ * @pause_time: Pause quanta value used with transmitted pause frames.
++ *		Each quantum represents 512 bit-times.
++ */
++void fman_tgec_set_tx_pause_frames(struct tgec_regs *regs, uint16_t pause_time);
++
++/**
++ * fman_tgec_set_rx_ignore_pause_frames() - Changes the policy WRT pause frames
++ * @regs: Pointer to TGEC register block
++ * @en: Ignore/Respond to pause frame quanta
++ *
++ * Sets the value of PAUSE_IGNORE field in the COMMAND_CONFIG Register
++ * 0 - MAC stops transmit process for the duration specified
++ * in the Pause frame quanta of a received Pause frame.
++ * 1 - MAC ignores received Pause frames.
++ */
++void fman_tgec_set_rx_ignore_pause_frames(struct tgec_regs *regs, bool en);
++
++/**
++ * fman_tgec_enable_1588_time_stamp() - change timestamp functionality
++ * @regs: Pointer to TGEC register block
++ * @en: enable/disable timestamp functionality
++ *
++ * Sets the value of EN_TIMESTAMP field in the COMMAND_CONFIG Register
++ * IEEE 1588 timestamp functionality control:
++ * 0 disabled, 1 enabled
++ */
++void fman_tgec_enable_1588_time_stamp(struct tgec_regs *regs, bool en);
++
++uint32_t fman_tgec_get_event(struct tgec_regs *regs, uint32_t ev_mask);
++
++void fman_tgec_ack_event(struct tgec_regs *regs, uint32_t ev_mask);
++
++uint32_t fman_tgec_get_interrupt_mask(struct tgec_regs *regs);
++
++/**
++ * fman_tgec_add_addr_in_paddr() - Sets additional exact match MAC address
++ * @regs: Pointer to TGEC register block
++ * @addr_ptr: Pointer to 6-byte array containing the MAC address
++ *
++ * Sets the additional station MAC address
++ */
++void fman_tgec_add_addr_in_paddr(struct tgec_regs *regs, uint8_t *addr_ptr);
++
++void fman_tgec_clear_addr_in_paddr(struct tgec_regs *regs);
++
++void fman_tgec_enable_interrupt(struct tgec_regs *regs, uint32_t ev_mask);
++
++void fman_tgec_disable_interrupt(struct tgec_regs *regs, uint32_t ev_mask);
++
++void fman_tgec_reset_filter_table(struct tgec_regs *regs);
++
++void fman_tgec_set_hash_table_entry(struct tgec_regs *regs, uint32_t crc);
++
++
++/**
++ * fman_tgec_get_max_frame_len() - Returns the maximum frame length value
++ * @regs: Pointer to TGEC register block
++ */
++uint16_t fman_tgec_get_max_frame_len(struct tgec_regs *regs);
++
++/**
++ * fman_tgec_set_erratum_tx_fifo_corruption_10gmac_a007() - Apply the
++ * workaround for the TX FIFO corruption erratum (10GMAC-A007)
++ * @regs: Pointer to TGEC register block
++ *
++ * TODO
++ */
++void fman_tgec_set_erratum_tx_fifo_corruption_10gmac_a007(struct tgec_regs
++ *regs);
++
++
++#endif /* __FSL_FMAN_TGEC_H */
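The flib interface above is normally driven by the higher-level FMan MAC layer. As a minimal bring-up sketch, assuming a mapped struct tgec_regs block and using only calls declared in this header (the helper name, error handling, and the TRUE macro from common/general.h are assumptions, not part of the patch):

#include "fsl_fman_tgec.h"

/* Start from the documented reset defaults, initialize with the full
 * event mask, program the station address, then enable both directions. */
static int tgec_bringup(struct tgec_regs *regs, uint8_t macaddr[6])
{
	struct tgec_cfg cfg;
	int err;

	fman_tgec_defconfig(&cfg);
	err = fman_tgec_init(regs, &cfg, TGEC_EVENTS_MASK);
	if (err != 0)
		return err;

	fman_tgec_set_mac_address(regs, macaddr);
	fman_tgec_enable(regs, TRUE, TRUE);	/* apply_rx, apply_tx */
	return 0;
}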
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/dpaa_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/dpaa_integration_ext.h
+new file mode 100644
+index 00000000..0346cf60
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/dpaa_integration_ext.h
+@@ -0,0 +1,291 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**
++
++ @File dpaa_integration_ext.h
++
++ @Description T4240 FM external definitions and structures.
++*//***************************************************************************/
++#ifndef __DPAA_INTEGRATION_EXT_H
++#define __DPAA_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++
++
++#define DPAA_VERSION 11
++
++/**************************************************************************//**
++ @Description DPAA SW Portals Enumeration.
++*//***************************************************************************/
++typedef enum
++{
++ e_DPAA_SWPORTAL0 = 0,
++ e_DPAA_SWPORTAL1,
++ e_DPAA_SWPORTAL2,
++ e_DPAA_SWPORTAL3,
++ e_DPAA_SWPORTAL4,
++ e_DPAA_SWPORTAL5,
++ e_DPAA_SWPORTAL6,
++ e_DPAA_SWPORTAL7,
++ e_DPAA_SWPORTAL8,
++ e_DPAA_SWPORTAL9,
++ e_DPAA_SWPORTAL10,
++ e_DPAA_SWPORTAL11,
++ e_DPAA_SWPORTAL12,
++ e_DPAA_SWPORTAL13,
++ e_DPAA_SWPORTAL14,
++ e_DPAA_SWPORTAL15,
++ e_DPAA_SWPORTAL16,
++ e_DPAA_SWPORTAL17,
++ e_DPAA_SWPORTAL18,
++ e_DPAA_SWPORTAL19,
++ e_DPAA_SWPORTAL20,
++ e_DPAA_SWPORTAL21,
++ e_DPAA_SWPORTAL22,
++ e_DPAA_SWPORTAL23,
++ e_DPAA_SWPORTAL24,
++ e_DPAA_SWPORTAL_DUMMY_LAST
++} e_DpaaSwPortal;
++
++/**************************************************************************//**
++ @Description DPAA Direct Connect Portals Enumeration.
++*//***************************************************************************/
++typedef enum
++{
++ e_DPAA_DCPORTAL0 = 0,
++ e_DPAA_DCPORTAL1,
++ e_DPAA_DCPORTAL2,
++ e_DPAA_DCPORTAL_DUMMY_LAST
++} e_DpaaDcPortal;
++
++#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
++#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
++
++/*****************************************************************************
++ QMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define QM_MAX_NUM_OF_POOL_CHANNELS 15 /**< Total number of channels, dedicated and pool */
++#define QM_MAX_NUM_OF_WQ 8 /**< Number of work queues per channel */
++#define QM_MAX_NUM_OF_CGS 256 /**< Congestion groups number */
++#define QM_MAX_NUM_OF_FQIDS (16 * MEGABYTE)
++ /**< FQIDs range - 24 bits */
++
++/**************************************************************************//**
++ @Description Work Queue Channel assignments in QMan.
++*//***************************************************************************/
++typedef enum
++{
++ e_QM_FQ_CHANNEL_SWPORTAL0 = 0x0, /**< Dedicated channels serviced by software portals 0 to 24 */
++ e_QM_FQ_CHANNEL_SWPORTAL1,
++ e_QM_FQ_CHANNEL_SWPORTAL2,
++ e_QM_FQ_CHANNEL_SWPORTAL3,
++ e_QM_FQ_CHANNEL_SWPORTAL4,
++ e_QM_FQ_CHANNEL_SWPORTAL5,
++ e_QM_FQ_CHANNEL_SWPORTAL6,
++ e_QM_FQ_CHANNEL_SWPORTAL7,
++ e_QM_FQ_CHANNEL_SWPORTAL8,
++ e_QM_FQ_CHANNEL_SWPORTAL9,
++ e_QM_FQ_CHANNEL_SWPORTAL10,
++ e_QM_FQ_CHANNEL_SWPORTAL11,
++ e_QM_FQ_CHANNEL_SWPORTAL12,
++ e_QM_FQ_CHANNEL_SWPORTAL13,
++ e_QM_FQ_CHANNEL_SWPORTAL14,
++ e_QM_FQ_CHANNEL_SWPORTAL15,
++ e_QM_FQ_CHANNEL_SWPORTAL16,
++ e_QM_FQ_CHANNEL_SWPORTAL17,
++ e_QM_FQ_CHANNEL_SWPORTAL18,
++ e_QM_FQ_CHANNEL_SWPORTAL19,
++ e_QM_FQ_CHANNEL_SWPORTAL20,
++ e_QM_FQ_CHANNEL_SWPORTAL21,
++ e_QM_FQ_CHANNEL_SWPORTAL22,
++ e_QM_FQ_CHANNEL_SWPORTAL23,
++ e_QM_FQ_CHANNEL_SWPORTAL24,
++
++ e_QM_FQ_CHANNEL_POOL1 = 0x401, /**< Pool channels that can be serviced by any of the software portals */
++ e_QM_FQ_CHANNEL_POOL2,
++ e_QM_FQ_CHANNEL_POOL3,
++ e_QM_FQ_CHANNEL_POOL4,
++ e_QM_FQ_CHANNEL_POOL5,
++ e_QM_FQ_CHANNEL_POOL6,
++ e_QM_FQ_CHANNEL_POOL7,
++ e_QM_FQ_CHANNEL_POOL8,
++ e_QM_FQ_CHANNEL_POOL9,
++ e_QM_FQ_CHANNEL_POOL10,
++ e_QM_FQ_CHANNEL_POOL11,
++ e_QM_FQ_CHANNEL_POOL12,
++ e_QM_FQ_CHANNEL_POOL13,
++ e_QM_FQ_CHANNEL_POOL14,
++ e_QM_FQ_CHANNEL_POOL15,
++
++ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x800, /**< Dedicated channels serviced by Direct Connect Portal 0:
++ connected to FMan 0; assigned in incrementing order to
++ each sub-portal (SP) in the portal */
++ e_QM_FQ_CHANNEL_FMAN0_SP1,
++ e_QM_FQ_CHANNEL_FMAN0_SP2,
++ e_QM_FQ_CHANNEL_FMAN0_SP3,
++ e_QM_FQ_CHANNEL_FMAN0_SP4,
++ e_QM_FQ_CHANNEL_FMAN0_SP5,
++ e_QM_FQ_CHANNEL_FMAN0_SP6,
++ e_QM_FQ_CHANNEL_FMAN0_SP7,
++ e_QM_FQ_CHANNEL_FMAN0_SP8,
++ e_QM_FQ_CHANNEL_FMAN0_SP9,
++ e_QM_FQ_CHANNEL_FMAN0_SP10,
++ e_QM_FQ_CHANNEL_FMAN0_SP11,
++ e_QM_FQ_CHANNEL_FMAN0_SP12,
++ e_QM_FQ_CHANNEL_FMAN0_SP13,
++ e_QM_FQ_CHANNEL_FMAN0_SP14,
++ e_QM_FQ_CHANNEL_FMAN0_SP15,
++
++ e_QM_FQ_CHANNEL_RMAN_SP0 = 0x820, /**< Dedicated channels serviced by Direct Connect Portal 1: connected to RMan */
++ e_QM_FQ_CHANNEL_RMAN_SP1,
++
++ e_QM_FQ_CHANNEL_CAAM = 0x840 /**< Dedicated channel serviced by Direct Connect Portal 2:
++ connected to SEC */
++} e_QmFQChannel;
++
++/*****************************************************************************
++ BMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define BM_MAX_NUM_OF_POOLS		64	/**< Number of buffer pools */
++
++/*****************************************************************************
++ SEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define SEC_NUM_OF_DECOS 3
++#define SEC_ALL_DECOS_MASK 0x00000003
++
++
++/*****************************************************************************
++ FM INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define INTG_MAX_NUM_OF_FM 2
++/* Ports defines */
++#define FM_MAX_NUM_OF_1G_MACS 6
++#define FM_MAX_NUM_OF_10G_MACS 2
++#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
++#define FM_MAX_NUM_OF_OH_PORTS 6
++
++#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
++
++#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
++
++#define FM_PORT_MAX_NUM_OF_EXT_POOLS 4 /**< Number of external BM pools per Rx port */
++#define FM_PORT_NUM_OF_CONGESTION_GRPS 256 /**< Total number of congestion groups in QM */
++#define FM_MAX_NUM_OF_SUB_PORTALS 16
++#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 0
++
++#define FM_VSP_MAX_NUM_OF_ENTRIES 64
++#define FM_MAX_NUM_OF_PFC_PRIORITIES 8
++
++/* RAMs defines */
++#define FM_MURAM_SIZE (384 * KILOBYTE)
++#define FM_IRAM_SIZE(major, minor) (64 * KILOBYTE)
++#define FM_NUM_OF_CTRL 4
++
++/* PCD defines */
++#define FM_PCD_PLCR_NUM_ENTRIES 256 /**< Total number of policer profiles */
++#define FM_PCD_KG_NUM_OF_SCHEMES 32 /**< Total number of KG schemes */
++#define FM_PCD_MAX_NUM_OF_CLS_PLANS 256 /**< Number of classification plan entries. */
++#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000600 /**< Number of bytes saved for patches */
++#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
++
++/* RTC defines */
++#define FM_RTC_NUM_OF_ALARMS 2 /**< RTC number of alarms */
++#define FM_RTC_NUM_OF_PERIODIC_PULSES 3 /**< RTC number of periodic pulses */
++#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 /**< RTC number of external triggers */
++
++/* QMI defines */
++#define QMI_MAX_NUM_OF_TNUMS 64
++#define QMI_DEF_TNUMS_THRESH 32
++/* FPM defines */
++#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
++
++/* DMA defines */
++#define DMA_THRESH_MAX_COMMQ 83
++#define DMA_THRESH_MAX_BUF 127
++
++/* BMI defines */
++#define BMI_MAX_NUM_OF_TASKS 128
++#define BMI_MAX_NUM_OF_DMAS 84
++
++#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
++#define PORT_MAX_WEIGHT 16
++
++#define FM_CHECK_PORT_RESTRICTIONS(__validPorts, __newPortIndx) TRUE
++
++/* Unique T4240 */
++#define FM_OP_OPEN_DMA_MIN_LIMIT
++#define FM_NO_RESTRICT_ON_ACCESS_RSRC
++#define FM_NO_OP_OBSERVED_POOLS
++#define FM_FRAME_END_PARAMS_FOR_OP
++#define FM_DEQ_PIPELINE_PARAMS_FOR_OP
++#define FM_QMI_NO_SINGLE_ECC_EXCEPTION
++
++#define FM_NO_GUARANTEED_RESET_VALUES
++
++/* FM errata */
++#define FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
++#define FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127
++#define FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320
++#define FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675
++#define FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
++#define FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273
++
++#define FM_BCB_ERRATA_BMI_SW001
++#define FM_LEN_CHECK_ERRATA_FMAN_SW002
++#define FM_AID_MODE_NO_TNUM_SW005 /* refer to pdm TKT068794 - only support of port_id on aid */
++#define FM_ERROR_VSP_NO_MATCH_SW006 /* refer to pdm TKT174304 - no match between errorQ and VSP */
++
++/*****************************************************************************
++ RMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define RM_MAX_NUM_OF_IB 4 /**< Number of inbound blocks */
++#define RM_NUM_OF_IBCU		8	/**< Number of classification units in an inbound block */
++
++/* RMan erratas */
++#define RM_ERRONEOUS_ACK_ERRATA_RMAN_A006756
++
++/*****************************************************************************
++ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define NUM_OF_RX_SC 16
++#define NUM_OF_TX_SC 16
++
++#define NUM_OF_SA_PER_RX_SC 2
++#define NUM_OF_SA_PER_TX_SC 2
++
++#endif /* __DPAA_INTEGRATION_EXT_H */
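Because each e_QmFQChannel block above is contiguous, channel numbers can be derived arithmetically rather than spelled out. A hedged sketch, assuming std_ext.h supplies MEGABYTE and the integer types (the helper names are illustrative):

#include "dpaa_integration_ext.h"

/* Dedicated channel of a software portal: the SWPORTAL block starts at
 * 0x0 and runs contiguously through portal 24. */
static inline e_QmFQChannel qm_sw_portal_channel(e_DpaaSwPortal portal)
{
	return (e_QmFQChannel)(e_QM_FQ_CHANNEL_SWPORTAL0 + portal);
}

/* FQIDs occupy a 24-bit space: QM_MAX_NUM_OF_FQIDS == 16 * MEGABYTE. */
static inline int qm_fqid_is_valid(uint32_t fqid)
{
	return fqid < QM_MAX_NUM_OF_FQIDS;
}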
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_ext.h
+new file mode 100644
+index 00000000..0d62dd15
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_ext.h
+@@ -0,0 +1,71 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++
++ @File part_ext.h
++
++ @Description Definitions for the part (integration) module.
++*//***************************************************************************/
++
++#ifndef __PART_EXT_H
++#define __PART_EXT_H
++
++#include "std_ext.h"
++#include "part_integration_ext.h"
++
++#if !(defined(P1023) || \
++ defined(P2041) || \
++ defined(P3041) || \
++ defined(P4080) || \
++ defined(P5020) || \
++ defined(P5040) || \
++ defined(B4860) || \
++ defined(T4240))
++#error "unable to proceed without chip-definition"
++#endif
++
++
++/**************************************************************************//*
++ @Description Part data structure - must be contained in any integration
++ data structure.
++*//***************************************************************************/
++typedef struct t_Part
++{
++ uintptr_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
++ /**< Returns the address of the module's memory map base. */
++ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress);
++ /**< Returns the module's ID according to its memory map base. */
++} t_Part;
++
++
++#endif /* __PART_EXT_H */
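Per the comment above, t_Part must sit at the start of any integration data structure, which lets generic code resolve module base addresses without knowing the concrete type. A hedged sketch of such a consumer (the function is hypothetical; t_Handle comes from std_ext.h):

#include "part_ext.h"

/* Resolve the Frame Manager memory-map base through the integration
 * callbacks; relies on t_Part being the first member of the object
 * behind h_Part. */
static uintptr_t part_get_fm_base(t_Handle h_Part)
{
	t_Part *p_Part = (t_Part *)h_Part;

	return p_Part->f_GetModuleBase(h_Part, e_MODULE_ID_FM);
}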
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_integration_ext.h
+new file mode 100644
+index 00000000..3254c766
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_integration_ext.h
+@@ -0,0 +1,304 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**
++
++ @File part_integration_ext.h
++
++ @Description T4240 external definitions and structures.
++*//***************************************************************************/
++#ifndef __PART_INTEGRATION_EXT_H
++#define __PART_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++#include "ddr_std_ext.h"
++#include "enet_ext.h"
++#include "dpaa_integration_ext.h"
++
++
++/**************************************************************************//**
++ @Group T4240_chip_id T4240 Application Programming Interface
++
++ @Description   T4240 chip functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++#define CORE_E6500
++
++#define INTG_MAX_NUM_OF_CORES 24
++
++
++/**************************************************************************//**
++ @Description Module types.
++*//***************************************************************************/
++typedef enum e_ModuleId
++{
++ e_MODULE_ID_DUART_1 = 0,
++ e_MODULE_ID_DUART_2,
++ e_MODULE_ID_DUART_3,
++ e_MODULE_ID_DUART_4,
++ e_MODULE_ID_LAW,
++ e_MODULE_ID_IFC,
++ e_MODULE_ID_PAMU,
++ e_MODULE_ID_QM, /**< Queue manager module */
++ e_MODULE_ID_BM, /**< Buffer manager module */
++ e_MODULE_ID_QM_CE_PORTAL_0,
++ e_MODULE_ID_QM_CI_PORTAL_0,
++ e_MODULE_ID_QM_CE_PORTAL_1,
++ e_MODULE_ID_QM_CI_PORTAL_1,
++ e_MODULE_ID_QM_CE_PORTAL_2,
++ e_MODULE_ID_QM_CI_PORTAL_2,
++ e_MODULE_ID_QM_CE_PORTAL_3,
++ e_MODULE_ID_QM_CI_PORTAL_3,
++ e_MODULE_ID_QM_CE_PORTAL_4,
++ e_MODULE_ID_QM_CI_PORTAL_4,
++ e_MODULE_ID_QM_CE_PORTAL_5,
++ e_MODULE_ID_QM_CI_PORTAL_5,
++ e_MODULE_ID_QM_CE_PORTAL_6,
++ e_MODULE_ID_QM_CI_PORTAL_6,
++ e_MODULE_ID_QM_CE_PORTAL_7,
++ e_MODULE_ID_QM_CI_PORTAL_7,
++ e_MODULE_ID_QM_CE_PORTAL_8,
++ e_MODULE_ID_QM_CI_PORTAL_8,
++ e_MODULE_ID_QM_CE_PORTAL_9,
++ e_MODULE_ID_QM_CI_PORTAL_9,
++ e_MODULE_ID_BM_CE_PORTAL_0,
++ e_MODULE_ID_BM_CI_PORTAL_0,
++ e_MODULE_ID_BM_CE_PORTAL_1,
++ e_MODULE_ID_BM_CI_PORTAL_1,
++ e_MODULE_ID_BM_CE_PORTAL_2,
++ e_MODULE_ID_BM_CI_PORTAL_2,
++ e_MODULE_ID_BM_CE_PORTAL_3,
++ e_MODULE_ID_BM_CI_PORTAL_3,
++ e_MODULE_ID_BM_CE_PORTAL_4,
++ e_MODULE_ID_BM_CI_PORTAL_4,
++ e_MODULE_ID_BM_CE_PORTAL_5,
++ e_MODULE_ID_BM_CI_PORTAL_5,
++ e_MODULE_ID_BM_CE_PORTAL_6,
++ e_MODULE_ID_BM_CI_PORTAL_6,
++ e_MODULE_ID_BM_CE_PORTAL_7,
++ e_MODULE_ID_BM_CI_PORTAL_7,
++ e_MODULE_ID_BM_CE_PORTAL_8,
++ e_MODULE_ID_BM_CI_PORTAL_8,
++ e_MODULE_ID_BM_CE_PORTAL_9,
++ e_MODULE_ID_BM_CI_PORTAL_9,
++ e_MODULE_ID_FM, /**< Frame manager module */
++ e_MODULE_ID_FM_RTC, /**< FM Real-Time-Clock */
++ e_MODULE_ID_FM_MURAM, /**< FM Multi-User-RAM */
++ e_MODULE_ID_FM_BMI, /**< FM BMI block */
++ e_MODULE_ID_FM_QMI, /**< FM QMI block */
++ e_MODULE_ID_FM_PARSER, /**< FM parser block */
++ e_MODULE_ID_FM_PORT_HO1, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO2, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO3, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO4, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO5, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO6, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO7, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_1GRx1, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx2, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx3, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx4, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx5, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx6, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_10GRx1, /**< FM Rx 10G MAC port block */
++ e_MODULE_ID_FM_PORT_10GRx2, /**< FM Rx 10G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx1, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx2, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx3, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx4, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx5, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx6, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_10GTx1, /**< FM Tx 10G MAC port block */
++ e_MODULE_ID_FM_PORT_10GTx2, /**< FM Tx 10G MAC port block */
++ e_MODULE_ID_FM_PLCR, /**< FM Policer */
++ e_MODULE_ID_FM_KG, /**< FM Keygen */
++ e_MODULE_ID_FM_DMA, /**< FM DMA */
++ e_MODULE_ID_FM_FPM, /**< FM FPM */
++ e_MODULE_ID_FM_IRAM, /**< FM Instruction-RAM */
++ e_MODULE_ID_FM_1GMDIO, /**< FM 1G MDIO MAC */
++ e_MODULE_ID_FM_10GMDIO, /**< FM 10G MDIO */
++ e_MODULE_ID_FM_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
++ e_MODULE_ID_FM_1GMAC1, /**< FM 1G MAC #1 */
++ e_MODULE_ID_FM_1GMAC2, /**< FM 1G MAC #2 */
++ e_MODULE_ID_FM_1GMAC3, /**< FM 1G MAC #3 */
++ e_MODULE_ID_FM_1GMAC4, /**< FM 1G MAC #4 */
++ e_MODULE_ID_FM_1GMAC5, /**< FM 1G MAC #5 */
++ e_MODULE_ID_FM_1GMAC6, /**< FM 1G MAC #6 */
++ e_MODULE_ID_FM_10GMAC1, /**< FM 10G MAC */
++ e_MODULE_ID_FM_10GMAC2, /**< FM 10G MAC */
++
++ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
++ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
++ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
++ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
++ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
++ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
++ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
++ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
++ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
++ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
++ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
++ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
++
++ e_MODULE_ID_PIC, /**< PIC */
++ e_MODULE_ID_GPIO, /**< GPIO */
++ e_MODULE_ID_SERDES, /**< SERDES */
++ e_MODULE_ID_CPC_1, /**< CoreNet-Platform-Cache 1 */
++ e_MODULE_ID_CPC_2, /**< CoreNet-Platform-Cache 2 */
++
++ e_MODULE_ID_SRIO_PORTS, /**< RapidIO controller */
++
++ e_MODULE_ID_DUMMY_LAST
++} e_ModuleId;
++
++#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
++
++#if 0 /* using unified values */
++/*****************************************************************************
++ INTEGRATION-SPECIFIC MODULE CODES
++******************************************************************************/
++#define MODULE_UNKNOWN 0x00000000
++#define MODULE_MEM 0x00010000
++#define MODULE_MM 0x00020000
++#define MODULE_CORE 0x00030000
++#define MODULE_T4240 0x00040000
++#define MODULE_T4240_PLATFORM 0x00050000
++#define MODULE_PM 0x00060000
++#define MODULE_MMU 0x00070000
++#define MODULE_PIC 0x00080000
++#define MODULE_CPC 0x00090000
++#define MODULE_DUART 0x000a0000
++#define MODULE_SERDES 0x000b0000
++#define MODULE_PIO 0x000c0000
++#define MODULE_QM 0x000d0000
++#define MODULE_BM 0x000e0000
++#define MODULE_SEC 0x000f0000
++#define MODULE_LAW 0x00100000
++#define MODULE_LBC 0x00110000
++#define MODULE_PAMU 0x00120000
++#define MODULE_FM 0x00130000
++#define MODULE_FM_MURAM 0x00140000
++#define MODULE_FM_PCD 0x00150000
++#define MODULE_FM_RTC 0x00160000
++#define MODULE_FM_MAC 0x00170000
++#define MODULE_FM_PORT 0x00180000
++#define MODULE_FM_SP 0x00190000
++#define MODULE_DPA_PORT 0x001a0000
++#define MODULE_MII 0x001b0000
++#define MODULE_I2C 0x001c0000
++#define MODULE_DMA 0x001d0000
++#define MODULE_DDR 0x001e0000
++#define MODULE_ESPI 0x001f0000
++#define MODULE_DPAA_IPSEC 0x00200000
++#endif /* using unified values */
++
++/*****************************************************************************
++ PAMU INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define PAMU_NUM_OF_PARTITIONS 4
++
++/*****************************************************************************
++ LAW INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define LAW_NUM_OF_WINDOWS 32
++#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4 Kbytes */
++#define LAW_MAX_WINDOW_SIZE     0x0000010000000000LL    /**< 1 Tbyte for 40-bit address space */
++
++
++/*****************************************************************************
++ LBC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++/**************************************************************************//**
++ @Group lbc_exception_grp LBC Exception Unit
++
++ @Description LBC Exception unit API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Anchor lbc_exbm
++
++ @Collection LBC Errors Bit Mask
++
++                These errors are reported through the exceptions callback.
++                The values can be OR'ed in any combination in the errors mask
++ parameter of the errors report structure.
++
++ These errors can also be passed as a bit-mask to
++ LBC_EnableErrorChecking() or LBC_DisableErrorChecking(),
++ for enabling or disabling error checking.
++ @{
++*//***************************************************************************/
++#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */
++#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */
++#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */
++#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */
++
++#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \
++ LBC_ERR_WRITE_PROTECT | LBC_ERR_CHIP_SELECT)
++ /**< All possible errors */
++/* @} */
++/** @} */ /* end of lbc_exception_grp group */
++
++#define LBC_INCORRECT_ERROR_REPORT_ERRATA
++
++#define LBC_NUM_OF_BANKS 8
++#define LBC_MAX_CS_SIZE 0x0000000100000000LL /* Up to 4G memory block size */
++#define LBC_PARITY_SUPPORT
++#define LBC_ADDRESS_HOLD_TIME_CTRL
++#define LBC_HIGH_CLK_DIVIDERS
++#define LBC_FCM_AVAILABLE
++
++/*****************************************************************************
++ GPIO INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define GPIO_PORT_OFFSET_0x1000
++
++#define GPIO_NUM_OF_PORTS   3   /**< Number of ports in GPIO module;
++                                     each port contains up to 32 I/O pins. */
++
++#define GPIO_VALID_PIN_MASKS \
++ { /* Port A */ 0xFFFFFFFF, \
++ /* Port B */ 0xFFFFFFFF, \
++ /* Port C */ 0xFFFFFFFF }
++
++#define GPIO_VALID_INTR_MASKS \
++ { /* Port A */ 0xFFFFFFFF, \
++ /* Port B */ 0xFFFFFFFF, \
++ /* Port C */ 0xFFFFFFFF }
++
++
++
++#endif /* __PART_INTEGRATION_EXT_H */
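The LBC error bits above are designed to be combined, as the @Collection note says. A hedged sketch of composing such a mask (LBC_EnableErrorChecking() is only referenced by the comments above and is assumed to accept it):

#include "part_integration_ext.h"

/* Check for every LBC error except unrecognized chip select; the mask
 * arithmetic is the point here, the consuming driver call is assumed. */
static uint32_t lbc_default_error_mask(void)
{
	return LBC_ERR_ALL & ~LBC_ERR_CHIP_SELECT;
}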
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/dpaa_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/dpaa_integration_ext.h
+new file mode 100644
+index 00000000..f7f8eb07
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/dpaa_integration_ext.h
+@@ -0,0 +1,293 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**
++
++ @File dpaa_integration_ext.h
++
++ @Description T4240 FM external definitions and structures.
++*//***************************************************************************/
++#ifndef __DPAA_INTEGRATION_EXT_H
++#define __DPAA_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++
++
++#define DPAA_VERSION 11
++
++/**************************************************************************//**
++ @Description DPAA SW Portals Enumeration.
++*//***************************************************************************/
++typedef enum
++{
++ e_DPAA_SWPORTAL0 = 0,
++ e_DPAA_SWPORTAL1,
++ e_DPAA_SWPORTAL2,
++ e_DPAA_SWPORTAL3,
++ e_DPAA_SWPORTAL4,
++ e_DPAA_SWPORTAL5,
++ e_DPAA_SWPORTAL6,
++ e_DPAA_SWPORTAL7,
++ e_DPAA_SWPORTAL8,
++ e_DPAA_SWPORTAL9,
++ e_DPAA_SWPORTAL10,
++ e_DPAA_SWPORTAL11,
++ e_DPAA_SWPORTAL12,
++ e_DPAA_SWPORTAL13,
++ e_DPAA_SWPORTAL14,
++ e_DPAA_SWPORTAL15,
++ e_DPAA_SWPORTAL16,
++ e_DPAA_SWPORTAL17,
++ e_DPAA_SWPORTAL18,
++ e_DPAA_SWPORTAL19,
++ e_DPAA_SWPORTAL20,
++ e_DPAA_SWPORTAL21,
++ e_DPAA_SWPORTAL22,
++ e_DPAA_SWPORTAL23,
++ e_DPAA_SWPORTAL24,
++ e_DPAA_SWPORTAL_DUMMY_LAST
++} e_DpaaSwPortal;
++
++/**************************************************************************//**
++ @Description DPAA Direct Connect Portals Enumeration.
++*//***************************************************************************/
++typedef enum
++{
++ e_DPAA_DCPORTAL0 = 0,
++ e_DPAA_DCPORTAL1,
++ e_DPAA_DCPORTAL2,
++ e_DPAA_DCPORTAL_DUMMY_LAST
++} e_DpaaDcPortal;
++
++#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
++#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
++
++/*****************************************************************************
++ QMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define QM_MAX_NUM_OF_POOL_CHANNELS 15 /**< Total number of channels, dedicated and pool */
++#define QM_MAX_NUM_OF_WQ 8 /**< Number of work queues per channel */
++#define QM_MAX_NUM_OF_CGS 256 /**< Congestion groups number */
++#define QM_MAX_NUM_OF_FQIDS (16 * MEGABYTE)
++ /**< FQIDs range - 24 bits */
++
++/**************************************************************************//**
++ @Description Work Queue Channel assignments in QMan.
++*//***************************************************************************/
++typedef enum
++{
++ e_QM_FQ_CHANNEL_SWPORTAL0 = 0x0, /**< Dedicated channels serviced by software portals 0 to 24 */
++ e_QM_FQ_CHANNEL_SWPORTAL1,
++ e_QM_FQ_CHANNEL_SWPORTAL2,
++ e_QM_FQ_CHANNEL_SWPORTAL3,
++ e_QM_FQ_CHANNEL_SWPORTAL4,
++ e_QM_FQ_CHANNEL_SWPORTAL5,
++ e_QM_FQ_CHANNEL_SWPORTAL6,
++ e_QM_FQ_CHANNEL_SWPORTAL7,
++ e_QM_FQ_CHANNEL_SWPORTAL8,
++ e_QM_FQ_CHANNEL_SWPORTAL9,
++ e_QM_FQ_CHANNEL_SWPORTAL10,
++ e_QM_FQ_CHANNEL_SWPORTAL11,
++ e_QM_FQ_CHANNEL_SWPORTAL12,
++ e_QM_FQ_CHANNEL_SWPORTAL13,
++ e_QM_FQ_CHANNEL_SWPORTAL14,
++ e_QM_FQ_CHANNEL_SWPORTAL15,
++ e_QM_FQ_CHANNEL_SWPORTAL16,
++ e_QM_FQ_CHANNEL_SWPORTAL17,
++ e_QM_FQ_CHANNEL_SWPORTAL18,
++ e_QM_FQ_CHANNEL_SWPORTAL19,
++ e_QM_FQ_CHANNEL_SWPORTAL20,
++ e_QM_FQ_CHANNEL_SWPORTAL21,
++ e_QM_FQ_CHANNEL_SWPORTAL22,
++ e_QM_FQ_CHANNEL_SWPORTAL23,
++ e_QM_FQ_CHANNEL_SWPORTAL24,
++
++ e_QM_FQ_CHANNEL_POOL1 = 0x401, /**< Pool channels that can be serviced by any of the software portals */
++ e_QM_FQ_CHANNEL_POOL2,
++ e_QM_FQ_CHANNEL_POOL3,
++ e_QM_FQ_CHANNEL_POOL4,
++ e_QM_FQ_CHANNEL_POOL5,
++ e_QM_FQ_CHANNEL_POOL6,
++ e_QM_FQ_CHANNEL_POOL7,
++ e_QM_FQ_CHANNEL_POOL8,
++ e_QM_FQ_CHANNEL_POOL9,
++ e_QM_FQ_CHANNEL_POOL10,
++ e_QM_FQ_CHANNEL_POOL11,
++ e_QM_FQ_CHANNEL_POOL12,
++ e_QM_FQ_CHANNEL_POOL13,
++ e_QM_FQ_CHANNEL_POOL14,
++ e_QM_FQ_CHANNEL_POOL15,
++
++ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x800, /**< Dedicated channels serviced by Direct Connect Portal 0:
++ connected to FMan 0; assigned in incrementing order to
++ each sub-portal (SP) in the portal */
++ e_QM_FQ_CHANNEL_FMAN0_SP1,
++ e_QM_FQ_CHANNEL_FMAN0_SP2,
++ e_QM_FQ_CHANNEL_FMAN0_SP3,
++ e_QM_FQ_CHANNEL_FMAN0_SP4,
++ e_QM_FQ_CHANNEL_FMAN0_SP5,
++ e_QM_FQ_CHANNEL_FMAN0_SP6,
++ e_QM_FQ_CHANNEL_FMAN0_SP7,
++ e_QM_FQ_CHANNEL_FMAN0_SP8,
++ e_QM_FQ_CHANNEL_FMAN0_SP9,
++ e_QM_FQ_CHANNEL_FMAN0_SP10,
++ e_QM_FQ_CHANNEL_FMAN0_SP11,
++ e_QM_FQ_CHANNEL_FMAN0_SP12,
++ e_QM_FQ_CHANNEL_FMAN0_SP13,
++ e_QM_FQ_CHANNEL_FMAN0_SP14,
++ e_QM_FQ_CHANNEL_FMAN0_SP15,
++
++ e_QM_FQ_CHANNEL_RMAN_SP0 = 0x820, /**< Dedicated channels serviced by Direct Connect Portal 1: connected to RMan */
++ e_QM_FQ_CHANNEL_RMAN_SP1,
++
++ e_QM_FQ_CHANNEL_CAAM = 0x840 /**< Dedicated channel serviced by Direct Connect Portal 2:
++ connected to SEC */
++} e_QmFQChannel;
++
++/*****************************************************************************
++ BMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define BM_MAX_NUM_OF_POOLS		64	/**< Number of buffer pools */
++
++/*****************************************************************************
++ SEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define SEC_NUM_OF_DECOS 3
++#define SEC_ALL_DECOS_MASK 0x00000003
++
++
++/*****************************************************************************
++ FM INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define INTG_MAX_NUM_OF_FM 1
++/* Ports defines */
++#define FM_MAX_NUM_OF_1G_MACS 5
++#define FM_MAX_NUM_OF_10G_MACS 1
++#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
++#define FM_MAX_NUM_OF_OH_PORTS 4
++
++#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
++
++#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
++
++#define FM_MAX_NUM_OF_MACSECS 1 /* Should be updated */
++
++#define FM_PORT_MAX_NUM_OF_EXT_POOLS 4 /**< Number of external BM pools per Rx port */
++#define FM_PORT_NUM_OF_CONGESTION_GRPS 256 /**< Total number of congestion groups in QM */
++#define FM_MAX_NUM_OF_SUB_PORTALS 16
++#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 0
++
++#define FM_VSP_MAX_NUM_OF_ENTRIES 32
++#define FM_MAX_NUM_OF_PFC_PRIORITIES 8
++
++/* RAMs defines */
++#define FM_MURAM_SIZE (192 * KILOBYTE)
++#define FM_IRAM_SIZE(major, minor) \
++	(((major == 6) && (minor == 4)) ? (64 * KILOBYTE) : (32 * KILOBYTE))
++#define FM_NUM_OF_CTRL 2
++
++/* PCD defines */
++#define FM_PCD_PLCR_NUM_ENTRIES 256 /**< Total number of policer profiles */
++#define FM_PCD_KG_NUM_OF_SCHEMES 32 /**< Total number of KG schemes */
++#define FM_PCD_MAX_NUM_OF_CLS_PLANS 256 /**< Number of classification plan entries. */
++#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000600 /**< Number of bytes saved for patches */
++#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
++
++/* RTC defines */
++#define FM_RTC_NUM_OF_ALARMS 2 /**< RTC number of alarms */
++#define FM_RTC_NUM_OF_PERIODIC_PULSES 3 /**< RTC number of periodic pulses */
++#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 /**< RTC number of external triggers */
++
++/* QMI defines */
++#define QMI_MAX_NUM_OF_TNUMS 64
++#define QMI_DEF_TNUMS_THRESH 32
++/* FPM defines */
++#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
++
++/* DMA defines */
++#define DMA_THRESH_MAX_COMMQ 83
++#define DMA_THRESH_MAX_BUF 127
++
++/* BMI defines */
++#define BMI_MAX_NUM_OF_TASKS 64
++#define BMI_MAX_NUM_OF_DMAS 32
++
++#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
++#define PORT_MAX_WEIGHT 16
++
++#define FM_CHECK_PORT_RESTRICTIONS(__validPorts, __newPortIndx) TRUE
++
++/* Unique T4240 */
++#define FM_OP_OPEN_DMA_MIN_LIMIT
++#define FM_NO_RESTRICT_ON_ACCESS_RSRC
++#define FM_NO_OP_OBSERVED_POOLS
++#define FM_FRAME_END_PARAMS_FOR_OP
++#define FM_DEQ_PIPELINE_PARAMS_FOR_OP
++#define FM_QMI_NO_SINGLE_ECC_EXCEPTION
++
++#define FM_NO_GUARANTEED_RESET_VALUES
++
++/* FM errata */
++#define FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
++#define FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320
++#define FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675
++#define FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
++#define FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273
++
++#define FM_BCB_ERRATA_BMI_SW001
++#define FM_LEN_CHECK_ERRATA_FMAN_SW002
++#define FM_AID_MODE_NO_TNUM_SW005 /* refer to pdm TKT068794 - only support of port_id on aid */
++#define FM_ERROR_VSP_NO_MATCH_SW006 /* refer to pdm TKT174304 - no match between errorQ and VSP */
++
++/*****************************************************************************
++ RMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define RM_MAX_NUM_OF_IB 4 /**< Number of inbound blocks */
++#define RM_NUM_OF_IBCU		8	/**< Number of classification units in an inbound block */
++
++/* RMan erratas */
++#define RM_ERRONEOUS_ACK_ERRATA_RMAN_A006756
++
++/*****************************************************************************
++ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define NUM_OF_RX_SC 16
++#define NUM_OF_TX_SC 16
++
++#define NUM_OF_SA_PER_RX_SC 2
++#define NUM_OF_SA_PER_TX_SC 2
++
++#endif /* __DPAA_INTEGRATION_EXT_H */
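Note that, unlike the fixed 64 KB value in the FMANV3H header, FM_IRAM_SIZE() here depends on the FM revision. A hedged illustration (KILOBYTE is assumed to come from std_ext.h):

#include "dpaa_integration_ext.h"

/* FM revision 6.4 carries 64 KB of controller IRAM; the other revisions
 * covered by this header carry 32 KB. */
uint32_t iram_rev_6_4 = FM_IRAM_SIZE(6, 4);	/* 64 * KILOBYTE */
uint32_t iram_rev_6_1 = FM_IRAM_SIZE(6, 1);	/* 32 * KILOBYTE */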
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_ext.h
+new file mode 100644
+index 00000000..ba9732ee
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_ext.h
+@@ -0,0 +1,59 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++
++ @File part_ext.h
++
++ @Description Definitions for the part (integration) module.
++*//***************************************************************************/
++
++#ifndef __PART_EXT_H
++#define __PART_EXT_H
++
++#include "std_ext.h"
++#include "part_integration_ext.h"
++
++/**************************************************************************//*
++ @Description Part data structure - must be contained in any integration
++ data structure.
++*//***************************************************************************/
++typedef struct t_Part
++{
++ uintptr_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
++ /**< Returns the address of the module's memory map base. */
++ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress);
++ /**< Returns the module's ID according to its memory map base. */
++} t_Part;
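++
++/* Editorial sketch (not part of the original SDK sources): one way an
++ * integration layer could back the two t_Part callbacks, assuming a
++ * hypothetical lookup table indexed by e_ModuleId. All names below
++ * (exampleModuleBases, Example*) are illustrative, not SDK API.
++ */
++#if 0 /* illustrative only */
++static uintptr_t exampleModuleBases[NUM_OF_MODULES];
++
++static uintptr_t ExampleGetModuleBase(t_Handle h_Part, e_ModuleId moduleId)
++{
++    (void)h_Part;
++    return (moduleId < e_MODULE_ID_DUMMY_LAST) ? exampleModuleBases[moduleId] : 0;
++}
++
++static e_ModuleId ExampleGetModuleIdByBase(t_Handle h_Part, uintptr_t baseAddress)
++{
++    int i;
++    (void)h_Part;
++    for (i = 0; i < NUM_OF_MODULES; i++)
++        if (exampleModuleBases[i] == baseAddress)
++            return (e_ModuleId)i;
++    return e_MODULE_ID_DUMMY_LAST;
++}
++
++static t_Part examplePart = { ExampleGetModuleBase, ExampleGetModuleIdByBase };
++#endif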
++
++
++#endif /* __PART_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_integration_ext.h
+new file mode 100644
+index 00000000..3254c766
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_integration_ext.h
+@@ -0,0 +1,304 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**
++
++ @File part_integration_ext.h
++
++ @Description   FMANV3L external definitions and structures.
++*//***************************************************************************/
++#ifndef __PART_INTEGRATION_EXT_H
++#define __PART_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++#include "ddr_std_ext.h"
++#include "enet_ext.h"
++#include "dpaa_integration_ext.h"
++
++
++/**************************************************************************//**
++ @Group         FMANV3L_chip_id FMANV3L Application Programming Interface
++
++ @Description   FMANV3L Chip functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++#define CORE_E6500
++
++#define INTG_MAX_NUM_OF_CORES 24
++
++
++/**************************************************************************//**
++ @Description Module types.
++*//***************************************************************************/
++typedef enum e_ModuleId
++{
++ e_MODULE_ID_DUART_1 = 0,
++ e_MODULE_ID_DUART_2,
++ e_MODULE_ID_DUART_3,
++ e_MODULE_ID_DUART_4,
++ e_MODULE_ID_LAW,
++ e_MODULE_ID_IFC,
++ e_MODULE_ID_PAMU,
++ e_MODULE_ID_QM, /**< Queue manager module */
++ e_MODULE_ID_BM, /**< Buffer manager module */
++ e_MODULE_ID_QM_CE_PORTAL_0,
++ e_MODULE_ID_QM_CI_PORTAL_0,
++ e_MODULE_ID_QM_CE_PORTAL_1,
++ e_MODULE_ID_QM_CI_PORTAL_1,
++ e_MODULE_ID_QM_CE_PORTAL_2,
++ e_MODULE_ID_QM_CI_PORTAL_2,
++ e_MODULE_ID_QM_CE_PORTAL_3,
++ e_MODULE_ID_QM_CI_PORTAL_3,
++ e_MODULE_ID_QM_CE_PORTAL_4,
++ e_MODULE_ID_QM_CI_PORTAL_4,
++ e_MODULE_ID_QM_CE_PORTAL_5,
++ e_MODULE_ID_QM_CI_PORTAL_5,
++ e_MODULE_ID_QM_CE_PORTAL_6,
++ e_MODULE_ID_QM_CI_PORTAL_6,
++ e_MODULE_ID_QM_CE_PORTAL_7,
++ e_MODULE_ID_QM_CI_PORTAL_7,
++ e_MODULE_ID_QM_CE_PORTAL_8,
++ e_MODULE_ID_QM_CI_PORTAL_8,
++ e_MODULE_ID_QM_CE_PORTAL_9,
++ e_MODULE_ID_QM_CI_PORTAL_9,
++ e_MODULE_ID_BM_CE_PORTAL_0,
++ e_MODULE_ID_BM_CI_PORTAL_0,
++ e_MODULE_ID_BM_CE_PORTAL_1,
++ e_MODULE_ID_BM_CI_PORTAL_1,
++ e_MODULE_ID_BM_CE_PORTAL_2,
++ e_MODULE_ID_BM_CI_PORTAL_2,
++ e_MODULE_ID_BM_CE_PORTAL_3,
++ e_MODULE_ID_BM_CI_PORTAL_3,
++ e_MODULE_ID_BM_CE_PORTAL_4,
++ e_MODULE_ID_BM_CI_PORTAL_4,
++ e_MODULE_ID_BM_CE_PORTAL_5,
++ e_MODULE_ID_BM_CI_PORTAL_5,
++ e_MODULE_ID_BM_CE_PORTAL_6,
++ e_MODULE_ID_BM_CI_PORTAL_6,
++ e_MODULE_ID_BM_CE_PORTAL_7,
++ e_MODULE_ID_BM_CI_PORTAL_7,
++ e_MODULE_ID_BM_CE_PORTAL_8,
++ e_MODULE_ID_BM_CI_PORTAL_8,
++ e_MODULE_ID_BM_CE_PORTAL_9,
++ e_MODULE_ID_BM_CI_PORTAL_9,
++ e_MODULE_ID_FM, /**< Frame manager module */
++ e_MODULE_ID_FM_RTC, /**< FM Real-Time-Clock */
++ e_MODULE_ID_FM_MURAM, /**< FM Multi-User-RAM */
++ e_MODULE_ID_FM_BMI, /**< FM BMI block */
++ e_MODULE_ID_FM_QMI, /**< FM QMI block */
++ e_MODULE_ID_FM_PARSER, /**< FM parser block */
++ e_MODULE_ID_FM_PORT_HO1, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO2, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO3, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO4, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO5, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO6, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO7, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_1GRx1, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx2, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx3, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx4, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx5, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx6, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_10GRx1, /**< FM Rx 10G MAC port block */
++ e_MODULE_ID_FM_PORT_10GRx2, /**< FM Rx 10G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx1, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx2, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx3, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx4, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx5, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx6, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_10GTx1, /**< FM Tx 10G MAC port block */
++ e_MODULE_ID_FM_PORT_10GTx2, /**< FM Tx 10G MAC port block */
++ e_MODULE_ID_FM_PLCR, /**< FM Policer */
++ e_MODULE_ID_FM_KG, /**< FM Keygen */
++ e_MODULE_ID_FM_DMA, /**< FM DMA */
++ e_MODULE_ID_FM_FPM, /**< FM FPM */
++ e_MODULE_ID_FM_IRAM, /**< FM Instruction-RAM */
++ e_MODULE_ID_FM_1GMDIO, /**< FM 1G MDIO MAC */
++ e_MODULE_ID_FM_10GMDIO, /**< FM 10G MDIO */
++ e_MODULE_ID_FM_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
++ e_MODULE_ID_FM_1GMAC1, /**< FM 1G MAC #1 */
++ e_MODULE_ID_FM_1GMAC2, /**< FM 1G MAC #2 */
++ e_MODULE_ID_FM_1GMAC3, /**< FM 1G MAC #3 */
++ e_MODULE_ID_FM_1GMAC4, /**< FM 1G MAC #4 */
++ e_MODULE_ID_FM_1GMAC5, /**< FM 1G MAC #5 */
++ e_MODULE_ID_FM_1GMAC6, /**< FM 1G MAC #6 */
++ e_MODULE_ID_FM_10GMAC1, /**< FM 10G MAC */
++ e_MODULE_ID_FM_10GMAC2, /**< FM 10G MAC */
++
++ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
++ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
++ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
++ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
++ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
++ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
++ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
++ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
++ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
++ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
++ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
++ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
++
++ e_MODULE_ID_PIC, /**< PIC */
++ e_MODULE_ID_GPIO, /**< GPIO */
++ e_MODULE_ID_SERDES, /**< SERDES */
++ e_MODULE_ID_CPC_1, /**< CoreNet-Platform-Cache 1 */
++ e_MODULE_ID_CPC_2, /**< CoreNet-Platform-Cache 2 */
++
++ e_MODULE_ID_SRIO_PORTS, /**< RapidIO controller */
++
++ e_MODULE_ID_DUMMY_LAST
++} e_ModuleId;
++
++#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
++
++#if 0 /* using unified values */
++/*****************************************************************************
++ INTEGRATION-SPECIFIC MODULE CODES
++******************************************************************************/
++#define MODULE_UNKNOWN 0x00000000
++#define MODULE_MEM 0x00010000
++#define MODULE_MM 0x00020000
++#define MODULE_CORE 0x00030000
++#define MODULE_T4240 0x00040000
++#define MODULE_T4240_PLATFORM 0x00050000
++#define MODULE_PM 0x00060000
++#define MODULE_MMU 0x00070000
++#define MODULE_PIC 0x00080000
++#define MODULE_CPC 0x00090000
++#define MODULE_DUART 0x000a0000
++#define MODULE_SERDES 0x000b0000
++#define MODULE_PIO 0x000c0000
++#define MODULE_QM 0x000d0000
++#define MODULE_BM 0x000e0000
++#define MODULE_SEC 0x000f0000
++#define MODULE_LAW 0x00100000
++#define MODULE_LBC 0x00110000
++#define MODULE_PAMU 0x00120000
++#define MODULE_FM 0x00130000
++#define MODULE_FM_MURAM 0x00140000
++#define MODULE_FM_PCD 0x00150000
++#define MODULE_FM_RTC 0x00160000
++#define MODULE_FM_MAC 0x00170000
++#define MODULE_FM_PORT 0x00180000
++#define MODULE_FM_SP 0x00190000
++#define MODULE_DPA_PORT 0x001a0000
++#define MODULE_MII 0x001b0000
++#define MODULE_I2C 0x001c0000
++#define MODULE_DMA 0x001d0000
++#define MODULE_DDR 0x001e0000
++#define MODULE_ESPI 0x001f0000
++#define MODULE_DPAA_IPSEC 0x00200000
++#endif /* using unified values */
++
++/*****************************************************************************
++ PAMU INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define PAMU_NUM_OF_PARTITIONS 4
++
++/*****************************************************************************
++ LAW INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define LAW_NUM_OF_WINDOWS 32
++#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4 Kbytes */
++#define LAW_MAX_WINDOW_SIZE 0x0000010000000000LL /**< 1 TByte for 40-bit address space */
++
++
++/*****************************************************************************
++ LBC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++/**************************************************************************//**
++ @Group lbc_exception_grp LBC Exception Unit
++
++ @Description LBC Exception unit API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Anchor lbc_exbm
++
++ @Collection LBC Errors Bit Mask
++
++                    These errors are reported through the exceptions callback.
++ The values can be or'ed in any combination in the errors mask
++ parameter of the errors report structure.
++
++ These errors can also be passed as a bit-mask to
++ LBC_EnableErrorChecking() or LBC_DisableErrorChecking(),
++ for enabling or disabling error checking.
++ @{
++*//***************************************************************************/
++#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */
++#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */
++#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */
++#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */
++
++#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \
++ LBC_ERR_WRITE_PROTECT | LBC_ERR_CHIP_SELECT)
++ /**< All possible errors */
++/* @} */
++/** @} */ /* end of lbc_exception_grp group */
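++
++/* Editorial sketch (not part of the original sources): the mask bits in
++ * the collection above may be OR'ed freely, e.g. everything except the
++ * bus monitor. LBC_EnableErrorChecking() is named by the documentation
++ * above; its prototype here is an assumption.
++ */
++#if 0 /* illustrative only */
++static void ExampleEnableLbcChecks(t_Handle h_Lbc)
++{
++    uint32_t errMask = LBC_ERR_ALL & ~LBC_ERR_BUS_MONITOR;
++
++    LBC_EnableErrorChecking(h_Lbc, errMask); /* assumed prototype */
++}
++#endif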
++
++#define LBC_INCORRECT_ERROR_REPORT_ERRATA
++
++#define LBC_NUM_OF_BANKS 8
++#define LBC_MAX_CS_SIZE 0x0000000100000000LL /* Up to 4G memory block size */
++#define LBC_PARITY_SUPPORT
++#define LBC_ADDRESS_HOLD_TIME_CTRL
++#define LBC_HIGH_CLK_DIVIDERS
++#define LBC_FCM_AVAILABLE
++
++/*****************************************************************************
++ GPIO INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define GPIO_PORT_OFFSET_0x1000
++
++#define GPIO_NUM_OF_PORTS 3 /**< Number of ports in GPIO module;
++ Each port contains up to 32 I/O pins. */
++
++#define GPIO_VALID_PIN_MASKS \
++ { /* Port A */ 0xFFFFFFFF, \
++ /* Port B */ 0xFFFFFFFF, \
++ /* Port C */ 0xFFFFFFFF }
++
++#define GPIO_VALID_INTR_MASKS \
++ { /* Port A */ 0xFFFFFFFF, \
++ /* Port B */ 0xFFFFFFFF, \
++ /* Port C */ 0xFFFFFFFF }
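++
++/* Editorial sketch (not part of the original sources): the mask macros
++ * above expand to per-port initializer lists, one bit per pin. The
++ * helper and its MSB-first bit numbering are assumptions made only for
++ * illustration.
++ */
++#if 0 /* illustrative only */
++static int ExampleGpioPinIsValid(unsigned int port, unsigned int pin)
++{
++    static const uint32_t validPins[GPIO_NUM_OF_PORTS] = GPIO_VALID_PIN_MASKS;
++
++    return (port < GPIO_NUM_OF_PORTS) && (pin < 32) &&
++           ((validPins[port] & (0x80000000UL >> pin)) != 0);
++}
++#endif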
++
++
++
++#endif /* __PART_INTEGRATION_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/dpaa_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/dpaa_integration_ext.h
+new file mode 100644
+index 00000000..5a8f3583
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/dpaa_integration_ext.h
+@@ -0,0 +1,291 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**
++
++ @File dpaa_integration_ext.h
++
++ @Description   LS1043 FM external definitions and structures.
++*//***************************************************************************/
++#ifndef __DPAA_INTEGRATION_EXT_H
++#define __DPAA_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++
++
++#define DPAA_VERSION 11
++
++/**************************************************************************//**
++ @Description DPAA SW Portals Enumeration.
++*//***************************************************************************/
++typedef enum
++{
++ e_DPAA_SWPORTAL0 = 0,
++ e_DPAA_SWPORTAL1,
++ e_DPAA_SWPORTAL2,
++ e_DPAA_SWPORTAL3,
++ e_DPAA_SWPORTAL4,
++ e_DPAA_SWPORTAL5,
++ e_DPAA_SWPORTAL6,
++ e_DPAA_SWPORTAL7,
++ e_DPAA_SWPORTAL8,
++ e_DPAA_SWPORTAL9,
++ e_DPAA_SWPORTAL10,
++ e_DPAA_SWPORTAL11,
++ e_DPAA_SWPORTAL12,
++ e_DPAA_SWPORTAL13,
++ e_DPAA_SWPORTAL14,
++ e_DPAA_SWPORTAL15,
++ e_DPAA_SWPORTAL16,
++ e_DPAA_SWPORTAL17,
++ e_DPAA_SWPORTAL18,
++ e_DPAA_SWPORTAL19,
++ e_DPAA_SWPORTAL20,
++ e_DPAA_SWPORTAL21,
++ e_DPAA_SWPORTAL22,
++ e_DPAA_SWPORTAL23,
++ e_DPAA_SWPORTAL24,
++ e_DPAA_SWPORTAL_DUMMY_LAST
++} e_DpaaSwPortal;
++
++/**************************************************************************//**
++ @Description DPAA Direct Connect Portals Enumeration.
++*//***************************************************************************/
++typedef enum
++{
++ e_DPAA_DCPORTAL0 = 0,
++ e_DPAA_DCPORTAL1,
++ e_DPAA_DCPORTAL2,
++ e_DPAA_DCPORTAL_DUMMY_LAST
++} e_DpaaDcPortal;
++
++#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
++#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
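++
++/* Editorial note (not part of the original sources): the *_DUMMY_LAST
++ * terminators double as element counts, so all portals can be visited
++ * with a plain loop over the enum range. f_Init is a hypothetical
++ * callback used only for illustration.
++ */
++#if 0 /* illustrative only */
++static void ExampleForEachSwPortal(void (*f_Init)(e_DpaaSwPortal portal))
++{
++    int i;
++
++    for (i = 0; i < DPAA_MAX_NUM_OF_SW_PORTALS; i++)
++        f_Init((e_DpaaSwPortal)i);
++}
++#endif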
++
++/*****************************************************************************
++ QMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define QM_MAX_NUM_OF_POOL_CHANNELS 15 /**< Total number of channels, dedicated and pool */
++#define QM_MAX_NUM_OF_WQ 8 /**< Number of work queues per channel */
++#define QM_MAX_NUM_OF_CGS 256 /**< Congestion groups number */
++#define QM_MAX_NUM_OF_FQIDS (16 * MEGABYTE)
++ /**< FQIDs range - 24 bits */
++
++/**************************************************************************//**
++ @Description Work Queue Channel assignments in QMan.
++*//***************************************************************************/
++typedef enum
++{
++ e_QM_FQ_CHANNEL_SWPORTAL0 = 0x0, /**< Dedicated channels serviced by software portals 0 to 24 */
++ e_QM_FQ_CHANNEL_SWPORTAL1,
++ e_QM_FQ_CHANNEL_SWPORTAL2,
++ e_QM_FQ_CHANNEL_SWPORTAL3,
++ e_QM_FQ_CHANNEL_SWPORTAL4,
++ e_QM_FQ_CHANNEL_SWPORTAL5,
++ e_QM_FQ_CHANNEL_SWPORTAL6,
++ e_QM_FQ_CHANNEL_SWPORTAL7,
++ e_QM_FQ_CHANNEL_SWPORTAL8,
++ e_QM_FQ_CHANNEL_SWPORTAL9,
++ e_QM_FQ_CHANNEL_SWPORTAL10,
++ e_QM_FQ_CHANNEL_SWPORTAL11,
++ e_QM_FQ_CHANNEL_SWPORTAL12,
++ e_QM_FQ_CHANNEL_SWPORTAL13,
++ e_QM_FQ_CHANNEL_SWPORTAL14,
++ e_QM_FQ_CHANNEL_SWPORTAL15,
++ e_QM_FQ_CHANNEL_SWPORTAL16,
++ e_QM_FQ_CHANNEL_SWPORTAL17,
++ e_QM_FQ_CHANNEL_SWPORTAL18,
++ e_QM_FQ_CHANNEL_SWPORTAL19,
++ e_QM_FQ_CHANNEL_SWPORTAL20,
++ e_QM_FQ_CHANNEL_SWPORTAL21,
++ e_QM_FQ_CHANNEL_SWPORTAL22,
++ e_QM_FQ_CHANNEL_SWPORTAL23,
++ e_QM_FQ_CHANNEL_SWPORTAL24,
++
++ e_QM_FQ_CHANNEL_POOL1 = 0x401, /**< Pool channels that can be serviced by any of the software portals */
++ e_QM_FQ_CHANNEL_POOL2,
++ e_QM_FQ_CHANNEL_POOL3,
++ e_QM_FQ_CHANNEL_POOL4,
++ e_QM_FQ_CHANNEL_POOL5,
++ e_QM_FQ_CHANNEL_POOL6,
++ e_QM_FQ_CHANNEL_POOL7,
++ e_QM_FQ_CHANNEL_POOL8,
++ e_QM_FQ_CHANNEL_POOL9,
++ e_QM_FQ_CHANNEL_POOL10,
++ e_QM_FQ_CHANNEL_POOL11,
++ e_QM_FQ_CHANNEL_POOL12,
++ e_QM_FQ_CHANNEL_POOL13,
++ e_QM_FQ_CHANNEL_POOL14,
++ e_QM_FQ_CHANNEL_POOL15,
++
++ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x800, /**< Dedicated channels serviced by Direct Connect Portal 0:
++ connected to FMan 0; assigned in incrementing order to
++ each sub-portal (SP) in the portal */
++ e_QM_FQ_CHANNEL_FMAN0_SP1,
++ e_QM_FQ_CHANNEL_FMAN0_SP2,
++ e_QM_FQ_CHANNEL_FMAN0_SP3,
++ e_QM_FQ_CHANNEL_FMAN0_SP4,
++ e_QM_FQ_CHANNEL_FMAN0_SP5,
++ e_QM_FQ_CHANNEL_FMAN0_SP6,
++ e_QM_FQ_CHANNEL_FMAN0_SP7,
++ e_QM_FQ_CHANNEL_FMAN0_SP8,
++ e_QM_FQ_CHANNEL_FMAN0_SP9,
++ e_QM_FQ_CHANNEL_FMAN0_SP10,
++ e_QM_FQ_CHANNEL_FMAN0_SP11,
++ e_QM_FQ_CHANNEL_FMAN0_SP12,
++ e_QM_FQ_CHANNEL_FMAN0_SP13,
++ e_QM_FQ_CHANNEL_FMAN0_SP14,
++ e_QM_FQ_CHANNEL_FMAN0_SP15,
++
++ e_QM_FQ_CHANNEL_RMAN_SP0 = 0x820, /**< Dedicated channels serviced by Direct Connect Portal 1: connected to RMan */
++ e_QM_FQ_CHANNEL_RMAN_SP1,
++
++ e_QM_FQ_CHANNEL_CAAM = 0x840 /**< Dedicated channel serviced by Direct Connect Portal 2:
++ connected to SEC */
++} e_QmFQChannel;
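++
++/* Editorial sketch (not part of the original sources): since dedicated
++ * channels are assigned in incrementing order from each base value
++ * (0x0 for software portals, 0x800 for FMan 0 sub-portals, per the
++ * comments above), an index maps to its channel by simple addition.
++ * Both helpers are illustrative, not SDK API.
++ */
++#if 0 /* illustrative only */
++static e_QmFQChannel ExampleSwPortalChannel(e_DpaaSwPortal portal)
++{
++    return (e_QmFQChannel)(e_QM_FQ_CHANNEL_SWPORTAL0 + portal);
++}
++
++static e_QmFQChannel ExampleFman0SubPortalChannel(unsigned int subPortal)
++{
++    return (e_QmFQChannel)(e_QM_FQ_CHANNEL_FMAN0_SP0 + subPortal);
++}
++#endif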
++
++/*****************************************************************************
++ BMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define BM_MAX_NUM_OF_POOLS 64 /**< Number of buffers pools */
++
++/*****************************************************************************
++ SEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define SEC_NUM_OF_DECOS 3
++#define SEC_ALL_DECOS_MASK 0x00000003
++
++
++/*****************************************************************************
++ FM INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define INTG_MAX_NUM_OF_FM 2
++
++/* Ports defines */
++#define FM_MAX_NUM_OF_1G_MACS 6
++#define FM_MAX_NUM_OF_10G_MACS 2
++#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
++#define FM_MAX_NUM_OF_OH_PORTS 6
++
++#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
++
++#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
++
++#define FM_PORT_MAX_NUM_OF_EXT_POOLS 4 /**< Number of external BM pools per Rx port */
++#define FM_PORT_NUM_OF_CONGESTION_GRPS 256 /**< Total number of congestion groups in QM */
++#define FM_MAX_NUM_OF_SUB_PORTALS 16
++#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 0
++
++#define FM_VSP_MAX_NUM_OF_ENTRIES 64
++#define FM_MAX_NUM_OF_PFC_PRIORITIES 8
++
++/* RAMs defines */
++#define FM_MURAM_SIZE (384 * KILOBYTE)
++#define FM_IRAM_SIZE(major, minor) (64 * KILOBYTE)
++#define FM_NUM_OF_CTRL 4
++
++/* PCD defines */
++#define FM_PCD_PLCR_NUM_ENTRIES 256 /**< Total number of policer profiles */
++#define FM_PCD_KG_NUM_OF_SCHEMES 32 /**< Total number of KG schemes */
++#define FM_PCD_MAX_NUM_OF_CLS_PLANS 256 /**< Number of classification plan entries. */
++#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000600 /**< Number of bytes saved for patches */
++#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
++
++/* RTC defines */
++#define FM_RTC_NUM_OF_ALARMS 2 /**< RTC number of alarms */
++#define FM_RTC_NUM_OF_PERIODIC_PULSES 3 /**< RTC number of periodic pulses */
++#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 /**< RTC number of external triggers */
++
++/* QMI defines */
++#define QMI_MAX_NUM_OF_TNUMS 64
++#define QMI_DEF_TNUMS_THRESH 32
++/* FPM defines */
++#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
++
++/* DMA defines */
++#define DMA_THRESH_MAX_COMMQ 83
++#define DMA_THRESH_MAX_BUF 127
++
++/* BMI defines */
++#define BMI_MAX_NUM_OF_TASKS 128
++#define BMI_MAX_NUM_OF_DMAS 84
++
++#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
++#define PORT_MAX_WEIGHT 16
++
++#define FM_CHECK_PORT_RESTRICTIONS(__validPorts, __newPortIndx) TRUE
++
++/* Unique T4240 */
++#define FM_OP_OPEN_DMA_MIN_LIMIT
++#define FM_NO_RESTRICT_ON_ACCESS_RSRC
++#define FM_NO_OP_OBSERVED_POOLS
++#define FM_FRAME_END_PARAMS_FOR_OP
++#define FM_DEQ_PIPELINE_PARAMS_FOR_OP
++#define FM_QMI_NO_SINGLE_ECC_EXCEPTION
++
++#define FM_NO_GUARANTEED_RESET_VALUES
++
++/* FM errata */
++#define FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
++#define FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127
++#define FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320
++#define FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675
++#define FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
++
++#define FM_BCB_ERRATA_BMI_SW001
++#define FM_LEN_CHECK_ERRATA_FMAN_SW002
++#define FM_AID_MODE_NO_TNUM_SW005 /* refer to pdm TKT068794 - only support of port_id on aid */
++#define FM_ERROR_VSP_NO_MATCH_SW006 /* refer to pdm TKT174304 - no match between errorQ and VSP */
++
++/*****************************************************************************
++ RMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define RM_MAX_NUM_OF_IB 4 /**< Number of inbound blocks */
++#define RM_NUM_OF_IBCU          8          /**< Number of classification units in an inbound block */
++
++/* RMan errata */
++#define RM_ERRONEOUS_ACK_ERRATA_RMAN_A006756
++
++/*****************************************************************************
++ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define NUM_OF_RX_SC 16
++#define NUM_OF_TX_SC 16
++
++#define NUM_OF_SA_PER_RX_SC 2
++#define NUM_OF_SA_PER_TX_SC 2
++
++#endif /* __DPAA_INTEGRATION_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_ext.h
+new file mode 100644
+index 00000000..4787e19c
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_ext.h
+@@ -0,0 +1,64 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++
++ @File part_ext.h
++
++ @Description Definitions for the part (integration) module.
++*//***************************************************************************/
++
++#ifndef __PART_EXT_H
++#define __PART_EXT_H
++
++#include "std_ext.h"
++#include "part_integration_ext.h"
++
++#if !(defined(LS1043))
++#error "unable to proceed without chip-definition"
++#endif
++
++
++/**************************************************************************//*
++ @Description Part data structure - must be contained in any integration
++ data structure.
++*//***************************************************************************/
++typedef struct t_Part
++{
++ uintptr_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
++ /**< Returns the address of the module's memory map base. */
++ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress);
++ /**< Returns the module's ID according to its memory map base. */
++} t_Part;
++
++
++#endif /* __PART_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_integration_ext.h
+new file mode 100644
+index 00000000..85ba2a47
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_integration_ext.h
+@@ -0,0 +1,185 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**
++
++ @File part_integration_ext.h
++
++ @Description   LS1043 external definitions and structures.
++*//***************************************************************************/
++#ifndef __PART_INTEGRATION_EXT_H
++#define __PART_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++#include "ddr_std_ext.h"
++#include "enet_ext.h"
++#include "dpaa_integration_ext.h"
++
++
++/**************************************************************************//**
++ @Group         LS1043_chip_id LS1043 Application Programming Interface
++
++ @Description   LS1043 Chip functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++#define INTG_MAX_NUM_OF_CORES 4
++
++/**************************************************************************//**
++ @Description Module types.
++*//***************************************************************************/
++typedef enum e_ModuleId
++{
++ e_MODULE_ID_DUART_1 = 0,
++ e_MODULE_ID_DUART_2,
++ e_MODULE_ID_DUART_3,
++ e_MODULE_ID_DUART_4,
++ e_MODULE_ID_LAW,
++ e_MODULE_ID_IFC,
++ e_MODULE_ID_PAMU,
++ e_MODULE_ID_QM, /**< Queue manager module */
++ e_MODULE_ID_BM, /**< Buffer manager module */
++ e_MODULE_ID_QM_CE_PORTAL_0,
++ e_MODULE_ID_QM_CI_PORTAL_0,
++ e_MODULE_ID_QM_CE_PORTAL_1,
++ e_MODULE_ID_QM_CI_PORTAL_1,
++ e_MODULE_ID_QM_CE_PORTAL_2,
++ e_MODULE_ID_QM_CI_PORTAL_2,
++ e_MODULE_ID_QM_CE_PORTAL_3,
++ e_MODULE_ID_QM_CI_PORTAL_3,
++ e_MODULE_ID_QM_CE_PORTAL_4,
++ e_MODULE_ID_QM_CI_PORTAL_4,
++ e_MODULE_ID_QM_CE_PORTAL_5,
++ e_MODULE_ID_QM_CI_PORTAL_5,
++ e_MODULE_ID_QM_CE_PORTAL_6,
++ e_MODULE_ID_QM_CI_PORTAL_6,
++ e_MODULE_ID_QM_CE_PORTAL_7,
++ e_MODULE_ID_QM_CI_PORTAL_7,
++ e_MODULE_ID_QM_CE_PORTAL_8,
++ e_MODULE_ID_QM_CI_PORTAL_8,
++ e_MODULE_ID_QM_CE_PORTAL_9,
++ e_MODULE_ID_QM_CI_PORTAL_9,
++ e_MODULE_ID_BM_CE_PORTAL_0,
++ e_MODULE_ID_BM_CI_PORTAL_0,
++ e_MODULE_ID_BM_CE_PORTAL_1,
++ e_MODULE_ID_BM_CI_PORTAL_1,
++ e_MODULE_ID_BM_CE_PORTAL_2,
++ e_MODULE_ID_BM_CI_PORTAL_2,
++ e_MODULE_ID_BM_CE_PORTAL_3,
++ e_MODULE_ID_BM_CI_PORTAL_3,
++ e_MODULE_ID_BM_CE_PORTAL_4,
++ e_MODULE_ID_BM_CI_PORTAL_4,
++ e_MODULE_ID_BM_CE_PORTAL_5,
++ e_MODULE_ID_BM_CI_PORTAL_5,
++ e_MODULE_ID_BM_CE_PORTAL_6,
++ e_MODULE_ID_BM_CI_PORTAL_6,
++ e_MODULE_ID_BM_CE_PORTAL_7,
++ e_MODULE_ID_BM_CI_PORTAL_7,
++ e_MODULE_ID_BM_CE_PORTAL_8,
++ e_MODULE_ID_BM_CI_PORTAL_8,
++ e_MODULE_ID_BM_CE_PORTAL_9,
++ e_MODULE_ID_BM_CI_PORTAL_9,
++ e_MODULE_ID_FM, /**< Frame manager module */
++ e_MODULE_ID_FM_RTC, /**< FM Real-Time-Clock */
++ e_MODULE_ID_FM_MURAM, /**< FM Multi-User-RAM */
++ e_MODULE_ID_FM_BMI, /**< FM BMI block */
++ e_MODULE_ID_FM_QMI, /**< FM QMI block */
++ e_MODULE_ID_FM_PARSER, /**< FM parser block */
++ e_MODULE_ID_FM_PORT_HO1, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO2, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO3, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO4, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO5, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO6, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO7, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_1GRx1, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx2, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx3, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx4, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx5, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx6, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_10GRx1, /**< FM Rx 10G MAC port block */
++ e_MODULE_ID_FM_PORT_10GRx2, /**< FM Rx 10G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx1, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx2, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx3, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx4, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx5, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx6, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_10GTx1, /**< FM Tx 10G MAC port block */
++ e_MODULE_ID_FM_PORT_10GTx2, /**< FM Tx 10G MAC port block */
++ e_MODULE_ID_FM_PLCR, /**< FM Policer */
++ e_MODULE_ID_FM_KG, /**< FM Keygen */
++ e_MODULE_ID_FM_DMA, /**< FM DMA */
++ e_MODULE_ID_FM_FPM, /**< FM FPM */
++ e_MODULE_ID_FM_IRAM, /**< FM Instruction-RAM */
++ e_MODULE_ID_FM_1GMDIO, /**< FM 1G MDIO MAC */
++ e_MODULE_ID_FM_10GMDIO, /**< FM 10G MDIO */
++ e_MODULE_ID_FM_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
++ e_MODULE_ID_FM_1GMAC1, /**< FM 1G MAC #1 */
++ e_MODULE_ID_FM_1GMAC2, /**< FM 1G MAC #2 */
++ e_MODULE_ID_FM_1GMAC3, /**< FM 1G MAC #3 */
++ e_MODULE_ID_FM_1GMAC4, /**< FM 1G MAC #4 */
++ e_MODULE_ID_FM_1GMAC5, /**< FM 1G MAC #5 */
++ e_MODULE_ID_FM_1GMAC6, /**< FM 1G MAC #6 */
++ e_MODULE_ID_FM_10GMAC1, /**< FM 10G MAC */
++ e_MODULE_ID_FM_10GMAC2, /**< FM 10G MAC */
++
++ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
++ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
++ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
++ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
++ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
++ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
++ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
++ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
++ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
++ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
++ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
++ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
++
++ e_MODULE_ID_PIC, /**< PIC */
++ e_MODULE_ID_GPIO, /**< GPIO */
++ e_MODULE_ID_SERDES, /**< SERDES */
++ e_MODULE_ID_CPC_1, /**< CoreNet-Platform-Cache 1 */
++ e_MODULE_ID_CPC_2, /**< CoreNet-Platform-Cache 2 */
++
++ e_MODULE_ID_SRIO_PORTS, /**< RapidIO controller */
++
++ e_MODULE_ID_DUMMY_LAST
++} e_ModuleId;
++
++#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
++
++
++#endif /* __PART_INTEGRATION_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/dpaa_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/dpaa_integration_ext.h
+new file mode 100644
+index 00000000..7b5390de
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/dpaa_integration_ext.h
+@@ -0,0 +1,213 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**
++
++ @File dpaa_integration_ext.h
++
++ @Description P1023 FM external definitions and structures.
++*//***************************************************************************/
++#ifndef __DPAA_INTEGRATION_EXT_H
++#define __DPAA_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++
++
++#define DPAA_VERSION 10
++
++typedef enum e_DpaaSwPortal {
++ e_DPAA_SWPORTAL0 = 0,
++ e_DPAA_SWPORTAL1,
++ e_DPAA_SWPORTAL2,
++ e_DPAA_SWPORTAL_DUMMY_LAST
++} e_DpaaSwPortal;
++
++typedef enum {
++ e_DPAA_DCPORTAL0 = 0,
++ e_DPAA_DCPORTAL2,
++ e_DPAA_DCPORTAL_DUMMY_LAST
++} e_DpaaDcPortal;
++
++#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
++#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
++
++/*****************************************************************************
++ QMAN INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define QM_MAX_NUM_OF_POOL_CHANNELS 3
++#define QM_MAX_NUM_OF_WQ 8
++#define QM_MAX_NUM_OF_SWP_AS 2
++#define QM_MAX_NUM_OF_CGS 64
++#define QM_MAX_NUM_OF_FQIDS (16*MEGABYTE)
++
++typedef enum {
++ e_QM_FQ_CHANNEL_SWPORTAL0 = 0,
++ e_QM_FQ_CHANNEL_SWPORTAL1,
++ e_QM_FQ_CHANNEL_SWPORTAL2,
++
++ e_QM_FQ_CHANNEL_POOL1 = 0x21,
++ e_QM_FQ_CHANNEL_POOL2,
++ e_QM_FQ_CHANNEL_POOL3,
++
++ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x40,
++ e_QM_FQ_CHANNEL_FMAN0_SP1,
++ e_QM_FQ_CHANNEL_FMAN0_SP2,
++ e_QM_FQ_CHANNEL_FMAN0_SP3,
++ e_QM_FQ_CHANNEL_FMAN0_SP4,
++ e_QM_FQ_CHANNEL_FMAN0_SP5,
++ e_QM_FQ_CHANNEL_FMAN0_SP6,
++
++
++ e_QM_FQ_CHANNEL_CAAM = 0x80
++} e_QmFQChannel;
++
++/*****************************************************************************
++ BMAN INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define BM_MAX_NUM_OF_POOLS 8
++
++/*****************************************************************************
++ SEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define SEC_NUM_OF_DECOS 2
++#define SEC_ALL_DECOS_MASK 0x00000003
++#define SEC_RNGB
++#define SEC_NO_ESP_TRAILER_REMOVAL
++
++/*****************************************************************************
++ FM INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define INTG_MAX_NUM_OF_FM 1
++
++/* Ports defines */
++#define FM_MAX_NUM_OF_1G_MACS 2
++#define FM_MAX_NUM_OF_10G_MACS 0
++#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
++#define FM_MAX_NUM_OF_OH_PORTS 5
++
++#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
++
++#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
++
++#define FM_MAX_NUM_OF_MACSECS 1
++
++#define FM_MACSEC_SUPPORT
++
++#define FM_LOW_END_RESTRICTION /* prevents the use of TX port 1 with OP port 0 */
++
++#define FM_PORT_MAX_NUM_OF_EXT_POOLS 4 /**< Number of external BM pools per Rx port */
++#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 2 /**< Number of Offline parsing port external BM pools per Rx port */
++#define FM_PORT_NUM_OF_CONGESTION_GRPS 32 /**< Total number of congestion groups in QM */
++#define FM_MAX_NUM_OF_SUB_PORTALS 7
++
++/* Rams defines */
++#define FM_MURAM_SIZE (64*KILOBYTE)
++#define FM_IRAM_SIZE(major, minor) (32 * KILOBYTE)
++#define FM_NUM_OF_CTRL 2
++
++/* PCD defines */
++#define FM_PCD_PLCR_NUM_ENTRIES 32 /**< Total number of policer profiles */
++#define FM_PCD_KG_NUM_OF_SCHEMES 16 /**< Total number of KG schemes */
++#define FM_PCD_MAX_NUM_OF_CLS_PLANS 128 /**< Number of classification plan entries. */
++#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000240 /**< Number of bytes saved for patches */
++#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
++
++/* RTC defines */
++#define FM_RTC_NUM_OF_ALARMS 2
++#define FM_RTC_NUM_OF_PERIODIC_PULSES 2
++#define FM_RTC_NUM_OF_EXT_TRIGGERS 2
++
++/* QMI defines */
++#define QMI_MAX_NUM_OF_TNUMS 15
++
++/* FPM defines */
++#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
++
++/* DMA defines */
++#define DMA_THRESH_MAX_COMMQ 15
++#define DMA_THRESH_MAX_BUF 7
++
++/* BMI defines */
++#define BMI_MAX_NUM_OF_TASKS 64
++#define BMI_MAX_NUM_OF_DMAS 16
++#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
++#define PORT_MAX_WEIGHT 4
++
++/*****************************************************************************
++ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define NUM_OF_RX_SC 16
++#define NUM_OF_TX_SC 16
++
++#define NUM_OF_SA_PER_RX_SC 2
++#define NUM_OF_SA_PER_TX_SC 2
++
++/**************************************************************************//**
++ @Description Enum for inter-module interrupts registration
++*//***************************************************************************/
++
++/* P1023 unique features */
++#define FM_QMI_NO_ECC_EXCEPTIONS
++#define FM_CSI_CFED_LIMIT
++#define FM_PEDANTIC_DMA
++#define FM_QMI_NO_DEQ_OPTIONS_SUPPORT
++#define FM_FIFO_ALLOCATION_ALG
++#define FM_DEQ_PIPELINE_PARAMS_FOR_OP
++#define FM_HAS_TOTAL_DMAS
++#define FM_KG_NO_IPPID_SUPPORT
++#define FM_NO_GUARANTEED_RESET_VALUES
++#define FM_MAC_RESET
++
++/* FM erratas */
++#define FM_RX_PREAM_4_ERRATA_DTSEC_A001
++#define FM_MAGIC_PACKET_UNRECOGNIZED_ERRATA_DTSEC2 /* No implementation, Out of LLD scope */
++
++#define FM_DEBUG_TRACE_FMAN_A004 /* No implementation, Out of LLD scope */
++#define FM_INT_BUF_LEAK_FMAN_A005 /* No implementation, Out of LLD scope. App must avoid S/G */
++
++#define FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839
++
++/* #define FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
++
++/*
++TKT056919 - axi12axi0 can hang if read request follows the single byte write on the very next cycle
++TKT038900 - FM dma lockup occur due to AXI slave protocol violation
++*/
++#define FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
++
++
++#endif /* __DPAA_INTEGRATION_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_ext.h
+new file mode 100644
+index 00000000..6814d5fb
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_ext.h
+@@ -0,0 +1,82 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++
++ @File part_ext.h
++
++ @Description Definitions for the part (integration) module.
++*//***************************************************************************/
++
++#ifndef __PART_EXT_H
++#define __PART_EXT_H
++
++#include "std_ext.h"
++#include "part_integration_ext.h"
++
++
++#if !(defined(MPC8306) || \
++ defined(MPC8309) || \
++ defined(MPC834x) || \
++ defined(MPC836x) || \
++ defined(MPC832x) || \
++ defined(MPC837x) || \
++ defined(MPC8568) || \
++ defined(MPC8569) || \
++ defined(P1020) || \
++ defined(P1021) || \
++ defined(P1022) || \
++ defined(P1023) || \
++ defined(P2020) || \
++ defined(P3041) || \
++ defined(P4080) || \
++ defined(P5020) || \
++ defined(MSC814x))
++#error "unable to proceed without chip-definition"
++#endif
++
++
++/**************************************************************************//*
++ @Description Part data structure - must be contained in any integration
++ data structure.
++*//***************************************************************************/
++typedef struct t_Part
++{
++ uint64_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
++ /**< Returns the address of the module's memory map base. */
++ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uint64_t baseAddress);
++ /**< Returns the module's ID according to its memory map base. */
++} t_Part;
++
++
++#endif /* __PART_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_integration_ext.h
+new file mode 100644
+index 00000000..e838283d
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_integration_ext.h
+@@ -0,0 +1,635 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File part_integration_ext.h
++
++ @Description P1023 external definitions and structures.
++*//***************************************************************************/
++#ifndef __PART_INTEGRATION_EXT_H
++#define __PART_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++#include "dpaa_integration_ext.h"
++
++
++/**************************************************************************//**
++ @Group 1023_chip_id P1023 Application Programming Interface
++
++ @Description   P1023 Chip functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++#define INTG_MAX_NUM_OF_CORES 2
++
++
++/**************************************************************************//**
++ @Description Module types.
++*//***************************************************************************/
++typedef enum e_ModuleId
++{
++ e_MODULE_ID_LAW, /**< Local Access module */
++ e_MODULE_ID_ECM, /**< e500 Coherency Module */
++ e_MODULE_ID_DDR, /**< DDR memory controller */
++ e_MODULE_ID_I2C_1, /**< I2C 1 */
++    e_MODULE_ID_I2C_2,              /**< I2C 2 */
++ e_MODULE_ID_DUART_1, /**< DUART module 1 */
++ e_MODULE_ID_DUART_2, /**< DUART module 2 */
++ e_MODULE_ID_LBC, /**< Local bus memory controller module */
++ e_MODULE_ID_PCIE_1, /**< PCI Express 1 controller module */
++ e_MODULE_ID_PCIE_ATMU_1, /**< PCI 1 ATMU Window */
++ e_MODULE_ID_PCIE_2, /**< PCI Express 2 controller module */
++ e_MODULE_ID_PCIE_ATMU_2, /**< PCI 2 ATMU Window */
++ e_MODULE_ID_PCIE_3, /**< PCI Express 3 controller module */
++ e_MODULE_ID_PCIE_ATMU_3, /**< PCI 3 ATMU Window */
++ e_MODULE_ID_MSI, /**< MSI registers */
++ e_MODULE_ID_L2_SRAM, /**< L2/SRAM Memory-Mapped controller module */
++ e_MODULE_ID_DMA_1, /**< DMA controller 1 */
++ e_MODULE_ID_DMA_2, /**< DMA controller 2 */
++ e_MODULE_ID_EPIC, /**< Programmable interrupt controller */
++ e_MODULE_ID_ESPI, /**< ESPI module */
++ e_MODULE_ID_GPIO, /**< General Purpose I/O */
++ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
++ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
++ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
++ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
++ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
++ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
++ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
++ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
++ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
++ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
++ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
++ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
++ e_MODULE_ID_USB_DR_1, /**< USB 2.0 module 1 */
++ e_MODULE_ID_USB_DR_2, /**< USB 2.0 module 2 */
++ e_MODULE_ID_ETSEC_MII_MNG, /**< MII MNG registers */
++ e_MODULE_ID_ETSEC_1, /**< ETSEC module 1 */
++ e_MODULE_ID_ETSEC_2, /**< ETSEC module 2 */
++    e_MODULE_ID_GUTS,               /**< Global Utilities */
++ e_MODULE_ID_PM, /**< Performance Monitor module */
++ e_MODULE_ID_QM, /**< Queue manager module */
++ e_MODULE_ID_BM, /**< Buffer manager module */
++ e_MODULE_ID_QM_CE_PORTAL,
++ e_MODULE_ID_QM_CI_PORTAL,
++ e_MODULE_ID_BM_CE_PORTAL,
++ e_MODULE_ID_BM_CI_PORTAL,
++ e_MODULE_ID_FM, /**< Frame manager #1 module */
++ e_MODULE_ID_FM_RTC, /**< FM Real-Time-Clock */
++ e_MODULE_ID_FM_MURAM, /**< FM Multi-User-RAM */
++ e_MODULE_ID_FM_BMI, /**< FM BMI block */
++ e_MODULE_ID_FM_QMI, /**< FM QMI block */
++ e_MODULE_ID_FM_PRS, /**< FM parser block */
++ e_MODULE_ID_FM_PORT_HO0, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO1, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO2, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO3, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_HO4, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM_PORT_1GRx0, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GRx1, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx0, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PORT_1GTx1, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM_PLCR, /**< FM Policer */
++ e_MODULE_ID_FM_KG, /**< FM Keygen */
++ e_MODULE_ID_FM_DMA, /**< FM DMA */
++ e_MODULE_ID_FM_FPM, /**< FM FPM */
++ e_MODULE_ID_FM_IRAM, /**< FM Instruction-RAM */
++ e_MODULE_ID_FM_1GMDIO0, /**< FM 1G MDIO MAC 0*/
++ e_MODULE_ID_FM_1GMDIO1, /**< FM 1G MDIO MAC 1*/
++ e_MODULE_ID_FM_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
++ e_MODULE_ID_FM_RISC0, /**< FM risc #0 */
++ e_MODULE_ID_FM_RISC1, /**< FM risc #1 */
++ e_MODULE_ID_FM_1GMAC0, /**< FM 1G MAC #0 */
++ e_MODULE_ID_FM_1GMAC1, /**< FM 1G MAC #1 */
++ e_MODULE_ID_FM_MACSEC, /**< FM MACSEC */
++
++ e_MODULE_ID_DUMMY_LAST
++} e_ModuleId;
++
++#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
++
++
++#define P1023_OFFSET_LAW 0x00000C08
++#define P1023_OFFSET_ECM 0x00001000
++#define P1023_OFFSET_DDR 0x00002000
++#define P1023_OFFSET_I2C1 0x00003000
++#define P1023_OFFSET_I2C2 0x00003100
++#define P1023_OFFSET_DUART1 0x00004500
++#define P1023_OFFSET_DUART2 0x00004600
++#define P1023_OFFSET_LBC 0x00005000
++#define P1023_OFFSET_ESPI 0x00007000
++#define P1023_OFFSET_PCIE2 0x00009000
++#define P1023_OFFSET_PCIE2_ATMU 0x00009C00
++#define P1023_OFFSET_PCIE1 0x0000A000
++#define P1023_OFFSET_PCIE1_ATMU 0x0000AC00
++#define P1023_OFFSET_PCIE3 0x0000B000
++#define P1023_OFFSET_PCIE3_ATMU 0x0000BC00
++#define P1023_OFFSET_DMA2 0x0000C100
++#define P1023_OFFSET_GPIO 0x0000F000
++#define P1023_OFFSET_L2_SRAM 0x00020000
++#define P1023_OFFSET_DMA1 0x00021100
++#define P1023_OFFSET_USB1 0x00022000
++#define P1023_OFFSET_SEC_GEN 0x00030000
++#define P1023_OFFSET_SEC_JQ0 0x00031000
++#define P1023_OFFSET_SEC_JQ1 0x00032000
++#define P1023_OFFSET_SEC_JQ2 0x00033000
++#define P1023_OFFSET_SEC_JQ3 0x00034000
++#define P1023_OFFSET_SEC_RTIC 0x00036000
++#define P1023_OFFSET_SEC_QI 0x00037000
++#define P1023_OFFSET_SEC_DECO0_CCB0 0x00038000
++#define P1023_OFFSET_SEC_DECO1_CCB1 0x00039000
++#define P1023_OFFSET_SEC_DECO2_CCB2 0x0003a000
++#define P1023_OFFSET_SEC_DECO3_CCB3 0x0003b000
++#define P1023_OFFSET_SEC_DECO4_CCB4 0x0003c000
++#define P1023_OFFSET_PIC 0x00040000
++#define P1023_OFFSET_MSI 0x00041600
++#define P1023_OFFSET_AXI 0x00081000
++#define P1023_OFFSET_QM 0x00088000
++#define P1023_OFFSET_BM 0x0008A000
++#define P1022_OFFSET_PM 0x000E1000
++
++#define P1023_OFFSET_GUTIL 0x000E0000
++#define P1023_OFFSET_PM 0x000E1000
++#define P1023_OFFSET_DEBUG 0x000E2000
++#define P1023_OFFSET_SERDES 0x000E3000
++#define P1023_OFFSET_ROM 0x000F0000
++#define P1023_OFFSET_FM 0x00100000
++
++#define P1023_OFFSET_FM_MURAM (P1023_OFFSET_FM + 0x00000000)
++#define P1023_OFFSET_FM_BMI (P1023_OFFSET_FM + 0x00080000)
++#define P1023_OFFSET_FM_QMI (P1023_OFFSET_FM + 0x00080400)
++#define P1023_OFFSET_FM_PRS (P1023_OFFSET_FM + 0x00080800)
++#define P1023_OFFSET_FM_PORT_HO0 (P1023_OFFSET_FM + 0x00081000)
++#define P1023_OFFSET_FM_PORT_HO1 (P1023_OFFSET_FM + 0x00082000)
++#define P1023_OFFSET_FM_PORT_HO2 (P1023_OFFSET_FM + 0x00083000)
++#define P1023_OFFSET_FM_PORT_HO3 (P1023_OFFSET_FM + 0x00084000)
++#define P1023_OFFSET_FM_PORT_HO4 (P1023_OFFSET_FM + 0x00085000)
++#define P1023_OFFSET_FM_PORT_1GRX0 (P1023_OFFSET_FM + 0x00088000)
++#define P1023_OFFSET_FM_PORT_1GRX1 (P1023_OFFSET_FM + 0x00089000)
++#define P1023_OFFSET_FM_PORT_1GTX0 (P1023_OFFSET_FM + 0x000A8000)
++#define P1023_OFFSET_FM_PORT_1GTX1 (P1023_OFFSET_FM + 0x000A9000)
++#define P1023_OFFSET_FM_PLCR (P1023_OFFSET_FM + 0x000C0000)
++#define P1023_OFFSET_FM_KG (P1023_OFFSET_FM + 0x000C1000)
++#define P1023_OFFSET_FM_DMA (P1023_OFFSET_FM + 0x000C2000)
++#define P1023_OFFSET_FM_FPM (P1023_OFFSET_FM + 0x000C3000)
++#define P1023_OFFSET_FM_IRAM (P1023_OFFSET_FM + 0x000C4000)
++#define P1023_OFFSET_FM_PRS_IRAM (P1023_OFFSET_FM + 0x000C7000)
++#define P1023_OFFSET_FM_RISC0 (P1023_OFFSET_FM + 0x000D0000)
++#define P1023_OFFSET_FM_RISC1 (P1023_OFFSET_FM + 0x000D0400)
++#define P1023_OFFSET_FM_MACSEC (P1023_OFFSET_FM + 0x000D8000)
++#define P1023_OFFSET_FM_1GMAC0 (P1023_OFFSET_FM + 0x000E0000)
++#define P1023_OFFSET_FM_1GMDIO0 (P1023_OFFSET_FM + 0x000E1120)
++#define P1023_OFFSET_FM_1GMAC1 (P1023_OFFSET_FM + 0x000E2000)
++#define P1023_OFFSET_FM_1GMDIO1 (P1023_OFFSET_FM + 0x000E3000)
++#define P1023_OFFSET_FM_RTC (P1023_OFFSET_FM + 0x000FE000)
++
++/* Offsets relative to QM or BM portals base */
++#define P1023_OFFSET_PORTALS_CE_AREA 0x00000000 /* cache enabled area */
++#define P1023_OFFSET_PORTALS_CI_AREA 0x00100000 /* cache inhibited area */
++
++#define P1023_OFFSET_PORTALS_CE(portal) (P1023_OFFSET_PORTALS_CE_AREA + 0x4000 * (portal))
++#define P1023_OFFSET_PORTALS_CI(portal) (P1023_OFFSET_PORTALS_CI_AREA + 0x1000 * (portal))
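++
++#if 0 /* Usage sketch (illustrative, not part of the original sources): the
++ * per-portal offsets are fixed strides from the portals base; e.g. for
++ * software portal 3 the macros above evaluate as follows. */
++static __inline__ void example_portal_offsets(void)
++{
++    uint32_t ceOffset = P1023_OFFSET_PORTALS_CE(3); /* 0x0000C000 */
++    uint32_t ciOffset = P1023_OFFSET_PORTALS_CI(3); /* 0x00103000 */
++    (void)ceOffset; (void)ciOffset;
++}
++#endif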
++
++/**************************************************************************//**
++ @Description Transaction source ID (for memory controllers error reporting).
++*//***************************************************************************/
++typedef enum e_TransSrc
++{
++ e_TRANS_SRC_PCIE_2 = 0x01, /**< PCIe port 2 */
++ e_TRANS_SRC_PCIE_1 = 0x02, /**< PCIe port 1 */
++ e_TRANS_SRC_PCIE_3 = 0x03, /**< PCIe port 3 */
++ e_TRANS_SRC_LBC = 0x04, /**< Enhanced local bus */
++ e_TRANS_SRC_DPAA_SW_PORTALS = 0x0E, /**< DPAA software portals or SRAM */
++ e_TRANS_SRC_DDR = 0x0F, /**< DDR controller */
++ e_TRANS_SRC_CORE_INS_FETCH = 0x10, /**< Processor (instruction) */
++ e_TRANS_SRC_CORE_DATA = 0x11, /**< Processor (data) */
++ e_TRANS_SRC_DMA = 0x15 /**< DMA */
++} e_TransSrc;
++
++/**************************************************************************//**
++ @Description Local Access Window Target interface ID
++*//***************************************************************************/
++typedef enum e_P1023LawTargetId
++{
++ e_P1023_LAW_TARGET_PCIE_2 = 0x01, /**< PCI Express 2 target interface */
++ e_P1023_LAW_TARGET_PCIE_1 = 0x02, /**< PCI Express 1 target interface */
++ e_P1023_LAW_TARGET_PCIE_3 = 0x03, /**< PCI Express 3 target interface */
++ e_P1023_LAW_TARGET_LBC = 0x04, /**< Local bus target interface */
++ e_P1023_LAW_TARGET_QM_PORTALS = 0x0E, /**< Queue Manager Portals */
++ e_P1023_LAW_TARGET_BM_PORTALS = 0x0E, /**< Buffer Manager Portals */
++ e_P1023_LAW_TARGET_SRAM = 0x0E, /**< SRAM scratchpad */
++ e_P1023_LAW_TARGET_DDR = 0x0F, /**< DDR target interface */
++ e_P1023_LAW_TARGET_NONE = 0xFF /**< Invalid target interface */
++} e_P1023LawTargetId;
++
++
++/**************************************************************************//**
++ @Group 1023_init_grp P1023 Initialization Unit
++
++ @Description P1023 initialization unit API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description Part ID and revision number
++*//***************************************************************************/
++typedef enum e_P1023DeviceName
++{
++ e_P1023_REV_INVALID = 0x00000000, /**< Invalid revision */
++ e_SC1023_REV_1_0 = (int)0x80FC0010, /**< SC1023 rev 1.0 */
++ e_SC1023_REV_1_1 = (int)0x80FC0011, /**< SC1023 rev 1.1 */
++ e_P1023_REV_1_0 = (int)0x80FE0010, /**< P1023 rev 1.0 with security */
++ e_P1023_REV_1_1 = (int)0x80FE0011, /**< P1023 rev 1.1 with security */
++ e_P1017_REV_1_1 = (int)0x80FF0011, /**< P1017 rev 1.1 with security */
++ e_P1023_REV_1_0_NO_SEC = (int)0x80F60010, /**< P1023 rev 1.0 without security */
++ e_P1023_REV_1_1_NO_SEC = (int)0x80F60011, /**< P1023 rev 1.1 without security */
++ e_P1017_REV_1_1_NO_SEC = (int)0x80F70011 /**< P1017 rev 1.1 without security */
++} e_P1023DeviceName;
++
++/**************************************************************************//**
++ @Description structure representing P1023 initialization parameters
++*//***************************************************************************/
++typedef struct t_P1023Params
++{
++ uintptr_t ccsrBaseAddress; /**< CCSR base address (virtual) */
++ uintptr_t bmPortalsBaseAddress; /**< Portals base address (virtual) */
++ uintptr_t qmPortalsBaseAddress; /**< Portals base address (virtual) */
++} t_P1023Params;
++
++/**************************************************************************//**
++ @Function P1023_ConfigAndInit
++
++ @Description General initialization of the chip registers.
++
++ @Param[in] p_P1023Params - A pointer to a data structure of parameters.
++
++ @Return A handle to the P1023 data structure.
++*//***************************************************************************/
++t_Handle P1023_ConfigAndInit(t_P1023Params *p_P1023Params);
++
++/**************************************************************************//**
++ @Function P1023_Free
++
++ @Description Free all resources.
++
++ @Param[in] h_P1023 - The handle of the initialized P1023 object.
++
++ @Return E_OK on success; Other value otherwise.
++*//***************************************************************************/
++t_Error P1023_Free(t_Handle h_P1023);
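++
++#if 0 /* Usage sketch (illustrative): a typical init/free sequence with the
++ * two routines above; the virtual base addresses are assumed to come from
++ * the platform's memory mapping. */
++static t_Handle example_p1023_init(uintptr_t ccsrVirt, uintptr_t bmVirt, uintptr_t qmVirt)
++{
++    t_P1023Params params;
++    params.ccsrBaseAddress      = ccsrVirt;
++    params.bmPortalsBaseAddress = bmVirt;
++    params.qmPortalsBaseAddress = qmVirt;
++    return P1023_ConfigAndInit(&params); /* released later via P1023_Free() */
++}
++#endif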
++
++/**************************************************************************//**
++ @Function P1023_GetRevInfo
++
++ @Description This routine enables access to chip and revision information.
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++
++ @Return Part ID and revision.
++*//***************************************************************************/
++e_P1023DeviceName P1023_GetRevInfo(uintptr_t gutilBase);
++
++/**************************************************************************//**
++ @Function P1023_GetE500Factor
++
++ @Description Returns E500 core clock multiplication factor.
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++ @Param[in] coreId - Id of the requested core.
++ @Param[out] p_E500MulFactor - Returns the E500 to CCB multiplication factor.
++ @Param[out] p_E500DivFactor - Returns the E500 to CCB division factor.
++
++ @Return E_OK on success; Other value otherwise.
++*//***************************************************************************/
++t_Error P1023_GetE500Factor(uintptr_t gutilBase,
++ uint32_t coreId,
++ uint32_t *p_E500MulFactor,
++ uint32_t *p_E500DivFactor);
++
++/**************************************************************************//**
++ @Function P1023_GetFmFactor
++
++ @Description Returns the FM multiplication factors. (The value is returned
++ in two parameters to avoid using a float parameter.)
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++ @Param[out] p_FmMulFactor - Returns the FM multiplication factor.
++ @Param[out] p_FmDivFactor - Returns the FM division factor.
++
++ @Return E_OK on success; Other value otherwise.
++*//***************************************************************************/
++t_Error P1023_GetFmFactor(uintptr_t gutilBase, uint32_t *p_FmMulFactor, uint32_t *p_FmDivFactor);
++
++/**************************************************************************//**
++ @Function P1023_GetCcbFactor
++
++ @Description Returns the system multiplication factor.
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++
++ @Return System multiplication factor.
++*//***************************************************************************/
++uint32_t P1023_GetCcbFactor(uintptr_t gutilBase);
++
++#if 0
++/**************************************************************************//**
++ @Function P1023_GetDdrFactor
++
++ @Description Returns the multiplication factor of the input clock for the DDR clock.
++ Note: assumes the ddr_in_clk is identical to the sys_in_clk.
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++ @Param[out] p_DdrMulFactor - Returns the DDR input clock multiplication factor.
++ @Param[out] p_DdrDivFactor - Returns the DDR division factor.
++
++ @Return E_OK on success; Other value otherwise.
++*//***************************************************************************/
++t_Error P1023_GetDdrFactor( uintptr_t gutilBase,
++ uint32_t *p_DdrMulFactor,
++ uint32_t *p_DdrDivFactor);
++
++/**************************************************************************//**
++ @Function P1023_GetDdrType
++
++ @Description Returns the DDR type (DDR1/DDR2/DDR3).
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++ @Param[out] p_DdrType - Returns the DDR type (DDR1/DDR2/DDR3).
++
++ @Return E_OK on success; Other value otherwise.
++*//***************************************************************************/
++t_Error P1023_GetDdrType(uintptr_t gutilBase, e_DdrType *p_DdrType);
++#endif
++
++/** @} */ /* end of 1023_init_grp group */
++/** @} */ /* end of 1023_grp group */
++
++#define CORE_E500V2
++
++#if 0 /* using unified values */
++/*****************************************************************************
++ INTEGRATION-SPECIFIC MODULE CODES
++******************************************************************************/
++#define MODULE_UNKNOWN 0x00000000
++#define MODULE_MEM 0x00010000
++#define MODULE_MM 0x00020000
++#define MODULE_CORE 0x00030000
++#define MODULE_P1023 0x00040000
++#define MODULE_MII 0x00050000
++#define MODULE_PM 0x00060000
++#define MODULE_MMU 0x00070000
++#define MODULE_PIC 0x00080000
++#define MODULE_L2_CACHE 0x00090000
++#define MODULE_DUART 0x000a0000
++#define MODULE_SERDES 0x000b0000
++#define MODULE_PIO 0x000c0000
++#define MODULE_QM 0x000d0000
++#define MODULE_BM 0x000e0000
++#define MODULE_SEC 0x000f0000
++#define MODULE_FM 0x00100000
++#define MODULE_FM_MURAM 0x00110000
++#define MODULE_FM_PCD 0x00120000
++#define MODULE_FM_RTC 0x00130000
++#define MODULE_FM_MAC 0x00140000
++#define MODULE_FM_PORT 0x00150000
++#define MODULE_FM_MACSEC 0x00160000
++#define MODULE_FM_MACSEC_SECY 0x00170000
++#define MODULE_FM_SP 0x00280000
++#define MODULE_ECM 0x00190000
++#define MODULE_DMA 0x001a0000
++#define MODULE_DDR 0x001b0000
++#define MODULE_LAW 0x001c0000
++#define MODULE_LBC 0x001d0000
++#define MODULE_I2C 0x001e0000
++#define MODULE_ESPI 0x001f0000
++#define MODULE_PCI 0x00200000
++#define MODULE_DPA_PORT 0x00210000
++#define MODULE_USB 0x00220000
++#endif /* using unified values */
++
++/*****************************************************************************
++ LBC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++/**************************************************************************//**
++ @Group lbc_exception_grp LBC Exception Unit
++
++ @Description LBC Exception unit API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Anchor lbc_exbm
++
++ @Collection LBC Errors Bit Mask
++
++ These errors are reported through the exceptions callback.
++ The values can be or'ed in any combination in the errors mask
++ parameter of the errors report structure.
++
++ These errors can also be passed as a bit-mask to
++ LBC_EnableErrorChecking() or LBC_DisableErrorChecking(),
++ for enabling or disabling error checking.
++ @{
++*//***************************************************************************/
++#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */
++#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */
++#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */
++#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */
++
++#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \
++ LBC_ERR_WRITE_PROTECT | LBC_ERR_CHIP_SELECT)
++ /**< All possible errors */
++/* @} */
++/** @} */ /* end of lbc_exception_grp group */
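++
++#if 0 /* Usage sketch (illustrative): the error masks above may be or'ed in
++ * any combination; an exceptions callback would typically test individual
++ * bits of the reported mask like this. */
++static __inline__ void example_lbc_error_callback(uint32_t reportedErrors)
++{
++    if (reportedErrors & LBC_ERR_PARITY_ECC)
++    {
++        /* handle GPCM/UPM parity error */
++    }
++    if (reportedErrors & (LBC_ERR_BUS_MONITOR | LBC_ERR_WRITE_PROTECT))
++    {
++        /* handle bus monitor / write protection errors */
++    }
++}
++#endif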
++
++#define LBC_NUM_OF_BANKS 2
++#define LBC_MAX_CS_SIZE 0x0000000100000000LL
++#define LBC_ATOMIC_OPERATION_SUPPORT
++#define LBC_PARITY_SUPPORT
++#define LBC_ADDRESS_SHIFT_SUPPORT
++#define LBC_ADDRESS_HOLD_TIME_CTRL
++#define LBC_HIGH_CLK_DIVIDERS
++#define LBC_FCM_AVAILABLE
++
++
++/*****************************************************************************
++ LAW INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define LAW_ARCH_CCB
++#define LAW_NUM_OF_WINDOWS 12
++#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4KB */
++#define LAW_MAX_WINDOW_SIZE 0x0000001000000000LL /**< 32GB */
++
++
++/*****************************************************************************
++ SPI INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define SPI_NUM_OF_CONTROLLERS 1
++
++/*****************************************************************************
++ PCI/PCIe INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++
++#define PCI_MAX_INBOUND_WINDOWS_NUM 4
++#define PCI_MAX_OUTBOUND_WINDOWS_NUM 5
++
++/**************************************************************************//**
++ @Description Target interface of an inbound window
++*//***************************************************************************/
++typedef enum e_PciTargetInterface
++{
++ e_PCI_TARGET_PCIE_2 = 0x1, /**< PCI Express target interface 2 */
++ e_PCI_TARGET_PCIE_1 = 0x2, /**< PCI Express target interface 1 */
++ e_PCI_TARGET_PCIE_3 = 0x3, /**< PCI Express target interface 3 */
++ e_PCI_TARGET_LOCAL_MEMORY = 0xF /**< Local Memory (DDR SDRAM, Local Bus, SRAM) target interface */
++
++} e_PciTargetInterface;
++
++/*****************************************************************************
++ DDR INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define DDR_NUM_OF_VALID_CS 2
++
++/*****************************************************************************
++ SEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define SEC_ERRATA_STAT_REGS_UNUSABLE
++
++/*****************************************************************************
++ DMA INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define DMA_NUM_OF_CONTROLLERS 2
++
++
++
++
++/*****************************************************************************
++ 1588 INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define PTP_V2
++
++/**************************************************************************//**
++ @Function P1023_GetMuxControlReg
++
++ @Description Returns the value of PMUXCR (Alternate Function Signal Multiplex
++ Control Register)
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++
++ @Return Value of PMUXCR
++*//***************************************************************************/
++uint32_t P1023_GetMuxControlReg(uintptr_t gutilBase);
++
++/**************************************************************************//**
++ @Function P1023_SetMuxControlReg
++
++ @Description Sets the value of PMUXCR (Alternate Function Signal Multiplex
++ Control Register)
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++ @Param[in] val - the new value for PMUXCR.
++
++ @Return None
++*//***************************************************************************/
++void P1023_SetMuxControlReg(uintptr_t gutilBase, uint32_t val);
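++
++#if 0 /* Usage sketch (illustrative): PMUXCR is modified with a
++ * read-modify-write through the two accessors above; 'bits' stands for
++ * whatever multiplex field is being enabled. */
++static __inline__ void example_set_pmuxcr_bits(uintptr_t gutilBase, uint32_t bits)
++{
++    uint32_t pmuxcr = P1023_GetMuxControlReg(gutilBase);
++    P1023_SetMuxControlReg(gutilBase, pmuxcr | bits);
++}
++#endif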
++
++/**************************************************************************//**
++ @Function P1023_GetDeviceDisableStatusRegister
++
++ @Description Returns the value of DEVDISR (Device Disable Register)
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++
++ @Return Value of DEVDISR
++*//***************************************************************************/
++uint32_t P1023_GetDeviceDisableStatusRegister(uintptr_t gutilBase);
++
++/**************************************************************************//**
++ @Function P1023_GetPorDeviceStatusRegister
++
++ @Description Returns the value of POR Device Status Register
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++
++ @Return POR Device Status Register
++*//***************************************************************************/
++uint32_t P1023_GetPorDeviceStatusRegister(uintptr_t gutilBase);
++
++/**************************************************************************//**
++ @Function P1023_GetPorBootModeStatusRegister
++
++ @Description Returns the value of POR Boot Mode Status Register
++
++ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
++
++ @Return POR Boot Mode Status Register value
++*//***************************************************************************/
++uint32_t P1023_GetPorBootModeStatusRegister(uintptr_t gutilBase);
++
++
++#define PORDEVSR_SGMII1_DIS 0x10000000
++#define PORDEVSR_SGMII2_DIS 0x08000000
++#define PORDEVSR_ECP1 0x02000000
++#define PORDEVSR_IO_SEL 0x00780000
++#define PORDEVSR_IO_SEL_SHIFT 19
++#define PORBMSR_HA 0x00070000
++#define PORBMSR_HA_SHIFT 16
++
++#define DEVDISR_QM_BM 0x80000000
++#define DEVDISR_FM 0x40000000
++#define DEVDISR_PCIE1 0x20000000
++#define DEVDISR_MAC_SEC 0x10000000
++#define DEVDISR_ELBC 0x08000000
++#define DEVDISR_PCIE2 0x04000000
++#define DEVDISR_PCIE3 0x02000000
++#define DEVDISR_CAAM 0x01000000
++#define DEVDISR_USB0 0x00800000
++#define DEVDISR_1588 0x00020000
++#define DEVDISR_CORE0 0x00008000
++#define DEVDISR_TB0 0x00004000
++#define DEVDISR_CORE1 0x00002000
++#define DEVDISR_TB1 0x00001000
++#define DEVDISR_DMA1 0x00000400
++#define DEVDISR_DMA2 0x00000200
++#define DEVDISR_DDR 0x00000010
++#define DEVDISR_TSEC1 0x00000080
++#define DEVDISR_TSEC2 0x00000040
++#define DEVDISR_SPI 0x00000008
++#define DEVDISR_I2C 0x00000004
++#define DEVDISR_DUART 0x00000002
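++
++#if 0 /* Usage sketch (illustrative): multi-bit fields are extracted by
++ * masking and shifting with the paired mask/shift definitions above. */
++static __inline__ void example_por_status_fields(uintptr_t gutilBase)
++{
++    uint32_t pordevsr = P1023_GetPorDeviceStatusRegister(gutilBase);
++    uint32_t porbmsr  = P1023_GetPorBootModeStatusRegister(gutilBase);
++    uint32_t ioSel = (pordevsr & PORDEVSR_IO_SEL) >> PORDEVSR_IO_SEL_SHIFT;
++    uint32_t ha    = (porbmsr & PORBMSR_HA) >> PORBMSR_HA_SHIFT;
++    (void)ioSel; (void)ha;
++}
++#endif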
++
++
++#endif /* __PART_INTEGRATION_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/dpaa_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/dpaa_integration_ext.h
+new file mode 100644
+index 00000000..6e2b925f
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/dpaa_integration_ext.h
+@@ -0,0 +1,276 @@
++/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File dpaa_integration_ext.h
++
++ @Description P3040/P4080/P5020 FM external definitions and structures.
++*//***************************************************************************/
++#ifndef __DPAA_INTEGRATION_EXT_H
++#define __DPAA_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++
++
++#define DPAA_VERSION 10
++
++typedef enum {
++ e_DPAA_SWPORTAL0 = 0,
++ e_DPAA_SWPORTAL1,
++ e_DPAA_SWPORTAL2,
++ e_DPAA_SWPORTAL3,
++ e_DPAA_SWPORTAL4,
++ e_DPAA_SWPORTAL5,
++ e_DPAA_SWPORTAL6,
++ e_DPAA_SWPORTAL7,
++ e_DPAA_SWPORTAL8,
++ e_DPAA_SWPORTAL9,
++ e_DPAA_SWPORTAL_DUMMY_LAST
++} e_DpaaSwPortal;
++
++typedef enum {
++ e_DPAA_DCPORTAL0 = 0,
++ e_DPAA_DCPORTAL1,
++ e_DPAA_DCPORTAL2,
++ e_DPAA_DCPORTAL3,
++ e_DPAA_DCPORTAL4,
++ e_DPAA_DCPORTAL_DUMMY_LAST
++} e_DpaaDcPortal;
++
++#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
++#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
++
++/*****************************************************************************
++ QMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define QM_MAX_NUM_OF_POOL_CHANNELS 15 /**< Total number of channels, dedicated and pool */
++#define QM_MAX_NUM_OF_WQ 8 /**< Number of work queues per channel */
++#define QM_MAX_NUM_OF_SWP_AS 4
++#define QM_MAX_NUM_OF_CGS 256 /**< Number of congestion groups */
++#define QM_MAX_NUM_OF_FQIDS (16 * MEGABYTE) /**< FQIDs range - 24 bits */
++
++/**************************************************************************//**
++ @Description Work Queue Channel assignments in QMan.
++*//***************************************************************************/
++typedef enum
++{
++ e_QM_FQ_CHANNEL_SWPORTAL0 = 0, /**< Dedicated channels serviced by software portals 0 to 9 */
++ e_QM_FQ_CHANNEL_SWPORTAL1,
++ e_QM_FQ_CHANNEL_SWPORTAL2,
++ e_QM_FQ_CHANNEL_SWPORTAL3,
++ e_QM_FQ_CHANNEL_SWPORTAL4,
++ e_QM_FQ_CHANNEL_SWPORTAL5,
++ e_QM_FQ_CHANNEL_SWPORTAL6,
++ e_QM_FQ_CHANNEL_SWPORTAL7,
++ e_QM_FQ_CHANNEL_SWPORTAL8,
++ e_QM_FQ_CHANNEL_SWPORTAL9,
++
++ e_QM_FQ_CHANNEL_POOL1 = 0x21, /**< Pool channels that can be serviced by any of the software portals */
++ e_QM_FQ_CHANNEL_POOL2,
++ e_QM_FQ_CHANNEL_POOL3,
++ e_QM_FQ_CHANNEL_POOL4,
++ e_QM_FQ_CHANNEL_POOL5,
++ e_QM_FQ_CHANNEL_POOL6,
++ e_QM_FQ_CHANNEL_POOL7,
++ e_QM_FQ_CHANNEL_POOL8,
++ e_QM_FQ_CHANNEL_POOL9,
++ e_QM_FQ_CHANNEL_POOL10,
++ e_QM_FQ_CHANNEL_POOL11,
++ e_QM_FQ_CHANNEL_POOL12,
++ e_QM_FQ_CHANNEL_POOL13,
++ e_QM_FQ_CHANNEL_POOL14,
++ e_QM_FQ_CHANNEL_POOL15,
++
++ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x40, /**< Dedicated channels serviced by Direct Connect Portal 0:
++ connected to FMan 0; assigned in incrementing order to
++ each sub-portal (SP) in the portal */
++ e_QM_FQ_CHANNEL_FMAN0_SP1,
++ e_QM_FQ_CHANNEL_FMAN0_SP2,
++ e_QM_FQ_CHANNEL_FMAN0_SP3,
++ e_QM_FQ_CHANNEL_FMAN0_SP4,
++ e_QM_FQ_CHANNEL_FMAN0_SP5,
++ e_QM_FQ_CHANNEL_FMAN0_SP6,
++ e_QM_FQ_CHANNEL_FMAN0_SP7,
++ e_QM_FQ_CHANNEL_FMAN0_SP8,
++ e_QM_FQ_CHANNEL_FMAN0_SP9,
++ e_QM_FQ_CHANNEL_FMAN0_SP10,
++ e_QM_FQ_CHANNEL_FMAN0_SP11,
++/* difference between 5020 and 4080 :) */
++ e_QM_FQ_CHANNEL_FMAN1_SP0 = 0x60,
++ e_QM_FQ_CHANNEL_FMAN1_SP1,
++ e_QM_FQ_CHANNEL_FMAN1_SP2,
++ e_QM_FQ_CHANNEL_FMAN1_SP3,
++ e_QM_FQ_CHANNEL_FMAN1_SP4,
++ e_QM_FQ_CHANNEL_FMAN1_SP5,
++ e_QM_FQ_CHANNEL_FMAN1_SP6,
++ e_QM_FQ_CHANNEL_FMAN1_SP7,
++ e_QM_FQ_CHANNEL_FMAN1_SP8,
++ e_QM_FQ_CHANNEL_FMAN1_SP9,
++ e_QM_FQ_CHANNEL_FMAN1_SP10,
++ e_QM_FQ_CHANNEL_FMAN1_SP11,
++
++ e_QM_FQ_CHANNEL_CAAM = 0x80, /**< Dedicated channel serviced by Direct Connect Portal 2:
++ connected to SEC 4.x */
++
++ e_QM_FQ_CHANNEL_PME = 0xA0, /**< Dedicated channel serviced by Direct Connect Portal 3:
++ connected to PME */
++ e_QM_FQ_CHANNEL_RAID = 0xC0 /**< Dedicated channel serviced by Direct Connect Portal 4:
++ connected to RAID */
++} e_QmFQChannel;
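++
++#if 0 /* Usage sketch (illustrative): per the comments above, an FMan
++ * sub-portal's dedicated channel is the FMan base channel plus the
++ * sub-portal index (spId must stay below the number of sub-portals). */
++static __inline__ e_QmFQChannel example_fman0_sp_channel(uint32_t spId)
++{
++    return (e_QmFQChannel)(e_QM_FQ_CHANNEL_FMAN0_SP0 + spId);
++}
++#endif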
++
++/*****************************************************************************
++ BMan INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define BM_MAX_NUM_OF_POOLS 64 /**< Number of buffers pools */
++
++
++/*****************************************************************************
++ FM INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define INTG_MAX_NUM_OF_FM 2
++
++/* Ports defines */
++#define FM_MAX_NUM_OF_1G_MACS 5
++#define FM_MAX_NUM_OF_10G_MACS 1
++#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
++#define FM_MAX_NUM_OF_OH_PORTS 7
++
++#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
++
++#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
++#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
++#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
++
++#define FM_PORT_MAX_NUM_OF_EXT_POOLS 8 /**< Number of external BM pools per Rx port */
++#define FM_PORT_NUM_OF_CONGESTION_GRPS 256 /**< Total number of congestion groups in QM */
++#define FM_MAX_NUM_OF_SUB_PORTALS 12
++#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 0
++
++/* Rams defines */
++#define FM_MURAM_SIZE (160*KILOBYTE)
++#define FM_IRAM_SIZE(major, minor) (64 * KILOBYTE)
++#define FM_NUM_OF_CTRL 2
++
++/* PCD defines */
++#define FM_PCD_PLCR_NUM_ENTRIES 256 /**< Total number of policer profiles */
++#define FM_PCD_KG_NUM_OF_SCHEMES 32 /**< Total number of KG schemes */
++#define FM_PCD_MAX_NUM_OF_CLS_PLANS 256 /**< Number of classification plan entries. */
++#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000200 /**< Number of bytes saved for patches */
++#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
++
++/* RTC defines */
++#define FM_RTC_NUM_OF_ALARMS 2 /**< RTC number of alarms */
++#define FM_RTC_NUM_OF_PERIODIC_PULSES 2 /**< RTC number of periodic pulses */
++#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 /**< RTC number of external triggers */
++
++/* QMI defines */
++#define QMI_MAX_NUM_OF_TNUMS 64
++#define QMI_DEF_TNUMS_THRESH 48
++
++/* FPM defines */
++#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
++
++/* DMA defines */
++#define DMA_THRESH_MAX_COMMQ 31
++#define DMA_THRESH_MAX_BUF 127
++
++/* BMI defines */
++#define BMI_MAX_NUM_OF_TASKS 128
++#define BMI_MAX_NUM_OF_DMAS 32
++#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
++#define PORT_MAX_WEIGHT 16
++
++
++#define FM_CHECK_PORT_RESTRICTIONS(__validPorts, __newPortIndx) TRUE
++
++/* p4080-rev1 unique features */
++#define QM_CGS_NO_FRAME_MODE
++
++/* p4080 unique features */
++#define FM_NO_DISPATCH_RAM_ECC
++#define FM_NO_WATCHDOG
++#define FM_NO_TNUM_AGING
++#define FM_KG_NO_BYPASS_FQID_GEN
++#define FM_KG_NO_BYPASS_PLCR_PROFILE_GEN
++#define FM_NO_BACKUP_POOLS
++#define FM_NO_OP_OBSERVED_POOLS
++#define FM_NO_ADVANCED_RATE_LIMITER
++#define FM_NO_OP_OBSERVED_CGS
++#define FM_HAS_TOTAL_DMAS
++#define FM_KG_NO_IPPID_SUPPORT
++#define FM_NO_GUARANTEED_RESET_VALUES
++#define FM_MAC_RESET
++
++/* FM erratas */
++#define FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
++#define FM_TX_SHORT_FRAME_BAD_TS_ERRATA_10GMAC_A006 /* No implementation, Out of LLD scope */
++#define FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007
++#define FM_ECC_HALT_NO_SYNC_ERRATA_10GMAC_A008
++#define FM_TX_INVALID_ECC_ERRATA_10GMAC_A009 /* Out of LLD scope, user may disable ECC exceptions using FM_DisableRamsEcc */
++#define FM_BAD_VLAN_DETECT_ERRATA_10GMAC_A010
++
++#define FM_RX_PREAM_4_ERRATA_DTSEC_A001
++#define FM_GRS_ERRATA_DTSEC_A002
++#define FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003
++#define FM_GTS_ERRATA_DTSEC_A004
++#define FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012
++#define FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014
++#define FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839
++
++#define FM_MAGIC_PACKET_UNRECOGNIZED_ERRATA_DTSEC2 /* No implementation, Out of LLD scope */
++#define FM_TX_LOCKUP_ERRATA_DTSEC6
++
++#define FM_HC_DEF_FQID_ONLY_ERRATA_FMAN_A003 /* Implemented by ucode */
++#define FM_DEBUG_TRACE_FMAN_A004 /* No implementation, Out of LLD scope */
++
++#define FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
++
++#define FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005
++
++#define FM_LEN_CHECK_ERRATA_FMAN_SW002
++
++#define FM_NO_CTXA_COPY_ERRATA_FMAN_SW001
++#define FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004
++
++/*****************************************************************************
++ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define NUM_OF_RX_SC 16
++#define NUM_OF_TX_SC 16
++
++#define NUM_OF_SA_PER_RX_SC 2
++#define NUM_OF_SA_PER_TX_SC 2
++
++
++#endif /* __DPAA_INTEGRATION_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_ext.h
+new file mode 100644
+index 00000000..512f0baf
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_ext.h
+@@ -0,0 +1,83 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++
++ @File part_ext.h
++
++ @Description Definitions for the part (integration) module.
++*//***************************************************************************/
++
++#ifndef __PART_EXT_H
++#define __PART_EXT_H
++
++#include "std_ext.h"
++#include "part_integration_ext.h"
++
++
++#if !(defined(MPC8306) || \
++ defined(MPC8309) || \
++ defined(MPC834x) || \
++ defined(MPC836x) || \
++ defined(MPC832x) || \
++ defined(MPC837x) || \
++ defined(MPC8568) || \
++ defined(MPC8569) || \
++ defined(P1020) || \
++ defined(P1021) || \
++ defined(P1022) || \
++ defined(P1023) || \
++ defined(P2020) || \
++ defined(P2040) || \
++ defined(P3041) || \
++ defined(P4080) || \
++ defined(SC4080) || \
++ defined(P5020) || \
++ defined(MSC814x))
++#error "unable to proceed without chip-definition"
++#endif /* !(defined(MPC834x) || ... */
++
++
++/**************************************************************************//*
++ @Description Part data structure - must be contained in any integration
++ data structure.
++*//***************************************************************************/
++typedef struct t_Part
++{
++ uintptr_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
++ /**< Returns the address of the module's memory map base. */
++ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress);
++ /**< Returns the module's ID according to its memory map base. */
++} t_Part;
++
++
++#endif /* __PART_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_integration_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_integration_ext.h
+new file mode 100644
+index 00000000..03c59b8b
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_integration_ext.h
+@@ -0,0 +1,336 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File part_integration_ext.h
++
++ @Description P3040/P4080/P5020 external definitions and structures.
++*//***************************************************************************/
++#ifndef __PART_INTEGRATION_EXT_H
++#define __PART_INTEGRATION_EXT_H
++
++#include "std_ext.h"
++#include "dpaa_integration_ext.h"
++
++
++/**************************************************************************//**
++ @Group P3040/P4080/P5020_chip_id P3040/P4080/P5020 Application Programming Interface
++
++ @Description P3040/P4080/P5020 Chip functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++#define CORE_E500MC
++
++#define INTG_MAX_NUM_OF_CORES 1
++
++
++/**************************************************************************//**
++ @Description Module types.
++*//***************************************************************************/
++typedef enum e_ModuleId
++{
++ e_MODULE_ID_DUART_1 = 0,
++ e_MODULE_ID_DUART_2,
++ e_MODULE_ID_DUART_3,
++ e_MODULE_ID_DUART_4,
++ e_MODULE_ID_LAW,
++ e_MODULE_ID_LBC,
++ e_MODULE_ID_PAMU,
++ e_MODULE_ID_QM, /**< Queue manager module */
++ e_MODULE_ID_BM, /**< Buffer manager module */
++ e_MODULE_ID_QM_CE_PORTAL_0,
++ e_MODULE_ID_QM_CI_PORTAL_0,
++ e_MODULE_ID_QM_CE_PORTAL_1,
++ e_MODULE_ID_QM_CI_PORTAL_1,
++ e_MODULE_ID_QM_CE_PORTAL_2,
++ e_MODULE_ID_QM_CI_PORTAL_2,
++ e_MODULE_ID_QM_CE_PORTAL_3,
++ e_MODULE_ID_QM_CI_PORTAL_3,
++ e_MODULE_ID_QM_CE_PORTAL_4,
++ e_MODULE_ID_QM_CI_PORTAL_4,
++ e_MODULE_ID_QM_CE_PORTAL_5,
++ e_MODULE_ID_QM_CI_PORTAL_5,
++ e_MODULE_ID_QM_CE_PORTAL_6,
++ e_MODULE_ID_QM_CI_PORTAL_6,
++ e_MODULE_ID_QM_CE_PORTAL_7,
++ e_MODULE_ID_QM_CI_PORTAL_7,
++ e_MODULE_ID_QM_CE_PORTAL_8,
++ e_MODULE_ID_QM_CI_PORTAL_8,
++ e_MODULE_ID_QM_CE_PORTAL_9,
++ e_MODULE_ID_QM_CI_PORTAL_9,
++ e_MODULE_ID_BM_CE_PORTAL_0,
++ e_MODULE_ID_BM_CI_PORTAL_0,
++ e_MODULE_ID_BM_CE_PORTAL_1,
++ e_MODULE_ID_BM_CI_PORTAL_1,
++ e_MODULE_ID_BM_CE_PORTAL_2,
++ e_MODULE_ID_BM_CI_PORTAL_2,
++ e_MODULE_ID_BM_CE_PORTAL_3,
++ e_MODULE_ID_BM_CI_PORTAL_3,
++ e_MODULE_ID_BM_CE_PORTAL_4,
++ e_MODULE_ID_BM_CI_PORTAL_4,
++ e_MODULE_ID_BM_CE_PORTAL_5,
++ e_MODULE_ID_BM_CI_PORTAL_5,
++ e_MODULE_ID_BM_CE_PORTAL_6,
++ e_MODULE_ID_BM_CI_PORTAL_6,
++ e_MODULE_ID_BM_CE_PORTAL_7,
++ e_MODULE_ID_BM_CI_PORTAL_7,
++ e_MODULE_ID_BM_CE_PORTAL_8,
++ e_MODULE_ID_BM_CI_PORTAL_8,
++ e_MODULE_ID_BM_CE_PORTAL_9,
++ e_MODULE_ID_BM_CI_PORTAL_9,
++ e_MODULE_ID_FM1, /**< Frame manager #1 module */
++ e_MODULE_ID_FM1_RTC, /**< FM Real-Time-Clock */
++ e_MODULE_ID_FM1_MURAM, /**< FM Multi-User-RAM */
++ e_MODULE_ID_FM1_BMI, /**< FM BMI block */
++ e_MODULE_ID_FM1_QMI, /**< FM QMI block */
++ e_MODULE_ID_FM1_PRS, /**< FM parser block */
++ e_MODULE_ID_FM1_PORT_HO0, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM1_PORT_HO1, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM1_PORT_HO2, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM1_PORT_HO3, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM1_PORT_HO4, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM1_PORT_HO5, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM1_PORT_HO6, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM1_PORT_1GRx0, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_1GRx1, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_1GRx2, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_1GRx3, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_1GRx4, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_10GRx0, /**< FM Rx 10G MAC port block */
++ e_MODULE_ID_FM1_PORT_1GTx0, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_1GTx1, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_1GTx2, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_1GTx3, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_1GTx4, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM1_PORT_10GTx0, /**< FM Tx 10G MAC port block */
++ e_MODULE_ID_FM1_PLCR, /**< FM Policer */
++ e_MODULE_ID_FM1_KG, /**< FM Keygen */
++ e_MODULE_ID_FM1_DMA, /**< FM DMA */
++ e_MODULE_ID_FM1_FPM, /**< FM FPM */
++ e_MODULE_ID_FM1_IRAM, /**< FM Instruction-RAM */
++ e_MODULE_ID_FM1_1GMDIO0, /**< FM 1G MDIO MAC 0*/
++ e_MODULE_ID_FM1_1GMDIO1, /**< FM 1G MDIO MAC 1*/
++ e_MODULE_ID_FM1_1GMDIO2, /**< FM 1G MDIO MAC 2*/
++ e_MODULE_ID_FM1_1GMDIO3, /**< FM 1G MDIO MAC 3*/
++ e_MODULE_ID_FM1_10GMDIO, /**< FM 10G MDIO */
++ e_MODULE_ID_FM1_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
++ e_MODULE_ID_FM1_1GMAC0, /**< FM 1G MAC #0 */
++ e_MODULE_ID_FM1_1GMAC1, /**< FM 1G MAC #1 */
++ e_MODULE_ID_FM1_1GMAC2, /**< FM 1G MAC #2 */
++ e_MODULE_ID_FM1_1GMAC3, /**< FM 1G MAC #3 */
++ e_MODULE_ID_FM1_10GMAC0, /**< FM 10G MAC #0 */
++
++ e_MODULE_ID_FM2, /**< Frame manager #2 module */
++ e_MODULE_ID_FM2_RTC, /**< FM Real-Time-Clock */
++ e_MODULE_ID_FM2_MURAM, /**< FM Multi-User-RAM */
++ e_MODULE_ID_FM2_BMI, /**< FM BMI block */
++ e_MODULE_ID_FM2_QMI, /**< FM QMI block */
++ e_MODULE_ID_FM2_PRS, /**< FM parser block */
++ e_MODULE_ID_FM2_PORT_HO0, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM2_PORT_HO1, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM2_PORT_HO2, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM2_PORT_HO3, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM2_PORT_HO4, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM2_PORT_HO5, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM2_PORT_HO6, /**< FM Host-command/offline-parsing port block */
++ e_MODULE_ID_FM2_PORT_1GRx0, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM2_PORT_1GRx1, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM2_PORT_1GRx2, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM2_PORT_1GRx3, /**< FM Rx 1G MAC port block */
++ e_MODULE_ID_FM2_PORT_10GRx0, /**< FM Rx 10G MAC port block */
++ e_MODULE_ID_FM2_PORT_1GTx0, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM2_PORT_1GTx1, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM2_PORT_1GTx2, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM2_PORT_1GTx3, /**< FM Tx 1G MAC port block */
++ e_MODULE_ID_FM2_PORT_10GTx0, /**< FM Tx 10G MAC port block */
++ e_MODULE_ID_FM2_PLCR, /**< FM Policer */
++ e_MODULE_ID_FM2_KG, /**< FM Keygen */
++ e_MODULE_ID_FM2_DMA, /**< FM DMA */
++ e_MODULE_ID_FM2_FPM, /**< FM FPM */
++ e_MODULE_ID_FM2_IRAM, /**< FM Instruction-RAM */
++ e_MODULE_ID_FM2_1GMDIO0, /**< FM 1G MDIO MAC 0*/
++ e_MODULE_ID_FM2_1GMDIO1, /**< FM 1G MDIO MAC 1*/
++ e_MODULE_ID_FM2_1GMDIO2, /**< FM 1G MDIO MAC 2*/
++ e_MODULE_ID_FM2_1GMDIO3, /**< FM 1G MDIO MAC 3*/
++ e_MODULE_ID_FM2_10GMDIO, /**< FM 10G MDIO */
++ e_MODULE_ID_FM2_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
++ e_MODULE_ID_FM2_1GMAC0, /**< FM 1G MAC #0 */
++ e_MODULE_ID_FM2_1GMAC1, /**< FM 1G MAC #1 */
++ e_MODULE_ID_FM2_1GMAC2, /**< FM 1G MAC #2 */
++ e_MODULE_ID_FM2_1GMAC3, /**< FM 1G MAC #3 */
++ e_MODULE_ID_FM2_10GMAC0, /**< FM 10G MAC #0 */
++
++ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
++ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
++ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
++ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
++ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
++ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
++ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
++ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
++ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
++ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
++ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
++ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
++
++ e_MODULE_ID_MPIC, /**< MPIC */
++ e_MODULE_ID_GPIO, /**< GPIO */
++ e_MODULE_ID_SERDES, /**< SERDES */
++ e_MODULE_ID_CPC_1, /**< CoreNet-Platform-Cache 1 */
++ e_MODULE_ID_CPC_2, /**< CoreNet-Platform-Cache 2 */
++
++ e_MODULE_ID_SRIO_PORTS, /**< RapidIO controller */
++ e_MODULE_ID_SRIO_MU, /**< RapidIO messaging unit module */
++
++ e_MODULE_ID_DUMMY_LAST
++} e_ModuleId;
++
++#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
++
++#if 0 /* using unified values */
++/*****************************************************************************
++ INTEGRATION-SPECIFIC MODULE CODES
++******************************************************************************/
++#define MODULE_UNKNOWN 0x00000000
++#define MODULE_MEM 0x00010000
++#define MODULE_MM 0x00020000
++#define MODULE_CORE 0x00030000
++#define MODULE_CHIP 0x00040000
++#define MODULE_PLTFRM 0x00050000
++#define MODULE_PM 0x00060000
++#define MODULE_MMU 0x00070000
++#define MODULE_PIC 0x00080000
++#define MODULE_CPC 0x00090000
++#define MODULE_DUART 0x000a0000
++#define MODULE_SERDES 0x000b0000
++#define MODULE_PIO 0x000c0000
++#define MODULE_QM 0x000d0000
++#define MODULE_BM 0x000e0000
++#define MODULE_SEC 0x000f0000
++#define MODULE_LAW 0x00100000
++#define MODULE_LBC 0x00110000
++#define MODULE_PAMU 0x00120000
++#define MODULE_FM 0x00130000
++#define MODULE_FM_MURAM 0x00140000
++#define MODULE_FM_PCD 0x00150000
++#define MODULE_FM_RTC 0x00160000
++#define MODULE_FM_MAC 0x00170000
++#define MODULE_FM_PORT 0x00180000
++#define MODULE_FM_SP 0x00190000
++#define MODULE_DPA_PORT 0x001a0000
++#define MODULE_MII 0x001b0000
++#define MODULE_I2C 0x001c0000
++#define MODULE_DMA 0x001d0000
++#define MODULE_DDR 0x001e0000
++#define MODULE_ESPI 0x001f0000
++#define MODULE_DPAA_IPSEC 0x00200000
++#endif /* using unified values */
++
++/*****************************************************************************
++ PAMU INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define PAMU_NUM_OF_PARTITIONS 5
++
++#define PAMU_PICS_AVICS_ERRATA_PAMU3
++
++/*****************************************************************************
++ LAW INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define LAW_NUM_OF_WINDOWS 32
++#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4KB */
++#define LAW_MAX_WINDOW_SIZE 0x0000002000000000LL /**< 64GB */
++
++
++/*****************************************************************************
++ LBC INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++/**************************************************************************//**
++ @Group lbc_exception_grp LBC Exception Unit
++
++ @Description LBC Exception unit API functions, definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Anchor lbc_exbm
++
++ @Collection LBC Errors Bit Mask
++
++ These errors are reported through the exceptions callback.
++ The values can be or'ed in any combination in the errors mask
++ parameter of the errors report structure.
++
++ These errors can also be passed as a bit-mask to
++ LBC_EnableErrorChecking() or LBC_DisableErrorChecking(),
++ for enabling or disabling error checking.
++ @{
++*//***************************************************************************/
++#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */
++#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */
++#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */
++#define LBC_ERR_ATOMIC_WRITE 0x00800000 /**< Atomic write error */
++#define LBC_ERR_ATOMIC_READ 0x00400000 /**< Atomic read error */
++#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */
++
++#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \
++ LBC_ERR_WRITE_PROTECT | LBC_ERR_ATOMIC_WRITE | \
++ LBC_ERR_ATOMIC_READ | LBC_ERR_CHIP_SELECT)
++ /**< All possible errors */
++/* @} */
++/** @} */ /* end of lbc_exception_grp group */
++
++#define LBC_INCORRECT_ERROR_REPORT_ERRATA
++
++#define LBC_NUM_OF_BANKS 8
++#define LBC_MAX_CS_SIZE 0x0000000100000000LL
++#define LBC_ATOMIC_OPERATION_SUPPORT
++#define LBC_PARITY_SUPPORT
++#define LBC_ADDRESS_HOLD_TIME_CTRL
++#define LBC_HIGH_CLK_DIVIDERS
++#define LBC_FCM_AVAILABLE
++
++/*****************************************************************************
++ GPIO INTEGRATION-SPECIFIC DEFINITIONS
++******************************************************************************/
++#define GPIO_NUM_OF_PORTS 1 /**< Number of ports in GPIO module;
++ Each port contains up to 32 I/O pins. */
++
++#define GPIO_VALID_PIN_MASKS \
++ { /* Port A */ 0xFFFFFFFF }
++
++#define GPIO_VALID_INTR_MASKS \
++ { /* Port A */ 0xFFFFFFFF }
++
++#endif /* __PART_INTEGRATION_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/math_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/math_ext.h
+new file mode 100644
+index 00000000..4ecfc6ed
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/math_ext.h
+@@ -0,0 +1,100 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef __MATH_EXT_H
++#define __MATH_EXT_H
++
++
++#if defined(NCSW_LINUX) && defined(__KERNEL__)
++#include <linux/math.h>
++#include <linux/math64.h>
++
++#elif defined(__MWERKS__)
++#define LOW(x) ( sizeof(x)==8 ? *(1+(int32_t*)&x) : (*(int32_t*)&x))
++#define HIGH(x) (*(int32_t*)&x)
++#define ULOW(x) ( sizeof(x)==8 ? *(1+(uint32_t*)&x) : (*(uint32_t*)&x))
++#define UHIGH(x) (*(uint32_t*)&x)
++
++static const double big = 1.0e300;
++
++/* Round x up to the nearest integer (ceil) via IEEE-754 double bit manipulation */
++static __inline__ double ceil(double x)
++{
++ int32_t i0,i1,j0; /*- cc 020130 -*/
++ uint32_t i,j; /*- cc 020130 -*/
++ i0 = HIGH(x);
++ i1 = LOW(x);
++ j0 = ((i0>>20)&0x7ff)-0x3ff;
++ if(j0<20) {
++ if(j0<0) { /* raise inexact if x != 0 */
++ if(big+x>0.0) {/* return 0*sign(x) if |x|<1 */
++ if(i0<0) {i0=0x80000000;i1=0;}
++ else if((i0|i1)!=0) { i0=0x3ff00000;i1=0;}
++ }
++ } else {
++ i = (uint32_t)(0x000fffff)>>j0;
++ if(((i0&i)|i1)==0) return x; /* x is integral */
++ if(big+x>0.0) { /* raise inexact flag */
++ if(i0>0) i0 += (0x00100000)>>j0;
++ i0 &= (~i); i1=0;
++ }
++ }
++ } else if (j0>51) {
++ if(j0==0x400) return x+x; /* inf or NaN */
++ else return x; /* x is integral */
++ } else {
++ i = ((uint32_t)(0xffffffff))>>(j0-20); /*- cc 020130 -*/
++ if((i1&i)==0) return x; /* x is integral */
++ if(big+x>0.0) { /* raise inexact flag */
++ if(i0>0) {
++ if(j0==20) i0+=1;
++ else {
++ j = (uint32_t)(i1 + (1<<(52-j0)));
++ if(j<i1) i0+=1; /* got a carry */
++ i1 = (int32_t)j;
++ }
++ }
++ i1 &= (~i);
++ }
++ }
++ HIGH(x) = i0;
++ LOW(x) = i1;
++ return x;
++}
++
++#else
++#include <math.h>
++#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
++
++
++#endif /* __MATH_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h
+new file mode 100644
+index 00000000..dc32e249
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h
+@@ -0,0 +1,435 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File ncsw_ext.h
++
++ @Description General NetCommSw Standard Definitions
++*//***************************************************************************/
++
++#ifndef __NCSW_EXT_H
++#define __NCSW_EXT_H
++
++
++#include "memcpy_ext.h"
++
++#define WRITE_BLOCK IOMemSet32 /* include memcpy_ext.h */
++#define COPY_BLOCK Mem2IOCpy32 /* include memcpy_ext.h */
++
++#define PTR_TO_UINT(_ptr) ((uintptr_t)(_ptr))
++#define UINT_TO_PTR(_val) ((void*)(uintptr_t)(_val))
++
++#define PTR_MOVE(_ptr, _offset) (void*)((uint8_t*)(_ptr) + (_offset))
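++
++/*
++ * Illustrative usage (editor's sketch, not in the original sources):
++ *
++ *   uint8_t *base = XX_Malloc(64);     // any byte buffer
++ *   void *p = PTR_MOVE(base, 16);      // base + 16 bytes
++ *   uintptr_t u = PTR_TO_UINT(p);      // pointer -> integer
++ *   void *q = UINT_TO_PTR(u);          // integer -> pointer; q == p
++ */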
++
++
++#define WRITE_UINT8_UINT24(arg, data08, data24) \
++ WRITE_UINT32(arg,((uint32_t)(data08)<<24)|((uint32_t)(data24)&0x00FFFFFF))
++#define WRITE_UINT24_UINT8(arg, data24, data08) \
++ WRITE_UINT32(arg,((uint32_t)(data24)<< 8)|((uint32_t)(data08)&0x000000FF))
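++
++/*
++ * Editor's example (illustrative only): WRITE_UINT8_UINT24(reg, 0xAB,
++ * 0x123456) stores the 32-bit value 0xAB123456 - the 8-bit field in the
++ * most significant byte, followed by the 24-bit field.
++ */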
++
++/* Little-Endian access macros */
++
++#define WRITE_UINT16_LE(arg, data) \
++ WRITE_UINT16((arg), SwapUint16(data))
++
++#define WRITE_UINT32_LE(arg, data) \
++ WRITE_UINT32((arg), SwapUint32(data))
++
++#define WRITE_UINT64_LE(arg, data) \
++ WRITE_UINT64((arg), SwapUint64(data))
++
++#define GET_UINT16_LE(arg) \
++ SwapUint16(GET_UINT16(arg))
++
++#define GET_UINT32_LE(arg) \
++ SwapUint32(GET_UINT32(arg))
++
++#define GET_UINT64_LE(arg) \
++ SwapUint64(GET_UINT64(arg))
++
++/* Write and Read again macros */
++#define WRITE_UINT_SYNC(size, arg, data) \
++ do { \
++ WRITE_UINT##size((arg), (data)); \
++ CORE_MemoryBarrier(); \
++ } while (0)
++
++#define WRITE_UINT8_SYNC(arg, data) WRITE_UINT_SYNC(8, (arg), (data))
++
++#define WRITE_UINT16_SYNC(arg, data) WRITE_UINT_SYNC(16, (arg), (data))
++#define WRITE_UINT32_SYNC(arg, data) WRITE_UINT_SYNC(32, (arg), (data))
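++
++/*
++ * Editor's sketch (hypothetical register and bit names): the _SYNC
++ * variants order the store before any following access via a memory
++ * barrier, e.g.:
++ *
++ *   WRITE_UINT32_SYNC(p_Regs->ctrl, CTRL_ENABLE); // reaches the device
++ *   status = GET_UINT32(p_Regs->status);          // before this read
++ */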
++
++#define MAKE_UINT64(high32, low32) (((uint64_t)(high32) << 32) | (uint32_t)(low32))
++
++
++/*----------------------*/
++/* Miscellaneous macros */
++/*----------------------*/
++
++#define UNUSED(_x) ((void)(_x))
++
++#define KILOBYTE 0x400UL /* 1024 */
++#define MEGABYTE (KILOBYTE * KILOBYTE) /* 1024*1024 */
++#define GIGABYTE ((uint64_t)(KILOBYTE * MEGABYTE)) /* 1024*1024*1024 */
++#define TERABYTE ((uint64_t)(KILOBYTE * GIGABYTE)) /* 1024*1024*1024*1024 */
++
++#ifndef NO_IRQ
++#define NO_IRQ (0)
++#endif
++#define NCSW_MASTER_ID (0)
++
++/* Macro for checking if a number is a power of 2 */
++#define POWER_OF_2(n) (!((n) & ((n)-1)))
++
++/* Macro for calculating log of base 2 */
++#define LOG2(num, log2Num) \
++ do \
++ { \
++ uint64_t tmp = (num); \
++ log2Num = 0; \
++ while (tmp > 1) \
++ { \
++ log2Num++; \
++ tmp >>= 1; \
++ } \
++ } while (0)
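++/*
++ * Editor's example (illustrative only): LOG2() computes floor(log2(num)),
++ * e.g. LOG2(64, l) yields l == 6 and LOG2(100, l) also yields l == 6.
++ */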
++
++#define NEXT_POWER_OF_2(_num, _nextPow) \
++do \
++{ \
++ if (POWER_OF_2(_num)) \
++ _nextPow = (_num); \
++ else \
++ { \
++ uint64_t tmp = (_num); \
++ _nextPow = 1; \
++ while (tmp) \
++ { \
++ _nextPow <<= 1; \
++ tmp >>= 1; \
++ } \
++ } \
++} while (0)
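++/*
++ * Editor's example (illustrative only): NEXT_POWER_OF_2(5, p) yields
++ * p == 8, while an exact power of two is returned unchanged, e.g.
++ * NEXT_POWER_OF_2(64, p) yields p == 64.
++ */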
++
++/* Ceiling division - not the fastest way, but safer in terms of overflow */
++#define DIV_CEIL(x,y) (div64_u64((x),(y)) + (((div64_u64((x),(y))*(y)) == (x)) ? 0 : 1))
++
++/* Round up a number to be a multiple of a second number */
++#define ROUND_UP(x,y) ((((x) + (y) - 1) / (y)) * (y))
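++
++/*
++ * Editor's example (illustrative only): DIV_CEIL(10, 4) == 3 and
++ * ROUND_UP(10, 4) == 12; both round up instead of truncating.
++ */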
++
++/* Timing macros for converting usec units to number of ticks: */
++/* ticks = (number of usec * clock_Hz) / 1,000,000; since clk is */
++/* given in MHz units, no division is needed. */
++#define USEC_TO_CLK(usec,clk) ((usec) * (clk))
++#define CYCLES_TO_USEC(cycles,clk) ((cycles) / (clk))
++
++/* Timing macros for converting between nsec units and number of clocks. */
++#define NSEC_TO_CLK(nsec,clk) DIV_CEIL(((nsec) * (clk)), 1000)
++#define CYCLES_TO_NSEC(cycles,clk) (((cycles) * 1000) / (clk))
++
++/* Timing macros for converting between psec units and number of clocks. */
++#define PSEC_TO_CLK(psec,clk) DIV_CEIL(((psec) * (clk)), 1000000)
++#define CYCLES_TO_PSEC(cycles,clk) (((cycles) * 1000000) / (clk))
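++
++/*
++ * Editor's example (illustrative only): with clk given in MHz, e.g. a
++ * 667 MHz clock, NSEC_TO_CLK(1000, 667) == 667 ticks for 1 usec, and
++ * CYCLES_TO_NSEC(667, 667) == 1000 nsec again.
++ */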
++
++/* Min, Max macros */
++#define MIN(a,b) ((a) < (b) ? (a) : (b))
++#define MAX(a,b) ((a) > (b) ? (a) : (b))
++#define IN_RANGE(min,val,max) ((min)<=(val) && (val)<=(max))
++
++#define ABS(a) (((a) < 0) ? -(a) : (a))
++
++#if !(defined(ARRAY_SIZE))
++#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
++#endif /* !defined(ARRAY_SIZE) */
++
++
++/* possible alignments */
++#define HALF_WORD_ALIGNMENT 2
++#define WORD_ALIGNMENT 4
++#define DOUBLE_WORD_ALIGNMENT 8
++#define BURST_ALIGNMENT 32
++
++#define HALF_WORD_ALIGNED 0x00000001
++#define WORD_ALIGNED 0x00000003
++#define DOUBLE_WORD_ALIGNED 0x00000007
++#define BURST_ALIGNED 0x0000001f
++#ifndef IS_ALIGNED
++#define IS_ALIGNED(n,align) (!((uint32_t)(n) & ((align) - 1)))
++#endif /* IS_ALIGNED */
++
++
++#define LAST_BUF 1
++#define FIRST_BUF 2
++#define SINGLE_BUF (LAST_BUF | FIRST_BUF)
++#define MIDDLE_BUF 4
++
++#define ARRAY_END -1
++
++#define ILLEGAL_BASE (~0)
++
++#define BUF_POSITION(first, last) state[(!!(last))<<1 | !!(first)]
++#define DECLARE_POSITION static uint8_t state[4] = { (uint8_t)MIDDLE_BUF, (uint8_t)FIRST_BUF, (uint8_t)LAST_BUF, (uint8_t)SINGLE_BUF };
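++
++/*
++ * Editor's note (illustrative, not part of the original sources): the
++ * lookup index is ((last << 1) | first), so with DECLARE_POSITION in
++ * scope, BUF_POSITION(1, 0) == FIRST_BUF, BUF_POSITION(0, 1) == LAST_BUF,
++ * BUF_POSITION(1, 1) == SINGLE_BUF and BUF_POSITION(0, 0) == MIDDLE_BUF.
++ */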
++
++
++/**************************************************************************//**
++ @Description Timers operation mode
++*//***************************************************************************/
++typedef enum e_TimerMode
++{
++ e_TIMER_MODE_INVALID = 0,
++ e_TIMER_MODE_FREE_RUN, /**< Free run - counter continues to increase
++ after reaching the reference value. */
++ e_TIMER_MODE_PERIODIC, /**< Periodic - counter restarts counting from 0
++ after reaching the reference value. */
++ e_TIMER_MODE_SINGLE /**< Single (one-shot) - counter stops counting
++ after reaching the reference value. */
++} e_TimerMode;
++
++
++/**************************************************************************//**
++ @Description Enumeration (bit flags) of communication modes (Transmit,
++ receive or both).
++*//***************************************************************************/
++typedef enum e_CommMode
++{
++ e_COMM_MODE_NONE = 0, /**< No transmit/receive communication */
++ e_COMM_MODE_RX = 1, /**< Only receive communication */
++ e_COMM_MODE_TX = 2, /**< Only transmit communication */
++ e_COMM_MODE_RX_AND_TX = 3 /**< Both transmit and receive communication */
++} e_CommMode;
++
++/**************************************************************************//**
++ @Description General Diagnostic Mode
++*//***************************************************************************/
++typedef enum e_DiagMode
++{
++ e_DIAG_MODE_NONE = 0, /**< Normal operation; no diagnostic mode */
++ e_DIAG_MODE_CTRL_LOOPBACK, /**< Loopback in the controller */
++ e_DIAG_MODE_CHIP_LOOPBACK, /**< Loopback in the chip but not in the
++ controller; e.g. IO-pins, SerDes, etc. */
++ e_DIAG_MODE_PHY_LOOPBACK, /**< Loopback in the external PHY */
++ e_DIAG_MODE_EXT_LOOPBACK, /**< Loopback in the external line (beyond the PHY) */
++ e_DIAG_MODE_CTRL_ECHO, /**< Echo incoming data by the controller */
++ e_DIAG_MODE_PHY_ECHO /**< Echo incoming data by the PHY */
++} e_DiagMode;
++
++/**************************************************************************//**
++ @Description Possible RxStore callback responses.
++*//***************************************************************************/
++typedef enum e_RxStoreResponse
++{
++ e_RX_STORE_RESPONSE_PAUSE /**< Pause invoking callback with received data;
++ in polling mode, start again invoking callback
++ only next time user invokes the receive routine;
++ in interrupt mode, start again invoking callback
++ only next time a receive event triggers an interrupt;
++ in all cases, received data that are pending are not
++ lost, rather, their processing is temporarily deferred;
++ in all cases, received data are processed in the order
++ in which they were received. */
++ , e_RX_STORE_RESPONSE_CONTINUE /**< Continue invoking callback with received data. */
++} e_RxStoreResponse;
++
++
++/**************************************************************************//**
++ @Description General Handle
++*//***************************************************************************/
++typedef void * t_Handle; /**< handle, used as object's descriptor */
++
++/**************************************************************************//**
++ @Description MUTEX type
++*//***************************************************************************/
++typedef uint32_t t_Mutex;
++
++/**************************************************************************//**
++ @Description Error Code.
++
++ The high word of the error code is the code of the software
++ module (driver). The low word is the error type (e_ErrorType).
++ To get the values from the error code, use GET_ERROR_TYPE()
++ and GET_ERROR_MODULE().
++*//***************************************************************************/
++typedef uint32_t t_Error;
++
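++/*
++ * Editor's note (illustrative, not part of the original sources): with
++ * the module codes of xx_common.h occupying the high 16 bits, an error
++ * code might be composed as (MODULE_FM_PCD | <e_ErrorType value>); the
++ * GET_ERROR_MODULE() and GET_ERROR_TYPE() macros, defined elsewhere in
++ * the SDK, recover the two halves.
++ */
++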
++/**************************************************************************//**
++ @Description General prototype of interrupt service routine (ISR).
++
++ @Param[in] handle - Optional handle of the module handling the interrupt.
++
++ @Return None
++ *//***************************************************************************/
++typedef void (t_Isr)(t_Handle handle);
++
++/**************************************************************************//**
++ @Anchor mem_attr
++
++ @Collection Memory Attributes
++
++ Various attributes of memory partitions. These values may be
++ or'ed together to create a mask of all memory attributes.
++ @{
++*//***************************************************************************/
++#define MEMORY_ATTR_CACHEABLE 0x00000001
++ /**< Memory is cacheable */
++#define MEMORY_ATTR_QE_2ND_BUS_ACCESS 0x00000002
++ /**< Memory can be accessed by QUICC Engine
++ through its secondary bus interface */
++
++/* @} */
++
++
++/**************************************************************************//**
++ @Function t_GetBufFunction
++
++ @Description User callback function called by driver to get data buffer.
++
++ User provides this function. Driver invokes it.
++
++ @Param[in] h_BufferPool - A handle to buffer pool manager
++ @Param[out] p_BufContextHandle - Returns the user's private context that
++ should be associated with the buffer
++
++ @Return Pointer to data buffer, NULL if error
++ *//***************************************************************************/
++typedef uint8_t * (t_GetBufFunction)(t_Handle h_BufferPool,
++ t_Handle *p_BufContextHandle);
++
++/**************************************************************************//**
++ @Function t_PutBufFunction
++
++ @Description User callback function called by driver to return data buffer.
++
++ User provides this function. Driver invokes it.
++
++ @Param[in] h_BufferPool - A handle to buffer pool manager
++ @Param[in] p_Buffer - A pointer to buffer to return
++ @Param[in] h_BufContext - The user's private context associated with
++ the returned buffer
++
++ @Return E_OK on success; Error code otherwise
++ *//***************************************************************************/
++typedef t_Error (t_PutBufFunction)(t_Handle h_BufferPool,
++ uint8_t *p_Buffer,
++ t_Handle h_BufContext);
++
++/**************************************************************************//**
++ @Function t_PhysToVirt
++
++ @Description Translates a physical address to the matching virtual address.
++
++ @Param[in] addr - The physical address to translate.
++
++ @Return Virtual address.
++*//***************************************************************************/
++typedef void * t_PhysToVirt(physAddress_t addr);
++
++/**************************************************************************//**
++ @Function t_VirtToPhys
++
++ @Description Translates a virtual address to the matching physical address.
++
++ @Param[in] addr - The virtual address to translate.
++
++ @Return Physical address.
++*//***************************************************************************/
++typedef physAddress_t t_VirtToPhys(void *addr);
++
++/**************************************************************************//**
++ @Description Buffer Pool Information Structure.
++*//***************************************************************************/
++typedef struct t_BufferPoolInfo
++{
++ t_Handle h_BufferPool; /**< A handle to the buffer pool manager */
++ t_GetBufFunction *f_GetBuf; /**< User callback to get a free buffer */
++ t_PutBufFunction *f_PutBuf; /**< User callback to return a buffer */
++ uint16_t bufferSize; /**< Buffer size (in bytes) */
++
++ t_PhysToVirt *f_PhysToVirt; /**< User callback to translate pool buffers
++ physical addresses to virtual addresses */
++ t_VirtToPhys *f_VirtToPhys; /**< User callback to translate pool buffers
++ virtual addresses to physical addresses */
++} t_BufferPoolInfo;
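++
++/*
++ * Editor's sketch (hypothetical callback names, illustrative only):
++ *
++ *   static uint8_t * MyGetBuf(t_Handle h_Pool, t_Handle *p_Ctx)
++ *       { ... return a free buffer, or NULL ... }
++ *   static t_Error MyPutBuf(t_Handle h_Pool, uint8_t *p_Buf, t_Handle h_Ctx)
++ *       { ... return E_OK; ... }
++ *
++ *   t_BufferPoolInfo info = {
++ *       .h_BufferPool = h_MyPool,
++ *       .f_GetBuf     = MyGetBuf,
++ *       .f_PutBuf     = MyPutBuf,
++ *       .bufferSize   = 2048,
++ *   };
++ */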
++
++
++/**************************************************************************//**
++ @Description User callback function called by driver when transmit completed.
++
++ User provides this function. Driver invokes it.
++
++ @Param[in] h_App - Application's handle, as was provided to the
++ driver by the user
++ @Param[in] queueId - Transmit queue ID
++ @Param[in] p_Data - Pointer to the data buffer
++ @Param[in] h_BufContext - The user's private context associated with
++ the given data buffer
++ @Param[in] status - Transmit status and errors
++ @Param[in] flags - Driver-dependent information
++ *//***************************************************************************/
++typedef void (t_TxConfFunction)(t_Handle h_App,
++ uint32_t queueId,
++ uint8_t *p_Data,
++ t_Handle h_BufContext,
++ uint16_t status,
++ uint32_t flags);
++
++/**************************************************************************//**
++ @Description User callback function called by driver with receive data.
++
++ User provides this function. Driver invokes it.
++
++ @Param[in] h_App - Application's handle, as was provided to the
++ driver by the user
++ @Param[in] queueId - Receive queue ID
++ @Param[in] p_Data - Pointer to the buffer with received data
++ @Param[in] h_BufContext - The user's private context associated with
++ the given data buffer
++ @Param[in] length - Length of received data
++ @Param[in] status - Receive status and errors
++ @Param[in] position - Position of buffer in frame
++ @Param[in] flags - Driver-dependent information
++
++ @Retval e_RX_STORE_RESPONSE_CONTINUE - order the driver to continue Rx
++ operation for all ready data.
++ @Retval e_RX_STORE_RESPONSE_PAUSE - order the driver to stop Rx operation.
++ *//***************************************************************************/
++typedef e_RxStoreResponse (t_RxStoreFunction)(t_Handle h_App,
++ uint32_t queueId,
++ uint8_t *p_Data,
++ t_Handle h_BufContext,
++ uint32_t length,
++ uint16_t status,
++ uint8_t position,
++ uint32_t flags);
++
++
++#endif /* __NCSW_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/net_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/net_ext.h
+new file mode 100644
+index 00000000..8f3bc369
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/net_ext.h
+@@ -0,0 +1,430 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File net_ext.h
++
++ @Description This file contains common and general netcomm headers definitions.
++*//***************************************************************************/
++#ifndef __NET_EXT_H
++#define __NET_EXT_H
++
++#include "std_ext.h"
++
++
++typedef uint8_t headerFieldPpp_t;
++
++#define NET_HEADER_FIELD_PPP_PID (1)
++#define NET_HEADER_FIELD_PPP_COMPRESSED (NET_HEADER_FIELD_PPP_PID << 1)
++#define NET_HEADER_FIELD_PPP_ALL_FIELDS ((NET_HEADER_FIELD_PPP_PID << 2) - 1)
++
++
++typedef uint8_t headerFieldPppoe_t;
++
++#define NET_HEADER_FIELD_PPPoE_VER (1)
++#define NET_HEADER_FIELD_PPPoE_TYPE (NET_HEADER_FIELD_PPPoE_VER << 1)
++#define NET_HEADER_FIELD_PPPoE_CODE (NET_HEADER_FIELD_PPPoE_VER << 2)
++#define NET_HEADER_FIELD_PPPoE_SID (NET_HEADER_FIELD_PPPoE_VER << 3)
++#define NET_HEADER_FIELD_PPPoE_LEN (NET_HEADER_FIELD_PPPoE_VER << 4)
++#define NET_HEADER_FIELD_PPPoE_SESSION (NET_HEADER_FIELD_PPPoE_VER << 5)
++#define NET_HEADER_FIELD_PPPoE_PID (NET_HEADER_FIELD_PPPoE_VER << 6)
++#define NET_HEADER_FIELD_PPPoE_ALL_FIELDS ((NET_HEADER_FIELD_PPPoE_VER << 7) - 1)
++
++#define NET_HEADER_FIELD_PPPMUX_PID (1)
++#define NET_HEADER_FIELD_PPPMUX_CKSUM (NET_HEADER_FIELD_PPPMUX_PID << 1)
++#define NET_HEADER_FIELD_PPPMUX_COMPRESSED (NET_HEADER_FIELD_PPPMUX_PID << 2)
++#define NET_HEADER_FIELD_PPPMUX_ALL_FIELDS ((NET_HEADER_FIELD_PPPMUX_PID << 3) - 1)
++
++#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF (1)
++#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_LXT (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 1)
++#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_LEN (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 2)
++#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_PID (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 3)
++#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_USE_PID (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 4)
++#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_ALL_FIELDS ((NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 5) - 1)
++
++
++typedef uint8_t headerFieldEth_t;
++
++#define NET_HEADER_FIELD_ETH_DA (1)
++#define NET_HEADER_FIELD_ETH_SA (NET_HEADER_FIELD_ETH_DA << 1)
++#define NET_HEADER_FIELD_ETH_LENGTH (NET_HEADER_FIELD_ETH_DA << 2)
++#define NET_HEADER_FIELD_ETH_TYPE (NET_HEADER_FIELD_ETH_DA << 3)
++#define NET_HEADER_FIELD_ETH_FINAL_CKSUM (NET_HEADER_FIELD_ETH_DA << 4)
++#define NET_HEADER_FIELD_ETH_PADDING (NET_HEADER_FIELD_ETH_DA << 5)
++#define NET_HEADER_FIELD_ETH_ALL_FIELDS ((NET_HEADER_FIELD_ETH_DA << 6) - 1)
++
++#define NET_HEADER_FIELD_ETH_ADDR_SIZE 6
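++
++/*
++ * Editor's example (illustrative only): the field defines of one header
++ * type are one-hot bits that can be OR-ed into a selection mask, e.g.
++ *
++ *   headerFieldEth_t mask = NET_HEADER_FIELD_ETH_DA | NET_HEADER_FIELD_ETH_SA;
++ *
++ * and each ..._ALL_FIELDS value is the mask with every field bit set.
++ */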
++
++typedef uint16_t headerFieldIp_t;
++
++#define NET_HEADER_FIELD_IP_VER (1)
++#define NET_HEADER_FIELD_IP_DSCP (NET_HEADER_FIELD_IP_VER << 2)
++#define NET_HEADER_FIELD_IP_ECN (NET_HEADER_FIELD_IP_VER << 3)
++#define NET_HEADER_FIELD_IP_PROTO (NET_HEADER_FIELD_IP_VER << 4)
++
++#define NET_HEADER_FIELD_IP_PROTO_SIZE 1
++
++typedef uint16_t headerFieldIpv4_t;
++
++#define NET_HEADER_FIELD_IPv4_VER (1)
++#define NET_HEADER_FIELD_IPv4_HDR_LEN (NET_HEADER_FIELD_IPv4_VER << 1)
++#define NET_HEADER_FIELD_IPv4_TOS (NET_HEADER_FIELD_IPv4_VER << 2)
++#define NET_HEADER_FIELD_IPv4_TOTAL_LEN (NET_HEADER_FIELD_IPv4_VER << 3)
++#define NET_HEADER_FIELD_IPv4_ID (NET_HEADER_FIELD_IPv4_VER << 4)
++#define NET_HEADER_FIELD_IPv4_FLAG_D (NET_HEADER_FIELD_IPv4_VER << 5)
++#define NET_HEADER_FIELD_IPv4_FLAG_M (NET_HEADER_FIELD_IPv4_VER << 6)
++#define NET_HEADER_FIELD_IPv4_OFFSET (NET_HEADER_FIELD_IPv4_VER << 7)
++#define NET_HEADER_FIELD_IPv4_TTL (NET_HEADER_FIELD_IPv4_VER << 8)
++#define NET_HEADER_FIELD_IPv4_PROTO (NET_HEADER_FIELD_IPv4_VER << 9)
++#define NET_HEADER_FIELD_IPv4_CKSUM (NET_HEADER_FIELD_IPv4_VER << 10)
++#define NET_HEADER_FIELD_IPv4_SRC_IP (NET_HEADER_FIELD_IPv4_VER << 11)
++#define NET_HEADER_FIELD_IPv4_DST_IP (NET_HEADER_FIELD_IPv4_VER << 12)
++#define NET_HEADER_FIELD_IPv4_OPTS (NET_HEADER_FIELD_IPv4_VER << 13)
++#define NET_HEADER_FIELD_IPv4_OPTS_COUNT (NET_HEADER_FIELD_IPv4_VER << 14)
++#define NET_HEADER_FIELD_IPv4_ALL_FIELDS ((NET_HEADER_FIELD_IPv4_VER << 15) - 1)
++
++#define NET_HEADER_FIELD_IPv4_ADDR_SIZE 4
++#define NET_HEADER_FIELD_IPv4_PROTO_SIZE 1
++
++
++typedef uint8_t headerFieldIpv6_t;
++
++#define NET_HEADER_FIELD_IPv6_VER (1)
++#define NET_HEADER_FIELD_IPv6_TC (NET_HEADER_FIELD_IPv6_VER << 1)
++#define NET_HEADER_FIELD_IPv6_SRC_IP (NET_HEADER_FIELD_IPv6_VER << 2)
++#define NET_HEADER_FIELD_IPv6_DST_IP (NET_HEADER_FIELD_IPv6_VER << 3)
++#define NET_HEADER_FIELD_IPv6_NEXT_HDR (NET_HEADER_FIELD_IPv6_VER << 4)
++#define NET_HEADER_FIELD_IPv6_FL (NET_HEADER_FIELD_IPv6_VER << 5)
++#define NET_HEADER_FIELD_IPv6_HOP_LIMIT (NET_HEADER_FIELD_IPv6_VER << 6)
++#define NET_HEADER_FIELD_IPv6_ALL_FIELDS ((NET_HEADER_FIELD_IPv6_VER << 7) - 1)
++
++#define NET_HEADER_FIELD_IPv6_ADDR_SIZE 16
++#define NET_HEADER_FIELD_IPv6_NEXT_HDR_SIZE 1
++
++#define NET_HEADER_FIELD_ICMP_TYPE (1)
++#define NET_HEADER_FIELD_ICMP_CODE (NET_HEADER_FIELD_ICMP_TYPE << 1)
++#define NET_HEADER_FIELD_ICMP_CKSUM (NET_HEADER_FIELD_ICMP_TYPE << 2)
++#define NET_HEADER_FIELD_ICMP_ID (NET_HEADER_FIELD_ICMP_TYPE << 3)
++#define NET_HEADER_FIELD_ICMP_SQ_NUM (NET_HEADER_FIELD_ICMP_TYPE << 4)
++#define NET_HEADER_FIELD_ICMP_ALL_FIELDS ((NET_HEADER_FIELD_ICMP_TYPE << 5) - 1)
++
++#define NET_HEADER_FIELD_ICMP_CODE_SIZE 1
++#define NET_HEADER_FIELD_ICMP_TYPE_SIZE 1
++
++#define NET_HEADER_FIELD_IGMP_VERSION (1)
++#define NET_HEADER_FIELD_IGMP_TYPE (NET_HEADER_FIELD_IGMP_VERSION << 1)
++#define NET_HEADER_FIELD_IGMP_CKSUM (NET_HEADER_FIELD_IGMP_VERSION << 2)
++#define NET_HEADER_FIELD_IGMP_DATA (NET_HEADER_FIELD_IGMP_VERSION << 3)
++#define NET_HEADER_FIELD_IGMP_ALL_FIELDS ((NET_HEADER_FIELD_IGMP_VERSION << 4) - 1)
++
++
++typedef uint16_t headerFieldTcp_t;
++
++#define NET_HEADER_FIELD_TCP_PORT_SRC (1)
++#define NET_HEADER_FIELD_TCP_PORT_DST (NET_HEADER_FIELD_TCP_PORT_SRC << 1)
++#define NET_HEADER_FIELD_TCP_SEQ (NET_HEADER_FIELD_TCP_PORT_SRC << 2)
++#define NET_HEADER_FIELD_TCP_ACK (NET_HEADER_FIELD_TCP_PORT_SRC << 3)
++#define NET_HEADER_FIELD_TCP_OFFSET (NET_HEADER_FIELD_TCP_PORT_SRC << 4)
++#define NET_HEADER_FIELD_TCP_FLAGS (NET_HEADER_FIELD_TCP_PORT_SRC << 5)
++#define NET_HEADER_FIELD_TCP_WINDOW (NET_HEADER_FIELD_TCP_PORT_SRC << 6)
++#define NET_HEADER_FIELD_TCP_CKSUM (NET_HEADER_FIELD_TCP_PORT_SRC << 7)
++#define NET_HEADER_FIELD_TCP_URGPTR (NET_HEADER_FIELD_TCP_PORT_SRC << 8)
++#define NET_HEADER_FIELD_TCP_OPTS (NET_HEADER_FIELD_TCP_PORT_SRC << 9)
++#define NET_HEADER_FIELD_TCP_OPTS_COUNT (NET_HEADER_FIELD_TCP_PORT_SRC << 10)
++#define NET_HEADER_FIELD_TCP_ALL_FIELDS ((NET_HEADER_FIELD_TCP_PORT_SRC << 11) - 1)
++
++#define NET_HEADER_FIELD_TCP_PORT_SIZE 2
++
++
++typedef uint8_t headerFieldSctp_t;
++
++#define NET_HEADER_FIELD_SCTP_PORT_SRC (1)
++#define NET_HEADER_FIELD_SCTP_PORT_DST (NET_HEADER_FIELD_SCTP_PORT_SRC << 1)
++#define NET_HEADER_FIELD_SCTP_VER_TAG (NET_HEADER_FIELD_SCTP_PORT_SRC << 2)
++#define NET_HEADER_FIELD_SCTP_CKSUM (NET_HEADER_FIELD_SCTP_PORT_SRC << 3)
++#define NET_HEADER_FIELD_SCTP_ALL_FIELDS ((NET_HEADER_FIELD_SCTP_PORT_SRC << 4) - 1)
++
++#define NET_HEADER_FIELD_SCTP_PORT_SIZE 2
++
++typedef uint8_t headerFieldDccp_t;
++
++#define NET_HEADER_FIELD_DCCP_PORT_SRC (1)
++#define NET_HEADER_FIELD_DCCP_PORT_DST (NET_HEADER_FIELD_DCCP_PORT_SRC << 1)
++#define NET_HEADER_FIELD_DCCP_ALL_FIELDS ((NET_HEADER_FIELD_DCCP_PORT_SRC << 2) - 1)
++
++#define NET_HEADER_FIELD_DCCP_PORT_SIZE 2
++
++
++typedef uint8_t headerFieldUdp_t;
++
++#define NET_HEADER_FIELD_UDP_PORT_SRC (1)
++#define NET_HEADER_FIELD_UDP_PORT_DST (NET_HEADER_FIELD_UDP_PORT_SRC << 1)
++#define NET_HEADER_FIELD_UDP_LEN (NET_HEADER_FIELD_UDP_PORT_SRC << 2)
++#define NET_HEADER_FIELD_UDP_CKSUM (NET_HEADER_FIELD_UDP_PORT_SRC << 3)
++#define NET_HEADER_FIELD_UDP_ALL_FIELDS ((NET_HEADER_FIELD_UDP_PORT_SRC << 4) - 1)
++
++#define NET_HEADER_FIELD_UDP_PORT_SIZE 2
++
++typedef uint8_t headerFieldUdpLite_t;
++
++#define NET_HEADER_FIELD_UDP_LITE_PORT_SRC (1)
++#define NET_HEADER_FIELD_UDP_LITE_PORT_DST (NET_HEADER_FIELD_UDP_LITE_PORT_SRC << 1)
++#define NET_HEADER_FIELD_UDP_LITE_ALL_FIELDS ((NET_HEADER_FIELD_UDP_LITE_PORT_SRC << 2) - 1)
++
++#define NET_HEADER_FIELD_UDP_LITE_PORT_SIZE 2
++
++typedef uint8_t headerFieldUdpEncapEsp_t;
++
++#define NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC (1)
++#define NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 1)
++#define NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 2)
++#define NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 3)
++#define NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 4)
++#define NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 5)
++#define NET_HEADER_FIELD_UDP_ENCAP_ESP_ALL_FIELDS ((NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 6) - 1)
++
++#define NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SIZE 2
++#define NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI_SIZE 4
++
++#define NET_HEADER_FIELD_IPHC_CID (1)
++#define NET_HEADER_FIELD_IPHC_CID_TYPE (NET_HEADER_FIELD_IPHC_CID << 1)
++#define NET_HEADER_FIELD_IPHC_HCINDEX (NET_HEADER_FIELD_IPHC_CID << 2)
++#define NET_HEADER_FIELD_IPHC_GEN (NET_HEADER_FIELD_IPHC_CID << 3)
++#define NET_HEADER_FIELD_IPHC_D_BIT (NET_HEADER_FIELD_IPHC_CID << 4)
++#define NET_HEADER_FIELD_IPHC_ALL_FIELDS ((NET_HEADER_FIELD_IPHC_CID << 5) - 1)
++
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE (1)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_FLAGS (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 1)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_LENGTH (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 2)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_TSN (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 3)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_ID (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 4)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_SQN (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 5)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_PAYLOAD_PID (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 6)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_UNORDERED (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 7)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_BEGGINING (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 8)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_END (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 9)
++#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_ALL_FIELDS ((NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
++
++#define NET_HEADER_FIELD_L2TPv2_TYPE_BIT (1)
++#define NET_HEADER_FIELD_L2TPv2_LENGTH_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 1)
++#define NET_HEADER_FIELD_L2TPv2_SEQUENCE_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 2)
++#define NET_HEADER_FIELD_L2TPv2_OFFSET_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 3)
++#define NET_HEADER_FIELD_L2TPv2_PRIORITY_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 4)
++#define NET_HEADER_FIELD_L2TPv2_VERSION (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 5)
++#define NET_HEADER_FIELD_L2TPv2_LEN (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 6)
++#define NET_HEADER_FIELD_L2TPv2_TUNNEL_ID (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 7)
++#define NET_HEADER_FIELD_L2TPv2_SESSION_ID (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 8)
++#define NET_HEADER_FIELD_L2TPv2_NS (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 9)
++#define NET_HEADER_FIELD_L2TPv2_NR (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 10)
++#define NET_HEADER_FIELD_L2TPv2_OFFSET_SIZE (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 11)
++#define NET_HEADER_FIELD_L2TPv2_FIRST_BYTE (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 12)
++#define NET_HEADER_FIELD_L2TPv2_ALL_FIELDS ((NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 13) - 1)
++
++#define NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT (1)
++#define NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH_BIT (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 1)
++#define NET_HEADER_FIELD_L2TPv3_CTRL_SEQUENCE_BIT (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 2)
++#define NET_HEADER_FIELD_L2TPv3_CTRL_VERSION (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 3)
++#define NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 4)
++#define NET_HEADER_FIELD_L2TPv3_CTRL_CONTROL (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 5)
++#define NET_HEADER_FIELD_L2TPv3_CTRL_SENT (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 6)
++#define NET_HEADER_FIELD_L2TPv3_CTRL_RECV (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 7)
++#define NET_HEADER_FIELD_L2TPv3_CTRL_FIRST_BYTE (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 8)
++#define NET_HEADER_FIELD_L2TPv3_CTRL_ALL_FIELDS ((NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 9) - 1)
++
++#define NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT (1)
++#define NET_HEADER_FIELD_L2TPv3_SESS_VERSION (NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 1)
++#define NET_HEADER_FIELD_L2TPv3_SESS_ID (NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 2)
++#define NET_HEADER_FIELD_L2TPv3_SESS_COOKIE (NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 3)
++#define NET_HEADER_FIELD_L2TPv3_SESS_ALL_FIELDS ((NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 4) - 1)
++
++
++typedef uint8_t headerFieldVlan_t;
++
++#define NET_HEADER_FIELD_VLAN_VPRI (1)
++#define NET_HEADER_FIELD_VLAN_CFI (NET_HEADER_FIELD_VLAN_VPRI << 1)
++#define NET_HEADER_FIELD_VLAN_VID (NET_HEADER_FIELD_VLAN_VPRI << 2)
++#define NET_HEADER_FIELD_VLAN_LENGTH (NET_HEADER_FIELD_VLAN_VPRI << 3)
++#define NET_HEADER_FIELD_VLAN_TYPE (NET_HEADER_FIELD_VLAN_VPRI << 4)
++#define NET_HEADER_FIELD_VLAN_ALL_FIELDS ((NET_HEADER_FIELD_VLAN_VPRI << 5) - 1)
++
++#define NET_HEADER_FIELD_VLAN_TCI (NET_HEADER_FIELD_VLAN_VPRI | \
++ NET_HEADER_FIELD_VLAN_CFI | \
++ NET_HEADER_FIELD_VLAN_VID)
++
++
++typedef uint8_t headerFieldLlc_t;
++
++#define NET_HEADER_FIELD_LLC_DSAP (1)
++#define NET_HEADER_FIELD_LLC_SSAP (NET_HEADER_FIELD_LLC_DSAP << 1)
++#define NET_HEADER_FIELD_LLC_CTRL (NET_HEADER_FIELD_LLC_DSAP << 2)
++#define NET_HEADER_FIELD_LLC_ALL_FIELDS ((NET_HEADER_FIELD_LLC_DSAP << 3) - 1)
++
++#define NET_HEADER_FIELD_NLPID_NLPID (1)
++#define NET_HEADER_FIELD_NLPID_ALL_FIELDS ((NET_HEADER_FIELD_NLPID_NLPID << 1) - 1)
++
++
++typedef uint8_t headerFieldSnap_t;
++
++#define NET_HEADER_FIELD_SNAP_OUI (1)
++#define NET_HEADER_FIELD_SNAP_PID (NET_HEADER_FIELD_SNAP_OUI << 1)
++#define NET_HEADER_FIELD_SNAP_ALL_FIELDS ((NET_HEADER_FIELD_SNAP_OUI << 2) - 1)
++
++
++typedef uint8_t headerFieldLlcSnap_t;
++
++#define NET_HEADER_FIELD_LLC_SNAP_TYPE (1)
++#define NET_HEADER_FIELD_LLC_SNAP_ALL_FIELDS ((NET_HEADER_FIELD_LLC_SNAP_TYPE << 1) - 1)
++
++#define NET_HEADER_FIELD_ARP_HTYPE (1)
++#define NET_HEADER_FIELD_ARP_PTYPE (NET_HEADER_FIELD_ARP_HTYPE << 1)
++#define NET_HEADER_FIELD_ARP_HLEN (NET_HEADER_FIELD_ARP_HTYPE << 2)
++#define NET_HEADER_FIELD_ARP_PLEN (NET_HEADER_FIELD_ARP_HTYPE << 3)
++#define NET_HEADER_FIELD_ARP_OPER (NET_HEADER_FIELD_ARP_HTYPE << 4)
++#define NET_HEADER_FIELD_ARP_SHA (NET_HEADER_FIELD_ARP_HTYPE << 5)
++#define NET_HEADER_FIELD_ARP_SPA (NET_HEADER_FIELD_ARP_HTYPE << 6)
++#define NET_HEADER_FIELD_ARP_THA (NET_HEADER_FIELD_ARP_HTYPE << 7)
++#define NET_HEADER_FIELD_ARP_TPA (NET_HEADER_FIELD_ARP_HTYPE << 8)
++#define NET_HEADER_FIELD_ARP_ALL_FIELDS ((NET_HEADER_FIELD_ARP_HTYPE << 9) - 1)
++
++#define NET_HEADER_FIELD_RFC2684_LLC (1)
++#define NET_HEADER_FIELD_RFC2684_NLPID (NET_HEADER_FIELD_RFC2684_LLC << 1)
++#define NET_HEADER_FIELD_RFC2684_OUI (NET_HEADER_FIELD_RFC2684_LLC << 2)
++#define NET_HEADER_FIELD_RFC2684_PID (NET_HEADER_FIELD_RFC2684_LLC << 3)
++#define NET_HEADER_FIELD_RFC2684_VPN_OUI (NET_HEADER_FIELD_RFC2684_LLC << 4)
++#define NET_HEADER_FIELD_RFC2684_VPN_IDX (NET_HEADER_FIELD_RFC2684_LLC << 5)
++#define NET_HEADER_FIELD_RFC2684_ALL_FIELDS ((NET_HEADER_FIELD_RFC2684_LLC << 6) - 1)
++
++#define NET_HEADER_FIELD_USER_DEFINED_SRCPORT (1)
++#define NET_HEADER_FIELD_USER_DEFINED_PCDID (NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 1)
++#define NET_HEADER_FIELD_USER_DEFINED_ALL_FIELDS ((NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 2) - 1)
++
++#define NET_HEADER_FIELD_PAYLOAD_BUFFER (1)
++#define NET_HEADER_FIELD_PAYLOAD_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 1)
++#define NET_HEADER_FIELD_MAX_FRM_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 2)
++#define NET_HEADER_FIELD_MIN_FRM_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 3)
++#define NET_HEADER_FIELD_PAYLOAD_TYPE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 4)
++#define NET_HEADER_FIELD_FRAME_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 5)
++#define NET_HEADER_FIELD_PAYLOAD_ALL_FIELDS ((NET_HEADER_FIELD_PAYLOAD_BUFFER << 6) - 1)
++
++
++typedef uint8_t headerFieldGre_t;
++
++#define NET_HEADER_FIELD_GRE_TYPE (1)
++#define NET_HEADER_FIELD_GRE_ALL_FIELDS ((NET_HEADER_FIELD_GRE_TYPE << 1) - 1)
++
++
++typedef uint8_t headerFieldMinencap_t;
++
++#define NET_HEADER_FIELD_MINENCAP_SRC_IP (1)
++#define NET_HEADER_FIELD_MINENCAP_DST_IP (NET_HEADER_FIELD_MINENCAP_SRC_IP << 1)
++#define NET_HEADER_FIELD_MINENCAP_TYPE (NET_HEADER_FIELD_MINENCAP_SRC_IP << 2)
++#define NET_HEADER_FIELD_MINENCAP_ALL_FIELDS ((NET_HEADER_FIELD_MINENCAP_SRC_IP << 3) - 1)
++
++
++typedef uint8_t headerFieldIpsecAh_t;
++
++#define NET_HEADER_FIELD_IPSEC_AH_SPI (1)
++#define NET_HEADER_FIELD_IPSEC_AH_NH (NET_HEADER_FIELD_IPSEC_AH_SPI << 1)
++#define NET_HEADER_FIELD_IPSEC_AH_ALL_FIELDS ((NET_HEADER_FIELD_IPSEC_AH_SPI << 2) - 1)
++
++
++typedef uint8_t headerFieldIpsecEsp_t;
++
++#define NET_HEADER_FIELD_IPSEC_ESP_SPI (1)
++#define NET_HEADER_FIELD_IPSEC_ESP_SEQUENCE_NUM (NET_HEADER_FIELD_IPSEC_ESP_SPI << 1)
++#define NET_HEADER_FIELD_IPSEC_ESP_ALL_FIELDS ((NET_HEADER_FIELD_IPSEC_ESP_SPI << 2) - 1)
++
++#define NET_HEADER_FIELD_IPSEC_ESP_SPI_SIZE 4
++
++
++typedef uint8_t headerFieldMpls_t;
++
++#define NET_HEADER_FIELD_MPLS_LABEL_STACK (1)
++#define NET_HEADER_FIELD_MPLS_LABEL_STACK_ALL_FIELDS ((NET_HEADER_FIELD_MPLS_LABEL_STACK << 1) - 1)
++
++
++typedef uint8_t headerFieldMacsec_t;
++
++#define NET_HEADER_FIELD_MACSEC_SECTAG (1)
++#define NET_HEADER_FIELD_MACSEC_ALL_FIELDS ((NET_HEADER_FIELD_MACSEC_SECTAG << 1) - 1)
++
++
++typedef enum {
++ HEADER_TYPE_NONE = 0,
++ HEADER_TYPE_PAYLOAD,
++ HEADER_TYPE_ETH,
++ HEADER_TYPE_VLAN,
++ HEADER_TYPE_IPv4,
++ HEADER_TYPE_IPv6,
++ HEADER_TYPE_IP,
++ HEADER_TYPE_TCP,
++ HEADER_TYPE_UDP,
++ HEADER_TYPE_UDP_LITE,
++ HEADER_TYPE_IPHC,
++ HEADER_TYPE_SCTP,
++ HEADER_TYPE_SCTP_CHUNK_DATA,
++ HEADER_TYPE_PPPoE,
++ HEADER_TYPE_PPP,
++ HEADER_TYPE_PPPMUX,
++ HEADER_TYPE_PPPMUX_SUBFRAME,
++ HEADER_TYPE_L2TPv2,
++ HEADER_TYPE_L2TPv3_CTRL,
++ HEADER_TYPE_L2TPv3_SESS,
++ HEADER_TYPE_LLC,
++ HEADER_TYPE_LLC_SNAP,
++ HEADER_TYPE_NLPID,
++ HEADER_TYPE_SNAP,
++ HEADER_TYPE_MPLS,
++ HEADER_TYPE_IPSEC_AH,
++ HEADER_TYPE_IPSEC_ESP,
++ HEADER_TYPE_UDP_ENCAP_ESP, /* RFC 3948 */
++ HEADER_TYPE_MACSEC,
++ HEADER_TYPE_GRE,
++ HEADER_TYPE_MINENCAP,
++ HEADER_TYPE_DCCP,
++ HEADER_TYPE_ICMP,
++ HEADER_TYPE_IGMP,
++ HEADER_TYPE_ARP,
++ HEADER_TYPE_CAPWAP,
++ HEADER_TYPE_CAPWAP_DTLS,
++ HEADER_TYPE_RFC2684,
++ HEADER_TYPE_USER_DEFINED_L2,
++ HEADER_TYPE_USER_DEFINED_L3,
++ HEADER_TYPE_USER_DEFINED_L4,
++ HEADER_TYPE_USER_DEFINED_SHIM1,
++ HEADER_TYPE_USER_DEFINED_SHIM2,
++ MAX_HEADER_TYPE_COUNT
++} e_NetHeaderType;
++
++
++#endif /* __NET_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/std_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/std_ext.h
+new file mode 100644
+index 00000000..d91e6fdd
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/std_ext.h
+@@ -0,0 +1,48 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File std_ext.h
++
++ @Description General Standard Definitions
++*//***************************************************************************/
++
++#ifndef __STD_EXT_H
++#define __STD_EXT_H
++
++
++#include "types_ext.h"
++#include "ncsw_ext.h"
++
++
++#endif /* __STD_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/stdarg_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/stdarg_ext.h
+new file mode 100644
+index 00000000..3c8bb0a0
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/stdarg_ext.h
+@@ -0,0 +1,49 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef __STDARG_EXT_H
++#define __STDARG_EXT_H
++
++
++#if defined(NCSW_LINUX) && defined(__KERNEL__)
++#include <stdarg.h>
++
++#else
++#include <stdarg.h>
++
++#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
++
++#include "std_ext.h"
++
++
++#endif /* __STDARG_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/stdlib_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/stdlib_ext.h
+new file mode 100644
+index 00000000..a47860cf
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/stdlib_ext.h
+@@ -0,0 +1,162 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++
++#ifndef __STDLIB_EXT_H
++#define __STDLIB_EXT_H
++
++
++#if (defined(NCSW_LINUX)) && defined(__KERNEL__)
++#include "stdarg_ext.h"
++#include "std_ext.h"
++
++
++/**
++ * strtoul - convert a string to a uint32_t
++ * @cp: The start of the string
++ * @endp: A pointer to the end of the parsed string will be placed here
++ * @base: The number base to use
++ */
++uint32_t strtoul(const char *cp,char **endp,uint32_t base);
++
++/**
++ * strtol - convert a string to an int32_t
++ * @cp: The start of the string
++ * @endp: A pointer to the end of the parsed string will be placed here
++ * @base: The number base to use
++ */
++long strtol(const char *cp,char **endp,uint32_t base);
++
++/**
++ * strtoull - convert a string to a uint64_t
++ * @cp: The start of the string
++ * @endp: A pointer to the end of the parsed string will be placed here
++ * @base: The number base to use
++ */
++uint64_t strtoull(const char *cp,char **endp,uint32_t base);
++
++/**
++ * strtoll - convert a string to a signed long long
++ * @cp: The start of the string
++ * @endp: A pointer to the end of the parsed string will be placed here
++ * @base: The number base to use
++ */
++long long strtoll(const char *cp,char **endp,uint32_t base);
++
++/**
++ * atoi - convert a string to an int
++ * @s: The start of the string
++ */
++int atoi(const char *s);
++
++/**
++ * strnlen - Find the length of a length-limited string
++ * @s: The string to be sized
++ * @count: The maximum number of bytes to search
++ */
++size_t strnlen(const char * s, size_t count);
++
++/**
++ * strlen - Find the length of a string
++ * @s: The string to be sized
++ */
++size_t strlen(const char * s);
++
++/**
++ * strtok - Split a string into tokens
++ * @s: The string to be searched
++ * @ct: The characters to search for
++ *
++ * WARNING: strtok is deprecated, use strsep instead.
++ */
++char * strtok(char * s,const char * ct);
++
++/**
++ * strncpy - Copy a length-limited, %NUL-terminated string
++ * @dest: Where to copy the string to
++ * @src: Where to copy the string from
++ * @count: The maximum number of bytes to copy
++ *
++ * Note that unlike userspace strncpy, this does not %NUL-pad the buffer.
++ * However, the result is not %NUL-terminated if the source exceeds
++ * @count bytes.
++ */
++char * strncpy(char * dest,const char *src,size_t count);
++
++/**
++ * strcpy - Copy a %NUL terminated string
++ * @dest: Where to copy the string to
++ * @src: Where to copy the string from
++ */
++char * strcpy(char * dest,const char *src);
++
++/**
++ * vsscanf - Unformat a buffer into a list of arguments
++ * @buf: input buffer
++ * @fmt: format of buffer
++ * @args: arguments
++ */
++int vsscanf(const char * buf, const char * fmt, va_list args);
++
++/**
++ * vsnprintf - Format a string and place it in a buffer
++ * @buf: The buffer to place the result into
++ * @size: The size of the buffer, including the trailing null space
++ * @fmt: The format string to use
++ * @args: Arguments for the format string
++ *
++ * Call this function if you are already dealing with a va_list.
++ * You probably want snprintf instead.
++ */
++int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
++
++/**
++ * vsprintf - Format a string and place it in a buffer
++ * @buf: The buffer to place the result into
++ * @fmt: The format string to use
++ * @args: Arguments for the format string
++ *
++ * Call this function if you are already dealing with a va_list.
++ * You probably want sprintf instead.
++ */
++int vsprintf(char *buf, const char *fmt, va_list args);
++
++#else
++#include <stdlib.h>
++#include <stdio.h>
++#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
++
++#include "std_ext.h"
++
++
++#endif /* __STDLIB_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/string_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/string_ext.h
+new file mode 100644
+index 00000000..a5c6c7e0
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/string_ext.h
+@@ -0,0 +1,56 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef __STRING_EXT_H
++#define __STRING_EXT_H
++
++
++#if defined(NCSW_LINUX) && defined(__KERNEL__)
++#include <linux/kernel.h>
++#include <linux/string.h>
++extern char *strtok(char *str, const char *delimiters);
++
++#elif defined(__KERNEL__)
++#include "linux/types.h"
++#include "linux/posix_types.h"
++#include "linux/string.h"
++
++#else
++#include <string.h>
++
++#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
++
++#include "std_ext.h"
++
++
++#endif /* __STRING_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/types_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/types_ext.h
+new file mode 100644
+index 00000000..8c87edb7
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/types_ext.h
+@@ -0,0 +1,62 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File types_ext.h
++
++ @Description General types Standard Definitions
++*//***************************************************************************/
++
++#ifndef __TYPES_EXT_H
++#define __TYPES_EXT_H
++
++#if defined(NCSW_LINUX)
++#include "types_linux.h"
++
++#elif defined(NCSW_VXWORKS)
++#include "types_vxworks.h"
++
++#elif defined(__GNUC__) && defined(__cplusplus)
++#include "types_bb_gpp.h"
++
++#elif defined(__GNUC__)
++#include "types_bb_gcc.h"
++
++#elif defined(__ghs__)
++#include "types_ghs.h"
++
++#else
++#include "types_dflt.h"
++#endif /* defined(NCSW_LINUX) */
++
++#endif /* __TYPES_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/xx_common.h b/drivers/net/ethernet/freescale/sdk_fman/inc/xx_common.h
+new file mode 100644
+index 00000000..8e81094b
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/xx_common.h
+@@ -0,0 +1,56 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File          xx_common.h
++
++ @Description   Unified module codes common to the XX interface.
++*//***************************************************************************/
++
++#ifndef __XX_COMMON_H
++#define __XX_COMMON_H
++
++/*****************************************************************************
++ * UNIFIED MODULE CODES
++ *****************************************************************************/
++#define MODULE_UNKNOWN 0x00000000
++#define MODULE_FM 0x00010000
++#define MODULE_FM_MURAM 0x00020000
++#define MODULE_FM_PCD 0x00030000
++#define MODULE_FM_RTC 0x00040000
++#define MODULE_FM_MAC 0x00050000
++#define MODULE_FM_PORT 0x00060000
++#define MODULE_MM 0x00070000
++#define MODULE_FM_SP 0x00080000
++#define MODULE_FM_MACSEC 0x00090000
++#endif /* __XX_COMMON_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/inc/xx_ext.h b/drivers/net/ethernet/freescale/sdk_fman/inc/xx_ext.h
+new file mode 100644
+index 00000000..21b62d0a
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/inc/xx_ext.h
+@@ -0,0 +1,791 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File xx_ext.h
++
++ @Description Prototypes, externals and typedefs for system-supplied
++ (external) routines
++*//***************************************************************************/
++
++#ifndef __XX_EXT_H
++#define __XX_EXT_H
++
++#include "std_ext.h"
++#include "xx_common.h"
++#include "part_ext.h"
++
++
++
++/**************************************************************************//**
++ @Group xx_id XX Interface (System call hooks)
++
++ @Description Prototypes, externals and typedefs for system-supplied
++ (external) routines
++
++ @{
++*//***************************************************************************/
++
++#ifdef DEBUG_XX_MALLOC
++void * XX_MallocDebug(uint32_t size, char *fname, int line);
++
++void * XX_MallocSmartDebug(uint32_t size,
++ int memPartitionId,
++ uint32_t alignment,
++ char *fname,
++ int line);
++
++#define XX_Malloc(sz) \
++ XX_MallocDebug((sz), __FILE__, __LINE__)
++
++#define XX_MallocSmart(sz, memt, al) \
++ XX_MallocSmartDebug((sz), (memt), (al), __FILE__, __LINE__)
++
++#else /* not DEBUG_XX_MALLOC */
++/**************************************************************************//**
++ @Function XX_Malloc
++
++ @Description   Allocates a contiguous block of memory.
++
++ @Param[in] size - Number of bytes to allocate.
++
++ @Return The address of the newly allocated block on success, NULL on failure.
++*//***************************************************************************/
++void * XX_Malloc(uint32_t size);
++
++/**************************************************************************//**
++ @Function XX_MallocSmart
++
++ @Description   Allocates a contiguous block of memory with the specified
++                alignment, from the specified memory partition.
++
++ @Param[in] size - Number of bytes to allocate.
++ @Param[in] memPartitionId - Memory partition ID; The value zero must
++ be mapped to the default heap partition.
++ @Param[in] alignment - Required memory alignment (in bytes).
++
++ @Return The address of the newly allocated block on success, NULL on failure.
++*//***************************************************************************/
++void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment);
++#endif /* not DEBUG_XX_MALLOC */
++
++/**************************************************************************//**
++ @Function XX_FreeSmart
++
++ @Description   Frees the memory block pointed to by p_Memory.
++                Use only for memory allocated by XX_MallocSmart().
++
++ @Param[in] p_Memory - pointer to the memory block.
++
++ @Return None.
++*//***************************************************************************/
++void XX_FreeSmart(void *p_Memory);
++
++/**************************************************************************//**
++ @Function XX_Free
++
++ @Description   Frees the memory block pointed to by p_Memory.
++
++ @Param[in] p_Memory - pointer to the memory block.
++
++ @Return None.
++*//***************************************************************************/
++void XX_Free(void *p_Memory);
++
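++/* Usage sketch for the allocation routines (minimal; E_NO_MEMORY is assumed
++ * to be defined in error_ext.h):
++ *
++ *     void *p_Buf = XX_MallocSmart(256, 0, 64);  // 256 bytes, default
++ *     if (!p_Buf)                                // partition, 64-byte aligned
++ *         return E_NO_MEMORY;
++ *     ...
++ *     XX_FreeSmart(p_Buf);                       // pairs with XX_MallocSmart()
++ */
++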
++/**************************************************************************//**
++ @Function XX_Print
++
++ @Description   Prints a formatted string.
++
++ @Param[in] str - string to print.
++
++ @Return None.
++*//***************************************************************************/
++void XX_Print(char *str, ...);
++
++/**************************************************************************//**
++ @Function XX_SetIntr
++
++ @Description Set an interrupt service routine for a specific interrupt source.
++
++ @Param[in] irq - Interrupt ID (system-specific number).
++ @Param[in] f_Isr - Callback routine that will be called when the interrupt occurs.
++ @Param[in] handle - The argument for the user callback routine.
++
++ @Return        E_OK on success; error code otherwise.
++*//***************************************************************************/
++t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle);
++
++/**************************************************************************//**
++ @Function XX_FreeIntr
++
++ @Description Free a specific interrupt and a specific callback routine.
++
++ @Param[in] irq - Interrupt ID (system-specific number).
++
++ @Return        E_OK on success; error code otherwise.
++*//***************************************************************************/
++t_Error XX_FreeIntr(int irq);
++
++/**************************************************************************//**
++ @Function XX_EnableIntr
++
++ @Description Enable a specific interrupt.
++
++ @Param[in] irq - Interrupt ID (system-specific number).
++
++ @Return        E_OK on success; error code otherwise.
++*//***************************************************************************/
++t_Error XX_EnableIntr(int irq);
++
++/**************************************************************************//**
++ @Function XX_DisableIntr
++
++ @Description Disable a specific interrupt.
++
++ @Param[in] irq - Interrupt ID (system-specific number).
++
++ @Return        E_OK on success; error code otherwise.
++*//***************************************************************************/
++t_Error XX_DisableIntr(int irq);
++
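++/* Usage sketch for per-interrupt management (my_isr is a placeholder
++ * t_Isr routine and h_Dev a placeholder device handle):
++ *
++ *     if (XX_SetIntr(irq, my_isr, h_Dev) == E_OK)
++ *         XX_EnableIntr(irq);
++ *     ...
++ *     XX_DisableIntr(irq);
++ *     XX_FreeIntr(irq);
++ */
++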
++/**************************************************************************//**
++ @Function XX_DisableAllIntr
++
++ @Description Disable all interrupts by masking them at the CPU.
++
++ @Return A value that represents the interrupts state before the
++ operation, and should be passed to the matching
++ XX_RestoreAllIntr() call.
++*//***************************************************************************/
++uint32_t XX_DisableAllIntr(void);
++
++/**************************************************************************//**
++ @Function XX_RestoreAllIntr
++
++ @Description   Restore the previous interrupt state at the CPU.
++
++ @Param[in] flags - A value that represents the interrupts state to restore,
++ as returned by the matching call for XX_DisableAllIntr().
++
++ @Return None.
++*//***************************************************************************/
++void XX_RestoreAllIntr(uint32_t flags);
++
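++/* Usage sketch: a critical section guarded by global interrupt masking.
++ *
++ *     uint32_t flags = XX_DisableAllIntr();
++ *     ...                          // code that must not be interrupted
++ *     XX_RestoreAllIntr(flags);    // restore the saved state
++ */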
++
++/**************************************************************************//**
++ @Function XX_Exit
++
++ @Description   Stop execution and report status (where applicable).
++
++ @Param[in] status - exit status
++*//***************************************************************************/
++void XX_Exit(int status);
++
++
++/*****************************************************************************/
++/* Tasklet Service Routines */
++/*****************************************************************************/
++typedef t_Handle t_TaskletHandle;
++
++/**************************************************************************//**
++ @Function XX_InitTasklet
++
++ @Description Create and initialize a tasklet object.
++
++ @Param[in]     routine - A routine to be run as a tasklet.
++ @Param[in] data - An argument to pass to the tasklet.
++
++ @Return Tasklet handle is returned on success. NULL is returned otherwise.
++*//***************************************************************************/
++t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data);
++
++/**************************************************************************//**
++ @Function XX_FreeTasklet
++
++ @Description Free a tasklet object.
++
++ @Param[in]     h_Tasklet - A handle to the tasklet to be freed.
++
++ @Return None.
++*//***************************************************************************/
++void XX_FreeTasklet (t_TaskletHandle h_Tasklet);
++
++/**************************************************************************//**
++ @Function XX_ScheduleTask
++
++ @Description Schedule a tasklet object.
++
++ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
++ @Param[in] immediate - Indicate whether to schedule this tasklet on
++ the immediate queue or on the delayed one.
++
++ @Return 0 - on success. Error code - otherwise.
++*//***************************************************************************/
++int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate);
++
++/**************************************************************************//**
++ @Function XX_FlushScheduledTasks
++
++ @Description   Flush all tasks in the scheduled-tasks queue.
++
++ @Return None.
++*//***************************************************************************/
++void XX_FlushScheduledTasks(void);
++
++/**************************************************************************//**
++ @Function XX_TaskletIsQueued
++
++ @Description   Check whether a task is queued.
++
++ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
++
++ @Return 1 - task is queued. 0 - otherwise.
++*//***************************************************************************/
++int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet);
++
++/**************************************************************************//**
++ @Function XX_SetTaskletData
++
++ @Description   Set the data of a scheduled task. Used to change the data
++                of an already-scheduled task.
++
++ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
++ @Param[in] data - Data to be set.
++*//***************************************************************************/
++void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data);
++
++/**************************************************************************//**
++ @Function XX_GetTaskletData
++
++ @Description   Get the data of a scheduled task.
++
++ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
++
++ @Return        A handle to the task's data.
++*//***************************************************************************/
++t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet);
++
++/**************************************************************************//**
++ @Function XX_BottomHalf
++
++ @Description Bottom half implementation, invoked by the interrupt handler.
++
++ This routine handles all bottom-half tasklets with interrupts
++ enabled.
++
++ @Return None.
++*//***************************************************************************/
++void XX_BottomHalf(void);
++
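++/* Usage sketch (my_work and h_Obj are placeholder names):
++ *
++ *     t_TaskletHandle h_Tasklet = XX_InitTasklet(my_work, h_Obj);
++ *     if (h_Tasklet)
++ *         XX_ScheduleTask(h_Tasklet, 0);  // 0 => use the delayed queue
++ */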
++
++/*****************************************************************************/
++/* Spinlock Service Routines */
++/*****************************************************************************/
++
++/**************************************************************************//**
++ @Function XX_InitSpinlock
++
++ @Description Creates a spinlock.
++
++ @Return Spinlock handle is returned on success; NULL otherwise.
++*//***************************************************************************/
++t_Handle XX_InitSpinlock(void);
++
++/**************************************************************************//**
++ @Function XX_FreeSpinlock
++
++ @Description Frees the memory allocated for the spinlock creation.
++
++ @Param[in] h_Spinlock - A handle to a spinlock.
++
++ @Return None.
++*//***************************************************************************/
++void XX_FreeSpinlock(t_Handle h_Spinlock);
++
++/**************************************************************************//**
++ @Function XX_LockSpinlock
++
++ @Description Locks a spinlock.
++
++ @Param[in] h_Spinlock - A handle to a spinlock.
++
++ @Return None.
++*//***************************************************************************/
++void XX_LockSpinlock(t_Handle h_Spinlock);
++
++/**************************************************************************//**
++ @Function XX_UnlockSpinlock
++
++ @Description Unlocks a spinlock.
++
++ @Param[in] h_Spinlock - A handle to a spinlock.
++
++ @Return None.
++*//***************************************************************************/
++void XX_UnlockSpinlock(t_Handle h_Spinlock);
++
++/**************************************************************************//**
++ @Function XX_LockIntrSpinlock
++
++ @Description Locks a spinlock (interrupt safe).
++
++ @Param[in] h_Spinlock - A handle to a spinlock.
++
++ @Return A value that represents the interrupts state before the
++ operation, and should be passed to the matching
++ XX_UnlockIntrSpinlock() call.
++*//***************************************************************************/
++uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock);
++
++/**************************************************************************//**
++ @Function XX_UnlockIntrSpinlock
++
++ @Description Unlocks a spinlock (interrupt safe).
++
++ @Param[in] h_Spinlock - A handle to a spinlock.
++ @Param[in] intrFlags - A value that represents the interrupts state to
++ restore, as returned by the matching call for
++ XX_LockIntrSpinlock().
++
++ @Return None.
++*//***************************************************************************/
++void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags);
++
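++/* Usage sketch: interrupt-safe locking around shared state.
++ *
++ *     uint32_t intrFlags = XX_LockIntrSpinlock(h_Spinlock);
++ *     ...                          // touch data shared with interrupt context
++ *     XX_UnlockIntrSpinlock(h_Spinlock, intrFlags);
++ */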
++
++/*****************************************************************************/
++/* Timers Service Routines */
++/*****************************************************************************/
++
++/**************************************************************************//**
++ @Function XX_CurrentTime
++
++ @Description Returns current system time.
++
++ @Return Current system time (in milliseconds).
++*//***************************************************************************/
++uint32_t XX_CurrentTime(void);
++
++/**************************************************************************//**
++ @Function XX_CreateTimer
++
++ @Description Creates a timer.
++
++ @Return Timer handle is returned on success; NULL otherwise.
++*//***************************************************************************/
++t_Handle XX_CreateTimer(void);
++
++/**************************************************************************//**
++ @Function XX_FreeTimer
++
++ @Description Frees the memory allocated for the timer creation.
++
++ @Param[in] h_Timer - A handle to a timer.
++
++ @Return None.
++*//***************************************************************************/
++void XX_FreeTimer(t_Handle h_Timer);
++
++/**************************************************************************//**
++ @Function XX_StartTimer
++
++ @Description Starts a timer.
++
++                The user can start the timer either as a periodic timer or
++                as a one-shot timer. The user should provide a callback routine that
++ will be called when the timer expires.
++
++ @Param[in] h_Timer - A handle to a timer.
++ @Param[in] msecs - Timer expiration period (in milliseconds).
++ @Param[in] periodic - TRUE for a periodic timer;
++                            FALSE for a one-shot timer.
++ @Param[in] f_TimerExpired - A callback routine to be called when the
++ timer expires.
++ @Param[in] h_Arg - The argument to pass in the timer-expired
++ callback routine.
++
++ @Return None.
++*//***************************************************************************/
++void XX_StartTimer(t_Handle h_Timer,
++ uint32_t msecs,
++ bool periodic,
++ void (*f_TimerExpired)(t_Handle h_Arg),
++ t_Handle h_Arg);
++
++/**************************************************************************//**
++ @Function XX_StopTimer
++
++ @Description   Stops a timer.
++
++ @Param[in] h_Timer - A handle to a timer.
++
++ @Return None.
++*//***************************************************************************/
++void XX_StopTimer(t_Handle h_Timer);
++
++/**************************************************************************//**
++ @Function XX_ModTimer
++
++ @Description Updates the expiration time of a timer.
++
++ This routine adds the given time to the current system time,
++ and sets this value as the new expiration time of the timer.
++
++ @Param[in] h_Timer - A handle to a timer.
++ @Param[in] msecs - The new interval until timer expiration
++ (in milliseconds).
++
++ @Return None.
++*//***************************************************************************/
++void XX_ModTimer(t_Handle h_Timer, uint32_t msecs);
++
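++/* Usage sketch of the timer life cycle (my_expired and h_Arg are
++ * placeholders; my_expired has type void (*)(t_Handle)):
++ *
++ *     t_Handle h_Timer = XX_CreateTimer();
++ *     if (h_Timer)
++ *         XX_StartTimer(h_Timer, 100, TRUE, my_expired, h_Arg); // 100 ms,
++ *     ...                                                       // periodic
++ *     XX_StopTimer(h_Timer);
++ *     XX_FreeTimer(h_Timer);
++ */
++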
++/**************************************************************************//**
++ @Function XX_Sleep
++
++ @Description Non-busy wait until the desired time (in milliseconds) has passed.
++
++ @Param[in] msecs - The requested sleep time (in milliseconds).
++
++ @Return        Zero if the requested time has elapsed; otherwise, the
++                unslept remainder (in milliseconds).
++
++ @Cautions This routine enables interrupts during its wait time.
++*//***************************************************************************/
++uint32_t XX_Sleep(uint32_t msecs);
++
++/**************************************************************************//**
++ @Function XX_UDelay
++
++ @Description Busy-wait until the desired time (in microseconds) has passed.
++
++ @Param[in] usecs - The requested delay time (in microseconds).
++
++ @Return None.
++
++ @Cautions      Calling this routine at interrupt time is strongly
++                discouraged, because the system time may not be updated
++                properly during the delay loop; the behavior of this
++                routine at interrupt time is unpredictable.
++*//***************************************************************************/
++void XX_UDelay(uint32_t usecs);
++
++
++/*****************************************************************************/
++/* Other Service Routines */
++/*****************************************************************************/
++
++/**************************************************************************//**
++ @Function XX_PhysToVirt
++
++ @Description Translates a physical address to the matching virtual address.
++
++ @Param[in] addr - The physical address to translate.
++
++ @Return Virtual address.
++*//***************************************************************************/
++void * XX_PhysToVirt(physAddress_t addr);
++
++/**************************************************************************//**
++ @Function XX_VirtToPhys
++
++ @Description Translates a virtual address to the matching physical address.
++
++ @Param[in] addr - The virtual address to translate.
++
++ @Return Physical address.
++*//***************************************************************************/
++physAddress_t XX_VirtToPhys(void *addr);
++
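++/* Usage sketch: round-tripping an address translation (p_Buf is a
++ * placeholder for a previously mapped virtual address):
++ *
++ *     physAddress_t phys = XX_VirtToPhys(p_Buf);
++ *     void *p_Virt = XX_PhysToVirt(phys);  // expected to yield p_Buf again
++ */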
++
++/**************************************************************************//**
++ @Group xx_ipc XX Inter-Partition-Communication API
++
++ @Description   The following API is to be used when working with a
++                multiple-partition configuration.
++
++ @{
++*//***************************************************************************/
++
++#define XX_IPC_MAX_ADDR_NAME_LENGTH 16 /**< Maximum length of an endpoint name string;
++ The IPC service can use this constant to limit
++ the storage space for IPC endpoint names. */
++
++
++/**************************************************************************//**
++ @Function t_IpcMsgCompletion
++
++ @Description   Callback function invoked upon completion of a non-blocking
++                IPC transaction, to return the message buffer to the caller
++                and to forward the reply, if available.
++
++ This callback function may be attached by the source endpoint to any outgoing
++ IPC message to indicate a non-blocking send (see also XX_IpcSendMessage() routine).
++ Upon completion of an IPC transaction (consisting of a message and an optional reply),
++ the IPC service invokes this callback routine to return the message buffer to the sender
++ and to provide the received reply, if requested.
++
++ User provides this function. Driver invokes it.
++
++ @Param[in] h_Module - Abstract handle to the sending module - the same handle as was passed
++ in the XX_IpcSendMessage() function; This handle is typically used to point
++ to the internal data structure of the source endpoint.
++ @Param[in] p_Msg - Pointer to original (sent) message buffer;
++ The source endpoint can free (or reuse) this buffer when message
++ completion callback is called.
++ @Param[in] p_Reply - Pointer to (received) reply buffer;
++ This pointer is the same as was provided by the source endpoint in
++ XX_IpcSendMessage().
++ @Param[in] replyLength - Length (in bytes) of actual data in the reply buffer.
++ @Param[in] status - Completion status - E_OK or failure indication, e.g. IPC transaction completion
++ timeout.
++
++ @Return        None.
++ *//***************************************************************************/
++typedef void (t_IpcMsgCompletion)(t_Handle h_Module,
++ uint8_t *p_Msg,
++ uint8_t *p_Reply,
++ uint32_t replyLength,
++ t_Error status);
++
++/**************************************************************************//**
++ @Function t_IpcMsgHandler
++
++ @Description Callback function used as IPC message handler.
++
++ The IPC service invokes message handlers for each IPC message received.
++ The actual function pointer should be registered by each destination endpoint
++ via the XX_IpcRegisterMsgHandler() routine.
++
++ User provides this function. Driver invokes it.
++
++ @Param[in] h_Module - Abstract handle to the message handling module - the same handle as
++ was passed in the XX_IpcRegisterMsgHandler() function; this handle is
++ typically used to point to the internal data structure of the destination
++ endpoint.
++ @Param[in] p_Msg - Pointer to message buffer with data received from peer.
++ @Param[in] msgLength - Length (in bytes) of message data.
++ @Param[in] p_Reply - Pointer to reply buffer, to be filled by the message handler and then sent
++ by the IPC service;
++                                 The reply buffer is allocated by the IPC service with a size equal to the
++ replyLength parameter provided in message handler registration (see
++ XX_IpcRegisterMsgHandler() function);
++ If replyLength was initially specified as zero during message handler registration,
++ the IPC service may set this pointer to NULL and assume that a reply is not needed;
++ The IPC service is also responsible for freeing the reply buffer after the
++ reply has been sent or dismissed.
++ @Param[in,out] p_ReplyLength - Pointer to reply length, which has a dual role in this function:
++ [In] equals the replyLength parameter provided in message handler
++ registration (see XX_IpcRegisterMsgHandler() function), and
++ [Out] should be updated by message handler to the actual reply length; if
++ this value is set to zero, the IPC service must assume that a reply should
++ not be sent;
++                                 Note: If p_Reply is not NULL, p_ReplyLength must not be NULL either.
++
++ @Return E_OK on success; Error code otherwise.
++ *//***************************************************************************/
++typedef t_Error (t_IpcMsgHandler)(t_Handle h_Module,
++ uint8_t *p_Msg,
++ uint32_t msgLength,
++ uint8_t *p_Reply,
++ uint32_t *p_ReplyLength);
++
++/**************************************************************************//**
++ @Function XX_IpcRegisterMsgHandler
++
++ @Description IPC mailbox registration.
++
++ This function is used for registering an IPC message handler in the IPC service.
++ This function is called by each destination endpoint to indicate that it is ready
++ to handle incoming messages. The IPC service invokes the message handler upon receiving
++ a message addressed to the specified destination endpoint.
++
++ @Param[in] addr - The address name string associated with the destination endpoint;
++ This address must be unique across the IPC service domain to ensure
++ correct message routing.
++ @Param[in] f_MsgHandler - Pointer to the message handler callback for processing incoming
++ message; invoked by the IPC service upon receiving a message
++ addressed to the destination endpoint specified by the addr
++ parameter.
++ @Param[in] h_Module - Abstract handle to the message handling module, passed unchanged
++ to f_MsgHandler callback function.
++ @Param[in] replyLength - The maximal data length (in bytes) of any reply that the specified message handler
++ may generate; the IPC service provides the message handler with buffer
++ for reply according to the length specified here (refer also to the description
++ of #t_IpcMsgHandler callback function type);
++ This size shall be zero if the message handler never generates replies.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH],
++ t_IpcMsgHandler *f_MsgHandler,
++ t_Handle h_Module,
++ uint32_t replyLength);
++
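++/* Usage sketch (my_msg_handler is a placeholder t_IpcMsgHandler and the
++ * endpoint name is hypothetical):
++ *
++ *     t_Error err = XX_IpcRegisterMsgHandler("my-endpoint", my_msg_handler,
++ *                                            h_Module, 64); // up to 64-byte
++ *                                                           // replies
++ */
++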
++/**************************************************************************//**
++ @Function XX_IpcUnregisterMsgHandler
++
++ @Description   IPC mailbox deregistration.
++
++ This function is used for unregistering an IPC message handler from the IPC service.
++ This function is called by each destination endpoint to indicate that it is no longer
++ capable of handling incoming messages.
++
++ @Param[in] addr - The address name string associated with the destination endpoint;
++ This address is the same as was used when the message handler was
++ registered via XX_IpcRegisterMsgHandler().
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH]);
++
++/**************************************************************************//**
++ @Function XX_IpcInitSession
++
++ @Description This function is used for creating an IPC session between the source endpoint
++ and the destination endpoint.
++
++ The actual implementation and representation of a session is left for the IPC service.
++ The function returns an abstract handle to the created session. This handle shall be used
++ by the source endpoint in subsequent calls to XX_IpcSendMessage().
++ The IPC service assumes that before this function is called, no messages are sent from
++ the specified source endpoint to the specified destination endpoint.
++
++ The IPC service may use a connection-oriented approach or a connectionless approach (or both)
++ as described below.
++
++ @par Connection-Oriented Approach
++
++ The IPC service may implement a session in a connection-oriented approach - when this function is called,
++ the IPC service should take the necessary steps to bring up a source-to-destination channel for messages
++ and a destination-to-source channel for replies. The returned handle should represent the internal
++ representation of these channels.
++
++ @par Connectionless Approach
++
++ The IPC service may implement a session in a connectionless approach - when this function is called, the
++ IPC service should not perform any particular steps, but it must store the pair of source and destination
++ addresses in some session representation and return it as a handle. When XX_IpcSendMessage() shall be
++ called, the IPC service may use this handle to provide the necessary identifiers for routing the messages
++ through the connectionless medium.
++
++ @Param[in] destAddr - The address name string associated with the destination endpoint.
++ @Param[in] srcAddr - The address name string associated with the source endpoint.
++
++ @Return Abstract handle to the initialized session, or NULL on error.
++*//***************************************************************************/
++t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH],
++ char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH]);
++
++/**************************************************************************//**
++ @Function XX_IpcFreeSession
++
++ @Description This function is used for terminating an existing IPC session between a source endpoint
++ and a destination endpoint.
++
++ The IPC service assumes that after this function is called, no messages shall be sent from
++ the associated source endpoint to the associated destination endpoint.
++
++ @Param[in] h_Session - Abstract handle to the IPC session - the same handle as was originally
++ returned by the XX_IpcInitSession() function.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error XX_IpcFreeSession(t_Handle h_Session);
++
++/**************************************************************************//**
++ @Function XX_IpcSendMessage
++
++ @Description IPC message send routine.
++
++ This function may be used by a source endpoint to send an IPC message to a destination
++ endpoint. The source endpoint cannot send a message to the destination endpoint without
++ first initiating a session with that destination endpoint via XX_IpcInitSession() routine.
++
++ The source endpoint must provide the buffer pointer and length of the outgoing message.
++ Optionally, it may also provide a buffer for an expected reply. In the latter case, the
++ transaction is not considered complete by the IPC service until the reply has been received.
++ If the source endpoint does not provide a reply buffer, the transaction is considered
++ complete after the message has been sent. The source endpoint must keep the message (and
++ optional reply) buffers valid until the transaction is complete.
++
++ @par Non-blocking mode
++
++ The source endpoint may request a non-blocking send by providing a non-NULL pointer to a message
++ completion callback function (f_Completion). Upon completion of the IPC transaction (consisting of a
++ message and an optional reply), the IPC service invokes this callback routine to return the message
++ buffer to the sender and to provide the received reply, if requested.
++
++ @par Blocking mode
++
++ The source endpoint may request a blocking send by setting f_Completion to NULL. The function is
++ expected to block until the IPC transaction is complete - either the reply has been received or (if no reply
++ was requested) the message has been sent.
++
++ @Param[in] h_Session - Abstract handle to the IPC session - the same handle as was originally
++ returned by the XX_IpcInitSession() function.
++ @Param[in] p_Msg - Pointer to message buffer to send.
++ @Param[in] msgLength - Length (in bytes) of actual data in the message buffer.
++ @Param[in] p_Reply - Pointer to reply buffer - if this buffer is not NULL, the IPC service
++ fills this buffer with the received reply data;
++ In blocking mode, the reply data must be valid when the function returns;
++ In non-blocking mode, the reply data is valid when f_Completion is called;
++ If this pointer is NULL, no reply is expected.
++ @Param[in,out] p_ReplyLength - Pointer to reply length, which has a dual role in this function:
++ [In] specifies the maximal length (in bytes) of the reply buffer pointed by
++ p_Reply, and
++ [Out] in non-blocking mode this value is updated by the IPC service to the
++ actual reply length (in bytes).
++ @Param[in] f_Completion - Pointer to a completion callback to be used in non-blocking send mode;
++ The completion callback is invoked by the IPC service upon
++ completion of the IPC transaction (consisting of a message and an optional
++ reply);
++ If this pointer is NULL, the function is expected to block until the IPC
++ transaction is complete.
++ @Param[in] h_Arg - Abstract handle to the sending module; passed unchanged to the f_Completion
++ callback function as the first argument.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error XX_IpcSendMessage(t_Handle h_Session,
++ uint8_t *p_Msg,
++ uint32_t msgLength,
++ uint8_t *p_Reply,
++ uint32_t *p_ReplyLength,
++ t_IpcMsgCompletion *f_Completion,
++ t_Handle h_Arg);
++
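++/* Usage sketch of a blocking transaction (destAddr, srcAddr, p_Msg and
++ * msgLength are placeholders):
++ *
++ *     uint8_t  reply[64];
++ *     uint32_t replyLength = sizeof(reply);
++ *     t_Handle h_Session = XX_IpcInitSession(destAddr, srcAddr);
++ *     if (h_Session)
++ *         XX_IpcSendMessage(h_Session, p_Msg, msgLength, reply, &replyLength,
++ *                           NULL, NULL); // NULL completion => blocking send
++ */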
++
++/** @} */ /* end of xx_ipc group */
++/** @} */ /* end of xx_id group */
++
++
++#endif /* __XX_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/ls1043_dflags.h b/drivers/net/ethernet/freescale/sdk_fman/ls1043_dflags.h
+new file mode 100644
+index 00000000..c3a5a623
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/ls1043_dflags.h
+@@ -0,0 +1,56 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __dflags_h
++#define __dflags_h
++
++
++#define NCSW_LINUX
++
++#define LS1043
++
++#define DEBUG_ERRORS 1
++
++#if defined(DEBUG)
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
++
++#define DEBUG_XX_MALLOC
++#define DEBUG_MEM_LEAKS
++
++#else
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
++#endif /* (DEBUG) */
++
++#define REPORT_EVENTS 1
++#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
++
++#endif /* __dflags_h */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk b/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
+new file mode 100644
+index 00000000..586f9c79
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
+@@ -0,0 +1,53 @@
++#
++# Makefile config for the Freescale NetcommSW
++#
++NET_DPA = $(srctree)/drivers/net
++DRV_DPA = $(srctree)/drivers/net/ethernet/freescale/sdk_dpaa
++FMAN = $(srctree)/drivers/net/ethernet/freescale/sdk_fman
++
++ifeq ("$(CONFIG_FMAN_P3040_P4080_P5020)", "y")
++ccflags-y +=-include $(FMAN)/p3040_4080_5020_dflags.h
++endif
++ifeq ("$(CONFIG_FMAN_P1023)", "y")
++ccflags-y +=-include $(FMAN)/p1023_dflags.h
++endif
++ifdef CONFIG_FMAN_V3H
++ccflags-y +=-include $(FMAN)/fmanv3h_dflags.h
++endif
++ifdef CONFIG_FMAN_V3L
++ccflags-y +=-include $(FMAN)/fmanv3l_dflags.h
++endif
++ifdef CONFIG_FMAN_ARM
++ccflags-y +=-include $(FMAN)/ls1043_dflags.h
++endif
++
++ccflags-y += -I$(DRV_DPA)/
++ccflags-y += -I$(FMAN)/inc
++ccflags-y += -I$(FMAN)/inc/cores
++ccflags-y += -I$(FMAN)/inc/etc
++ccflags-y += -I$(FMAN)/inc/Peripherals
++ccflags-y += -I$(FMAN)/inc/flib
++
++ifeq ("$(CONFIG_FMAN_P3040_P4080_P5020)", "y")
++ccflags-y += -I$(FMAN)/inc/integrations/P3040_P4080_P5020
++endif
++ifeq ("$(CONFIG_FMAN_P1023)", "y")
++ccflags-y += -I$(FMAN)/inc/integrations/P1023
++endif
++ifdef CONFIG_FMAN_V3H
++ccflags-y += -I$(FMAN)/inc/integrations/FMANV3H
++endif
++ifdef CONFIG_FMAN_V3L
++ccflags-y += -I$(FMAN)/inc/integrations/FMANV3L
++endif
++ifdef CONFIG_FMAN_ARM
++ccflags-y += -I$(FMAN)/inc/integrations/LS1043
++endif
++
++ccflags-y += -I$(FMAN)/src/inc
++ccflags-y += -I$(FMAN)/src/inc/system
++ccflags-y += -I$(FMAN)/src/inc/wrapper
++ccflags-y += -I$(FMAN)/src/inc/xx
++ccflags-y += -I$(srctree)/include/uapi/linux/fmd
++ccflags-y += -I$(srctree)/include/uapi/linux/fmd/Peripherals
++ccflags-y += -I$(srctree)/include/uapi/linux/fmd/integrations
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/p1023_dflags.h b/drivers/net/ethernet/freescale/sdk_fman/p1023_dflags.h
+new file mode 100644
+index 00000000..b48819d7
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/p1023_dflags.h
+@@ -0,0 +1,65 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __dflags_h
++#define __dflags_h
++
++
++#define NCSW_LINUX
++#if 0
++#define DEBUG
++#endif
++
++#define P1023
++#define NCSW_PPC_CORE
++
++#define DEBUG_ERRORS 1
++
++#if defined(DEBUG)
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
++
++#define DEBUG_XX_MALLOC
++#define DEBUG_MEM_LEAKS
++
++#else
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
++#endif /* (DEBUG) */
++
++#define REPORT_EVENTS 1
++#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
++
++#ifdef CONFIG_P4080_SIM
++#error "Do not define CONFIG_P4080_SIM..."
++#endif
++
++
++#endif /* __dflags_h */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/p3040_4080_5020_dflags.h b/drivers/net/ethernet/freescale/sdk_fman/p3040_4080_5020_dflags.h
+new file mode 100644
+index 00000000..74389742
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/p3040_4080_5020_dflags.h
+@@ -0,0 +1,62 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __dflags_h
++#define __dflags_h
++
++
++#define NCSW_LINUX
++
++#define P4080
++#define NCSW_PPC_CORE
++
++#define DEBUG_ERRORS 1
++
++#if defined(DEBUG)
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
++
++#define DEBUG_XX_MALLOC
++#define DEBUG_MEM_LEAKS
++
++#else
++#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
++#endif /* (DEBUG) */
++
++#define REPORT_EVENTS 1
++#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
++
++#ifdef CONFIG_P4080_SIM
++#define SIMULATOR
++#endif /* CONFIG_P4080_SIM */
++
++
++#endif /* __dflags_h */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/Makefile b/drivers/net/ethernet/freescale/sdk_fman/src/Makefile
+new file mode 100644
+index 00000000..49405d0e
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/Makefile
+@@ -0,0 +1,11 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++#
++obj-y += system/
++obj-y += wrapper/
++obj-y += xx/
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_ext.h b/drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_ext.h
+new file mode 100644
+index 00000000..20f27d29
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_ext.h
+@@ -0,0 +1,118 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __SYS_EXT_H
++#define __SYS_EXT_H
++
++#include "std_ext.h"
++
++
++/**************************************************************************//**
++ @Group sys_grp System Interfaces
++
++ @Description Linux system programming interfaces.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group sys_gen_grp System General Interface
++
++ @Description   General definitions, structures and routines of the Linux
++ system programming interface.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Collection Macros for Advanced Configuration Requests
++ @{
++*//***************************************************************************/
++#define SYS_MAX_ADV_CONFIG_ARGS 4
++ /**< Maximum number of arguments in
++ an advanced configuration entry */
++/* @} */
++
++/**************************************************************************//**
++ @Description System Object Advanced Configuration Entry
++
++ This structure represents a single request for an advanced
++ configuration call on the initialized object. An array of such
++ requests may be contained in the settings structure of the
++ corresponding object.
++
++ The maximum number of arguments is limited to #SYS_MAX_ADV_CONFIG_ARGS.
++*//***************************************************************************/
++typedef struct t_SysObjectAdvConfigEntry
++{
++ void *p_Function; /**< Pointer to advanced configuration routine */
++
++ uintptr_t args[SYS_MAX_ADV_CONFIG_ARGS];
++                        /**< Array of arguments for the specified routine;
++                             All arguments should be cast to uintptr_t. */
++} t_SysObjectAdvConfigEntry;
++
++
++/** @} */ /* end of sys_gen_grp */
++/** @} */ /* end of sys_grp */
++
++#define NCSW_PARAMS(_num, _params) ADV_CONFIG_PARAMS_##_num _params
++
++#define ADV_CONFIG_PARAMS_1(_type) \
++ , (_type)p_Entry->args[0]
++
++#define SET_ADV_CONFIG_ARGS_1(_arg0) \
++ p_Entry->args[0] = (uintptr_t )(_arg0); \
++
++#define ARGS(_num, _params) SET_ADV_CONFIG_ARGS_##_num _params
++
++#define ADD_ADV_CONFIG_START(_p_Entries, _maxEntries) \
++ { \
++ t_SysObjectAdvConfigEntry *p_Entry; \
++ t_SysObjectAdvConfigEntry *p_Entrys = (_p_Entries); \
++ int i=0, max = (_maxEntries); \
++
++#define ADD_ADV_CONFIG_END \
++ }
++
++#define ADV_CONFIG_CHECK_START(_p_Entry) \
++ { \
++ t_SysObjectAdvConfigEntry *p_Entry = _p_Entry; \
++ t_Error errCode; \
++
++#define ADV_CONFIG_CHECK(_handle, _func, _params) \
++ if (p_Entry->p_Function == _func) \
++ { \
++ errCode = _func(_handle _params); \
++ } else
++
++#endif /* __SYS_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_io_ext.h b/drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_io_ext.h
+new file mode 100644
+index 00000000..d6aa9d41
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_io_ext.h
+@@ -0,0 +1,46 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __SYS_IO_EXT_H
++#define __SYS_IO_EXT_H
++
++#include "std_ext.h"
++#include "error_ext.h"
++
++
++t_Error SYS_RegisterIoMap (uint64_t virtAddr, uint64_t physAddr, uint32_t size);
++t_Error SYS_UnregisterIoMap (uint64_t virtAddr);
++uint64_t SYS_PhysToVirt (uint64_t addr);
++uint64_t SYS_VirtToPhys (uint64_t addr);
++
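++/* Usage sketch: registering a mapped window so SYS_PhysToVirt() can
++ * translate addresses inside it (virtBase, physBase and the window size
++ * are placeholders):
++ *
++ *     SYS_RegisterIoMap(virtBase, physBase, 0x1000);
++ *     uint64_t v = SYS_PhysToVirt(physBase + 0x100);
++ *     SYS_UnregisterIoMap(virtBase);
++ */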
++
++#endif /* __SYS_IO_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/inc/types_linux.h b/drivers/net/ethernet/freescale/sdk_fman/src/inc/types_linux.h
+new file mode 100644
+index 00000000..201ad699
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/types_linux.h
+@@ -0,0 +1,208 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __TYPES_LINUX_H__
++#define __TYPES_LINUX_H__
++
++#include <linux/version.h>
++
++#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++#ifdef MODVERSIONS
++#include <config/modversions.h>
++#endif /* MODVERSIONS */
++
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <asm/io.h>
++#include <linux/delay.h>
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
++ #error "This kernel is probably not supported!!!"
++#elif (!((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) || \
++ (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) || \
++ (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30))))
++ #warning "This kernel is probably not supported!!! You may need to add some fixes."
++#endif /* LINUX_VERSION_CODE */
++
++
++typedef float float_t; /* Single precision floating point */
++typedef double double_t; /* Double precision floating point */
++
++
++#define _Packed
++#define _PackedType __attribute__ ((packed))
++
++typedef phys_addr_t physAddress_t;
++
++#define UINT8_MAX 0xFF
++#define UINT8_MIN 0
++#define UINT16_MAX 0xFFFF
++#define UINT16_MIN 0
++#define UINT32_MAX 0xFFFFFFFF
++#define UINT32_MIN 0
++#define UINT64_MAX 0xFFFFFFFFFFFFFFFFLL
++#define UINT64_MIN 0
++#define INT8_MAX 0x7F
++#define INT8_MIN 0x80
++#define INT16_MAX 0x7FFF
++#define INT16_MIN 0x8000
++#define INT32_MAX 0x7FFFFFFF
++#define INT32_MIN 0x80000000
++#define INT64_MAX 0x7FFFFFFFFFFFFFFFLL
++#define INT64_MIN 0x8000000000000000LL
++
++#define ON 1
++#define OFF 0
++
++#define FALSE false
++#define TRUE true
++
++
++/************************/
++/* memory access macros */
++/************************/
++#ifdef CONFIG_FMAN_ARM
++#define in_be16(a) __be16_to_cpu(__raw_readw(a))
++#define in_be32(a) __be32_to_cpu(__raw_readl(a))
++#define out_be16(a, v) __raw_writew(__cpu_to_be16(v), a)
++#define out_be32(a, v) __raw_writel(__cpu_to_be32(v), a)
++#endif
++
++#define GET_UINT8(arg) *(volatile uint8_t *)(&(arg))
++#define GET_UINT16(arg) in_be16(&(arg))//*(volatile uint16_t*)(&(arg))
++#define GET_UINT32(arg) in_be32(&(arg))//*(volatile uint32_t*)(&(arg))
++#define GET_UINT64(arg) *(volatile uint64_t*)(&(arg))
++
++#ifdef VERBOSE_WRITE
++void XX_Print(char *str, ...);
++#define WRITE_UINT8(arg, data) \
++ do { XX_Print("ADDR: 0x%08x, VAL: 0x%02x\r\n", (uint32_t)&(arg), (data)); *(volatile uint8_t *)(&(arg)) = (data); } while (0)
++#define WRITE_UINT16(arg, data) \
++ do { XX_Print("ADDR: 0x%08x, VAL: 0x%04x\r\n", (uint32_t)&(arg), (data)); out_be16(&(arg), data); /* *(volatile uint16_t*)(&(arg)) = (data);*/ } while (0)
++#define WRITE_UINT32(arg, data) \
++ do { XX_Print("ADDR: 0x%08x, VAL: 0x%08x\r\n", (uint32_t)&(arg), (data)); out_be32(&(arg), data); /* *(volatile uint32_t*)(&(arg)) = (data);*/ } while (0)
++#define WRITE_UINT64(arg, data) \
++ do { XX_Print("ADDR: 0x%08x, VAL: 0x%016llx\r\n", (uint32_t)&(arg), (data)); *(volatile uint64_t*)(&(arg)) = (data); } while (0)
++
++#else /* not VERBOSE_WRITE */
++#define WRITE_UINT8(arg, data) *(volatile uint8_t *)(&(arg)) = (data)
++#define WRITE_UINT16(arg, data) out_be16(&(arg), data)//*(volatile uint16_t*)(&(arg)) = (data)
++#define WRITE_UINT32(arg, data) out_be32(&(arg), data)//*(volatile unsigned int *)(&(arg)) = (data)
++#define WRITE_UINT64(arg, data) *(volatile uint64_t*)(&(arg)) = (data)
++#endif /* not VERBOSE_WRITE */
++
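++/* Usage sketch: big-endian register access through a mapped structure
++ * (p_Regs and its "control" field are placeholder names):
++ *
++ *     uint32_t tmp = GET_UINT32(p_Regs->control);
++ *     WRITE_UINT32(p_Regs->control, tmp | 0x00000001);
++ */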
++
++/*****************************************************************************/
++/* General stuff */
++/*****************************************************************************/
++#ifdef ARRAY_SIZE
++#undef ARRAY_SIZE
++#endif /* ARRAY_SIZE */
++
++#ifdef MAJOR
++#undef MAJOR
++#endif /* MAJOR */
++
++#ifdef MINOR
++#undef MINOR
++#endif /* MINOR */
++
++#ifdef QE_SIZEOF_BD
++#undef QE_SIZEOF_BD
++#endif /* QE_SIZEOF_BD */
++
++#ifdef BD_BUFFER_CLEAR
++#undef BD_BUFFER_CLEAR
++#endif /* BD_BUFFER_CLEAR */
++
++#ifdef BD_BUFFER
++#undef BD_BUFFER
++#endif /* BD_BUFFER */
++
++#ifdef BD_STATUS_AND_LENGTH_SET
++#undef BD_STATUS_AND_LENGTH_SET
++#endif /* BD_STATUS_AND_LENGTH_SET */
++
++#ifdef BD_STATUS_AND_LENGTH
++#undef BD_STATUS_AND_LENGTH
++#endif /* BD_STATUS_AND_LENGTH */
++
++#ifdef BD_BUFFER_ARG
++#undef BD_BUFFER_ARG
++#endif /* BD_BUFFER_ARG */
++
++#ifdef BD_GET_NEXT
++#undef BD_GET_NEXT
++#endif /* BD_GET_NEXT */
++
++#ifdef QE_SDEBCR_BA_MASK
++#undef QE_SDEBCR_BA_MASK
++#endif /* QE_SDEBCR_BA_MASK */
++
++#ifdef BD_BUFFER_SET
++#undef BD_BUFFER_SET
++#endif /* BD_BUFFER_SET */
++
++#ifdef UPGCR_PROTOCOL
++#undef UPGCR_PROTOCOL
++#endif /* UPGCR_PROTOCOL */
++
++#ifdef UPGCR_TMS
++#undef UPGCR_TMS
++#endif /* UPGCR_TMS */
++
++#ifdef UPGCR_RMS
++#undef UPGCR_RMS
++#endif /* UPGCR_RMS */
++
++#ifdef UPGCR_ADDR
++#undef UPGCR_ADDR
++#endif /* UPGCR_ADDR */
++
++#ifdef UPGCR_DIAG
++#undef UPGCR_DIAG
++#endif /* UPGCR_DIAG */
++
++#ifdef NCSW_PARAMS
++#undef NCSW_PARAMS
++#endif /* NCSW_PARAMS */
++
++#ifdef NO_IRQ
++#undef NO_IRQ
++#endif /* NO_IRQ */
++
++#define PRINT_LINE XX_Print("%s:\n %s [%d]\n",__FILE__,__FUNCTION__,__LINE__);
++
++
++#endif /* __TYPES_LINUX_H__ */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/fsl_fman_test.h b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/fsl_fman_test.h
+new file mode 100644
+index 00000000..0466a473
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/fsl_fman_test.h
+@@ -0,0 +1,84 @@
++/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File fsl_fman_test.h
++
++ @Description FMan test module wrapper definitions and debug helpers.
++*//***************************************************************************/
++
++#ifndef __FSL_FMAN_TEST_H
++#define __FSL_FMAN_TEST_H
++
++#include <linux/types.h>
++#include <linux/smp.h> /* raw_smp_processor_id() */
++
++//#define FMT_K_DBG
++//#define FMT_K_DBG_RUNTIME
++
++#define _fmt_prk(stage, format, arg...) \
++ printk(stage "fmt (cpu:%u): " format, raw_smp_processor_id(), ##arg)
++
++#define _fmt_inf(format, arg...) _fmt_prk(KERN_INFO, format, ##arg)
++#define _fmt_wrn(format, arg...) _fmt_prk(KERN_WARNING, format, ##arg)
++#define _fmt_err(format, arg...) _fmt_prk(KERN_ERR, format, ##arg)
++
++/* There are two debug macro families: one generic and one for the
++ * runtime functions. This helps when the runtime paths are not being
++ * debugged, so their output can be compiled away selectively.
++ */
++/* used for generic debugging */
++#if defined(FMT_K_DBG)
++ #define _fmt_dbg(format, arg...) \
++ printk("fmt [%s:%u](cpu:%u) - " format, \
++ __func__, __LINE__, raw_smp_processor_id(), ##arg)
++#else
++# define _fmt_dbg(arg...)
++#endif
++
++/* used for debugging runtime functions */
++#if defined(FMT_K_DBG_RUNTIME)
++ #define _fmt_dbgr(format, arg...) \
++ printk("fmt [%s:%u](cpu:%u) - " format, \
++ __func__, __LINE__, raw_smp_processor_id(), ##arg)
++#else
++# define _fmt_dbgr(arg...)
++#endif
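++
++/*
++ * Example (illustrative only): with FMT_K_DBG defined, a call such as
++ *
++ *   _fmt_dbg("enqueued %d frames on fqid 0x%x\n", count, fqid);
++ *
++ * prints the function, line and CPU as a prefix; without FMT_K_DBG the
++ * call compiles away entirely. `count` and `fqid` are hypothetical locals.
++ */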
++
++#define FMT_RX_ERR_Q 0xffffffff
++#define FMT_RX_DFLT_Q 0xfffffffe
++#define FMT_TX_ERR_Q 0xfffffffd
++#define FMT_TX_CONF_Q 0xfffffffc
++
++#define FMAN_TEST_MAX_TX_FQS 8
++
++#endif /* __FSL_FMAN_TEST_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h
+new file mode 100644
+index 00000000..0c0c6c11
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h
+@@ -0,0 +1,128 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_exp_sym.h
++ @Description FMan exported routines
++*/
++
++#ifndef __LNXWRP_EXP_SYM_H
++#define __LNXWRP_EXP_SYM_H
++
++#include "fm_port_ext.h"
++#include "fm_pcd_ext.h"
++#include "fm_mac_ext.h"
++
++
++/* FMAN Port exported routines */
++EXPORT_SYMBOL(FM_PORT_Disable);
++EXPORT_SYMBOL(FM_PORT_Enable);
++EXPORT_SYMBOL(FM_PORT_SetPCD);
++EXPORT_SYMBOL(FM_PORT_DeletePCD);
++
++/* Runtime PCD exported routines */
++EXPORT_SYMBOL(FM_PCD_Enable);
++EXPORT_SYMBOL(FM_PCD_Disable);
++EXPORT_SYMBOL(FM_PCD_GetCounter);
++EXPORT_SYMBOL(FM_PCD_PrsLoadSw);
++EXPORT_SYMBOL(FM_PCD_KgSetDfltValue);
++EXPORT_SYMBOL(FM_PCD_KgSetAdditionalDataAfterParsing);
++EXPORT_SYMBOL(FM_PCD_SetException);
++EXPORT_SYMBOL(FM_PCD_ModifyCounter);
++EXPORT_SYMBOL(FM_PCD_SetPlcrStatistics);
++EXPORT_SYMBOL(FM_PCD_SetPrsStatistics);
++EXPORT_SYMBOL(FM_PCD_ForceIntr);
++EXPORT_SYMBOL(FM_PCD_HcTxConf);
++
++EXPORT_SYMBOL(FM_PCD_NetEnvCharacteristicsSet);
++EXPORT_SYMBOL(FM_PCD_NetEnvCharacteristicsDelete);
++EXPORT_SYMBOL(FM_PCD_KgSchemeSet);
++EXPORT_SYMBOL(FM_PCD_KgSchemeDelete);
++EXPORT_SYMBOL(FM_PCD_KgSchemeGetCounter);
++EXPORT_SYMBOL(FM_PCD_KgSchemeSetCounter);
++EXPORT_SYMBOL(FM_PCD_CcRootBuild);
++EXPORT_SYMBOL(FM_PCD_CcRootDelete);
++EXPORT_SYMBOL(FM_PCD_MatchTableSet);
++EXPORT_SYMBOL(FM_PCD_MatchTableDelete);
++EXPORT_SYMBOL(FM_PCD_CcRootModifyNextEngine);
++EXPORT_SYMBOL(FM_PCD_MatchTableModifyNextEngine);
++EXPORT_SYMBOL(FM_PCD_MatchTableFindNModifyNextEngine);
++EXPORT_SYMBOL(FM_PCD_MatchTableModifyMissNextEngine);
++EXPORT_SYMBOL(FM_PCD_MatchTableRemoveKey);
++EXPORT_SYMBOL(FM_PCD_MatchTableFindNRemoveKey);
++EXPORT_SYMBOL(FM_PCD_MatchTableAddKey);
++EXPORT_SYMBOL(FM_PCD_MatchTableModifyKeyAndNextEngine);
++EXPORT_SYMBOL(FM_PCD_MatchTableFindNModifyKeyAndNextEngine);
++EXPORT_SYMBOL(FM_PCD_MatchTableModifyKey);
++EXPORT_SYMBOL(FM_PCD_MatchTableFindNModifyKey);
++EXPORT_SYMBOL(FM_PCD_MatchTableGetIndexedHashBucket);
++EXPORT_SYMBOL(FM_PCD_MatchTableGetNextEngine);
++EXPORT_SYMBOL(FM_PCD_MatchTableGetKeyCounter);
++EXPORT_SYMBOL(FM_PCD_MatchTableGetKeyStatistics);
++EXPORT_SYMBOL(FM_PCD_MatchTableFindNGetKeyStatistics);
++EXPORT_SYMBOL(FM_PCD_MatchTableGetMissStatistics);
++EXPORT_SYMBOL(FM_PCD_HashTableGetMissStatistics);
++EXPORT_SYMBOL(FM_PCD_HashTableSet);
++EXPORT_SYMBOL(FM_PCD_HashTableDelete);
++EXPORT_SYMBOL(FM_PCD_HashTableAddKey);
++EXPORT_SYMBOL(FM_PCD_HashTableRemoveKey);
++EXPORT_SYMBOL(FM_PCD_HashTableModifyNextEngine);
++EXPORT_SYMBOL(FM_PCD_HashTableModifyMissNextEngine);
++EXPORT_SYMBOL(FM_PCD_HashTableGetMissNextEngine);
++EXPORT_SYMBOL(FM_PCD_HashTableFindNGetKeyStatistics);
++EXPORT_SYMBOL(FM_PCD_PlcrProfileSet);
++EXPORT_SYMBOL(FM_PCD_PlcrProfileDelete);
++EXPORT_SYMBOL(FM_PCD_PlcrProfileGetCounter);
++EXPORT_SYMBOL(FM_PCD_PlcrProfileSetCounter);
++EXPORT_SYMBOL(FM_PCD_ManipNodeSet);
++EXPORT_SYMBOL(FM_PCD_ManipNodeDelete);
++EXPORT_SYMBOL(FM_PCD_ManipGetStatistics);
++EXPORT_SYMBOL(FM_PCD_ManipNodeReplace);
++#if (DPAA_VERSION >= 11)
++EXPORT_SYMBOL(FM_PCD_FrmReplicSetGroup);
++EXPORT_SYMBOL(FM_PCD_FrmReplicDeleteGroup);
++EXPORT_SYMBOL(FM_PCD_FrmReplicAddMember);
++EXPORT_SYMBOL(FM_PCD_FrmReplicRemoveMember);
++#endif /* DPAA_VERSION >= 11 */
++
++#ifdef FM_CAPWAP_SUPPORT
++EXPORT_SYMBOL(FM_PCD_StatisticsSetNode);
++#endif /* FM_CAPWAP_SUPPORT */
++
++EXPORT_SYMBOL(FM_PCD_SetAdvancedOffloadSupport);
++
++/* FMAN MAC exported routines */
++EXPORT_SYMBOL(FM_MAC_GetStatistics);
++
++EXPORT_SYMBOL(FM_GetSpecialOperationCoding);
++
++#endif /* __LNXWRP_EXP_SYM_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h
+new file mode 100644
+index 00000000..a72c8670
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h
+@@ -0,0 +1,163 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File lnxwrp_fm_ext.h
++
++ @Description TODO
++ @Description FM Linux wrapper initialization API.
++
++#ifndef __LNXWRP_FM_EXT_H
++#define __LNXWRP_FM_EXT_H
++
++#include "std_ext.h"
++#include "sys_ext.h"
++#include "fm_ext.h"
++#include "fm_muram_ext.h"
++#include "fm_pcd_ext.h"
++#include "fm_port_ext.h"
++#include "fm_mac_ext.h"
++#include "fm_rtc_ext.h"
++
++
++/**************************************************************************//**
++ @Group FM_LnxKern_grp Frame Manager Linux wrapper API
++
++ @Description FM API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_LnxKern_init_grp Initialization Unit
++
++ @Description Initialization Unit
++
++ Initialization Flow:
++ Initialization of the FM Module will be carried out by the Linux
++ kernel according to the following sequence:
++ a. Calling the initialization routine with no parameters.
++ b. The driver will register to the Device-Tree.
++ c. The Linux Device-Tree will initiate a call to the driver for
++ initialization.
++ d. The driver will read the appropriate information from the Device-Tree.
++ e. [Optional] Calling the advanced initialization routines to change
++ the driver's defaults.
++ f. Initialization of the device will be carried out automatically upon
++ first use.
++
++ @{
++*//***************************************************************************/
++
++typedef struct t_WrpFmDevSettings
++{
++ t_FmParams param;
++ t_SysObjectAdvConfigEntry *advConfig;
++} t_WrpFmDevSettings;
++
++typedef struct t_WrpFmPcdDevSettings
++{
++ t_FmPcdParams param;
++ t_SysObjectAdvConfigEntry *advConfig;
++} t_WrpFmPcdDevSettings;
++
++typedef struct t_WrpFmPortDevSettings
++{
++ bool frag_enabled;
++ t_FmPortParams param;
++ t_SysObjectAdvConfigEntry *advConfig;
++} t_WrpFmPortDevSettings;
++
++typedef struct t_WrpFmMacDevSettings
++{
++ t_FmMacParams param;
++ t_SysObjectAdvConfigEntry *advConfig;
++} t_WrpFmMacDevSettings;
++
++
++/**************************************************************************//**
++ @Function LNXWRP_FM_Init
++
++ @Description Initialize the FM linux wrapper.
++
++ @Return A handle (descriptor) of the newly created FM Linux wrapper
++ structure.
++*//***************************************************************************/
++t_Handle LNXWRP_FM_Init(void);
++
++/**************************************************************************//**
++ @Function LNXWRP_FM_Free
++
++ @Description Free the FM linux wrapper.
++
++ @Param[in] h_LnxWrpFm - A handle to the FM linux wrapper.
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++t_Error LNXWRP_FM_Free(t_Handle h_LnxWrpFm);
++
++/**************************************************************************//**
++ @Function LNXWRP_FM_GetMacHandle
++
++ @Description Get the FM-MAC LLD handle from the FM linux wrapper.
++
++ @Param[in] h_LnxWrpFm - A handle to the FM linux wrapper.
++ @Param[in] fmId - Index of the FM device to get the MAC handle from.
++ @Param[in] macId - Index of the mac handle.
++
++ @Return A handle of the FM-MAC LLD device.
++*//***************************************************************************/
++t_Handle LNXWRP_FM_GetMacHandle(t_Handle h_LnxWrpFm, uint8_t fmId, uint8_t macId);
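++
++/*
++ * Example (illustrative sketch of the expected call order; error handling
++ * elided, the FM/MAC indices are hypothetical):
++ *
++ *   t_Handle wrp = LNXWRP_FM_Init();
++ *   t_Handle mac = LNXWRP_FM_GetMacHandle(wrp, 0, 1); // FM0, MAC1
++ *   ...
++ *   LNXWRP_FM_Free(wrp);
++ */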
++
++#ifdef CONFIG_FSL_SDK_FMAN_TEST
++t_Handle LNXWRP_FM_TEST_Init(void);
++t_Error LNXWRP_FM_TEST_Free(t_Handle h_FmTestLnxWrp);
++#endif /* CONFIG_FSL_SDK_FMAN_TEST */
++
++/** @} */ /* end of FM_LnxKern_init_grp group */
++
++
++/**************************************************************************//**
++ @Group FM_LnxKern_ctrl_grp Control Unit
++
++ @Description Control Unit
++
++ TODO
++ @{
++*//***************************************************************************/
++
++#include "lnxwrp_fsl_fman.h"
++
++/** @} */ /* end of FM_LnxKern_ctrl_grp group */
++/** @} */ /* end of FM_LnxKern_grp group */
++
++
++#endif /* __LNXWRP_FM_EXT_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h
+new file mode 100644
+index 00000000..c50031cf
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h
+@@ -0,0 +1,921 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File lnxwrp_fsl_fman.h
++
++ @Description Linux internal kernel API
++*//***************************************************************************/
++
++#ifndef __LNXWRP_FSL_FMAN_H
++#define __LNXWRP_FSL_FMAN_H
++
++#include <linux/types.h>
++#include <linux/device.h> /* struct device */
++#include <linux/fsl_qman.h> /* struct qman_fq */
++#include "dpaa_integration_ext.h"
++#include "fm_port_ext.h"
++#include "fm_mac_ext.h"
++#include "fm_macsec_ext.h"
++#include "fm_rtc_ext.h"
++
++/**************************************************************************//**
++ @Group FM_LnxKern_grp Frame Manager Linux wrapper API
++
++ @Description FM API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group FM_LnxKern_ctrl_grp Control Unit
++
++ @Description Control Unit
++
++ Internal Kernel Control Unit API
++ @{
++*//***************************************************************************/
++
++/*****************************************************************************/
++/* Internal Linux kernel routines */
++/*****************************************************************************/
++
++/**************************************************************************//**
++ @Description MACSEC Exceptions wrapper
++*//***************************************************************************/
++typedef enum fm_macsec_exception {
++ SINGLE_BIT_ECC = e_FM_MACSEC_EX_SINGLE_BIT_ECC,
++ MULTI_BIT_ECC = e_FM_MACSEC_EX_MULTI_BIT_ECC
++} fm_macsec_exception;
++
++/**************************************************************************//**
++ @Description Unknown sci frame treatment wrapper
++*//***************************************************************************/
++typedef enum fm_macsec_unknown_sci_frame_treatment {
++ SCI_DISCARD_BOTH = e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_BOTH,
++ SCI_DISCARD_UNCTRL_DELIVER_DISCARD_CTRL = \
++ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_UNCONTROLLED_DELIVER_OR_DISCARD_CONTROLLED,
++ SCI_DELIVER_UNCTRL_DISCARD_CTRL = \
++ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED,
++ SCI_DELIVER_DISCARD_UNCTRL_DELIVER_DISCARD_CTRL = \
++ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DELIVER_OR_DISCARD_UNCONTROLLED_DELIVER_OR_DISCARD_CONTROLLED
++} fm_macsec_unknown_sci_frame_treatment;
++
++/**************************************************************************//**
++ @Description Untag frame treatment wrapper
++*//***************************************************************************/
++typedef enum fm_macsec_untag_frame_treatment {
++ UNTAG_DELIVER_UNCTRL_DISCARD_CTRL = \
++ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED,
++ UNTAG_DISCARD_BOTH = e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DISCARD_BOTH,
++ UNTAG_DISCARD_UNCTRL_DELIVER_CTRL_UNMODIFIED = \
++ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DISCARD_UNCONTROLLED_DELIVER_CONTROLLED_UNMODIFIED
++} fm_macsec_untag_frame_treatment;
++
++/**************************************************************************//**
++@Description MACSEC SECY Cipher Suite wrapper
++*//***************************************************************************/
++typedef enum fm_macsec_secy_cipher_suite {
++ SECY_GCM_AES_128 = e_FM_MACSEC_SECY_GCM_AES_128, /**< GCM-AES-128 */
++#if (DPAA_VERSION >= 11)
++ SECY_GCM_AES_256 = e_FM_MACSEC_SECY_GCM_AES_256 /**< GCM-AES-256 */
++#endif /* (DPAA_VERSION >= 11) */
++} fm_macsec_secy_cipher_suite;
++
++/**************************************************************************//**
++ @Description MACSEC SECY Exceptions wrapper
++*//***************************************************************************/
++typedef enum fm_macsec_secy_exception {
++ SECY_EX_FRAME_DISCARDED = e_FM_MACSEC_SECY_EX_FRAME_DISCARDED
++} fm_macsec_secy_exception;
++
++/**************************************************************************//**
++ @Description MACSEC SECY Events wrapper
++*//***************************************************************************/
++typedef enum fm_macsec_secy_event {
++ SECY_EV_NEXT_PN = e_FM_MACSEC_SECY_EV_NEXT_PN
++} fm_macsec_secy_event;
++
++/**************************************************************************//**
++ @Description Valid frame behaviors wrapper
++*//***************************************************************************/
++typedef enum fm_macsec_valid_frame_behavior {
++ VALID_FRAME_BEHAVIOR_DISABLE = e_FM_MACSEC_VALID_FRAME_BEHAVIOR_DISABLE,
++ VALID_FRAME_BEHAVIOR_CHECK = e_FM_MACSEC_VALID_FRAME_BEHAVIOR_CHECK,
++ VALID_FRAME_BEHAVIOR_STRICT = e_FM_MACSEC_VALID_FRAME_BEHAVIOR_STRICT
++} fm_macsec_valid_frame_behavior;
++
++/**************************************************************************//**
++ @Description SCI insertion modes wrapper
++*//***************************************************************************/
++typedef enum fm_macsec_sci_insertion_mode {
++ SCI_INSERTION_MODE_EXPLICIT_SECTAG = \
++ e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_SECTAG,
++ SCI_INSERTION_MODE_EXPLICIT_MAC_SA = \
++ e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_MAC_SA,
++ SCI_INSERTION_MODE_IMPLICT_PTP = e_FM_MACSEC_SCI_INSERTION_MODE_IMPLICT_PTP
++} fm_macsec_sci_insertion_mode;
++
++typedef macsecSAKey_t macsec_sa_key_t;
++typedef macsecSCI_t macsec_sci_t;
++typedef macsecAN_t macsec_an_t;
++typedef t_Handle handle_t;
++
++/**************************************************************************//**
++ @Function fm_macsec_secy_exception_callback wrapper
++ @Description Exceptions user callback routine, will be called upon an
++ exception passing the exception identification.
++ @Param[in] app_h A handle to an application layer object; This handle
++ will be passed by the driver upon calling this callback.
++ @Param[in] exception The exception.
++*//***************************************************************************/
++typedef void (fm_macsec_secy_exception_callback) (handle_t app_h,
++ fm_macsec_secy_exception exception);
++
++/**************************************************************************//**
++ @Function fm_macsec_secy_event_callback wrapper
++ @Description Events user callback routine, will be called upon an
++ event passing the event identification.
++ @Param[in] app_h A handle to an application layer object; This handle
++ will be passed by the driver upon calling this callback.
++ @Param[in] event The event.
++*//***************************************************************************/
++typedef void (fm_macsec_secy_event_callback) (handle_t app_h,
++ fm_macsec_secy_event event);
++
++/**************************************************************************//**
++ @Function fm_macsec_exception_callback wrapper
++ @Description Exceptions user callback routine, will be called upon an
++ exception passing the exception identification.
++ @Param[in] app_h A handle to an application layer object; This handle
++ will be passed by the driver upon calling this callback.
++ @Param[in] exception The exception.
++*//***************************************************************************/
++typedef void (fm_macsec_exception_callback) (handle_t app_h,
++ fm_macsec_exception exception);
++
++/**************************************************************************//**
++ @Description MACSEC SecY SC Params wrapper
++*//***************************************************************************/
++struct fm_macsec_secy_sc_params {
++ macsec_sci_t sci;
++ fm_macsec_secy_cipher_suite cipher_suite;
++};
++
++/**************************************************************************//**
++ @Description FM MACSEC SecY config input wrapper
++*//***************************************************************************/
++struct fm_macsec_secy_params {
++ handle_t fm_macsec_h;
++ struct fm_macsec_secy_sc_params tx_sc_params;
++ uint32_t num_receive_channels;
++ fm_macsec_secy_exception_callback *exception_f;
++ fm_macsec_secy_event_callback *event_f;
++ handle_t app_h;
++};
++
++/**************************************************************************//**
++ @Description FM MACSEC config input wrapper
++*//***************************************************************************/
++struct fm_macsec_params {
++ handle_t fm_h;
++ bool guest_mode;
++
++ union {
++ struct {
++ uint8_t fm_mac_id;
++ } guest_params;
++
++ struct {
++ uintptr_t base_addr;
++ handle_t fm_mac_h;
++ fm_macsec_exception_callback *exception_f;
++ handle_t app_h;
++ } non_guest_params;
++ };
++
++};
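++
++/*
++ * Example (illustrative sketch; all handles and the base address are
++ * hypothetical): filling the parameters for non-guest mode before
++ * calling fm_macsec_config(), declared further below:
++ *
++ *   struct fm_macsec_params p = { .fm_h = fm_handle, .guest_mode = false };
++ *
++ *   p.non_guest_params.base_addr   = macsec_base; // MMIO base address
++ *   p.non_guest_params.fm_mac_h    = mac_handle;
++ *   p.non_guest_params.exception_f = my_exception_cb;
++ *   p.non_guest_params.app_h       = my_ctx;
++ */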
++
++/**************************************************************************//**
++ @Description FM device opaque structure used for type checking
++*//***************************************************************************/
++struct fm;
++
++/**************************************************************************//**
++ @Description FM MAC device opaque structure used for type checking
++*//***************************************************************************/
++struct fm_mac_dev;
++
++/**************************************************************************//**
++ @Description FM MACSEC device opaque structure used for type checking
++*//***************************************************************************/
++struct fm_macsec_dev;
++struct fm_macsec_secy_dev;
++
++/**************************************************************************//**
++ @Description FM Port device opaque structure used for type checking
++*//***************************************************************************/
++struct fm_port;
++
++typedef int (*alloc_pcd_fqids)(struct device *dev, uint32_t num,
++ uint8_t alignment, uint32_t *base_fqid);
++
++typedef int (*free_pcd_fqids)(struct device *dev, uint32_t base_fqid);
++
++struct fm_port_pcd_param {
++ alloc_pcd_fqids cba;
++ free_pcd_fqids cbf;
++ struct device *dev;
++};
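++
++/*
++ * Example (illustrative sketch): minimal callbacks matching the typedefs
++ * above. A real implementation would draw on the QMan FQID allocator;
++ * the fixed base below is a placeholder.
++ *
++ *   static int my_alloc_pcd_fqids(struct device *dev, uint32_t num,
++ *                                 uint8_t alignment, uint32_t *base_fqid)
++ *   {
++ *           *base_fqid = 0x400; // hypothetical, suitably aligned base
++ *           return 0;
++ *   }
++ *
++ *   static int my_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
++ *   {
++ *           return 0;
++ *   }
++ */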
++
++/**************************************************************************//**
++ @Description A structure of information about each of the external
++ buffer pools used by the port.
++*//***************************************************************************/
++struct fm_port_pool_param {
++ uint8_t id; /**< External buffer pool id */
++ uint16_t size; /**< External buffer pool buffer size */
++};
++
++/**************************************************************************//**
++ @Description structure for additional port parameters
++*//***************************************************************************/
++struct fm_port_params {
++ uint32_t errq; /**< Error Queue Id. */
++ uint32_t defq; /**< For Tx and HC - Default Confirmation queue,
++ 0 means no Tx conf for processed frames.
++ For Rx and OP - default Rx queue. */
++ uint8_t num_pools; /**< Number of pools used by this port */
++ struct fm_port_pool_param pool_param[FM_PORT_MAX_NUM_OF_EXT_POOLS];
++ /**< Parameters for each pool */
++ uint16_t priv_data_size; /**< Area that the user may reserve for
++ their own needs (e.g. saving the SKB) */
++ bool parse_results; /**< Put the parser-results in the Rx/Tx buffer */
++ bool hash_results; /**< Put the hash-results in the Rx/Tx buffer */
++ bool time_stamp; /**< Put the time-stamp in the Rx/Tx buffer */
++ bool frag_enable; /**< Fragmentation support, for OP only */
++ uint16_t data_align; /**< value for selecting a data alignment (must be a power of 2);
++ if write optimization is used, must be >= 16. */
++ uint8_t manip_extra_space; /**< Maximum extra size needed (insertion-size minus removal-size);
++ Note that this field impacts the size of the buffer-prefix
++ (i.e. it pushes the data offset); */
++};
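++
++/*
++ * Example (illustrative sketch; the queue ids and pool values are
++ * hypothetical): parameters for an Rx port with one buffer pool:
++ *
++ *   struct fm_port_params p = {
++ *           .errq           = 0x101,  // error frame queue
++ *           .defq           = 0x100,  // default Rx queue
++ *           .num_pools      = 1,
++ *           .pool_param[0]  = { .id = 3, .size = 2048 },
++ *           .priv_data_size = 16,     // e.g. room for an skb back-pointer
++ *           .parse_results  = true,
++ *   };
++ */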
++
++/**************************************************************************//**
++ @Function fm_bind
++
++ @Description Bind to a specific FM device.
++
++ @Param[in] fm_dev - the OF handle of the FM device.
++
++ @Return A handle of the FM device.
++
++ @Cautions Allowed only after the port was created.
++*//***************************************************************************/
++struct fm *fm_bind(struct device *fm_dev);
++
++/**************************************************************************//**
++ @Function fm_unbind
++
++ @Description Un-bind from a specific FM device.
++
++ @Param[in] fm - A handle of the FM device.
++
++ @Cautions Allowed only after the port was created.
++*//***************************************************************************/
++void fm_unbind(struct fm *fm);
++
++void *fm_get_handle(struct fm *fm);
++void *fm_get_rtc_handle(struct fm *fm);
++struct resource *fm_get_mem_region(struct fm *fm);
++
++/**************************************************************************//**
++ @Function fm_port_bind
++
++ @Description Bind to a specific FM-port device (may be Rx or Tx port).
++
++ @Param[in] fm_port_dev - the OF handle of the FM port device.
++
++ @Return A handle of the FM port device.
++
++ @Cautions Allowed only after the port was created.
++*//***************************************************************************/
++struct fm_port *fm_port_bind(struct device *fm_port_dev);
++
++/**************************************************************************//**
++ @Function fm_port_unbind
++
++ @Description Un-bind from a specific FM-port device (may be Rx or Tx port).
++
++ @Param[in] port - A handle of the FM port device.
++
++ @Cautions Allowed only after the port was created.
++*//***************************************************************************/
++void fm_port_unbind(struct fm_port *port);
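++
++/*
++ * Example (illustrative call order; `of_dev` is a hypothetical device
++ * resolved from the device tree, error handling elided). The parameter
++ * and enable routines used here are declared further below:
++ *
++ *   struct fm_port *port = fm_port_bind(&of_dev->dev);
++ *
++ *   fm_set_rx_port_params(port, &p); // p: a filled struct fm_port_params
++ *   fm_port_enable(port);
++ */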
++
++/**************************************************************************//**
++ @Function fm_set_rx_port_params
++
++ @Description Configure parameters for a specific Rx FM-port device.
++
++ @Param[in] port - A handle of the FM port device.
++ @Param[in] params - Rx port parameters
++
++ @Cautions Allowed only after the port is bound.
++*//***************************************************************************/
++void fm_set_rx_port_params(struct fm_port *port,
++ struct fm_port_params *params);
++
++/**************************************************************************//**
++ @Function fm_port_pcd_bind
++
++ @Description Bind as a listener on a port PCD.
++
++ @Param[in] port - A handle of the FM port device.
++ @Param[in] params - PCD port parameters
++
++ @Cautions Allowed only after the port is bound.
++*//***************************************************************************/
++void fm_port_pcd_bind (struct fm_port *port, struct fm_port_pcd_param *params);
++
++/**************************************************************************//**
++ @Function fm_port_get_buff_layout_ext_params
++
++ @Description Get data_align and manip_extra_space from the device-tree
++ chosen node, if present.
++ This function only updates these two parameters.
++ When the port has no such parameters in the device tree,
++ the values are set to 0.
++
++ @Param[in] port - A handle of the FM port device.
++ @Param[in] params - FM port parameters
++
++ @Cautions Allowed only after the port is bound.
++*//***************************************************************************/
++void fm_port_get_buff_layout_ext_params(struct fm_port *port, struct fm_port_params *params);
++
++/**************************************************************************//**
++ @Function fm_get_tx_port_channel
++
++ @Description Get qman-channel number for this Tx port.
++
++ @Param[in] port - A handle of the FM port device.
++
++ @Return qman-channel number for this Tx port.
++
++ @Cautions Allowed only after the port is bound.
++*//***************************************************************************/
++uint16_t fm_get_tx_port_channel(struct fm_port *port);
++
++/**************************************************************************//**
++ @Function fm_set_tx_port_params
++
++ @Description Configure parameters for a specific Tx FM-port device
++
++ @Param[in] port - A handle of the FM port device.
++ @Param[in] params - Tx port parameters
++
++ @Cautions Allowed only after the port is bound.
++*//***************************************************************************/
++void fm_set_tx_port_params(struct fm_port *port, struct fm_port_params *params);
++
++
++/**************************************************************************//**
++ @Function fm_mac_set_handle
++
++ @Description Set mac handle
++
++ @Param[in] h_lnx_wrp_fm_dev - A handle of the LnxWrp FM device.
++ @Param[in] h_fm_mac - A handle of the LnxWrp FM MAC device.
++ @Param[in] mac_id - MAC id.
++*//***************************************************************************/
++void fm_mac_set_handle(t_Handle h_lnx_wrp_fm_dev, t_Handle h_fm_mac,
++ int mac_id);
++
++/**************************************************************************//**
++ @Function fm_port_enable
++
++ @Description Enable specific FM-port device (may be Rx or Tx port).
++
++ @Param[in] port - A handle of the FM port device.
++
++ @Cautions Allowed only after the port is initialized.
++*//***************************************************************************/
++int fm_port_enable(struct fm_port *port);
++
++/**************************************************************************//**
++ @Function fm_port_disable
++
++ @Description Disable specific FM-port device (may be Rx or Tx port).
++
++ @Param[in] port - A handle of the FM port device.
++
++ @Cautions Allowed only after the port is initialized.
++*//***************************************************************************/
++int fm_port_disable(struct fm_port *port);
++
++void *fm_port_get_handle(const struct fm_port *port);
++
++u64 *fm_port_get_buffer_time_stamp(const struct fm_port *port,
++ const void *data);
++
++/**************************************************************************//**
++ @Function fm_port_get_base_addr
++
++ @Description Get base address of this port. Useful for accessing
++ port-specific registers (i.e., not common ones).
++
++ @Param[in] port - A handle of the FM port device.
++
++ @Param[out] base_addr - The port's base addr (virtual address).
++*//***************************************************************************/
++void fm_port_get_base_addr(const struct fm_port *port, uint64_t *base_addr);
++
++/**************************************************************************//**
++ @Function fm_mutex_lock
++
++ @Description Lock function required before any FMD/LLD call.
++*//***************************************************************************/
++void fm_mutex_lock(void);
++
++/**************************************************************************//**
++ @Function fm_mutex_unlock
++
++ @Description Unlock function required after any FMD/LLD call.
++*//***************************************************************************/
++void fm_mutex_unlock(void);
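++
++/*
++ * Example (illustrative only): bracketing a direct FMD/LLD call,
++ * here FM_PORT_Enable() with a hypothetical handle:
++ *
++ *   fm_mutex_lock();
++ *   err = FM_PORT_Enable(h_port);
++ *   fm_mutex_unlock();
++ */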
++
++/**************************************************************************//**
++ @Function fm_get_max_frm
++
++ @Description Get the maximum frame size
++*//***************************************************************************/
++int fm_get_max_frm(void);
++
++/**************************************************************************//**
++ @Function fm_get_rx_extra_headroom
++
++ @Description Get the extra headroom size
++*//***************************************************************************/
++int fm_get_rx_extra_headroom(void);
++
++/**************************************************************************//**
++@Function fm_port_set_rate_limit
++
++@Description Configure Shaper parameter on FM-port device (Tx port).
++
++@Param[in] port - A handle of the FM port device.
++@Param[in] max_burst_size - Value of maximum burst size allowed.
++@Param[in] rate_limit - The required rate value.
++
++@Cautions Allowed only after the port is initialized.
++*//***************************************************************************/
++int fm_port_set_rate_limit(struct fm_port *port,
++ uint16_t max_burst_size,
++ uint32_t rate_limit);
++/**************************************************************************//**
++@Function fm_port_del_rate_limit
++
++@Description Delete Shaper configuration on FM-port device (Tx port).
++
++@Param[in] port - A handle of the FM port device.
++
++@Cautions Allowed only after the port is initialized.
++*//***************************************************************************/
++int fm_port_del_rate_limit(struct fm_port *port);
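++
++/*
++ * Example (illustrative only; the values are placeholders and the units
++ * are whatever the underlying FMD shaper expects): install a shaper on
++ * a Tx port, then remove it:
++ *
++ *   fm_port_set_rate_limit(tx_port, 64, 100000);
++ *   ...
++ *   fm_port_del_rate_limit(tx_port);
++ */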
++
++struct auto_res_tables_sizes
++{
++ uint16_t max_num_of_arp_entries;
++ uint16_t max_num_of_echo_ipv4_entries;
++ uint16_t max_num_of_ndp_entries;
++ uint16_t max_num_of_echo_ipv6_entries;
++ uint16_t max_num_of_snmp_ipv4_entries;
++ uint16_t max_num_of_snmp_ipv6_entries;
++ uint16_t max_num_of_snmp_oid_entries;
++ uint16_t max_num_of_snmp_char; /* total number of characters needed
++ for the SNMP table */
++ uint16_t max_num_of_ip_prot_filtering;
++ uint16_t max_num_of_tcp_port_filtering;
++ uint16_t max_num_of_udp_port_filtering;
++};
++/* ARP */
++struct auto_res_arp_entry
++{
++ uint32_t ip_address;
++ uint8_t mac[6];
++ bool is_vlan;
++ uint16_t vid;
++};
++struct auto_res_arp_info
++{
++ uint8_t table_size;
++ struct auto_res_arp_entry *auto_res_table;
++ bool enable_conflict_detection; /* when TRUE,
++ conflict detection is performed and the host is woken
++ if needed */
++};
++
++/* NDP */
++struct auto_res_ndp_entry
++{
++ uint32_t ip_address[4];
++ uint8_t mac[6];
++ bool is_vlan;
++ uint16_t vid;
++};
++struct auto_res_ndp_info
++{
++ uint32_t multicast_group;
++ uint8_t table_size_assigned;
++ struct auto_res_ndp_entry *auto_res_table_assigned; /* This list
++ refers to solicitation IP addresses. Note that all IP addresses
++ must be from the same multicast group; this is checked, and if
++ it does not hold the operation will fail. */
++ uint8_t table_size_tmp;
++ struct auto_res_ndp_entry *auto_res_table_tmp; /* This list
++ refers to temporary IP addresses. Note that all temporary IP
++ addresses must be from the same multicast group; this is
++ checked, and if it does not hold the operation will fail. */
++
++ bool enable_conflict_detection; /* when TRUE,
++ conflict detection is performed and the host is woken
++ if needed */
++};
++
++/* ICMP ECHO */
++struct auto_res_echo_ipv4_info
++{
++ uint8_t table_size;
++ struct auto_res_arp_entry *auto_res_table;
++};
++
++struct auto_res_echo_ipv6_info
++{
++ uint8_t table_size;
++ struct auto_res_ndp_entry *auto_res_table;
++};
++
++/* SNMP */
++struct auto_res_snmp_entry
++{
++ uint16_t oidSize;
++ uint8_t *oidVal; /* only the oid string */
++ uint16_t resSize;
++ uint8_t *resVal; /* resVal will be the entire reply,
++ i.e. "Type|Length|Value" */
++};
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response SNMP IPv4 Addresses Table Entry
++ Refer to the FMan Controller spec for more details.
++*//***************************************************************************/
++struct auto_res_snmp_ipv4addr_tbl_entry
++{
++ uint32_t ipv4addr; /*!< 32 bit IPv4 Address. */
++ bool is_vlan;
++ uint16_t vid; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
++ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
++};
++
++/**************************************************************************//**
++ @Description Deep Sleep Auto Response SNMP IPv6 Addresses Table Entry
++ Refer to the FMan Controller spec for more details.
++*//***************************************************************************/
++struct auto_res_snmp_ipv6addr_tbl_entry
++{
++ uint32_t ipv6Addr[4]; /*!< 4 * 32 bit IPv6 Address. */
++ bool isVlan;
++ uint16_t vid; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
++ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
++};
++
++struct auto_res_snmp_info
++{
++ uint16_t control; /**< Control bits [0-15]. */
++ uint16_t max_snmp_msg_length; /**< Maximal allowed SNMP message length. */
++ uint16_t num_ipv4_addresses; /**< Number of entries in IPv4 addresses table. */
++ uint16_t num_ipv6_addresses; /**< Number of entries in IPv6 addresses table. */
++ struct auto_res_snmp_ipv4addr_tbl_entry *ipv4addr_tbl; /**< Pointer to IPv4 addresses table. */
++ struct auto_res_snmp_ipv6addr_tbl_entry *ipv6addr_tbl; /**< Pointer to IPv6 addresses table. */
++ char *community_read_write_string;
++ char *community_read_only_string;
++ struct auto_res_snmp_entry *oid_table;
++ uint32_t oid_table_size;
++ uint32_t *statistics;
++};
++
++/* Filtering */
++struct auto_res_port_filtering_entry
++{
++ uint16_t src_port;
++ uint16_t dst_port;
++ uint16_t src_port_mask;
++ uint16_t dst_port_mask;
++};
++struct auto_res_filtering_info
++{
++ /* IP protocol filtering parameters */
++ uint8_t ip_prot_table_size;
++ uint8_t *ip_prot_table_ptr;
++ bool ip_prot_pass_on_hit; /* when TRUE, a miss in the table will
++ cause the packet to be dropped; a hit will pass the packet to
++ the UDP/TCP filters if needed, and otherwise to the
++ classification tree. If the classification tree passes the
++ packet to a queue, a wake interrupt is raised. When FALSE it
++ is the other way around. */
++ /* UDP port filtering parameters */
++ uint8_t udp_ports_table_size;
++ struct auto_res_port_filtering_entry *udp_ports_table_ptr;
++ bool udp_port_pass_on_hit; /* when TRUE, a miss in the table will
++ cause the packet to be dropped; a hit will pass the packet to
++ the classification tree. If the classification tree passes the
++ packet to a queue, a wake interrupt is raised. When FALSE it
++ is the other way around. */
++ /* TCP port filtering parameters */
++ uint16_t tcp_flags_mask;
++ uint8_t tcp_ports_table_size;
++ struct auto_res_port_filtering_entry *tcp_ports_table_ptr;
++ bool tcp_port_pass_on_hit; /* when TRUE, a miss in the table will
++ cause the packet to be dropped; a hit will pass the packet to
++ the classification tree. If the classification tree passes the
++ packet to a queue, a wake interrupt is raised. When FALSE it
++ is the other way around. */
++};
++
++struct auto_res_port_params
++{
++ t_Handle h_FmPortTx;
++ struct auto_res_arp_info *p_auto_res_arp_info;
++ struct auto_res_echo_ipv4_info *p_auto_res_echo_ipv4_info;
++ struct auto_res_ndp_info *p_auto_res_ndp_info;
++ struct auto_res_echo_ipv6_info *p_auto_res_echo_ipv6_info;
++ struct auto_res_snmp_info *p_auto_res_snmp_info;
++ struct auto_res_filtering_info *p_auto_res_filtering_info;
++};
++
++struct auto_res_port_stats
++{
++ uint32_t arp_ar_cnt;
++ uint32_t echo_icmpv4_ar_cnt;
++ uint32_t ndp_ar_cnt;
++ uint32_t echo_icmpv6_ar_cnt;
++};
++
++int fm_port_config_autores_for_deepsleep_support(struct fm_port *port,
++ struct auto_res_tables_sizes *params);
++
++int fm_port_enter_autores_for_deepsleep(struct fm_port *port,
++ struct auto_res_port_params *params);
++
++void fm_port_exit_auto_res_for_deep_sleep(struct fm_port *port_rx,
++ struct fm_port *port_tx);
++
++bool fm_port_is_in_auto_res_mode(struct fm_port *port);
++
++struct auto_res_tables_sizes *fm_port_get_autores_maxsize(
++ struct fm_port *port);
++
++int fm_port_get_autores_stats(struct fm_port *port, struct auto_res_port_stats
++ *stats);
++
++int fm_port_resume(struct fm_port *port);
++
++int fm_port_suspend(struct fm_port *port);
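++
++/*
++ * Example (illustrative sketch of the deep-sleep auto-response sequence;
++ * the table sizing and the single ARP entry are hypothetical):
++ *
++ *   struct auto_res_tables_sizes sz = { .max_num_of_arp_entries = 4 };
++ *   struct auto_res_arp_entry arp = {
++ *           .ip_address = 0xc0a80101,              // 192.168.1.1
++ *           .mac = { 0x00, 0x04, 0x9f, 0, 0, 1 } };
++ *   struct auto_res_arp_info arp_info = {
++ *           .table_size = 1, .auto_res_table = &arp };
++ *   struct auto_res_port_params ar = {
++ *           .h_FmPortTx = tx_h, .p_auto_res_arp_info = &arp_info };
++ *
++ *   fm_port_config_autores_for_deepsleep_support(rx_port, &sz);
++ *   fm_port_enter_autores_for_deepsleep(rx_port, &ar);
++ *   ...
++ *   fm_port_exit_auto_res_for_deep_sleep(rx_port, tx_port);
++ */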
++
++#ifdef CONFIG_FMAN_PFC
++/**************************************************************************//**
++@Function fm_port_set_pfc_priorities_mapping_to_qman_wq
++
++@Description Associate a QMan Work Queue with a PFC priority on this
++ FM-port device (Tx port).
++
++@Param[in] port - A handle of the FM port device.
++
++@Param[in] prio - The PFC priority.
++
++@Param[in] wq - The Work Queue associated with the PFC priority.
++
++@Cautions Allowed only after the port is initialized.
++*//***************************************************************************/
++int fm_port_set_pfc_priorities_mapping_to_qman_wq(struct fm_port *port,
++ uint8_t prio, uint8_t wq);
++#endif
++
++/**************************************************************************//**
++@Function fm_mac_set_exception
++
++@Description Set MAC exception state.
++
++@Param[in] fm_mac_dev - A handle of the FM MAC device.
++@Param[in] exception - FM MAC exception type.
++@Param[in] enable - new state.
++
++*//***************************************************************************/
++int fm_mac_set_exception(struct fm_mac_dev *fm_mac_dev,
++ e_FmMacExceptions exception, bool enable);
++
++int fm_mac_free(struct fm_mac_dev *fm_mac_dev);
++
++struct fm_mac_dev *fm_mac_config(t_FmMacParams *params);
++
++int fm_mac_config_max_frame_length(struct fm_mac_dev *fm_mac_dev,
++ int len);
++
++int fm_mac_config_pad_and_crc(struct fm_mac_dev *fm_mac_dev, bool enable);
++
++int fm_mac_config_half_duplex(struct fm_mac_dev *fm_mac_dev, bool enable);
++
++int fm_mac_config_reset_on_init(struct fm_mac_dev *fm_mac_dev, bool enable);
++
++int fm_mac_init(struct fm_mac_dev *fm_mac_dev);
++
++int fm_mac_get_version(struct fm_mac_dev *fm_mac_dev, uint32_t *version);
++
++int fm_mac_enable(struct fm_mac_dev *fm_mac_dev);
++
++int fm_mac_disable(struct fm_mac_dev *fm_mac_dev);
++
++int fm_mac_resume(struct fm_mac_dev *fm_mac_dev);
++
++int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev,
++ bool enable);
++
++int fm_mac_remove_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
++ t_EnetAddr *mac_addr);
++
++int fm_mac_add_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
++ t_EnetAddr *mac_addr);
++
++int fm_mac_modify_mac_addr(struct fm_mac_dev *fm_mac_dev,
++ uint8_t *addr);
++
++int fm_mac_adjust_link(struct fm_mac_dev *fm_mac_dev,
++ bool link, int speed, bool duplex);
++
++int fm_mac_enable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev);
++
++int fm_mac_disable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev);
++
++int fm_mac_set_rx_pause_frames(
++ struct fm_mac_dev *fm_mac_dev, bool en);
++
++int fm_mac_set_tx_pause_frames(struct fm_mac_dev *fm_mac_dev,
++ bool en);
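++
++/*
++ * Example (illustrative bring-up order, error handling elided; `params`
++ * is a hypothetical, fully populated t_FmMacParams):
++ *
++ *   struct fm_mac_dev *mac = fm_mac_config(&params);
++ *
++ *   fm_mac_config_max_frame_length(mac, 1522);
++ *   fm_mac_config_pad_and_crc(mac, true);
++ *   fm_mac_init(mac);
++ *   fm_mac_enable(mac);
++ */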
++
++int fm_rtc_enable(struct fm *fm_dev);
++
++int fm_rtc_disable(struct fm *fm_dev);
++
++int fm_rtc_get_cnt(struct fm *fm_dev, uint64_t *ts);
++
++int fm_rtc_set_cnt(struct fm *fm_dev, uint64_t ts);
++
++int fm_rtc_get_drift(struct fm *fm_dev, uint32_t *drift);
++
++int fm_rtc_set_drift(struct fm *fm_dev, uint32_t drift);
++
++int fm_rtc_set_alarm(struct fm *fm_dev, uint32_t id,
++ uint64_t time);
++
++int fm_rtc_set_fiper(struct fm *fm_dev, uint32_t id,
++ uint64_t fiper);
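++
++/*
++ * Example (illustrative 1588 timer usage; the alarm offset is a
++ * hypothetical value in timer units):
++ *
++ *   uint64_t now;
++ *
++ *   fm_rtc_enable(fm_dev);
++ *   fm_rtc_get_cnt(fm_dev, &now);
++ *   fm_rtc_set_alarm(fm_dev, 0, now + 1000000ULL);
++ */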
++
++int fm_mac_set_wol(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
++ bool en);
++
++/**************************************************************************//**
++@Function fm_macsec_set_exception
++
++@Description Set MACSEC exception state.
++
++@Param[in] fm_macsec_dev - A handle of the FM MACSEC device.
++@Param[in] exception - FM MACSEC exception type.
++@Param[in] enable - new state.
++
++*//***************************************************************************/
++
++int fm_macsec_set_exception(struct fm_macsec_dev *fm_macsec_dev,
++ fm_macsec_exception exception, bool enable);
++int fm_macsec_free(struct fm_macsec_dev *fm_macsec_dev);
++struct fm_macsec_dev *fm_macsec_config(struct fm_macsec_params *fm_params);
++int fm_macsec_init(struct fm_macsec_dev *fm_macsec_dev);
++int fm_macsec_config_unknown_sci_frame_treatment(struct fm_macsec_dev
++ *fm_macsec_dev,
++ fm_macsec_unknown_sci_frame_treatment treat_mode);
++int fm_macsec_config_invalid_tags_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
++ bool deliver_uncontrolled);
++int fm_macsec_config_kay_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
++ bool discard_uncontrolled);
++int fm_macsec_config_untag_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
++ fm_macsec_untag_frame_treatment treat_mode);
++int fm_macsec_config_pn_exhaustion_threshold(struct fm_macsec_dev *fm_macsec_dev,
++ uint32_t pnExhThr);
++int fm_macsec_config_keys_unreadable(struct fm_macsec_dev *fm_macsec_dev);
++int fm_macsec_config_sectag_without_sci(struct fm_macsec_dev *fm_macsec_dev);
++int fm_macsec_config_exception(struct fm_macsec_dev *fm_macsec_dev,
++ fm_macsec_exception exception, bool enable);
++int fm_macsec_get_revision(struct fm_macsec_dev *fm_macsec_dev,
++ int *macsec_revision);
++int fm_macsec_enable(struct fm_macsec_dev *fm_macsec_dev);
++int fm_macsec_disable(struct fm_macsec_dev *fm_macsec_dev);
++
++
++int fm_macsec_secy_config_exception(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ fm_macsec_secy_exception exception,
++ bool enable);
++int fm_macsec_secy_free(struct fm_macsec_secy_dev *fm_macsec_secy_dev);
++struct fm_macsec_secy_dev *fm_macsec_secy_config(struct fm_macsec_secy_params *secy_params);
++int fm_macsec_secy_init(struct fm_macsec_secy_dev *fm_macsec_secy_dev);
++int fm_macsec_secy_config_sci_insertion_mode(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ fm_macsec_sci_insertion_mode sci_insertion_mode);
++int fm_macsec_secy_config_protect_frames(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ bool protect_frames);
++int fm_macsec_secy_config_replay_window(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ bool replay_protect, uint32_t replay_window);
++int fm_macsec_secy_config_validation_mode(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ fm_macsec_valid_frame_behavior validate_frames);
++int fm_macsec_secy_config_confidentiality(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ bool confidentiality_enable,
++ uint32_t confidentiality_offset);
++int fm_macsec_secy_config_point_to_point(struct fm_macsec_secy_dev *fm_macsec_secy_dev);
++int fm_macsec_secy_config_event(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ fm_macsec_secy_event event,
++ bool enable);
++struct rx_sc_dev *fm_macsec_secy_create_rxsc(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct fm_macsec_secy_sc_params *params);
++int fm_macsec_secy_delete_rxsc(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc);
++int fm_macsec_secy_create_rx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc, macsec_an_t an,
++ uint32_t lowest_pn, macsec_sa_key_t key);
++int fm_macsec_secy_delete_rx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc, macsec_an_t an);
++int fm_macsec_secy_rxsa_enable_receive(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an);
++int fm_macsec_secy_rxsa_disable_receive(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an);
++int fm_macsec_secy_rxsa_update_next_pn(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an, uint32_t updt_next_pn);
++int fm_macsec_secy_rxsa_update_lowest_pn(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an, uint32_t updt_lowest_pn);
++int fm_macsec_secy_rxsa_modify_key(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an, macsec_sa_key_t key);
++int fm_macsec_secy_create_tx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t an, macsec_sa_key_t key);
++int fm_macsec_secy_delete_tx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t an);
++int fm_macsec_secy_txsa_modify_key(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t next_active_an,
++ macsec_sa_key_t key);
++int fm_macsec_secy_txsa_set_active(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t an);
++int fm_macsec_secy_txsa_get_active(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t *p_an);
++int fm_macsec_secy_get_rxsc_phys_id(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc, uint32_t *sc_phys_id);
++int fm_macsec_secy_get_txsc_phys_id(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ uint32_t *sc_phys_id);
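++
++/*
++ * Example (illustrative sketch of a SecY receive-side setup;
++ * `secy_params`, `sci` and `key` are hypothetical, association number 0
++ * and lowest PN 1 are placeholders):
++ *
++ *   struct fm_macsec_secy_dev *secy = fm_macsec_secy_config(&secy_params);
++ *   struct fm_macsec_secy_sc_params scp = {
++ *           .sci = sci, .cipher_suite = SECY_GCM_AES_128 };
++ *   struct rx_sc_dev *sc;
++ *
++ *   fm_macsec_secy_init(secy);
++ *   sc = fm_macsec_secy_create_rxsc(secy, &scp);
++ *   fm_macsec_secy_create_rx_sa(secy, sc, 0, 1, key);
++ *   fm_macsec_secy_rxsa_enable_receive(secy, sc, 0);
++ */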
++
++/** @} */ /* end of FM_LnxKern_ctrl_grp group */
++/** @} */ /* end of FM_LnxKern_grp group */
++
++/* default values for initializing PTP 1588 timer clock */
++#define DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT 2 /* power of 2 for better performance */
++#define DPA_PTP_NOMINAL_FREQ_PERIOD_NS (1 << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT) /* 4 ns, 250 MHz */
++
++#endif /* __LNXWRP_FSL_FMAN_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/inc/xx/xx.h b/drivers/net/ethernet/freescale/sdk_fman/src/inc/xx/xx.h
+new file mode 100644
+index 00000000..b183c86d
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/xx/xx.h
+@@ -0,0 +1,50 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __XX_H
++#define __XX_H
++
++#include "xx_ext.h"
++
++void * xx_Malloc(uint32_t n);
++void xx_Free(void *p);
++
++void *xx_MallocSmart(uint32_t size, int memPartitionId, uint32_t align);
++void xx_FreeSmart(void *p);
++
++/* never used: */
++#define GetDeviceName(irq) ((char *)NULL)
++
++int GetDeviceIrqNum(int irq);
++
++
++#endif /* __XX_H */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/system/Makefile b/drivers/net/ethernet/freescale/sdk_fman/src/system/Makefile
+new file mode 100644
+index 00000000..667cd859
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/system/Makefile
+@@ -0,0 +1,10 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++#
++
++obj-y += sys_io.o
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/system/sys_io.c b/drivers/net/ethernet/freescale/sdk_fman/src/system/sys_io.c
+new file mode 100644
+index 00000000..c106a8b7
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/system/sys_io.c
+@@ -0,0 +1,171 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/version.h>
++
++#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++#ifdef MODVERSIONS
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#include <linux/modversions.h>
++#else
++#include <config/modversions.h>
++#endif /* LINUX_VERSION_CODE */
++#endif /* MODVERSIONS */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++
++#include <asm/io.h>
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "string_ext.h"
++#include "list_ext.h"
++#include "sys_io_ext.h"
++
++
++#define __ERR_MODULE__ MODULE_UNKNOWN
++
++
++typedef struct {
++ uint64_t virtAddr;
++ uint64_t physAddr;
++ uint32_t size;
++ t_List node;
++} t_IoMap;
++#define IOMAP_OBJECT(ptr) LIST_OBJECT(ptr, t_IoMap, node)
++
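++/*
++ * mapsList holds every virt<->phys I/O window registered through
++ * SYS_RegisterIoMap(); the SYS_PhysToVirt()/SYS_VirtToPhys()
++ * translators scan it before falling back to the kernel's linear
++ * mapping helpers.
++ */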
++LIST(mapsList);
++
++
++static void EnqueueIoMap(t_IoMap *p_IoMap)
++{
++ uint32_t intFlags;
++
++ intFlags = XX_DisableAllIntr();
++ LIST_AddToTail(&p_IoMap->node, &mapsList);
++ XX_RestoreAllIntr(intFlags);
++}
++
++static t_IoMap * FindIoMapByVirtAddr(uint64_t addr)
++{
++ t_IoMap *p_IoMap;
++ t_List *p_Pos;
++
++ LIST_FOR_EACH(p_Pos, &mapsList)
++ {
++ p_IoMap = IOMAP_OBJECT(p_Pos);
++ if ((addr >= p_IoMap->virtAddr) && (addr < p_IoMap->virtAddr+p_IoMap->size))
++ return p_IoMap;
++ }
++
++ return NULL;
++}
++
++static t_IoMap * FindIoMapByPhysAddr(uint64_t addr)
++{
++ t_IoMap *p_IoMap;
++ t_List *p_Pos;
++
++ LIST_FOR_EACH(p_Pos, &mapsList)
++ {
++ p_IoMap = IOMAP_OBJECT(p_Pos);
++ if ((addr >= p_IoMap->physAddr) && (addr < p_IoMap->physAddr+p_IoMap->size))
++ return p_IoMap;
++ }
++
++ return NULL;
++}
++
++t_Error SYS_RegisterIoMap (uint64_t virtAddr, uint64_t physAddr, uint32_t size)
++{
++ t_IoMap *p_IoMap;
++
++ p_IoMap = (t_IoMap*)XX_Malloc(sizeof(t_IoMap));
++ if (!p_IoMap)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("message handler object!!!"));
++ memset(p_IoMap, 0, sizeof(t_IoMap));
++
++ p_IoMap->virtAddr = virtAddr;
++ p_IoMap->physAddr = physAddr;
++ p_IoMap->size = size;
++
++ INIT_LIST(&p_IoMap->node);
++ EnqueueIoMap(p_IoMap);
++
++ return E_OK;
++}
++
++t_Error SYS_UnregisterIoMap (uint64_t virtAddr)
++{
++ t_IoMap *p_IoMap = FindIoMapByVirtAddr(virtAddr);
++ if (!p_IoMap)
++ RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!"));
++
++ LIST_Del(&p_IoMap->node);
++ XX_Free(p_IoMap);
++
++ return E_OK;
++}
++
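++/* Translate a physical address through the registered I/O windows,
++ * falling back to the kernel's linear phys_to_virt() mapping. */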
++uint64_t SYS_PhysToVirt(uint64_t addr)
++{
++ t_IoMap *p_IoMap = FindIoMapByPhysAddr(addr);
++ if (p_IoMap)
++ {
++ /* This is an optimization: keep the most recently used entry at the list head, like a cache */
++ if (mapsList.p_Next != &p_IoMap->node)
++ {
++ uint32_t intFlags = XX_DisableAllIntr();
++ LIST_DelAndInit(&p_IoMap->node);
++ LIST_Add(&p_IoMap->node, &mapsList);
++ XX_RestoreAllIntr(intFlags);
++ }
++ return (uint64_t)(addr - p_IoMap->physAddr + p_IoMap->virtAddr);
++ }
++ return PTR_TO_UINT(phys_to_virt((unsigned long)addr));
++}
++
++uint64_t SYS_VirtToPhys(uint64_t addr)
++{
++ t_IoMap *p_IoMap;
++
++ if (addr == 0)
++ return 0;
++
++ p_IoMap = FindIoMapByVirtAddr(addr);
++ if (p_IoMap)
++ return (uint64_t)(addr - p_IoMap->virtAddr + p_IoMap->physAddr);
++ return (uint64_t)virt_to_phys(UINT_TO_PTR(addr));
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/Makefile b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/Makefile
+new file mode 100644
+index 00000000..62713d62
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/Makefile
+@@ -0,0 +1,19 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++#Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
++
++ccflags-y += -I$(NCSW_FM_INC)
++ccflags-y += -I$(NET_DPA)
++
++obj-y += fsl-ncsw-PFM.o
++obj-$(CONFIG_FSL_SDK_FMAN_TEST) += fman_test.o
++
++fsl-ncsw-PFM-objs := lnxwrp_fm.o lnxwrp_fm_port.o lnxwrp_ioctls_fm.o \
++ lnxwrp_sysfs.o lnxwrp_sysfs_fm.o lnxwrp_sysfs_fm_port.o
++obj-$(CONFIG_COMPAT) += lnxwrp_ioctls_fm_compat.o
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/fman_test.c b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/fman_test.c
+new file mode 100644
+index 00000000..270d07b8
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/fman_test.c
+@@ -0,0 +1,1665 @@
++/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File fman_test.c
++ @Authors Pistirica Sorin Andrei
++ @Description FM Linux test environment
++*/
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <linux/of_platform.h>
++#include <linux/ip.h>
++#include <linux/compat.h>
++#include <linux/uaccess.h>
++#include <linux/errno.h>
++#include <linux/netdevice.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++#include <linux/fsl_qman.h>
++#include <linux/fsl_bman.h>
++
++/* private headers */
++#include "fm_ext.h"
++#include "lnxwrp_fsl_fman.h"
++#include "fm_port_ext.h"
++#if (DPAA_VERSION == 11)
++#include "../../Peripherals/FM/MAC/memac.h"
++#endif
++#include "fm_test_ioctls.h"
++#include "fsl_fman_test.h"
++
++#include "dpaa_eth.h"
++#include "dpaa_eth_common.h"
++
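++/*
++ * Test frames are expected to carry this 64-bit watermark in their
++ * payload; the dpaa ingress hooks scan for it (test_and_steal_frame())
++ * to tell test traffic apart from regular frames.
++ */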
++#define FMT_FRM_WATERMARK 0xdeadbeefdeadbeeaLL
++
++struct fmt_frame_s {
++ ioc_fmt_buff_desc_t buff;
++ struct list_head list;
++};
++
++struct fmt_fqs_s {
++ struct qman_fq fq_base;
++ bool init;
++ struct fmt_port_s *fmt_port_priv;
++};
++
++struct fmt_port_pcd_s {
++ int num_queues;
++ struct fmt_fqs_s *fmt_pcd_fqs;
++ uint32_t fqid_base;
++};
++
++/* char dev structure: fm test port */
++struct fmt_port_s {
++ bool valid;
++ uint8_t id;
++ ioc_fmt_port_type port_type;
++ ioc_diag_mode diag;
++ bool compat_test_type;
++
++ /* fm ports */
++ /* ! for oh ports p_tx_fm_port_dev == p_rx_fm_port_dev &&
++ * p_tx_port == p_rx_port */
++ /* t_LnxWrpFmPortDev */
++ struct fm_port *p_tx_port;
++ /* t_LnxWrpFmPortDev->h_Dev: t_FmPort */
++ void *p_tx_fm_port_dev;
++ /* t_LnxWrpFmPortDev */
++ struct fm_port *p_rx_port;
++ /* t_LnxWrpFmPortDev->h_Dev: t_FmPort */
++ void *p_rx_fm_port_dev;
++
++ void *p_mac_dev;
++ uint64_t fm_phys_base_addr;
++
++ /* read/write queue manipulation */
++ spinlock_t rx_q_lock;
++ struct list_head rx_q;
++
++ /* tx queues for injecting traffic */
++ int num_of_tx_fqs;
++ struct fmt_fqs_s p_tx_fqs[FMAN_TEST_MAX_TX_FQS];
++
++ /* pcd private queues manipulation */
++ struct fmt_port_pcd_s fmt_port_pcd;
++
++ /* debugging stuff */
++
++#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
++ atomic_t enqueue_to_qman_frm;
++ atomic_t enqueue_to_rxq;
++ atomic_t dequeue_from_rxq;
++ atomic_t not_enqueue_to_rxq_wrong_frm;
++#endif
++
++};
++
++/* The devices. */
++struct fmt_s {
++ int major;
++ struct fmt_port_s ports[IOC_FMT_MAX_NUM_OF_PORTS];
++ struct class *fmt_class;
++};
++
++/* fm test structure */
++static struct fmt_s fm_test;
++
++#if (DPAA_VERSION == 11)
++struct mac_priv_s {
++ t_Handle mac;
++};
++#endif
++
++#define DTSEC_BASE_ADDR 0x000e0000
++#define DTSEC_MEM_RANGE 0x00002000
++#define MAC_1G_MACCFG1 0x00000100
++#define MAC_1G_LOOP_MASK 0x00000100
++static int set_1gmac_loopback(
++ struct fmt_port_s *fmt_port,
++ bool en)
++{
++#if (DPAA_VERSION <= 10)
++ uint32_t dtsec_idx = fmt_port->id; /* dtsec for which port */
++ uint32_t dtsec_idx_off = dtsec_idx * DTSEC_MEM_RANGE;
++ phys_addr_t maccfg1_hw;
++ void *maccfg1_map;
++ uint32_t maccfg1_val;
++
++ /* compute the maccfg1 register address */
++ maccfg1_hw = fmt_port->fm_phys_base_addr +
++ (phys_addr_t)(DTSEC_BASE_ADDR +
++ dtsec_idx_off +
++ MAC_1G_MACCFG1);
++
++ /* map register */
++ maccfg1_map = ioremap(maccfg1_hw, sizeof(u32));
++
++ /* set register */
++ maccfg1_val = in_be32(maccfg1_map);
++ if (en)
++ maccfg1_val |= MAC_1G_LOOP_MASK;
++ else
++ maccfg1_val &= ~MAC_1G_LOOP_MASK;
++ out_be32(maccfg1_map, maccfg1_val);
++
++ /* unmap register */
++ iounmap(maccfg1_map);
++#else
++ struct mac_device *mac_dev;
++ struct mac_priv_s *priv;
++ t_Memac *p_memac;
++
++ if (!fmt_port)
++ return -EINVAL;
++
++ mac_dev = (struct mac_device *)fmt_port->p_mac_dev;
++
++ if (!mac_dev)
++ return -EINVAL;
++
++ priv = macdev_priv(mac_dev);
++
++ if (!priv)
++ return -EINVAL;
++
++ p_memac = priv->mac;
++
++ if (!p_memac)
++ return -EINVAL;
++
++ memac_set_loopback(p_memac->p_MemMap, en);
++#endif
++ return 0;
++}
++
++/* TODO: re-write this function */
++static int set_10gmac_int_loopback(
++ struct fmt_port_s *fmt_port,
++ bool en)
++{
++#ifndef FM_10G_MAC_NO_CTRL_LOOPBACK
++#define FM_10GMAC0_OFFSET 0x000f0000
++#define FM_10GMAC_CMD_CONF_CTRL_OFFSET 0x8
++#define CMD_CFG_LOOPBACK_EN 0x00000400
++
++ uint64_t base_addr, reg_addr;
++ uint32_t tmp_val;
++
++ base_addr = fmt_port->fm_phys_base_addr + (FM_10GMAC0_OFFSET +
++ ((fmt_port->id-FM_MAX_NUM_OF_1G_RX_PORTS)*0x2000));
++
++ base_addr = PTR_TO_UINT(ioremap(base_addr, 0x1000));
++
++ reg_addr = base_addr + FM_10GMAC_CMD_CONF_CTRL_OFFSET;
++ tmp_val = GET_UINT32(*((uint32_t *)UINT_TO_PTR(reg_addr)));
++ if (en)
++ tmp_val |= CMD_CFG_LOOPBACK_EN;
++ else
++ tmp_val &= ~CMD_CFG_LOOPBACK_EN;
++ WRITE_UINT32(*((uint32_t *)UINT_TO_PTR(reg_addr)), tmp_val);
++
++ iounmap(UINT_TO_PTR(base_addr));
++
++ return 0;
++#else
++ _fmt_err("TGEC don't have internal-loopback.\n");
++ return -EPERM;
++#endif
++}
++
++static int set_mac_int_loopback(struct fmt_port_s *fmt_port, bool en)
++{
++ int _err = 0;
++
++ switch (fmt_port->port_type) {
++
++ case e_IOC_FMT_PORT_T_RXTX:
++ /* 1G port */
++ if (fmt_port->id < FM_MAX_NUM_OF_1G_RX_PORTS)
++ _err = set_1gmac_loopback(fmt_port, en);
++ /* 10g port */
++ else if ((fmt_port->id >= FM_MAX_NUM_OF_1G_RX_PORTS) &&
++ (fmt_port->id < FM_MAX_NUM_OF_1G_RX_PORTS +
++ FM_MAX_NUM_OF_10G_RX_PORTS)) {
++
++ _err = set_10gmac_int_loopback(fmt_port, en);
++ } else
++ _err = -EINVAL;
++ break;
++ /* op port does not have MAC (loopback mode) */
++ case e_IOC_FMT_PORT_T_OP:
++
++ _err = 0;
++ break;
++ default:
++
++ _err = -EPERM;
++ break;
++ }
++
++ return _err;
++}
++
++static void enqueue_fmt_frame(
++ struct fmt_port_s *fmt_port,
++ struct fmt_frame_s *p_fmt_frame)
++{
++ spinlock_t *rx_q_lock = NULL;
++
++ rx_q_lock = &fmt_port->rx_q_lock;
++
++ spin_lock(rx_q_lock);
++ list_add_tail(&p_fmt_frame->list, &fmt_port->rx_q);
++ spin_unlock(rx_q_lock);
++
++#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
++ atomic_inc(&fmt_port->enqueue_to_rxq);
++#endif
++}
++
++static struct fmt_frame_s *dequeue_fmt_frame(
++ struct fmt_port_s *fmt_port)
++{
++ struct fmt_frame_s *p_fmt_frame = NULL;
++ spinlock_t *rx_q_lock = NULL;
++
++ rx_q_lock = &fmt_port->rx_q_lock;
++
++ spin_lock(rx_q_lock);
++
++#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member)
++
++ if (!list_empty(&fmt_port->rx_q)) {
++ p_fmt_frame = list_last_entry(&fmt_port->rx_q,
++ struct fmt_frame_s,
++ list);
++ list_del(&p_fmt_frame->list);
++
++#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
++ atomic_inc(&fmt_port->dequeue_from_rxq);
++#endif
++ }
++
++ spin_unlock(rx_q_lock);
++
++ return p_fmt_frame;
++}
++
++/* eth-dev -to- fmt port association */
++struct fmt_port_s *match_dpa_to_fmt_port(
++ struct dpa_priv_s *dpa_priv) {
++ struct mac_device *mac_dev = dpa_priv->mac_dev;
++ struct fm_port *fm_port = (struct fm_port *) mac_dev;
++ struct fmt_port_s *fmt_port = NULL;
++ int i;
++
++ _fmt_dbgr("calling...\n");
++
++ /* find the FM-test-port object */
++ for (i = 0; i < IOC_FMT_MAX_NUM_OF_PORTS; i++)
++ if ((fm_test.ports[i].p_mac_dev &&
++ mac_dev == fm_test.ports[i].p_mac_dev) ||
++ fm_port == fm_test.ports[i].p_tx_port) {
++
++ fmt_port = &fm_test.ports[i];
++ break;
++ }
++
++ _fmt_dbgr("called\n");
++ return fmt_port;
++}
++
++void dump_frame(
++ uint8_t *buffer,
++ uint32_t size)
++{
++#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
++ unsigned int i;
++
++ for (i = 0; i < size; i++) {
++ if (i%16 == 0)
++ printk(KERN_DEBUG "\n");
++ printk(KERN_DEBUG "%2x ", *(buffer+i));
++ }
++#endif
++ return;
++}
++
++bool test_and_steal_frame(struct fmt_port_s *fmt_port,
++ uint32_t fqid,
++ uint8_t *buffer,
++ uint32_t size)
++{
++ struct fmt_frame_s *p_fmt_frame = NULL;
++ bool test_and_steal_frame_frame;
++ uint32_t data_offset;
++ uint32_t i;
++
++ _fmt_dbgr("calling...\n");
++
++ if (!fmt_port || !fmt_port->p_rx_fm_port_dev)
++ return false;
++
++ /* check watermark */
++ test_and_steal_frame_frame = false;
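++ /* scan the payload for the watermark, stopping at the last
++ * offset from which a full uint64_t can still be read */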
++ for (i = 0; i + sizeof(uint64_t) <= size; i++) {
++ uint64_t temp = *((uint64_t *)(buffer + i));
++
++ if (temp == (uint64_t) FMT_FRM_WATERMARK) {
++ _fmt_dbgr("watermark found!\n");
++ test_and_steal_frame_frame = true;
++ break;
++ }
++ }
++
++ if (!test_and_steal_frame_frame) {
++#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
++ atomic_inc(&fmt_port->not_enqueue_to_rxq_wrong_frm);
++#endif
++ _fmt_dbgr("NOT watermark found!\n");
++ return false;
++ }
++
++ /* do not enqueue the tx conf/err frames */
++ if ((fqid == FMT_TX_CONF_Q) || (fqid == FMT_TX_ERR_Q))
++ goto _test_and_steal_frame_return_true;
++
++ _fmt_dbgr("on port %d got FMUC frame\n", fmt_port->id);
++ data_offset = FM_PORT_GetBufferDataOffset(
++ fmt_port->p_rx_fm_port_dev);
++
++ p_fmt_frame = kmalloc(sizeof(struct fmt_frame_s), GFP_KERNEL);
++
++ /* dump frame... no more space left on device */
++ if (p_fmt_frame == NULL) {
++ _fmt_err("no space left on device!\n");
++ goto _test_and_steal_frame_return_true;
++ }
++
++ memset(p_fmt_frame, 0, sizeof(struct fmt_frame_s));
++ p_fmt_frame->buff.p_data = kmalloc(size * sizeof(uint8_t), GFP_KERNEL);
++
++ /* No more space left on device*/
++ if (p_fmt_frame->buff.p_data == NULL) {
++ _fmt_err("no space left on device!\n");
++ kfree(p_fmt_frame);
++ goto _test_and_steal_frame_return_true;
++ }
++
++ p_fmt_frame->buff.size = size-data_offset;
++ p_fmt_frame->buff.qid = fqid;
++
++ memcpy(p_fmt_frame->buff.p_data,
++ (uint8_t *)PTR_MOVE(buffer, data_offset),
++ p_fmt_frame->buff.size);
++
++ memcpy(p_fmt_frame->buff.buff_context.fm_prs_res,
++ FM_PORT_GetBufferPrsResult(fmt_port->p_rx_fm_port_dev,
++ (char *)buffer),
++ 32);
++
++ /* enqueue frame - this frame will go to us */
++ enqueue_fmt_frame(fmt_port, p_fmt_frame);
++
++_test_and_steal_frame_return_true:
++ return true;
++}
++
++static int fmt_fq_release(const struct qm_fd *fd)
++{
++ struct dpa_bp *_dpa_bp;
++ struct bm_buffer _bmb;
++
++ if (fd->format == qm_fd_contig) {
++ _dpa_bp = dpa_bpid2pool(fd->bpid);
++ BUG_ON(IS_ERR(_dpa_bp));
++
++ _bmb.hi = fd->addr_hi;
++ _bmb.lo = fd->addr_lo;
++
++ while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
++ cpu_relax();
++
++ } else {
++ _fmt_err("frame not supported !\n");
++ return -1;
++ }
++
++ return 0;
++}
++
++/* sync it w/ dpaa_eth.c: DPA_BP_HEAD */
++#define DPA_BP_HEADROOM (DPA_TX_PRIV_DATA_SIZE + \
++ fm_get_rx_extra_headroom() + \
++ DPA_PARSE_RESULTS_SIZE + \
++ DPA_HASH_RESULTS_SIZE)
++#define MAC_HEADER_LENGTH 14
++#define L2_AND_HEADROOM_OFF ((DPA_BP_HEADROOM) + (MAC_HEADER_LENGTH))
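++/*
++ * The rx hooks receive skb->data already advanced to L3; stepping back
++ * by L2_AND_HEADROOM_OFF recovers the start of the hardware buffer
++ * (buffer-pool headroom plus the Ethernet header).
++ */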
++
++/* dpa ingress hooks definition */
++enum dpaa_eth_hook_result fmt_rx_default_hook(
++ struct sk_buff *skb,
++ struct net_device *net_dev,
++ u32 fqid)
++{
++ struct dpa_priv_s *dpa_priv = NULL;
++ struct fmt_port_s *fmt_port = NULL;
++ uint8_t *buffer;
++ uint32_t buffer_len;
++
++ _fmt_dbgr("calling...\n");
++
++ dpa_priv = netdev_priv(net_dev);
++ fmt_port = match_dpa_to_fmt_port(dpa_priv);
++
++ /* conversion from skb to fd:
++ * skb comes processed for L3, so we need to go back to the
++ * layer 2 offset */
++ buffer = (uint8_t *)(skb->data - ((int)L2_AND_HEADROOM_OFF));
++ buffer_len = skb->len + ((int)L2_AND_HEADROOM_OFF);
++
++ /* if it is not our frame, let dpa handle it */
++ if (test_and_steal_frame(fmt_port,
++ FMT_RX_DFLT_Q,
++ buffer,
++ buffer_len))
++ goto _fmt_rx_default_hook_stolen;
++
++ _fmt_dbgr("called:DPAA_ETH_CONTINUE.\n");
++ return DPAA_ETH_CONTINUE;
++
++_fmt_rx_default_hook_stolen:
++ dev_kfree_skb(skb);
++
++ _fmt_dbgr("called:DPAA_ETH_STOLEN.\n");
++ return DPAA_ETH_STOLEN;
++}
++
++enum dpaa_eth_hook_result fmt_rx_error_hook(
++ struct net_device *net_dev,
++ const struct qm_fd *fd,
++ u32 fqid)
++{
++ struct dpa_priv_s *dpa_priv = NULL;
++ struct dpa_bp *dpa_bp = NULL;
++ struct fmt_port_s *fmt_port = NULL;
++ void *fd_virt_addr = NULL;
++ dma_addr_t addr = qm_fd_addr(fd);
++
++ _fmt_dbgr("calling...\n");
++
++ dpa_priv = netdev_priv(net_dev);
++ fmt_port = match_dpa_to_fmt_port(dpa_priv);
++
++ /* dpaa doesn't do this... we have to do it here */
++ dpa_bp = dpa_bpid2pool(fd->bpid);
++ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
++
++ fd_virt_addr = phys_to_virt(addr);
++ /* if it is not our frame, let dpa handle it */
++ if (test_and_steal_frame(fmt_port,
++ FMT_RX_ERR_Q,
++ fd_virt_addr,
++ fd->length20 + fd->offset)) {
++ goto _fmt_rx_error_hook_stolen;
++ }
++
++ _fmt_dbgr("called:DPAA_ETH_CONTINUE.\n");
++ return DPAA_ETH_CONTINUE;
++
++_fmt_rx_error_hook_stolen:
++ /* the frame data doesn't matter,
++ * so no mapping is needed */
++ fmt_fq_release(fd);
++
++ _fmt_dbgr("called:DPAA_ETH_STOLEN.\n");
++ return DPAA_ETH_STOLEN;
++}
++
++enum dpaa_eth_hook_result fmt_tx_confirm_hook(
++ struct net_device *net_dev,
++ const struct qm_fd *fd,
++ u32 fqid)
++{
++ struct dpa_priv_s *dpa_priv = NULL;
++ struct fmt_port_s *fmt_port = NULL;
++ dma_addr_t addr = qm_fd_addr(fd);
++ void *fd_virt_addr = NULL;
++ uint32_t fd_len = 0;
++
++ _fmt_dbgr("calling...\n");
++
++ dpa_priv = netdev_priv(net_dev);
++ fmt_port = match_dpa_to_fmt_port(dpa_priv);
++
++ fd_virt_addr = phys_to_virt(addr);
++ fd_len = fd->length20 + fd->offset;
++
++ if (fd_len > fm_get_max_frm()) {
++ _fmt_err("tx confirm bad frame size: %u!\n", fd_len);
++ goto _fmt_tx_confirm_hook_continue;
++ }
++
++ if (test_and_steal_frame(fmt_port,
++ FMT_TX_CONF_Q,
++ fd_virt_addr,
++ fd_len))
++ goto _fmt_tx_confirm_hook_stolen;
++
++_fmt_tx_confirm_hook_continue:
++ _fmt_dbgr("called:DPAA_ETH_CONTINUE.\n");
++ return DPAA_ETH_CONTINUE;
++
++_fmt_tx_confirm_hook_stolen:
++ kfree(fd_virt_addr);
++
++ _fmt_dbgr("called:DPAA_ETH_STOLEN.\n");
++ return DPAA_ETH_STOLEN;
++}
++
++enum dpaa_eth_hook_result fmt_tx_confirm_error_hook(
++ struct net_device *net_dev,
++ const struct qm_fd *fd,
++ u32 fqid)
++{
++ struct dpa_priv_s *dpa_priv = NULL;
++ struct fmt_port_s *fmt_port = NULL;
++ dma_addr_t addr = qm_fd_addr(fd);
++ void *fd_virt_addr = NULL;
++ uint32_t fd_len = 0;
++
++ _fmt_dbgr("calling...\n");
++
++ dpa_priv = netdev_priv(net_dev);
++ fmt_port = match_dpa_to_fmt_port(dpa_priv);
++
++ fd_virt_addr = phys_to_virt(addr);
++ fd_len = fd->length20 + fd->offset;
++
++ if (fd_len > fm_get_max_frm()) {
++ _fmt_err("tx confirm err bad frame size: %u !\n", fd_len);
++ goto _priv_ingress_tx_err_continue;
++ }
++
++ if (test_and_steal_frame(fmt_port, FMT_TX_ERR_Q, fd_virt_addr, fd_len))
++ goto _priv_ingress_tx_err_stolen;
++
++_priv_ingress_tx_err_continue:
++ _fmt_dbgr("called:DPAA_ETH_CONTINUE.\n");
++ return DPAA_ETH_CONTINUE;
++
++_priv_ingress_tx_err_stolen:
++ kfree(fd_virt_addr);
++
++ _fmt_dbgr("called:DPAA_ETH_STOLEN.\n");
++ return DPAA_ETH_STOLEN;
++}
++
++/* egress callbacks definition */
++enum qman_cb_dqrr_result fmt_egress_dqrr(
++ struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dqrr)
++{
++ /* this callback should never be called */
++ BUG();
++ return qman_cb_dqrr_consume;
++}
++
++static void fmt_egress_error_dqrr(
++ struct qman_portal *p,
++ struct qman_fq *fq,
++ const struct qm_mr_entry *msg)
++{
++ uint8_t *fd_virt_addr = NULL;
++
++ /* tx failure, on the ern callback - release buffer */
++ fd_virt_addr = (uint8_t *)phys_to_virt(qm_fd_addr(&msg->ern.fd));
++ kfree(fd_virt_addr);
++
++ return;
++}
++
++static const struct qman_fq fmt_egress_fq = {
++ .cb = { .dqrr = fmt_egress_dqrr,
++ .ern = fmt_egress_error_dqrr,
++ .fqs = NULL}
++};
++
++int fmt_fq_alloc(
++ struct fmt_fqs_s *fmt_fqs,
++ const struct qman_fq *qman_fq,
++ uint32_t fqid, uint32_t flags,
++ uint16_t channel, uint8_t wq)
++{
++ int _errno = 0;
++
++ _fmt_dbg("calling...\n");
++
++ fmt_fqs->fq_base = *qman_fq;
++
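++ /* fqid 0 requests a dynamically allocated FQID from QMan; such
++ * a queue must also be modifiable so it can be initialized below */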
++ if (fqid == 0) {
++ flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
++ flags &= ~QMAN_FQ_FLAG_NO_MODIFY;
++ } else
++ flags &= ~QMAN_FQ_FLAG_DYNAMIC_FQID;
++
++ fmt_fqs->init = !(flags & QMAN_FQ_FLAG_NO_MODIFY);
++
++ _errno = qman_create_fq(fqid, flags, &fmt_fqs->fq_base);
++ if (_errno < 0) {
++ _fmt_err("frame queues create failed.\n");
++ return -EINVAL;
++ }
++
++ if (fmt_fqs->init) {
++ struct qm_mcc_initfq initfq;
++
++ initfq.we_mask = QM_INITFQ_WE_DESTWQ;
++ initfq.fqd.dest.channel = channel;
++ initfq.fqd.dest.wq = wq;
++
++ _errno = qman_init_fq(&fmt_fqs->fq_base,
++ QMAN_INITFQ_FLAG_SCHED,
++ &initfq);
++ if (_errno < 0) {
++ _fmt_err("frame queues init erorr.\n");
++ qman_destroy_fq(&fmt_fqs->fq_base, 0);
++ return -EINVAL;
++ }
++ }
++
++ _fmt_dbg("called.\n");
++ return 0;
++}
++
++static int fmt_fq_free(struct fmt_fqs_s *fmt_fq)
++{
++ int _err = 0;
++
++ _fmt_dbg("calling...\n");
++
++ if (fmt_fq->init) {
++ _err = qman_retire_fq(&fmt_fq->fq_base, NULL);
++ if (unlikely(_err < 0))
++ _fmt_err("qman_retire_fq(%u) = %d\n",
++ qman_fq_fqid(&fmt_fq->fq_base), _err);
++
++ _err = qman_oos_fq(&fmt_fq->fq_base);
++ if (unlikely(_err < 0))
++ _fmt_err("qman_oos_fq(%u) = %d\n",
++ qman_fq_fqid(&fmt_fq->fq_base), _err);
++ }
++
++ qman_destroy_fq(&fmt_fq->fq_base, 0);
++
++ _fmt_dbg("called.\n");
++ return _err;
++}
++
++/* private pcd dqrr callbacks */
++static enum qman_cb_dqrr_result fmt_pcd_dqrr(
++ struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dq)
++{
++ struct dpa_bp *dpa_bp = NULL;
++ dma_addr_t addr = qm_fd_addr(&dq->fd);
++ uint8_t *fd_virt_addr = NULL;
++ struct fmt_port_s *fmt_port;
++ struct fmt_port_pcd_s *fmt_port_pcd;
++ uint32_t relative_fqid = 0;
++ uint32_t fd_len = 0;
++
++ _fmt_dbgr("calling...\n");
++
++ /* upcast - from pcd_alloc_fq */
++ fmt_port = ((struct fmt_fqs_s *)fq)->fmt_port_priv;
++ if (!fmt_port) {
++ _fmt_err(" wrong fmt port -to- fq match.\n");
++ goto _fmt_pcd_dqrr_return;
++ }
++ fmt_port_pcd = &fmt_port->fmt_port_pcd;
++
++ relative_fqid = dq->fqid - fmt_port_pcd->fqid_base;
++ _fmt_dbgr("pcd dqrr got frame on relative fq:%u@base:%u\n",
++ relative_fqid, fmt_port_pcd->fqid_base);
++
++ fd_len = dq->fd.length20 + dq->fd.offset;
++
++ if (fd_len > fm_get_max_frm()) {
++ _fmt_err("pcd dqrr wrong frame size: %u (%u:%u)!\n",
++ fd_len, dq->fd.length20, dq->fd.offset);
++ goto _fmt_pcd_dqrr_return;
++ }
++
++ dpa_bp = dpa_bpid2pool(dq->fd.bpid);
++ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
++
++ fd_virt_addr = phys_to_virt(addr);
++ if (!test_and_steal_frame(fmt_port, relative_fqid, fd_virt_addr,
++ fd_len)) {
++
++#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
++ atomic_inc(&fmt_port->not_enqueue_to_rxq_wrong_frm);
++#endif
++ _fmt_wrn("pcd dqrr unrecognized frame@fqid: %u,"
++ " frame len: %u (dropped).\n",
++ dq->fqid, dq->fd.length20);
++ dump_frame(fd_virt_addr, fd_len);
++ }
++
++_fmt_pcd_dqrr_return:
++ /* no need to map again here */
++ fmt_fq_release(&dq->fd);
++
++ _fmt_dbgr("calle.\n");
++ return qman_cb_dqrr_consume;
++}
++
++static void fmt_pcd_err_dqrr(
++ struct qman_portal *qm,
++ struct qman_fq *fq,
++ const struct qm_mr_entry *msg)
++{
++ _fmt_err("this callback should never be called.\n");
++ BUG();
++ return;
++}
++
++static void fmt_pcd_fqs_dqrr(
++ struct qman_portal *qm,
++ struct qman_fq *fq,
++ const struct qm_mr_entry *msg)
++{
++ _fmt_dbg(" fq state(0x%x)@fqid(%u.\n", msg->fq.fqs, msg->fq.fqid);
++ return;
++}
++
++/* private pcd queue template */
++static const struct qman_fq pcd_fq = {
++ .cb = { .dqrr = fmt_pcd_dqrr,
++ .ern = fmt_pcd_err_dqrr,
++ .fqs = fmt_pcd_fqs_dqrr}
++};
++
++/* defined as weak in dpaa driver. */
++/* ! parameters come from IOCTL call - US */
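++/*
++ * This override of the weak dpaa symbol routes PCD FQ-range
++ * allocations, requested from user space via IOCTL, onto this
++ * module's private queues (serviced by fmt_pcd_dqrr() above).
++ */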
++int dpa_alloc_pcd_fqids(
++ struct device *dev,
++ uint32_t num, uint8_t alignment,
++ uint32_t *base_fqid)
++{
++ int _err = 0, i;
++ struct net_device *net_dev = NULL;
++ struct dpa_priv_s *dpa_priv = NULL;
++ struct fmt_port_pcd_s *fmt_port_pcd = NULL;
++ struct fmt_fqs_s *fmt_fqs = NULL;
++ struct fmt_port_s *fmt_port = NULL;
++ int num_allocated = 0;
++
++ _fmt_dbg("calling...\n");
++
++ net_dev = (typeof(net_dev))dev_get_drvdata(dev);
++ dpa_priv = (typeof(dpa_priv))netdev_priv(net_dev);
++
++ if (!netif_msg_probe(dpa_priv)) {
++ _fmt_err("dpa not probe.\n");
++ _err = -ENODEV;
++ goto _pcd_alloc_fqs_err;
++ }
++
++ fmt_port = match_dpa_to_fmt_port(dpa_priv);
++ if (!fmt_port) {
++ _fmt_err("fmt port not found.");
++ _err = -EINVAL;
++ goto _pcd_alloc_fqs_err;
++ }
++
++ fmt_port_pcd = &fmt_port->fmt_port_pcd;
++
++ num_allocated = qman_alloc_fqid_range(base_fqid, num, alignment, 0);
++
++ if ((num_allocated <= 0) ||
++ (num_allocated < num) ||
++ (alignment && (*base_fqid) % alignment)) {
++ *base_fqid = 0;
++ _fmt_err("Failed to alloc pcd fqs rang.\n");
++ _err = -EINVAL;
++ goto _pcd_alloc_fqs_err;
++ }
++
++ _fmt_dbg("wanted %d fqs(align %d), got %d fqids@%u.\n",
++ num, alignment, num_allocated, *base_fqid);
++
++ /* alloc pcd queues */
++ fmt_port_pcd->fmt_pcd_fqs = kmalloc(num_allocated *
++ sizeof(struct fmt_fqs_s),
++ GFP_KERNEL);
++ if (!fmt_port_pcd->fmt_pcd_fqs) {
++ _fmt_err("no memory for pcd fqs.\n");
++ _err = -ENOMEM;
++ goto _pcd_alloc_fqs_err;
++ }
++ fmt_port_pcd->num_queues = num_allocated;
++ fmt_port_pcd->fqid_base = *base_fqid;
++ fmt_fqs = fmt_port_pcd->fmt_pcd_fqs;
++
++ /* alloc the pcd queues */
++ for (i = 0; i < num_allocated; i++, fmt_fqs++) {
++ _err = fmt_fq_alloc(
++ fmt_fqs,
++ &pcd_fq,
++ (*base_fqid) + i, QMAN_FQ_FLAG_NO_ENQUEUE,
++ dpa_priv->channel, 7);
++
++ if (_err < 0)
++ goto _pcd_alloc_fqs_err;
++
++ /* upcast to identify from where the frames came from */
++ fmt_fqs->fmt_port_priv = fmt_port;
++ }
++
++ _fmt_dbg("called.\n");
++ return _err;
++_pcd_alloc_fqs_err:
++ if (num_allocated > 0)
++ qman_release_fqid_range(*base_fqid, num_allocated);
++ /* TODO: free fmt_pcd_fqs if there are any */
++
++ _fmt_dbg("called(_err:%d).\n", _err);
++ return _err;
++}
++
++/* defined as weak in dpaa driver. */
++int dpa_free_pcd_fqids(
++ struct device *dev,
++ uint32_t base_fqid)
++{
++
++ int _err = 0, i;
++ struct net_device *net_dev = NULL;
++ struct dpa_priv_s *dpa_priv = NULL;
++ struct fmt_port_pcd_s *fmt_port_pcd = NULL;
++ struct fmt_fqs_s *fmt_fqs = NULL;
++ struct fmt_port_s *fmt_port = NULL;
++ int num_allocated = 0;
++
++ _fmt_dbg("calling...\n");
++
++ net_dev = (typeof(net_dev))dev_get_drvdata(dev);
++ dpa_priv = (typeof(dpa_priv))netdev_priv(net_dev);
++
++ if (!netif_msg_probe(dpa_priv)) {
++ _fmt_err("dpa not probe.\n");
++ _err = -ENODEV;
++ goto _pcd_free_fqs_err;
++ }
++
++ fmt_port = match_dpa_to_fmt_port(dpa_priv);
++ if (!fmt_port) {
++ _fmt_err("fmt port not found.");
++ _err = -EINVAL;
++ goto _pcd_free_fqs_err;
++ }
++
++ fmt_port_pcd = &fmt_port->fmt_port_pcd;
++ num_allocated = fmt_port_pcd->num_queues;
++ fmt_fqs = fmt_port_pcd->fmt_pcd_fqs;
++
++ for (i = 0; i < num_allocated; i++, fmt_fqs++)
++ fmt_fq_free(fmt_fqs);
++
++ qman_release_fqid_range(base_fqid, num_allocated);
++
++ kfree(fmt_port_pcd->fmt_pcd_fqs);
++ memset(fmt_port_pcd, 0, sizeof(*fmt_port_pcd));
++
++ /* debugging stuff */
++#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
++ _fmt_dbg(" portid: %u.\n", fmt_port->id);
++ _fmt_dbg(" frames enqueue to qman: %u.\n",
++ atomic_read(&fmt_port->enqueue_to_qman_frm));
++ _fmt_dbg(" frames enqueue to rxq: %u.\n",
++ atomic_read(&fmt_port->enqueue_to_rxq));
++ _fmt_dbg(" frames dequeue from rxq: %u.\n",
++ atomic_read(&fmt_port->dequeue_from_rxq));
++ _fmt_dbg(" frames not enqueue to rxq - wrong frm: %u.\n",
++ atomic_read(&fmt_port->not_enqueue_to_rxq_wrong_frm));
++ atomic_set(&fmt_port->enqueue_to_qman_frm, 0);
++ atomic_set(&fmt_port->enqueue_to_rxq, 0);
++ atomic_set(&fmt_port->dequeue_from_rxq, 0);
++ atomic_set(&fmt_port->not_enqueue_to_rxq_wrong_frm, 0);
++#endif
++ return 0;
++
++_pcd_free_fqs_err:
++ return _err;
++}
++
++static int fmt_port_init(
++ struct fmt_port_s *fmt_port,
++ ioc_fmt_port_param_t *p_Params)
++{
++ struct device_node *fm_node, *fm_port_node;
++ const uint32_t *uint32_prop;
++ int _errno = 0, lenp = 0, i;
++ static struct of_device_id fm_node_of_match[] = {
++ { .compatible = "fsl,fman", },
++ { /* end of list */ },
++ };
++
++ _fmt_dbg("calling...\n");
++
++ /* init send/receive to user space list */
++ INIT_LIST_HEAD(&fmt_port->rx_q);
++
++ /* check parameters */
++ if (p_Params->num_tx_queues > FMAN_TEST_MAX_TX_FQS ||
++ p_Params->fm_port_id > IOC_FMT_MAX_NUM_OF_PORTS) {
++ _fmt_dbg("wrong test parameters.\n");
++ return -EINVAL;
++ }
++
++ /* set port parameters */
++ fmt_port->num_of_tx_fqs = p_Params->num_tx_queues;
++ fmt_port->id = p_Params->fm_port_id;
++ fmt_port->port_type = p_Params->fm_port_type;
++ fmt_port->diag = e_IOC_DIAG_MODE_NONE;
++
++ /* init debugging stuff */
++#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
++ atomic_set(&fmt_port->enqueue_to_qman_frm, 0);
++ atomic_set(&fmt_port->enqueue_to_rxq, 0);
++ atomic_set(&fmt_port->dequeue_from_rxq, 0);
++ atomic_set(&fmt_port->not_enqueue_to_rxq_wrong_frm, 0);
++#endif
++
++ /* TODO: This should be done at probe time not at runtime
++ * very ugly function */
++ /* fill fmt port properties from dts */
++ for_each_matching_node(fm_node, fm_node_of_match) {
++
++ uint32_prop = (uint32_t *)of_get_property(fm_node,
++ "cell-index", &lenp);
++ if (unlikely(uint32_prop == NULL)) {
++ _fmt_wrn("of_get_property(%s, cell-index) invalid",
++ fm_node->full_name);
++ return -EINVAL;
++ }
++ if (WARN_ON(lenp != sizeof(uint32_t))) {
++ _fmt_wrn("of_get_property(%s, cell-index) invalid",
++ fm_node->full_name);
++ return -EINVAL;
++ }
++
++ if (*uint32_prop == p_Params->fm_id) {
++ struct resource res;
++
++ /* Get the FM address */
++ _errno = of_address_to_resource(fm_node, 0, &res);
++ if (unlikely(_errno < 0)) {
++ _fmt_wrn("of_address_to_resource() = %u.\n", _errno);
++ return -EINVAL;
++ }
++
++ fmt_port->fm_phys_base_addr = res.start;
++
++ for_each_child_of_node(fm_node, fm_port_node) {
++ struct platform_device *of_dev;
++
++ if (!of_device_is_available(fm_port_node))
++ continue;
++
++ uint32_prop = (uint32_t *)of_get_property(
++ fm_port_node,
++ "cell-index",
++ &lenp);
++ if (uint32_prop == NULL)
++ continue;
++
++ if (of_device_is_compatible(fm_port_node,
++ "fsl,fman-port-oh") &&
++ (fmt_port->port_type == e_IOC_FMT_PORT_T_OP)) {
++
++ if (*uint32_prop == fmt_port->id) {
++ of_dev = of_find_device_by_node(fm_port_node);
++ if (unlikely(of_dev == NULL)) {
++ _fmt_wrn("fm id invalid\n");
++ return -EINVAL;
++ }
++
++ fmt_port->p_tx_port =
++ fm_port_bind(&of_dev->dev);
++ fmt_port->p_tx_fm_port_dev =
++ (void *)fm_port_get_handle(
++ fmt_port->p_tx_port);
++ fmt_port->p_rx_port =
++ fmt_port->p_tx_port;
++ fmt_port->p_rx_fm_port_dev =
++ fmt_port->p_tx_fm_port_dev;
++ fmt_port->p_mac_dev = NULL;
++ break;
++ }
++ } else if ((*uint32_prop == fmt_port->id) &&
++ fmt_port->port_type == e_IOC_FMT_PORT_T_RXTX) {
++
++ of_dev = of_find_device_by_node(fm_port_node);
++ if (unlikely(of_dev == NULL)) {
++ _fmt_wrn("dtb fm id invalid value");
++ return -EINVAL;
++ }
++
++ if (of_device_is_compatible(fm_port_node,
++ "fsl,fman-port-1g-tx")) {
++ fmt_port->p_tx_port =
++ fm_port_bind(&of_dev->dev);
++ fmt_port->p_tx_fm_port_dev = (void *)
++ fm_port_get_handle(
++ fmt_port->p_tx_port);
++ } else if (of_device_is_compatible(fm_port_node,
++ "fsl,fman-port-1g-rx")) {
++ fmt_port->p_rx_port =
++ fm_port_bind(&of_dev->dev);
++ fmt_port->p_rx_fm_port_dev = (void *)
++ fm_port_get_handle(
++ fmt_port->p_rx_port);
++ } else if (of_device_is_compatible(fm_port_node,
++ "fsl,fman-1g-mac") ||
++ of_device_is_compatible(fm_port_node,
++ "fsl,fman-memac"))
++ fmt_port->p_mac_dev =
++ (typeof(fmt_port->p_mac_dev))
++ dev_get_drvdata(&of_dev->dev);
++ else
++ continue;
++
++ if (fmt_port->p_tx_fm_port_dev &&
++ fmt_port->p_rx_fm_port_dev && fmt_port->p_mac_dev)
++ break;
++ } else if (((*uint32_prop + FM_MAX_NUM_OF_1G_RX_PORTS) ==
++ fmt_port->id) &&
++ fmt_port->port_type == e_IOC_FMT_PORT_T_RXTX) {
++
++ of_dev = of_find_device_by_node(fm_port_node);
++ if (unlikely(of_dev == NULL)) {
++ _fmt_wrn("dtb fm id invalid value\n");
++ return -EINVAL;
++ }
++
++ if (of_device_is_compatible(fm_port_node,
++ "fsl,fman-port-10g-tx")) {
++ fmt_port->p_tx_port =
++ fm_port_bind(&of_dev->dev);
++ fmt_port->p_tx_fm_port_dev = (void *)
++ fm_port_get_handle(
++ fmt_port->p_tx_port);
++ } else if (of_device_is_compatible(fm_port_node,
++ "fsl,fman-port-10g-rx")) {
++ fmt_port->p_rx_port =
++ fm_port_bind(&of_dev->dev);
++ fmt_port->p_rx_fm_port_dev = (void *)
++ fm_port_get_handle(
++ fmt_port->p_rx_port);
++ } else if (of_device_is_compatible(fm_port_node,
++ "fsl,fman-10g-mac") ||
++ of_device_is_compatible(fm_port_node,
++ "fsl,fman-memac"))
++ fmt_port->p_mac_dev =
++ (typeof(fmt_port->p_mac_dev))
++ dev_get_drvdata(&of_dev->dev);
++ else
++ continue;
++
++ if (fmt_port->p_tx_fm_port_dev &&
++ fmt_port->p_rx_fm_port_dev && fmt_port->p_mac_dev)
++ break;
++ }
++ } /* for_each_child */
++ }
++ } /* for each matching node */
++
++ if (fmt_port->p_tx_fm_port_dev == 0 ||
++ fmt_port->p_rx_fm_port_dev == 0) {
++
++ _fmt_err("bad fm port pointers.\n");
++ return -EINVAL;
++ }
++
++ _fmt_dbg("alloc %u tx queues.\n", fmt_port->num_of_tx_fqs);
++
++ /* init fman test egress dynamic frame queues */
++ for (i = 0; i < fmt_port->num_of_tx_fqs; i++) {
++ int _errno;
++ _errno = fmt_fq_alloc(
++ &fmt_port->p_tx_fqs[i],
++ &fmt_egress_fq,
++ 0,
++ QMAN_FQ_FLAG_TO_DCPORTAL,
++ fm_get_tx_port_channel(fmt_port->p_tx_port),
++ i);
++
++ if (_errno < 0) {
++ _fmt_err("tx queues allocation failed.\n");
++ /* TODO: memory leak here if 1 queue is allocated and
++ * next queues are failing ... */
++ return -EINVAL;
++ }
++ }
++
++ /* port is valid and ready to use. */
++ fmt_port->valid = TRUE;
++
++ _fmt_dbg("called.\n");
++ return 0;
++}
++
++/* fm test chardev functions */
++static int fmt_open(struct inode *inode, struct file *file)
++{
++ unsigned int minor = iminor(inode);
++
++ _fmt_dbg("calling...\n");
++
++ if (file->private_data != NULL)
++ return 0;
++
++ /* The minor represents the port number.
++ * Set the port structure accordingly, thus all the operations
++ * will be done on this port. */
++ if ((minor >= DEV_FM_TEST_PORTS_MINOR_BASE) &&
++ (minor < DEV_FM_TEST_MAX_MINORS))
++ file->private_data = &fm_test.ports[minor];
++ else
++ return -ENXIO;
++
++ _fmt_dbg("called.\n");
++ return 0;
++}
++
++static int fmt_close(struct inode *inode, struct file *file)
++{
++ struct fmt_port_s *fmt_port = NULL;
++ struct fmt_frame_s *fmt_frame = NULL;
++
++ int err = 0;
++
++ _fmt_dbg("calling...\n");
++
++ fmt_port = file->private_data;
++ if (!fmt_port)
++ return -ENODEV;
++
++ /* Close the current test port by invalidating it. */
++ fmt_port->valid = FALSE;
++
++ /* clean the fmt port queue */
++ /* the loop condition guarantees fmt_frame != NULL and
++ * kfree(NULL) is a no-op, so free both unconditionally */
++ while ((fmt_frame = dequeue_fmt_frame(fmt_port)) != NULL) {
++ kfree(fmt_frame->buff.p_data);
++ kfree(fmt_frame);
++ }
++
++ /* !!! the qman queues are cleaned from fm_ioctl...
++ * - very ugly */
++
++ _fmt_dbg("called.\n");
++ return err;
++}
++
++static int fmt_ioctls(unsigned int minor,
++ struct file *file,
++ unsigned int cmd,
++ unsigned long arg,
++ bool compat)
++{
++ struct fmt_port_s *fmt_port = NULL;
++
++ _fmt_dbg("IOCTL minor:%u "
++ " arg:0x%08lx ioctl cmd (0x%08x):(0x%02x:0x%02x.\n",
++ minor, arg, cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
++
++ fmt_port = file->private_data;
++ if (!fmt_port) {
++ _fmt_err("invalid fmt port.\n");
++ return -ENODEV;
++ }
++
++ /* set test type properly */
++ if (compat)
++ fmt_port->compat_test_type = true;
++ else
++ fmt_port->compat_test_type = false;
++
++ switch (cmd) {
++ case FMT_PORT_IOC_INIT:
++ {
++ ioc_fmt_port_param_t param;
++
++ if (fmt_port->valid) {
++ _fmt_wrn("port is already initialized.\n");
++ return -EFAULT;
++ }
++#if defined(CONFIG_COMPAT)
++ if (compat) {
++ if (copy_from_user(&param,
++ (ioc_fmt_port_param_t *)compat_ptr(arg),
++ sizeof(ioc_fmt_port_param_t)))
++
++ return -EFAULT;
++ } else
++#endif
++ {
++ if (copy_from_user(&param,
++ (ioc_fmt_port_param_t *) arg,
++ sizeof(ioc_fmt_port_param_t)))
++
++ return -EFAULT;
++ }
++
++ return fmt_port_init(fmt_port, &param);
++ }
++
++ case FMT_PORT_IOC_SET_DIAG_MODE:
++ if (get_user(fmt_port->diag, (ioc_diag_mode *)arg))
++ return -EFAULT;
++
++ if (fmt_port->diag == e_IOC_DIAG_MODE_CTRL_LOOPBACK)
++ return set_mac_int_loopback(fmt_port, TRUE);
++ else
++ return set_mac_int_loopback(fmt_port, FALSE);
++ break;
++
++ case FMT_PORT_IOC_SET_DPAECHO_MODE:
++ case FMT_PORT_IOC_SET_IP_HEADER_MANIP:
++ default:
++ _fmt_wrn("ioctl unimplemented minor:%u@ioctl"
++ " cmd:0x%08x(type:0x%02x, nr:0x%02x.\n",
++ minor, cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_COMPAT
++static long fmt_compat_ioctl(
++ struct file *file,
++ unsigned int cmd,
++ unsigned long arg)
++{
++ unsigned int minor = iminor(file->f_path.dentry->d_inode);
++
++ _fmt_dbg("calling...\n");
++ return fmt_ioctls(minor, file, cmd, arg, true);
++}
++#endif
++
++static long fmt_ioctl(
++ struct file *file,
++ unsigned int cmd,
++ unsigned long arg)
++{
++ unsigned int minor = iminor(file->f_path.dentry->d_inode);
++ int res;
++
++ _fmt_dbg("calling...\n");
++
++ fm_mutex_lock();
++ res = fmt_ioctls(minor, file, cmd, arg, false);
++ fm_mutex_unlock();
++
++ _fmt_dbg("called.\n");
++
++ return res;
++}
++
++#ifdef CONFIG_COMPAT
++void copy_compat_test_frame_buffer(
++ ioc_fmt_buff_desc_t *buff,
++ ioc_fmt_compat_buff_desc_t *compat_buff)
++{
++ compat_buff->qid = buff->qid;
++ compat_buff->p_data = ptr_to_compat(buff->p_data);
++ compat_buff->size = buff->size;
++ compat_buff->status = buff->status;
++
++ compat_buff->buff_context.p_user_priv =
++ ptr_to_compat(buff->buff_context.p_user_priv);
++ memcpy(compat_buff->buff_context.fm_prs_res,
++ buff->buff_context.fm_prs_res,
++ FM_PRS_MAX * sizeof(uint8_t));
++ memcpy(compat_buff->buff_context.fm_time_stamp,
++ buff->buff_context.fm_time_stamp,
++ FM_TIME_STAMP_MAX * sizeof(uint8_t));
++}
++#endif
++
++ssize_t fmt_read(
++ struct file *file,
++ char __user *buf,
++ size_t size,
++ loff_t *ppos)
++{
++ struct fmt_port_s *fmt_port = NULL;
++ struct fmt_frame_s *p_fmt_frame = NULL;
++ ssize_t cnt = 0;
++
++ fmt_port = file->private_data;
++ if (!fmt_port || !fmt_port->valid) {
++ _fmt_err("fmt port not valid!\n");
++ return -ENODEV;
++ }
++
++ p_fmt_frame = dequeue_fmt_frame(fmt_port);
++ if (p_fmt_frame == NULL)
++ return 0;
++
++ _fmt_dbgr("calling...\n");
++
++#ifdef CONFIG_COMPAT
++ if (fmt_port->compat_test_type) {
++ cnt = sizeof(ioc_fmt_compat_buff_desc_t);
++ }
++ else
++#endif
++ {
++ cnt = sizeof(ioc_fmt_buff_desc_t);
++ }
++
++ if (size < cnt) {
++ _fmt_err("illegal buffer-size!\n");
++ cnt = 0;
++ goto _fmt_read_return;
++ }
++
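++ /* user buffer layout: the buff descriptor first, immediately
++ * followed by the frame data; the p_data field of the descriptor
++ * copied to user space is patched to point just past it
++ * (note: written with a direct user-space store) */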
++ /* Copy structure */
++#ifdef CONFIG_COMPAT
++ if (fmt_port->compat_test_type) {
++ {
++ ioc_fmt_compat_buff_desc_t compat_buff;
++ copy_compat_test_frame_buffer(&p_fmt_frame->buff,
++ &compat_buff);
++
++ if (copy_to_user(buf, &compat_buff, cnt)) {
++ _fmt_err("copy_to_user failed!\n");
++ goto _fmt_read_return;
++ }
++ }
++
++ ((ioc_fmt_compat_buff_desc_t *)buf)->p_data =
++ ptr_to_compat(buf+sizeof(ioc_fmt_compat_buff_desc_t));
++ cnt += MIN(p_fmt_frame->buff.size, size-cnt);
++ } else
++#endif
++ {
++ if (copy_to_user(buf, &p_fmt_frame->buff, cnt)) {
++ _fmt_err("copy_to_user failed!\n");
++ goto _fmt_read_return;
++ }
++
++ ((ioc_fmt_buff_desc_t *)buf)->p_data =
++ buf + sizeof(ioc_fmt_buff_desc_t);
++ cnt += MIN(p_fmt_frame->buff.size, size-cnt);
++ }
++
++ if (size < cnt) {
++ _fmt_err("illegal buffer-size!\n");
++ goto _fmt_read_return;
++ }
++
++ /* copy frame data; cnt includes the descriptor, so subtract
++ * its size to get the number of payload bytes */
++#ifdef CONFIG_COMPAT
++ if (fmt_port->compat_test_type) {
++ if (copy_to_user(buf+sizeof(ioc_fmt_compat_buff_desc_t),
++ p_fmt_frame->buff.p_data,
++ cnt - sizeof(ioc_fmt_compat_buff_desc_t))) {
++ _fmt_err("copy_to_user failed!\n");
++ goto _fmt_read_return;
++ }
++ } else
++#endif
++ {
++ if (copy_to_user(buf+sizeof(ioc_fmt_buff_desc_t),
++ p_fmt_frame->buff.p_data,
++ cnt - sizeof(ioc_fmt_buff_desc_t))) {
++ _fmt_err("copy_to_user failed!\n");
++ goto _fmt_read_return;
++ }
++ }
++
++_fmt_read_return:
++ kfree(p_fmt_frame->buff.p_data);
++ kfree(p_fmt_frame);
++
++ _fmt_dbgr("called.\n");
++ return cnt;
++}
++
++ssize_t fmt_write(
++ struct file *file,
++ const char __user *buf,
++ size_t size,
++ loff_t *ppos)
++{
++ struct fmt_port_s *fmt_port = NULL;
++ ioc_fmt_buff_desc_t buff_desc;
++#ifdef CONFIG_COMPAT
++ ioc_fmt_compat_buff_desc_t buff_desc_compat;
++#endif
++ uint8_t *p_data = NULL;
++ uint32_t data_offset;
++ int _errno;
++ t_DpaaFD fd;
++
++ _fmt_dbgr("calling...\n");
++
++ fmt_port = file->private_data;
++ if (!fmt_port || !fmt_port->valid) {
++ _fmt_err("fmt port not valid.\n");
++ return -EINVAL;
++ }
++
++ /* If Compat (32B UserSpace - 64B KernelSpace) */
++#ifdef CONFIG_COMPAT
++ if (fmt_port->compat_test_type) {
++ if (size < sizeof(ioc_fmt_compat_buff_desc_t)) {
++ _fmt_err("invalid buff_desc size.\n");
++ return -EFAULT;
++ }
++
++ if (copy_from_user(&buff_desc_compat, buf,
++ sizeof(ioc_fmt_compat_buff_desc_t)))
++ return -EFAULT;
++
++ buff_desc.qid = buff_desc_compat.qid;
++ buff_desc.p_data = compat_ptr(buff_desc_compat.p_data);
++ buff_desc.size = buff_desc_compat.size;
++ buff_desc.status = buff_desc_compat.status;
++
++ buff_desc.buff_context.p_user_priv =
++ compat_ptr(buff_desc_compat.buff_context.p_user_priv);
++ memcpy(buff_desc.buff_context.fm_prs_res,
++ buff_desc_compat.buff_context.fm_prs_res,
++ FM_PRS_MAX * sizeof(uint8_t));
++ memcpy(buff_desc.buff_context.fm_time_stamp,
++ buff_desc_compat.buff_context.fm_time_stamp,
++ FM_TIME_STAMP_MAX * sizeof(uint8_t));
++ } else
++#endif
++ {
++ if (size < sizeof(ioc_fmt_buff_desc_t)) {
++ _fmt_err("invalid buff_desc size.\n");
++ return -EFAULT;
++ }
++
++ if (copy_from_user(&buff_desc, (ioc_fmt_buff_desc_t *)buf,
++ sizeof(ioc_fmt_buff_desc_t)))
++ return -EFAULT;
++ }
++
++ data_offset = FM_PORT_GetBufferDataOffset(fmt_port->p_tx_fm_port_dev);
++ p_data = kmalloc(buff_desc.size+data_offset, GFP_KERNEL);
++ if (!p_data)
++ return -ENOMEM;
++
++ /* If Compat (32b user space - 64b kernel space), buff_desc.p_data is ok */
++ if (copy_from_user((uint8_t *)PTR_MOVE(p_data, data_offset),
++ buff_desc.p_data,
++ buff_desc.size)) {
++ kfree(p_data);
++ return -EFAULT;
++ }
++
++ /* TODO: dma_map_single here (cannot access the bpool struct) */
++
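++ /* build a contiguous frame descriptor over the kmalloc'ed buffer;
++ * once enqueued, the buffer belongs to QMan and is freed later in
++ * the tx-confirm hook or in the egress ern callback */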
++ /* prepare fd */
++ memset(&fd, 0, sizeof(fd));
++ DPAA_FD_SET_ADDR(&fd, p_data);
++ DPAA_FD_SET_OFFSET(&fd, data_offset);
++ DPAA_FD_SET_LENGTH(&fd, buff_desc.size);
++
++ _errno = qman_enqueue(&fmt_port->p_tx_fqs[buff_desc.qid].fq_base,
++ (struct qm_fd *)&fd, 0);
++ if (_errno) {
++ buff_desc.status = (uint32_t)_errno;
++ if (copy_to_user((ioc_fmt_buff_desc_t *)buf, &buff_desc,
++ sizeof(ioc_fmt_buff_desc_t))) {
++ kfree(p_data);
++ return -EFAULT;
++ }
++ }
++
++ /* for debugging */
++#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
++ atomic_inc(&fmt_port->enqueue_to_qman_frm);
++#endif
++ _fmt_dbgr("called.\n");
++ return buff_desc.size;
++}
++
++/* fm test character device definition */
++static const struct file_operations fmt_fops =
++{
++ .owner = THIS_MODULE,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = fmt_compat_ioctl,
++#endif
++ .unlocked_ioctl = fmt_ioctl,
++ .open = fmt_open,
++ .release = fmt_close,
++ .read = fmt_read,
++ .write = fmt_write,
++};
++
++static int fmt_init(void)
++{
++ int id;
++
++ _fmt_dbg("calling...\n");
++
++ /* Register to the /dev for IOCTL API */
++ /* Register dynamically a new major number for the character device: */
++ fm_test.major = register_chrdev(0, DEV_FM_TEST_NAME, &fmt_fops);
++ if (fm_test.major <= 0) {
++ _fmt_wrn("Failed to allocate major number for device %s.\n",
++ DEV_FM_TEST_NAME);
++ return -ENODEV;
++ }
++
++ /* Creating class for FMan_test */
++ fm_test.fmt_class = class_create(THIS_MODULE, DEV_FM_TEST_NAME);
++ if (IS_ERR(fm_test.fmt_class)) {
++ unregister_chrdev(fm_test.major, DEV_FM_TEST_NAME);
++ _fmt_wrn("Error creating %s class.\n", DEV_FM_TEST_NAME);
++ return -ENODEV;
++ }
++
++ for (id = 0; id < IOC_FMT_MAX_NUM_OF_PORTS; id++)
++ /* device_create() returns ERR_PTR() on failure, not NULL */
++ if (IS_ERR(device_create(fm_test.fmt_class, NULL,
++ MKDEV(fm_test.major,
++ DEV_FM_TEST_PORTS_MINOR_BASE + id), NULL,
++ DEV_FM_TEST_NAME "%d", id))) {
++
++ _fmt_err("Error creating %s device.\n",
++ DEV_FM_TEST_NAME);
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++static void fmt_free(void)
++{
++ int id;
++
++ for (id = 0; id < IOC_FMT_MAX_NUM_OF_PORTS; id++)
++ device_destroy(fm_test.fmt_class, MKDEV(fm_test.major,
++ DEV_FM_TEST_PORTS_MINOR_BASE + id));
++ class_destroy(fm_test.fmt_class);
++}
++
++static int __init __cold fmt_load(void)
++{
++ struct dpaa_eth_hooks_s priv_dpaa_eth_hooks;
++
++ /* set dpaa hooks for default queues */
++ memset(&priv_dpaa_eth_hooks, 0, sizeof(priv_dpaa_eth_hooks));
++ priv_dpaa_eth_hooks.rx_default = fmt_rx_default_hook;
++ priv_dpaa_eth_hooks.rx_error = fmt_rx_error_hook;
++ priv_dpaa_eth_hooks.tx_confirm = fmt_tx_confirm_hook;
++ priv_dpaa_eth_hooks.tx_error = fmt_tx_confirm_error_hook;
++
++ fsl_dpaa_eth_set_hooks(&priv_dpaa_eth_hooks);
++
++ /* initialize the fman test environment */
++ if (fmt_init() < 0) {
++ _fmt_err("Failed to init FM-test modul.\n");
++ fmt_free();
++ return -ENODEV;
++ }
++
++ _fmt_inf("FSL FM test module loaded.\n");
++
++ return 0;
++}
++
++static void __exit __cold fmt_unload(void)
++{
++ fmt_free();
++ _fmt_inf("FSL FM test module unloaded.\n");
++}
++
++module_init(fmt_load);
++module_exit(fmt_unload);
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c
+new file mode 100755
+index 00000000..31f654b4
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c
+@@ -0,0 +1,2908 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_fm.c
++ @Author Shlomi Gridish
++ @Description FM Linux wrapper functions.
++*/
++
++#include <linux/version.h>
++#include <linux/slab.h>
++#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++#ifdef MODVERSIONS
++#include <config/modversions.h>
++#endif /* MODVERSIONS */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/clk.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#ifndef CONFIG_FMAN_ARM
++#include <sysdev/fsl_soc.h>
++#include <linux/fsl/guts.h>
++#include <linux/fsl/svr.h>
++#endif
++#include <linux/stat.h> /* For file access mask */
++#include <linux/skbuff.h>
++#include <linux/proc_fs.h>
++
++/* NetCommSw Headers --------------- */
++#include "std_ext.h"
++#include "error_ext.h"
++#include "sprint_ext.h"
++#include "debug_ext.h"
++#include "sys_io_ext.h"
++
++#include "fm_ioctls.h"
++
++#include "lnxwrp_fm.h"
++#include "lnxwrp_resources.h"
++#include "lnxwrp_sysfs_fm.h"
++#include "lnxwrp_sysfs_fm_port.h"
++#include "lnxwrp_exp_sym.h"
++#include "fm_common.h"
++#include "../../sdk_fman/Peripherals/FM/fm.h"
++#define __ERR_MODULE__ MODULE_FM
++
++extern struct device_node *GetFmPortAdvArgsDevTreeNode (struct device_node *fm_node,
++ e_FmPortType portType,
++ uint8_t portId);
++
++#define PROC_PRINT(args...) offset += sprintf(buf+offset,args)
++
++#define ADD_ADV_CONFIG_NO_RET(_func, _param) \
++ do { \
++ if (i<max){ \
++ p_Entry = &p_Entrys[i]; \
++ p_Entry->p_Function = _func; \
++ _param \
++ i++; \
++ } \
++ else \
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,\
++ ("Number of advanced-configuration entries exceeded"));\
++ } while (0)
++
++/* Bootarg used to override the Kconfig FSL_FM_MAX_FRAME_SIZE value */
++#define FSL_FM_MAX_FRM_BOOTARG "fsl_fm_max_frm"
++
++/* Bootarg used to override FSL_FM_RX_EXTRA_HEADROOM Kconfig value */
++#define FSL_FM_RX_EXTRA_HEADROOM_BOOTARG "fsl_fm_rx_extra_headroom"
++
++/* Minimum and maximum value for the fsl_fm_rx_extra_headroom bootarg */
++#define FSL_FM_RX_EXTRA_HEADROOM_MIN 16
++#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384
++
++#define FSL_FM_PAUSE_TIME_ENABLE 0xf000
++#define FSL_FM_PAUSE_TIME_DISABLE 0
++#define FSL_FM_PAUSE_THRESH_DEFAULT 0
++
++/*
++ * Max frame size, across all interfaces.
++ * Configurable from Kconfig or bootargs, to avoid allocating
++ * oversized (socket) buffers when not using jumbo frames.
++ * Must be large enough to accommodate the network MTU, but small enough
++ * to avoid wasting skb memory.
++ *
++ * Could be overridden once, at boot-time, via the
++ * fm_set_max_frm() callback.
++ */
++int fsl_fm_max_frm = CONFIG_FSL_FM_MAX_FRAME_SIZE;
++
++/*
++ * Extra headroom for Rx buffers.
++ * FMan is instructed to allocate, on the Rx path, this amount of
++ * space at the beginning of a data buffer, beside the DPA private
++ * data area and the IC fields.
++ * Does not impact Tx buffer layout.
++ *
++ * Configurable from Kconfig or bootargs. Zero by default, it's needed
++ * on particular forwarding scenarios that add extra headers to the
++ * forwarded frame.
++ */
++int fsl_fm_rx_extra_headroom = CONFIG_FSL_FM_RX_EXTRA_HEADROOM;
++
++#ifdef CONFIG_FMAN_PFC
++static int fsl_fm_pfc_quanta[] = {
++ CONFIG_FMAN_PFC_QUANTA_0,
++ CONFIG_FMAN_PFC_QUANTA_1,
++ CONFIG_FMAN_PFC_QUANTA_2,
++ CONFIG_FMAN_PFC_QUANTA_3
++};
++#endif
++
++static t_LnxWrpFm lnxWrpFm;
++
++int fm_get_max_frm()
++{
++ return fsl_fm_max_frm;
++}
++EXPORT_SYMBOL(fm_get_max_frm);
++
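++/* Boot-time extra Rx headroom, rounded up to a 16-byte multiple. */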
++int fm_get_rx_extra_headroom()
++{
++ return ALIGN(fsl_fm_rx_extra_headroom, 16);
++}
++EXPORT_SYMBOL(fm_get_rx_extra_headroom);
++
++static int __init fm_set_max_frm(char *str)
++{
++ int ret = 0;
++
++ ret = get_option(&str, &fsl_fm_max_frm);
++ if (ret != 1) {
++ /*
++ * This will only work if CONFIG_EARLY_PRINTK is compiled in,
++ * and something like "earlyprintk=serial,uart0,115200" is
++ * specified in the bootargs
++ */
++ printk(KERN_WARNING "No suitable %s=<int> prop in bootargs; "
++ "will use the default FSL_FM_MAX_FRAME_SIZE (%d) "
++ "from Kconfig.\n", FSL_FM_MAX_FRM_BOOTARG,
++ CONFIG_FSL_FM_MAX_FRAME_SIZE);
++
++ fsl_fm_max_frm = CONFIG_FSL_FM_MAX_FRAME_SIZE;
++ return 1;
++ }
++
++ /* Don't allow invalid bootargs; fallback to the Kconfig value */
++ if (fsl_fm_max_frm < 64 || fsl_fm_max_frm > 9600) {
++ printk(KERN_WARNING "Invalid %s=%d in bootargs, valid range is "
++ "64-9600. Falling back to the FSL_FM_MAX_FRAME_SIZE (%d) "
++ "from Kconfig.\n",
++ FSL_FM_MAX_FRM_BOOTARG, fsl_fm_max_frm,
++ CONFIG_FSL_FM_MAX_FRAME_SIZE);
++
++ fsl_fm_max_frm = CONFIG_FSL_FM_MAX_FRAME_SIZE;
++ return 1;
++ }
++
++ printk(KERN_INFO "Using fsl_fm_max_frm=%d from bootargs\n",
++ fsl_fm_max_frm);
++ return 0;
++}
++early_param(FSL_FM_MAX_FRM_BOOTARG, fm_set_max_frm);
++
++static int __init fm_set_rx_extra_headroom(char *str)
++{
++ int ret;
++
++ ret = get_option(&str, &fsl_fm_rx_extra_headroom);
++
++ if (ret != 1) {
++ printk(KERN_WARNING "No suitable %s=<int> prop in bootargs; "
++ "will use the default FSL_FM_RX_EXTRA_HEADROOM (%d) "
++ "from Kconfig.\n", FSL_FM_RX_EXTRA_HEADROOM_BOOTARG,
++ CONFIG_FSL_FM_RX_EXTRA_HEADROOM);
++ fsl_fm_rx_extra_headroom = CONFIG_FSL_FM_RX_EXTRA_HEADROOM;
++
++ return 1;
++ }
++
++ if (fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN ||
++ fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX) {
++ printk(KERN_WARNING "Invalid value for %s=%d prop in "
++ "bootargs; will use the default "
++ "FSL_FM_RX_EXTRA_HEADROOM (%d) from Kconfig.\n",
++ FSL_FM_RX_EXTRA_HEADROOM_BOOTARG,
++ fsl_fm_rx_extra_headroom,
++ CONFIG_FSL_FM_RX_EXTRA_HEADROOM);
++ fsl_fm_rx_extra_headroom = CONFIG_FSL_FM_RX_EXTRA_HEADROOM;
++ }
++
++ printk(KERN_INFO "Using fsl_fm_rx_extra_headroom=%d from bootargs\n",
++ fsl_fm_rx_extra_headroom);
++
++ return 0;
++}
++early_param(FSL_FM_RX_EXTRA_HEADROOM_BOOTARG, fm_set_rx_extra_headroom);
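++
++/*
++ * Example bootargs (values are hypothetical):
++ *
++ *	fsl_fm_max_frm=9600 fsl_fm_rx_extra_headroom=128
++ *
++ * Both parameters are parsed with get_option(); out-of-range values fall
++ * back to the Kconfig defaults, as implemented above.
++ */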
++
++static irqreturn_t fm_irq(int irq, void *_dev)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)_dev;
++#ifdef CONFIG_PM_SLEEP
++ t_Fm *p_Fm = (t_Fm*)p_LnxWrpFmDev->h_Dev;
++#endif
++ if (!p_LnxWrpFmDev || !p_LnxWrpFmDev->h_Dev)
++ return IRQ_NONE;
++
++#ifdef CONFIG_PM_SLEEP
++ if (fman_get_normal_pending(p_Fm->p_FmFpmRegs) & INTR_EN_WAKEUP)
++ {
++ pm_wakeup_event(p_LnxWrpFmDev->dev, 200);
++ }
++#endif
++ FM_EventIsr(p_LnxWrpFmDev->h_Dev);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t fm_err_irq(int irq, void *_dev)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)_dev;
++
++ if (!p_LnxWrpFmDev || !p_LnxWrpFmDev->h_Dev)
++ return IRQ_NONE;
++
++ if (FM_ErrorIsr(p_LnxWrpFmDev->h_Dev) == E_OK)
++ return IRQ_HANDLED;
++
++ return IRQ_NONE;
++}
++
++/* Protects the FMD/LLD from concurrent calls made through fm_mutex_lock() / fm_mutex_unlock() */
++static struct mutex lnxwrp_mutex;
++
++static t_LnxWrpFmDev * CreateFmDev(uint8_t id)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ int j;
++
++ p_LnxWrpFmDev = (t_LnxWrpFmDev *)XX_Malloc(sizeof(t_LnxWrpFmDev));
++ if (!p_LnxWrpFmDev)
++ {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ return NULL;
++ }
++
++ memset(p_LnxWrpFmDev, 0, sizeof(t_LnxWrpFmDev));
++ p_LnxWrpFmDev->fmDevSettings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
++ memset(p_LnxWrpFmDev->fmDevSettings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
++ p_LnxWrpFmDev->fmPcdDevSettings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
++ memset(p_LnxWrpFmDev->fmPcdDevSettings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
++ p_LnxWrpFmDev->hcPort.settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
++ memset(p_LnxWrpFmDev->hcPort.settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
++ for (j=0; j<FM_MAX_NUM_OF_RX_PORTS; j++)
++ {
++ p_LnxWrpFmDev->rxPorts[j].settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
++ memset(p_LnxWrpFmDev->rxPorts[j].settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
++ }
++ for (j=0; j<FM_MAX_NUM_OF_TX_PORTS; j++)
++ {
++ p_LnxWrpFmDev->txPorts[j].settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
++ memset(p_LnxWrpFmDev->txPorts[j].settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
++ }
++ for (j=0; j<FM_MAX_NUM_OF_OH_PORTS-1; j++)
++ {
++ p_LnxWrpFmDev->opPorts[j].settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
++ memset(p_LnxWrpFmDev->opPorts[j].settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
++ }
++
++ return p_LnxWrpFmDev;
++}
++
++static void DestroyFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
++{
++ int j;
++
++ for (j=0; j<FM_MAX_NUM_OF_OH_PORTS-1; j++)
++ if (p_LnxWrpFmDev->opPorts[j].settings.advConfig)
++ XX_Free(p_LnxWrpFmDev->opPorts[j].settings.advConfig);
++ for (j=0; j<FM_MAX_NUM_OF_TX_PORTS; j++)
++ if (p_LnxWrpFmDev->txPorts[j].settings.advConfig)
++ XX_Free(p_LnxWrpFmDev->txPorts[j].settings.advConfig);
++ for (j=0; j<FM_MAX_NUM_OF_RX_PORTS; j++)
++ if (p_LnxWrpFmDev->rxPorts[j].settings.advConfig)
++ XX_Free(p_LnxWrpFmDev->rxPorts[j].settings.advConfig);
++ if (p_LnxWrpFmDev->hcPort.settings.advConfig)
++ XX_Free(p_LnxWrpFmDev->hcPort.settings.advConfig);
++ if (p_LnxWrpFmDev->fmPcdDevSettings.advConfig)
++ XX_Free(p_LnxWrpFmDev->fmPcdDevSettings.advConfig);
++ if (p_LnxWrpFmDev->fmDevSettings.advConfig)
++ XX_Free(p_LnxWrpFmDev->fmDevSettings.advConfig);
++
++ XX_Free(p_LnxWrpFmDev);
++}
++
++static t_Error FillRestFmInfo(t_LnxWrpFmDev *p_LnxWrpFmDev)
++{
++#define FM_BMI_PPIDS_OFFSET 0x00080304
++#define FM_DMA_PLR_OFFSET 0x000c2060
++#define FM_FPM_IP_REV_1_OFFSET 0x000c30c4
++#define DMA_HIGH_LIODN_MASK 0x0FFF0000
++#define DMA_LOW_LIODN_MASK 0x00000FFF
++#define DMA_LIODN_SHIFT 16
++
++typedef _Packed struct {
++ uint32_t plr[32];
++} _PackedType t_Plr;
++
++typedef _Packed struct {
++ volatile uint32_t fmbm_ppid[63];
++} _PackedType t_Ppids;
++
++ t_Plr *p_Plr;
++ t_Ppids *p_Ppids;
++ int i,j;
++ uint32_t fmRev;
++
++ static const uint8_t phys1GRxPortId[] = {0x8,0x9,0xa,0xb,0xc,0xd,0xe,0xf};
++ static const uint8_t phys10GRxPortId[] = {0x10,0x11};
++#if (DPAA_VERSION >= 11)
++ static const uint8_t physOhPortId[] = {/* 0x1, */0x2,0x3,0x4,0x5,0x6,0x7};
++#else
++ static const uint8_t physOhPortId[] = {0x1,0x2,0x3,0x4,0x5,0x6,0x7};
++#endif
++ static const uint8_t phys1GTxPortId[] = {0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f};
++ static const uint8_t phys10GTxPortId[] = {0x30,0x31};
++
++ fmRev = (uint32_t)(*((volatile uint32_t *)UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr+FM_FPM_IP_REV_1_OFFSET)));
++ fmRev &= 0xffff;
++
++ p_Plr = (t_Plr *)UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr+FM_DMA_PLR_OFFSET);
++#ifdef MODULE
++ for (i=0;i<FM_MAX_NUM_OF_PARTITIONS/2;i++)
++ p_Plr->plr[i] = 0;
++#endif /* MODULE */
++
++ for (i=0; i<FM_MAX_NUM_OF_PARTITIONS; i++)
++ {
++ uint16_t liodnBase = (uint16_t)((i%2) ?
++ (p_Plr->plr[i/2] & DMA_LOW_LIODN_MASK) :
++ ((p_Plr->plr[i/2] & DMA_HIGH_LIODN_MASK) >> DMA_LIODN_SHIFT));
++#ifdef FM_PARTITION_ARRAY
++ /* TODO: this was .liodnPerPartition[i] = liodnBase; is the index meaning the same? */
++ p_LnxWrpFmDev->fmDevSettings.param.liodnBasePerPort[i] = liodnBase;
++#endif /* FM_PARTITION_ARRAY */
++
++ if ((i >= phys1GRxPortId[0]) &&
++ (i <= phys1GRxPortId[FM_MAX_NUM_OF_1G_RX_PORTS-1]))
++ {
++ for (j=0; j<ARRAY_SIZE(phys1GRxPortId); j++)
++ if (phys1GRxPortId[j] == i)
++ break;
++ ASSERT_COND(j<ARRAY_SIZE(phys1GRxPortId));
++ p_LnxWrpFmDev->rxPorts[j].settings.param.liodnBase = liodnBase;
++ }
++ else if (FM_MAX_NUM_OF_10G_RX_PORTS &&
++ (i >= phys10GRxPortId[0]) &&
++ (i <= phys10GRxPortId[FM_MAX_NUM_OF_10G_RX_PORTS-1]))
++ {
++ for (j=0; j<ARRAY_SIZE(phys10GRxPortId); j++)
++ if (phys10GRxPortId[j] == i)
++ break;
++ ASSERT_COND(j<ARRAY_SIZE(phys10GRxPortId));
++ p_LnxWrpFmDev->rxPorts[FM_MAX_NUM_OF_1G_RX_PORTS+j].settings.param.liodnBase = liodnBase;
++ }
++ else if ((i >= physOhPortId[0]) &&
++ (i <= physOhPortId[FM_MAX_NUM_OF_OH_PORTS-1]))
++ {
++ for (j=0; j<ARRAY_SIZE(physOhPortId); j++)
++ if (physOhPortId[j] == i)
++ break;
++ ASSERT_COND(j<ARRAY_SIZE(physOhPortId));
++ if (j == 0)
++ p_LnxWrpFmDev->hcPort.settings.param.liodnBase = liodnBase;
++ else
++ p_LnxWrpFmDev->opPorts[j - 1].settings.param.liodnBase = liodnBase;
++ }
++ else if ((i >= phys1GTxPortId[0]) &&
++ (i <= phys1GTxPortId[FM_MAX_NUM_OF_1G_TX_PORTS-1]))
++ {
++ for (j=0; j<ARRAY_SIZE(phys1GTxPortId); j++)
++ if (phys1GTxPortId[j] == i)
++ break;
++ ASSERT_COND(j<ARRAY_SIZE(phys1GTxPortId));
++ p_LnxWrpFmDev->txPorts[j].settings.param.liodnBase = liodnBase;
++ }
++ else if (FM_MAX_NUM_OF_10G_TX_PORTS &&
++ (i >= phys10GTxPortId[0]) &&
++ (i <= phys10GTxPortId[FM_MAX_NUM_OF_10G_TX_PORTS-1]))
++ {
++ for (j=0; j<ARRAY_SIZE(phys10GTxPortId); j++)
++ if (phys10GTxPortId[j] == i)
++ break;
++ ASSERT_COND(j<ARRAY_SIZE(phys10GTxPortId));
++ p_LnxWrpFmDev->txPorts[FM_MAX_NUM_OF_1G_TX_PORTS+j].settings.param.liodnBase = liodnBase;
++ }
++ }
++
++ p_Ppids = (t_Ppids *)UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr+FM_BMI_PPIDS_OFFSET);
++
++ for (i=0; i<FM_MAX_NUM_OF_1G_RX_PORTS; i++)
++ p_LnxWrpFmDev->rxPorts[i].settings.param.specificParams.rxParams.liodnOffset =
++ p_Ppids->fmbm_ppid[phys1GRxPortId[i]-1];
++
++ for (i=0; i<FM_MAX_NUM_OF_10G_RX_PORTS; i++)
++ p_LnxWrpFmDev->rxPorts[FM_MAX_NUM_OF_1G_RX_PORTS+i].settings.param.specificParams.rxParams.liodnOffset =
++ p_Ppids->fmbm_ppid[phys10GRxPortId[i]-1];
++
++ return E_OK;
++}
++
++/* Structure that defines QE firmware binary files.
++ *
++ * See Documentation/powerpc/qe_firmware.txt for a description of these
++ * fields.
++ */
++struct qe_firmware {
++ struct qe_header {
++ __be32 length; /* Length of the entire structure, in bytes */
++ u8 magic[3]; /* Set to { 'Q', 'E', 'F' } */
++ u8 version; /* Version of this layout. First ver is '1' */
++ } header;
++ u8 id[62]; /* Null-terminated identifier string */
++ u8 split; /* 0 = shared I-RAM, 1 = split I-RAM */
++ u8 count; /* Number of microcode[] structures */
++ struct {
++ __be16 model; /* The SOC model */
++ u8 major; /* The SOC revision major */
++ u8 minor; /* The SOC revision minor */
++ } __attribute__ ((packed)) soc;
++ u8 padding[4]; /* Reserved, for alignment */
++ __be64 extended_modes; /* Extended modes */
++ __be32 vtraps[8]; /* Virtual trap addresses */
++ u8 reserved[4]; /* Reserved, for future expansion */
++ struct qe_microcode {
++ u8 id[32]; /* Null-terminated identifier */
++ __be32 traps[16]; /* Trap addresses, 0 == ignore */
++ __be32 eccr; /* The value for the ECCR register */
++ __be32 iram_offset; /* Offset into I-RAM for the code */
++ __be32 count; /* Number of 32-bit words of the code */
++ __be32 code_offset; /* Offset of the actual microcode */
++ u8 major; /* The microcode version major */
++ u8 minor; /* The microcode version minor */
++ u8 revision; /* The microcode version revision */
++ u8 padding; /* Reserved, for alignment */
++ u8 reserved[4]; /* Reserved, for future expansion */
++ } __attribute__ ((packed)) microcode[1];
++ /* All microcode binaries should be located here */
++ /* CRC32 should be located here, after the microcode binaries */
++} __attribute__ ((packed));
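++
++/*
++ * A minimal sanity check over this layout might look like the sketch
++ * below (illustrative only; FindFmanMicrocode() below does not validate
++ * the magic itself):
++ *
++ *	static bool qe_fw_magic_ok(const struct qe_firmware *fw)
++ *	{
++ *		return fw && fw->header.magic[0] == 'Q' &&
++ *		       fw->header.magic[1] == 'E' &&
++ *		       fw->header.magic[2] == 'F';
++ *	}
++ */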
++
++/**
++ * FindFmanMicrocode - find the Fman microcode
++ *
++ * This function returns a pointer to the QE Firmware blob that holds
++ * the Fman microcode. We use the QE Firmware structure because Fman microcode
++ * is similar to QE microcode, so there's no point in defining a new layout.
++ *
++ * Current versions of U-Boot embed the Fman firmware into the device tree,
++ * so we check for that first. Each Fman node in the device tree contains
++ * either a node, or a pointer to a node, that holds the firmware. Technically,
++ * we should be fetching the firmware node for the current Fman, but we don't
++ * have that information any more, so we assume that there is only one firmware
++ * node in the device tree and that all FMan instances use the same firmware.
++ */
++static const struct qe_firmware *FindFmanMicrocode(void)
++{
++ static const struct qe_firmware *P4080_UCPatch;
++ struct device_node *np;
++
++ if (P4080_UCPatch)
++ return P4080_UCPatch;
++
++ /* The firmware should be inside the device tree. */
++ np = of_find_compatible_node(NULL, NULL, "fsl,fman-firmware");
++ if (np) {
++ P4080_UCPatch = of_get_property(np, "fsl,firmware", NULL);
++ of_node_put(np);
++ if (P4080_UCPatch)
++ return P4080_UCPatch;
++ else
++ REPORT_ERROR(WARNING, E_NOT_FOUND, ("firmware node is incomplete"));
++ }
++
++ /* Returning NULL here forces the reuse of the IRAM content */
++ return NULL;
++}
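++
++/*
++ * For reference, the firmware node matched above typically looks like
++ * this in the device tree (property contents are hypothetical):
++ *
++ *	fman-firmware {
++ *		compatible = "fsl,fman-firmware";
++ *		fsl,firmware = <...microcode words...>;
++ *	};
++ */
++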
++#define SVR_SECURITY_MASK 0x00080000
++#define SVR_PERSONALITY_MASK 0x0000FF00
++#define SVR_VER_IGNORE_MASK (SVR_SECURITY_MASK | SVR_PERSONALITY_MASK)
++#define SVR_B4860_REV1_VALUE 0x86800010
++#define SVR_B4860_REV2_VALUE 0x86800020
++#define SVR_T4240_VALUE 0x82400000
++#define SVR_T4120_VALUE 0x82400100
++#define SVR_T4160_VALUE 0x82410000
++#define SVR_T4080_VALUE 0x82410200
++#define SVR_T4_DEVICE_ID 0x82400000
++#define SVR_DEVICE_ID_MASK 0xFFF00000
++
++#define OF_DEV_ID_NUM 2 /* one used, another one zeroed */
++
++/* searches for a subnode with the given name/compatible */
++static bool HasFmPcdOfNode(struct device_node *fm_node,
++ struct of_device_id *ids,
++ const char *name,
++ const char *compatible)
++{
++ struct device_node *dev_node;
++ bool ret = false;
++
++ memset(ids, 0, OF_DEV_ID_NUM*sizeof(struct of_device_id));
++ if (WARN_ON(strlen(name) >= sizeof(ids[0].name)))
++ return false;
++ strcpy(ids[0].name, name);
++ if (WARN_ON(strlen(compatible) >= sizeof(ids[0].compatible)))
++ return false;
++ strcpy(ids[0].compatible, compatible);
++ for_each_child_of_node(fm_node, dev_node)
++ if (of_match_node(ids, dev_node) != NULL)
++ ret = true;
++ return ret;
++}
++
++static t_LnxWrpFmDev * ReadFmDevTreeNode (struct platform_device *of_dev)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ struct device_node *fm_node, *dev_node;
++ struct of_device_id ids[OF_DEV_ID_NUM];
++ struct resource res;
++ struct clk *clk;
++ u32 clk_rate;
++ const uint32_t *uint32_prop;
++ int _errno=0, lenp;
++ uint32_t tmp_prop;
++
++ fm_node = of_node_get(of_dev->dev.of_node);
++
++ uint32_prop = (uint32_t *)of_get_property(fm_node, "cell-index", &lenp);
++ if (unlikely(uint32_prop == NULL)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_get_property(%s, cell-index) failed", fm_node->full_name));
++ return NULL;
++ }
++ tmp_prop = be32_to_cpu(*uint32_prop);
++
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ return NULL;
++
++ if (tmp_prop > INTG_MAX_NUM_OF_FM) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!"));
++ return NULL;
++ }
++ p_LnxWrpFmDev = CreateFmDev(tmp_prop);
++ if (!p_LnxWrpFmDev) {
++ REPORT_ERROR(MAJOR, E_NULL_POINTER, NO_MSG);
++ return NULL;
++ }
++ p_LnxWrpFmDev->dev = &of_dev->dev;
++ p_LnxWrpFmDev->id = tmp_prop;
++
++ /* Get the FM interrupt */
++ p_LnxWrpFmDev->irq = of_irq_to_resource(fm_node, 0, NULL);
++ if (unlikely(p_LnxWrpFmDev->irq == /*NO_IRQ*/0)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_irq_to_resource() = %d", NO_IRQ));
++ DestroyFmDev(p_LnxWrpFmDev);
++ return NULL;
++ }
++
++ /* Get the FM error interrupt */
++ p_LnxWrpFmDev->err_irq = of_irq_to_resource(fm_node, 1, NULL);
++
++ if (unlikely(p_LnxWrpFmDev->err_irq == /*NO_IRQ*/0)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_irq_to_resource() = %d", NO_IRQ));
++ DestroyFmDev(p_LnxWrpFmDev);
++ return NULL;
++ }
++
++ /* Get the FM address */
++ _errno = of_address_to_resource(fm_node, 0, &res);
++ if (unlikely(_errno < 0)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno));
++ DestroyFmDev(p_LnxWrpFmDev);
++ return NULL;
++ }
++
++ p_LnxWrpFmDev->fmBaseAddr = 0;
++ p_LnxWrpFmDev->fmPhysBaseAddr = res.start;
++ p_LnxWrpFmDev->fmMemSize = res.end + 1 - res.start;
++
++ clk = of_clk_get(fm_node, 0);
++ if (IS_ERR(clk)) {
++ dev_err(&of_dev->dev, "%s: Failed to get FM clock structure\n",
++ __func__);
++ of_node_put(fm_node);
++ DestroyFmDev(p_LnxWrpFmDev);
++ return NULL;
++ }
++
++ clk_rate = clk_get_rate(clk);
++ if (!clk_rate) {
++ dev_err(&of_dev->dev, "%s: Failed to determine FM clock rate\n",
++ __func__);
++ of_node_put(fm_node);
++ DestroyFmDev(p_LnxWrpFmDev);
++ return NULL;
++ }
++
++ p_LnxWrpFmDev->fmDevSettings.param.fmClkFreq = DIV_ROUND_UP(clk_rate, 1000000); /* In MHz, rounded */
++ /* Get the MURAM base address and size */
++ memset(ids, 0, sizeof(ids));
++ if (WARN_ON(strlen("muram") >= sizeof(ids[0].name)))
++ return NULL;
++ strcpy(ids[0].name, "muram");
++ if (WARN_ON(strlen("fsl,fman-muram") >= sizeof(ids[0].compatible)))
++ return NULL;
++ strcpy(ids[0].compatible, "fsl,fman-muram");
++ for_each_child_of_node(fm_node, dev_node) {
++ if (likely(of_match_node(ids, dev_node) != NULL)) {
++ _errno = of_address_to_resource(dev_node, 0, &res);
++ if (unlikely(_errno < 0)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno));
++ DestroyFmDev(p_LnxWrpFmDev);
++ return NULL;
++ }
++
++ p_LnxWrpFmDev->fmMuramBaseAddr = 0;
++ p_LnxWrpFmDev->fmMuramPhysBaseAddr = res.start;
++ p_LnxWrpFmDev->fmMuramMemSize = res.end + 1 - res.start;
++
++#ifndef CONFIG_FMAN_ARM
++ {
++ uint32_t svr;
++ svr = mfspr(SPRN_SVR);
++
++ if ((svr & ~SVR_VER_IGNORE_MASK) >= SVR_B4860_REV2_VALUE)
++ p_LnxWrpFmDev->fmMuramMemSize = 0x80000;
++ }
++#endif
++ }
++ }
++
++ /* Get the RTC base address and size */
++ memset(ids, 0, sizeof(ids));
++ if (WARN_ON(strlen("ptp-timer") >= sizeof(ids[0].name)))
++ return NULL;
++ strcpy(ids[0].name, "ptp-timer");
++ if (WARN_ON(strlen("fsl,fman-rtc") >= sizeof(ids[0].compatible)))
++ return NULL;
++ strcpy(ids[0].compatible, "fsl,fman-rtc");
++ for_each_child_of_node(fm_node, dev_node) {
++ if (likely(of_match_node(ids, dev_node) != NULL)) {
++ _errno = of_address_to_resource(dev_node, 0, &res);
++ if (unlikely(_errno < 0)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno));
++ DestroyFmDev(p_LnxWrpFmDev);
++ return NULL;
++ }
++
++ p_LnxWrpFmDev->fmRtcBaseAddr = 0;
++ p_LnxWrpFmDev->fmRtcPhysBaseAddr = res.start;
++ p_LnxWrpFmDev->fmRtcMemSize = res.end + 1 - res.start;
++ }
++ }
++
++#if (DPAA_VERSION >= 11)
++ /* Get the VSP base address */
++ for_each_child_of_node(fm_node, dev_node) {
++ if (of_device_is_compatible(dev_node, "fsl,fman-vsps")) {
++ _errno = of_address_to_resource(dev_node, 0, &res);
++ if (unlikely(_errno < 0)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno));
++ DestroyFmDev(p_LnxWrpFmDev);
++ return NULL;
++ }
++ p_LnxWrpFmDev->fmVspBaseAddr = 0;
++ p_LnxWrpFmDev->fmVspPhysBaseAddr = res.start;
++ p_LnxWrpFmDev->fmVspMemSize = res.end + 1 - res.start;
++ }
++ }
++#endif
++
++ /* Get all PCD nodes */
++ p_LnxWrpFmDev->prsActive = HasFmPcdOfNode(fm_node, ids, "parser", "fsl,fman-parser");
++ p_LnxWrpFmDev->kgActive = HasFmPcdOfNode(fm_node, ids, "keygen", "fsl,fman-keygen");
++ p_LnxWrpFmDev->ccActive = HasFmPcdOfNode(fm_node, ids, "cc", "fsl,fman-cc");
++ p_LnxWrpFmDev->plcrActive = HasFmPcdOfNode(fm_node, ids, "policer", "fsl,fman-policer");
++
++ if (p_LnxWrpFmDev->prsActive || p_LnxWrpFmDev->kgActive ||
++ p_LnxWrpFmDev->ccActive || p_LnxWrpFmDev->plcrActive)
++ p_LnxWrpFmDev->pcdActive = TRUE;
++
++ if (p_LnxWrpFmDev->pcdActive)
++ {
++ const char *str_prop = (char *)of_get_property(fm_node, "fsl,default-pcd", &lenp);
++ if (str_prop) {
++ if (strncmp(str_prop, "3-tuple", strlen("3-tuple")) == 0)
++ p_LnxWrpFmDev->defPcd = e_FM_PCD_3_TUPLE;
++ }
++ else
++ p_LnxWrpFmDev->defPcd = e_NO_PCD;
++ }
++
++ of_node_put(fm_node);
++
++ p_LnxWrpFmDev->hcCh =
++ qman_affine_channel(cpumask_first(qman_affine_cpus()));
++
++ p_LnxWrpFmDev->active = TRUE;
++
++ return p_LnxWrpFmDev;
++}
++
++struct device_node *GetFmAdvArgsDevTreeNode (uint8_t fmIndx)
++{
++ struct device_node *dev_node;
++ const uint32_t *uint32_prop;
++ int lenp;
++ uint32_t tmp_prop;
++
++ for_each_compatible_node(dev_node, NULL, "fsl,fman-extended-args") {
++ uint32_prop = (uint32_t *)of_get_property(dev_node, "cell-index", &lenp);
++ if (unlikely(uint32_prop == NULL)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("of_get_property(%s, cell-index) failed",
++ dev_node->full_name));
++ return NULL;
++ }
++ tmp_prop = be32_to_cpu(*uint32_prop);
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ return NULL;
++ if (tmp_prop > INTG_MAX_NUM_OF_FM) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!"));
++ return NULL;
++ }
++ if (fmIndx == tmp_prop)
++ return dev_node;
++ }
++
++ return NULL;
++}
++
++static t_Error CheckNConfigFmAdvArgs (t_LnxWrpFmDev *p_LnxWrpFmDev)
++{
++ struct device_node *dev_node;
++ t_Error err = E_INVALID_VALUE;
++ const uint32_t *uint32_prop;
++ const char *str_prop;
++ int lenp;
++ uint32_t tmp_prop;
++
++ dev_node = GetFmAdvArgsDevTreeNode(p_LnxWrpFmDev->id);
++	if (!dev_node) /* no advanced parameters for this FMan */
++ return E_OK;
++
++ str_prop = (char *)of_get_property(dev_node, "dma-aid-mode", &lenp);
++ if (str_prop) {
++ if (strcmp(str_prop, "port") == 0)
++ err = FM_ConfigDmaAidMode(p_LnxWrpFmDev->h_Dev, e_FM_DMA_AID_OUT_PORT_ID);
++ else if (strcmp(str_prop, "tnum") == 0)
++ err = FM_ConfigDmaAidMode(p_LnxWrpFmDev->h_Dev, e_FM_DMA_AID_OUT_TNUM);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ uint32_prop = (uint32_t *)of_get_property(dev_node,
++ "total-fifo-size", &lenp);
++ if (uint32_prop) {
++ tmp_prop = be32_to_cpu(*uint32_prop);
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++
++ if (FM_ConfigTotalFifoSize(p_LnxWrpFmDev->h_Dev,
++ tmp_prop) != E_OK)
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++ }
++
++ uint32_prop = (uint32_t *)of_get_property(dev_node, "tnum-aging-period",
++ &lenp);
++ if (uint32_prop) {
++ tmp_prop = be32_to_cpu(*uint32_prop);
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++
++ err = FM_ConfigTnumAgingPeriod(p_LnxWrpFmDev->h_Dev,
++ (uint16_t)tmp_prop/*tnumAgingPeriod*/);
++
++ if (err != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ of_node_put(dev_node);
++
++ return E_OK;
++}
++
++static void LnxwrpFmDevExceptionsCb(t_Handle h_App, e_FmExceptions exception)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)h_App;
++
++ ASSERT_COND(p_LnxWrpFmDev);
++
++ DBG(INFO, ("got fm exception %d", exception));
++
++ /* do nothing */
++ UNUSED(exception);
++}
++
++static void LnxwrpFmDevBusErrorCb(t_Handle h_App,
++ e_FmPortType portType,
++ uint8_t portId,
++ uint64_t addr,
++ uint8_t tnum,
++ uint16_t liodn)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)h_App;
++
++ ASSERT_COND(p_LnxWrpFmDev);
++
++ /* do nothing */
++	UNUSED(portType); UNUSED(portId); UNUSED(addr); UNUSED(tnum); UNUSED(liodn);
++}
++
++static t_Error ConfigureFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
++{
++ struct resource *dev_res;
++ int _errno;
++
++ if (!p_LnxWrpFmDev->active)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM not configured!!!"));
++
++#ifndef MODULE
++ _errno = can_request_irq(p_LnxWrpFmDev->irq, 0);
++ if (unlikely(_errno < 0))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("can_request_irq() = %d", _errno));
++#endif
++ _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, fm_irq, 0, "fman", p_LnxWrpFmDev);
++ if (unlikely(_errno < 0))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_irq(%d) = %d", p_LnxWrpFmDev->irq, _errno));
++
++ enable_irq_wake(p_LnxWrpFmDev->irq);
++
++ if (p_LnxWrpFmDev->err_irq != 0) {
++#ifndef MODULE
++ _errno = can_request_irq(p_LnxWrpFmDev->err_irq, 0);
++ if (unlikely(_errno < 0))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("can_request_irq() = %d", _errno));
++#endif
++ _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->err_irq, fm_err_irq, IRQF_SHARED, "fman-err", p_LnxWrpFmDev);
++ if (unlikely(_errno < 0))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_irq(%d) = %d", p_LnxWrpFmDev->err_irq, _errno));
++
++ enable_irq_wake(p_LnxWrpFmDev->err_irq);
++ }
++
++ p_LnxWrpFmDev->res = devm_request_mem_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize, "fman");
++ if (unlikely(p_LnxWrpFmDev->res == NULL))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_mem_region() failed"));
++
++ p_LnxWrpFmDev->fmBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize));
++ if (unlikely(p_LnxWrpFmDev->fmBaseAddr == 0))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed"));
++
++ if (SYS_RegisterIoMap((uint64_t)p_LnxWrpFmDev->fmBaseAddr, (uint64_t)p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM memory map"));
++
++ dev_res = __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize, "fman-muram");
++ if (unlikely(dev_res == NULL))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("__devm_request_region() failed"));
++
++ p_LnxWrpFmDev->fmMuramBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize));
++ if (unlikely(p_LnxWrpFmDev->fmMuramBaseAddr == 0))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed"));
++
++ if (SYS_RegisterIoMap((uint64_t)p_LnxWrpFmDev->fmMuramBaseAddr, (uint64_t)p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM MURAM memory map"));
++
++ if (p_LnxWrpFmDev->fmRtcPhysBaseAddr)
++ {
++ dev_res = __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize, "fman-rtc");
++ if (unlikely(dev_res == NULL))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("__devm_request_region() failed"));
++
++ p_LnxWrpFmDev->fmRtcBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize));
++ if (unlikely(p_LnxWrpFmDev->fmRtcBaseAddr == 0))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed"));
++
++ if (SYS_RegisterIoMap((uint64_t)p_LnxWrpFmDev->fmRtcBaseAddr, (uint64_t)p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-RTC memory map"));
++ }
++
++#if (DPAA_VERSION >= 11)
++ if (p_LnxWrpFmDev->fmVspPhysBaseAddr) {
++ dev_res = __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmVspPhysBaseAddr, p_LnxWrpFmDev->fmVspMemSize, "fman-vsp");
++ if (unlikely(dev_res == NULL))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("__devm_request_region() failed"));
++
++ p_LnxWrpFmDev->fmVspBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmVspPhysBaseAddr, p_LnxWrpFmDev->fmVspMemSize));
++ if (unlikely(p_LnxWrpFmDev->fmVspBaseAddr == 0))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed"));
++ }
++#endif
++
++ p_LnxWrpFmDev->fmDevSettings.param.baseAddr = p_LnxWrpFmDev->fmBaseAddr;
++ p_LnxWrpFmDev->fmDevSettings.param.fmId = p_LnxWrpFmDev->id;
++ p_LnxWrpFmDev->fmDevSettings.param.irq = NO_IRQ;
++ p_LnxWrpFmDev->fmDevSettings.param.errIrq = NO_IRQ;
++ p_LnxWrpFmDev->fmDevSettings.param.f_Exception = LnxwrpFmDevExceptionsCb;
++ p_LnxWrpFmDev->fmDevSettings.param.f_BusError = LnxwrpFmDevBusErrorCb;
++ p_LnxWrpFmDev->fmDevSettings.param.h_App = p_LnxWrpFmDev;
++
++ return FillRestFmInfo(p_LnxWrpFmDev);
++}
++
++#ifndef CONFIG_FMAN_ARM
++/*
++ * Table of compatible strings for the device tree "guts" node on
++ * QorIQ SoCs. "fsl,qoriq-device-config-2.0" corresponds to the T4 and
++ * B4 SoCs; older SoCs use "fsl,qoriq-device-config-1.0".
++ */
++static const struct of_device_id guts_device_ids[] = {
++ { .compatible = "fsl,qoriq-device-config-1.0", },
++ { .compatible = "fsl,qoriq-device-config-2.0", },
++ {}
++};
++
++static unsigned int get_rcwsr(int regnum)
++{
++ struct ccsr_guts __iomem *guts_regs = NULL;
++ struct device_node *guts_node;
++
++ guts_node = of_find_matching_node(NULL, guts_device_ids);
++ if (!guts_node) {
++ pr_err("could not find GUTS node\n");
++ return 0;
++ }
++ guts_regs = of_iomap(guts_node, 0);
++ of_node_put(guts_node);
++ if (!guts_regs) {
++ pr_err("ioremap of GUTS node failed\n");
++ return 0;
++ }
++
++ return ioread32be(&guts_regs->rcwsr[regnum]);
++}
++
++#define FMAN1_ALL_MACS_MASK 0xFCC00000
++#define FMAN2_ALL_MACS_MASK 0x000FCC00
++
++/**
++ * @Function ResetOnInitErrata_A007273
++ *
++ * @Description Workaround for Errata A-007273
++ * This workaround is required to avoid a FMan hang during reset on initialization.
++ * Enable all MACs in guts.devdisr2 register,
++ * then perform a regular FMan reset and then restore MACs to their original state.
++ *
++ * @Param[in] h_Fm - FM module descriptor
++ *
++ * @Return None.
++ */
++void ResetOnInitErrata_A007273(t_Handle h_Fm)
++{
++ struct ccsr_guts __iomem *guts_regs = NULL;
++ struct device_node *guts_node;
++ u32 devdisr2, enableMacs;
++
++ /* Get guts registers */
++ guts_node = of_find_matching_node(NULL, guts_device_ids);
++ if (!guts_node) {
++ pr_err("could not find GUTS node\n");
++ return;
++ }
++ guts_regs = of_iomap(guts_node, 0);
++ of_node_put(guts_node);
++ if (!guts_regs) {
++ pr_err("ioremap of GUTS node failed\n");
++ return;
++ }
++
++ /* Read current state */
++ devdisr2 = ioread32be(&guts_regs->devdisr2);
++
++ if (FmGetId(h_Fm) == 0)
++ enableMacs = devdisr2 & ~FMAN1_ALL_MACS_MASK;
++ else
++ enableMacs = devdisr2 & ~FMAN2_ALL_MACS_MASK;
++
++ /* Enable all MACs */
++ iowrite32be(enableMacs, &guts_regs->devdisr2);
++
++ /* Perform standard FMan reset */
++ FmReset(h_Fm);
++
++ /* Restore devdisr2 value */
++ iowrite32be(devdisr2, &guts_regs->devdisr2);
++
++ iounmap(guts_regs);
++}
++#endif
++
++static t_Error InitFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
++{
++ const struct qe_firmware *fw;
++
++ if (!p_LnxWrpFmDev->active)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM not configured!!!"));
++
++ if ((p_LnxWrpFmDev->h_MuramDev = FM_MURAM_ConfigAndInit(p_LnxWrpFmDev->fmMuramBaseAddr, p_LnxWrpFmDev->fmMuramMemSize)) == NULL)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-MURAM!"));
++
++ /* Loading the fman-controller code */
++ fw = FindFmanMicrocode();
++
++ if (!fw) {
++ /* this forces the reuse of the current IRAM content */
++ p_LnxWrpFmDev->fmDevSettings.param.firmware.size = 0;
++ p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code = NULL;
++ } else {
++ p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code =
++ (void *) fw + be32_to_cpu(fw->microcode[0].code_offset);
++ p_LnxWrpFmDev->fmDevSettings.param.firmware.size =
++ sizeof(u32) * be32_to_cpu(fw->microcode[0].count);
++ DBG(INFO, ("Loading fman-controller code version %d.%d.%d",
++ fw->microcode[0].major,
++ fw->microcode[0].minor,
++ fw->microcode[0].revision));
++ }
++
++#ifdef CONFIG_FMAN_ARM
++	{ /* endianness adjustments: byteswap the ucode retrieved from the f/w blob */
++		int i;
++		int usz = p_LnxWrpFmDev->fmDevSettings.param.firmware.size;
++		void *p_Code = p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code;
++		u32 *dest = kzalloc(usz, GFP_KERNEL);
++
++		if (p_Code && dest) {
++			for (i = 0; i < usz / 4; ++i)
++				dest[i] = be32_to_cpu(((u32 *)p_Code)[i]);
++		} else {
++			/* allocation failed: fall back to reusing the IRAM content */
++			kfree(dest);
++			dest = NULL;
++			p_LnxWrpFmDev->fmDevSettings.param.firmware.size = 0;
++		}
++
++		p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code = dest;
++	}
++#endif
++
++ p_LnxWrpFmDev->fmDevSettings.param.h_FmMuram = p_LnxWrpFmDev->h_MuramDev;
++
++#if (DPAA_VERSION >= 11)
++ if (p_LnxWrpFmDev->fmVspBaseAddr) {
++ p_LnxWrpFmDev->fmDevSettings.param.vspBaseAddr = p_LnxWrpFmDev->fmVspBaseAddr;
++ p_LnxWrpFmDev->fmDevSettings.param.partVSPBase = 0;
++ p_LnxWrpFmDev->fmDevSettings.param.partNumOfVSPs = FM_VSP_MAX_NUM_OF_ENTRIES;
++ }
++#endif
++
++#ifdef CONFIG_FMAN_ARM
++ p_LnxWrpFmDev->fmDevSettings.param.fmMacClkRatio = 1;
++#else
++	if (p_LnxWrpFmDev->fmDevSettings.param.fmId == 0)
++ p_LnxWrpFmDev->fmDevSettings.param.fmMacClkRatio =
++ !!(get_rcwsr(4) & 0x2); /* RCW[FM_MAC_RAT0] */
++ else
++ p_LnxWrpFmDev->fmDevSettings.param.fmMacClkRatio =
++ !!(get_rcwsr(4) & 0x1); /* RCW[FM_MAC_RAT1] */
++
++ {
++ /* T4 Devices ClkRatio is always 1 regardless of RCW[FM_MAC_RAT1] */
++ uint32_t svr;
++ svr = mfspr(SPRN_SVR);
++
++ if ((svr & SVR_DEVICE_ID_MASK) == SVR_T4_DEVICE_ID)
++ p_LnxWrpFmDev->fmDevSettings.param.fmMacClkRatio = 1;
++ }
++#endif /* CONFIG_FMAN_ARM */
++
++ if ((p_LnxWrpFmDev->h_Dev = FM_Config(&p_LnxWrpFmDev->fmDevSettings.param)) == NULL)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM"));
++
++ if (FM_ConfigResetOnInit(p_LnxWrpFmDev->h_Dev, TRUE) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM"));
++
++#ifndef CONFIG_FMAN_ARM
++#ifdef FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273
++ if (FM_ConfigResetOnInitOverrideCallback(p_LnxWrpFmDev->h_Dev, ResetOnInitErrata_A007273) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM"));
++#endif /* FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273 */
++#endif /* CONFIG_FMAN_ARM */
++
++#ifdef CONFIG_FMAN_P1023
++ if (FM_ConfigDmaAidOverride(p_LnxWrpFmDev->h_Dev, TRUE) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM"));
++#endif
++
++ CheckNConfigFmAdvArgs(p_LnxWrpFmDev);
++
++ if (FM_Init(p_LnxWrpFmDev->h_Dev) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM"));
++
++	/* TODO: why do we mask these interrupts? */
++	if (p_LnxWrpFmDev->err_irq == 0) {
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_DMA_BUS_ERROR, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_DMA_READ_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_DMA_SYSTEM_WRITE_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_DMA_FM_WRITE_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_DMA_SINGLE_PORT_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_FPM_STALL_ON_TASKS, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_FPM_SINGLE_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_FPM_DOUBLE_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_QMI_SINGLE_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_QMI_DOUBLE_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_BMI_LIST_RAM_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_BMI_STORAGE_PROFILE_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_BMI_STATISTICS_RAM_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_BMI_DISPATCH_RAM_ECC, FALSE);
++		FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_IRAM_ECC, FALSE);
++ /* TODO: FmDisableRamsEcc assert for ramsEccOwners.
++ * FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_MURAM_ECC,FALSE);*/
++ }
++
++ if (p_LnxWrpFmDev->fmRtcBaseAddr)
++ {
++ t_FmRtcParams fmRtcParam;
++
++ memset(&fmRtcParam, 0, sizeof(fmRtcParam));
++ fmRtcParam.h_App = p_LnxWrpFmDev;
++ fmRtcParam.h_Fm = p_LnxWrpFmDev->h_Dev;
++ fmRtcParam.baseAddress = p_LnxWrpFmDev->fmRtcBaseAddr;
++
++		if (!(p_LnxWrpFmDev->h_RtcDev = FM_RTC_Config(&fmRtcParam)))
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-RTC"));
++
++ if (FM_RTC_ConfigPeriod(p_LnxWrpFmDev->h_RtcDev, DPA_PTP_NOMINAL_FREQ_PERIOD_NS) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-RTC"));
++
++ if (FM_RTC_Init(p_LnxWrpFmDev->h_RtcDev) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-RTC"));
++ }
++
++ return E_OK;
++}
++
++/* TODO: to be moved back here */
++extern void FreeFmPcdDev(t_LnxWrpFmDev *p_LnxWrpFmDev);
++
++static void FreeFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
++{
++ if (!p_LnxWrpFmDev->active)
++ return;
++
++ FreeFmPcdDev(p_LnxWrpFmDev);
++
++ if (p_LnxWrpFmDev->h_RtcDev)
++ FM_RTC_Free(p_LnxWrpFmDev->h_RtcDev);
++
++ if (p_LnxWrpFmDev->h_Dev)
++ FM_Free(p_LnxWrpFmDev->h_Dev);
++
++ if (p_LnxWrpFmDev->h_MuramDev)
++ FM_MURAM_Free(p_LnxWrpFmDev->h_MuramDev);
++
++ if (p_LnxWrpFmDev->fmRtcBaseAddr)
++ {
++ SYS_UnregisterIoMap(p_LnxWrpFmDev->fmRtcBaseAddr);
++ devm_iounmap(p_LnxWrpFmDev->dev, UINT_TO_PTR(p_LnxWrpFmDev->fmRtcBaseAddr));
++ __devm_release_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize);
++ }
++ SYS_UnregisterIoMap(p_LnxWrpFmDev->fmMuramBaseAddr);
++ devm_iounmap(p_LnxWrpFmDev->dev, UINT_TO_PTR(p_LnxWrpFmDev->fmMuramBaseAddr));
++ __devm_release_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize);
++ SYS_UnregisterIoMap(p_LnxWrpFmDev->fmBaseAddr);
++ devm_iounmap(p_LnxWrpFmDev->dev, UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr));
++ devm_release_mem_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize);
++ if (p_LnxWrpFmDev->err_irq != 0) {
++ devm_free_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->err_irq, p_LnxWrpFmDev);
++ }
++
++ devm_free_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, p_LnxWrpFmDev);
++}
++
++/* FMan character device file operations */
++extern struct file_operations fm_fops;
++
++static int /*__devinit*/ fm_probe(struct platform_device *of_dev)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++
++ if ((p_LnxWrpFmDev = ReadFmDevTreeNode(of_dev)) == NULL)
++ return -EIO;
++ if (ConfigureFmDev(p_LnxWrpFmDev) != E_OK)
++ return -EIO;
++ if (InitFmDev(p_LnxWrpFmDev) != E_OK)
++ return -EIO;
++
++ /* IOCTL ABI checking */
++ LnxWrpPCDIOCTLEnumChecking();
++ LnxWrpPCDIOCTLTypeChecking();
++
++ Sprint (p_LnxWrpFmDev->name, "%s%d", DEV_FM_NAME, p_LnxWrpFmDev->id);
++
++ /* Register to the /dev for IOCTL API */
++ /* Register dynamically a new major number for the character device: */
++ if ((p_LnxWrpFmDev->major = register_chrdev(0, p_LnxWrpFmDev->name, &fm_fops)) <= 0) {
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Failed to allocate a major number for device \"%s\"", p_LnxWrpFmDev->name));
++ return -EIO;
++ }
++
++ /* Creating classes for FM */
++	DBG(TRACE, ("class_create fm_class"));
++ p_LnxWrpFmDev->fm_class = class_create(THIS_MODULE, p_LnxWrpFmDev->name);
++ if (IS_ERR(p_LnxWrpFmDev->fm_class)) {
++ unregister_chrdev(p_LnxWrpFmDev->major, p_LnxWrpFmDev->name);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("class_create error fm_class"));
++ return -EIO;
++ }
++
++ device_create(p_LnxWrpFmDev->fm_class, NULL, MKDEV(p_LnxWrpFmDev->major, DEV_FM_MINOR_BASE), NULL,
++ "fm%d", p_LnxWrpFmDev->id);
++ device_create(p_LnxWrpFmDev->fm_class, NULL, MKDEV(p_LnxWrpFmDev->major, DEV_FM_PCD_MINOR_BASE), NULL,
++ "fm%d-pcd", p_LnxWrpFmDev->id);
++ dev_set_drvdata(p_LnxWrpFmDev->dev, p_LnxWrpFmDev);
++
++ /* create sysfs entries for stats and regs */
++	if (fm_sysfs_create(p_LnxWrpFmDev->dev) != 0)
++ {
++ FreeFmDev(p_LnxWrpFmDev);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Unable to create sysfs entry - fm!!!"));
++ return -EIO;
++ }
++
++#ifdef CONFIG_PM
++ device_set_wakeup_capable(p_LnxWrpFmDev->dev, true);
++#endif
++
++ DBG(TRACE, ("FM%d probed", p_LnxWrpFmDev->id));
++
++ return 0;
++}
++
++static int fm_remove(struct platform_device *of_dev)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ struct device *dev;
++
++ dev = &of_dev->dev;
++ p_LnxWrpFmDev = dev_get_drvdata(dev);
++
++ fm_sysfs_destroy(dev);
++
++ DBG(TRACE, ("destroy fm_class"));
++ device_destroy(p_LnxWrpFmDev->fm_class, MKDEV(p_LnxWrpFmDev->major, DEV_FM_MINOR_BASE));
++ device_destroy(p_LnxWrpFmDev->fm_class, MKDEV(p_LnxWrpFmDev->major, DEV_FM_PCD_MINOR_BASE));
++ class_destroy(p_LnxWrpFmDev->fm_class);
++
++ /* Destroy chardev */
++ unregister_chrdev(p_LnxWrpFmDev->major, p_LnxWrpFmDev->name);
++
++ FreeFmDev(p_LnxWrpFmDev);
++
++ DestroyFmDev(p_LnxWrpFmDev);
++
++ dev_set_drvdata(dev, NULL);
++
++ return 0;
++}
++
++static const struct of_device_id fm_match[] = {
++ {
++ .compatible = "fsl,fman"
++ },
++ {}
++};
++#ifndef MODULE
++MODULE_DEVICE_TABLE(of, fm_match);
++#endif /* !MODULE */
++
++#ifdef CONFIG_PM
++
++#define SCFG_FMCLKDPSLPCR_ADDR 0xFFE0FC00C
++#define SCFG_FMCLKDPSLPCR_DS_VAL 0x48402000
++#define SCFG_FMCLKDPSLPCR_NORMAL_VAL 0x00402000
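++
++/* SCFG_FMCLKDPSLPCR gates the FM clock around deep sleep: the _DS value
++ * is written on suspend and the _NORMAL value on resume (the exact bit
++ * meanings are an assumption based on the names). */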
++
++struct device *g_fm_dev;
++
++static int fm_soc_suspend(struct device *dev)
++{
++	int err = 0;
++	uint32_t *fmclk;
++	t_LnxWrpFmDev *p_LnxWrpFmDev = dev_get_drvdata(get_device(dev));
++
++	g_fm_dev = dev;
++	/* switch the FM clock to its deep-sleep setting */
++	fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4);
++	if (fmclk) {
++		WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_DS_VAL);
++		iounmap(fmclk);
++	}
++ if (p_LnxWrpFmDev->h_DsarRxPort)
++ {
++#ifdef CONFIG_FSL_QORIQ_PM
++ device_set_wakeup_enable(p_LnxWrpFmDev->dev, 1);
++#endif
++ err = FM_PORT_EnterDsarFinal(p_LnxWrpFmDev->h_DsarRxPort,
++ p_LnxWrpFmDev->h_DsarTxPort);
++ }
++ return err;
++}
++
++static int fm_soc_resume(struct device *dev)
++{
++	t_LnxWrpFmDev *p_LnxWrpFmDev = dev_get_drvdata(get_device(dev));
++	uint32_t *fmclk;
++
++	/* restore the normal FM clock setting */
++	fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4);
++	if (fmclk) {
++		WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_NORMAL_VAL);
++		iounmap(fmclk);
++	}
++ if (p_LnxWrpFmDev->h_DsarRxPort)
++ {
++#ifdef CONFIG_FSL_QORIQ_PM
++ device_set_wakeup_enable(p_LnxWrpFmDev->dev, 0);
++#endif
++ FM_PORT_ExitDsar(p_LnxWrpFmDev->h_DsarRxPort,
++ p_LnxWrpFmDev->h_DsarTxPort);
++ p_LnxWrpFmDev->h_DsarRxPort = 0;
++ p_LnxWrpFmDev->h_DsarTxPort = 0;
++ }
++ return 0;
++}
++
++static const struct dev_pm_ops fm_pm_ops = {
++ .suspend = fm_soc_suspend,
++ .resume = fm_soc_resume,
++};
++
++#define FM_PM_OPS (&fm_pm_ops)
++
++#else /* CONFIG_PM */
++
++#define FM_PM_OPS NULL
++
++#endif /* CONFIG_PM */
++
++static struct platform_driver fm_driver = {
++ .driver = {
++ .name = "fsl-fman",
++ .of_match_table = fm_match,
++ .owner = THIS_MODULE,
++ .pm = FM_PM_OPS,
++ },
++ .probe = fm_probe,
++ .remove = fm_remove
++};
++
++t_Handle LNXWRP_FM_Init(void)
++{
++ memset(&lnxWrpFm, 0, sizeof(lnxWrpFm));
++ mutex_init(&lnxwrp_mutex);
++
++ /* Register to the DTB for basic FM API */
++ platform_driver_register(&fm_driver);
++
++ return &lnxWrpFm;
++}
++
++t_Error LNXWRP_FM_Free(t_Handle h_LnxWrpFm)
++{
++ platform_driver_unregister(&fm_driver);
++ mutex_destroy(&lnxwrp_mutex);
++
++ return E_OK;
++}
++
++struct fm * fm_bind(struct device *fm_dev)
++{
++ return (struct fm *)(dev_get_drvdata(get_device(fm_dev)));
++}
++EXPORT_SYMBOL(fm_bind);
++
++void fm_unbind(struct fm *fm)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm;
++
++ put_device(p_LnxWrpFmDev->dev);
++}
++EXPORT_SYMBOL(fm_unbind);
++
++struct resource * fm_get_mem_region(struct fm *fm)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm;
++
++ return p_LnxWrpFmDev->res;
++}
++EXPORT_SYMBOL(fm_get_mem_region);
++
++void * fm_get_handle(struct fm *fm)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm;
++
++ return (void *)p_LnxWrpFmDev->h_Dev;
++}
++EXPORT_SYMBOL(fm_get_handle);
++
++void * fm_get_rtc_handle(struct fm *fm)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm;
++
++ return (void *)p_LnxWrpFmDev->h_RtcDev;
++}
++EXPORT_SYMBOL(fm_get_rtc_handle);
++
++struct fm_port * fm_port_bind (struct device *fm_port_dev)
++{
++ return (struct fm_port *)(dev_get_drvdata(get_device(fm_port_dev)));
++}
++EXPORT_SYMBOL(fm_port_bind);
++
++void fm_port_unbind(struct fm_port *port)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
++
++ put_device(p_LnxWrpFmPortDev->dev);
++}
++EXPORT_SYMBOL(fm_port_unbind);
++
++void *fm_port_get_handle(const struct fm_port *port)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
++
++ return (void *)p_LnxWrpFmPortDev->h_Dev;
++}
++EXPORT_SYMBOL(fm_port_get_handle);
++
++u64 *fm_port_get_buffer_time_stamp(const struct fm_port *port,
++ const void *data)
++{
++ return FM_PORT_GetBufferTimeStamp(fm_port_get_handle(port),
++ (void *)data);
++}
++EXPORT_SYMBOL(fm_port_get_buffer_time_stamp);
++
++void fm_port_get_base_addr(const struct fm_port *port, uint64_t *base_addr)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++
++ *base_addr = p_LnxWrpFmPortDev->settings.param.baseAddr;
++}
++EXPORT_SYMBOL(fm_port_get_base_addr);
++
++void fm_port_pcd_bind (struct fm_port *port, struct fm_port_pcd_param *params)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
++
++ p_LnxWrpFmPortDev->pcd_owner_params.cba = params->cba;
++ p_LnxWrpFmPortDev->pcd_owner_params.cbf = params->cbf;
++ p_LnxWrpFmPortDev->pcd_owner_params.dev = params->dev;
++}
++EXPORT_SYMBOL(fm_port_pcd_bind);
++
++void fm_port_get_buff_layout_ext_params(struct fm_port *port, struct fm_port_params *params)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++ struct device_node *fm_node, *port_node;
++ const uint32_t *uint32_prop;
++ int lenp;
++
++ params->data_align = 0;
++ params->manip_extra_space = 0;
++
++ fm_node = GetFmAdvArgsDevTreeNode(((t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev)->id);
++	if (!fm_node) /* no advanced parameters for this FMan */
++ return;
++
++ port_node = GetFmPortAdvArgsDevTreeNode(fm_node,
++ p_LnxWrpFmPortDev->settings.param.portType,
++ p_LnxWrpFmPortDev->settings.param.portId);
++	if (!port_node) /* no advanced parameters for this FMan port */
++ return;
++
++ uint32_prop = (uint32_t *)of_get_property(port_node, "buffer-layout", &lenp);
++ if (uint32_prop) {
++ if (WARN_ON(lenp != sizeof(uint32_t)*2))
++ return;
++
++ params->manip_extra_space = (uint8_t)be32_to_cpu(uint32_prop[0]);
++ params->data_align = (uint16_t)be32_to_cpu(uint32_prop[1]);
++ }
++
++ of_node_put(port_node);
++ of_node_put(fm_node);
++}
++EXPORT_SYMBOL(fm_port_get_buff_layout_ext_params);
++
++uint16_t fm_get_tx_port_channel(struct fm_port *port)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
++
++ return p_LnxWrpFmPortDev->txCh;
++}
++EXPORT_SYMBOL(fm_get_tx_port_channel);
++
++int fm_port_enable (struct fm_port *port)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
++ t_Error err = FM_PORT_Enable(p_LnxWrpFmPortDev->h_Dev);
++
++ return GET_ERROR_TYPE(err);
++}
++EXPORT_SYMBOL(fm_port_enable);
++
++int fm_port_disable(struct fm_port *port)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
++ t_Error err = FM_PORT_Disable(p_LnxWrpFmPortDev->h_Dev);
++
++ return GET_ERROR_TYPE(err);
++}
++EXPORT_SYMBOL(fm_port_disable);
++
++int fm_port_set_rate_limit(struct fm_port *port,
++ uint16_t max_burst_size,
++ uint32_t rate_limit)
++{
++ t_FmPortRateLimit param;
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++ int err = 0;
++
++ param.maxBurstSize = max_burst_size;
++ param.rateLimit = rate_limit;
++ param.rateLimitDivider = 0;
++
++ err = FM_PORT_SetRateLimit(p_LnxWrpFmPortDev->h_Dev, &param);
++ return err;
++}
++EXPORT_SYMBOL(fm_port_set_rate_limit);
++
++int fm_port_del_rate_limit(struct fm_port *port)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++
++ FM_PORT_DeleteRateLimit(p_LnxWrpFmPortDev->h_Dev);
++ return 0;
++}
++EXPORT_SYMBOL(fm_port_del_rate_limit);
++
++void FM_PORT_Dsar_DumpRegs(void);
++int ar_showmem(struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
++{
++ FM_PORT_Dsar_DumpRegs();
++ return 2;
++}
++
++struct auto_res_tables_sizes *fm_port_get_autores_maxsize(
++ struct fm_port *port)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++ return &p_LnxWrpFmPortDev->dsar_table_sizes;
++}
++EXPORT_SYMBOL(fm_port_get_autores_maxsize);
++
++int fm_port_enter_autores_for_deepsleep(struct fm_port *port,
++ struct auto_res_port_params *params)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++ t_LnxWrpFmDev* p_LnxWrpFmDev = (t_LnxWrpFmDev*)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++ p_LnxWrpFmDev->h_DsarRxPort = p_LnxWrpFmPortDev->h_Dev;
++ p_LnxWrpFmDev->h_DsarTxPort = params->h_FmPortTx;
++
++	/* Register other entries under /proc/autoresponse */
++ if (WARN_ON(sizeof(t_FmPortDsarParams) != sizeof(struct auto_res_port_params)))
++ return -EFAULT;
++
++ FM_PORT_EnterDsar(p_LnxWrpFmPortDev->h_Dev, (t_FmPortDsarParams*)params);
++ return 0;
++}
++EXPORT_SYMBOL(fm_port_enter_autores_for_deepsleep);
++
++void fm_port_exit_auto_res_for_deep_sleep(struct fm_port *port_rx,
++ struct fm_port *port_tx)
++{
++}
++EXPORT_SYMBOL(fm_port_exit_auto_res_for_deep_sleep);
++
++int fm_port_get_autores_stats(struct fm_port *port,
++ struct auto_res_port_stats *stats)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++ if (WARN_ON(sizeof(t_FmPortDsarStats) != sizeof(struct auto_res_port_stats)))
++ return -EFAULT;
++ return FM_PORT_GetDsarStats(p_LnxWrpFmPortDev->h_Dev, (t_FmPortDsarStats*)stats);
++}
++EXPORT_SYMBOL(fm_port_get_autores_stats);
++
++int fm_port_suspend(struct fm_port *port)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++ if (!FM_PORT_IsInDsar(p_LnxWrpFmPortDev->h_Dev))
++ return FM_PORT_Disable(p_LnxWrpFmPortDev->h_Dev);
++ else
++ return 0;
++}
++EXPORT_SYMBOL(fm_port_suspend);
++
++int fm_port_resume(struct fm_port *port)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++ if (!FM_PORT_IsInDsar(p_LnxWrpFmPortDev->h_Dev))
++ return FM_PORT_Enable(p_LnxWrpFmPortDev->h_Dev);
++ else
++ return 0;
++}
++EXPORT_SYMBOL(fm_port_resume);
++
++bool fm_port_is_in_auto_res_mode(struct fm_port *port)
++{
++	t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++
++	/* query the underlying FM-Port handle, as fm_port_suspend/resume do */
++	return FM_PORT_IsInDsar(p_LnxWrpFmPortDev->h_Dev);
++}
++EXPORT_SYMBOL(fm_port_is_in_auto_res_mode);
++
++#ifdef CONFIG_FMAN_PFC
++int fm_port_set_pfc_priorities_mapping_to_qman_wq(struct fm_port *port,
++ uint8_t prio, uint8_t wq)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++ int err;
++ int _errno;
++
++ err = FM_PORT_SetPfcPrioritiesMappingToQmanWQ(p_LnxWrpFmPortDev->h_Dev,
++ prio, wq);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_PORT_SetPfcPrioritiesMappingToQmanWQ() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_port_set_pfc_priorities_mapping_to_qman_wq);
++#endif
++
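++/*
++ * Linux-style wrappers over the FM_MAC_* SDK API: each call converts the
++ * returned t_Error into a negative errno via GET_ERROR_TYPE() and logs
++ * failures with pr_err().
++ */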
++int fm_mac_set_exception(struct fm_mac_dev *fm_mac_dev,
++ e_FmMacExceptions exception, bool enable)
++{
++ int err;
++ int _errno;
++
++ err = FM_MAC_SetException(fm_mac_dev, exception, enable);
++
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_SetException() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_set_exception);
++
++int fm_mac_free(struct fm_mac_dev *fm_mac_dev)
++{
++ int err;
++ int _error;
++
++ err = FM_MAC_Free(fm_mac_dev);
++ _error = -GET_ERROR_TYPE(err);
++
++ if (unlikely(_error < 0))
++ pr_err("FM_MAC_Free() = 0x%08x\n", err);
++
++ return _error;
++}
++EXPORT_SYMBOL(fm_mac_free);
++
++struct fm_mac_dev *fm_mac_config(t_FmMacParams *params)
++{
++ struct fm_mac_dev *fm_mac_dev;
++
++ fm_mac_dev = FM_MAC_Config(params);
++ if (unlikely(fm_mac_dev == NULL))
++ pr_err("FM_MAC_Config() failed\n");
++
++ return fm_mac_dev;
++}
++EXPORT_SYMBOL(fm_mac_config);
++
++int fm_mac_config_max_frame_length(struct fm_mac_dev *fm_mac_dev,
++ int len)
++{
++ int err;
++ int _errno;
++
++ err = FM_MAC_ConfigMaxFrameLength(fm_mac_dev, len);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_ConfigMaxFrameLength() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_config_max_frame_length);
++
++int fm_mac_config_pad_and_crc(struct fm_mac_dev *fm_mac_dev, bool enable)
++{
++ int err;
++ int _errno;
++
++ err = FM_MAC_ConfigPadAndCrc(fm_mac_dev, enable);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_ConfigPadAndCrc() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_config_pad_and_crc);
++
++int fm_mac_config_half_duplex(struct fm_mac_dev *fm_mac_dev, bool enable)
++{
++ int err;
++ int _errno;
++
++ err = FM_MAC_ConfigHalfDuplex(fm_mac_dev, enable);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_ConfigHalfDuplex() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_config_half_duplex);
++
++int fm_mac_config_reset_on_init(struct fm_mac_dev *fm_mac_dev, bool enable)
++{
++ int err;
++ int _errno;
++
++ err = FM_MAC_ConfigResetOnInit(fm_mac_dev, enable);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_ConfigResetOnInit() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_config_reset_on_init);
++
++int fm_mac_init(struct fm_mac_dev *fm_mac_dev)
++{
++ int err;
++ int _errno;
++
++ err = FM_MAC_Init(fm_mac_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_Init() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_init);
++
++int fm_mac_get_version(struct fm_mac_dev *fm_mac_dev, uint32_t *version)
++{
++ int err;
++ int _errno;
++
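++	/* note: "GetVesrion" is the spelling of this entry point in the SDK */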
++ err = FM_MAC_GetVesrion(fm_mac_dev, version);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_GetVesrion() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_get_version);
++
++int fm_mac_enable(struct fm_mac_dev *fm_mac_dev)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_MAC_Enable(fm_mac_dev, e_COMM_MODE_RX_AND_TX);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_Enable() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_enable);
++
++int fm_mac_disable(struct fm_mac_dev *fm_mac_dev)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_MAC_Disable(fm_mac_dev, e_COMM_MODE_RX_AND_TX);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_Disable() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_disable);
++
++int fm_mac_resume(struct fm_mac_dev *fm_mac_dev)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_MAC_Resume(fm_mac_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_Resume() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_resume);
++
++int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev,
++ bool enable)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_MAC_SetPromiscuous(fm_mac_dev, enable);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_SetPromiscuous() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_set_promiscuous);
++
++int fm_mac_remove_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
++ t_EnetAddr *mac_addr)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_MAC_RemoveHashMacAddr(fm_mac_dev, mac_addr);
++ _errno = -GET_ERROR_TYPE(err);
++ if (_errno < 0) {
++ pr_err("FM_MAC_RemoveHashMacAddr() = 0x%08x\n", err);
++ return _errno;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(fm_mac_remove_hash_mac_addr);
++
++int fm_mac_add_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
++ t_EnetAddr *mac_addr)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_MAC_AddHashMacAddr(fm_mac_dev, mac_addr);
++ _errno = -GET_ERROR_TYPE(err);
++ if (_errno < 0) {
++ pr_err("FM_MAC_AddHashMacAddr() = 0x%08x\n", err);
++ return _errno;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(fm_mac_add_hash_mac_addr);
++
++int fm_mac_modify_mac_addr(struct fm_mac_dev *fm_mac_dev,
++ uint8_t *addr)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_MAC_ModifyMacAddr(fm_mac_dev, (t_EnetAddr *)addr);
++ _errno = -GET_ERROR_TYPE(err);
++ if (_errno < 0)
++ pr_err("FM_MAC_ModifyMacAddr() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_modify_mac_addr);
++
++int fm_mac_adjust_link(struct fm_mac_dev *fm_mac_dev,
++ bool link, int speed, bool duplex)
++{
++ int _errno;
++ t_Error err;
++
++ if (!link) {
++#if (DPAA_VERSION < 11)
++ FM_MAC_RestartAutoneg(fm_mac_dev);
++#endif
++ return 0;
++ }
++
++ err = FM_MAC_AdjustLink(fm_mac_dev, speed, duplex);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_AdjustLink() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_adjust_link);
++
++int fm_mac_enable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_MAC_Enable1588TimeStamp(fm_mac_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_Enable1588TimeStamp() = 0x%08x\n", err);
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_enable_1588_time_stamp);
++
++int fm_mac_disable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_MAC_Disable1588TimeStamp(fm_mac_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MAC_Disable1588TimeStamp() = 0x%08x\n", err);
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_disable_1588_time_stamp);
++
++int fm_mac_set_rx_pause_frames(
++ struct fm_mac_dev *fm_mac_dev, bool en)
++{
++ int _errno;
++ t_Error err;
++
++ /* if rx pause is enabled, do NOT ignore pause frames */
++ err = FM_MAC_SetRxIgnorePauseFrames(fm_mac_dev, !en);
++
++ _errno = -GET_ERROR_TYPE(err);
++ if (_errno < 0)
++ pr_err("FM_MAC_SetRxIgnorePauseFrames() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_set_rx_pause_frames);
++
++#ifdef CONFIG_FMAN_PFC
++int fm_mac_set_tx_pause_frames(struct fm_mac_dev *fm_mac_dev,
++ bool en)
++{
++ int _errno, i;
++ t_Error err;
++
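++	/* With PFC, pause is programmed per class of service: each CoS gets
++	 * its configured quanta on enable, or the "disable" pause time on
++	 * disable, always with the default threshold. */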
++ if (en)
++ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
++ err = FM_MAC_SetTxPauseFrames(fm_mac_dev,
++ i, fsl_fm_pfc_quanta[i],
++ FSL_FM_PAUSE_THRESH_DEFAULT);
++ _errno = -GET_ERROR_TYPE(err);
++ if (_errno < 0) {
++ pr_err("FM_MAC_SetTxPauseFrames() = 0x%08x\n", err);
++ return _errno;
++ }
++ }
++ else
++ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
++ err = FM_MAC_SetTxPauseFrames(fm_mac_dev,
++ i, FSL_FM_PAUSE_TIME_DISABLE,
++ FSL_FM_PAUSE_THRESH_DEFAULT);
++ _errno = -GET_ERROR_TYPE(err);
++ if (_errno < 0) {
++ pr_err("FM_MAC_SetTxPauseFrames() = 0x%08x\n", err);
++ return _errno;
++ }
++ }
++
++ return _errno;
++}
++#else
++int fm_mac_set_tx_pause_frames(struct fm_mac_dev *fm_mac_dev,
++ bool en)
++{
++ int _errno;
++ t_Error err;
++
++ if (en)
++ err = FM_MAC_SetTxAutoPauseFrames(fm_mac_dev,
++ FSL_FM_PAUSE_TIME_ENABLE);
++ else
++ err = FM_MAC_SetTxAutoPauseFrames(fm_mac_dev,
++ FSL_FM_PAUSE_TIME_DISABLE);
++
++ _errno = -GET_ERROR_TYPE(err);
++ if (_errno < 0)
++ pr_err("FM_MAC_SetTxAutoPauseFrames() = 0x%08x\n", err);
++
++ return _errno;
++}
++#endif
++EXPORT_SYMBOL(fm_mac_set_tx_pause_frames);
++
++int fm_rtc_enable(struct fm *fm_dev)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_RTC_Enable(fm_get_rtc_handle(fm_dev), 0);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_Enable = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_enable);
++
++int fm_rtc_disable(struct fm *fm_dev)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_RTC_Disable(fm_get_rtc_handle(fm_dev));
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_Disable = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_disable);
++
++int fm_rtc_get_cnt(struct fm *fm_dev, uint64_t *ts)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_RTC_GetCurrentTime(fm_get_rtc_handle(fm_dev), ts);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_GetCurrentTime = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_get_cnt);
++
++int fm_rtc_set_cnt(struct fm *fm_dev, uint64_t ts)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_RTC_SetCurrentTime(fm_get_rtc_handle(fm_dev), ts);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_SetCurrentTime = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_set_cnt);
++
++int fm_rtc_get_drift(struct fm *fm_dev, uint32_t *drift)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_RTC_GetFreqCompensation(fm_get_rtc_handle(fm_dev),
++ drift);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_GetFreqCompensation = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_get_drift);
++
++int fm_rtc_set_drift(struct fm *fm_dev, uint32_t drift)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_RTC_SetFreqCompensation(fm_get_rtc_handle(fm_dev),
++ drift);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_SetFreqCompensation = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_set_drift);
++
++int fm_rtc_set_alarm(struct fm *fm_dev, uint32_t id,
++ uint64_t time)
++{
++ t_FmRtcAlarmParams alarm;
++ int _errno;
++ t_Error err;
++
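++	/* Register the alarm with no callback (f_AlarmCallback is NULL);
++	 * expiry is presumably observed through the RTC interrupt path. */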
++ alarm.alarmId = id;
++ alarm.alarmTime = time;
++ alarm.f_AlarmCallback = NULL;
++ err = FM_RTC_SetAlarm(fm_get_rtc_handle(fm_dev),
++ &alarm);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_SetAlarm = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_set_alarm);
++
++int fm_rtc_set_fiper(struct fm *fm_dev, uint32_t id,
++ uint64_t fiper)
++{
++ t_FmRtcPeriodicPulseParams pp;
++ int _errno;
++ t_Error err;
++
++ pp.periodicPulseId = id;
++ pp.periodicPulsePeriod = fiper;
++ pp.f_PeriodicPulseCallback = NULL;
++ err = FM_RTC_SetPeriodicPulse(fm_get_rtc_handle(fm_dev), &pp);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_SetPeriodicPulse = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_set_fiper);
++
++#ifdef CONFIG_PTP_1588_CLOCK_DPAA
++int fm_rtc_enable_interrupt(struct fm *fm_dev, uint32_t events)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_RTC_EnableInterrupt(fm_get_rtc_handle(fm_dev),
++ events);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_EnableInterrupt = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_enable_interrupt);
++
++int fm_rtc_disable_interrupt(struct fm *fm_dev, uint32_t events)
++{
++ int _errno;
++ t_Error err;
++
++ err = FM_RTC_DisableInterrupt(fm_get_rtc_handle(fm_dev),
++ events);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_RTC_DisableInterrupt = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_rtc_disable_interrupt);
++#endif
++
++int fm_mac_set_wol(struct fm_port *port, struct fm_mac_dev *fm_mac_dev, bool en)
++{
++ int _errno;
++ t_Error err;
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
++
++ /* Do not set WoL on AR ports */
++ if (FM_PORT_IsInDsar(p_LnxWrpFmPortDev->h_Dev)) {
++ printk(KERN_WARNING "Port is AutoResponse enabled! WoL will not be set on this port!\n");
++ return 0;
++ }
++
++ err = FM_MAC_SetWakeOnLan(fm_mac_dev, en);
++
++ _errno = -GET_ERROR_TYPE(err);
++ if (_errno < 0)
++ pr_err("FM_MAC_SetWakeOnLan() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_mac_set_wol);
++
++void fm_mutex_lock(void)
++{
++ mutex_lock(&lnxwrp_mutex);
++}
++EXPORT_SYMBOL(fm_mutex_lock);
++
++void fm_mutex_unlock(void)
++{
++ mutex_unlock(&lnxwrp_mutex);
++}
++EXPORT_SYMBOL(fm_mutex_unlock);
++
++/* MACsec wrapper functions */
++struct fm_macsec_dev *fm_macsec_config(struct fm_macsec_params *fm_params)
++{
++ struct fm_macsec_dev *fm_macsec_dev;
++
++ fm_macsec_dev = FM_MACSEC_Config((t_FmMacsecParams *)fm_params);
++ if (unlikely(fm_macsec_dev == NULL))
++ pr_err("FM_MACSEC_Config() failed\n");
++
++ return fm_macsec_dev;
++}
++EXPORT_SYMBOL(fm_macsec_config);
++
++int fm_macsec_init(struct fm_macsec_dev *fm_macsec_dev)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_Init(fm_macsec_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_Init() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_init);
++
++int fm_macsec_free(struct fm_macsec_dev *fm_macsec_dev)
++{
++ int err;
++ int _error;
++
++ err = FM_MACSEC_Free(fm_macsec_dev);
++ _error = -GET_ERROR_TYPE(err);
++
++ if (unlikely(_error < 0))
++ pr_err("FM_MACSEC_Free() = 0x%08x\n", err);
++
++ return _error;
++}
++EXPORT_SYMBOL(fm_macsec_free);
++
++int fm_macsec_config_unknown_sci_frame_treatment(struct fm_macsec_dev
++ *fm_macsec_dev,
++ fm_macsec_unknown_sci_frame_treatment treat_mode)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_ConfigUnknownSciFrameTreatment(fm_macsec_dev,
++ treat_mode);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++		pr_err("FM_MACSEC_ConfigUnknownSciFrameTreatment() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_config_unknown_sci_frame_treatment);
++
++int fm_macsec_config_invalid_tags_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
++ bool deliver_uncontrolled)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_ConfigInvalidTagsFrameTreatment(fm_macsec_dev,
++ deliver_uncontrolled);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++		pr_err("FM_MACSEC_ConfigInvalidTagsFrameTreatment() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_config_invalid_tags_frame_treatment);
++
++int fm_macsec_config_kay_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
++ bool discard_uncontrolled)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment(fm_macsec_dev,
++ discard_uncontrolled);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++		pr_err("FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_config_kay_frame_treatment);
++
++int fm_macsec_config_untag_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
++ fm_macsec_untag_frame_treatment treat_mode)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_ConfigUntagFrameTreatment(fm_macsec_dev, treat_mode);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_ConfigUntagFrameTreatment() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_config_untag_frame_treatment);
++
++int fm_macsec_config_pn_exhaustion_threshold(struct fm_macsec_dev *fm_macsec_dev,
++ uint32_t pn_exh_thr)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_ConfigPnExhaustionThreshold(fm_macsec_dev, pn_exh_thr);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_ConfigPnExhaustionThreshold() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_config_pn_exhaustion_threshold);
++
++int fm_macsec_config_keys_unreadable(struct fm_macsec_dev *fm_macsec_dev)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_ConfigKeysUnreadable(fm_macsec_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_ConfigKeysUnreadable() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_config_keys_unreadable);
++
++int fm_macsec_config_sectag_without_sci(struct fm_macsec_dev *fm_macsec_dev)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_ConfigSectagWithoutSCI(fm_macsec_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_ConfigSectagWithoutSCI() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_config_sectag_without_sci);
++
++int fm_macsec_config_exception(struct fm_macsec_dev *fm_macsec_dev,
++ fm_macsec_exception exception, bool enable)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_ConfigException(fm_macsec_dev, exception, enable);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_ConfigException() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_config_exception);
++
++int fm_macsec_get_revision(struct fm_macsec_dev *fm_macsec_dev,
++ int *macsec_revision)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_GetRevision(fm_macsec_dev, macsec_revision);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_GetRevision() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_get_revision);
++
++int fm_macsec_enable(struct fm_macsec_dev *fm_macsec_dev)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_Enable(fm_macsec_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_Enable() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_enable);
++
++int fm_macsec_disable(struct fm_macsec_dev *fm_macsec_dev)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_Disable(fm_macsec_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_Disable() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_disable);
++
++int fm_macsec_set_exception(struct fm_macsec_dev *fm_macsec_dev,
++ fm_macsec_exception exception, bool enable)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SetException(fm_macsec_dev, exception, enable);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SetException() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_set_exception);
++
++/* MACsec SECY wrapper API */
++struct fm_macsec_secy_dev *fm_macsec_secy_config(struct fm_macsec_secy_params *secy_params)
++{
++ struct fm_macsec_secy_dev *fm_macsec_secy;
++
++ fm_macsec_secy = FM_MACSEC_SECY_Config((t_FmMacsecSecYParams *)secy_params);
++	if (unlikely(fm_macsec_secy == NULL))
++ pr_err("FM_MACSEC_SECY_Config() failed\n");
++
++ return fm_macsec_secy;
++}
++EXPORT_SYMBOL(fm_macsec_secy_config);
++
++int fm_macsec_secy_init(struct fm_macsec_secy_dev *fm_macsec_secy_dev)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_Init(fm_macsec_secy_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_Init() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_init);
++
++int fm_macsec_secy_free(struct fm_macsec_secy_dev *fm_macsec_secy_dev)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_Free(fm_macsec_secy_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_Free() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_free);
++
++int fm_macsec_secy_config_sci_insertion_mode(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ fm_macsec_sci_insertion_mode sci_insertion_mode)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_ConfigSciInsertionMode(fm_macsec_secy_dev,
++ sci_insertion_mode);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_ConfigSciInsertionMode() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_config_sci_insertion_mode);
++
++int fm_macsec_secy_config_protect_frames(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ bool protect_frames)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_ConfigProtectFrames(fm_macsec_secy_dev,
++ protect_frames);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_ConfigProtectFrames() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_config_protect_frames);
++
++int fm_macsec_secy_config_replay_window(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ bool replay_protect, uint32_t replay_window)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_ConfigReplayWindow(fm_macsec_secy_dev,
++ replay_protect, replay_window);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_ConfigReplayWindow() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_config_replay_window);
++
++int fm_macsec_secy_config_validation_mode(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ fm_macsec_valid_frame_behavior validate_frames)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_ConfigValidationMode(fm_macsec_secy_dev,
++ validate_frames);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_ConfigValidationMode() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_config_validation_mode);
++
++int fm_macsec_secy_config_confidentiality(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ bool confidentiality_enable,
++ uint32_t confidentiality_offset)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_ConfigConfidentiality(fm_macsec_secy_dev,
++ confidentiality_enable,
++ confidentiality_offset);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_ConfigConfidentiality() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_config_confidentiality);
++
++int fm_macsec_secy_config_point_to_point(struct fm_macsec_secy_dev *fm_macsec_secy_dev)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_ConfigPointToPoint(fm_macsec_secy_dev);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_ConfigPointToPoint() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_config_point_to_point);
++
++int fm_macsec_secy_config_exception(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ fm_macsec_secy_exception exception,
++ bool enable)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_ConfigException(fm_macsec_secy_dev, exception,
++ enable);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_ConfigException() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_config_exception);
++
++int fm_macsec_secy_config_event(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ fm_macsec_secy_event event,
++ bool enable)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_ConfigEvent(fm_macsec_secy_dev, event, enable);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_ConfigEvent() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_config_event);
++
++struct rx_sc_dev *fm_macsec_secy_create_rxsc(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct fm_macsec_secy_sc_params *params)
++{
++ struct rx_sc_dev *rx_sc_dev;
++
++ rx_sc_dev = FM_MACSEC_SECY_CreateRxSc(fm_macsec_secy_dev, (t_FmMacsecSecYSCParams *)params);
++ if (unlikely(rx_sc_dev == NULL))
++ pr_err("FM_MACSEC_SECY_CreateRxSc() failed\n");
++
++ return rx_sc_dev;
++}
++EXPORT_SYMBOL(fm_macsec_secy_create_rxsc);
++
++int fm_macsec_secy_delete_rxsc(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_DeleteRxSc(fm_macsec_secy_dev, sc);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_DeleteRxSc() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_delete_rxsc);
++
++int fm_macsec_secy_create_rx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc, macsec_an_t an,
++ uint32_t lowest_pn, macsec_sa_key_t key)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_CreateRxSa(fm_macsec_secy_dev, sc, an,
++ lowest_pn, key);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_CreateRxSa() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_create_rx_sa);
++
++int fm_macsec_secy_delete_rx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc, macsec_an_t an)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_DeleteRxSa(fm_macsec_secy_dev, sc, an);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_DeleteRxSa() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_delete_rx_sa);
++
++int fm_macsec_secy_rxsa_enable_receive(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_RxSaEnableReceive(fm_macsec_secy_dev, sc, an);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_RxSaEnableReceive() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_rxsa_enable_receive);
++
++int fm_macsec_secy_rxsa_disable_receive(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_RxSaDisableReceive(fm_macsec_secy_dev, sc, an);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_RxSaDisableReceive() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_rxsa_disable_receive);
++
++int fm_macsec_secy_rxsa_update_next_pn(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an, uint32_t updt_next_pn)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_RxSaUpdateNextPn(fm_macsec_secy_dev, sc, an,
++ updt_next_pn);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_RxSaUpdateNextPn() = 0x%08x\n", err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_rxsa_update_next_pn);
++
++int fm_macsec_secy_rxsa_update_lowest_pn(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an, uint32_t updt_lowest_pn)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_RxSaUpdateLowestPn(fm_macsec_secy_dev, sc, an,
++ updt_lowest_pn);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_RxSaUpdateLowestPn() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_rxsa_update_lowest_pn);
++
++int fm_macsec_secy_rxsa_modify_key(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc,
++ macsec_an_t an, macsec_sa_key_t key)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_RxSaModifyKey(fm_macsec_secy_dev, sc, an, key);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_RxSaModifyKey() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_rxsa_modify_key);
++
++int fm_macsec_secy_create_tx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t an, macsec_sa_key_t key)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_CreateTxSa(fm_macsec_secy_dev, an, key);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_CreateTxSa() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_create_tx_sa);
++
++int fm_macsec_secy_delete_tx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t an)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_DeleteTxSa(fm_macsec_secy_dev, an);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_DeleteTxSa() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_delete_tx_sa);
++
++int fm_macsec_secy_txsa_modify_key(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t next_active_an,
++ macsec_sa_key_t key)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_TxSaModifyKey(fm_macsec_secy_dev, next_active_an,
++ key);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_TxSaModifyKey() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_txsa_modify_key);
++
++int fm_macsec_secy_txsa_set_active(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t an)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_TxSaSetActive(fm_macsec_secy_dev, an);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_TxSaSetActive() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_txsa_set_active);
++
++int fm_macsec_secy_txsa_get_active(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ macsec_an_t *p_an)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_TxSaGetActive(fm_macsec_secy_dev, p_an);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_TxSaGetActive() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_txsa_get_active);
++
++int fm_macsec_secy_get_rxsc_phys_id(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ struct rx_sc_dev *sc, uint32_t *sc_phys_id)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_GetRxScPhysId(fm_macsec_secy_dev, sc, sc_phys_id);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_GetRxScPhysId() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_get_rxsc_phys_id);
++
++int fm_macsec_secy_get_txsc_phys_id(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
++ uint32_t *sc_phys_id)
++{
++ int err;
++ int _errno;
++
++ err = FM_MACSEC_SECY_GetTxScPhysId(fm_macsec_secy_dev, sc_phys_id);
++ _errno = -GET_ERROR_TYPE(err);
++ if (unlikely(_errno < 0))
++ pr_err("FM_MACSEC_SECY_GetTxScPhysId() = 0x%08x\n",
++ err);
++
++ return _errno;
++}
++EXPORT_SYMBOL(fm_macsec_secy_get_txsc_phys_id);
++
++static t_Handle h_FmLnxWrp;
++
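++/* Module entry/exit: bring up the FM Linux wrapper once at load time and
++ * keep the handle so fm_unload() can free it. */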
++static int __init __cold fm_load (void)
++{
++ if ((h_FmLnxWrp = LNXWRP_FM_Init()) == NULL)
++ {
++		printk(KERN_ERR "Failed to init FM wrapper!\n");
++ return -ENODEV;
++ }
++
++ printk(KERN_CRIT "Freescale FM module," \
++ " FMD API version %d.%d.%d\n",
++ FMD_API_VERSION_MAJOR,
++ FMD_API_VERSION_MINOR,
++ FMD_API_VERSION_RESPIN);
++ return 0;
++}
++
++static void __exit __cold fm_unload (void)
++{
++ if (h_FmLnxWrp)
++ LNXWRP_FM_Free(h_FmLnxWrp);
++}
++
++module_init(fm_load);
++module_exit(fm_unload);
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.h b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.h
+new file mode 100644
+index 00000000..09832563
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.h
+@@ -0,0 +1,294 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_fm.h
++
++ @Author Shlomi Gridish
++
++ @Description FM Linux wrapper functions.
++
++*/
++
++#ifndef __LNXWRP_FM_H__
++#define __LNXWRP_FM_H__
++
++#include <linux/fsl_qman.h> /* struct qman_fq */
++
++#include "std_ext.h"
++#include "error_ext.h"
++#include "list_ext.h"
++
++#include "lnxwrp_fm_ext.h"
++
++#define FM_MAX_NUM_OF_ADV_SETTINGS 10
++
++#define LNXWRP_FM_NUM_OF_SHARED_PROFILES 16
++
++#if defined(CONFIG_FMAN_DISABLE_OH_TO_REUSE_RESOURCES)
++#define FM_10G_OPENDMA_MIN_TRESHOLD 8 /* 10g minimum threshold if only HC is enabled and no OH port is enabled */
++#define FM_OPENDMA_RX_TX_RAPORT 2 /* RX = 2*TX */
++#else
++#define FM_10G_OPENDMA_MIN_TRESHOLD 7 /* 10g minimum threshold if 7 OH ports are enabled */
++#define FM_OPENDMA_RX_TX_RAPORT 1 /* RX = TX */
++#endif
++#define FM_DEFAULT_TX10G_OPENDMA 8 /* default TX 10g open dmas */
++#define FM_DEFAULT_RX10G_OPENDMA 8 /* default RX 10g open dmas */
++
++#define FRAG_MANIP_SPACE 128
++#define FRAG_DATA_ALIGN 64
++
++#ifndef CONFIG_FSL_FM_MAX_FRAME_SIZE
++#define CONFIG_FSL_FM_MAX_FRAME_SIZE 0
++#endif
++
++#ifndef CONFIG_FSL_FM_RX_EXTRA_HEADROOM
++#define CONFIG_FSL_FM_RX_EXTRA_HEADROOM 16
++#endif
++
++typedef enum {
++ e_NO_PCD = 0,
++ e_FM_PCD_3_TUPLE
++} e_LnxWrpFmPortPcdDefUseCase;
++
++
++typedef struct t_FmTestFq {
++ struct qman_fq fq_base;
++ t_Handle h_Arg;
++} t_FmTestFq;
++
++typedef struct {
++ uint8_t id; /* sw port id, see SW_PORT_ID_TO_HW_PORT_ID() in fm_common.h */
++ int minor;
++ char name[20];
++ bool active;
++ uint64_t phys_baseAddr;
++ uint64_t baseAddr; /* Port's *virtual* address */
++ uint32_t memSize;
++ t_WrpFmPortDevSettings settings;
++ t_FmExtPools opExtPools;
++ uint8_t totalNumOfSchemes;
++ uint8_t schemesBase;
++ uint8_t numOfSchemesUsed;
++ uint32_t pcdBaseQ;
++ uint16_t pcdNumOfQs;
++ struct fm_port_pcd_param pcd_owner_params;
++ e_LnxWrpFmPortPcdDefUseCase defPcd;
++ t_Handle h_DefNetEnv;
++ t_Handle h_Schemes[FM_PCD_KG_NUM_OF_SCHEMES];
++ t_FmBufferPrefixContent buffPrefixContent;
++ t_Handle h_Dev;
++ t_Handle h_DfltVsp;
++ t_Handle h_LnxWrpFmDev;
++ uint16_t txCh;
++ struct device *dev;
++ struct device_attribute *dev_attr_stats;
++ struct device_attribute *dev_attr_regs;
++ struct device_attribute *dev_attr_bmi_regs;
++ struct device_attribute *dev_attr_qmi_regs;
++#if (DPAA_VERSION >= 11)
++ struct device_attribute *dev_attr_ipv4_opt;
++#endif
++ struct device_attribute *dev_attr_dsar_regs;
++ struct device_attribute *dev_attr_dsar_mem;
++ struct auto_res_tables_sizes dsar_table_sizes;
++} t_LnxWrpFmPortDev;
++
++typedef struct {
++ uint8_t id;
++ bool active;
++ uint64_t baseAddr;
++ uint32_t memSize;
++ t_WrpFmMacDevSettings settings;
++ t_Handle h_Dev;
++ t_Handle h_LnxWrpFmDev;
++} t_LnxWrpFmMacDev;
++
++/* Information about all active ports for an FMan.
++ * Note: some ports may be disabled by u-boot and will then not be available. */
++struct fm_active_ports {
++ uint32_t num_oh_ports;
++ uint32_t num_tx_ports;
++ uint32_t num_rx_ports;
++ uint32_t num_tx25_ports;
++ uint32_t num_rx25_ports;
++ uint32_t num_tx10_ports;
++ uint32_t num_rx10_ports;
++};
++
++/* FMan resources precalculated at fm probe time,
++ * based on the available FMan ports. */
++struct fm_resource_settings {
++ /* buffers - fifo sizes */
++ uint32_t tx1g_num_buffers;
++ uint32_t rx1g_num_buffers;
++ uint32_t tx2g5_num_buffers; /* Not supported yet by LLD */
++ uint32_t rx2g5_num_buffers; /* Not supported yet by LLD */
++ uint32_t tx10g_num_buffers;
++ uint32_t rx10g_num_buffers;
++ uint32_t oh_num_buffers;
++ uint32_t shared_ext_buffers;
++
++ /* open DMAs */
++ uint32_t tx_1g_dmas;
++ uint32_t rx_1g_dmas;
++ uint32_t tx_2g5_dmas; /* Not supported yet by LLD */
++ uint32_t rx_2g5_dmas; /* Not supported yet by LLD */
++ uint32_t tx_10g_dmas;
++ uint32_t rx_10g_dmas;
++ uint32_t oh_dmas;
++ uint32_t shared_ext_open_dma;
++
++ /* Tnums */
++ uint32_t tx_1g_tnums;
++ uint32_t rx_1g_tnums;
++ uint32_t tx_2g5_tnums; /* Not supported yet by LLD */
++ uint32_t rx_2g5_tnums; /* Not supported yet by LLD */
++ uint32_t tx_10g_tnums;
++ uint32_t rx_10g_tnums;
++ uint32_t oh_tnums;
++ uint32_t shared_ext_tnums;
++};
++
++typedef struct {
++ uint8_t id;
++ char name[10];
++ bool active;
++ bool pcdActive;
++ bool prsActive;
++ bool kgActive;
++ bool ccActive;
++ bool plcrActive;
++ e_LnxWrpFmPortPcdDefUseCase defPcd;
++ uint32_t usedSchemes;
++ uint8_t totalNumOfSharedSchemes;
++ uint8_t sharedSchemesBase;
++ uint8_t numOfSchemesUsed;
++ uint8_t defNetEnvId;
++ uint64_t fmPhysBaseAddr;
++ uint64_t fmBaseAddr;
++ uint32_t fmMemSize;
++ uint64_t fmMuramPhysBaseAddr;
++ uint64_t fmMuramBaseAddr;
++ uint32_t fmMuramMemSize;
++ uint64_t fmRtcPhysBaseAddr;
++ uint64_t fmRtcBaseAddr;
++ uint32_t fmRtcMemSize;
++ uint64_t fmVspPhysBaseAddr;
++ uint64_t fmVspBaseAddr;
++ uint32_t fmVspMemSize;
++ int irq;
++ int err_irq;
++ t_WrpFmDevSettings fmDevSettings;
++ t_WrpFmPcdDevSettings fmPcdDevSettings;
++ t_Handle h_Dev;
++ uint16_t hcCh;
++
++ t_Handle h_MuramDev;
++ t_Handle h_PcdDev;
++ t_Handle h_RtcDev;
++
++ t_Handle h_DsarRxPort;
++ t_Handle h_DsarTxPort;
++
++ t_LnxWrpFmPortDev hcPort;
++ t_LnxWrpFmPortDev opPorts[FM_MAX_NUM_OF_OH_PORTS-1];
++ t_LnxWrpFmPortDev rxPorts[FM_MAX_NUM_OF_RX_PORTS];
++ t_LnxWrpFmPortDev txPorts[FM_MAX_NUM_OF_TX_PORTS];
++ t_LnxWrpFmMacDev macs[FM_MAX_NUM_OF_MACS];
++ struct fm_active_ports fm_active_ports_info;
++ struct fm_resource_settings fm_resource_settings_info;
++
++ struct device *dev;
++ struct resource *res;
++ int major;
++ struct class *fm_class;
++ struct device_attribute *dev_attr_stats;
++ struct device_attribute *dev_attr_regs;
++ struct device_attribute *dev_attr_risc_load;
++
++ struct device_attribute *dev_pcd_attr_stats;
++ struct device_attribute *dev_plcr_attr_regs;
++ struct device_attribute *dev_prs_attr_regs;
++ struct device_attribute *dev_fm_fpm_attr_regs;
++ struct device_attribute *dev_fm_kg_attr_regs;
++ struct device_attribute *dev_fm_kg_pe_attr_regs;
++ struct device_attribute *dev_attr_muram_free_size;
++ struct device_attribute *dev_attr_fm_ctrl_code_ver;
++
++
++ struct qman_fq *hc_tx_conf_fq, *hc_tx_err_fq, *hc_tx_fq;
++} t_LnxWrpFmDev;
++
++typedef struct {
++ t_LnxWrpFmDev *p_FmDevs[INTG_MAX_NUM_OF_FM];
++} t_LnxWrpFm;
++#define LNXWRP_FM_OBJECT(ptr) LIST_OBJECT(ptr, t_LnxWrpFm, fms[((t_LnxWrpFmDev *)ptr)->id])
++
++
++t_Error LnxwrpFmIOCTL(t_LnxWrpFmDev *p_LnxWrpFmDev, unsigned int cmd, unsigned long arg, bool compat);
++t_Error LnxwrpFmPortIOCTL(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev, unsigned int cmd, unsigned long arg, bool compat);
++
++
++#if 0
++static __inline__ t_Error AllocSchemesForPort(t_LnxWrpFmDev *p_LnxWrpFmDev, uint8_t numSchemes, uint8_t *p_BaseSchemeNum)
++{
++ uint32_t schemeMask;
++ uint8_t i;
++
++ if (!numSchemes)
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++
++ schemeMask = 0x80000000;
++ *p_BaseSchemeNum = 0xff;
++
++ for (i=0; schemeMask && numSchemes; schemeMask>>=1, i++)
++ if ((p_LnxWrpFmDev->usedSchemes & schemeMask) == 0)
++ {
++ p_LnxWrpFmDev->usedSchemes |= schemeMask;
++ numSchemes--;
++ if (*p_BaseSchemeNum==0xff)
++ *p_BaseSchemeNum = i;
++ }
++ else if (*p_BaseSchemeNum!=0xff)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Fragmentation on schemes array!!!"));
++
++ if (numSchemes)
++ RETURN_ERROR(MINOR, E_FULL, ("schemes!!!"));
++ return E_OK;
++}
++#endif
++
++void LnxWrpPCDIOCTLTypeChecking(void);
++void LnxWrpPCDIOCTLEnumChecking(void);
++
++#endif /* __LNXWRP_FM_H__ */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm_port.c b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm_port.c
+new file mode 100644
+index 00000000..00ab4bcb
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm_port.c
+@@ -0,0 +1,1480 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_fm_port.c
++
++ @Description FMD wrapper - FMan port functions.
++
++*/
++
++#include <linux/version.h>
++#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++#ifdef MODVERSIONS
++#include <config/modversions.h>
++#endif /* MODVERSIONS */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/cdev.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#ifndef CONFIG_FMAN_ARM
++#include <linux/fsl/svr.h>
++#endif
++#include <linux/io.h>
++
++#include "sprint_ext.h"
++#include "fm_common.h"
++#include "lnxwrp_fsl_fman.h"
++#include "fm_port_ext.h"
++#if (DPAA_VERSION >= 11)
++#include "fm_vsp_ext.h"
++#endif /* DPAA_VERSION >= 11 */
++#include "fm_ioctls.h"
++#include "lnxwrp_resources.h"
++#include "lnxwrp_sysfs_fm_port.h"
++
++#define __ERR_MODULE__ MODULE_FM
++
++extern struct device_node *GetFmAdvArgsDevTreeNode (uint8_t fmIndx);
++
++/* TODO: duplicated, see lnxwrp_fm.c */
++#define ADD_ADV_CONFIG_NO_RET(_func, _param)\
++do {\
++ if (i < max) {\
++ p_Entry = &p_Entrys[i];\
++ p_Entry->p_Function = _func;\
++ _param\
++ i++;\
++ } else {\
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,\
++ ("Number of advanced-configuration entries exceeded"));\
++ } \
++} while (0)
++
++#ifndef CONFIG_FMAN_ARM
++#define IS_T1023_T1024 (SVR_SOC_VER(mfspr(SPRN_SVR)) == SVR_T1024 || \
++ SVR_SOC_VER(mfspr(SPRN_SVR)) == SVR_T1023)
++#endif
++
++static volatile int hcFrmRcv/* = 0 */;
++static spinlock_t lock;
++
++static enum qman_cb_dqrr_result qm_tx_conf_dqrr_cb(struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry
++ *dq)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = ((t_FmTestFq *) fq)->h_Arg;
++ unsigned long flags;
++
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++{
++ /* extract the HC frame address */
++ uint32_t *hcf_va = XX_PhysToVirt(qm_fd_addr((struct qm_fd *)&dq->fd));
++ int hcf_l = ((struct qm_fd *)&dq->fd)->length20;
++ int i;
++
++ /* 32b byteswap of all data in the HC Frame */
++	for (i = 0; i < hcf_l / 4; ++i)
++ hcf_va[i] =
++ ___constant_swab32(hcf_va[i]);
++}
++#endif
++ FM_PCD_HcTxConf(p_LnxWrpFmDev->h_PcdDev, (t_DpaaFD *)&dq->fd);
++ spin_lock_irqsave(&lock, flags);
++ hcFrmRcv--;
++ spin_unlock_irqrestore(&lock, flags);
++
++ return qman_cb_dqrr_consume;
++}
++
++static enum qman_cb_dqrr_result qm_tx_dqrr_cb(struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dq)
++{
++ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
++ __func__);
++ return qman_cb_dqrr_consume;
++}
++
++static void qm_err_cb(struct qman_portal *portal,
++ struct qman_fq *fq, const struct qm_mr_entry *msg)
++{
++ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
++ __func__);
++}
++
++static struct qman_fq *FqAlloc(t_LnxWrpFmDev * p_LnxWrpFmDev,
++ uint32_t fqid,
++ uint32_t flags, uint16_t channel, uint8_t wq)
++{
++ int _errno;
++ struct qman_fq *fq = NULL;
++ t_FmTestFq *p_FmtFq;
++ struct qm_mcc_initfq initfq;
++
++ p_FmtFq = (t_FmTestFq *) XX_Malloc(sizeof(t_FmTestFq));
++ if (!p_FmtFq) {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
++ return NULL;
++ }
++
++ p_FmtFq->fq_base.cb.dqrr = ((flags & QMAN_FQ_FLAG_NO_ENQUEUE)
++ ? qm_tx_conf_dqrr_cb
++ : qm_tx_dqrr_cb);
++ p_FmtFq->fq_base.cb.ern = qm_err_cb;
++ /* p_FmtFq->fq_base.cb.fqs = qm_err_cb; */
++	/* qm_err_cb would wrongly be called when the FQ is parked */
++ p_FmtFq->fq_base.cb.fqs = NULL;
++ p_FmtFq->h_Arg = (t_Handle) p_LnxWrpFmDev;
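++	/* fqid == 0 requests a dynamically allocated FQID; the FQ is then
++	 * initialized below, so QMAN_FQ_FLAG_NO_MODIFY must be cleared. */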
++ if (fqid == 0) {
++ flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
++ flags &= ~QMAN_FQ_FLAG_NO_MODIFY;
++ } else {
++ flags &= ~QMAN_FQ_FLAG_DYNAMIC_FQID;
++ }
++
++ if (qman_create_fq(fqid, flags, &p_FmtFq->fq_base)) {
++		REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj - qman_create_fq!!!"));
++ XX_Free(p_FmtFq);
++ return NULL;
++ }
++ fq = &p_FmtFq->fq_base;
++
++ if (!(flags & QMAN_FQ_FLAG_NO_MODIFY)) {
++ initfq.we_mask = QM_INITFQ_WE_DESTWQ;
++ initfq.fqd.dest.channel = channel;
++ initfq.fqd.dest.wq = wq;
++
++ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
++ if (unlikely(_errno < 0)) {
++ REPORT_ERROR(MAJOR, E_NO_MEMORY,
++ ("FQ obj - qman_init_fq!!!"));
++ qman_destroy_fq(fq, 0);
++ XX_Free(p_FmtFq);
++ return NULL;
++ }
++ }
++
++ DBG(TRACE,
++ ("fqid %d, flags 0x%08x, channel %d, wq %d", qman_fq_fqid(fq),
++ flags, channel, wq));
++
++ return fq;
++}
++
++static void FqFree(struct qman_fq *fq)
++{
++ int _errno;
++
++ _errno = qman_retire_fq(fq, NULL);
++ if (unlikely(_errno < 0))
++ printk(KERN_WARNING "qman_retire_fq(%u) = %d\n", qman_fq_fqid(fq), _errno);
++
++ _errno = qman_oos_fq(fq);
++ if (unlikely(_errno < 0))
++ printk(KERN_WARNING "qman_oos_fq(%u) = %d\n", qman_fq_fqid(fq), _errno);
++
++ qman_destroy_fq(fq, 0);
++ XX_Free((t_FmTestFq *) fq);
++}
++
++static t_Error QmEnqueueCB(t_Handle h_Arg, void *p_Fd)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *) h_Arg;
++ int _errno, timeout = 1000000;
++ unsigned long flags;
++
++ ASSERT_COND(p_LnxWrpFmDev);
++
++ spin_lock_irqsave(&lock, flags);
++ hcFrmRcv++;
++ spin_unlock_irqrestore(&lock, flags);
++
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++{
++ /* extract the HC frame address */
++ uint32_t *hcf_va = XX_PhysToVirt(qm_fd_addr((struct qm_fd *) p_Fd));
++ int hcf_l = ((struct qm_fd *)p_Fd)->length20;
++ int i;
++
++ /* 32b byteswap of all data in the HC Frame */
++	for (i = 0; i < hcf_l / 4; ++i)
++ hcf_va[i] =
++ ___constant_swab32(hcf_va[i]);
++}
++#endif
++
++ _errno = qman_enqueue(p_LnxWrpFmDev->hc_tx_fq, (struct qm_fd *) p_Fd,
++ 0);
++ if (_errno)
++ RETURN_ERROR(MINOR, E_INVALID_STATE,
++ ("qman_enqueue() failed"));
++
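++	/* Busy-wait until the dequeue callback confirms the HC frame
++	 * (hcFrmRcv drops back to zero), bounded at roughly one second
++	 * (1000000 iterations of udelay(1)). */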
++ while (hcFrmRcv && --timeout) {
++ udelay(1);
++ cpu_relax();
++ }
++ if (timeout == 0) {
++ dump_stack();
++ RETURN_ERROR(MINOR, E_WRITE_FAILED,
++ ("timeout waiting for Tx confirmation"));
++ return E_WRITE_FAILED;
++ }
++
++ return E_OK;
++}
++
++static t_LnxWrpFmPortDev *ReadFmPortDevTreeNode(struct platform_device
++ *of_dev)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++ struct device_node *fm_node, *port_node;
++ struct resource res;
++ const uint32_t *uint32_prop;
++ int _errno = 0, lenp;
++ uint32_t tmp_prop;
++
++#ifdef CONFIG_FMAN_P1023
++ static unsigned char have_oh_port/* = 0 */;
++#endif
++
++ port_node = of_node_get(of_dev->dev.of_node);
++
++ /* Get the FM node */
++ fm_node = of_get_parent(port_node);
++ if (unlikely(fm_node == NULL)) {
++ REPORT_ERROR(MAJOR, E_NO_DEVICE,
++ ("of_get_parent() = %d", _errno));
++ return NULL;
++ }
++
++ p_LnxWrpFmDev =
++ dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
++ of_node_put(fm_node);
++
++ /* if fm_probe() failed, no point in going further with port probing */
++ if (p_LnxWrpFmDev == NULL)
++ return NULL;
++
++ uint32_prop =
++ (uint32_t *) of_get_property(port_node, "cell-index", &lenp);
++ if (unlikely(uint32_prop == NULL)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("of_get_property(%s, cell-index) failed",
++ port_node->full_name));
++ return NULL;
++ }
++ tmp_prop = be32_to_cpu(*uint32_prop);
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ return NULL;
++ if (of_device_is_compatible(port_node, "fsl,fman-port-oh")) {
++ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_OH_PORTS)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("of_get_property(%s, cell-index) failed",
++ port_node->full_name));
++ return NULL;
++ }
++
++#ifdef CONFIG_FMAN_P1023
++		/* Beware: this is only valid when a single
++		   FMan is being initialized */
++ if (!have_oh_port) {
++ have_oh_port = 1; /* first OP/HC port
++ is used for host command */
++#else
++		/* The use of OH port 1 (with cell-index 0)
++		   is hardcoded here */
++ if (tmp_prop == 0) {
++#endif
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->hcPort;
++ p_LnxWrpFmPortDev->id = 0;
++ /*
++ p_LnxWrpFmPortDev->id = *uint32_prop-1;
++ p_LnxWrpFmPortDev->id = *uint32_prop;
++ */
++ p_LnxWrpFmPortDev->settings.param.portType =
++ e_FM_PORT_TYPE_OH_HOST_COMMAND;
++ } else {
++ p_LnxWrpFmPortDev =
++ &p_LnxWrpFmDev->opPorts[tmp_prop - 1];
++			p_LnxWrpFmPortDev->id = tmp_prop - 1;
++ p_LnxWrpFmPortDev->settings.param.portType =
++ e_FM_PORT_TYPE_OH_OFFLINE_PARSING;
++ }
++ p_LnxWrpFmPortDev->settings.param.portId = tmp_prop;
++
++ uint32_prop =
++ (uint32_t *) of_get_property(port_node,
++ "fsl,qman-channel-id",
++ &lenp);
++ if (uint32_prop == NULL) {
++ /*
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("missing fsl,qman-channel-id"));
++ */
++ XX_Print("FM warning: missing fsl,qman-channel-id"
++ " for OH port.\n");
++ return NULL;
++ }
++ tmp_prop = be32_to_cpu(*uint32_prop);
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ return NULL;
++ p_LnxWrpFmPortDev->txCh = tmp_prop;
++
++ p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams.
++ qmChannel = p_LnxWrpFmPortDev->txCh;
++ } else if (of_device_is_compatible(port_node, "fsl,fman-port-1g-tx")) {
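++		/* cell-index carries the hardware port id; 1G Tx ports
++		 * start at 0x28, so rebase to a zero-based txPorts[] index. */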
++ tmp_prop -= 0x28;
++ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_1G_TX_PORTS)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("of_get_property(%s, cell-index) failed",
++ port_node->full_name));
++ return NULL;
++ }
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[tmp_prop];
++
++ p_LnxWrpFmPortDev->id = tmp_prop;
++ p_LnxWrpFmPortDev->settings.param.portId =
++ p_LnxWrpFmPortDev->id;
++ p_LnxWrpFmPortDev->settings.param.portType = e_FM_PORT_TYPE_TX;
++
++ uint32_prop = (uint32_t *) of_get_property(port_node,
++ "fsl,qman-channel-id", &lenp);
++ if (uint32_prop == NULL) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("missing fsl,qman-channel-id"));
++ return NULL;
++ }
++ tmp_prop = be32_to_cpu(*uint32_prop);
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ return NULL;
++ p_LnxWrpFmPortDev->txCh = tmp_prop;
++ p_LnxWrpFmPortDev->
++ settings.param.specificParams.nonRxParams.qmChannel =
++ p_LnxWrpFmPortDev->txCh;
++ } else if (of_device_is_compatible(port_node, "fsl,fman-port-10g-tx")) {
++ tmp_prop -= 0x30;
++		if (unlikely(tmp_prop >= FM_MAX_NUM_OF_10G_TX_PORTS)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("of_get_property(%s, cell-index) failed",
++ port_node->full_name));
++ return NULL;
++ }
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[tmp_prop +
++ FM_MAX_NUM_OF_1G_TX_PORTS];
++#ifndef CONFIG_FMAN_ARM
++ if (IS_T1023_T1024)
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[*uint32_prop];
++#endif
++
++ p_LnxWrpFmPortDev->id = tmp_prop;
++ p_LnxWrpFmPortDev->settings.param.portId =
++ p_LnxWrpFmPortDev->id;
++ p_LnxWrpFmPortDev->settings.param.portType =
++ e_FM_PORT_TYPE_TX_10G;
++ uint32_prop = (uint32_t *) of_get_property(port_node,
++ "fsl,qman-channel-id", &lenp);
++ if (uint32_prop == NULL) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("missing fsl,qman-channel-id"));
++ return NULL;
++ }
++ tmp_prop = be32_to_cpu(*uint32_prop);
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ return NULL;
++ p_LnxWrpFmPortDev->txCh = tmp_prop;
++ p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams.
++ qmChannel = p_LnxWrpFmPortDev->txCh;
++ } else if (of_device_is_compatible(port_node, "fsl,fman-port-1g-rx")) {
++ tmp_prop -= 0x08;
++ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_1G_RX_PORTS)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("of_get_property(%s, cell-index) failed",
++ port_node->full_name));
++ return NULL;
++ }
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[tmp_prop];
++
++ p_LnxWrpFmPortDev->id = tmp_prop;
++ p_LnxWrpFmPortDev->settings.param.portId =
++ p_LnxWrpFmPortDev->id;
++ p_LnxWrpFmPortDev->settings.param.portType = e_FM_PORT_TYPE_RX;
++ if (p_LnxWrpFmDev->pcdActive)
++ p_LnxWrpFmPortDev->defPcd = p_LnxWrpFmDev->defPcd;
++ } else if (of_device_is_compatible(port_node, "fsl,fman-port-10g-rx")) {
++ tmp_prop -= 0x10;
++ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_10G_RX_PORTS)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("of_get_property(%s, cell-index) failed",
++ port_node->full_name));
++ return NULL;
++ }
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[tmp_prop +
++ FM_MAX_NUM_OF_1G_RX_PORTS];
++
++#ifndef CONFIG_FMAN_ARM
++ if (IS_T1023_T1024)
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[*uint32_prop];
++#endif
++
++ p_LnxWrpFmPortDev->id = tmp_prop;
++ p_LnxWrpFmPortDev->settings.param.portId =
++ p_LnxWrpFmPortDev->id;
++ p_LnxWrpFmPortDev->settings.param.portType =
++ e_FM_PORT_TYPE_RX_10G;
++ if (p_LnxWrpFmDev->pcdActive)
++ p_LnxWrpFmPortDev->defPcd = p_LnxWrpFmDev->defPcd;
++ } else {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal port type"));
++ return NULL;
++ }
++
++ _errno = of_address_to_resource(port_node, 0, &res);
++ if (unlikely(_errno < 0)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("of_address_to_resource() = %d", _errno));
++ return NULL;
++ }
++
++ p_LnxWrpFmPortDev->dev = &of_dev->dev;
++ p_LnxWrpFmPortDev->baseAddr = 0;
++ p_LnxWrpFmPortDev->phys_baseAddr = res.start;
++ p_LnxWrpFmPortDev->memSize = res.end + 1 - res.start;
++ p_LnxWrpFmPortDev->settings.param.h_Fm = p_LnxWrpFmDev->h_Dev;
++ p_LnxWrpFmPortDev->h_LnxWrpFmDev = (t_Handle) p_LnxWrpFmDev;
++
++ of_node_put(port_node);
++
++ p_LnxWrpFmPortDev->active = TRUE;
++
++#if defined(CONFIG_FMAN_DISABLE_OH_TO_REUSE_RESOURCES)
++ /* for performance mode no OH port available. */
++ if (p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ p_LnxWrpFmPortDev->active = FALSE;
++#endif
++
++ return p_LnxWrpFmPortDev;
++}
++
++struct device_node * GetFmPortAdvArgsDevTreeNode (struct device_node *fm_node,
++ e_FmPortType portType,
++ uint8_t portId)
++{
++ struct device_node *port_node;
++ const uint32_t *uint32_prop;
++ int lenp;
++ char *portTypeString;
++ uint32_t tmp_prop;
++
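++	/* Map the port type to its "extended-args" compatible string, then
++	 * scan the FMan node's children for a matching cell-index. */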
++	switch (portType) {
++ case e_FM_PORT_TYPE_OH_OFFLINE_PARSING:
++ portTypeString = "fsl,fman-port-op-extended-args";
++ break;
++ case e_FM_PORT_TYPE_TX:
++ portTypeString = "fsl,fman-port-1g-tx-extended-args";
++ break;
++ case e_FM_PORT_TYPE_TX_10G:
++ portTypeString = "fsl,fman-port-10g-tx-extended-args";
++ break;
++ case e_FM_PORT_TYPE_RX:
++ portTypeString = "fsl,fman-port-1g-rx-extended-args";
++ break;
++ case e_FM_PORT_TYPE_RX_10G:
++ portTypeString = "fsl,fman-port-10g-rx-extended-args";
++ break;
++ default:
++ return NULL;
++ }
++
++ for_each_child_of_node(fm_node, port_node) {
++ uint32_prop = (uint32_t *)of_get_property(port_node, "cell-index", &lenp);
++ if (unlikely(uint32_prop == NULL)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
++ ("of_get_property(%s, cell-index) failed",
++ port_node->full_name));
++ return NULL;
++ }
++ tmp_prop = be32_to_cpu(*uint32_prop);
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ return NULL;
++ if ((portId == tmp_prop) &&
++ (of_device_is_compatible(port_node, portTypeString))) {
++ return port_node;
++ }
++ }
++
++ return NULL;
++}
++
++static t_Error CheckNConfigFmPortAdvArgs (t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
++{
++ struct device_node *fm_node, *port_node;
++ t_Error err;
++ t_FmPortRsrc portRsrc;
++ const uint32_t *uint32_prop;
++ /*const char *str_prop;*/
++ int lenp;
++#ifdef CONFIG_FMAN_PFC
++ uint8_t i, id, num_pools;
++ t_FmBufPoolDepletion poolDepletion;
++
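++	/* For Rx ports under PFC, enable single-pool-mode buffer depletion
++	 * on every external pool the port uses, for all PFC priorities. */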
++ if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX ||
++ p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX_10G) {
++ memset(&poolDepletion, 0, sizeof(t_FmBufPoolDepletion));
++ poolDepletion.singlePoolModeEnable = true;
++ num_pools = p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.
++ extBufPools.numOfPoolsUsed;
++ for (i = 0; i < num_pools; i++) {
++ id = p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.
++ extBufPools.extBufPool[i].id;
++ poolDepletion.poolsToConsiderForSingleMode[id] = true;
++ }
++
++ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++)
++ poolDepletion.pfcPrioritiesEn[i] = true;
++
++ err = FM_PORT_ConfigPoolDepletion(p_LnxWrpFmPortDev->h_Dev,
++ &poolDepletion);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, ("FM_PORT_ConfigPoolDepletion() failed"));
++ }
++#endif
++
++ fm_node = GetFmAdvArgsDevTreeNode(((t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev)->id);
++ if (!fm_node) /* no advance parameters for FMan */
++ return E_OK;
++
++ port_node = GetFmPortAdvArgsDevTreeNode(fm_node,
++ p_LnxWrpFmPortDev->settings.param.portType,
++ p_LnxWrpFmPortDev->settings.param.portId);
++ if (!port_node) /* no advance parameters for FMan-Port */
++ return E_OK;
++
++ uint32_prop = (uint32_t *)of_get_property(port_node, "num-tnums", &lenp);
++ if (uint32_prop) {
++ if (WARN_ON(lenp != sizeof(uint32_t)*2))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++
++ portRsrc.num = be32_to_cpu(uint32_prop[0]);
++ portRsrc.extra = be32_to_cpu(uint32_prop[1]);
++
++ if ((err = FM_PORT_ConfigNumOfTasks(p_LnxWrpFmPortDev->h_Dev,
++ &portRsrc)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ uint32_prop = (uint32_t *)of_get_property(port_node, "num-dmas", &lenp);
++ if (uint32_prop) {
++ if (WARN_ON(lenp != sizeof(uint32_t)*2))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++
++ portRsrc.num = be32_to_cpu(uint32_prop[0]);
++ portRsrc.extra = be32_to_cpu(uint32_prop[1]);
++
++ if ((err = FM_PORT_ConfigNumOfOpenDmas(p_LnxWrpFmPortDev->h_Dev,
++ &portRsrc)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ uint32_prop = (uint32_t *)of_get_property(port_node, "fifo-size", &lenp);
++ if (uint32_prop) {
++ if (WARN_ON(lenp != sizeof(uint32_t)*2))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++
++ portRsrc.num = be32_to_cpu(uint32_prop[0]);
++ portRsrc.extra = be32_to_cpu(uint32_prop[1]);
++
++ if ((err = FM_PORT_ConfigSizeOfFifo(p_LnxWrpFmPortDev->h_Dev,
++ &portRsrc)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ uint32_prop = (uint32_t *)of_get_property(port_node, "errors-to-discard", &lenp);
++ if (uint32_prop) {
++ if (WARN_ON(lenp != sizeof(uint32_t)))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++ if ((err = FM_PORT_ConfigErrorsToDiscard(p_LnxWrpFmPortDev->h_Dev,
++ be32_to_cpu(uint32_prop[0]))) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ uint32_prop = (uint32_t *)of_get_property(port_node, "ar-tables-sizes",
++ &lenp);
++ if (uint32_prop) {
++
++ if (WARN_ON(lenp != sizeof(uint32_t)*8))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++		if (WARN_ON((p_LnxWrpFmPortDev->settings.param.portType !=
++			e_FM_PORT_TYPE_RX) &&
++			(p_LnxWrpFmPortDev->settings.param.portType !=
++			e_FM_PORT_TYPE_RX_10G)))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE,
++				("Auto Response is an Rx port attribute."));
++
++ memset(&p_LnxWrpFmPortDev->dsar_table_sizes, 0, sizeof(struct auto_res_tables_sizes));
++
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_arp_entries =
++ (uint16_t)be32_to_cpu(uint32_prop[0]);
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_echo_ipv4_entries =
++ (uint16_t)be32_to_cpu(uint32_prop[1]);
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_ndp_entries =
++ (uint16_t)be32_to_cpu(uint32_prop[2]);
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_echo_ipv6_entries =
++ (uint16_t)be32_to_cpu(uint32_prop[3]);
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_snmp_ipv4_entries =
++ (uint16_t)be32_to_cpu(uint32_prop[4]);
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_snmp_ipv6_entries =
++ (uint16_t)be32_to_cpu(uint32_prop[5]);
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_snmp_oid_entries =
++ (uint16_t)be32_to_cpu(uint32_prop[6]);
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_snmp_char =
++ (uint16_t)be32_to_cpu(uint32_prop[7]);
++
++ uint32_prop = (uint32_t *)of_get_property(port_node,
++ "ar-filters-sizes", &lenp);
++ if (uint32_prop) {
++ if (WARN_ON(lenp != sizeof(uint32_t)*3))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_ip_prot_filtering =
++ (uint16_t)be32_to_cpu(uint32_prop[0]);
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_tcp_port_filtering =
++ (uint16_t)be32_to_cpu(uint32_prop[1]);
++ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_udp_port_filtering =
++ (uint16_t)be32_to_cpu(uint32_prop[2]);
++ }
++
++ if ((err = FM_PORT_ConfigDsarSupport(p_LnxWrpFmPortDev->h_Dev,
++ (t_FmPortDsarTablesSizes*)&p_LnxWrpFmPortDev->dsar_table_sizes)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++
++ of_node_put(port_node);
++ of_node_put(fm_node);
++
++ return E_OK;
++}
++
++static t_Error CheckNSetFmPortAdvArgs (t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
++{
++ struct device_node *fm_node, *port_node;
++ t_Error err;
++ const uint32_t *uint32_prop;
++ /*const char *str_prop;*/
++ int lenp;
++
++ fm_node = GetFmAdvArgsDevTreeNode(((t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev)->id);
++ if (!fm_node) /* no advance parameters for FMan */
++ return E_OK;
++
++ port_node = GetFmPortAdvArgsDevTreeNode(fm_node,
++ p_LnxWrpFmPortDev->settings.param.portType,
++ p_LnxWrpFmPortDev->settings.param.portId);
++ if (!port_node) /* no advance parameters for FMan-Port */
++ return E_OK;
++
++#if (DPAA_VERSION >= 11)
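++	/* "vsp-window" (DPAA >= 1.1): allocate a range of virtual storage
++	 * profiles for the port and initialize the default VSP used by the
++	 * Linux Ethernet driver. */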
++ uint32_prop = (uint32_t *)of_get_property(port_node, "vsp-window", &lenp);
++ if (uint32_prop) {
++ t_FmPortVSPAllocParams portVSPAllocParams;
++ t_FmVspParams fmVspParams;
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ uint8_t portId;
++
++ p_LnxWrpFmDev = ((t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev);
++
++ if (WARN_ON(lenp != sizeof(uint32_t)*2))
++ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
++
++ if ((p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_TX) ||
++ (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_TX_10G) ||
++ ((p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
++ p_LnxWrpFmPortDev->settings.frag_enabled))
++ return E_OK;
++
++ memset(&portVSPAllocParams, 0, sizeof(portVSPAllocParams));
++ memset(&fmVspParams, 0, sizeof(fmVspParams));
++
++ portVSPAllocParams.numOfProfiles = (uint8_t)be32_to_cpu(uint32_prop[0]);
++ portVSPAllocParams.dfltRelativeId = (uint8_t)be32_to_cpu(uint32_prop[1]);
++ fmVspParams.h_Fm = p_LnxWrpFmDev->h_Dev;
++
++ fmVspParams.portParams.portType = p_LnxWrpFmPortDev->settings.param.portType;
++ fmVspParams.portParams.portId = p_LnxWrpFmPortDev->settings.param.portId;
++ fmVspParams.relativeProfileId = portVSPAllocParams.dfltRelativeId;
++
++ if (p_LnxWrpFmPortDev->settings.param.portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
++ {
++ portId = fmVspParams.portParams.portId;
++ if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX_10G){
++#ifndef CONFIG_FMAN_ARM
++ if (!(IS_T1023_T1024))
++#endif
++ portId += FM_MAX_NUM_OF_1G_RX_PORTS;
++ }
++ portVSPAllocParams.h_FmTxPort =
++ p_LnxWrpFmDev->txPorts[portId].h_Dev;
++ fmVspParams.liodnOffset =
++ p_LnxWrpFmDev->rxPorts[portId].settings.param.specificParams.rxParams.liodnOffset;
++ memcpy(&fmVspParams.extBufPools,
++ &p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.extBufPools,
++ sizeof(t_FmExtPools));
++ }
++ else
++ {
++ memcpy(&fmVspParams.extBufPools,
++ &p_LnxWrpFmPortDev->opExtPools,
++ sizeof(t_FmExtPools));
++ }
++
++ if ((err = FM_PORT_VSPAlloc(p_LnxWrpFmPortDev->h_Dev,
++ &portVSPAllocParams)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ /* We're initializing only the default VSP that is used by the Linux Ethernet driver */
++ if ((p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
++ !p_LnxWrpFmPortDev->opExtPools.numOfPoolsUsed)
++ return E_OK;
++
++ p_LnxWrpFmPortDev->h_DfltVsp = FM_VSP_Config(&fmVspParams);
++ if (!p_LnxWrpFmPortDev->h_DfltVsp)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("default-VSP for port!"));
++
++ if ((err = FM_VSP_ConfigBufferPrefixContent(p_LnxWrpFmPortDev->h_DfltVsp,
++ &p_LnxWrpFmPortDev->buffPrefixContent)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++
++ if ((err = FM_VSP_Init(p_LnxWrpFmPortDev->h_DfltVsp)) != E_OK)
++ RETURN_ERROR(MINOR, err, NO_MSG);
++ }
++#else
++ UNUSED(err); UNUSED(uint32_prop); UNUSED(lenp);
++#endif /* (DPAA_VERSION >= 11) */
++
++ of_node_put(port_node);
++ of_node_put(fm_node);
++
++ return E_OK;
++}
++
++static t_Error ConfigureFmPortDev(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev =
++ (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++ struct resource *dev_res;
++
++ if (!p_LnxWrpFmPortDev->active)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("FM port not configured!!!"));
++
++ dev_res =
++ __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res,
++ p_LnxWrpFmPortDev->phys_baseAddr,
++ p_LnxWrpFmPortDev->memSize,
++ "fman-port-hc");
++ if (unlikely(dev_res == NULL))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE,
++ ("__devm_request_region() failed"));
++ p_LnxWrpFmPortDev->baseAddr =
++ PTR_TO_UINT(devm_ioremap
++ (p_LnxWrpFmDev->dev,
++ p_LnxWrpFmPortDev->phys_baseAddr,
++ p_LnxWrpFmPortDev->memSize));
++ if (unlikely(p_LnxWrpFmPortDev->baseAddr == 0))
++ REPORT_ERROR(MAJOR, E_INVALID_STATE,
++ ("devm_ioremap() failed"));
++
++ p_LnxWrpFmPortDev->settings.param.baseAddr =
++ p_LnxWrpFmPortDev->baseAddr;
++
++ return E_OK;
++}
++
++static t_Error InitFmPortDev(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
++{
++#define MY_ADV_CONFIG_CHECK_END \
++ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,\
++ ("Advanced configuration routine"));\
++ if (errCode != E_OK)\
++ RETURN_ERROR(MAJOR, errCode, NO_MSG);\
++ }
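++/* Note (editorial assumption): MY_ADV_CONFIG_CHECK_END pairs with the
++ * ADV_CONFIG_CHECK_START/ADV_CONFIG_CHECK macros used further down; its
++ * trailing '}' closes the if/else chain those macros open, which is why
++ * the define ends in a bare brace. */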
++
++ int i = 0;
++
++ if (!p_LnxWrpFmPortDev->active || p_LnxWrpFmPortDev->h_Dev)
++ return E_INVALID_STATE;
++
++ p_LnxWrpFmPortDev->h_Dev =
++ FM_PORT_Config(&p_LnxWrpFmPortDev->settings.param);
++ if (p_LnxWrpFmPortDev->h_Dev == NULL)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-port"));
++
++#ifndef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
++ if ((p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_TX_10G)
++ || (p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_TX)) {
++ t_Error errCode = E_OK;
++ errCode =
++ FM_PORT_ConfigDeqHighPriority(p_LnxWrpFmPortDev->h_Dev,
++ TRUE);
++ if (errCode != E_OK)
++ RETURN_ERROR(MAJOR, errCode, NO_MSG);
++ errCode =
++ FM_PORT_ConfigDeqPrefetchOption(p_LnxWrpFmPortDev->h_Dev,
++ e_FM_PORT_DEQ_FULL_PREFETCH);
++ if (errCode != E_OK)
++ RETURN_ERROR(MAJOR, errCode, NO_MSG);
++ }
++#endif /* !FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
++
++#ifndef CONFIG_FMAN_ARM
++#ifdef FM_BCB_ERRATA_BMI_SW001
++/* Configure BCB workaround on Rx ports, only for B4860 rev1 */
++#define SVR_SECURITY_MASK 0x00080000
++#define SVR_PERSONALITY_MASK 0x0000FF00
++#define SVR_VER_IGNORE_MASK (SVR_SECURITY_MASK | SVR_PERSONALITY_MASK)
++#define SVR_B4860_REV1_VALUE 0x86800010
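++/* The masked compare below ignores the SVR security-fuse and personality
++ * bits, so any B4860 rev1 variant (secure or not, whatever personality)
++ * matches SVR_B4860_REV1_VALUE and gets the BCB workaround. */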
++
++ if ((p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_RX_10G) ||
++ (p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_RX)) {
++ unsigned int svr;
++
++ svr = mfspr(SPRN_SVR);
++
++ if ((svr & ~SVR_VER_IGNORE_MASK) == SVR_B4860_REV1_VALUE)
++ FM_PORT_ConfigBCBWorkaround(p_LnxWrpFmPortDev->h_Dev);
++ }
++#endif /* FM_BCB_ERRATA_BMI_SW001 */
++#endif /* CONFIG_FMAN_ARM */
++/* Call the driver's advanced configuration routines, if requested:
++ Compare the function pointer of each entry to the available routines,
++ and invoke the matching routine with proper casting of arguments. */
++ while (p_LnxWrpFmPortDev->settings.advConfig[i].p_Function
++ && (i < FM_MAX_NUM_OF_ADV_SETTINGS)) {
++
++/* TODO: Change this MACRO */
++ ADV_CONFIG_CHECK_START(
++ &(p_LnxWrpFmPortDev->settings.advConfig[i]))
++
++ ADV_CONFIG_CHECK(p_LnxWrpFmPortDev->h_Dev,
++ FM_PORT_ConfigBufferPrefixContent,
++ NCSW_PARAMS(1,
++ (t_FmBufferPrefixContent *)))
++
++ if ((p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
++ (p_LnxWrpFmPortDev->settings.frag_enabled == TRUE)) {
++
++ ADV_CONFIG_CHECK(p_LnxWrpFmPortDev->h_Dev,
++ FM_PORT_ConfigExtBufPools,
++ NCSW_PARAMS(1, (t_FmExtPools *)))
++
++ /* this define contains an else */
++ MY_ADV_CONFIG_CHECK_END
++ }
++
++ /* Advance to next advanced configuration entry */
++ i++;
++ }
++
++ if ((p_LnxWrpFmPortDev->settings.param.portType != e_FM_PORT_TYPE_TX) &&
++ (p_LnxWrpFmPortDev->settings.param.portType != e_FM_PORT_TYPE_TX_10G)) {
++ if (FM_PORT_ConfigErrorsToDiscard(p_LnxWrpFmPortDev->h_Dev,
++ (FM_PORT_FRM_ERR_IPRE |
++ FM_PORT_FRM_ERR_IPR_NCSP |
++ FM_PORT_FRM_ERR_CLS_DISCARD)) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++ }
++
++ if (CheckNConfigFmPortAdvArgs(p_LnxWrpFmPortDev) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ if (FM_PORT_Init(p_LnxWrpFmPortDev->h_Dev) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++ if (CheckNSetFmPortAdvArgs(p_LnxWrpFmPortDev) != E_OK)
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++
++/* FMan FIFO sizes behind the scenes:
++ * Using the following formulae (*), under a set of simplifying assumptions (.):
++ * . all ports are configured in Normal Mode (rather than Independent Mode)
++ * . the DPAA Eth driver allocates buffers of size:
++ * . MAXFRM + NET_IP_ALIGN + DPA_PRIV_DATA_SIZE + DPA_PARSE_RESULTS_SIZE
++ * + DPA_HASH_RESULTS_SIZE, i.e.:
++ * MAXFRM + 2 + 16 + sizeof(t_FmPrsResult) + 16, i.e.:
++ * MAXFRM + 66
++ * . excessive buffer pools not accounted for
++ *
++ * * for Rx ports on P4080:
++ * . IFSZ = ceil(max(FMBM_EBMPI[PBS]) / 256) * 256 + 7 * 256
++ * . no internal frame offset (FMBM_RIM[FOF] == 0) - otherwise,
++ * add up to 256 to the above
++ *
++ * * for Rx ports on P1023:
++ * . IFSZ = ceil(second_largest(FMBM_EBMPI[PBS]) / 256) * 256 + 7 * 256,
++ * if at least 2 bpools are configured
++ * . IFSZ = 8 * 256, if only a single bpool is configured
++ *
++ * * for Tx ports:
++ * . IFSZ = ceil(frame_size / 256) * 256 + 3 * 256
++ * + FMBM_TFP[DPDE] * 256, i.e.:
++ * IFSZ = ceil(MAXFRM / 256) * 256 + 3 * 256 + FMBM_TFP[DPDE] * 256
++ *
++ * * for OH ports on P4080:
++ * . IFSZ = ceil(frame_size / 256) * 256 + 1 * 256 + FMBM_PP[MXT] * 256
++ * * for OH ports on P1023:
++ * . IFSZ = ceil(frame_size / 256) * 256 + 3 * 256 + FMBM_TFP[DPDE] * 256
++ * * for both P4080 and P1023:
++ * . (conservative decisions, assuming that BMI must bring the entire
++ * frame, not only the frame header)
++ * . no internal frame offset (FMBM_OIM[FOF] == 0) - otherwise,
++ * add up to 256 to the above
++ *
++ * . for P4080/P5020/P3041/P2040, DPDE is:
++ * > 0 or 1, for 1Gb ports, HW default: 0
++ * > 2..7 (recommended: 3..7) for 10Gb ports, HW default: 3
++ * . for P1023, DPDE should be 1
++ *
++ * . for P1023, MXT is in range (0..31)
++ * . for P4080, MXT is in range (0..63)
++ *
++ */
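++/* Worked example (illustrative arithmetic only): for a Tx port with
++ * MAXFRM = 1522 and DPDE = 3, the Tx formula above gives
++ * IFSZ = ceil(1522 / 256) * 256 + 3 * 256 + 3 * 256
++ * = 6 * 256 + 768 + 768 = 3072 bytes. */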
++#if 0
++ if ((p_LnxWrpFmPortDev->defPcd != e_NO_PCD) &&
++ (InitFmPort3TupleDefPcd(p_LnxWrpFmPortDev) != E_OK))
++ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
++#endif
++ return E_OK;
++}
++
++void fm_set_rx_port_params(struct fm_port *port,
++ struct fm_port_params *params)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) port;
++ int i;
++
++ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.errFqid =
++ params->errq;
++ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.dfltFqid =
++ params->defq;
++ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.extBufPools.
++ numOfPoolsUsed = params->num_pools;
++ for (i = 0; i < params->num_pools; i++) {
++ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.
++ extBufPools.extBufPool[i].id =
++ params->pool_param[i].id;
++ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.
++ extBufPools.extBufPool[i].size =
++ params->pool_param[i].size;
++ }
++
++ p_LnxWrpFmPortDev->buffPrefixContent.privDataSize =
++ params->priv_data_size;
++ p_LnxWrpFmPortDev->buffPrefixContent.passPrsResult =
++ params->parse_results;
++ p_LnxWrpFmPortDev->buffPrefixContent.passHashResult =
++ params->hash_results;
++ p_LnxWrpFmPortDev->buffPrefixContent.passTimeStamp =
++ params->time_stamp;
++ p_LnxWrpFmPortDev->buffPrefixContent.dataAlign =
++ params->data_align;
++ p_LnxWrpFmPortDev->buffPrefixContent.manipExtraSpace =
++ params->manip_extra_space;
++
++ ADD_ADV_CONFIG_START(p_LnxWrpFmPortDev->settings.advConfig,
++ FM_MAX_NUM_OF_ADV_SETTINGS)
++
++ ADD_ADV_CONFIG_NO_RET(FM_PORT_ConfigBufferPrefixContent,
++ ARGS(1,
++ (&p_LnxWrpFmPortDev->
++ buffPrefixContent)));
++
++ ADD_ADV_CONFIG_END InitFmPortDev(p_LnxWrpFmPortDev);
++}
++EXPORT_SYMBOL(fm_set_rx_port_params);
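++/* Illustrative only -- a hypothetical caller (in practice the DPAA
++ * Ethernet driver) is expected to fill struct fm_port_params and hand
++ * it over; err_fqid, dflt_fqid, bpid and bufsize below are made-up names:
++ *
++ * struct fm_port_params params = {
++ * .errq = err_fqid, .defq = dflt_fqid, .num_pools = 1,
++ * };
++ * params.pool_param[0].id = bpid;
++ * params.pool_param[0].size = bufsize;
++ * fm_set_rx_port_params(port, &params);
++ */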
++
++/* this function is called from oh_probe as well, thus it contains oh port
++ * specific parameters (make sure everything is checked) */
++void fm_set_tx_port_params(struct fm_port *port,
++ struct fm_port_params *params)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) port;
++
++ p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams.errFqid =
++ params->errq;
++ p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams.
++ dfltFqid = params->defq;
++
++ p_LnxWrpFmPortDev->buffPrefixContent.privDataSize =
++ params->priv_data_size;
++ p_LnxWrpFmPortDev->buffPrefixContent.passPrsResult =
++ params->parse_results;
++ p_LnxWrpFmPortDev->buffPrefixContent.passHashResult =
++ params->hash_results;
++ p_LnxWrpFmPortDev->buffPrefixContent.passTimeStamp =
++ params->time_stamp;
++ p_LnxWrpFmPortDev->settings.frag_enabled =
++ params->frag_enable;
++ p_LnxWrpFmPortDev->buffPrefixContent.dataAlign =
++ params->data_align;
++ p_LnxWrpFmPortDev->buffPrefixContent.manipExtraSpace =
++ params->manip_extra_space;
++
++ ADD_ADV_CONFIG_START(p_LnxWrpFmPortDev->settings.advConfig,
++ FM_MAX_NUM_OF_ADV_SETTINGS)
++
++ ADD_ADV_CONFIG_NO_RET(FM_PORT_ConfigBufferPrefixContent,
++ ARGS(1,
++ (&p_LnxWrpFmPortDev->
++ buffPrefixContent)));
++
++ /* oh port specific parameter (for fragmentation only) */
++ if ((p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
++ params->num_pools) {
++ int i;
++
++ p_LnxWrpFmPortDev->opExtPools.numOfPoolsUsed = params->num_pools;
++ for (i = 0; i < params->num_pools; i++) {
++ p_LnxWrpFmPortDev->opExtPools.extBufPool[i].id = params->pool_param[i].id;
++ p_LnxWrpFmPortDev->opExtPools.extBufPool[i].size = params->pool_param[i].size;
++ }
++
++ if (p_LnxWrpFmPortDev->settings.frag_enabled)
++ ADD_ADV_CONFIG_NO_RET(FM_PORT_ConfigExtBufPools,
++ ARGS(1, (&p_LnxWrpFmPortDev->opExtPools)));
++ }
++
++ ADD_ADV_CONFIG_END InitFmPortDev(p_LnxWrpFmPortDev);
++}
++EXPORT_SYMBOL(fm_set_tx_port_params);
++
++void fm_mac_set_handle(t_Handle h_lnx_wrp_fm_dev,
++ t_Handle h_fm_mac,
++ int mac_id)
++{
++ t_LnxWrpFmDev *p_lnx_wrp_fm_dev = (t_LnxWrpFmDev *)h_lnx_wrp_fm_dev;
++
++ p_lnx_wrp_fm_dev->macs[mac_id].h_Dev = h_fm_mac;
++ p_lnx_wrp_fm_dev->macs[mac_id].h_LnxWrpFmDev = h_lnx_wrp_fm_dev;
++}
++EXPORT_SYMBOL(fm_mac_set_handle);
++
++static void LnxwrpFmPcdDevExceptionsCb(t_Handle h_App,
++ e_FmPcdExceptions exception)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *) h_App;
++
++ ASSERT_COND(p_LnxWrpFmDev);
++
++ DBG(INFO, ("got fm-pcd exception %d", exception));
++
++ /* do nothing */
++ UNUSED(exception);
++}
++
++static void LnxwrpFmPcdDevIndexedExceptionsCb(t_Handle h_App,
++ e_FmPcdExceptions exception,
++ uint16_t index)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *) h_App;
++
++ ASSERT_COND(p_LnxWrpFmDev);
++
++ DBG(INFO,
++ ("got fm-pcd-indexed exception %d, indx %d", exception, index));
++
++ /* do nothing */
++ UNUSED(exception);
++ UNUSED(index);
++}
++
++static t_Error InitFmPcdDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
++{
++ spin_lock_init(&lock);
++
++ if (p_LnxWrpFmDev->pcdActive) {
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = &p_LnxWrpFmDev->hcPort;
++ t_FmPcdParams fmPcdParams;
++ t_Error err;
++
++ memset(&fmPcdParams, 0, sizeof(fmPcdParams));
++ fmPcdParams.h_Fm = p_LnxWrpFmDev->h_Dev;
++ fmPcdParams.prsSupport = p_LnxWrpFmDev->prsActive;
++ fmPcdParams.kgSupport = p_LnxWrpFmDev->kgActive;
++ fmPcdParams.plcrSupport = p_LnxWrpFmDev->plcrActive;
++ fmPcdParams.ccSupport = p_LnxWrpFmDev->ccActive;
++ fmPcdParams.numOfSchemes = FM_PCD_KG_NUM_OF_SCHEMES;
++
++#ifndef CONFIG_GUEST_PARTITION
++ fmPcdParams.f_Exception = LnxwrpFmPcdDevExceptionsCb;
++ if (fmPcdParams.kgSupport)
++ fmPcdParams.f_ExceptionId =
++ LnxwrpFmPcdDevIndexedExceptionsCb;
++ fmPcdParams.h_App = p_LnxWrpFmDev;
++#endif /* !CONFIG_GUEST_PARTITION */
++
++#ifdef CONFIG_MULTI_PARTITION_SUPPORT
++ fmPcdParams.numOfSchemes = 0;
++ fmPcdParams.numOfClsPlanEntries = 0;
++ fmPcdParams.partitionId = 0;
++#endif /* CONFIG_MULTI_PARTITION_SUPPORT */
++ fmPcdParams.useHostCommand = TRUE;
++
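++ /* The host-command path needs three frame queues (per the FqAlloc
++ * flags below): one enqueue FQ towards the FMan DC portal for
++ * commands, plus two no-enqueue FQs on the HC channel for
++ * confirmations and errors. */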
++ p_LnxWrpFmDev->hc_tx_fq =
++ FqAlloc(p_LnxWrpFmDev,
++ 0,
++ QMAN_FQ_FLAG_TO_DCPORTAL,
++ p_LnxWrpFmPortDev->txCh, 0);
++ if (!p_LnxWrpFmDev->hc_tx_fq)
++ RETURN_ERROR(MAJOR, E_NULL_POINTER,
++ ("Frame queue allocation failed..."));
++
++ p_LnxWrpFmDev->hc_tx_conf_fq =
++ FqAlloc(p_LnxWrpFmDev,
++ 0,
++ QMAN_FQ_FLAG_NO_ENQUEUE,
++ p_LnxWrpFmDev->hcCh, 1);
++ if (!p_LnxWrpFmDev->hc_tx_conf_fq)
++ RETURN_ERROR(MAJOR, E_NULL_POINTER,
++ ("Frame queue allocation failed..."));
++
++ p_LnxWrpFmDev->hc_tx_err_fq =
++ FqAlloc(p_LnxWrpFmDev,
++ 0,
++ QMAN_FQ_FLAG_NO_ENQUEUE,
++ p_LnxWrpFmDev->hcCh, 2);
++ if (!p_LnxWrpFmDev->hc_tx_err_fq)
++ RETURN_ERROR(MAJOR, E_NULL_POINTER,
++ ("Frame queue allocation failed..."));
++
++ fmPcdParams.hc.portBaseAddr = p_LnxWrpFmPortDev->baseAddr;
++ fmPcdParams.hc.portId =
++ p_LnxWrpFmPortDev->settings.param.portId;
++ fmPcdParams.hc.liodnBase =
++ p_LnxWrpFmPortDev->settings.param.liodnBase;
++ fmPcdParams.hc.errFqid =
++ qman_fq_fqid(p_LnxWrpFmDev->hc_tx_err_fq);
++ fmPcdParams.hc.confFqid =
++ qman_fq_fqid(p_LnxWrpFmDev->hc_tx_conf_fq);
++ fmPcdParams.hc.qmChannel = p_LnxWrpFmPortDev->txCh;
++ fmPcdParams.hc.f_QmEnqueue = QmEnqueueCB;
++ fmPcdParams.hc.h_QmArg = (t_Handle) p_LnxWrpFmDev;
++
++ p_LnxWrpFmDev->h_PcdDev = FM_PCD_Config(&fmPcdParams);
++ if (!p_LnxWrpFmDev->h_PcdDev)
++ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM PCD!"));
++
++ err =
++ FM_PCD_ConfigPlcrNumOfSharedProfiles(p_LnxWrpFmDev->h_PcdDev,
++ LNXWRP_FM_NUM_OF_SHARED_PROFILES);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ err = FM_PCD_Init(p_LnxWrpFmDev->h_PcdDev);
++ if (err != E_OK)
++ RETURN_ERROR(MAJOR, err, NO_MSG);
++
++ if (p_LnxWrpFmDev->err_irq == 0) {
++ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
++ e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC,
++ FALSE);
++ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
++ e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW,
++ FALSE);
++ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
++ e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR,
++ FALSE);
++ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
++ e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC,
++ FALSE);
++ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
++ e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC,
++ FALSE);
++ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
++ e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE,
++ FALSE);
++ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
++ e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE,
++ FALSE);
++ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
++ e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC,
++ FALSE);
++ }
++ }
++
++ return E_OK;
++}
++
++void FreeFmPcdDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
++{
++
++ if (p_LnxWrpFmDev->h_PcdDev)
++ FM_PCD_Free(p_LnxWrpFmDev->h_PcdDev);
++
++ if (p_LnxWrpFmDev->hc_tx_err_fq)
++ FqFree(p_LnxWrpFmDev->hc_tx_err_fq);
++
++ if (p_LnxWrpFmDev->hc_tx_conf_fq)
++ FqFree(p_LnxWrpFmDev->hc_tx_conf_fq);
++
++ if (p_LnxWrpFmDev->hc_tx_fq)
++ FqFree(p_LnxWrpFmDev->hc_tx_fq);
++}
++
++static void FreeFmPortDev(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev =
++ (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++
++ if (!p_LnxWrpFmPortDev->active)
++ return;
++
++ if (p_LnxWrpFmPortDev->h_Dev)
++ FM_PORT_Free(p_LnxWrpFmPortDev->h_Dev);
++
++ devm_iounmap(p_LnxWrpFmDev->dev,
++ UINT_TO_PTR(p_LnxWrpFmPortDev->baseAddr));
++ __devm_release_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res,
++ p_LnxWrpFmPortDev->phys_baseAddr,
++ p_LnxWrpFmPortDev->memSize);
++}
++
++static int /*__devinit*/ fm_port_probe(struct platform_device *of_dev)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ struct device *dev;
++
++ dev = &of_dev->dev;
++
++ p_LnxWrpFmPortDev = ReadFmPortDevTreeNode(of_dev);
++ if (p_LnxWrpFmPortDev == NULL)
++ return -EIO;
++ /* Port can be inactive, in which case it will not be probed:
++ - in performance mode, OH ports are disabled
++ ...
++ */
++ if (!p_LnxWrpFmPortDev->active)
++ return 0;
++
++ if (ConfigureFmPortDev(p_LnxWrpFmPortDev) != E_OK)
++ return -EIO;
++
++ dev_set_drvdata(dev, p_LnxWrpFmPortDev);
++
++ if (p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_OH_HOST_COMMAND)
++ InitFmPcdDev((t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev);
++
++ p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++
++ if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX) {
++ Sprint(p_LnxWrpFmPortDev->name, "%s-port-rx%d",
++ p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id);
++ p_LnxWrpFmPortDev->minor =
++ p_LnxWrpFmPortDev->id + DEV_FM_RX_PORTS_MINOR_BASE;
++ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_RX_10G) {
++ Sprint(p_LnxWrpFmPortDev->name, "%s-port-rx%d",
++ p_LnxWrpFmDev->name,
++ p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_RX_PORTS);
++ p_LnxWrpFmPortDev->minor =
++ p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_RX_PORTS +
++ DEV_FM_RX_PORTS_MINOR_BASE;
++#ifndef CONFIG_FMAN_ARM
++ if (IS_T1023_T1024) {
++ Sprint(p_LnxWrpFmPortDev->name, "%s-port-rx%d",
++ p_LnxWrpFmDev->name,
++ p_LnxWrpFmPortDev->id);
++ p_LnxWrpFmPortDev->minor =
++ p_LnxWrpFmPortDev->id +
++ DEV_FM_RX_PORTS_MINOR_BASE;
++ }
++#endif
++ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_TX) {
++ Sprint(p_LnxWrpFmPortDev->name, "%s-port-tx%d",
++ p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id);
++ p_LnxWrpFmPortDev->minor =
++ p_LnxWrpFmPortDev->id + DEV_FM_TX_PORTS_MINOR_BASE;
++ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_TX_10G) {
++ Sprint(p_LnxWrpFmPortDev->name, "%s-port-tx%d",
++ p_LnxWrpFmDev->name,
++ p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_TX_PORTS);
++ p_LnxWrpFmPortDev->minor =
++ p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_TX_PORTS +
++ DEV_FM_TX_PORTS_MINOR_BASE;
++#ifndef CONFIG_FMAN_ARM
++ if (IS_T1023_T1024) {
++ Sprint(p_LnxWrpFmPortDev->name, "%s-port-tx%d",
++ p_LnxWrpFmDev->name,
++ p_LnxWrpFmPortDev->id);
++ p_LnxWrpFmPortDev->minor =
++ p_LnxWrpFmPortDev->id +
++ DEV_FM_TX_PORTS_MINOR_BASE;
++ }
++#endif
++ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_OH_HOST_COMMAND) {
++ Sprint(p_LnxWrpFmPortDev->name, "%s-port-oh%d",
++ p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id);
++ p_LnxWrpFmPortDev->minor =
++ p_LnxWrpFmPortDev->id + DEV_FM_OH_PORTS_MINOR_BASE;
++ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
++ e_FM_PORT_TYPE_OH_OFFLINE_PARSING) {
++ Sprint(p_LnxWrpFmPortDev->name, "%s-port-oh%d",
++ p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id + 1);
++ p_LnxWrpFmPortDev->minor =
++ p_LnxWrpFmPortDev->id + 1 +
++ DEV_FM_OH_PORTS_MINOR_BASE;
++ }
++
++ device_create(p_LnxWrpFmDev->fm_class, NULL,
++ MKDEV(p_LnxWrpFmDev->major, p_LnxWrpFmPortDev->minor),
++ NULL, p_LnxWrpFmPortDev->name);
++
++ /* create sysfs entries for stats and regs */
++
++ if (fm_port_sysfs_create(dev) != 0) {
++ FreeFmPortDev(p_LnxWrpFmPortDev);
++ REPORT_ERROR(MAJOR, E_INVALID_STATE,
++ ("Unable to create sys entry - fm port!!!"));
++ return -EIO;
++ }
++
++#ifdef FM_TX_INVALID_ECC_ERRATA_10GMAC_A009
++ FM_DisableRamsEcc(p_LnxWrpFmDev->h_Dev);
++#endif /* FM_TX_INVALID_ECC_ERRATA_10GMAC_A009 */
++
++ DBG(TRACE, ("%s probed", p_LnxWrpFmPortDev->name));
++
++ return 0;
++}
++
++static int fm_port_remove(struct platform_device *of_dev)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ struct device *dev;
++
++ dev = &of_dev->dev;
++ p_LnxWrpFmPortDev = dev_get_drvdata(dev);
++
++ fm_port_sysfs_destroy(dev);
++
++ p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++ device_destroy(p_LnxWrpFmDev->fm_class,
++ MKDEV(p_LnxWrpFmDev->major, p_LnxWrpFmPortDev->minor));
++
++ FreeFmPortDev(p_LnxWrpFmPortDev);
++
++ dev_set_drvdata(dev, NULL);
++
++ return 0;
++}
++
++static const struct of_device_id fm_port_match[] = {
++ {
++ .compatible = "fsl,fman-port-oh"},
++ {
++ .compatible = "fsl,fman-port-1g-rx"},
++ {
++ .compatible = "fsl,fman-port-10g-rx"},
++ {
++ .compatible = "fsl,fman-port-1g-tx"},
++ {
++ .compatible = "fsl,fman-port-10g-tx"},
++ {}
++};
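++/* Illustrative only -- a hypothetical device-tree node this driver would
++ * bind to (all properties except "compatible" omitted):
++ *
++ * port@88000 {
++ * compatible = "fsl,fman-port-1g-rx";
++ * ...
++ * };
++ */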
++
++#ifndef MODULE
++MODULE_DEVICE_TABLE(of, fm_port_match);
++#endif /* !MODULE */
++
++static struct platform_driver fm_port_driver = {
++ .driver = {
++ .name = "fsl-fman-port",
++ .of_match_table = fm_port_match,
++ .owner = THIS_MODULE,
++ },
++ .probe = fm_port_probe,
++ .remove = fm_port_remove
++};
++
++t_Error LNXWRP_FM_Port_Init(void)
++{
++ /* Register the platform driver so DT FM port nodes bind to the basic FM port API */
++ if (platform_driver_register(&fm_port_driver))
++ return E_NO_DEVICE;
++
++ return E_OK;
++}
++
++void LNXWRP_FM_Port_Free(void)
++{
++ platform_driver_unregister(&fm_port_driver);
++}
++
++static int __init __cold fm_port_load(void)
++{
++ if (LNXWRP_FM_Port_Init() != E_OK) {
++ printk(KERN_CRIT "Failed to init FM Ports wrapper!\n");
++ return -ENODEV;
++ }
++
++ printk(KERN_CRIT "Freescale FM Ports module\n");
++
++ return 0;
++}
++
++static void __exit __cold fm_port_unload(void)
++{
++ LNXWRP_FM_Port_Free();
++}
++
++module_init(fm_port_load);
++module_exit(fm_port_unload);
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c
+new file mode 100644
+index 00000000..1ddde856
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c
+@@ -0,0 +1,4813 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_ioctls_fm.c
++ @Author Shlomi Gridish
++ @Description FM Linux wrapper functions.
++*/
++
++/* Linux Headers ------------------- */
++#include <linux/version.h>
++
++#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++#ifdef MODVERSIONS
++#include <config/modversions.h>
++#endif /* MODVERSIONS */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <linux/of_platform.h>
++#include <linux/uaccess.h>
++#include <asm/errno.h>
++#ifndef CONFIG_FMAN_ARM
++#include <sysdev/fsl_soc.h>
++#include <linux/fsl/svr.h>
++#endif
++
++#if defined(CONFIG_COMPAT)
++#include <linux/compat.h>
++#endif
++
++#include "part_ext.h"
++#include "fm_ioctls.h"
++#include "fm_pcd_ioctls.h"
++#include "fm_port_ioctls.h"
++#include "fm_vsp_ext.h"
++
++#ifndef CONFIG_FMAN_ARM
++#define IS_T1023_T1024 (SVR_SOC_VER(mfspr(SPRN_SVR)) == SVR_T1024 || \
++ SVR_SOC_VER(mfspr(SPRN_SVR)) == SVR_T1023)
++#endif
++
++#define __ERR_MODULE__ MODULE_FM
++
++#if defined(CONFIG_COMPAT)
++#include "lnxwrp_ioctls_fm_compat.h"
++#endif
++
++#include "lnxwrp_fm.h"
++
++#define CMP_IOC_DEFINE(def) (IOC_##def != def)
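++/* CMP_IOC_DEFINE(X) expands to (IOC_X != X), so every "#if CMP_IOC_DEFINE"
++ * below is a compile-time check that the ioctl copy of a constant still
++ * matches its LLD original, failing the build when the two drift apart. */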
++
++/* fm_pcd_ioctls.h === fm_pcd_ext.h assertions */
++#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_PRIVATE_HDRS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_PRS_NUM_OF_HDRS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_GENERIC_REGS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_EXTRACT_MASKS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_DEFAULT_GROUPS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_PRS_NUM_OF_LABELS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_SW_PRS_SIZE)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if DPAA_VERSION >= 11
++#if CMP_IOC_DEFINE(FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES)
++#error Error: please synchronize IOC_ defines!
++#endif
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_TREES)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_GROUPS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_UNITS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_KEYS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_MAX_SIZE_OF_KEY)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(FM_PCD_LAST_KEY_INDEX)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++/* net_ioctls.h === net_ext.h assertions */
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPP_PID)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPP_COMPRESSED)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPPoE_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPPMUX_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPPMUX_SUBFRAME_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_ETH_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPv4_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPv6_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_ICMP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IGMP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_TCP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_SCTP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_DCCP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_UDP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_UDP_ENCAP_ESP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPHC_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_SCTP_CHUNK_DATA_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_L2TPv2_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_L2TPv3_CTRL_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_L2TPv3_SESS_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_VLAN_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_LLC_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_NLPID_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_SNAP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_LLC_SNAP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_ARP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_RFC2684_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_USER_DEFINED_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PAYLOAD_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_GRE_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_MINENCAP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPSEC_AH_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPSEC_ESP_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_MPLS_LABEL_STACK_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++#if CMP_IOC_DEFINE(NET_HEADER_FIELD_MACSEC_ALL_FIELDS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++/* fm_ioctls.h === fm_ext.h assertions */
++#if CMP_IOC_DEFINE(FM_MAX_NUM_OF_VALID_PORTS)
++#error Error: please synchronize IOC_ defines!
++#endif
++
++void LnxWrpPCDIOCTLTypeChecking(void)
++{
++ /* fm_ext.h == fm_ioctls.h */
++ ASSERT_COND(sizeof(ioc_fm_port_bandwidth_params) == sizeof(t_FmPortsBandwidthParams));
++ ASSERT_COND(sizeof(ioc_fm_revision_info_t) == sizeof(t_FmRevisionInfo));
++
++ /* fm_pcd_ext.h == fm_pcd_ioctls.h */
++ /*ioc_fm_pcd_counters_params_t : NOT USED */
++ /*ioc_fm_pcd_exception_params_t : private */
++#if (DPAA_VERSION >= 11)
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_capwap_params_t) == sizeof(t_FmPcdManipFragCapwapParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_capwap_params_t) == sizeof(t_FmPcdManipReassemCapwapParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_by_hdr_params_t) == sizeof(t_FmPcdManipHdrInsrtByHdrParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_ip_params_t) == sizeof(t_FmPcdManipHdrInsrtIpParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_t) == sizeof(t_FmPcdManipHdrInsrt));
++ ASSERT_COND(sizeof(ioc_fm_manip_hdr_info_t) == sizeof(t_FmManipHdrInfo));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_rmv_by_hdr_params_t) == sizeof(t_FmPcdManipHdrRmvByHdrParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_special_offload_capwap_params_t) == sizeof(t_FmPcdManipSpecialOffloadCapwapParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_capwap_stats_t) == sizeof(t_FmPcdManipFragCapwapStats));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_capwap_stats_t) == sizeof(t_FmPcdManipReassemCapwapStats));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_params_t) == sizeof(t_FmPcdManipFragParams));
++#endif /* (DPAA_VERSION >= 11) */
++
++ ASSERT_COND(sizeof(ioc_fm_pcd_prs_label_params_t) == sizeof(t_FmPcdPrsLabelParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_prs_sw_params_t) == sizeof(t_FmPcdPrsSwParams));
++ /*ioc_fm_pcd_kg_dflt_value_params_t : private */
++ ASSERT_COND(sizeof(ioc_fm_pcd_hdr_protocol_opt_u) == sizeof(u_FmPcdHdrProtocolOpt));
++ ASSERT_COND(sizeof(ioc_fm_pcd_fields_u) == sizeof(t_FmPcdFields));
++ ASSERT_COND(sizeof(ioc_fm_pcd_from_hdr_t) == sizeof(t_FmPcdFromHdr));
++ ASSERT_COND(sizeof(ioc_fm_pcd_from_field_t) == sizeof(t_FmPcdFromField));
++ ASSERT_COND(sizeof(ioc_fm_pcd_distinction_unit_t) == sizeof(t_FmPcdDistinctionUnit));
++
++#if defined(CONFIG_ARM64)
++ /* different alignment */
++ ASSERT_COND(sizeof(ioc_fm_pcd_net_env_params_t) == sizeof(t_FmPcdNetEnvParams) + sizeof(void *) + 4);
++#else
++#if !defined(CONFIG_COMPAT)
++ /* different alignment */
++ ASSERT_COND(sizeof(ioc_fm_pcd_net_env_params_t) == sizeof(t_FmPcdNetEnvParams) + sizeof(void *));
++#endif
++#endif
++ ASSERT_COND(sizeof(ioc_fm_pcd_extract_entry_t) == sizeof(t_FmPcdExtractEntry));
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_extract_mask_t) == sizeof(t_FmPcdKgExtractMask));
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_extract_dflt_t) == sizeof(t_FmPcdKgExtractDflt));
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_key_extract_and_hash_params_t) == sizeof(t_FmPcdKgKeyExtractAndHashParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_extracted_or_params_t) == sizeof(t_FmPcdKgExtractedOrParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_scheme_counter_t) == sizeof(t_FmPcdKgSchemeCounter));
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_plcr_profile_t) == sizeof(t_FmPcdKgPlcrProfile));
++#if (DPAA_VERSION >= 11)
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_storage_profile_t) == sizeof(t_FmPcdKgStorageProfile));
++#endif
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_cc_t) == sizeof(t_FmPcdKgCc));
++#if !defined(CONFIG_COMPAT)
++ /* different alignment */
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_scheme_params_t) == sizeof(t_FmPcdKgSchemeParams) + sizeof(void *));
++#endif
++ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_cc_params_t) == sizeof(t_FmPcdCcNextCcParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_plcr_params_t) == sizeof(t_FmPcdCcNextPlcrParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_enqueue_params_t) == sizeof(t_FmPcdCcNextEnqueueParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_kg_params_t) == sizeof(t_FmPcdCcNextKgParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_engine_params_t) == sizeof(t_FmPcdCcNextEngineParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_cc_key_params_t) == sizeof(t_FmPcdCcKeyParams));
++ ASSERT_COND(sizeof(ioc_keys_params_t) == sizeof(t_KeysParams));
++#if !defined(CONFIG_COMPAT)
++ /* different alignment */
++ ASSERT_COND(sizeof(ioc_fm_pcd_cc_node_params_t) == sizeof(t_FmPcdCcNodeParams) + sizeof(void *));
++ ASSERT_COND(sizeof(ioc_fm_pcd_hash_table_params_t) == sizeof(t_FmPcdHashTableParams) + sizeof(void *));
++#endif
++ ASSERT_COND(sizeof(ioc_fm_pcd_cc_grp_params_t) == sizeof(t_FmPcdCcGrpParams));
++#if !defined(CONFIG_COMPAT)
++ /* different alignment */
++ ASSERT_COND(sizeof(ioc_fm_pcd_cc_tree_params_t) == sizeof(t_FmPcdCcTreeParams) + sizeof(void *));
++#endif
++ ASSERT_COND(sizeof(ioc_fm_pcd_plcr_byte_rate_mode_param_t) == sizeof(t_FmPcdPlcrByteRateModeParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_plcr_non_passthrough_alg_param_t) == sizeof(t_FmPcdPlcrNonPassthroughAlgParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_plcr_next_engine_params_u) == sizeof(u_FmPcdPlcrNextEngineParams));
++ /*ioc_fm_pcd_port_params_t : private */
++ ASSERT_COND(sizeof(ioc_fm_pcd_plcr_profile_params_t) == sizeof(t_FmPcdPlcrProfileParams) + sizeof(void *));
++ /*ioc_fm_pcd_cc_tree_modify_next_engine_params_t : private */
++
++#ifdef FM_CAPWAP_SUPPORT
++#error TODO: unsupported feature
++/*
++ ASSERT_COND(sizeof(TODO) == sizeof(t_FmPcdManipHdrInsrtByTemplateParams));
++ ASSERT_COND(sizeof(TODO) == sizeof(t_CapwapFragmentationParams));
++ ASSERT_COND(sizeof(TODO) == sizeof(t_CapwapReassemblyParams));
++*/
++#endif
++
++ /*ioc_fm_pcd_cc_node_modify_next_engine_params_t : private */
++ /*ioc_fm_pcd_cc_node_remove_key_params_t : private */
++ /*ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t : private */
++ /*ioc_fm_pcd_cc_node_modify_key_params_t : private */
++ /*ioc_fm_manip_hdr_info_t : private */
++ /*ioc_fm_pcd_hash_table_set_t : private */
++
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_ip_params_t) == sizeof(t_FmPcdManipFragIpParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_ip_params_t) == sizeof(t_FmPcdManipReassemIpParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_special_offload_ipsec_params_t) == sizeof(t_FmPcdManipSpecialOffloadIPSecParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_special_offload_params_t) == sizeof(t_FmPcdManipSpecialOffloadParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_rmv_generic_params_t) == sizeof(t_FmPcdManipHdrRmvGenericParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_generic_params_t) == sizeof(t_FmPcdManipHdrInsrtGenericParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_params_t) == sizeof(t_FmPcdManipHdrInsrtParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_rmv_params_t) == sizeof(t_FmPcdManipHdrRmvParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_params_t) == sizeof(t_FmPcdManipHdrParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_params_t) == sizeof(t_FmPcdManipFragParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_params_t) == sizeof(t_FmPcdManipReassemParams));
++#if !defined(CONFIG_COMPAT)
++ /* different alignment */
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_params_t) == sizeof(t_FmPcdManipParams) + sizeof(void *));
++#endif
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_ip_stats_t) == sizeof(t_FmPcdManipReassemIpStats));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_ip_stats_t) == sizeof(t_FmPcdManipFragIpStats));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_stats_t) == sizeof(t_FmPcdManipReassemStats));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_stats_t) == sizeof(t_FmPcdManipFragStats));
++ ASSERT_COND(sizeof(ioc_fm_pcd_manip_stats_t) == sizeof(t_FmPcdManipStats));
++#if DPAA_VERSION >= 11
++ ASSERT_COND(sizeof(ioc_fm_pcd_frm_replic_group_params_t) == sizeof(t_FmPcdFrmReplicGroupParams) + sizeof(void *));
++#endif
++
++ /* fm_port_ext.h == fm_port_ioctls.h */
++ ASSERT_COND(sizeof(ioc_fm_port_rate_limit_t) == sizeof(t_FmPortRateLimit));
++ ASSERT_COND(sizeof(ioc_fm_port_pcd_params_t) == sizeof(t_FmPortPcdParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_kg_scheme_select_t) == sizeof(t_FmPcdKgSchemeSelect));
++ ASSERT_COND(sizeof(ioc_fm_pcd_port_schemes_params_t) == sizeof(t_FmPcdPortSchemesParams));
++ ASSERT_COND(sizeof(ioc_fm_pcd_prs_start_t) == sizeof(t_FmPcdPrsStart));
++
++ return;
++}
++
++#define ASSERT_IOC_NET_ENUM(def) ASSERT_COND((unsigned long)e_IOC_NET_##def == (unsigned long)def)
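++/* Unlike the compile-time CMP_IOC_DEFINE checks, enum values can only be
++ * spot-checked at runtime: each ASSERT_IOC_NET_ENUM samples one enumerator
++ * of the IOC_ mirror against its net_ext.h original. */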
++
++void LnxWrpPCDIOCTLEnumChecking(void)
++{
++ /* net_ext.h == net_ioctls.h : sampling checks */
++ ASSERT_IOC_NET_ENUM(HEADER_TYPE_MACSEC);
++ ASSERT_IOC_NET_ENUM(HEADER_TYPE_PPP);
++ ASSERT_IOC_NET_ENUM(MAX_HEADER_TYPE_COUNT);
++
++ /* fm_ext.h == fm_ioctls.h */
++ ASSERT_COND((unsigned long)e_IOC_FM_PORT_TYPE_DUMMY == (unsigned long)e_FM_PORT_TYPE_DUMMY);
++ ASSERT_COND((unsigned long)e_IOC_EX_MURAM_ECC == (unsigned long)e_FM_EX_MURAM_ECC);
++ ASSERT_COND((unsigned long)e_IOC_FM_COUNTERS_DEQ_CONFIRM == (unsigned long)e_FM_COUNTERS_DEQ_CONFIRM);
++
++ /* fm_pcd_ext.h == fm_pcd_ioctls.h */
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES == (unsigned long)e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PRS_EXCEPTION_SINGLE_ECC == (unsigned long)e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PRS == (unsigned long)e_FM_PCD_PRS);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_EXTRACT_FULL_FIELD == (unsigned long)e_FM_PCD_EXTRACT_FULL_FIELD);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_EXTRACT_FROM_FLOW_ID == (unsigned long)e_FM_PCD_EXTRACT_FROM_FLOW_ID);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO == (unsigned long)e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_KG_DFLT_ILLEGAL == (unsigned long)e_FM_PCD_KG_DFLT_ILLEGAL);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_KG_GENERIC_NOT_FROM_DATA == (unsigned long)e_FM_PCD_KG_GENERIC_NOT_FROM_DATA);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_HDR_INDEX_LAST == (unsigned long)e_FM_PCD_HDR_INDEX_LAST);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_SHARED == (unsigned long)e_FM_PCD_PLCR_SHARED);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_RFC_4115 == (unsigned long)e_FM_PCD_PLCR_RFC_4115);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_COLOR_AWARE == (unsigned long)e_FM_PCD_PLCR_COLOR_AWARE);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_OVERRIDE == (unsigned long)e_FM_PCD_PLCR_OVERRIDE);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_FULL_FRM_LEN == (unsigned long)e_FM_PCD_PLCR_FULL_FRM_LEN);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN == (unsigned long)e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_PACKET_MODE == (unsigned long)e_FM_PCD_PLCR_PACKET_MODE);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_DROP_FRAME == (unsigned long)e_FM_PCD_DROP_FRAME);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER == (unsigned long)e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP == (unsigned long)e_FM_PCD_ACTION_INDEXED_LOOKUP);
++ ASSERT_COND((unsigned long)e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR == (unsigned long)e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR);
++#if !defined(FM_CAPWAP_SUPPORT)
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_INSRT_GENERIC == (unsigned long)e_FM_PCD_MANIP_INSRT_GENERIC);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_RMV_GENERIC == (unsigned long)e_FM_PCD_MANIP_RMV_GENERIC);
++#else
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_INSRT_BY_TEMPLATE == (unsigned long)e_FM_PCD_MANIP_INSRT_BY_TEMPLATE);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_RMV_BY_HDR == (unsigned long)e_FM_PCD_MANIP_RMV_BY_HDR);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_RMV_BY_HDR_FROM_START == (unsigned long)e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START);
++#endif
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAG == (unsigned long)e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAG);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH == (unsigned long)e_FM_PCD_MANIP_EIGHT_WAYS_HASH);
++
++#ifdef FM_CAPWAP_SUPPORT
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_STATS_PER_FLOWID == (unsigned long)e_FM_PCD_STATS_PER_FLOWID);
++#endif
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD == (unsigned long)e_FM_PCD_MANIP_SPECIAL_OFFLOAD);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_CC_STATS_MODE_FRAME == (unsigned long)e_FM_PCD_CC_STATS_MODE_FRAME);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_CONTINUE_WITHOUT_FRAG == (unsigned long)e_FM_PCD_MANIP_CONTINUE_WITHOUT_FRAG);
++ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC == (unsigned long)e_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC);
++
++ /* fm_port_ext.h == fm_port_ioctls.h */
++#if !defined(FM_CAPWAP_SUPPORT)
++ ASSERT_COND((unsigned long)e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR == (unsigned long)e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR);
++#else
++ ASSERT_COND((unsigned long)e_IOC_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR == (unsigned long)e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR);
++#endif
++ ASSERT_COND((unsigned long)e_IOC_FM_PORT_COUNTERS_DEQ_CONFIRM == (unsigned long)e_FM_PORT_COUNTERS_DEQ_CONFIRM);
++ ASSERT_COND((unsigned long)e_IOC_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8 == (unsigned long)e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8);
++
++ return;
++}
++
++static t_Error LnxwrpFmPcdIOCTL(t_LnxWrpFmDev *p_LnxWrpFmDev, unsigned int cmd, unsigned long arg, bool compat)
++{
++ t_Error err = E_OK;
++
++/*
++Status: PCD API to fmlib (file: drivers/net/dpa/NetCommSw/inc/Peripherals/fm_pcd_ext.h):
++
++ FM_PCD_PrsLoadSw
++ FM_PCD_SetAdvancedOffloadSupport
++ FM_PCD_Enable
++ FM_PCD_Disable
++ FM_PCD_ForceIntr
++ FM_PCD_SetException
++ FM_PCD_KgSetAdditionalDataAfterParsing
++ FM_PCD_KgSetDfltValue
++ FM_PCD_NetEnvCharacteristicsSet
++ FM_PCD_NetEnvCharacteristicsDelete
++ FM_PCD_KgSchemeSet
++ FM_PCD_KgSchemeDelete
++ FM_PCD_MatchTableSet
++ FM_PCD_MatchTableDelete
++ FM_PCD_CcRootBuild
++ FM_PCD_CcRootDelete
++ FM_PCD_PlcrProfileSet
++ FM_PCD_PlcrProfileDelete
++ FM_PCD_CcRootModifyNextEngine
++ FM_PCD_MatchTableModifyNextEngine
++ FM_PCD_MatchTableModifyMissNextEngine
++ FM_PCD_MatchTableRemoveKey
++ FM_PCD_MatchTableAddKey
++ FM_PCD_MatchTableModifyKeyAndNextEngine
++ FM_PCD_HashTableSet
++ FM_PCD_HashTableDelete
++ FM_PCD_HashTableAddKey
++ FM_PCD_HashTableRemoveKey
++ FM_PCD_MatchTableModifyKey
++ FM_PCD_ManipNodeReplace
++ FM_PCD_ManipNodeSet
++ FM_PCD_ManipNodeDelete
++
++Status: not exported, should be through sysfs
++ FM_PCD_KgSchemeGetCounter
++ FM_PCD_KgSchemeSetCounter
++ FM_PCD_PlcrProfileGetCounter
++ FM_PCD_PlcrProfileSetCounter
++
++Status: not exported
++ FM_PCD_MatchTableFindNRemoveKey
++ FM_PCD_MatchTableFindNModifyNextEngine
++ FM_PCD_MatchTableFindNModifyKeyAndNextEngine
++ FM_PCD_MatchTableFindNModifyKey
++ FM_PCD_MatchTableGetIndexedHashBucket
++ FM_PCD_MatchTableGetNextEngine
++ FM_PCD_MatchTableGetKeyCounter
++
++Status: not exported, would be nice to have
++ FM_PCD_HashTableModifyNextEngine
++ FM_PCD_HashTableModifyMissNextEngine
++ FM_PCD_HashTableGetMissNextEngine
++ FM_PCD_ManipGetStatistics
++
++Status: not exported
++#if DPAA_VERSION >= 11
++
++ FM_VSP_GetStatistics -- it's not available yet
++#endif
++
++Status: feature not supported
++#ifdef FM_CAPWAP_SUPPORT
++#error unsupported feature
++ FM_PCD_StatisticsSetNode
++#endif
++
++ */
++ _fm_ioctl_dbg("cmd:0x%08x(type:0x%02x, nr:%u).\n",
++ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd) - 20);
++
++ switch (cmd)
++ {
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_PRS_LOAD_SW_COMPAT:
++#endif
++ case FM_PCD_IOC_PRS_LOAD_SW:
++ {
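++ /* Two-stage copy-in, as coded below: first the parameter struct
++ * itself (via the compat shim for 32-bit callers), then the
++ * user-space parser-code blob that param->p_code points at,
++ * which is re-pointed to the kernel copy before the LLD call. */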
++ ioc_fm_pcd_prs_sw_params_t *param;
++ uint8_t *p_code;
++
++ param = (ioc_fm_pcd_prs_sw_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_prs_sw_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_prs_sw_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_prs_sw_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_prs_sw_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_prs_sw_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_prs_sw_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_prs_sw_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_prs_sw_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_fm_pcd_prs_sw(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_prs_sw_params_t *)arg,
++ sizeof(ioc_fm_pcd_prs_sw_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ if (!param->p_code || !param->size)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ p_code = (uint8_t *) XX_Malloc(param->size);
++ if (!p_code)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(p_code, 0, param->size);
++ if (copy_from_user(p_code, param->p_code, param->size))
++ {
++ XX_Free(p_code);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ param->p_code = p_code;
++
++ err = FM_PCD_PrsLoadSw(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdPrsSwParams*)param);
++
++ XX_Free(p_code);
++ XX_Free(param);
++ break;
++ }
++
++ case FM_PCD_IOC_SET_ADVANCED_OFFLOAD_SUPPORT:
++ err = FM_PCD_SetAdvancedOffloadSupport(p_LnxWrpFmDev->h_PcdDev);
++ break;
++
++ case FM_PCD_IOC_ENABLE:
++ err = FM_PCD_Enable(p_LnxWrpFmDev->h_PcdDev);
++ break;
++
++ case FM_PCD_IOC_DISABLE:
++ err = FM_PCD_Disable(p_LnxWrpFmDev->h_PcdDev);
++ break;
++
++ case FM_PCD_IOC_FORCE_INTR:
++ {
++ int exception;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (get_user(exception, (int *) compat_ptr(arg)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ else
++#endif
++ {
++ if (get_user(exception, (int *)arg))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_ForceIntr(p_LnxWrpFmDev->h_PcdDev, (e_FmPcdExceptions)exception);
++ break;
++ }
++
++ case FM_PCD_IOC_SET_EXCEPTION:
++ {
++ ioc_fm_pcd_exception_params_t *param;
++
++ param = (ioc_fm_pcd_exception_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_exception_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_exception_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_exception_params_t *)compat_ptr(arg),
++ sizeof(ioc_fm_pcd_exception_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_exception_params_t *)arg,
++ sizeof(ioc_fm_pcd_exception_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, param->exception, param->enable);
++
++ XX_Free(param);
++ break;
++ }
++
++ case FM_PCD_IOC_KG_SET_ADDITIONAL_DATA_AFTER_PARSING:
++ {
++ uint8_t payloadOffset;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (get_user(payloadOffset, (uint8_t*) compat_ptr(arg)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ else
++#endif
++ {
++ if (get_user(payloadOffset, (uint8_t*) arg))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_KgSetAdditionalDataAfterParsing(p_LnxWrpFmDev->h_PcdDev, payloadOffset);
++ break;
++ }
++
++ case FM_PCD_IOC_KG_SET_DFLT_VALUE:
++ {
++ ioc_fm_pcd_kg_dflt_value_params_t *param;
++
++ param = (ioc_fm_pcd_kg_dflt_value_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_kg_dflt_value_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_kg_dflt_value_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_kg_dflt_value_params_t *)compat_ptr(arg),
++ sizeof(ioc_fm_pcd_kg_dflt_value_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_kg_dflt_value_params_t *)arg,
++ sizeof(ioc_fm_pcd_kg_dflt_value_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PCD_KgSetDfltValue(p_LnxWrpFmDev->h_PcdDev, param->valueId, param->value);
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET_COMPAT:
++#endif
++ case FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET:
++ {
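++ /* Round-trip pattern, as coded below: copy the parameters in,
++ * let the LLD create the object, then copy the struct back out
++ * so user space receives the new object handle in param->id. */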
++ ioc_fm_pcd_net_env_params_t *param;
++
++ param = (ioc_fm_pcd_net_env_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_net_env_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_net_env_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_net_env_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_net_env_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_net_env_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_net_env_params_t));
++ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_net_env_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_net_env_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_net_env(compat_param, param, COMPAT_US_TO_K);
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_net_env_params_t *) arg,
++ sizeof(ioc_fm_pcd_net_env_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ param->id = FM_PCD_NetEnvCharacteristicsSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdNetEnvParams*)param);
++
++ if (!param->id)
++ {
++ XX_Free(param);
++ err = E_INVALID_VALUE;
++ /* Since the LLD has no errno-style error reporting,
++ we're left here with no other option than to report
++ a generic E_INVALID_VALUE */
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_net_env_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_net_env_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_net_env_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_net_env_params_t));
++ compat_copy_fm_pcd_net_env(compat_param, param, COMPAT_K_TO_US);
++
++ if (copy_to_user((ioc_compat_fm_pcd_net_env_params_t *) compat_ptr(arg),
++ compat_param,
++ sizeof(ioc_compat_fm_pcd_net_env_params_t)))
++ err = E_READ_FAILED;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_net_env_params_t *)arg,
++ param,
++ sizeof(ioc_fm_pcd_net_env_params_t)))
++ err = E_READ_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE_COMPAT:
++#endif
++ case FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE:
++ {
++ ioc_fm_obj_t id;
++
++        memset(&id, 0, sizeof(ioc_fm_obj_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_obj_delete(&compat_id, &id);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_NetEnvCharacteristicsDelete(id.obj);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_KG_SCHEME_SET_COMPAT:
++#endif
++ case FM_PCD_IOC_KG_SCHEME_SET:
++ {
++ ioc_fm_pcd_kg_scheme_params_t *param;
++
++ param = (ioc_fm_pcd_kg_scheme_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_kg_scheme_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_kg_scheme_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_kg_scheme_params_t *compat_param = NULL;
++
++ compat_param = (ioc_compat_fm_pcd_kg_scheme_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_kg_scheme_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_params_t));
++
++ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_kg_scheme_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_kg_scheme_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_kg_scheme(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_params_t *)arg,
++ sizeof(ioc_fm_pcd_kg_scheme_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ param->id = FM_PCD_KgSchemeSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdKgSchemeParams*)param);
++
++ if (!param->id)
++ {
++ XX_Free(param);
++ err = E_INVALID_VALUE;
++ /* Since the LLD has no errno-style error reporting,
++ we're left here with no other option than to report
++ a generic E_INVALID_VALUE */
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_kg_scheme_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_kg_scheme_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_kg_scheme_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_params_t));
++ compat_copy_fm_pcd_kg_scheme(compat_param, param, COMPAT_K_TO_US);
++ if (copy_to_user((ioc_compat_fm_pcd_kg_scheme_params_t *)compat_ptr(arg),
++ compat_param,
++ sizeof(ioc_compat_fm_pcd_kg_scheme_params_t)))
++ err = E_READ_FAILED;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_kg_scheme_params_t *)arg,
++ param,
++ sizeof(ioc_fm_pcd_kg_scheme_params_t)))
++ err = E_READ_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_KG_SCHEME_GET_CNTR_COMPAT:
++#endif
++ case FM_PCD_IOC_KG_SCHEME_GET_CNTR:
++ {
++ ioc_fm_pcd_kg_scheme_spc_t *param;
++
++ param = (ioc_fm_pcd_kg_scheme_spc_t *) XX_Malloc(sizeof(ioc_fm_pcd_kg_scheme_spc_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_kg_scheme_spc_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param = NULL;
++
++ compat_param = (ioc_compat_fm_pcd_kg_scheme_spc_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
++
++ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_kg_scheme_spc_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_kg_scheme_spc(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_spc_t *)arg,
++ sizeof(ioc_fm_pcd_kg_scheme_spc_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ param->val = FM_PCD_KgSchemeGetCounter((t_Handle)param->id);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_kg_scheme_spc_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
++ compat_copy_fm_pcd_kg_scheme_spc(compat_param, param, COMPAT_K_TO_US);
++ if (copy_to_user((ioc_compat_fm_pcd_kg_scheme_spc_t *)compat_ptr(arg),
++ compat_param,
++ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)))
++ err = E_READ_FAILED;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_kg_scheme_spc_t *)arg,
++ param,
++ sizeof(ioc_fm_pcd_kg_scheme_spc_t)))
++ err = E_READ_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_KG_SCHEME_DELETE_COMPAT:
++#endif
++ case FM_PCD_IOC_KG_SCHEME_DELETE:
++ {
++ ioc_fm_obj_t id;
++
++        memset(&id, 0, sizeof(ioc_fm_obj_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_obj_delete(&compat_id, &id);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_KgSchemeDelete(id.obj);
++ break;
++ }
++
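++        /*
++         * Match-table parameters carry up to IOC_FM_PCD_MAX_NUM_OF_KEYS
++         * keys plus as many masks; both arrays live in the tail of a
++         * single allocation so one XX_Free() releases everything.
++         */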
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_SET_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_SET:
++ {
++ ioc_fm_pcd_cc_node_params_t *param;
++ uint8_t *keys;
++ uint8_t *masks;
++        int i, k;
++
++ param = (ioc_fm_pcd_cc_node_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_cc_node_params_t) +
++ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_params_t) +
++ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
++
++ keys = (uint8_t *) (param + 1);
++ masks = keys + IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_node_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_node_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_node_params_t) +
++ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_params_t) +
++ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
++
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_cc_node_params_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_node_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_node(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_cc_node_params_t *)arg, sizeof(ioc_fm_pcd_cc_node_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ ASSERT_COND(param->keys_params.num_of_keys <= IOC_FM_PCD_MAX_NUM_OF_KEYS);
++ ASSERT_COND(param->keys_params.key_size <= IOC_FM_PCD_MAX_SIZE_OF_KEY);
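++        /* Note: ASSERT_COND is typically compiled out of non-debug
++           builds, in which case these user-supplied bounds reach the
++           staging loop below unchecked. */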
++
++ /* support for indexed lookup */
++        if (!(param->extract_cc_params.type == e_IOC_FM_PCD_EXTRACT_NON_HDR &&
++ param->extract_cc_params.extract_params.extract_non_hdr.src == e_IOC_FM_PCD_EXTRACT_FROM_HASH &&
++ param->extract_cc_params.extract_params.extract_non_hdr.action == e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP))
++ {
++ for (i=0, k=0;
++ i < param->keys_params.num_of_keys;
++ i++, k += IOC_FM_PCD_MAX_SIZE_OF_KEY)
++ {
++ if (param->keys_params.key_params[i].p_key &&
++ param->keys_params.key_size)
++ {
++ if (copy_from_user(&keys[k],
++ param->keys_params.key_params[i].p_key,
++ param->keys_params.key_size))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ param->keys_params.key_params[i].p_key = &keys[k];
++ }
++
++ if (param->keys_params.key_params[i].p_mask)
++ {
++ if (copy_from_user(&masks[k],
++ param->keys_params.key_params[i].p_mask,
++ param->keys_params.key_size))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ param->keys_params.key_params[i].p_mask = &masks[k];
++ }
++ }
++ }
++
++ param->id = FM_PCD_MatchTableSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdCcNodeParams*)param);
++
++ if (!param->id) {
++ XX_Free(param);
++ err = E_INVALID_VALUE;
++ /* Since the LLD has no errno-style error reporting,
++ we're left here with no other option than to report
++ a generic E_INVALID_VALUE */
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_node_params_t *compat_param;
++ compat_param = (ioc_compat_fm_pcd_cc_node_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_node_params_t) +
++ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_params_t) +
++ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
++ compat_copy_fm_pcd_cc_node(compat_param, param, COMPAT_K_TO_US);
++
++ if (copy_to_user((ioc_compat_fm_pcd_cc_node_params_t *)compat_ptr(arg),
++ compat_param,
++ sizeof(ioc_compat_fm_pcd_cc_node_params_t)))
++ err = E_READ_FAILED;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_cc_node_params_t *)arg,
++ param,
++ sizeof(ioc_fm_pcd_cc_node_params_t)))
++ err = E_READ_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_DELETE_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_DELETE:
++ {
++ ioc_fm_obj_t id;
++
++        memset(&id, 0, sizeof(ioc_fm_obj_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_obj_delete(&compat_id, &id);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_MatchTableDelete(id.obj);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_CC_ROOT_BUILD_COMPAT:
++#endif
++ case FM_PCD_IOC_CC_ROOT_BUILD:
++ {
++ ioc_fm_pcd_cc_tree_params_t *param;
++
++ param = (ioc_fm_pcd_cc_tree_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_cc_tree_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_cc_tree_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_tree_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_tree_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_tree_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tree_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_cc_tree_params_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_tree_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_tree(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_cc_tree_params_t *)arg,
++ sizeof(ioc_fm_pcd_cc_tree_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ param->id = FM_PCD_CcRootBuild(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdCcTreeParams*)param);
++
++ if (!param->id) {
++ XX_Free(param);
++ err = E_INVALID_VALUE;
++ /* Since the LLD has no errno-style error reporting,
++ we're left here with no other option than to report
++ a generic E_INVALID_VALUE */
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_tree_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_tree_params_t *) XX_Malloc(sizeof(ioc_compat_fm_pcd_cc_tree_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tree_params_t));
++
++ compat_copy_fm_pcd_cc_tree(compat_param, param, COMPAT_K_TO_US);
++
++ if (copy_to_user((ioc_compat_fm_pcd_cc_tree_params_t *)compat_ptr(arg),
++ compat_param,
++ sizeof(ioc_compat_fm_pcd_cc_tree_params_t)))
++ err = E_READ_FAILED;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_cc_tree_params_t *)arg,
++ param,
++ sizeof(ioc_fm_pcd_cc_tree_params_t)))
++ err = E_READ_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_CC_ROOT_DELETE_COMPAT:
++#endif
++ case FM_PCD_IOC_CC_ROOT_DELETE:
++ {
++ ioc_fm_obj_t id;
++
++        memset(&id, 0, sizeof(ioc_fm_obj_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_obj_delete(&compat_id, &id);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_CcRootDelete(id.obj);
++ break;
++ }
++
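++        /*
++         * A policer profile bound to a single port arrives with a
++         * user-space port description; it is resolved below to the
++         * wrapper's port device handle before calling the LLD.
++         */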
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_PLCR_PROFILE_SET_COMPAT:
++#endif
++ case FM_PCD_IOC_PLCR_PROFILE_SET:
++ {
++ ioc_fm_pcd_plcr_profile_params_t *param;
++
++ param = (ioc_fm_pcd_plcr_profile_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_plcr_profile_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_plcr_profile_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_plcr_profile_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_plcr_profile_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_plcr_profile_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_plcr_profile_params_t));
++            if (copy_from_user(compat_param,
++                            (ioc_compat_fm_pcd_plcr_profile_params_t *)compat_ptr(arg),
++                            sizeof(ioc_compat_fm_pcd_plcr_profile_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_plcr_profile(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_plcr_profile_params_t *)arg,
++ sizeof(ioc_fm_pcd_plcr_profile_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ if (!param->modify &&
++ (((t_FmPcdPlcrProfileParams*)param)->id.newParams.profileType != e_FM_PCD_PLCR_SHARED))
++ {
++ t_Handle h_Port;
++ ioc_fm_pcd_port_params_t *port_params;
++
++ port_params = (ioc_fm_pcd_port_params_t*) XX_Malloc(sizeof(ioc_fm_pcd_port_params_t));
++ if (!port_params)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(port_params, 0, sizeof(ioc_fm_pcd_port_params_t));
++ if (copy_from_user(port_params, (ioc_fm_pcd_port_params_t*)((t_FmPcdPlcrProfileParams*)param)->id.newParams.h_FmPort,
++ sizeof(ioc_fm_pcd_port_params_t)))
++ {
++ XX_Free(port_params);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ switch(port_params->port_type)
++ {
++ case (e_IOC_FM_PORT_TYPE_RX):
++ if (port_params->port_id < FM_MAX_NUM_OF_1G_RX_PORTS) {
++ h_Port = p_LnxWrpFmDev->rxPorts[port_params->port_id].h_Dev;
++ break;
++ }
++ goto invalid_port_id;
++
++ case (e_IOC_FM_PORT_TYPE_RX_10G):
++ if (port_params->port_id < FM_MAX_NUM_OF_10G_RX_PORTS) {
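++                        /* T1023/T1024 index their 10G ports within the
++                           1G port array; other SoCs place 10G ports
++                           after the FM_MAX_NUM_OF_1G_RX_PORTS range. */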
++#ifndef CONFIG_FMAN_ARM
++ if (IS_T1023_T1024) {
++ h_Port = p_LnxWrpFmDev->rxPorts[port_params->port_id].h_Dev;
++ } else {
++#else
++ {
++#endif
++ h_Port = p_LnxWrpFmDev->rxPorts[port_params->port_id + FM_MAX_NUM_OF_1G_RX_PORTS].h_Dev;
++ }
++ break;
++ }
++ goto invalid_port_id;
++
++ case (e_IOC_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ if (port_params->port_id && port_params->port_id < FM_MAX_NUM_OF_OH_PORTS) {
++ h_Port = p_LnxWrpFmDev->opPorts[port_params->port_id - 1].h_Dev;
++ break;
++ }
++ goto invalid_port_id;
++
++ default:
++invalid_port_id:
++ XX_Free(port_params);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION, NO_MSG);
++ }
++
++ ((t_FmPcdPlcrProfileParams*)param)->id.newParams.h_FmPort = h_Port;
++ XX_Free(port_params);
++ }
++
++ param->id = FM_PCD_PlcrProfileSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdPlcrProfileParams*)param);
++
++ if (!param->id) {
++ XX_Free(param);
++ err = E_INVALID_VALUE;
++ /* Since the LLD has no errno-style error reporting,
++ we're left here with no other option than to report
++ a generic E_INVALID_VALUE */
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_plcr_profile_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_plcr_profile_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_plcr_profile_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_plcr_profile_params_t));
++ compat_copy_fm_pcd_plcr_profile(compat_param, param, COMPAT_K_TO_US);
++ if (copy_to_user((ioc_compat_fm_pcd_plcr_profile_params_t *) compat_ptr(arg),
++ compat_param,
++ sizeof(ioc_compat_fm_pcd_plcr_profile_params_t)))
++ err = E_READ_FAILED;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_plcr_profile_params_t *)arg,
++ param,
++ sizeof(ioc_fm_pcd_plcr_profile_params_t)))
++ err = E_READ_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_PLCR_PROFILE_DELETE_COMPAT:
++#endif
++ case FM_PCD_IOC_PLCR_PROFILE_DELETE:
++ {
++ ioc_fm_obj_t id;
++
++        memset(&id, 0, sizeof(ioc_fm_obj_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_obj_delete(&compat_id, &id);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_PlcrProfileDelete(id.obj);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE_COMPAT:
++#endif
++ case FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE:
++ {
++ ioc_fm_pcd_cc_tree_modify_next_engine_params_t *param;
++
++ param = (ioc_fm_pcd_cc_tree_modify_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_cc_tree_modify_next_engine_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_cc_tree_modify_next_engine_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t));
++ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_fm_pcd_cc_tree_modify_next_engine(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_cc_tree_modify_next_engine_params_t *)arg,
++ sizeof(ioc_fm_pcd_cc_tree_modify_next_engine_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PCD_CcRootModifyNextEngine(param->id,
++ param->grp_indx,
++ param->indx,
++ (t_FmPcdCcNextEngineParams*)(&param->cc_next_engine_params));
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE:
++ {
++ ioc_fm_pcd_cc_node_modify_next_engine_params_t *param;
++
++ param = (ioc_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t));
++ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_node_modify_next_engine(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_next_engine_params_t *)arg,
++ sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PCD_MatchTableModifyNextEngine(param->id,
++ param->key_indx,
++ (t_FmPcdCcNextEngineParams*)(&param->cc_next_engine_params));
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE:
++ {
++ ioc_fm_pcd_cc_node_modify_next_engine_params_t *param;
++
++ param = (ioc_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t));
++ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_node_modify_next_engine(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_next_engine_params_t *) arg,
++ sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PCD_MatchTableModifyMissNextEngine(param->id,
++ (t_FmPcdCcNextEngineParams*)(&param->cc_next_engine_params));
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY:
++ {
++ ioc_fm_pcd_cc_node_remove_key_params_t *param;
++
++ param = (ioc_fm_pcd_cc_node_remove_key_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_cc_node_remove_key_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_remove_key_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_node_remove_key_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_node_remove_key_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_node_remove_key_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_remove_key_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_cc_node_remove_key_params_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_node_remove_key_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ param->id = compat_ptr(compat_param->id);
++ param->key_indx = compat_param->key_indx;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_cc_node_remove_key_params_t *) arg,
++ sizeof(ioc_fm_pcd_cc_node_remove_key_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PCD_MatchTableRemoveKey(param->id, param->key_indx);
++
++ XX_Free(param);
++ break;
++ }
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_ADD_KEY_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_ADD_KEY:
++ {
++ ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param;
++
++ param = (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)arg,
++ sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ if (param->key_size)
++ {
++ int size = 0;
++
++ if (param->key_params.p_key) size += param->key_size;
++ if (param->key_params.p_mask) size += param->key_size;
++
++ if (size)
++ {
++ uint8_t *p_tmp;
++
++ p_tmp = (uint8_t*) XX_Malloc(size);
++ if (!p_tmp)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD key/mask"));
++ }
++
++ if (param->key_params.p_key)
++ {
++ if (copy_from_user(p_tmp, param->key_params.p_key, param->key_size))
++ {
++ XX_Free(p_tmp);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ param->key_params.p_key = p_tmp;
++ }
++
++                        if (param->key_params.p_mask)
++                        {
++                            /* The mask is staged after the key in the
++                               same buffer; when no key was copied it
++                               occupies the start of the allocation. */
++                            if (param->key_params.p_key)
++                                p_tmp += param->key_size;
++                            if (copy_from_user(p_tmp, param->key_params.p_mask, param->key_size))
++                            {
++                                XX_Free(param->key_params.p_key ?
++                                        p_tmp - param->key_size : p_tmp);
++                                XX_Free(param);
++                                RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++                            }
++
++                            param->key_params.p_mask = p_tmp;
++                        }
++ }
++ }
++
++ err = FM_PCD_MatchTableAddKey(
++ param->id,
++ param->key_indx,
++ param->key_size,
++ (t_FmPcdCcKeyParams*)&param->key_params);
++
++        if (param->key_size && param->key_params.p_key)
++            XX_Free(param->key_params.p_key);
++        else if (param->key_size && param->key_params.p_mask)
++            XX_Free(param->key_params.p_mask);
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE:
++ {
++ ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param;
++
++ param = (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)arg,
++ sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PCD_MatchTableModifyKeyAndNextEngine(param->id,
++ param->key_indx,
++ param->key_size,
++ (t_FmPcdCcKeyParams*)(&param->key_params));
++
++ XX_Free(param);
++ break;
++ }
++
++
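++        /*
++         * The statistics queries below are small enough to marshal
++         * through an on-stack parameter block; only the compat
++         * translation still round-trips through a heap copy.
++         */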
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT:
++ {
++ ioc_fm_pcd_cc_tbl_get_stats_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ if (!compat_param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_cc_tbl_get_stats_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t)))
++ {
++ XX_Free(compat_param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&param, (ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
++ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++
++ err = FM_PCD_MatchTableGetKeyStatistics((t_Handle) param.id,
++ param.key_index,
++ (t_FmPcdCcKeyStatistics *) &param.statistics);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t*) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ if (!compat_param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_K_TO_US);
++ if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_stats_t*) compat_ptr(arg),
++ compat_param,
++                    sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t))) {
++ XX_Free(compat_param);
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
++ &param,
++ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++
++ break;
++ }
++
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT:
++ {
++ ioc_fm_pcd_cc_tbl_get_stats_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ if (!compat_param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_cc_tbl_get_stats_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t)))
++ {
++ XX_Free(compat_param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&param, (ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
++ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++
++ err = FM_PCD_MatchTableGetMissStatistics((t_Handle) param.id,
++ (t_FmPcdCcKeyStatistics *) &param.statistics);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t*) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ if (!compat_param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_K_TO_US);
++ if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_stats_t*) compat_ptr(arg),
++ compat_param,
++                    sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t))) {
++ XX_Free(compat_param);
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
++ &param,
++ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++
++ break;
++ }
++
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT_COMPAT:
++#endif
++ case FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT:
++ {
++ ioc_fm_pcd_cc_tbl_get_stats_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ if (!compat_param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_cc_tbl_get_stats_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t)))
++ {
++ XX_Free(compat_param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&param, (ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
++ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++
++ err = FM_PCD_HashTableGetMissStatistics((t_Handle) param.id,
++ (t_FmPcdCcKeyStatistics *) &param.statistics);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t*) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ if (!compat_param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
++ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_K_TO_US);
++ if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_stats_t*) compat_ptr(arg),
++ compat_param,
++                    sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t))) {
++ XX_Free(compat_param);
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
++ &param,
++ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_HASH_TABLE_SET_COMPAT:
++#endif
++ case FM_PCD_IOC_HASH_TABLE_SET:
++ {
++ ioc_fm_pcd_hash_table_params_t *param;
++
++ param = (ioc_fm_pcd_hash_table_params_t*) XX_Malloc(
++ sizeof(ioc_fm_pcd_hash_table_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_hash_table_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_hash_table_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_hash_table_params_t*) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_hash_table_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_hash_table_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_hash_table_params_t*)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_hash_table_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_hash_table(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_hash_table_params_t *)arg,
++ sizeof(ioc_fm_pcd_hash_table_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ param->id = FM_PCD_HashTableSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdHashTableParams *) param);
++
++ if (!param->id)
++ {
++ XX_Free(param);
++ err = E_INVALID_VALUE;
++ /* Since the LLD has no errno-style error reporting,
++ we're left here with no other option than to report
++ a generic E_INVALID_VALUE */
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_hash_table_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_hash_table_params_t*) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_hash_table_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_hash_table_params_t));
++ compat_copy_fm_pcd_hash_table(compat_param, param, COMPAT_K_TO_US);
++ if (copy_to_user((ioc_compat_fm_pcd_hash_table_params_t*) compat_ptr(arg),
++ compat_param,
++ sizeof(ioc_compat_fm_pcd_hash_table_params_t)))
++ err = E_READ_FAILED;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_hash_table_params_t *)arg,
++ param,
++ sizeof(ioc_fm_pcd_hash_table_params_t)))
++ err = E_READ_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_HASH_TABLE_DELETE_COMPAT:
++#endif
++ case FM_PCD_IOC_HASH_TABLE_DELETE:
++ {
++ ioc_fm_obj_t id;
++
++ memset(&id, 0, sizeof(ioc_fm_obj_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ id.obj = compat_pcd_id2ptr(compat_id.obj);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_HashTableDelete(id.obj);
++ break;
++ }
++
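++        /* Note: the compat path below rejects a zero key_size with
++           E_INVALID_VALUE; the native path leaves that check to the
++           LLD. */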
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_HASH_TABLE_ADD_KEY_COMPAT:
++#endif
++ case FM_PCD_IOC_HASH_TABLE_ADD_KEY:
++ {
++ ioc_fm_pcd_hash_table_add_key_params_t *param = NULL;
++
++ param = (ioc_fm_pcd_hash_table_add_key_params_t*) XX_Malloc(
++ sizeof(ioc_fm_pcd_hash_table_add_key_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_hash_table_add_key_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_hash_table_add_key_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_hash_table_add_key_params_t*) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_hash_table_add_key_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_hash_table_add_key_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_hash_table_add_key_params_t*) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_hash_table_add_key_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ if (compat_param->key_size)
++ {
++ param->p_hash_tbl = compat_pcd_id2ptr(compat_param->p_hash_tbl);
++ param->key_size = compat_param->key_size;
++
++ compat_copy_fm_pcd_cc_key(&compat_param->key_params, &param->key_params, COMPAT_US_TO_K);
++ }
++ else
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ err = E_INVALID_VALUE;
++ break;
++ }
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_hash_table_add_key_params_t*) arg,
++ sizeof(ioc_fm_pcd_hash_table_add_key_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ if (param->key_size)
++ {
++ int size = 0;
++
++ if (param->key_params.p_key) size += param->key_size;
++ if (param->key_params.p_mask) size += param->key_size;
++
++ if (size)
++ {
++ uint8_t *p_tmp;
++
++ p_tmp = (uint8_t*) XX_Malloc(size);
++ if (!p_tmp)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD key/mask"));
++ }
++
++ if (param->key_params.p_key)
++ {
++ if (copy_from_user(p_tmp, param->key_params.p_key, param->key_size))
++ {
++ XX_Free(p_tmp);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ param->key_params.p_key = p_tmp;
++ }
++
++                            if (param->key_params.p_mask)
++                            {
++                                /* The mask is staged after the key in
++                                   the same buffer; when no key was
++                                   copied it occupies the start of the
++                                   allocation. */
++                                if (param->key_params.p_key)
++                                    p_tmp += param->key_size;
++                                if (copy_from_user(p_tmp, param->key_params.p_mask, param->key_size))
++                                {
++                                    XX_Free(param->key_params.p_key ?
++                                            p_tmp - param->key_size : p_tmp);
++                                    XX_Free(param);
++                                    RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++                                }
++
++                                param->key_params.p_mask = p_tmp;
++                            }
++ }
++ }
++
++ err = FM_PCD_HashTableAddKey(
++ param->p_hash_tbl,
++ param->key_size,
++ (t_FmPcdCcKeyParams*)&param->key_params);
++
++        if (param->key_size && param->key_params.p_key)
++            XX_Free(param->key_params.p_key);
++        else if (param->key_size && param->key_params.p_mask)
++            XX_Free(param->key_params.p_mask);
++ XX_Free(param);
++ break;
++ }
++
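++        /* Key removal stages only the key itself; this request
++           carries no mask. */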
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_HASH_TABLE_REMOVE_KEY_COMPAT:
++#endif
++ case FM_PCD_IOC_HASH_TABLE_REMOVE_KEY:
++ {
++ ioc_fm_pcd_hash_table_remove_key_params_t *param = NULL;
++
++ param = (ioc_fm_pcd_hash_table_remove_key_params_t*) XX_Malloc(
++ sizeof(ioc_fm_pcd_hash_table_remove_key_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_hash_table_remove_key_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_hash_table_remove_key_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_hash_table_remove_key_params_t*) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_hash_table_remove_key_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_hash_table_remove_key_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_hash_table_remove_key_params_t*) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_hash_table_remove_key_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ param->p_hash_tbl = compat_pcd_id2ptr(compat_param->p_hash_tbl);
++ param->key_size = compat_param->key_size;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_hash_table_remove_key_params_t*)arg,
++ sizeof(ioc_fm_pcd_hash_table_remove_key_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ if (param->key_size)
++ {
++ uint8_t *p_key;
++
++ p_key = (uint8_t*) XX_Malloc(param->key_size);
++ if (!p_key)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ if (param->p_key && copy_from_user(p_key, param->p_key, param->key_size))
++ {
++ XX_Free(p_key);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ param->p_key = p_key;
++ }
++
++ err = FM_PCD_HashTableRemoveKey(
++ param->p_hash_tbl,
++ param->key_size,
++ param->p_key);
++
++        if (param->key_size && param->p_key)
++            XX_Free(param->p_key);
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_COMPAT:
++#endif
++ case FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY:
++ {
++ ioc_fm_pcd_cc_node_modify_key_params_t *param;
++
++ param = (ioc_fm_pcd_cc_node_modify_key_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_cc_node_modify_key_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_key_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_cc_node_modify_key_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_cc_node_modify_key_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_key_params_t));
++ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_node_modify_key_params_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_cc_node_modify_key(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_key_params_t *)arg,
++ sizeof(ioc_fm_pcd_cc_node_modify_key_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ if (param->key_size)
++ {
++ int size = 0;
++
++ if (param->p_key) size += param->key_size;
++ if (param->p_mask) size += param->key_size;
++
++ if (size)
++ {
++ uint8_t *p_tmp;
++
++ p_tmp = (uint8_t*) XX_Malloc(size);
++ if (!p_tmp)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD key/mask"));
++ }
++
++ if (param->p_key)
++ {
++ if (copy_from_user(p_tmp, param->p_key, param->key_size))
++ {
++ XX_Free(p_tmp);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ param->p_key = p_tmp;
++ }
++
++                    if (param->p_mask)
++                    {
++                        /* The mask is staged after the key in the same
++                           buffer; when no key was copied it occupies
++                           the start of the allocation. */
++                        if (param->p_key)
++                            p_tmp += param->key_size;
++                        if (copy_from_user(p_tmp, param->p_mask, param->key_size))
++                        {
++                            XX_Free(param->p_key ?
++                                    p_tmp - param->key_size : p_tmp);
++                            XX_Free(param);
++                            RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++                        }
++
++                        param->p_mask = p_tmp;
++                    }
++ }
++ }
++
++ err = FM_PCD_MatchTableModifyKey(param->id,
++ param->key_indx,
++ param->key_size,
++ param->p_key,
++ param->p_mask);
++
++        if (param->key_size && param->p_key)
++            XX_Free(param->p_key);
++        else if (param->key_size && param->p_mask)
++            XX_Free(param->p_mask);
++ XX_Free(param);
++ break;
++ }
++
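++        /*
++         * A non-NULL 'id' requests in-place replacement of an existing
++         * manip node; a NULL 'id' creates a new node and returns its
++         * handle through the same field.
++         */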
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MANIP_NODE_SET_COMPAT:
++#endif
++ case FM_PCD_IOC_MANIP_NODE_SET:
++ {
++ ioc_fm_pcd_manip_params_t *param;
++ uint8_t *p_data = NULL;
++ uint8_t size;
++
++ param = (ioc_fm_pcd_manip_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_manip_params_t));
++
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_manip_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_manip_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_manip_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_manip_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_manip_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_manip_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_manip_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_fm_pcd_manip_set_node(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_manip_params_t *)arg,
++ sizeof(ioc_fm_pcd_manip_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ if (param->type == e_IOC_FM_PCD_MANIP_HDR)
++ {
++ size = param->u.hdr.insrt_params.u.generic.size;
++ p_data = (uint8_t *) XX_Malloc(size);
++            if (!p_data)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, NO_MSG);
++ }
++
++ if (param->u.hdr.insrt_params.u.generic.p_data &&
++ copy_from_user(p_data,
++ param->u.hdr.insrt_params.u.generic.p_data, size))
++ {
++ XX_Free(p_data);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ param->u.hdr.insrt_params.u.generic.p_data = p_data;
++ }
++
++ if (param->id)
++ {
++ /* Security Hole: the user can pass any piece of garbage
++ in 'param->id', and that will go straight through to the LLD,
++ no checks being done by the wrapper! */
++ err = FM_PCD_ManipNodeReplace(
++ (t_Handle) param->id,
++ (t_FmPcdManipParams*) param);
++ if (err)
++ {
++ if (p_data)
++ XX_Free(p_data);
++ XX_Free(param);
++ break;
++ }
++ }
++ else
++ {
++ param->id = FM_PCD_ManipNodeSet(
++ p_LnxWrpFmDev->h_PcdDev,
++ (t_FmPcdManipParams*) param);
++ if (!param->id)
++ {
++ if (p_data)
++ XX_Free(p_data);
++ XX_Free(param);
++ err = E_INVALID_VALUE;
++ /* Since the LLD has no errno-style error reporting,
++ we're left here with no other option than to report
++ a generic E_INVALID_VALUE */
++ break;
++ }
++ }
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_manip_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_manip_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_manip_params_t));
++ if (!compat_param)
++ {
++ if (p_data)
++ XX_Free(p_data);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_manip_params_t));
++
++ compat_fm_pcd_manip_set_node(compat_param, param, COMPAT_K_TO_US);
++
++ if (copy_to_user((ioc_compat_fm_pcd_manip_params_t *) compat_ptr(arg),
++ compat_param,
++ sizeof(ioc_compat_fm_pcd_manip_params_t)))
++ err = E_READ_FAILED;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_pcd_manip_params_t *)arg,
++ param, sizeof(ioc_fm_pcd_manip_params_t)))
++ err = E_READ_FAILED;
++ }
++
++ if (p_data)
++ XX_Free(p_data);
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MANIP_NODE_DELETE_COMPAT:
++#endif
++ case FM_PCD_IOC_MANIP_NODE_DELETE:
++ {
++ ioc_fm_obj_t id;
++
++ memset(&id, 0, sizeof(ioc_fm_obj_t));
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_obj_delete(&compat_id, &id);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_ManipNodeDelete(id.obj);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_MANIP_GET_STATS_COMPAT:
++#endif
++ case FM_PCD_IOC_MANIP_GET_STATS:
++ {
++ ioc_fm_pcd_manip_get_stats_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_manip_get_stats_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_manip_get_stats_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_manip_get_stats_t));
++ if (!compat_param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_manip_get_stats_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_manip_get_stats_t *)compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_manip_get_stats_t)))
++ {
++ XX_Free(compat_param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_manip_get_stats(compat_param, &param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&param, (ioc_fm_pcd_manip_get_stats_t *)arg,
++ sizeof(ioc_fm_pcd_manip_get_stats_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PCD_ManipGetStatistics((t_Handle) param.id,
++ (t_FmPcdManipStats*) &param.stats);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_manip_get_stats_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_manip_get_stats_t*) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_manip_get_stats_t));
++ if (!compat_param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_manip_get_stats_t));
++ compat_copy_fm_pcd_manip_get_stats(compat_param, &param, COMPAT_K_TO_US);
++ if (copy_to_user((ioc_compat_fm_pcd_manip_get_stats_t*) compat_ptr(arg),
++ compat_param,
++                    sizeof(ioc_compat_fm_pcd_manip_get_stats_t))) {
++ XX_Free(compat_param);
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++ XX_Free(compat_param);
++ }
++        else
++#endif
++        {
++            if (copy_to_user((ioc_fm_pcd_manip_get_stats_t *)arg,
++                    &param,
++                    sizeof(ioc_fm_pcd_manip_get_stats_t)))
++                RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++        }
++
++ break;
++ }
++
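++        /* Frame-replication groups are only available on DPAA 1.1 and
++           later FMan hardware, hence the DPAA_VERSION guard. */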
++#if (DPAA_VERSION >= 11)
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_FRM_REPLIC_GROUP_SET_COMPAT:
++#endif
++ case FM_PCD_IOC_FRM_REPLIC_GROUP_SET:
++ {
++ ioc_fm_pcd_frm_replic_group_params_t *param;
++
++ param = (ioc_fm_pcd_frm_replic_group_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_frm_replic_group_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_frm_replic_group_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_frm_replic_group_params_t
++ *compat_param;
++
++ compat_param =
++ (ioc_compat_fm_pcd_frm_replic_group_params_t *)
++ XX_Malloc(sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY,
++ ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_frm_replic_group_params_t *)
++ compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t))) {
++ XX_Free(compat_param);
++ XX_Free(param);
++                RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_frm_replic_group_params(compat_param,
++ param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param,
++ (ioc_fm_pcd_frm_replic_group_params_t *)arg,
++ sizeof(ioc_fm_pcd_frm_replic_group_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++ }
++
++ param->id = FM_PCD_FrmReplicSetGroup(p_LnxWrpFmDev->h_PcdDev,
++ (t_FmPcdFrmReplicGroupParams*)param);
++
++ if (!param->id) {
++ XX_Free(param);
++ err = E_INVALID_VALUE;
++ /*
++ * Since the LLD has no errno-style error reporting,
++ * we're left here with no other option than to report
++ * a generic E_INVALID_VALUE
++ */
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_frm_replic_group_params_t
++ *compat_param;
++
++ compat_param =
++ (ioc_compat_fm_pcd_frm_replic_group_params_t *)
++ XX_Malloc(sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY,
++ ("IOCTL FM PCD"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t));
++ compat_copy_fm_pcd_frm_replic_group_params(compat_param,
++ param, COMPAT_K_TO_US);
++ if (copy_to_user(
++ (ioc_compat_fm_pcd_frm_replic_group_params_t *)
++ compat_ptr(arg),
++ compat_param,
++ sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t)))
++ err = E_WRITE_FAILED;
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_to_user(
++ (ioc_fm_pcd_frm_replic_group_params_t *)arg,
++ param,
++ sizeof(ioc_fm_pcd_frm_replic_group_params_t)))
++ err = E_WRITE_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++ break;
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_FRM_REPLIC_GROUP_DELETE_COMPAT:
++#endif
++ case FM_PCD_IOC_FRM_REPLIC_GROUP_DELETE:
++ {
++ ioc_fm_obj_t id;
++
++ memset(&id, 0, sizeof(ioc_fm_obj_t));
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id,
++ (ioc_compat_fm_obj_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_obj_t)))
++ break;
++ compat_obj_delete(&compat_id, &id);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg,
++ sizeof(ioc_fm_obj_t)))
++ break;
++ }
++
++ return FM_PCD_FrmReplicDeleteGroup(id.obj);
++ }
++ break;
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD_COMPAT:
++#endif
++ case FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD:
++ {
++ ioc_fm_pcd_frm_replic_member_params_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_frm_replic_member_params_t compat_param;
++
++ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_copy_fm_pcd_frm_replic_member_params(&compat_param, &param, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ if (copy_from_user(&param, (void *)arg, sizeof(param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ return FM_PCD_FrmReplicAddMember(param.member.h_replic_group,
++ param.member.member_index,
++ (t_FmPcdCcNextEngineParams*)&param.next_engine_params);
++ }
++ break;
++
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_FRM_REPLIC_MEMBER_REMOVE_COMPAT:
++#endif
++ case FM_PCD_IOC_FRM_REPLIC_MEMBER_REMOVE:
++ {
++ ioc_fm_pcd_frm_replic_member_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_frm_replic_member_t compat_param;
++
++ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_copy_fm_pcd_frm_replic_member(&compat_param, &param, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ if (copy_from_user(&param, (void *)arg, sizeof(param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ return FM_PCD_FrmReplicRemoveMember(param.h_replic_group, param.member_index);
++ }
++ break;
++
++#if defined(CONFIG_COMPAT)
++ case FM_IOC_VSP_CONFIG_COMPAT:
++#endif
++ case FM_IOC_VSP_CONFIG:
++ {
++ ioc_fm_vsp_params_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_vsp_params_t compat_param;
++
++ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_copy_fm_vsp_params(&compat_param, &param, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ if (copy_from_user(&param, (void *)arg, sizeof(param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
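++ /* local scope: pick up the LIODN offset configured for this Rx port */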
++ {
++ uint8_t portId = param.port_params.port_id;
++ param.liodn_offset =
++ p_LnxWrpFmDev->rxPorts[portId].settings.param.specificParams.rxParams.liodnOffset;
++ }
++ param.p_fm = p_LnxWrpFmDev->h_Dev;
++ param.id = FM_VSP_Config((t_FmVspParams *)&param);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_vsp_params_t compat_param;
++
++ memset(&compat_param, 0, sizeof(compat_param));
++ compat_copy_fm_vsp_params(&compat_param, &param, COMPAT_K_TO_US);
++
++ if (copy_to_user(compat_ptr(arg), &compat_param, sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ else
++#endif
++ if (copy_to_user((void *)arg, &param, sizeof(param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_IOC_VSP_INIT_COMPAT:
++#endif
++ case FM_IOC_VSP_INIT:
++ {
++ ioc_fm_obj_t id;
++
++ memset(&id, 0, sizeof(ioc_fm_obj_t));
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id,
++ (ioc_compat_fm_obj_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_obj_t)))
++ break;
++ id.obj = compat_pcd_id2ptr(compat_id.obj);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg,
++ sizeof(ioc_fm_obj_t)))
++ break;
++ }
++
++ return FM_VSP_Init(id.obj);
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_IOC_VSP_FREE_COMPAT:
++#endif
++ case FM_IOC_VSP_FREE:
++ {
++ ioc_fm_obj_t id;
++
++ memset(&id, 0, sizeof(ioc_fm_obj_t));
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id,
++ (ioc_compat_fm_obj_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_obj_t)))
++ break;
++ compat_obj_delete(&compat_id, &id);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg,
++ sizeof(ioc_fm_obj_t)))
++ break;
++ }
++
++ return FM_VSP_Free(id.obj);
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_IOC_VSP_CONFIG_POOL_DEPLETION_COMPAT:
++#endif
++ case FM_IOC_VSP_CONFIG_POOL_DEPLETION:
++ {
++ ioc_fm_buf_pool_depletion_params_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_buf_pool_depletion_params_t compat_param;
++
++ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_copy_fm_buf_pool_depletion_params(&compat_param, &param, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ if (copy_from_user(&param, (void *)arg, sizeof(param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ if (FM_VSP_ConfigPoolDepletion(param.p_fm_vsp,
++ (t_FmBufPoolDepletion *)&param.fm_buf_pool_depletion))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ break;
++ }
++
++
++#if defined(CONFIG_COMPAT)
++ case FM_IOC_VSP_CONFIG_BUFFER_PREFIX_CONTENT_COMPAT:
++#endif
++ case FM_IOC_VSP_CONFIG_BUFFER_PREFIX_CONTENT:
++ {
++ ioc_fm_buffer_prefix_content_params_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_buffer_prefix_content_params_t compat_param;
++
++ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_copy_fm_buffer_prefix_content_params(&compat_param, &param, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ if (copy_from_user(&param, (void *)arg, sizeof(param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ if (FM_VSP_ConfigBufferPrefixContent(param.p_fm_vsp,
++ (t_FmBufferPrefixContent *)&param.fm_buffer_prefix_content))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_IOC_VSP_CONFIG_NO_SG_COMPAT:
++#endif
++ case FM_IOC_VSP_CONFIG_NO_SG:
++ {
++ ioc_fm_vsp_config_no_sg_params_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_vsp_config_no_sg_params_t compat_param;
++
++ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_copy_fm_vsp_config_no_sg_params(&compat_param, &param, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ if (copy_from_user(&param, (void *)arg, sizeof(param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ if (FM_VSP_ConfigNoScatherGather(param.p_fm_vsp, param.no_sg))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_IOC_VSP_GET_BUFFER_PRS_RESULT_COMPAT:
++#endif
++ case FM_IOC_VSP_GET_BUFFER_PRS_RESULT:
++ {
++ ioc_fm_vsp_prs_result_params_t param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_vsp_prs_result_params_t compat_param;
++
++ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_copy_fm_vsp_prs_result_params(&compat_param, &param, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ if (copy_from_user(&param, (void *)arg, sizeof(param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ /* this call just adds the parse results offset to p_data */
++ param.p_data = FM_VSP_GetBufferPrsResult(param.p_fm_vsp, param.p_data);
++
++ if (!param.p_data)
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_vsp_prs_result_params_t compat_param;
++
++ memset(&compat_param, 0, sizeof(compat_param));
++ compat_copy_fm_vsp_prs_result_params(&compat_param, &param, COMPAT_K_TO_US);
++
++ if (copy_to_user(compat_ptr(arg), &compat_param, sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ else
++#endif
++ if (copy_to_user((void *)arg, &param, sizeof(param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ break;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++#ifdef FM_CAPWAP_SUPPORT
++#warning "feature not supported!"
++#if defined(CONFIG_COMPAT)
++ case FM_PCD_IOC_STATISTICS_SET_NODE_COMPAT:
++#endif
++ case FM_PCD_IOC_STATISTICS_SET_NODE:
++ {
++/* ioc_fm_pcd_stats_params_t param;
++ ...
++ param->id = FM_PCD_StatisticsSetNode(p_LnxWrpFmDev->h_PcdDev,
++ (t_FmPcdStatsParams *)&param);
++*/
++ err = E_NOT_SUPPORTED;
++ break;
++ }
++#endif /* FM_CAPWAP_SUPPORT */
++
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("invalid ioctl: cmd:0x%08x(type:0x%02x, nr: %d.\n",
++ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd)));
++ }
++
++ if (err)
++ RETURN_ERROR(MINOR, err, ("IOCTL FM PCD"));
++
++ return E_OK;
++}
++
++void FM_Get_Api_Version(ioc_fm_api_version_t *p_version)
++{
++ p_version->version.major = FMD_API_VERSION_MAJOR;
++ p_version->version.minor = FMD_API_VERSION_MINOR;
++ p_version->version.respin = FMD_API_VERSION_RESPIN;
++ p_version->version.reserved = 0;
++}
++
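++/*
++ * Top-level FM ioctl dispatcher: handles the FM-device commands directly and
++ * hands anything it does not recognize to LnxwrpFmPcdIOCTL(). The 'compat'
++ * flag makes each command read/write the 32-bit user-space layouts through
++ * compat_ptr() instead of the native ones.
++ */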
++t_Error LnxwrpFmIOCTL(t_LnxWrpFmDev *p_LnxWrpFmDev, unsigned int cmd, unsigned long arg, bool compat)
++{
++ t_Error err = E_OK;
++
++ switch (cmd)
++ {
++ case FM_IOC_SET_PORTS_BANDWIDTH:
++ {
++ ioc_fm_port_bandwidth_params *param;
++
++ param = (ioc_fm_port_bandwidth_params*) XX_Malloc(sizeof(ioc_fm_port_bandwidth_params));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_port_bandwidth_params));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_from_user(param, (ioc_fm_port_bandwidth_params*)compat_ptr(arg), sizeof(ioc_fm_port_bandwidth_params)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_port_bandwidth_params*)arg, sizeof(ioc_fm_port_bandwidth_params)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_SetPortsBandwidth(p_LnxWrpFmDev->h_Dev, (t_FmPortsBandwidthParams*) param);
++
++ XX_Free(param);
++ break;
++ }
++
++ case FM_IOC_GET_REVISION:
++ {
++ ioc_fm_revision_info_t *param;
++
++ param = (ioc_fm_revision_info_t *) XX_Malloc(sizeof(ioc_fm_revision_info_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ FM_GetRevision(p_LnxWrpFmDev->h_Dev, (t_FmRevisionInfo*)param);
++ /* This one never returns anything other than E_OK */
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_to_user((ioc_fm_revision_info_t *)compat_ptr(arg),
++ param,
++ sizeof(ioc_fm_revision_info_t))){
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_revision_info_t *)arg,
++ param,
++ sizeof(ioc_fm_revision_info_t))){
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
++ }
++ }
++ XX_Free(param);
++ break;
++ }
++
++ case FM_IOC_SET_COUNTER:
++ {
++ ioc_fm_counters_params_t *param;
++
++ param = (ioc_fm_counters_params_t *) XX_Malloc(sizeof(ioc_fm_counters_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_counters_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_from_user(param, (ioc_fm_counters_params_t *)compat_ptr(arg), sizeof(ioc_fm_counters_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_counters_params_t *)arg, sizeof(ioc_fm_counters_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_ModifyCounter(p_LnxWrpFmDev->h_Dev, param->cnt, param->val);
++
++ XX_Free(param);
++ break;
++ }
++
++ case FM_IOC_GET_COUNTER:
++ {
++ ioc_fm_counters_params_t *param;
++
++ param = (ioc_fm_counters_params_t *) XX_Malloc(sizeof(ioc_fm_counters_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
++
++ memset(param, 0, sizeof(ioc_fm_counters_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_from_user(param, (ioc_fm_counters_params_t *)compat_ptr(arg), sizeof(ioc_fm_counters_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_counters_params_t *)arg, sizeof(ioc_fm_counters_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ param->val = FM_GetCounter(p_LnxWrpFmDev->h_Dev, param->cnt);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_to_user((ioc_fm_counters_params_t *)compat_ptr(arg), param, sizeof(ioc_fm_counters_params_t)))
++ err = E_READ_FAILED;
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_counters_params_t *)arg, param, sizeof(ioc_fm_counters_params_t)))
++ err = E_READ_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++ case FM_IOC_FORCE_INTR:
++ {
++ ioc_fm_exceptions param;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (get_user(param, (ioc_fm_exceptions*) compat_ptr(arg)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ else
++#endif
++ {
++ if (get_user(param, (ioc_fm_exceptions*)arg))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_ForceIntr(p_LnxWrpFmDev->h_Dev, (e_FmExceptions)param);
++ break;
++ }
++
++ case FM_IOC_GET_API_VERSION:
++ {
++ ioc_fm_api_version_t version;
++
++ FM_Get_Api_Version(&version);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_to_user(
++ (ioc_fm_api_version_t *)compat_ptr(arg),
++ &version, sizeof(version)))
++ err = E_READ_FAILED;
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_api_version_t *)arg,
++ &version, sizeof(version)))
++ err = E_READ_FAILED;
++ }
++ }
++ break;
++
++ case FM_IOC_CTRL_MON_START:
++ {
++ FM_CtrlMonStart(p_LnxWrpFmDev->h_Dev);
++ }
++ break;
++
++ case FM_IOC_CTRL_MON_STOP:
++ {
++ FM_CtrlMonStop(p_LnxWrpFmDev->h_Dev);
++ }
++ break;
++
++#if defined(CONFIG_COMPAT)
++ case FM_IOC_CTRL_MON_GET_COUNTERS_COMPAT:
++#endif
++ case FM_IOC_CTRL_MON_GET_COUNTERS:
++ {
++ ioc_fm_ctrl_mon_counters_params_t param;
++ t_FmCtrlMon mon;
++
++#if defined(CONFIG_COMPAT)
++ ioc_compat_fm_ctrl_mon_counters_params_t compat_param;
++
++ if (compat)
++ {
++ if (copy_from_user(&compat_param, (void *)compat_ptr(arg),
++ sizeof(compat_param)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ param.fm_ctrl_index = compat_param.fm_ctrl_index;
++ param.p_mon = (fm_ctrl_mon_t *)compat_ptr(compat_param.p_mon);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&param, (void *)arg, sizeof(ioc_fm_ctrl_mon_counters_params_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ if (FM_CtrlMonGetCounters(p_LnxWrpFmDev->h_Dev, param.fm_ctrl_index, &mon))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ if (copy_to_user(param.p_mon, &mon, sizeof(t_FmCtrlMon)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ break;
++
++ default:
++ return LnxwrpFmPcdIOCTL(p_LnxWrpFmDev, cmd, arg, compat);
++ }
++
++ if (err)
++ RETURN_ERROR(MINOR, E_INVALID_OPERATION, ("IOCTL FM"));
++
++ return E_OK;
++}
++
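++/*
++ * Per-port ioctl dispatcher. Commands whose parameter blocks embed user
++ * pointers are staged through kernel copies allocated with XX_Malloc()
++ * before the corresponding FM_PORT_*() call; compat requests are first
++ * converted from their 32-bit layouts.
++ */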
++t_Error LnxwrpFmPortIOCTL(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev, unsigned int cmd, unsigned long arg, bool compat)
++{
++ t_Error err = E_OK;
++
++ _fm_ioctl_dbg("cmd:0x%08x(type:0x%02x, nr:%u).\n",
++ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd) - 70);
++
++ switch (cmd)
++ {
++ case FM_PORT_IOC_DISABLE:
++ FM_PORT_Disable(p_LnxWrpFmPortDev->h_Dev);
++ /* deliberately ignoring error codes here */
++ return E_OK;
++
++ case FM_PORT_IOC_ENABLE:
++ FM_PORT_Enable(p_LnxWrpFmPortDev->h_Dev);
++ /* deliberately ignoring error codes here */
++ return E_OK;
++
++ case FM_PORT_IOC_SET_ERRORS_ROUTE:
++ {
++ ioc_fm_port_frame_err_select_t errs;
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (get_user(errs, (ioc_fm_port_frame_err_select_t*)compat_ptr(arg)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ else
++#endif
++ {
++ if (get_user(errs, (ioc_fm_port_frame_err_select_t*)arg))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PORT_SetErrorsRoute(p_LnxWrpFmPortDev->h_Dev, (fmPortFrameErrSelect_t)errs);
++ break;
++ }
++
++ case FM_PORT_IOC_SET_RATE_LIMIT:
++ {
++ ioc_fm_port_rate_limit_t *param;
++
++ param = (ioc_fm_port_rate_limit_t *) XX_Malloc(sizeof(ioc_fm_port_rate_limit_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(param, 0, sizeof(ioc_fm_port_rate_limit_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_from_user(param, (ioc_fm_port_rate_limit_t *)compat_ptr(arg), sizeof(ioc_fm_port_rate_limit_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_port_rate_limit_t *)arg, sizeof(ioc_fm_port_rate_limit_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PORT_SetRateLimit(p_LnxWrpFmPortDev->h_Dev, (t_FmPortRateLimit *)param);
++
++ XX_Free(param);
++ break;
++ }
++
++ case FM_PORT_IOC_REMOVE_RATE_LIMIT:
++ FM_PORT_DeleteRateLimit(p_LnxWrpFmPortDev->h_Dev);
++ /* deliberately ignoring error codes here */
++ return E_OK;
++
++ case FM_PORT_IOC_ALLOC_PCD_FQIDS:
++ {
++ ioc_fm_port_pcd_fqids_params_t *param;
++
++ if (!p_LnxWrpFmPortDev->pcd_owner_params.cba)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("No one to listen on this PCD!!!"));
++
++ param = (ioc_fm_port_pcd_fqids_params_t *) XX_Malloc(sizeof(ioc_fm_port_pcd_fqids_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(param, 0, sizeof(ioc_fm_port_pcd_fqids_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_from_user(param, (ioc_fm_port_pcd_fqids_params_t *)compat_ptr(arg),
++ sizeof(ioc_fm_port_pcd_fqids_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_port_pcd_fqids_params_t *)arg,
++ sizeof(ioc_fm_port_pcd_fqids_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ if (p_LnxWrpFmPortDev->pcd_owner_params.cba(p_LnxWrpFmPortDev->pcd_owner_params.dev,
++ param->num_fqids,
++ param->alignment,
++ &param->base_fqid))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("can't allocate fqids for PCD!!!"));
++ }
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_to_user((ioc_fm_port_pcd_fqids_params_t *)compat_ptr(arg),
++ param, sizeof(ioc_fm_port_pcd_fqids_params_t)))
++ err = E_READ_FAILED;
++ }
++ else
++#endif
++ {
++ if (copy_to_user((ioc_fm_port_pcd_fqids_params_t *)arg,
++ param, sizeof(ioc_fm_port_pcd_fqids_params_t)))
++ err = E_READ_FAILED;
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++ case FM_PORT_IOC_FREE_PCD_FQIDS:
++ {
++ uint32_t base_fqid;
++
++ if (!p_LnxWrpFmPortDev->pcd_owner_params.cbf)
++ RETURN_ERROR(MINOR, E_INVALID_STATE, ("No one to listen on this PCD!!!"));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (get_user(base_fqid, (uint32_t*) compat_ptr(arg)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ else
++#endif
++ {
++ if (get_user(base_fqid, (uint32_t*)arg))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ if (p_LnxWrpFmPortDev->pcd_owner_params.cbf(p_LnxWrpFmPortDev->pcd_owner_params.dev, base_fqid))
++ err = E_WRITE_FAILED;
++
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PORT_IOC_SET_PCD_COMPAT:
++#endif
++ case FM_PORT_IOC_SET_PCD:
++ {
++ ioc_fm_port_pcd_params_t *port_pcd_params;
++ ioc_fm_port_pcd_prs_params_t *port_pcd_prs_params;
++ ioc_fm_port_pcd_cc_params_t *port_pcd_cc_params;
++ ioc_fm_port_pcd_kg_params_t *port_pcd_kg_params;
++ ioc_fm_port_pcd_plcr_params_t *port_pcd_plcr_params;
++
++ port_pcd_params = (ioc_fm_port_pcd_params_t *) XX_Malloc(
++ sizeof(ioc_fm_port_pcd_params_t) +
++ sizeof(ioc_fm_port_pcd_prs_params_t) +
++ sizeof(ioc_fm_port_pcd_cc_params_t) +
++ sizeof(ioc_fm_port_pcd_kg_params_t) +
++ sizeof(ioc_fm_port_pcd_plcr_params_t));
++ if (!port_pcd_params)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(port_pcd_params, 0,
++ sizeof(ioc_fm_port_pcd_params_t) +
++ sizeof(ioc_fm_port_pcd_prs_params_t) +
++ sizeof(ioc_fm_port_pcd_cc_params_t) +
++ sizeof(ioc_fm_port_pcd_kg_params_t) +
++ sizeof(ioc_fm_port_pcd_plcr_params_t));
++
++ port_pcd_prs_params = (ioc_fm_port_pcd_prs_params_t *) (port_pcd_params + 1);
++ port_pcd_cc_params = (ioc_fm_port_pcd_cc_params_t *) (port_pcd_prs_params + 1);
++ port_pcd_kg_params = (ioc_fm_port_pcd_kg_params_t *) (port_pcd_cc_params + 1);
++ port_pcd_plcr_params = (ioc_fm_port_pcd_plcr_params_t *) (port_pcd_kg_params + 1);
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_port_pcd_params_t *compat_port_pcd_params;
++ ioc_fm_port_pcd_prs_params_t *same_port_pcd_prs_params;
++ ioc_compat_fm_port_pcd_cc_params_t *compat_port_pcd_cc_params;
++ ioc_compat_fm_port_pcd_kg_params_t *compat_port_pcd_kg_params;
++ ioc_compat_fm_port_pcd_plcr_params_t *compat_port_pcd_plcr_params;
++
++ compat_port_pcd_params = (ioc_compat_fm_port_pcd_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_port_pcd_params_t) +
++ sizeof(ioc_fm_port_pcd_prs_params_t) +
++ sizeof(ioc_compat_fm_port_pcd_cc_params_t) +
++ sizeof(ioc_compat_fm_port_pcd_kg_params_t) +
++ sizeof(ioc_compat_fm_port_pcd_plcr_params_t));
++ if (!compat_port_pcd_params)
++ {
++ XX_Free(port_pcd_params);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++ }
++
++ memset(compat_port_pcd_params, 0,
++ sizeof(ioc_compat_fm_port_pcd_params_t) +
++ sizeof(ioc_fm_port_pcd_prs_params_t) +
++ sizeof(ioc_compat_fm_port_pcd_cc_params_t) +
++ sizeof(ioc_compat_fm_port_pcd_kg_params_t) +
++ sizeof(ioc_compat_fm_port_pcd_plcr_params_t));
++ same_port_pcd_prs_params = (ioc_fm_port_pcd_prs_params_t *) (compat_port_pcd_params + 1);
++ compat_port_pcd_cc_params = (ioc_compat_fm_port_pcd_cc_params_t *) (same_port_pcd_prs_params + 1);
++ compat_port_pcd_kg_params = (ioc_compat_fm_port_pcd_kg_params_t *) (compat_port_pcd_cc_params + 1);
++ compat_port_pcd_plcr_params = (ioc_compat_fm_port_pcd_plcr_params_t *) (compat_port_pcd_kg_params + 1);
++
++ if (copy_from_user(compat_port_pcd_params,
++ (ioc_compat_fm_port_pcd_params_t*) compat_ptr(arg),
++ sizeof(ioc_compat_fm_port_pcd_params_t)))
++ err = E_WRITE_FAILED;
++
++ while (!err) /* pseudo-while */
++ {
++ /* set pointers from where to copy from: */
++ port_pcd_params->p_prs_params = compat_ptr(compat_port_pcd_params->p_prs_params); /* same structure */
++ port_pcd_params->p_cc_params = compat_ptr(compat_port_pcd_params->p_cc_params);
++ port_pcd_params->p_kg_params = compat_ptr(compat_port_pcd_params->p_kg_params);
++ port_pcd_params->p_plcr_params = compat_ptr(compat_port_pcd_params->p_plcr_params);
++ port_pcd_params->p_ip_reassembly_manip = compat_ptr(compat_port_pcd_params->p_ip_reassembly_manip);
++#if (DPAA_VERSION >= 11)
++ port_pcd_params->p_capwap_reassembly_manip = compat_ptr(compat_port_pcd_params->p_capwap_reassembly_manip);
++#endif
++ /* the prs member is the same; there is no compat structure, so a plain memcpy is enough */
++ if (port_pcd_params->p_prs_params)
++ {
++ if (copy_from_user(same_port_pcd_prs_params,
++ port_pcd_params->p_prs_params,
++ sizeof(ioc_fm_port_pcd_prs_params_t)))
++ {
++ err = E_WRITE_FAILED;
++ break; /* from pseudo-while */
++ }
++
++ memcpy(port_pcd_prs_params, same_port_pcd_prs_params, sizeof(ioc_fm_port_pcd_prs_params_t));
++ port_pcd_params->p_prs_params = port_pcd_prs_params;
++ }
++
++ if (port_pcd_params->p_cc_params)
++ {
++ if (copy_from_user(compat_port_pcd_cc_params,
++ port_pcd_params->p_cc_params,
++ sizeof(ioc_compat_fm_port_pcd_cc_params_t)))
++ {
++ err = E_WRITE_FAILED;
++ break; /* from pseudo-while */
++ }
++
++ port_pcd_params->p_cc_params = port_pcd_cc_params;
++ }
++
++ if (port_pcd_params->p_kg_params)
++ {
++ if (copy_from_user(compat_port_pcd_kg_params,
++ port_pcd_params->p_kg_params,
++ sizeof(ioc_compat_fm_port_pcd_kg_params_t)))
++ {
++ err = E_WRITE_FAILED;
++ break; /* from pseudo-while */
++ }
++
++ port_pcd_params->p_kg_params = port_pcd_kg_params;
++ }
++
++ if (port_pcd_params->p_plcr_params)
++ {
++ if (copy_from_user(compat_port_pcd_plcr_params,
++ port_pcd_params->p_plcr_params,
++ sizeof(ioc_compat_fm_port_pcd_plcr_params_t)))
++ {
++ err = E_WRITE_FAILED;
++ break; /* from pseudo-while */
++ }
++
++ port_pcd_params->p_plcr_params = port_pcd_plcr_params;
++ }
++
++ break; /* pseudo-while: always runs once! */
++ }
++
++ if (!err)
++ compat_copy_fm_port_pcd(compat_port_pcd_params, port_pcd_params, COMPAT_US_TO_K);
++
++ XX_Free(compat_port_pcd_params);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(port_pcd_params,
++ (ioc_fm_port_pcd_params_t*) arg,
++ sizeof(ioc_fm_port_pcd_params_t)))
++ err = E_WRITE_FAILED;
++
++ while (!err) /* pseudo-while */
++ {
++ if (port_pcd_params->p_prs_params)
++ {
++ if (copy_from_user(port_pcd_prs_params,
++ port_pcd_params->p_prs_params,
++ sizeof(ioc_fm_port_pcd_prs_params_t)))
++ {
++ err = E_WRITE_FAILED;
++ break; /* from pseudo-while */
++ }
++
++ port_pcd_params->p_prs_params = port_pcd_prs_params;
++ }
++
++ if (port_pcd_params->p_cc_params)
++ {
++ if (copy_from_user(port_pcd_cc_params,
++ port_pcd_params->p_cc_params,
++ sizeof(ioc_fm_port_pcd_cc_params_t)))
++ {
++ err = E_WRITE_FAILED;
++ break; /* from pseudo-while */
++ }
++
++ port_pcd_params->p_cc_params = port_pcd_cc_params;
++ }
++
++ if (port_pcd_params->p_kg_params)
++ {
++ if (copy_from_user(port_pcd_kg_params,
++ port_pcd_params->p_kg_params,
++ sizeof(ioc_fm_port_pcd_kg_params_t)))
++ {
++ err = E_WRITE_FAILED;
++ break; /* from pseudo-while */
++ }
++
++ port_pcd_params->p_kg_params = port_pcd_kg_params;
++ }
++
++ if (port_pcd_params->p_plcr_params)
++ {
++ if (copy_from_user(port_pcd_plcr_params,
++ port_pcd_params->p_plcr_params,
++ sizeof(ioc_fm_port_pcd_plcr_params_t)))
++ {
++ err = E_WRITE_FAILED;
++ break; /* from pseudo-while */
++ }
++
++ port_pcd_params->p_plcr_params = port_pcd_plcr_params;
++ }
++
++ break; /* pseudo-while: always runs once! */
++ }
++ }
++
++ if (!err)
++ err = FM_PORT_SetPCD(p_LnxWrpFmPortDev->h_Dev, (t_FmPortPcdParams*) port_pcd_params);
++
++ XX_Free(port_pcd_params);
++ break;
++ }
++
++ case FM_PORT_IOC_DELETE_PCD:
++ err = FM_PORT_DeletePCD(p_LnxWrpFmPortDev->h_Dev);
++ break;
++
++#if defined(CONFIG_COMPAT)
++ case FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME_COMPAT:
++#endif
++ case FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME:
++ {
++ ioc_fm_pcd_kg_scheme_select_t *param;
++
++ param = (ioc_fm_pcd_kg_scheme_select_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_kg_scheme_select_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_kg_scheme_select_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_kg_scheme_select_t *compat_param;
++
++ compat_param = (ioc_compat_fm_pcd_kg_scheme_select_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_pcd_kg_scheme_select_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_select_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_pcd_kg_scheme_select_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_kg_scheme_select_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_kg_scheme_select(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_select_t *)arg,
++ sizeof(ioc_fm_pcd_kg_scheme_select_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PORT_PcdKgModifyInitialScheme(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdKgSchemeSelect *)param);
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE_COMPAT:
++#endif
++ case FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE:
++ {
++ ioc_fm_obj_t id;
++
++ memset(&id, 0 , sizeof(ioc_fm_obj_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ id.obj = compat_ptr(compat_id.obj);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PORT_PcdPlcrModifyInitialProfile(p_LnxWrpFmPortDev->h_Dev, id.obj);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PORT_IOC_PCD_KG_BIND_SCHEMES_COMPAT:
++#endif
++ case FM_PORT_IOC_PCD_KG_BIND_SCHEMES:
++ {
++ ioc_fm_pcd_port_schemes_params_t *param;
++
++ param = (ioc_fm_pcd_port_schemes_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_port_schemes_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_port_schemes_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_port_schemes_params_t compat_param;
++
++ if (copy_from_user(&compat_param,
++ (ioc_compat_fm_pcd_port_schemes_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_port_schemes_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_kg_schemes_params(&compat_param, param, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_port_schemes_params_t *) arg,
++ sizeof(ioc_fm_pcd_port_schemes_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PORT_PcdKgBindSchemes(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdPortSchemesParams *)param);
++
++ XX_Free(param);
++ break;
++ }
++
++#if defined(CONFIG_COMPAT)
++ case FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES_COMPAT:
++#endif
++ case FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES:
++ {
++ ioc_fm_pcd_port_schemes_params_t *param;
++
++ param = (ioc_fm_pcd_port_schemes_params_t *) XX_Malloc(
++ sizeof(ioc_fm_pcd_port_schemes_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(param, 0, sizeof(ioc_fm_pcd_port_schemes_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_pcd_port_schemes_params_t compat_param;
++
++ if (copy_from_user(&compat_param,
++ (ioc_compat_fm_pcd_port_schemes_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_pcd_port_schemes_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_pcd_kg_schemes_params(&compat_param, param, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_pcd_port_schemes_params_t *) arg,
++ sizeof(ioc_fm_pcd_port_schemes_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = FM_PORT_PcdKgUnbindSchemes(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdPortSchemesParams *)param);
++
++ XX_Free(param);
++ break;
++ }
++
++ case FM_PORT_IOC_PCD_PLCR_ALLOC_PROFILES:
++ {
++ uint16_t num;
++ if (get_user(num, (uint16_t*) arg))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ err = FM_PORT_PcdPlcrAllocProfiles(p_LnxWrpFmPortDev->h_Dev, num);
++ break;
++ }
++
++ case FM_PORT_IOC_PCD_PLCR_FREE_PROFILES:
++ err = FM_PORT_PcdPlcrFreeProfiles(p_LnxWrpFmPortDev->h_Dev);
++ break;
++
++ case FM_PORT_IOC_DETACH_PCD:
++ err = FM_PORT_DetachPCD(p_LnxWrpFmPortDev->h_Dev);
++ break;
++
++ case FM_PORT_IOC_ATTACH_PCD:
++ err = FM_PORT_AttachPCD(p_LnxWrpFmPortDev->h_Dev);
++ break;
++
++#if defined(CONFIG_COMPAT)
++ case FM_PORT_IOC_PCD_CC_MODIFY_TREE_COMPAT:
++#endif
++ case FM_PORT_IOC_PCD_CC_MODIFY_TREE:
++ {
++ ioc_fm_obj_t id;
++
++ memset(&id, 0 , sizeof(ioc_fm_obj_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_obj_t compat_id;
++
++ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ compat_copy_fm_port_pcd_modify_tree(&compat_id, &id, COMPAT_US_TO_K);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ err = FM_PORT_PcdCcModifyTree(p_LnxWrpFmPortDev->h_Dev, id.obj);
++ break;
++ }
++
++ case FM_PORT_IOC_ADD_CONGESTION_GRPS:
++ case FM_PORT_IOC_REMOVE_CONGESTION_GRPS:
++ {
++ ioc_fm_port_congestion_groups_t *param;
++
++ param = (ioc_fm_port_congestion_groups_t*) XX_Malloc(sizeof(ioc_fm_port_congestion_groups_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(param, 0, sizeof(ioc_fm_port_congestion_groups_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_from_user(param, (t_FmPortCongestionGrps*) compat_ptr(arg),
++ sizeof(t_FmPortCongestionGrps)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif /* CONFIG_COMPAT */
++ {
++ if (copy_from_user(param, (t_FmPortCongestionGrps*) arg,
++ sizeof(t_FmPortCongestionGrps)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ err = (cmd == FM_PORT_IOC_ADD_CONGESTION_GRPS)
++ ? FM_PORT_AddCongestionGrps(p_LnxWrpFmPortDev->h_Dev, (t_FmPortCongestionGrps*) param)
++ : FM_PORT_RemoveCongestionGrps(p_LnxWrpFmPortDev->h_Dev, (t_FmPortCongestionGrps*) param)
++ ;
++
++ XX_Free(param);
++ break;
++ }
++
++ case FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR:
++ case FM_PORT_IOC_REMOVE_RX_HASH_MAC_ADDR:
++ {
++ ioc_fm_port_mac_addr_params_t *param;
++
++ param = (ioc_fm_port_mac_addr_params_t*) XX_Malloc(
++ sizeof(ioc_fm_port_mac_addr_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(param, 0, sizeof(ioc_fm_port_mac_addr_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ if (copy_from_user(param, (ioc_fm_port_mac_addr_params_t*) compat_ptr(arg),
++ sizeof(ioc_fm_port_mac_addr_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++ else
++#endif /* CONFIG_COMPAT */
++ {
++ if (copy_from_user(param, (ioc_fm_port_mac_addr_params_t*) arg,
++ sizeof(ioc_fm_port_mac_addr_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ if (p_LnxWrpFmPortDev->pcd_owner_params.dev)
++ {
++ int id = -1;
++
++ switch(p_LnxWrpFmPortDev->settings.param.portType)
++ {
++ case e_FM_PORT_TYPE_RX:
++ case e_FM_PORT_TYPE_TX:
++ id = p_LnxWrpFmPortDev->id;
++ break;
++ case e_FM_PORT_TYPE_RX_10G:
++ case e_FM_PORT_TYPE_TX_10G:
++ id = p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_MACS;
++ break;
++ default:
++ err = E_NOT_AVAILABLE;
++ REPORT_ERROR(MINOR, err, ("Attempt to add/remove hash MAC addr. to/from MAC-less port!"));
++ }
++ if (id >= 0)
++ {
++ t_LnxWrpFmDev *fm = (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++ t_Handle mac_handle = fm->macs[id].h_Dev;
++
++ err = (cmd == FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR)
++ ? FM_MAC_AddHashMacAddr(mac_handle, (t_EnetAddr*) param)
++ : FM_MAC_RemoveHashMacAddr(mac_handle, (t_EnetAddr*) param);
++ }
++ }
++ else
++ {
++ err = E_NOT_AVAILABLE;
++ REPORT_ERROR(MINOR, err, ("Port not initialized or other error!?!?"));
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++ case FM_PORT_IOC_SET_TX_PAUSE_FRAMES:
++ {
++ t_LnxWrpFmDev *p_LnxWrpFmDev =
++ (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++ ioc_fm_port_tx_pause_frames_params_t param;
++ int mac_id = p_LnxWrpFmPortDev->id;
++
++ if(&p_LnxWrpFmDev->txPorts[mac_id] != p_LnxWrpFmPortDev)
++ mac_id += FM_MAX_NUM_OF_1G_MACS; /* 10G port */
++
++ if (copy_from_user(&param, (ioc_fm_port_tx_pause_frames_params_t *)arg,
++ sizeof(ioc_fm_port_tx_pause_frames_params_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ if (p_LnxWrpFmDev && p_LnxWrpFmDev->macs[mac_id].h_Dev)
++ {
++ FM_MAC_SetTxPauseFrames(p_LnxWrpFmDev->macs[mac_id].h_Dev,
++ param.priority,
++ param.pause_time,
++ param.thresh_time);
++ }
++ else
++ {
++ err = E_NOT_AVAILABLE;
++ REPORT_ERROR(MINOR, err, ("Port not initialized or other error!"));
++ }
++
++ break;
++ }
++
++ case FM_PORT_IOC_CONFIG_BUFFER_PREFIX_CONTENT:
++ {
++ ioc_fm_buffer_prefix_content_t *param;
++
++ param = (ioc_fm_buffer_prefix_content_t*) XX_Malloc(sizeof(ioc_fm_buffer_prefix_content_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(param, 0, sizeof(ioc_fm_buffer_prefix_content_t));
++
++ if (copy_from_user(param, (ioc_fm_buffer_prefix_content_t*) arg,
++ sizeof(ioc_fm_buffer_prefix_content_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ if (FM_PORT_ConfigBufferPrefixContent(p_LnxWrpFmPortDev->h_Dev,
++ (t_FmBufferPrefixContent *)param))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ XX_Free(param);
++ break;
++ }
++
++#if (DPAA_VERSION >= 11)
++#if defined(CONFIG_COMPAT)
++ case FM_PORT_IOC_VSP_ALLOC_COMPAT:
++#endif
++ case FM_PORT_IOC_VSP_ALLOC:
++ {
++ ioc_fm_port_vsp_alloc_params_t *param;
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ t_LnxWrpFmPortDev *p_LnxWrpFmTxPortDev;
++
++ param = (ioc_fm_port_vsp_alloc_params_t *) XX_Malloc(
++ sizeof(ioc_fm_port_vsp_alloc_params_t));
++ if (!param)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++
++ memset(param, 0, sizeof(ioc_fm_port_vsp_alloc_params_t));
++
++#if defined(CONFIG_COMPAT)
++ if (compat)
++ {
++ ioc_compat_fm_port_vsp_alloc_params_t *compat_param;
++
++ compat_param = (ioc_compat_fm_port_vsp_alloc_params_t *) XX_Malloc(
++ sizeof(ioc_compat_fm_port_vsp_alloc_params_t));
++ if (!compat_param)
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
++ }
++
++ memset(compat_param, 0, sizeof(ioc_compat_fm_port_vsp_alloc_params_t));
++ if (copy_from_user(compat_param,
++ (ioc_compat_fm_port_vsp_alloc_params_t *) compat_ptr(arg),
++ sizeof(ioc_compat_fm_port_vsp_alloc_params_t)))
++ {
++ XX_Free(compat_param);
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ compat_copy_fm_port_vsp_alloc_params(compat_param, param, COMPAT_US_TO_K);
++
++ XX_Free(compat_param);
++ }
++ else
++#endif
++ {
++ if (copy_from_user(param, (ioc_fm_port_vsp_alloc_params_t *)arg,
++ sizeof(ioc_fm_port_vsp_alloc_params_t)))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++ }
++
++ /* Userspace may not have the Tx port t_Handle when issuing the IOCTL */
++ if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX ||
++ p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX_10G)
++ {
++ /* Determine the Tx port t_Handle from the Rx port id */
++ p_LnxWrpFmDev = p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++ p_LnxWrpFmTxPortDev = &p_LnxWrpFmDev->txPorts[p_LnxWrpFmPortDev->id];
++ param->p_fm_tx_port = p_LnxWrpFmTxPortDev->h_Dev;
++ }
++
++ if (FM_PORT_VSPAlloc(p_LnxWrpFmPortDev->h_Dev, (t_FmPortVSPAllocParams *)param))
++ {
++ XX_Free(param);
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++ }
++
++ XX_Free(param);
++ break;
++ }
++#endif /* (DPAA_VERSION >= 11) */
++
++ case FM_PORT_IOC_GET_MAC_STATISTICS:
++ {
++ t_LnxWrpFmDev *p_LnxWrpFmDev =
++ (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++ ioc_fm_port_mac_statistics_t param;
++ int mac_id = p_LnxWrpFmPortDev->id;
++
++ if (!p_LnxWrpFmDev)
++ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Port not initialized or other error!"));
++
++ if (&p_LnxWrpFmDev->txPorts[mac_id] != p_LnxWrpFmPortDev &&
++ &p_LnxWrpFmDev->rxPorts[mac_id] != p_LnxWrpFmPortDev)
++ mac_id += FM_MAX_NUM_OF_1G_MACS; /* 10G port */
++
++ if (!p_LnxWrpFmDev->macs[mac_id].h_Dev)
++ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Port not initialized or other error!"));
++
++ if (FM_MAC_GetStatistics(p_LnxWrpFmDev->macs[mac_id].h_Dev,
++ (t_FmMacStatistics *)&param))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ if (copy_to_user((ioc_fm_port_mac_statistics_t *)arg, &param,
++ sizeof(ioc_fm_port_mac_statistics_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ break;
++ }
++
++ case FM_PORT_IOC_GET_BMI_COUNTERS:
++ {
++ t_LnxWrpFmDev *p_LnxWrpFmDev =
++ (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++ ioc_fm_port_bmi_stats_t param;
++
++ if (!p_LnxWrpFmDev)
++ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Port not initialized or other error!"));
++
++ if (FM_PORT_GetBmiCounters(p_LnxWrpFmPortDev->h_Dev,
++ (t_FmPortBmiStats *)&param))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ if (copy_to_user((ioc_fm_port_bmi_stats_t *)arg, &param,
++ sizeof(ioc_fm_port_bmi_stats_t)))
++ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
++
++ break;
++ }
++
++ default:
++ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
++ ("invalid ioctl: cmd:0x%08x(type:0x%02x, nr:0x%02x.\n",
++ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd)));
++ }
++
++ if (err)
++ RETURN_ERROR(MINOR, E_INVALID_OPERATION, ("IOCTL FM PORT"));
++
++ return E_OK;
++}
++
++/*****************************************************************************/
++/* API routines for the FM Linux Device */
++/*****************************************************************************/
++
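++/*
++ * open() binds the caller to the FM device whose char-dev major matches the
++ * inode, then selects what goes into file->private_data by minor number:
++ * the FM/PCD minors get the t_LnxWrpFmDev itself, while the OH/Rx/Tx minor
++ * ranges resolve to the corresponding t_LnxWrpFmPortDev.
++ */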
++static int fm_open(struct inode *inode, struct file *file)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev = NULL;
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = NULL;
++ unsigned int major = imajor(inode);
++ unsigned int minor = iminor(inode);
++ struct device_node *fm_node;
++ static struct of_device_id fm_node_of_match[] = {
++ { .compatible = "fsl,fman", },
++ { /* end of list */ },
++ };
++
++ DBG(TRACE, ("Opening minor - %d - ", minor));
++
++ if (file->private_data != NULL)
++ return 0;
++
++ /* Get all the FM nodes */
++ for_each_matching_node(fm_node, fm_node_of_match) {
++ struct platform_device *of_dev;
++
++ of_dev = of_find_device_by_node(fm_node);
++ if (unlikely(of_dev == NULL)) {
++ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!"));
++ return -ENXIO;
++ }
++
++ p_LnxWrpFmDev = (t_LnxWrpFmDev *)fm_bind(&of_dev->dev);
++ if (p_LnxWrpFmDev->major == major)
++ break;
++ fm_unbind((struct fm *)p_LnxWrpFmDev);
++ p_LnxWrpFmDev = NULL;
++ }
++
++ if (!p_LnxWrpFmDev)
++ return -ENODEV;
++
++ if (minor == DEV_FM_MINOR_BASE)
++ file->private_data = p_LnxWrpFmDev;
++ else if (minor == DEV_FM_PCD_MINOR_BASE)
++ file->private_data = p_LnxWrpFmDev;
++ else {
++ if (minor == DEV_FM_OH_PORTS_MINOR_BASE)
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->hcPort;
++ else if ((minor > DEV_FM_OH_PORTS_MINOR_BASE) && (minor < DEV_FM_RX_PORTS_MINOR_BASE))
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->opPorts[minor-DEV_FM_OH_PORTS_MINOR_BASE-1];
++ else if ((minor >= DEV_FM_RX_PORTS_MINOR_BASE) && (minor < DEV_FM_TX_PORTS_MINOR_BASE))
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[minor-DEV_FM_RX_PORTS_MINOR_BASE];
++ else if ((minor >= DEV_FM_TX_PORTS_MINOR_BASE) && (minor < DEV_FM_MAX_MINORS))
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[minor-DEV_FM_TX_PORTS_MINOR_BASE];
++ else
++ return -EINVAL;
++
++ /* when opening a port, check that it is initialized */
++ if (!p_LnxWrpFmPortDev->h_Dev)
++ return -ENODEV;
++
++ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)fm_port_bind(p_LnxWrpFmPortDev->dev);
++ file->private_data = p_LnxWrpFmPortDev;
++ fm_unbind((struct fm *)p_LnxWrpFmDev);
++ }
++
++ if (file->private_data == NULL)
++ return -ENXIO;
++
++ return 0;
++}
++
++static int fm_close(struct inode *inode, struct file *file)
++{
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++ unsigned int minor = iminor(inode);
++ int err = 0;
++
++ DBG(TRACE, ("Closing minor - %d - ", minor));
++
++ if ((minor == DEV_FM_MINOR_BASE) ||
++ (minor == DEV_FM_PCD_MINOR_BASE))
++ {
++ p_LnxWrpFmDev = (t_LnxWrpFmDev*)file->private_data;
++ if (!p_LnxWrpFmDev)
++ return -ENODEV;
++ fm_unbind((struct fm *)p_LnxWrpFmDev);
++ }
++ else if (((minor >= DEV_FM_OH_PORTS_MINOR_BASE) && (minor < DEV_FM_RX_PORTS_MINOR_BASE)) ||
++ ((minor >= DEV_FM_RX_PORTS_MINOR_BASE) && (minor < DEV_FM_TX_PORTS_MINOR_BASE)) ||
++ ((minor >= DEV_FM_TX_PORTS_MINOR_BASE) && (minor < DEV_FM_MAX_MINORS)))
++ {
++ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)file->private_data;
++ if (!p_LnxWrpFmPortDev)
++ return -ENODEV;
++ fm_port_unbind((struct fm_port *)p_LnxWrpFmPortDev);
++ }
++
++ return err;
++}
++
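++/*
++ * Common worker for the native and compat ioctl entry points: routes the
++ * request to the FM/PCD or port handler according to the minor number and
++ * folds any t_Error from those handlers into -EFAULT for user space.
++ */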
++static int fm_ioctls(unsigned int minor, struct file *file, unsigned int cmd, unsigned long arg, bool compat)
++{
++ DBG(TRACE, ("IOCTL minor - %u, cmd - 0x%08x, arg - 0x%08lx \n", minor, cmd, arg));
++
++ if ((minor == DEV_FM_MINOR_BASE) ||
++ (minor == DEV_FM_PCD_MINOR_BASE))
++ {
++ t_LnxWrpFmDev *p_LnxWrpFmDev = ((t_LnxWrpFmDev*)file->private_data);
++ if (!p_LnxWrpFmDev)
++ return -ENODEV;
++ if (LnxwrpFmIOCTL(p_LnxWrpFmDev, cmd, arg, compat))
++ return -EFAULT;
++ }
++ else if (((minor >= DEV_FM_OH_PORTS_MINOR_BASE) && (minor < DEV_FM_RX_PORTS_MINOR_BASE)) ||
++ ((minor >= DEV_FM_RX_PORTS_MINOR_BASE) && (minor < DEV_FM_TX_PORTS_MINOR_BASE)) ||
++ ((minor >= DEV_FM_TX_PORTS_MINOR_BASE) && (minor < DEV_FM_MAX_MINORS)))
++ {
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = ((t_LnxWrpFmPortDev*)file->private_data);
++ if (!p_LnxWrpFmPortDev)
++ return -ENODEV;
++ if (LnxwrpFmPortIOCTL(p_LnxWrpFmPortDev, cmd, arg, compat))
++ return -EFAULT;
++ }
++ else
++ {
++ REPORT_ERROR(MINOR, E_INVALID_VALUE, ("minor"));
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_COMPAT
++static long fm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ unsigned int minor = iminor(file->f_path.dentry->d_inode);
++ long res;
++
++ fm_mutex_lock();
++ res = fm_ioctls(minor, file, cmd, arg, true);
++ fm_mutex_unlock();
++
++ return res;
++}
++#endif
++
++static long fm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ unsigned int minor = iminor(file->f_path.dentry->d_inode);
++ long res;
++
++ fm_mutex_lock();
++ res = fm_ioctls(minor, file, cmd, arg, false);
++ fm_mutex_unlock();
++
++ return res;
++}
++
++/* Globals for FM character device */
++struct file_operations fm_fops =
++{
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = fm_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = fm_compat_ioctl,
++#endif
++ .open = fm_open,
++ .release = fm_close,
++};
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
+new file mode 100644
+index 00000000..322ae9ef
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
+@@ -0,0 +1,1312 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_fm_compat_ioctls.c
++
++ @Description FM PCD compat functions
++
++*/
++
++#if !defined(CONFIG_COMPAT)
++#error "missing COMPAT layer..."
++#endif
++
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#ifndef CONFIG_FMAN_ARM
++#include <sysdev/fsl_soc.h>
++#endif
++
++#include "part_ext.h"
++#include "fm_ioctls.h"
++#include "fm_pcd_ioctls.h"
++#include "fm_port_ioctls.h"
++#include "lnxwrp_ioctls_fm_compat.h"
++
++#if defined(FM_COMPAT_DBG)
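++/* Debug helper: dump 'size' bytes at p_addr as 32-bit words, 16 bytes per line. */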
++static void hex_dump(void * p_addr, unsigned int size)
++{
++ int i;
++
++ for(i=0; i<size; i+=16)
++ {
++ printk("%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", p_addr + i,
++ *(unsigned int *)(p_addr + i),
++ *(unsigned int *)(p_addr + i + 4),
++ *(unsigned int *)(p_addr + i + 8),
++ *(unsigned int *)(p_addr + i +12)
++ );
++ }
++}
++#endif
++
++/* mapping kernel pointers to user-space IDs { */
++struct map_node {
++ void *ptr;
++ u8 node_type;
++};
++
++static struct map_node compat_ptr2id_array[COMPAT_PTR2ID_ARRAY_MAX] = {{NULL},{FM_MAP_TYPE_UNSPEC}};
++
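++/*
++ * 64-bit kernel handles cannot be handed to 32-bit user space directly, so
++ * they are parked in compat_ptr2id_array[] and user space receives the slot
++ * index OR'ed with COMPAT_PTR2ID_WATERMARK. The watermark lets
++ * compat_get_id2ptr() reject ids that never came from this table.
++ */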
++void compat_del_ptr2id(void *p, enum fm_map_node_type node_type)
++{
++ compat_uptr_t k;
++
++ _fm_cpt_dbg(COMPAT_GENERIC, "delete (%p)\n", p);
++
++ for(k=1; k < COMPAT_PTR2ID_ARRAY_MAX; k++)
++ if(compat_ptr2id_array[k].ptr == p){
++ compat_ptr2id_array[k].ptr = NULL;
++ compat_ptr2id_array[k].node_type = FM_MAP_TYPE_UNSPEC;
++ }
++}
++EXPORT_SYMBOL(compat_del_ptr2id);
++
++compat_uptr_t compat_add_ptr2id(void *p, enum fm_map_node_type node_type)
++{
++ compat_uptr_t k;
++
++ _fm_cpt_dbg(COMPAT_GENERIC, " (%p) do ->\n", p);
++
++ if(!p)
++ return 0;
++
++ for(k=1; k < COMPAT_PTR2ID_ARRAY_MAX; k++)
++ if(compat_ptr2id_array[k].ptr == NULL)
++ {
++ compat_ptr2id_array[k].ptr = p;
++ compat_ptr2id_array[k].node_type = node_type;
++ _fm_cpt_dbg(COMPAT_GENERIC, "0x%08x \n", k | COMPAT_PTR2ID_WATERMARK);
++ return k | COMPAT_PTR2ID_WATERMARK;
++ }
++
++ printk(KERN_WARNING "FMan map list full! No more PCD mapping space in the kernel!\n");
++ return 0;
++}
++EXPORT_SYMBOL(compat_add_ptr2id);
++
++compat_uptr_t compat_get_ptr2id(void *p, enum fm_map_node_type node_type)
++{
++ compat_uptr_t k;
++
++ _fm_cpt_dbg(COMPAT_GENERIC, " (%p) get -> \n", p);
++
++ for(k=1; k < COMPAT_PTR2ID_ARRAY_MAX; k++)
++ if(compat_ptr2id_array[k].ptr == p &&
++ compat_ptr2id_array[k].node_type == node_type) {
++
++ _fm_cpt_dbg(COMPAT_GENERIC, "0x%08x\n", k | COMPAT_PTR2ID_WATERMARK);
++ return k | COMPAT_PTR2ID_WATERMARK;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(compat_get_ptr2id);
++
++void *compat_get_id2ptr(compat_uptr_t comp, enum fm_map_node_type node_type)
++{
++
++ _fm_cpt_dbg(COMPAT_GENERIC, " (0x%08x) get -> \n", comp);
++
++ if((COMPAT_PTR2ID_WM_MASK & comp) != COMPAT_PTR2ID_WATERMARK) {
++ _fm_cpt_dbg(COMPAT_GENERIC, "Error, invalid watermark (0x%08x)!\n\n", comp);
++ dump_stack();
++ return compat_ptr(comp);
++ }
++
++ comp &= ~COMPAT_PTR2ID_WM_MASK;
++
++ if(((0 < comp) && (comp < COMPAT_PTR2ID_ARRAY_MAX) && (compat_ptr2id_array[comp].ptr != NULL)
++ && compat_ptr2id_array[comp].node_type == node_type)) {
++ _fm_cpt_dbg(COMPAT_GENERIC, "%p\n", compat_ptr2id_array[comp].ptr);
++ return compat_ptr2id_array[comp].ptr;
++ }
++ return NULL;
++}
++EXPORT_SYMBOL(compat_get_id2ptr);
++/* } mapping kernel pointers to user-space IDs */
++
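++/* Translate a user-space object id back to its kernel pointer and drop the mapping. */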
++void compat_obj_delete(
++ ioc_compat_fm_obj_t *compat_id,
++ ioc_fm_obj_t *id)
++{
++ id->obj = compat_pcd_id2ptr(compat_id->obj);
++ compat_del_ptr2id(id->obj, FM_MAP_TYPE_PCD_NODE);
++}
++
++static inline void compat_copy_fm_pcd_plcr_next_engine(
++ ioc_compat_fm_pcd_plcr_next_engine_params_u *compat_param,
++ ioc_fm_pcd_plcr_next_engine_params_u *param,
++ ioc_fm_pcd_engine next_engine,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ switch (next_engine)
++ {
++ case e_IOC_FM_PCD_PLCR:
++ if (compat == COMPAT_US_TO_K)
++ param->p_profile = compat_pcd_id2ptr(compat_param->p_profile);
++ else
++ compat_param->p_profile = compat_pcd_ptr2id(param->p_profile);
++ break;
++ case e_IOC_FM_PCD_KG:
++ if (compat == COMPAT_US_TO_K)
++ param->p_direct_scheme = compat_pcd_id2ptr(compat_param->p_direct_scheme);
++ else
++ compat_param->p_direct_scheme = compat_pcd_ptr2id(param->p_direct_scheme);
++ break;
++ default:
++ if (compat == COMPAT_US_TO_K)
++ param->action = compat_param->action;
++ else
++ compat_param->action = param->action;
++ break;
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
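++/*
++ * Field-by-field converter between the native and 32-bit policer profile
++ * layouts. The 'compat' argument selects the direction (COMPAT_US_TO_K or
++ * COMPAT_K_TO_US); the three per-color next-engine unions are translated by
++ * compat_copy_fm_pcd_plcr_next_engine() according to their engine type.
++ */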
++void compat_copy_fm_pcd_plcr_profile(
++ ioc_compat_fm_pcd_plcr_profile_params_t *compat_param,
++ ioc_fm_pcd_plcr_profile_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->modify = compat_param->modify;
++
++ /* profile_select */
++ if (!compat_param->modify)
++ {
++ param->profile_select.new_params.profile_type =
++ compat_param->profile_select.new_params.profile_type;
++ param->profile_select.new_params.p_fm_port =
++ compat_ptr(compat_param->profile_select.new_params.p_fm_port);
++ param->profile_select.new_params.relative_profile_id =
++ compat_param->profile_select.new_params.relative_profile_id;
++ }
++ else
++ param->profile_select.p_profile =
++ compat_pcd_id2ptr(compat_param->profile_select.p_profile);
++
++ param->alg_selection = compat_param->alg_selection;
++ param->color_mode = compat_param->color_mode;
++
++ /* both parameters in the union have the same size, so memcpy works */
++ memcpy(&param->color, &compat_param->color, sizeof(param->color));
++
++ memcpy(&param->non_passthrough_alg_param,
++ &compat_param->non_passthrough_alg_param,
++ sizeof(ioc_fm_pcd_plcr_non_passthrough_alg_param_t));
++
++ param->next_engine_on_green = compat_param->next_engine_on_green;
++ param->next_engine_on_yellow = compat_param->next_engine_on_yellow;
++ param->next_engine_on_red = compat_param->next_engine_on_red;
++
++ param->trap_profile_on_flow_A = compat_param->trap_profile_on_flow_A;
++ param->trap_profile_on_flow_B = compat_param->trap_profile_on_flow_B;
++ param->trap_profile_on_flow_C = compat_param->trap_profile_on_flow_C;
++ }
++ else
++ {
++ compat_param->modify = param->modify;
++
++ /* profile_select */
++ if (!param->modify)
++ {
++ compat_param->profile_select.new_params.profile_type =
++ param->profile_select.new_params.profile_type;
++ compat_param->profile_select.new_params.p_fm_port =
++ ptr_to_compat(param->profile_select.new_params.p_fm_port);
++ compat_param->profile_select.new_params.relative_profile_id =
++ param->profile_select.new_params.relative_profile_id;
++ }
++ else
++ compat_param->profile_select.p_profile =
++ compat_pcd_ptr2id(param->profile_select.p_profile);
++
++ compat_param->alg_selection = param->alg_selection;
++ compat_param->color_mode = param->color_mode;
++
++ /* both members of the union have the same size, so memcpy works */
++ memcpy(&compat_param->color, &param->color, sizeof(compat_param->color));
++
++ memcpy(&compat_param->non_passthrough_alg_param,
++ &param->non_passthrough_alg_param,
++ sizeof(ioc_fm_pcd_plcr_non_passthrough_alg_param_t));
++
++ compat_param->next_engine_on_green = param->next_engine_on_green;
++ compat_param->next_engine_on_yellow = param->next_engine_on_yellow;
++ compat_param->next_engine_on_red = param->next_engine_on_red;
++
++ compat_param->trap_profile_on_flow_A = param->trap_profile_on_flow_A;
++ compat_param->trap_profile_on_flow_B = param->trap_profile_on_flow_B;
++ compat_param->trap_profile_on_flow_C = param->trap_profile_on_flow_C;
++
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ }
++
++ compat_copy_fm_pcd_plcr_next_engine(&compat_param->params_on_green,
++ &param->params_on_green, param->next_engine_on_green, compat);
++
++ compat_copy_fm_pcd_plcr_next_engine(&compat_param->params_on_yellow,
++ &param->params_on_yellow, param->next_engine_on_yellow, compat);
++
++ compat_copy_fm_pcd_plcr_next_engine(&compat_param->params_on_red,
++ &param->params_on_red, param->next_engine_on_red, compat);
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++static inline void compat_copy_fm_pcd_cc_next_kg(
++ ioc_compat_fm_pcd_cc_next_kg_params_t *compat_param,
++ ioc_fm_pcd_cc_next_kg_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->new_fqid = compat_param->new_fqid;
++ param->override_fqid = compat_param->override_fqid;
++#if DPAA_VERSION >= 11
++ param->new_relative_storage_profile_id = compat_param->new_relative_storage_profile_id;
++#endif
++ param->p_direct_scheme = compat_pcd_id2ptr(compat_param->p_direct_scheme);
++ }
++ else
++ {
++ compat_param->new_fqid = param->new_fqid;
++ compat_param->override_fqid = param->override_fqid;
++#if DPAA_VERSION >= 11
++ compat_param->new_relative_storage_profile_id = param->new_relative_storage_profile_id;
++#endif
++ compat_param->p_direct_scheme = compat_pcd_ptr2id(param->p_direct_scheme);
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++static inline void compat_copy_fm_pcd_cc_next_cc(
++ ioc_compat_fm_pcd_cc_next_cc_params_t *compat_param,
++ ioc_fm_pcd_cc_next_cc_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ param->cc_node_id = compat_pcd_id2ptr(compat_param->cc_node_id);
++ else
++ compat_param->cc_node_id = compat_pcd_ptr2id(param->cc_node_id);
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++static inline void compat_copy_fm_pcd_cc_next_engine(
++ ioc_compat_fm_pcd_cc_next_engine_params_t *compat_param,
++ ioc_fm_pcd_cc_next_engine_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->next_engine = compat_param->next_engine;
++ if (param->next_engine != e_IOC_FM_PCD_INVALID )
++ _fm_cpt_dbg(compat, " param->next_engine = %i \n", param->next_engine);
++
++ switch (param->next_engine)
++ {
++#if DPAA_VERSION >= 11
++ case e_IOC_FM_PCD_FR:
++ param->params.fr_params.frm_replic_id = compat_pcd_id2ptr(compat_param->params.fr_params.frm_replic_id);
++ break;
++#endif /* DPAA_VERSION >= 11 */
++ case e_IOC_FM_PCD_CC:
++ param->manip_id = compat_pcd_id2ptr(compat_param->manip_id);
++ compat_copy_fm_pcd_cc_next_cc(&compat_param->params.cc_params, &param->params.cc_params, compat);
++ break;
++ case e_IOC_FM_PCD_KG:
++ param->manip_id = compat_pcd_id2ptr(compat_param->manip_id);
++ compat_copy_fm_pcd_cc_next_kg(&compat_param->params.kg_params, &param->params.kg_params, compat);
++ break;
++ case e_IOC_FM_PCD_DONE:
++ case e_IOC_FM_PCD_PLCR:
++ param->manip_id = compat_pcd_id2ptr(compat_param->manip_id);
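++ /* fall through - the remaining params need no id translation
++ and are copied verbatim below */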
++ default:
++ memcpy(&param->params, &compat_param->params, sizeof(param->params));
++ }
++ param->statistics_en = compat_param->statistics_en;
++ }
++ else
++ {
++ compat_param->next_engine = param->next_engine;
++
++ switch (compat_param->next_engine)
++ {
++#if DPAA_VERSION >= 11
++ case e_IOC_FM_PCD_FR:
++ compat_param->params.fr_params.frm_replic_id = compat_pcd_ptr2id(param->params.fr_params.frm_replic_id);
++ break;
++#endif /* DPAA_VERSION >= 11 */
++ case e_IOC_FM_PCD_CC:
++ compat_param->manip_id = compat_pcd_ptr2id(param->manip_id);
++ compat_copy_fm_pcd_cc_next_cc(&compat_param->params.cc_params, &param->params.cc_params, compat);
++ break;
++ case e_IOC_FM_PCD_KG:
++ compat_param->manip_id = compat_pcd_ptr2id(param->manip_id);
++ compat_copy_fm_pcd_cc_next_kg(&compat_param->params.kg_params, &param->params.kg_params, compat);
++ break;
++ case e_IOC_FM_PCD_DONE:
++ case e_IOC_FM_PCD_PLCR:
++ compat_param->manip_id = compat_pcd_ptr2id(param->manip_id);
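++ /* fall through - the remaining params need no id translation
++ and are copied verbatim below */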
++ default:
++ memcpy(&compat_param->params, &param->params, sizeof(compat_param->params));
++ }
++ compat_param->statistics_en = param->statistics_en;
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_copy_fm_pcd_cc_key(
++ ioc_compat_fm_pcd_cc_key_params_t *compat_param,
++ ioc_fm_pcd_cc_key_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->p_key = compat_ptr(compat_param->p_key);
++ param->p_mask = compat_ptr(compat_param->p_mask);
++ }
++ else
++ {
++ compat_param->p_key = ptr_to_compat(param->p_key);
++ compat_param->p_mask = ptr_to_compat(param->p_mask);
++ }
++
++ compat_copy_fm_pcd_cc_next_engine(
++ &compat_param->cc_next_engine_params,
++ &param->cc_next_engine_params,
++ compat);
++}
++
++void compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(
++ ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param,
++ ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ param->key_indx = compat_param->key_indx;
++ param->key_size = compat_param->key_size;
++ compat_copy_fm_pcd_cc_key(
++ &compat_param->key_params,
++ &param->key_params,
++ compat);
++ }
++ else
++ {
++ compat_param->id = compat_pcd_ptr2id(param->id);
++ compat_param->key_indx = param->key_indx;
++ compat_param->key_size = param->key_size;
++ compat_copy_fm_pcd_cc_key(
++ &compat_param->key_params,
++ &param->key_params,
++ compat);
++ }
++}
++
++void compat_copy_fm_pcd_cc_node_modify_next_engine(
++ ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param,
++ ioc_fm_pcd_cc_node_modify_next_engine_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ param->key_indx = compat_param->key_indx;
++ param->key_size = compat_param->key_size;
++ }
++ else
++ {
++ compat_param->id = compat_pcd_ptr2id(param->id);
++ compat_param->key_indx = param->key_indx;
++ compat_param->key_size = param->key_size;
++ }
++
++ compat_copy_fm_pcd_cc_next_engine(
++ &compat_param->cc_next_engine_params,
++ &param->cc_next_engine_params,
++ compat);
++}
++
++void compat_fm_pcd_cc_tree_modify_next_engine(
++ ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *compat_param,
++ ioc_fm_pcd_cc_tree_modify_next_engine_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ param->grp_indx = compat_param->grp_indx;
++ param->indx = compat_param->indx;
++ }
++ else
++ {
++ compat_param->id = compat_pcd_ptr2id(param->id);
++ compat_param->grp_indx = param->grp_indx;
++ compat_param->indx = param->indx;
++ }
++
++ compat_copy_fm_pcd_cc_next_engine(
++ &compat_param->cc_next_engine_params,
++ &param->cc_next_engine_params,
++ compat);
++}
++
++void compat_copy_fm_pcd_hash_table(
++ ioc_compat_fm_pcd_hash_table_params_t *compat_param,
++ ioc_fm_pcd_hash_table_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->max_num_of_keys = compat_param->max_num_of_keys;
++ param->statistics_mode = compat_param->statistics_mode;
++ param->kg_hash_shift = compat_param->kg_hash_shift;
++ param->hash_res_mask = compat_param->hash_res_mask;
++ param->hash_shift = compat_param->hash_shift;
++ param->match_key_size = compat_param->match_key_size;
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ }
++ else
++ {
++ compat_param->max_num_of_keys = param->max_num_of_keys;
++ compat_param->statistics_mode = param->statistics_mode;
++ compat_param->kg_hash_shift = param->kg_hash_shift;
++ compat_param->hash_res_mask = param->hash_res_mask;
++ compat_param->hash_shift = param->hash_shift;
++ compat_param->match_key_size = param->match_key_size;
++
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ }
++
++ compat_copy_fm_pcd_cc_next_engine(
++ &compat_param->cc_next_engine_params_for_miss,
++ &param->cc_next_engine_params_for_miss,
++ compat);
++}
++
++void compat_copy_fm_pcd_cc_grp(
++ ioc_compat_fm_pcd_cc_grp_params_t *compat_param,
++ ioc_fm_pcd_cc_grp_params_t *param,
++ uint8_t compat)
++{
++ int k;
++
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->num_of_distinction_units = compat_param->num_of_distinction_units;
++ memcpy(param->unit_ids, compat_param->unit_ids, IOC_FM_PCD_MAX_NUM_OF_CC_UNITS);
++ }
++ else
++ {
++ compat_param->num_of_distinction_units = param->num_of_distinction_units;
++ memcpy(compat_param->unit_ids, param->unit_ids, IOC_FM_PCD_MAX_NUM_OF_CC_UNITS);
++ }
++
++ for (k=0; k < IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP; k++)
++ compat_copy_fm_pcd_cc_next_engine(
++ &compat_param->next_engine_per_entries_in_grp[k],
++ &param->next_engine_per_entries_in_grp[k],
++ compat);
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_copy_fm_pcd_cc_tree(
++ ioc_compat_fm_pcd_cc_tree_params_t *compat_param,
++ ioc_fm_pcd_cc_tree_params_t *param,
++ uint8_t compat)
++{
++ int k;
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->net_env_id = compat_pcd_id2ptr(compat_param->net_env_id);
++ param->num_of_groups = compat_param->num_of_groups;
++ }
++ else
++ {
++ compat_param->net_env_id = compat_pcd_ptr2id(param->net_env_id);
++ compat_param->num_of_groups = param->num_of_groups;
++
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ }
++
++ for (k=0; k < IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS; k++)
++ compat_copy_fm_pcd_cc_grp(
++ &compat_param->fm_pcd_cc_group_params[k],
++ &param->fm_pcd_cc_group_params[k],
++ compat);
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_fm_pcd_prs_sw(
++ ioc_compat_fm_pcd_prs_sw_params_t *compat_param,
++ ioc_fm_pcd_prs_sw_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->override = compat_param->override;
++ param->size = compat_param->size;
++ param->base = compat_param->base;
++ param->p_code = compat_ptr(compat_param->p_code);
++ memcpy(param->sw_prs_data_params,compat_param->sw_prs_data_params,IOC_FM_PCD_PRS_NUM_OF_HDRS*sizeof(uint32_t));
++ param->num_of_labels = compat_param->num_of_labels;
++ memcpy(param->labels_table,compat_param->labels_table,IOC_FM_PCD_PRS_NUM_OF_LABELS*sizeof(ioc_fm_pcd_prs_label_params_t));
++ }
++}
++
++void compat_copy_fm_pcd_kg_scheme(
++ ioc_compat_fm_pcd_kg_scheme_params_t *compat_param,
++ ioc_fm_pcd_kg_scheme_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg(compat," {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->modify = compat_param->modify;
++
++ /* scm_id */
++ if (compat_param->modify)
++ {
++ param->scm_id.scheme_id = compat_pcd_id2ptr(compat_param->scm_id.scheme_id);
++ _fm_cpt_dbg(compat," param->scm_id.scheme_id = %p \n", param->scm_id.scheme_id);
++ }
++ else
++ param->scm_id.relative_scheme_id = compat_param->scm_id.relative_scheme_id;
++
++ param->always_direct = compat_param->always_direct;
++ /* net_env_params */
++ param->net_env_params.net_env_id = compat_pcd_id2ptr(compat_param->net_env_params.net_env_id);
++ param->net_env_params.num_of_distinction_units = compat_param->net_env_params.num_of_distinction_units;
++ memcpy(param->net_env_params.unit_ids,
++ compat_param->net_env_params.unit_ids,
++ IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
++
++ param->use_hash = compat_param->use_hash;
++ memcpy(&param->key_extract_and_hash_params,
++ &compat_param->key_extract_and_hash_params,
++ sizeof(ioc_fm_pcd_kg_key_extract_and_hash_params_t));
++ param->bypass_fqid_generation = compat_param->bypass_fqid_generation;
++ param->base_fqid = compat_param->base_fqid;
++#if DPAA_VERSION >= 11
++ param->override_storage_profile =
++ compat_param->override_storage_profile;
++ param->storage_profile = compat_param->storage_profile;
++#endif
++ param->num_of_used_extracted_ors = compat_param->num_of_used_extracted_ors;
++ memcpy(param->extracted_ors,
++ compat_param->extracted_ors,
++ IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS * sizeof(ioc_fm_pcd_kg_extracted_or_params_t));
++ param->next_engine = compat_param->next_engine;
++
++ /* kg_next_engine_params */
++ if (param->next_engine == e_IOC_FM_PCD_CC)
++ {
++ param->kg_next_engine_params.cc.tree_id = compat_pcd_id2ptr(compat_param->kg_next_engine_params.cc.tree_id);
++ param->kg_next_engine_params.cc.grp_id = compat_param->kg_next_engine_params.cc.grp_id;
++ param->kg_next_engine_params.cc.plcr_next = compat_param->kg_next_engine_params.cc.plcr_next;
++ param->kg_next_engine_params.cc.bypass_plcr_profile_generation
++ = compat_param->kg_next_engine_params.cc.bypass_plcr_profile_generation;
++ memcpy(&param->kg_next_engine_params.cc.plcr_profile,
++ &compat_param->kg_next_engine_params.cc.plcr_profile,
++ sizeof(ioc_fm_pcd_kg_plcr_profile_t));
++ }
++ else
++ memcpy(&param->kg_next_engine_params,
++ &compat_param->kg_next_engine_params,
++ sizeof(param->kg_next_engine_params));
++
++ memcpy(&param->scheme_counter,
++ &compat_param->scheme_counter,
++ sizeof(ioc_fm_pcd_kg_scheme_counter_t));
++ }
++ else
++ {
++ compat_param->modify = param->modify;
++
++ /* scm_id */
++ if (param->modify)
++ compat_param->scm_id.scheme_id = compat_pcd_ptr2id(param->scm_id.scheme_id);
++ else
++ compat_param->scm_id.relative_scheme_id = param->scm_id.relative_scheme_id;
++
++ compat_param->always_direct = param->always_direct;
++
++ /* net_env_params */
++ compat_param->net_env_params.net_env_id = compat_pcd_ptr2id(param->net_env_params.net_env_id);
++ compat_param->net_env_params.num_of_distinction_units = param->net_env_params.num_of_distinction_units;
++ memcpy(compat_param->net_env_params.unit_ids, param->net_env_params.unit_ids, IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
++
++ compat_param->use_hash = param->use_hash;
++ memcpy(&compat_param->key_extract_and_hash_params, &param->key_extract_and_hash_params, sizeof(ioc_fm_pcd_kg_key_extract_and_hash_params_t));
++ compat_param->bypass_fqid_generation = param->bypass_fqid_generation;
++ compat_param->base_fqid = param->base_fqid;
++#if DPAA_VERSION >= 11
++ compat_param->override_storage_profile =
++ param->override_storage_profile;
++ compat_param->storage_profile = param->storage_profile;
++#endif
++ compat_param->num_of_used_extracted_ors = param->num_of_used_extracted_ors;
++ memcpy(compat_param->extracted_ors, param->extracted_ors, IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS * sizeof(ioc_fm_pcd_kg_extracted_or_params_t));
++ compat_param->next_engine = param->next_engine;
++
++ /* kg_next_engine_params */
++ if (compat_param->next_engine == e_IOC_FM_PCD_CC)
++ {
++ compat_param->kg_next_engine_params.cc.tree_id = compat_pcd_ptr2id(param->kg_next_engine_params.cc.tree_id);
++ compat_param->kg_next_engine_params.cc.grp_id = param->kg_next_engine_params.cc.grp_id;
++ compat_param->kg_next_engine_params.cc.plcr_next = param->kg_next_engine_params.cc.plcr_next;
++ compat_param->kg_next_engine_params.cc.bypass_plcr_profile_generation
++ = param->kg_next_engine_params.cc.bypass_plcr_profile_generation;
++ memcpy(&compat_param->kg_next_engine_params.cc.plcr_profile,
++ &param->kg_next_engine_params.cc.plcr_profile,
++ sizeof(ioc_fm_pcd_kg_plcr_profile_t));
++ }
++ else
++ memcpy(&compat_param->kg_next_engine_params, &param->kg_next_engine_params, sizeof(compat_param->kg_next_engine_params));
++
++ memcpy(&compat_param->scheme_counter, &param->scheme_counter, sizeof(ioc_fm_pcd_kg_scheme_counter_t));
++
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ }
++
++ _fm_cpt_dbg(compat," ...->}\n");
++}
++
++void compat_copy_fm_pcd_kg_scheme_spc(
++ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param,
++ ioc_fm_pcd_kg_scheme_spc_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ param->val = compat_param->val;
++ } else {
++ compat_param->id = compat_pcd_ptr2id(param->id);
++ compat_param->val = param->val;
++ }
++}
++
++
++void compat_copy_fm_pcd_kg_scheme_select(
++ ioc_compat_fm_pcd_kg_scheme_select_t *compat_param,
++ ioc_fm_pcd_kg_scheme_select_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->direct = compat_param->direct;
++ if (param->direct)
++ param->scheme_id = compat_pcd_id2ptr(compat_param->scheme_id);
++ }
++}
++
++void compat_copy_fm_pcd_kg_schemes_params(
++ ioc_compat_fm_pcd_port_schemes_params_t *compat_param,
++ ioc_fm_pcd_port_schemes_params_t *param,
++ uint8_t compat)
++{
++ int k;
++
++ if (compat == COMPAT_US_TO_K) {
++ param->num_of_schemes = compat_param->num_of_schemes;
++ for(k=0; k < compat_param->num_of_schemes; k++)
++ param->scheme_ids[k] = compat_pcd_id2ptr(compat_param->scheme_ids[k]);
++ }
++}
++
++void compat_copy_fm_port_pcd_cc(
++ ioc_compat_fm_port_pcd_cc_params_t *compat_cc_params ,
++ ioc_fm_port_pcd_cc_params_t *p_cc_params,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K){
++ p_cc_params->cc_tree_id = compat_pcd_id2ptr(compat_cc_params->cc_tree_id);
++ }
++}
++
++void compat_copy_fm_port_pcd_kg(
++ ioc_compat_fm_port_pcd_kg_params_t *compat_param,
++ ioc_fm_port_pcd_kg_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K){
++ uint8_t k;
++
++ param->num_of_schemes = compat_param->num_of_schemes;
++ for(k=0; k<compat_param->num_of_schemes; k++)
++ param->scheme_ids[k] = compat_pcd_id2ptr(compat_param->scheme_ids[k]);
++
++ param->direct_scheme = compat_param->direct_scheme;
++ if (param->direct_scheme)
++ param->direct_scheme_id = compat_pcd_id2ptr(compat_param->direct_scheme_id);
++ }
++}
++
++void compat_copy_fm_port_pcd(
++ ioc_compat_fm_port_pcd_params_t *compat_param,
++ ioc_fm_port_pcd_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ ioc_fm_port_pcd_prs_params_t *same_port_pcd_prs_params;
++ ioc_compat_fm_port_pcd_cc_params_t *compat_port_pcd_cc_params;
++ ioc_compat_fm_port_pcd_kg_params_t *compat_port_pcd_kg_params;
++ ioc_compat_fm_port_pcd_plcr_params_t *compat_port_pcd_plcr_params;
++
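++ /* the caller is expected to pass one contiguous buffer in which the
++ main pcd params struct is followed by the prs, cc, kg and plcr
++ parameter blocks, in that order - hence the pointer arithmetic: */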
++ same_port_pcd_prs_params = (ioc_fm_port_pcd_prs_params_t *) (compat_param + 1);
++ compat_port_pcd_cc_params = (ioc_compat_fm_port_pcd_cc_params_t *) (same_port_pcd_prs_params + 1);
++ compat_port_pcd_kg_params = (ioc_compat_fm_port_pcd_kg_params_t *) (compat_port_pcd_cc_params + 1);
++ compat_port_pcd_plcr_params = (ioc_compat_fm_port_pcd_plcr_params_t *) (compat_port_pcd_kg_params + 1);
++
++ _fm_cpt_dbg(compat,"\n param->p_prs_params=%p \n", param->p_prs_params);
++ _fm_cpt_dbg(compat," param->p_cc_params=%p \n", param->p_cc_params);
++ _fm_cpt_dbg(compat," param->p_kg_params=%p \n", param->p_kg_params);
++ _fm_cpt_dbg(compat," param->p_plcr_params=%p \n", param->p_plcr_params);
++ _fm_cpt_dbg(compat," param->p_ip_reassembly_manip=%p \n", param->p_ip_reassembly_manip);
++#if (DPAA_VERSION >= 11)
++ _fm_cpt_dbg(compat," param->p_capwap_reassembly_manip=%p \n", param->p_capwap_reassembly_manip);
++#endif
++ param->pcd_support = compat_param->pcd_support;
++ param->net_env_id = compat_pcd_id2ptr(compat_param->net_env_id);
++
++ if (param->p_cc_params)
++ compat_copy_fm_port_pcd_cc(compat_port_pcd_cc_params, param->p_cc_params, COMPAT_US_TO_K);
++ if (param->p_kg_params)
++ compat_copy_fm_port_pcd_kg(compat_port_pcd_kg_params, param->p_kg_params, COMPAT_US_TO_K);
++ if (param->p_plcr_params)
++ param->p_plcr_params->plcr_profile_id = compat_pcd_id2ptr(compat_port_pcd_plcr_params->plcr_profile_id);
++ param->p_ip_reassembly_manip = compat_pcd_id2ptr(compat_param->p_ip_reassembly_manip);
++#if (DPAA_VERSION >= 11)
++ param->p_capwap_reassembly_manip = compat_pcd_id2ptr(compat_param->p_capwap_reassembly_manip);
++#endif
++ }
++}
++
++void compat_copy_fm_port_pcd_modify_tree(
++ ioc_compat_fm_obj_t *compat_id,
++ ioc_fm_obj_t *id,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ id->obj = compat_pcd_id2ptr(compat_id->obj);
++}
++
++#if (DPAA_VERSION >= 11)
++void compat_copy_fm_port_vsp_alloc_params(
++ ioc_compat_fm_port_vsp_alloc_params_t *compat_param,
++ ioc_fm_port_vsp_alloc_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ _fm_cpt_dbg(compat," param->p_fm_tx_port=%p \n", param->p_fm_tx_port);
++
++ param->dflt_relative_id = compat_param->dflt_relative_id;
++ param->num_of_profiles = compat_param->num_of_profiles;
++ param->p_fm_tx_port = compat_pcd_id2ptr(compat_param->p_fm_tx_port);
++ }
++}
++#endif /* (DPAA_VERSION >= 11) */
++
++void compat_copy_fm_pcd_cc_tbl_get_stats(
++ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param,
++ ioc_fm_pcd_cc_tbl_get_stats_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ param->key_index = compat_param->key_index;
++ memcpy(&param->statistics, &compat_param->statistics, sizeof(ioc_fm_pcd_cc_key_statistics_t));
++ } else {
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ compat_param->key_index = param->key_index;
++ memcpy(&compat_param->statistics, &param->statistics, sizeof(ioc_fm_pcd_cc_key_statistics_t));
++ }
++}
++
++
++void compat_copy_fm_pcd_net_env(
++ ioc_compat_fm_pcd_net_env_params_t *compat_param,
++ ioc_fm_pcd_net_env_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->num_of_distinction_units = compat_param->num_of_distinction_units;
++ memcpy(param->units, compat_param->units, sizeof(ioc_fm_pcd_distinction_unit_t)*IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
++ param->id = NULL; /* to avoid passing garbage to the kernel */
++ }
++ else
++ {
++ compat_param->num_of_distinction_units = param->num_of_distinction_units;
++ memcpy(compat_param->units, param->units, sizeof(ioc_fm_pcd_distinction_unit_t)*IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
++
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ }
++}
++
++void compat_copy_fm_pcd_cc_node_modify_key(
++ ioc_compat_fm_pcd_cc_node_modify_key_params_t *compat_param,
++ ioc_fm_pcd_cc_node_modify_key_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->key_indx = compat_param->key_indx;
++ param->key_size = compat_param->key_size;
++ param->p_key = (uint8_t *)compat_ptr(compat_param->p_key);
++ _fm_cpt_dbg(compat," param->p_key = %p \n", param->p_key);
++ param->p_mask = (uint8_t *)compat_ptr(compat_param->p_mask);
++ _fm_cpt_dbg(compat," param->p_mask = %p\n", param->p_mask);
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ _fm_cpt_dbg(compat," param->id = %p \n", param->id);
++ }
++ else
++ {
++ compat_param->key_indx = param->key_indx;
++ compat_param->key_size = param->key_size;
++ compat_param->p_key = ptr_to_compat((void *)param->p_key);
++ compat_param->p_mask = ptr_to_compat((void *)param->p_mask);
++
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ }
++}
++
++void compat_copy_keys(
++ ioc_compat_keys_params_t *compat_param,
++ ioc_keys_params_t *param,
++ uint8_t compat)
++{
++ int k = 0;
++
++ _fm_cpt_dbg(compat," {->...\n");
++
++ if (compat == COMPAT_US_TO_K) {
++ param->max_num_of_keys = compat_param->max_num_of_keys;
++ param->mask_support = compat_param->mask_support;
++ param->statistics_mode = compat_param->statistics_mode;
++ param->num_of_keys = compat_param->num_of_keys;
++ param->key_size = compat_param->key_size;
++#if (DPAA_VERSION >= 11)
++ memcpy(&param->frame_length_ranges,
++ &compat_param->frame_length_ranges,
++ sizeof(param->frame_length_ranges[0]) *
++ IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR);
++#endif /* (DPAA_VERSION >= 11) */
++ }
++ else {
++ compat_param->max_num_of_keys = param->max_num_of_keys;
++ compat_param->mask_support = param->mask_support;
++ compat_param->statistics_mode = param->statistics_mode;
++ compat_param->num_of_keys = param->num_of_keys;
++ compat_param->key_size = param->key_size;
++#if (DPAA_VERSION >= 11)
++ memcpy(&compat_param->frame_length_ranges,
++ &param->frame_length_ranges,
++ sizeof(compat_param->frame_length_ranges[0]) *
++ IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR);
++#endif /* (DPAA_VERSION >= 11) */
++ }
++
++ for (k=0; k < IOC_FM_PCD_MAX_NUM_OF_KEYS; k++)
++ compat_copy_fm_pcd_cc_key(
++ &compat_param->key_params[k],
++ &param->key_params[k],
++ compat);
++
++ compat_copy_fm_pcd_cc_next_engine(
++ &compat_param->cc_next_engine_params_for_miss,
++ &param->cc_next_engine_params_for_miss,
++ compat);
++
++ _fm_cpt_dbg(compat," ...->}\n");
++}
++
++void compat_copy_fm_pcd_cc_node(
++ ioc_compat_fm_pcd_cc_node_params_t *compat_param,
++ ioc_fm_pcd_cc_node_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg(compat," {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ memcpy(&param->extract_cc_params, &compat_param->extract_cc_params, sizeof(ioc_fm_pcd_extract_entry_t));
++ else
++ {
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ _fm_cpt_dbg(compat," param->id = %p \n", param->id);
++ }
++
++ compat_copy_keys(&compat_param->keys_params, &param->keys_params, compat);
++
++ _fm_cpt_dbg(compat," ...->}\n");
++}
++
++void compat_fm_pcd_manip_set_node(
++ ioc_compat_fm_pcd_manip_params_t *compat_param,
++ ioc_fm_pcd_manip_params_t *param,
++ uint8_t compat)
++{
++ if (compat == COMPAT_US_TO_K) {
++ param->type = compat_param->type;
++ switch (param->type) {
++ case e_IOC_FM_PCD_MANIP_HDR:
++ param->u.hdr.rmv = compat_param->u.hdr.rmv;
++ memcpy(&param->u.hdr.rmv_params,
++ &compat_param->u.hdr.rmv_params,
++ sizeof(param->u.hdr.rmv_params));
++
++ param->u.hdr.insrt = compat_param->u.hdr.insrt;
++ param->u.hdr.insrt_params.type =
++ compat_param->u.hdr.insrt_params.type;
++ switch (compat_param->u.hdr.insrt_params.type)
++ {
++ case e_IOC_FM_PCD_MANIP_INSRT_GENERIC:
++ param->u.hdr.insrt_params.u.generic.offset =
++ compat_param->u.hdr.insrt_params.u.generic.offset;
++ param->u.hdr.insrt_params.u.generic.size =
++ compat_param->u.hdr.insrt_params.u.generic.size;
++ param->u.hdr.insrt_params.u.generic.replace =
++ compat_param->u.hdr.insrt_params.u.generic.replace;
++ param->u.hdr.insrt_params.u.generic.p_data =
++ compat_ptr(compat_param->u.hdr.insrt_params.u.generic.p_data);
++ break;
++ case e_IOC_FM_PCD_MANIP_INSRT_BY_HDR:
++ param->u.hdr.insrt_params.u.by_hdr.type =
++ compat_param->u.hdr.insrt_params.u.by_hdr.type;
++ param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.specific_l2 =
++ compat_param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.specific_l2;
++ param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.update =
++ compat_param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.update;
++ param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.size =
++ compat_param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.size;
++ param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.p_data =
++ compat_ptr(compat_param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.p_data);
++ break;
++ default:
++ _fm_cpt_err("Unsupported type: %d", compat_param->u.hdr.insrt_params.type);
++ }
++
++ param->u.hdr.field_update = compat_param->u.hdr.field_update;
++ memcpy(&param->u.hdr.field_update_params,
++ &compat_param->u.hdr.field_update_params,
++ sizeof(param->u.hdr.field_update_params));
++
++ param->u.hdr.custom = compat_param->u.hdr.custom;
++ memcpy(&param->u.hdr.custom_params,
++ &compat_param->u.hdr.custom_params,
++ sizeof(param->u.hdr.custom_params));
++
++ param->u.hdr.dont_parse_after_manip =
++ compat_param->u.hdr.dont_parse_after_manip;
++ break;
++ case e_IOC_FM_PCD_MANIP_REASSEM:
++ memcpy(&param->u.reassem, &compat_param->u.reassem, sizeof(param->u.reassem));
++ break;
++ case e_IOC_FM_PCD_MANIP_FRAG:
++ memcpy(&param->u.frag, &compat_param->u.frag, sizeof(param->u.frag));
++ break;
++ case e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD:
++ memcpy(&param->u.special_offload,
++ &compat_param->u.special_offload,
++ sizeof(param->u.special_offload));
++ break;
++ }
++
++ param->p_next_manip = compat_pcd_id2ptr(compat_param->p_next_manip);
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ }
++ else {
++ compat_param->type = param->type;
++ memcpy(&compat_param->u, &param->u, sizeof(compat_param->u));
++
++ if (param->type == e_IOC_FM_PCD_MANIP_HDR &&
++ param->u.hdr.insrt_params.type == e_IOC_FM_PCD_MANIP_INSRT_GENERIC)
++ compat_param->u.hdr.insrt_params.u.generic.p_data =
++ ptr_to_compat(param->u.hdr.insrt_params.u.generic.p_data);
++
++ compat_param->p_next_manip = compat_pcd_ptr2id(param->id);
++ /* the id looked up above should already be in the map, added on an
++ earlier pass by the compat_add_ptr2id() call below: */
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ }
++}
++
++void compat_copy_fm_pcd_manip_get_stats(
++ ioc_compat_fm_pcd_manip_get_stats_t *compat_param,
++ ioc_fm_pcd_manip_get_stats_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ memcpy(&param->stats, &compat_param->stats,
++ sizeof(ioc_fm_pcd_manip_stats_t));
++ }
++ else
++ {
++ compat_param->id = compat_add_ptr2id(param->id,
++ FM_MAP_TYPE_PCD_NODE);
++ memcpy(&compat_param->stats, &param->stats,
++ sizeof(ioc_fm_pcd_manip_stats_t));
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++#if (DPAA_VERSION >= 11)
++void compat_copy_fm_pcd_frm_replic_group_params(
++ ioc_compat_fm_pcd_frm_replic_group_params_t *compat_param,
++ ioc_fm_pcd_frm_replic_group_params_t *param,
++ uint8_t compat)
++{
++ int k;
++
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->max_num_of_entries = compat_param->max_num_of_entries;
++ param->num_of_entries = compat_param->num_of_entries;
++ param->id = compat_pcd_id2ptr(compat_param->id);
++ }
++ else
++ {
++ compat_param->max_num_of_entries = param->max_num_of_entries;
++ compat_param->num_of_entries = param->num_of_entries;
++ compat_param->id = compat_add_ptr2id(param->id,
++ FM_MAP_TYPE_PCD_NODE);
++ }
++
++ for (k=0; k < IOC_FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES; k++)
++ compat_copy_fm_pcd_cc_next_engine(
++ &compat_param->next_engine_params[k],
++ &param->next_engine_params[k],
++ compat);
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_copy_fm_pcd_frm_replic_member(
++ ioc_compat_fm_pcd_frm_replic_member_t *compat_param,
++ ioc_fm_pcd_frm_replic_member_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->h_replic_group = compat_pcd_id2ptr(compat_param->h_replic_group);
++ param->member_index = compat_param->member_index;
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_copy_fm_pcd_frm_replic_member_params(
++ ioc_compat_fm_pcd_frm_replic_member_params_t *compat_param,
++ ioc_fm_pcd_frm_replic_member_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ compat_copy_fm_pcd_frm_replic_member(&compat_param->member,
++ &param->member, compat);
++
++ compat_copy_fm_pcd_cc_next_engine(&compat_param->next_engine_params,
++ &param->next_engine_params, compat);
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_copy_fm_vsp_params(
++ ioc_compat_fm_vsp_params_t *compat_param,
++ ioc_fm_vsp_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ memcpy(&param->ext_buf_pools, &compat_param->ext_buf_pools, sizeof(ioc_fm_ext_pools));
++ param->liodn_offset = compat_param->liodn_offset;
++ param->port_params.port_id = compat_param->port_params.port_id;
++ param->port_params.port_type = compat_param->port_params.port_type;
++ param->relative_profile_id = compat_param->relative_profile_id;
++ }
++ else
++ {
++ memcpy(&compat_param->ext_buf_pools, &param->ext_buf_pools, sizeof(ioc_fm_ext_pools));
++ compat_param->liodn_offset = param->liodn_offset;
++ compat_param->port_params.port_id = param->port_params.port_id;
++ compat_param->port_params.port_type = param->port_params.port_type;
++ compat_param->relative_profile_id = param->relative_profile_id;
++ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_copy_fm_buf_pool_depletion_params(
++ ioc_compat_fm_buf_pool_depletion_params_t *compat_param,
++ ioc_fm_buf_pool_depletion_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->p_fm_vsp = compat_pcd_id2ptr(compat_param->p_fm_vsp);
++ memcpy(&param->fm_buf_pool_depletion,
++ &compat_param->fm_buf_pool_depletion,
++ sizeof(ioc_fm_buf_pool_depletion_t));
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_copy_fm_buffer_prefix_content_params(
++ ioc_compat_fm_buffer_prefix_content_params_t *compat_param,
++ ioc_fm_buffer_prefix_content_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->p_fm_vsp = compat_pcd_id2ptr(compat_param->p_fm_vsp);
++ memcpy(&param->fm_buffer_prefix_content,
++ &compat_param->fm_buffer_prefix_content,
++ sizeof(ioc_fm_buffer_prefix_content_t));
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_copy_fm_vsp_config_no_sg_params(
++ ioc_compat_fm_vsp_config_no_sg_params_t *compat_param,
++ ioc_fm_vsp_config_no_sg_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->p_fm_vsp = compat_pcd_id2ptr(compat_param->p_fm_vsp);
++ param->no_sg = compat_param->no_sg;
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++
++void compat_copy_fm_vsp_prs_result_params(
++ ioc_compat_fm_vsp_prs_result_params_t *compat_param,
++ ioc_fm_vsp_prs_result_params_t *param,
++ uint8_t compat)
++{
++ _fm_cpt_dbg (compat, " {->...\n");
++
++ if (compat == COMPAT_US_TO_K)
++ {
++ param->p_fm_vsp = compat_pcd_id2ptr(compat_param->p_fm_vsp);
++ /* p_data is a user-space pointer that needs to remain unmodified */
++ param->p_data = (void *)(unsigned long long)compat_param->p_data;
++ }
++ else
++ {
++ compat_param->p_fm_vsp = compat_pcd_ptr2id(param->p_fm_vsp);
++ /* p_data is a user-space pointer that needs to remain unmodified */
++ compat_param->p_data = (compat_uptr_t)((unsigned long long)param->p_data & 0xFFFFFFFF);
++ }
++
++ _fm_cpt_dbg (compat, " ...->}\n");
++}
++#endif /* (DPAA_VERSION >= 11) */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
+new file mode 100644
+index 00000000..187011f7
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
+@@ -0,0 +1,755 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_ioctls_fm_compat.h
++
++ @Description FM PCD compat structures definition.
++
++*/
++
++#ifndef __FM_COMPAT_IOCTLS_H
++#define __FM_COMPAT_IOCTLS_H
++
++#include <linux/compat.h>
++
++#define COMPAT_K_TO_US 0 /* copy from Kernel to User */
++#define COMPAT_US_TO_K 1 /* copy from User to Kernel */
++#define COMPAT_GENERIC 2 /* no direction (debug traces) */
++
++#define COMPAT_COPY_K2US(dest, src, type) compat_copy_##type(src, dest, 0)
++#define COMPAT_COPY_US2K(dest, src, type) compat_copy_##type(dest, src, 1)
++
++/* mapping kernel pointers w/ UserSpace id's { */
++/* Because compat_ptr(ptr_to_compat(X)) != X in general, pointers cannot be
++ exchanged back and forth between user space and kernel space this way:
++ compat_ptr() is a plain cast, so a 64-bit kernel pointer does not survive
++ the 32-bit round trip. Kernel pointers are therefore mapped to small,
++ watermarked ids instead. */
++#define COMPAT_PTR2ID_ARRAY_MAX (512+1) /* first location is not used */
++#define COMPAT_PTR2ID_WATERMARK 0xface0000
++#define COMPAT_PTR2ID_WM_MASK 0xffff0000
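++/* Example: array slot 3 is handed to user space as the id
++ (COMPAT_PTR2ID_WATERMARK | 3) == 0xface0003; compat_get_id2ptr() then
++ verifies the 0xface watermark, strips it with ~COMPAT_PTR2ID_WM_MASK
++ and indexes compat_ptr2id_array[3]. */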
++
++/* define FM_COMPAT_DBG to enable debug traces */
++/*#define FM_COMPAT_DBG*/
++
++#define _fm_cpt_prk(stage, format, arg...) \
++ printk(stage "fm_cpt (cpu:%u): " format, raw_smp_processor_id(), ##arg)
++
++#define _fm_cpt_inf(format, arg...) _fm_cpt_prk(KERN_INFO, format, ##arg)
++#define _fm_cpt_wrn(format, arg...) _fm_cpt_prk(KERN_WARNING, format, ##arg)
++#define _fm_cpt_err(format, arg...) _fm_cpt_prk(KERN_ERR, format, ##arg)
++
++/* used for compat IOCTL debugging */
++#if defined(FM_COMPAT_DBG)
++ #define _fm_cpt_dbg(from, format, arg...) \
++ do{ \
++ if (from == COMPAT_US_TO_K) \
++ printk("fm_cpt to KS [%s:%u](cpu:%u) - " format, \
++ __func__, __LINE__, raw_smp_processor_id(), ##arg); \
++ else if (from == COMPAT_K_TO_US) \
++ printk("fm_cpt to US [%s:%u](cpu:%u) - " format, \
++ __func__, __LINE__, raw_smp_processor_id(), ##arg); \
++ else \
++ printk("fm_cpt [%s:%u](cpu:%u) - " format, \
++ __func__, __LINE__, raw_smp_processor_id(), ##arg); \
++ }while(0)
++#else
++# define _fm_cpt_dbg(arg...)
++#endif
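++/* e.g. a trace emitted with FM_COMPAT_DBG defined looks like:
++ "fm_cpt to KS [compat_copy_keys:1234](cpu:0) - {->..." */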
++
++/* TODO: per FMan module:
++ *
++ * Parser: FM_MAP_TYPE_PARSER_NODE,
++ * Kg: FM_MAP_TYPE_KG_NODE,
++ * Policer: FM_MAP_TYPE_POLICER_NODE,
++ * Manip: FM_MAP_TYPE_MANIP_NODE
++ */
++enum fm_map_node_type {
++ FM_MAP_TYPE_UNSPEC = 0,
++ FM_MAP_TYPE_PCD_NODE,
++
++ /* add types here, update the policy */
++
++ __FM_MAP_TYPE_AFTER_LAST,
++ FM_MAP_TYPE_MAX = __FM_MAP_TYPE_AFTER_LAST - 1
++};
++
++void compat_del_ptr2id(void *p, enum fm_map_node_type);
++compat_uptr_t compat_add_ptr2id(void *p, enum fm_map_node_type);
++compat_uptr_t compat_get_ptr2id(void *p, enum fm_map_node_type);
++void *compat_get_id2ptr(compat_uptr_t comp, enum fm_map_node_type);
++
++static inline compat_uptr_t compat_pcd_ptr2id(void *ptr) {
++ return (ptr)? compat_get_ptr2id(ptr, FM_MAP_TYPE_PCD_NODE)
++ : (compat_uptr_t) 0;
++}
++
++static inline void *compat_pcd_id2ptr(compat_uptr_t id) {
++ return (id) ? compat_get_id2ptr(id, FM_MAP_TYPE_PCD_NODE)
++ : NULL;
++}
++
++/* other similar inlines may be added as new nodes are added
++ to enum fm_map_node_type above... */
++/* } mapping kernel pointers w/ UserSpace id's */
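++/* Typical round trip: a create ioctl registers the new kernel object via
++ compat_add_ptr2id() and returns the id to user space; later ioctls map
++ the id back with compat_pcd_id2ptr(), and compat_obj_delete() finally
++ drops the mapping through compat_del_ptr2id(). */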
++
++/* pcd compat structures { */
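++/* Each ioc_compat_* type below mirrors its ioc_* counterpart, with kernel
++ pointers narrowed to compat_uptr_t (raw user pointers or mapped ids) so
++ the layout matches what 32-bit user space passes to the 64-bit kernel. */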
++typedef struct ioc_compat_fm_pcd_cc_node_remove_key_params_t {
++ compat_uptr_t id;
++ uint16_t key_indx;
++} ioc_compat_fm_pcd_cc_node_remove_key_params_t;
++
++typedef union ioc_compat_fm_pcd_plcr_next_engine_params_u {
++ ioc_fm_pcd_done_action action;
++ compat_uptr_t p_profile;
++ compat_uptr_t p_direct_scheme;
++} ioc_compat_fm_pcd_plcr_next_engine_params_u;
++
++typedef struct ioc_compat_fm_pcd_plcr_profile_params_t {
++ bool modify;
++ union {
++ struct {
++ ioc_fm_pcd_profile_type_selection profile_type;
++ compat_uptr_t p_fm_port;
++ uint16_t relative_profile_id;
++ } new_params;
++ compat_uptr_t p_profile;
++ } profile_select;
++ ioc_fm_pcd_plcr_algorithm_selection alg_selection;
++ ioc_fm_pcd_plcr_color_mode color_mode;
++
++ union {
++ ioc_fm_pcd_plcr_color dflt_color;
++ ioc_fm_pcd_plcr_color override;
++ } color;
++
++ ioc_fm_pcd_plcr_non_passthrough_alg_param_t non_passthrough_alg_param;
++
++ ioc_fm_pcd_engine next_engine_on_green;
++ ioc_compat_fm_pcd_plcr_next_engine_params_u params_on_green;
++
++ ioc_fm_pcd_engine next_engine_on_yellow;
++ ioc_compat_fm_pcd_plcr_next_engine_params_u params_on_yellow;
++
++ ioc_fm_pcd_engine next_engine_on_red;
++ ioc_compat_fm_pcd_plcr_next_engine_params_u params_on_red;
++
++ bool trap_profile_on_flow_A;
++ bool trap_profile_on_flow_B;
++ bool trap_profile_on_flow_C;
++ compat_uptr_t id;
++} ioc_compat_fm_pcd_plcr_profile_params_t;
++
++typedef struct ioc_compat_fm_obj_t {
++ compat_uptr_t obj;
++} ioc_compat_fm_obj_t;
++
++typedef struct ioc_compat_fm_pcd_kg_scheme_select_t {
++ bool direct;
++ compat_uptr_t scheme_id;
++} ioc_compat_fm_pcd_kg_scheme_select_t;
++
++typedef struct ioc_compat_fm_pcd_port_schemes_params_t {
++ uint8_t num_of_schemes;
++ compat_uptr_t scheme_ids[FM_PCD_KG_NUM_OF_SCHEMES];
++} ioc_compat_fm_pcd_port_schemes_params_t;
++
++#if (DPAA_VERSION >= 11)
++typedef struct ioc_compat_fm_port_vsp_alloc_params_t {
++ uint8_t num_of_profiles; /**< Number of Virtual Storage Profiles */
++ uint8_t dflt_relative_id; /**< The default Virtual-Storage-Profile-id dedicated to Rx/OP port
++ The same default Virtual-Storage-Profile-id will be for coupled Tx port
++ if relevant function called for Rx port */
++ compat_uptr_t p_fm_tx_port; /**< Handle to coupled Tx Port; not relevant for OP port. */
++}ioc_compat_fm_port_vsp_alloc_params_t;
++#endif /* (DPAA_VERSION >= 11) */
++
++typedef struct ioc_compat_fm_pcd_net_env_params_t {
++ uint8_t num_of_distinction_units;
++ ioc_fm_pcd_distinction_unit_t units[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /* same structure*/
++ compat_uptr_t id;
++} ioc_compat_fm_pcd_net_env_params_t;
++
++typedef struct ioc_compat_fm_pcd_prs_sw_params_t {
++ bool override;
++ uint32_t size;
++ uint16_t base;
++ compat_uptr_t p_code;
++ uint32_t sw_prs_data_params[IOC_FM_PCD_PRS_NUM_OF_HDRS];
++ uint8_t num_of_labels;
++ ioc_fm_pcd_prs_label_params_t labels_table[IOC_FM_PCD_PRS_NUM_OF_LABELS];
++} ioc_compat_fm_pcd_prs_sw_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_next_kg_params_t {
++ bool override_fqid;
++ uint32_t new_fqid;
++#if DPAA_VERSION >= 11
++ uint8_t new_relative_storage_profile_id;
++#endif
++ compat_uptr_t p_direct_scheme;
++} ioc_compat_fm_pcd_cc_next_kg_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_next_cc_params_t {
++ compat_uptr_t cc_node_id;
++} ioc_compat_fm_pcd_cc_next_cc_params_t;
++
++#if DPAA_VERSION >= 11
++typedef struct ioc_compat_fm_pcd_cc_next_fr_params_t {
++ compat_uptr_t frm_replic_id;
++} ioc_compat_fm_pcd_cc_next_fr_params_t;
++#endif /* DPAA_VERSION >= 11 */
++
++typedef struct ioc_compat_fm_pcd_cc_next_engine_params_t {
++ ioc_fm_pcd_engine next_engine;
++ union {
++ ioc_compat_fm_pcd_cc_next_cc_params_t cc_params; /**< compat structure*/
++ ioc_fm_pcd_cc_next_plcr_params_t plcr_params; /**< same structure*/
++ ioc_fm_pcd_cc_next_enqueue_params_t enqueue_params; /**< same structure*/
++ ioc_compat_fm_pcd_cc_next_kg_params_t kg_params; /**< compat structure*/
++#if DPAA_VERSION >= 11
++ ioc_compat_fm_pcd_cc_next_fr_params_t fr_params; /**< compat structure*/
++#endif /* DPAA_VERSION >= 11 */
++ } params;
++ compat_uptr_t manip_id;
++ bool statistics_en;
++} ioc_compat_fm_pcd_cc_next_engine_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_grp_params_t {
++ uint8_t num_of_distinction_units;
++ uint8_t unit_ids [IOC_FM_PCD_MAX_NUM_OF_CC_UNITS];
++ ioc_compat_fm_pcd_cc_next_engine_params_t next_engine_per_entries_in_grp[IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP];
++} ioc_compat_fm_pcd_cc_grp_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_tree_params_t {
++ compat_uptr_t net_env_id;
++ uint8_t num_of_groups;
++ ioc_compat_fm_pcd_cc_grp_params_t fm_pcd_cc_group_params [IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS];
++ compat_uptr_t id;
++} ioc_compat_fm_pcd_cc_tree_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t {
++ compat_uptr_t id;
++ uint8_t grp_indx;
++ uint8_t indx;
++ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params;
++} ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_key_params_t {
++ compat_uptr_t p_key;
++ compat_uptr_t p_mask;
++ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params; /**< compat structure*/
++} ioc_compat_fm_pcd_cc_key_params_t;
++
++typedef struct ioc_compat_keys_params_t {
++ uint16_t max_num_of_keys;
++ bool mask_support;
++ ioc_fm_pcd_cc_stats_mode statistics_mode;
++#if (DPAA_VERSION >= 11)
++ uint16_t frame_length_ranges[IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
++#endif /* (DPAA_VERSION >= 11) */
++ uint16_t num_of_keys;
++ uint8_t key_size;
++ ioc_compat_fm_pcd_cc_key_params_t key_params[IOC_FM_PCD_MAX_NUM_OF_KEYS]; /**< compat structure*/
++ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss; /**< compat structure*/
++} ioc_compat_keys_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_node_params_t {
++ ioc_fm_pcd_extract_entry_t extract_cc_params; /**< same structure*/
++ ioc_compat_keys_params_t keys_params; /**< compat structure*/
++ compat_uptr_t id;
++} ioc_compat_fm_pcd_cc_node_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining a hash table
++*//***************************************************************************/
++typedef struct ioc_compat_fm_pcd_hash_table_params_t {
++ uint16_t max_num_of_keys;
++ ioc_fm_pcd_cc_stats_mode statistics_mode;
++ uint8_t kg_hash_shift;
++ uint16_t hash_res_mask;
++ uint8_t hash_shift;
++ uint8_t match_key_size;
++ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss;
++ compat_uptr_t id;
++} ioc_compat_fm_pcd_hash_table_params_t;
++
++typedef struct ioc_compat_fm_pcd_hash_table_add_key_params_t {
++ compat_uptr_t p_hash_tbl;
++ uint8_t key_size;
++ ioc_compat_fm_pcd_cc_key_params_t key_params;
++} ioc_compat_fm_pcd_hash_table_add_key_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_node_modify_key_params_t {
++ compat_uptr_t id;
++ uint16_t key_indx;
++ uint8_t key_size;
++ compat_uptr_t p_key;
++ compat_uptr_t p_mask;
++} ioc_compat_fm_pcd_cc_node_modify_key_params_t;
++
++typedef struct ioc_compat_fm_pcd_hash_table_remove_key_params_t {
++ compat_uptr_t p_hash_tbl;
++ uint8_t key_size;
++ compat_uptr_t p_key;
++} ioc_compat_fm_pcd_hash_table_remove_key_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t {
++ compat_uptr_t id;
++ uint16_t key_indx;
++ uint8_t key_size;
++ ioc_compat_fm_pcd_cc_key_params_t key_params;
++} ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t;
++
++typedef struct ioc_compat_fm_port_pcd_plcr_params_t {
++ compat_uptr_t plcr_profile_id;
++} ioc_compat_fm_port_pcd_plcr_params_t;
++
++typedef struct ioc_compat_fm_port_pcd_cc_params_t {
++ compat_uptr_t cc_tree_id;
++} ioc_compat_fm_port_pcd_cc_params_t;
++
++typedef struct ioc_compat_fm_port_pcd_kg_params_t {
++ uint8_t num_of_schemes;
++ compat_uptr_t scheme_ids[FM_PCD_KG_NUM_OF_SCHEMES];
++ bool direct_scheme;
++ compat_uptr_t direct_scheme_id;
++} ioc_compat_fm_port_pcd_kg_params_t;
++
++typedef struct ioc_compat_fm_port_pcd_params_t {
++ ioc_fm_port_pcd_support pcd_support;
++ compat_uptr_t net_env_id;
++ compat_uptr_t p_prs_params;
++ compat_uptr_t p_cc_params;
++ compat_uptr_t p_kg_params;
++ compat_uptr_t p_plcr_params;
++ compat_uptr_t p_ip_reassembly_manip;
++#if DPAA_VERSION >= 11
++ compat_uptr_t p_capwap_reassembly_manip;
++#endif
++} ioc_compat_fm_port_pcd_params_t;
++
++typedef struct ioc_compat_fm_pcd_kg_cc_t {
++ compat_uptr_t tree_id;
++ uint8_t grp_id;
++ bool plcr_next;
++ bool bypass_plcr_profile_generation;
++ ioc_fm_pcd_kg_plcr_profile_t plcr_profile;
++} ioc_compat_fm_pcd_kg_cc_t;
++
++typedef struct ioc_compat_fm_pcd_kg_scheme_params_t {
++ bool modify;
++ union {
++ uint8_t relative_scheme_id;
++ compat_uptr_t scheme_id;
++ } scm_id;
++ bool always_direct;
++ struct {
++ compat_uptr_t net_env_id;
++ uint8_t num_of_distinction_units;
++ uint8_t unit_ids[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
++ } net_env_params;
++ bool use_hash;
++ ioc_fm_pcd_kg_key_extract_and_hash_params_t key_extract_and_hash_params;
++ bool bypass_fqid_generation;
++ uint32_t base_fqid;
++ uint8_t num_of_used_extracted_ors;
++ ioc_fm_pcd_kg_extracted_or_params_t extracted_ors[IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS];
++#if DPAA_VERSION >= 11
++ bool override_storage_profile;
++ ioc_fm_pcd_kg_storage_profile_t storage_profile;
++#endif /* DPAA_VERSION >= 11 */
++ ioc_fm_pcd_engine next_engine;
++ union{
++ ioc_fm_pcd_done_action done_action;
++ ioc_fm_pcd_kg_plcr_profile_t plcr_profile;
++ ioc_compat_fm_pcd_kg_cc_t cc;
++ } kg_next_engine_params;
++ ioc_fm_pcd_kg_scheme_counter_t scheme_counter;
++ compat_uptr_t id;
++} ioc_compat_fm_pcd_kg_scheme_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t {
++ compat_uptr_t id;
++ uint16_t key_indx;
++ uint8_t key_size;
++ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params;
++} ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t;
++
++typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_generic_params_t {
++ uint8_t offset;
++ uint8_t size;
++ bool replace;
++ compat_uptr_t p_data;
++} ioc_compat_fm_pcd_manip_hdr_insrt_generic_params_t;
++
++typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_specific_l2_params_t {
++ ioc_fm_pcd_manip_hdr_insrt_specific_l2 specific_l2;
++ bool update;
++ uint8_t size;
++ compat_uptr_t p_data;
++} ioc_compat_fm_pcd_manip_hdr_insrt_specific_l2_params_t;
++
++typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_t {
++ uint8_t size; /**< size of inserted section */
++ compat_uptr_t p_data; /**< data to be inserted */
++} ioc_compat_fm_pcd_manip_hdr_insrt_t;
++
++#if (DPAA_VERSION >= 11)
++typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_ip_params_t {
++ bool calc_l4_checksum; /**< Calculate L4 checksum. */
++ ioc_fm_pcd_manip_hdr_qos_mapping_mode mapping_mode; /**< TODO */
++ uint8_t last_pid_offset; /**< the offset of the last Protocol within
++ the inserted header */
++ uint16_t id; /**< 16 bit New IP ID */
++ bool dont_frag_overwrite;
++ /**< IPv4 only. DF is overwritten with the hash-result next-to-last byte.
++ * This byte is configured to be overwritten when RPD is set. */
++ uint8_t last_dst_offset;
++ /**< IPv6 only. if routing extension exist, user should set the offset of the destination address
++ * in order to calculate UDP checksum pseudo header;
++ * Otherwise set it to '0'. */
++ ioc_compat_fm_pcd_manip_hdr_insrt_t insrt; /**< size and data to be inserted. */
++} ioc_compat_fm_pcd_manip_hdr_insrt_ip_params_t;
++#endif /* (DPAA_VERSION >= 11) */
++
++typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_by_hdr_params_t {
++ ioc_fm_pcd_manip_hdr_insrt_by_hdr_type type;
++ union {
++ ioc_compat_fm_pcd_manip_hdr_insrt_specific_l2_params_t specific_l2_params;
++#if (DPAA_VERSION >= 11)
++ ioc_compat_fm_pcd_manip_hdr_insrt_ip_params_t ip_params;
++ ioc_compat_fm_pcd_manip_hdr_insrt_t insrt;
++#endif /* (DPAA_VERSION >= 11) */
++ } u;
++} ioc_compat_fm_pcd_manip_hdr_insrt_by_hdr_params_t;
++
++typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_params_t {
++ ioc_fm_pcd_manip_hdr_insrt_type type;
++ union {
++ ioc_compat_fm_pcd_manip_hdr_insrt_by_hdr_params_t by_hdr;
++ ioc_compat_fm_pcd_manip_hdr_insrt_generic_params_t generic;
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++#error "FM_CAPWAP_SUPPORT feature not supported!"
++ ioc_fm_pcd_manip_hdr_insrt_by_template_params_t by_template;
++#endif /* FM_CAPWAP_SUPPORT */
++ } u;
++} ioc_compat_fm_pcd_manip_hdr_insrt_params_t;
++
++typedef struct ioc_compat_fm_pcd_manip_hdr_params_t {
++ bool rmv;
++ ioc_fm_pcd_manip_hdr_rmv_params_t rmv_params;
++ bool insrt;
++ ioc_compat_fm_pcd_manip_hdr_insrt_params_t insrt_params;
++ bool field_update;
++ ioc_fm_pcd_manip_hdr_field_update_params_t field_update_params;
++ bool custom;
++ ioc_fm_pcd_manip_hdr_custom_params_t custom_params;
++ bool dont_parse_after_manip;
++} ioc_compat_fm_pcd_manip_hdr_params_t;
++
++typedef struct ioc_compat_fm_pcd_manip_special_offload_params_t {
++ bool decryption;
++ bool ecn_copy;
++ bool dscp_copy;
++ bool variable_ip_hdr_len;
++ bool variable_ip_version;
++ uint8_t outer_ip_hdr_len;
++ uint16_t arw_size;
++ compat_uptr_t arw_addr;
++} ioc_compat_fm_pcd_manip_special_offload_params_t;
++
++typedef struct ioc_compat_fm_pcd_manip_params_t {
++ ioc_fm_pcd_manip_type type;
++ union {
++ ioc_compat_fm_pcd_manip_hdr_params_t hdr;
++ ioc_fm_pcd_manip_reassem_params_t reassem;
++ ioc_fm_pcd_manip_frag_params_t frag;
++ ioc_compat_fm_pcd_manip_special_offload_params_t special_offload;
++ } u;
++ compat_uptr_t p_next_manip;
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++#error "FM_CAPWAP_SUPPORT feature not supported!"
++ bool frag_or_reasm;
++ ioc_fm_pcd_manip_frag_or_reasm_params_t frag_or_reasm_params;
++#endif /* FM_CAPWAP_SUPPORT */
++ compat_uptr_t id;
++} ioc_compat_fm_pcd_manip_params_t;
++
++typedef struct ioc_compat_fm_pcd_manip_get_stats_t {
++ compat_uptr_t id;
++ ioc_fm_pcd_manip_stats_t stats;
++} ioc_compat_fm_pcd_manip_get_stats_t;
++
++#if (DPAA_VERSION >= 11)
++typedef struct ioc_compat_fm_pcd_frm_replic_group_params_t {
++ uint8_t max_num_of_entries;
++ uint8_t num_of_entries;
++ ioc_compat_fm_pcd_cc_next_engine_params_t
++ next_engine_params[IOC_FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES];
++ compat_uptr_t id;
++} ioc_compat_fm_pcd_frm_replic_group_params_t;
++
++typedef struct ioc_compat_fm_pcd_frm_replic_member_t {
++ compat_uptr_t h_replic_group;
++ uint16_t member_index;
++} ioc_compat_fm_pcd_frm_replic_member_t;
++
++typedef struct ioc_compat_fm_pcd_frm_replic_member_params_t {
++ ioc_compat_fm_pcd_frm_replic_member_t member;
++ ioc_compat_fm_pcd_cc_next_engine_params_t next_engine_params;
++} ioc_compat_fm_pcd_frm_replic_member_params_t;
++
++typedef struct ioc_compat_fm_vsp_params_t {
++ compat_uptr_t p_fm; /**< A handle to the FM object this VSP related to */
++ ioc_fm_ext_pools ext_buf_pools; /**< Which external buffer pools are used
++ (up to FM_PORT_MAX_NUM_OF_EXT_POOLS), and their sizes.
++ parameter associated with Rx / OP port */
++ uint16_t liodn_offset; /**< VSP's LIODN offset */
++ struct {
++ ioc_fm_port_type port_type; /**< Port type */
++ uint8_t port_id; /**< Port Id - relative to type */
++ } port_params;
++ uint8_t relative_profile_id; /**< VSP Id - relative to VSP's range
++ defined in relevant FM object */
++ compat_uptr_t id; /**< return value */
++} ioc_compat_fm_vsp_params_t;
++
++typedef struct ioc_compat_fm_buf_pool_depletion_params_t {
++ compat_uptr_t p_fm_vsp;
++ ioc_fm_buf_pool_depletion_t fm_buf_pool_depletion;
++} ioc_compat_fm_buf_pool_depletion_params_t;
++
++typedef struct ioc_compat_fm_buffer_prefix_content_params_t {
++ compat_uptr_t p_fm_vsp;
++ ioc_fm_buffer_prefix_content_t fm_buffer_prefix_content;
++} ioc_compat_fm_buffer_prefix_content_params_t;
++
++typedef struct ioc_compat_fm_vsp_config_no_sg_params_t {
++ compat_uptr_t p_fm_vsp;
++ bool no_sg;
++} ioc_compat_fm_vsp_config_no_sg_params_t;
++
++typedef struct ioc_compat_fm_vsp_prs_result_params_t {
++ compat_uptr_t p_fm_vsp;
++ compat_uptr_t p_data;
++} ioc_compat_fm_vsp_prs_result_params_t;
++
++#endif /* (DPAA_VERSION >= 11) */
++typedef struct ioc_compat_fm_pcd_kg_scheme_spc_t {
++ uint32_t val;
++ compat_uptr_t id;
++} ioc_compat_fm_pcd_kg_scheme_spc_t;
++
++typedef struct ioc_compat_fm_ctrl_mon_counters_params_t {
++ uint8_t fm_ctrl_index;
++ compat_uptr_t p_mon;
++} ioc_compat_fm_ctrl_mon_counters_params_t;
++
++typedef struct ioc_compat_fm_pcd_cc_tbl_get_stats_t {
++ compat_uptr_t id;
++ uint16_t key_index;
++ ioc_fm_pcd_cc_key_statistics_t statistics;
++} ioc_compat_fm_pcd_cc_tbl_get_stats_t;
++
++
++/* } pcd compat structures */
++
++void compat_obj_delete(
++ ioc_compat_fm_obj_t *compat_id,
++ ioc_fm_obj_t *id);
++
++/* pcd compat functions { */
++void compat_copy_fm_pcd_plcr_profile(
++ ioc_compat_fm_pcd_plcr_profile_params_t *compat_param,
++ ioc_fm_pcd_plcr_profile_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_cc_key(
++ ioc_compat_fm_pcd_cc_key_params_t *compat_param,
++ ioc_fm_pcd_cc_key_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(
++ ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param,
++ ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_cc_node_modify_next_engine(
++ ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param,
++ ioc_fm_pcd_cc_node_modify_next_engine_params_t *param,
++ uint8_t compat);
++
++void compat_fm_pcd_cc_tree_modify_next_engine(
++ ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *compat_param,
++ ioc_fm_pcd_cc_tree_modify_next_engine_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_hash_table(
++ ioc_compat_fm_pcd_hash_table_params_t *compat_param,
++ ioc_fm_pcd_hash_table_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_cc_grp(
++ ioc_compat_fm_pcd_cc_grp_params_t *compat_param,
++ ioc_fm_pcd_cc_grp_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_cc_tree(
++ ioc_compat_fm_pcd_cc_tree_params_t *compat_param,
++ ioc_fm_pcd_cc_tree_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_cc_tbl_get_stats(
++ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param,
++ ioc_fm_pcd_cc_tbl_get_stats_t *param,
++ uint8_t compat);
++
++void compat_fm_pcd_prs_sw(
++ ioc_compat_fm_pcd_prs_sw_params_t *compat_param,
++ ioc_fm_pcd_prs_sw_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_kg_scheme(
++ ioc_compat_fm_pcd_kg_scheme_params_t *compat_param,
++ ioc_fm_pcd_kg_scheme_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_kg_scheme_select(
++ ioc_compat_fm_pcd_kg_scheme_select_t *compat_param,
++ ioc_fm_pcd_kg_scheme_select_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_kg_schemes_params(
++ ioc_compat_fm_pcd_port_schemes_params_t *compat_param,
++ ioc_fm_pcd_port_schemes_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_port_pcd_kg(
++ ioc_compat_fm_port_pcd_kg_params_t *compat_param,
++ ioc_fm_port_pcd_kg_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_port_pcd(
++ ioc_compat_fm_port_pcd_params_t *compat_param,
++ ioc_fm_port_pcd_params_t *param,
++ uint8_t compat);
++
++#if (DPAA_VERSION >= 11)
++void compat_copy_fm_port_vsp_alloc_params(
++ ioc_compat_fm_port_vsp_alloc_params_t *compat_param,
++ ioc_fm_port_vsp_alloc_params_t *param,
++ uint8_t compat);
++#endif /* (DPAA_VERSION >= 11) */
++
++void compat_copy_fm_pcd_net_env(
++ ioc_compat_fm_pcd_net_env_params_t *compat_param,
++ ioc_fm_pcd_net_env_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_cc_node_modify_key(
++ ioc_compat_fm_pcd_cc_node_modify_key_params_t *compat_param,
++ ioc_fm_pcd_cc_node_modify_key_params_t *param,
++ uint8_t compat);
++
++void compat_copy_keys(
++ ioc_compat_keys_params_t *compat_param,
++ ioc_keys_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_cc_node(
++ ioc_compat_fm_pcd_cc_node_params_t *compat_param,
++ ioc_fm_pcd_cc_node_params_t *param,
++ uint8_t compat);
++
++void compat_fm_pcd_manip_set_node(
++ ioc_compat_fm_pcd_manip_params_t *compat_param,
++ ioc_fm_pcd_manip_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_manip_get_stats(
++ ioc_compat_fm_pcd_manip_get_stats_t *compat_param,
++ ioc_fm_pcd_manip_get_stats_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_port_pcd_modify_tree(
++ ioc_compat_fm_obj_t *compat_id,
++ ioc_fm_obj_t *id,
++ uint8_t compat);
++
++#if (DPAA_VERSION >= 11)
++void compat_copy_fm_pcd_frm_replic_group_params(
++ ioc_compat_fm_pcd_frm_replic_group_params_t *compat_param,
++ ioc_fm_pcd_frm_replic_group_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_frm_replic_member(
++ ioc_compat_fm_pcd_frm_replic_member_t *compat_param,
++ ioc_fm_pcd_frm_replic_member_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_pcd_frm_replic_member_params(
++ ioc_compat_fm_pcd_frm_replic_member_params_t *compat_param,
++ ioc_fm_pcd_frm_replic_member_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_vsp_params(
++ ioc_compat_fm_vsp_params_t *compat_param,
++ ioc_fm_vsp_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_buf_pool_depletion_params(
++ ioc_compat_fm_buf_pool_depletion_params_t *compat_param,
++ ioc_fm_buf_pool_depletion_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_buffer_prefix_content_params(
++ ioc_compat_fm_buffer_prefix_content_params_t *compat_param,
++ ioc_fm_buffer_prefix_content_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_vsp_config_no_sg_params(
++ ioc_compat_fm_vsp_config_no_sg_params_t *compat_param,
++ ioc_fm_vsp_config_no_sg_params_t *param,
++ uint8_t compat);
++
++void compat_copy_fm_vsp_prs_result_params(
++ ioc_compat_fm_vsp_prs_result_params_t *compat_param,
++ ioc_fm_vsp_prs_result_params_t *param,
++ uint8_t compat);
++
++#endif /* (DPAA_VERSION >= 11) */
++
++void compat_copy_fm_pcd_kg_scheme_spc(
++ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param,
++ ioc_fm_pcd_kg_scheme_spc_t *param,
++ uint8_t compat);
++
++/* } pcd compat functions */
++#endif
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources.h b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources.h
+new file mode 100644
+index 00000000..1b72e1d5
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources.h
+@@ -0,0 +1,121 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_resources.h
++
++ @Description FMD wrapper resource allocation functions.
++
++*/
++
++#ifndef LNXWRP_RESOURCES_H_
++#define LNXWRP_RESOURCES_H_
++
++#if !defined(FMAN_RESOURCES_UNIT_TEST)
++#include "lnxwrp_fm.h"
++#else
++#include "lnxwrp_resources_ut.h"
++#endif
++
++#define ROUND(X) ((2*(X)+1)/2)
++#define CEIL(X) ((X)+1)
++/* #define ROUND_DIV(X, Y) (((X)+(Y)/2)/(Y)) */
++#define ROUND_DIV(X, Y) ((2*(X)+(Y))/(2*(Y)))
++#define CEIL_DIV(X, Y) (((X)+(Y)-1)/(Y))
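++/* Worked examples of the rounding helpers above (integer arithmetic):
++ *   ROUND_DIV(7, 2) = (2*7 + 2) / (2*2) = 16/4 = 4   (7/2 = 3.5 rounds up)
++ *   ROUND_DIV(5, 2) = (2*5 + 2) / (2*2) = 12/4 = 3   (5/2 = 2.5 rounds up)
++ *   CEIL_DIV(7, 2)  = (7 + 2 - 1) / 2   = 8/2  = 4   (ceiling of 7/2)
++ *   CEIL_DIV(6, 2)  = (6 + 2 - 1) / 2   = 7/2  = 3   (exact division)
++ */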
++
++/* used for resource calculations */
++#define DPDE_1G 2 /* DQDP 1g - from LLD:
++ DEFAULT_PORT_txFifoDeqPipelineDepth_1G */
++#define DPDE_10G 8 /* DQDP 10g - from LLD:
++ DEFAULT_PORT_txFifoDeqPipelineDepth_10G */
++
++int fm_set_active_fman_ports(struct platform_device *of_dev,
++ t_LnxWrpFmDev *p_LnxWrpFmDev);
++
++/* Calculate the fifosize based on the MURAM allocation, the number of
++ * ports, the dpde value and s/g software support (the kernel does not
++ * support s/g).
++ *
++ * Algorithm summary:
++ * - Calculate the minimum fifosize required for every type of port
++ *   (TX, RX for 1G, 2.5G and 10G).
++ * - Set every TX port to the minimum fifosize required.
++ * - Distribute the remaining buffers (after all TX were set) to the RX
++ *   ports based on:
++ *     1G   RX = Remaining_buffers * 1/(1+2.5+10)
++ *     2.5G RX = Remaining_buffers * 2.5/(1+2.5+10)
++ *     10G  RX = Remaining_buffers * 10/(1+2.5+10)
++ * - If an RX share is smaller than the minimum required, set the minimum
++ *   required instead.
++ * - Finally, distribute any leftovers (caused by the imprecise integer
++ *   arithmetic). On over-allocation, cut buffers from all RX ports
++ *   without crossing the minimum required threshold; if the threshold
++ *   would have to be crossed to absorb the over-allocation, this
++ *   configuration cannot be set - KERN_ALERT.
++*/
++int fm_precalculate_fifosizes(t_LnxWrpFmDev *p_LnxWrpFmDev,
++ int muram_fifo_size);
++
++#if !defined(FMAN_RESOURCES_UNIT_TEST)
++int fm_config_precalculate_fifosize(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev);
++#endif
++
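++/*
++ * Illustrative sketch only (not the driver's implementation): the
++ * proportional RX split described above, expressed with the integer
++ * helpers defined in this header. The function name and its inputs
++ * (remaining and the three output pointers) are hypothetical.
++ */
++#if 0
++static void example_rx_split(uint32_t remaining,
++			     uint32_t *rx1g, uint32_t *rx2g5, uint32_t *rx10g)
++{
++	/* Weights 1 : 2.5 : 10 scaled by 2 to stay integral (2 : 5 : 20),
++	 * so the denominator becomes 2*(1 + 2.5 + 10) = 27. */
++	*rx1g  = ROUND_DIV(remaining * 2,  27);
++	*rx2g5 = ROUND_DIV(remaining * 5,  27);
++	*rx10g = ROUND_DIV(remaining * 20, 27);
++	/* The real code must still raise each share to its per-port minimum
++	 * and distribute/trim leftovers, as described above. */
++}
++#endif
++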
++/* Compute FMan open DMAs based on the total number of open DMAs and the
++ * number of available FMan ports.
++ *
++ * By default, 10G ports are set to the input parameters. The other ports
++ * try to keep the proportion rx = 2*tx open DMAs, or their thresholds.
++ *
++ * Any leftovers are set as shared.
++ *
++ * If the computation overflows, the open DMAs are decremented for all
++ * ports without crossing their thresholds. If the thresholds are reached
++ * and there is still an overflow, an error is returned.
++*/
++int fm_precalculate_open_dma(t_LnxWrpFmDev *p_LnxWrpFmDev,
++ int max_fm_open_dma,
++ int default_tx_10g_dmas,
++ int default_rx_10g_dmas,
++ int min_tx_10g_treshold, int min_rx_10g_treshold);
++
++#if !defined(FMAN_RESOURCES_UNIT_TEST)
++int fm_config_precalculate_open_dma(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev);
++#endif
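++
++/*
++ * Illustrative arithmetic only, using the unit-test defaults from
++ * lnxwrp_resources_ut.h (FM_DEFAULT_TX10G_OPENDMA = 8,
++ * FM_DEFAULT_RX10G_OPENDMA = 8): out of 32 open DMAs, a single 10G
++ * RX/TX pair consumes 8 + 8 = 16, leaving 16 to be spread over the
++ * remaining ports in the rx = 2*tx (or rx = tx) proportion, with any
++ * leftovers marked as shared.
++ */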
++
++/* Compute FMan TNUMs based on the available TNUMs and the number of ports.
++ * Set defaults (minimum thresholds) and then distribute the leftovers. */
++int fm_precalculate_tnums(t_LnxWrpFmDev *p_LnxWrpFmDev, int max_fm_tnums);
++
++#if !defined(FMAN_RESOURCES_UNIT_TEST)
++int fm_config_precalculate_tnums(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev);
++#endif
++
++#endif /* LNXWRP_RESOURCES_H_ */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.c b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.c
+new file mode 100644
+index 00000000..6c06a5a6
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.c
+@@ -0,0 +1,191 @@
++/* Copyright (c) 2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "lnxwrp_resources.h"
++#include "lnxwrp_resources_ut.h"
++
++#define KILOBYTE 0x400 /* 1024 */
++
++typedef enum e_board_type {
++ e_p3041,
++ e_p4080,
++ e_p5020,
++ e_p1023
++} e_board_type;
++
++uint8_t board_type;
++uint32_t muram_size = 0;
++uint32_t dmas_num = 0;
++uint32_t task_num = 0;
++uint32_t frame_size = 0;
++uint32_t oh_num = 0;
++uint32_t num_ports_1g = 0;
++uint32_t num_ports_10g = 0;
++uint32_t num_ports_2g5 = 0;
++uint32_t fsl_fman_phy_maxfrm = 0;
++uint32_t dpa_rx_extra_headroom = 0;
++
++void show_help(void){
++	printf(" help: \n");
++	printf(" -b <board_type> -f <max_frame_size(mtu)> -o <num_oh_ports> -g1"
++		" <num_1g_ports> -g10 <num_10g_ports> -g25 <num_2g5_ports>\n");
++	printf(" optional: -d <num_dmas> -t <num_tnums>\n");
++	printf(" Maximum num of DMAs available:  P3/P4/P5:32 , P1023:16 \n");
++	printf(" Maximum num of TNUMs available: P3/P4/P5:128, P1023:32 \n");
++	printf(" MURAM size: P3/P4/P5:160K, P1023:64K \n");
++	printf(" Number of ports:\n");
++	printf("	P3/P5: 5p 1g, 1p 10g, 7p oh \n");
++	printf("	P4   : 4p 1g, 1p 10g, 7p oh \n");
++	printf("	P1   : 2p 1g, 0p 10g, 4p oh \n");
++	printf(" MTU: Default:1522, Jumbo:9600 \n");
++}
++
++int fm_set_param(t_LnxWrpFmDev *p_LnxWrpFmDev) {
++ struct fm_active_ports *fm_active_ports_info = NULL;
++ fm_active_ports_info = &p_LnxWrpFmDev->fm_active_ports_info;
++
++ switch(board_type){
++ case e_p3041:
++ case e_p5020:
++ muram_size = 160*KILOBYTE;
++ dmas_num = 32;
++ task_num = 128;
++ if ((num_ports_1g+num_ports_2g5) > 5 || num_ports_10g > 1 || oh_num > 7)
++ goto err_fm_set_param;
++ break;
++ case e_p4080:
++ muram_size = 160*KILOBYTE;
++ dmas_num = 32;
++ task_num = 128;
++ if ((num_ports_1g+num_ports_2g5) > 4 || num_ports_10g > 1 || oh_num > 7)
++ goto err_fm_set_param;
++ break;
++ case e_p1023:
++ muram_size = 64*KILOBYTE;
++ dmas_num = 16;
++ task_num = 128;
++ if ((num_ports_1g+num_ports_2g5) > 2 || oh_num > 4)
++ goto err_fm_set_param;
++ break;
++ default:
++ goto err_fm_set_param;
++ break;
++ }
++
++ p_LnxWrpFmDev->id = 0;
++ fsl_fman_phy_maxfrm = frame_size;
++ dpa_rx_extra_headroom = 0; /* ATTENTION: can be != 0 */
++ fm_active_ports_info->num_oh_ports = oh_num;
++ fm_active_ports_info->num_tx_ports = num_ports_1g;
++ fm_active_ports_info->num_rx_ports = num_ports_1g;
++ fm_active_ports_info->num_tx25_ports = num_ports_2g5;
++ fm_active_ports_info->num_rx25_ports = num_ports_2g5;
++ fm_active_ports_info->num_tx10_ports = num_ports_10g;
++ fm_active_ports_info->num_rx10_ports = num_ports_10g;
++
++ return 0;
++
++err_fm_set_param:
++	printf(" ERR: Too many ports!\n");
++ return -1;
++}
++
++int main (int argc, char *argv[]){
++ t_LnxWrpFmDev LnxWrpFmDev;
++ t_LnxWrpFmDev *p_LnxWrpFmDev = &LnxWrpFmDev;
++ int tokens_cnt = 1;
++
++ char *token = NULL;
++
++ while(tokens_cnt < argc)
++ {
++ token = argv[tokens_cnt++];
++ if (strcmp(token, "-b") == 0){
++ if(strcmp(argv[tokens_cnt],"p3") == 0)
++ board_type = e_p3041;
++ else if(strcmp(argv[tokens_cnt],"p4") == 0)
++ board_type = e_p4080;
++ else if(strcmp(argv[tokens_cnt],"p5") == 0)
++ board_type = e_p5020;
++ else if(strcmp(argv[tokens_cnt],"p1") == 0)
++ board_type = e_p1023;
++ else
++ show_help();
++ tokens_cnt++;
++ }
++ else if(strcmp(token, "-d") == 0){
++ dmas_num = atoi(argv[tokens_cnt++]);
++ }
++ else if(strcmp(token, "-t") == 0)
++ task_num = atoi(argv[tokens_cnt++]);
++ else if(strcmp(token, "-f") == 0)
++ frame_size = atoi(argv[tokens_cnt++]);
++ else if(strcmp(token, "-o") == 0)
++ oh_num = atoi(argv[tokens_cnt++]);
++ else if(strcmp(token, "-g1") == 0)
++ num_ports_1g = atoi(argv[tokens_cnt++]);
++ else if(strcmp(token, "-g10") == 0)
++ num_ports_10g = atoi(argv[tokens_cnt++]);
++ else if(strcmp(token, "-g25") == 0)
++ num_ports_2g5 = atoi(argv[tokens_cnt++]);
++ else {
++ show_help();
++ return -1;
++ }
++ }
++
++ if(fm_set_param(p_LnxWrpFmDev) < 0){
++ show_help();
++ return -1;
++ }
++
++ if(fm_precalculate_fifosizes(
++ p_LnxWrpFmDev,
++ 128*KILOBYTE)
++ != 0)
++ return -1;
++ if(fm_precalculate_open_dma(
++ p_LnxWrpFmDev,
++ dmas_num, /* max open dmas:dpaa_integration_ext.h */
++ FM_DEFAULT_TX10G_OPENDMA, /* default TX 10g open dmas */
++ FM_DEFAULT_RX10G_OPENDMA, /* default RX 10g open dmas */
++ FM_10G_OPENDMA_MIN_TRESHOLD,/* TX 10g minimum treshold */
++ FM_10G_OPENDMA_MIN_TRESHOLD)/* RX 10g minimum treshold */
++ != 0)
++ return -1;
++ if(fm_precalculate_tnums(
++ p_LnxWrpFmDev,
++ task_num) /* max TNUMS: dpa integration file. */
++ != 0)
++ return -1;
++
++ return 0;
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.h b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.h
+new file mode 100644
+index 00000000..063946eb
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.h
+@@ -0,0 +1,144 @@
++/* Copyright (c) 2012 Freescale Semiconductor, Inc
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef FM_RESS_TEST_H_
++#define FM_RESS_TEST_H_
++
++#include <stdint.h>
++#include <stdbool.h>
++#include <stdio.h>
++#include <assert.h>
++#include <string.h>
++#include <stdlib.h>
++
++#define _Packed
++#define _PackedType __attribute__ ((packed))
++#define MAX(x, y) (((x) > (y)) ? (x) : (y))
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++#define KERN_ALERT ""
++#define KERN_INFO ""
++#define ASSERT_COND assert
++#define printk printf
++#define NET_IP_ALIGN 0
++#define FM_FIFO_ALLOCATION_OLD_ALG
++
++#if defined(CONFIG_FMAN_DISABLE_OH_AND_DISTRIBUTE_RESOURCES)
++#define FM_10G_OPENDMA_MIN_TRESHOLD 8 /* 10g minimum threshold if only HC is enabled and no OH port is enabled */
++#define FM_OPENDMA_RX_TX_RAPORT 2 /* RX = 2*TX */
++#else
++#define FM_10G_OPENDMA_MIN_TRESHOLD 7 /* 10g minimum threshold if 7 OH ports are enabled */
++#define FM_OPENDMA_RX_TX_RAPORT 1 /* RX = TX */
++#endif
++#define FM_DEFAULT_TX10G_OPENDMA 8 /* default TX 10g open dmas */
++#define FM_DEFAULT_RX10G_OPENDMA 8 /* default RX 10g open dmas */
++
++/* Information about all active ports for an FMan.
++ * Note: some ports may be disabled by u-boot, thus will not be available. */
++struct fm_active_ports {
++ uint32_t num_oh_ports;
++ uint32_t num_tx_ports;
++ uint32_t num_rx_ports;
++ uint32_t num_tx25_ports;
++ uint32_t num_rx25_ports;
++ uint32_t num_tx10_ports;
++ uint32_t num_rx10_ports;
++};
++
++/* FMan resources precalculated at FM probe time based
++ * on the available FMan ports. */
++struct fm_resource_settings {
++ /* buffers - fifo sizes */
++ uint32_t tx1g_num_buffers;
++ uint32_t rx1g_num_buffers;
++ uint32_t tx2g5_num_buffers; /* Not supported yet by LLD */
++ uint32_t rx2g5_num_buffers; /* Not supported yet by LLD */
++ uint32_t tx10g_num_buffers;
++ uint32_t rx10g_num_buffers;
++ uint32_t oh_num_buffers;
++ uint32_t shared_ext_buffers;
++
++
++ /* open DMAs */
++ uint32_t tx_1g_dmas;
++ uint32_t rx_1g_dmas;
++ uint32_t tx_2g5_dmas; /* Not supported yet by LLD */
++ uint32_t rx_2g5_dmas; /* Not supported yet by LLD */
++ uint32_t tx_10g_dmas;
++ uint32_t rx_10g_dmas;
++ uint32_t oh_dmas;
++ uint32_t shared_ext_open_dma;
++
++ /* Tnums */
++ uint32_t tx_1g_tnums;
++ uint32_t rx_1g_tnums;
++ uint32_t tx_2g5_tnums; /* Not supported yet by LLD */
++ uint32_t rx_2g5_tnums; /* Not supported yet by LLD */
++ uint32_t tx_10g_tnums;
++ uint32_t rx_10g_tnums;
++ uint32_t oh_tnums;
++ uint32_t shared_ext_tnums;
++};
++
++typedef struct {
++ uint8_t id;
++ struct fm_active_ports fm_active_ports_info;
++ struct fm_resource_settings fm_resource_settings_info;
++} t_LnxWrpFmDev;
++
++typedef struct {
++ uint8_t id;
++} t_LnxWrpFmPortDev;
++
++typedef _Packed struct t_FmPrsResult {
++ volatile uint8_t lpid; /**< Logical port id */
++ volatile uint8_t shimr; /**< Shim header result */
++ volatile uint16_t l2r; /**< Layer 2 result */
++ volatile uint16_t l3r; /**< Layer 3 result */
++ volatile uint8_t l4r; /**< Layer 4 result */
++ volatile uint8_t cplan; /**< Classification plan id */
++ volatile uint16_t nxthdr; /**< Next Header */
++ volatile uint16_t cksum; /**< Checksum */
++ volatile uint32_t lcv; /**< LCV */
++ volatile uint8_t shim_off[3]; /**< Shim offset */
++ volatile uint8_t eth_off; /**< ETH offset */
++ volatile uint8_t llc_snap_off; /**< LLC_SNAP offset */
++ volatile uint8_t vlan_off[2]; /**< VLAN offset */
++ volatile uint8_t etype_off; /**< ETYPE offset */
++ volatile uint8_t pppoe_off; /**< PPP offset */
++ volatile uint8_t mpls_off[2]; /**< MPLS offset */
++ volatile uint8_t ip_off[2]; /**< IP offset */
++ volatile uint8_t gre_off; /**< GRE offset */
++ volatile uint8_t l4_off; /**< Layer 4 offset */
++ volatile uint8_t nxthdr_off; /**< Parser end point */
++} _PackedType t_FmPrsResult;
++
++#endif
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.make b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.make
+new file mode 100644
+index 00000000..58009cd8
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.make
+@@ -0,0 +1,28 @@
++CC=gcc
++
++LNXWRP_RESS_UT=lnxwrp_resources_ut
++OBJ=lnxwrp_resources
++
++INC_PATH=
++LIB_PATH=
++
++INC=$(addprefix -I,$(INC_PATH))
++LIB=$(addprefix -L,$(LIB_PATH))
++
++CFLAGS= -gdwarf-2 -g -O0 -Wall
++XFLAGS= -DFMAN_RESOURCES_UNIT_TEST
++
++all: $(LNXWRP_RESS_UT)
++
++$(LNXWRP_RESS_UT):$(addsuffix .o,$(OBJ)) $(LNXWRP_RESS_UT).o
++ $(CC) -o $(LNXWRP_RESS_UT) $(LNXWRP_RESS_UT).o $(addsuffix .o,$(OBJ))
++
++%.o: %.c
++ @(echo " (CC) $@")
++ @($(CC) $(INC) $(CFLAGS) $(XFLAGS) -o $(@) -c $<)
++
++.PHONY: clean
++
++clean:
++ rm -f *.o
++ rm -f $(LNXWRP_RESS_UT)
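++
++# Example usage (hypothetical invocation; see main() in
++# lnxwrp_resources_ut.c for the accepted flags):
++#   make -f lnxwrp_resources_ut.make
++#   ./lnxwrp_resources_ut -b p4 -f 1522 -o 7 -g1 4 -g10 1 -g25 0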
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c
+new file mode 100644
+index 00000000..813771bf
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c
+@@ -0,0 +1,60 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_sysfs.c
++
++ @Description FM wrapper sysfs related functions.
++
++*/
++
++#include <linux/types.h>
++#include "lnxwrp_sysfs.h"
++
++uint8_t fm_find_statistic_counter_by_name(const char *attr_name,
++ const struct sysfs_stats_t *sysfs_stats,
++ uint8_t *offset)
++{
++ int i = 0;
++
++ while (sysfs_stats[i].stat_name != NULL) {
++ if (strcmp(sysfs_stats[i].stat_name, attr_name) == 0) {
++ if (offset != NULL)
++ *offset = i;
++ return sysfs_stats[i].stat_counter;
++ }
++
++ i++;
++ }
++ WARN(1, "FMD: Should never get here!");
++ return 0;
++}
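++
++/*
++ * Usage sketch (hypothetical table and wrapper; the real stats tables
++ * live in lnxwrp_sysfs_fm.c): the array must be terminated with an
++ * empty entry, since the NULL stat_name is what stops the scan above.
++ */
++#if 0
++static const struct sysfs_stats_t example_stats[] = {
++	{ .stat_name = "enq_total_frame", .stat_counter = 0 },
++	{}	/* terminating entry: stat_name == NULL */
++};
++
++static uint8_t example_lookup(void)
++{
++	return fm_find_statistic_counter_by_name("enq_total_frame",
++						 example_stats, NULL);
++}
++#endif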
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h
+new file mode 100644
+index 00000000..2098b244
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h
+@@ -0,0 +1,60 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef LNXWRP_SYSFS_H_
++#define LNXWRP_SYSFS_H_
++
++/* Linux Headers ------------------- */
++#include <linux/version.h>
++
++#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++#ifdef MODVERSIONS
++#include <config/modversions.h>
++#endif /* MODVERSIONS */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/sysfs.h>
++
++struct sysfs_stats_t {
++ const char *stat_name;
++ uint8_t stat_counter;
++};
++
++uint8_t fm_find_statistic_counter_by_name(const char *attr_name,
++ const struct sysfs_stats_t *sysfs_stats,
++ uint8_t *offset);
++
++#endif /* LNXWRP_SYSFS_H_ */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c
+new file mode 100644
+index 00000000..1badbf98
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c
+@@ -0,0 +1,1855 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "lnxwrp_sysfs.h"
++#include "lnxwrp_sysfs_fm.h"
++#include "lnxwrp_fm.h"
++
++#include "../../sdk_fman/Peripherals/FM/inc/fm_common.h"
++#include "../../sdk_fman/Peripherals/FM/Pcd/fm_pcd.h"
++#include "../../sdk_fman/Peripherals/FM/Pcd/fm_kg.h"
++#include "../../sdk_fman/Peripherals/FM/Pcd/fm_plcr.h"
++
++#if defined(__ERR_MODULE__)
++#undef __ERR_MODULE__
++#endif
++
++#include "../../sdk_fman/Peripherals/FM/fm.h"
++#include <linux/delay.h>
++
++
++static int fm_get_counter(void *h_fm, e_FmCounters cnt_e, uint32_t *cnt_val);
++
++enum fm_dma_match_stats {
++ FM_DMA_COUNTERS_CMQ_NOT_EMPTY,
++ FM_DMA_COUNTERS_BUS_ERROR,
++ FM_DMA_COUNTERS_READ_BUF_ECC_ERROR,
++ FM_DMA_COUNTERS_WRITE_BUF_ECC_SYS_ERROR,
++ FM_DMA_COUNTERS_WRITE_BUF_ECC_FM_ERROR
++};
++
++static const struct sysfs_stats_t fm_sysfs_stats[] = {
++ /* FM statistics */
++ {
++ .stat_name = "enq_total_frame",
++ .stat_counter = e_FM_COUNTERS_ENQ_TOTAL_FRAME,
++ },
++ {
++ .stat_name = "deq_total_frame",
++ .stat_counter = e_FM_COUNTERS_DEQ_TOTAL_FRAME,
++ },
++ {
++ .stat_name = "deq_0",
++ .stat_counter = e_FM_COUNTERS_DEQ_0,
++ },
++ {
++ .stat_name = "deq_1",
++ .stat_counter = e_FM_COUNTERS_DEQ_1,
++ },
++ {
++ .stat_name = "deq_2",
++ .stat_counter = e_FM_COUNTERS_DEQ_2,
++ },
++ {
++ .stat_name = "deq_3",
++ .stat_counter = e_FM_COUNTERS_DEQ_3,
++ },
++ {
++ .stat_name = "deq_from_default",
++ .stat_counter = e_FM_COUNTERS_DEQ_FROM_DEFAULT,
++ },
++ {
++ .stat_name = "deq_from_context",
++ .stat_counter = e_FM_COUNTERS_DEQ_FROM_CONTEXT,
++ },
++ {
++ .stat_name = "deq_from_fd",
++ .stat_counter = e_FM_COUNTERS_DEQ_FROM_FD,
++ },
++ {
++ .stat_name = "deq_confirm",
++ .stat_counter = e_FM_COUNTERS_DEQ_CONFIRM,
++ },
++ /* FM:DMA statistics */
++ {
++ .stat_name = "cmq_not_empty",
++ .stat_counter = FM_DMA_COUNTERS_CMQ_NOT_EMPTY,
++ },
++ {
++ .stat_name = "bus_error",
++ .stat_counter = FM_DMA_COUNTERS_BUS_ERROR,
++ },
++ {
++ .stat_name = "read_buf_ecc_error",
++ .stat_counter = FM_DMA_COUNTERS_READ_BUF_ECC_ERROR,
++ },
++ {
++ .stat_name = "write_buf_ecc_sys_error",
++ .stat_counter = FM_DMA_COUNTERS_WRITE_BUF_ECC_SYS_ERROR,
++ },
++ {
++ .stat_name = "write_buf_ecc_fm_error",
++ .stat_counter = FM_DMA_COUNTERS_WRITE_BUF_ECC_FM_ERROR,
++ },
++ /* FM:PCD statistics */
++ {
++ .stat_name = "pcd_kg_total",
++ .stat_counter = e_FM_PCD_KG_COUNTERS_TOTAL,
++ },
++ {
++ .stat_name = "pcd_plcr_yellow",
++ .stat_counter = e_FM_PCD_PLCR_COUNTERS_YELLOW,
++ },
++ {
++ .stat_name = "pcd_plcr_red",
++ .stat_counter = e_FM_PCD_PLCR_COUNTERS_RED,
++ },
++ {
++ .stat_name = "pcd_plcr_recolored_to_red",
++ .stat_counter = e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED,
++ },
++ {
++ .stat_name = "pcd_plcr_recolored_to_yellow",
++ .stat_counter = e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW,
++ },
++ {
++ .stat_name = "pcd_plcr_total",
++ .stat_counter = e_FM_PCD_PLCR_COUNTERS_TOTAL,
++ },
++ {
++ .stat_name = "pcd_plcr_length_mismatch",
++ .stat_counter = e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH,
++ },
++ {
++ .stat_name = "pcd_prs_parse_dispatch",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH,
++ },
++ {
++ .stat_name = "pcd_prs_l2_parse_result_returned",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED,
++ },
++ {
++ .stat_name = "pcd_prs_l3_parse_result_returned",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED,
++ },
++ {
++ .stat_name = "pcd_prs_l4_parse_result_returned",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED,
++ },
++ {
++ .stat_name = "pcd_prs_shim_parse_result_returned",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED,
++ },
++ {
++ .stat_name = "pcd_prs_l2_parse_result_returned_with_err",
++ .stat_counter =
++ e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR,
++ },
++ {
++ .stat_name = "pcd_prs_l3_parse_result_returned_with_err",
++ .stat_counter =
++ e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR,
++ },
++ {
++ .stat_name = "pcd_prs_l4_parse_result_returned_with_err",
++ .stat_counter =
++ e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR,
++ },
++ {
++ .stat_name = "pcd_prs_shim_parse_result_returned_with_err",
++ .stat_counter =
++ e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR,
++ },
++ {
++ .stat_name = "pcd_prs_soft_prs_cycles",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES,
++ },
++ {
++ .stat_name = "pcd_prs_soft_prs_stall_cycles",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES,
++ },
++ {
++ .stat_name = "pcd_prs_hard_prs_cycle_incl_stall_cycles",
++ .stat_counter =
++ e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES,
++ },
++ {
++ .stat_name = "pcd_prs_muram_read_cycles",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES,
++ },
++ {
++ .stat_name = "pcd_prs_muram_read_stall_cycles",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES,
++ },
++ {
++ .stat_name = "pcd_prs_muram_write_cycles",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES,
++ },
++ {
++ .stat_name = "pcd_prs_muram_write_stall_cycles",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES,
++ },
++ {
++ .stat_name = "pcd_prs_fpm_command_stall_cycles",
++ .stat_counter = e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES,
++ },
++ {}
++};
++
++
++static ssize_t show_fm_risc_load(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++ unsigned long flags;
++	int m = 0;
++	int err = 0;
++	unsigned n = 0;
++	t_FmCtrlMon util;
++	uint8_t i = 0;
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
++ return -EIO;
++
++ local_irq_save(flags);
++
++	/* Sample the FMan controller monitor over a 1 s window to compute the per-RISC load */
++ FM_CtrlMonStart(p_wrp_fm_dev->h_Dev);
++ msleep(1000);
++ FM_CtrlMonStop(p_wrp_fm_dev->h_Dev);
++
++ for (i = 0; i < FM_NUM_OF_CTRL; i++) {
++ err |= FM_CtrlMonGetCounters(p_wrp_fm_dev->h_Dev, i, &util);
++ m = snprintf(&buf[n],PAGE_SIZE,"\tRisc%u: util-%u%%, efficiency-%u%%\n",
++ i, util.percentCnt[0], util.percentCnt[1]);
++		n = m + n;
++ }
++
++ local_irq_restore(flags);
++
++ return n;
++}
++
++/* FM stats and register dumps via sysfs */
++static ssize_t show_fm_dma_stats(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++ t_FmDmaStatus dma_status;
++ unsigned long flags = 0;
++ unsigned n = 0;
++ uint8_t counter_value = 0, counter = 0;
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
++ return -EIO;
++
++ counter = fm_find_statistic_counter_by_name(
++ attr->attr.name,
++ fm_sysfs_stats, NULL);
++
++ local_irq_save(flags);
++
++ memset(&dma_status, 0, sizeof(dma_status));
++ FM_GetDmaStatus(p_wrp_fm_dev->h_Dev, &dma_status);
++
++ switch (counter) {
++ case FM_DMA_COUNTERS_CMQ_NOT_EMPTY:
++ counter_value = dma_status.cmqNotEmpty;
++ break;
++ case FM_DMA_COUNTERS_BUS_ERROR:
++ counter_value = dma_status.busError;
++ break;
++ case FM_DMA_COUNTERS_READ_BUF_ECC_ERROR:
++ counter_value = dma_status.readBufEccError;
++ break;
++ case FM_DMA_COUNTERS_WRITE_BUF_ECC_SYS_ERROR:
++ counter_value = dma_status.writeBufEccSysError;
++ break;
++ case FM_DMA_COUNTERS_WRITE_BUF_ECC_FM_ERROR:
++ counter_value = dma_status.writeBufEccFmError;
++ break;
++ default:
++ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
++ __func__);
++ break;
++	}
++
++ n = snprintf(buf, PAGE_SIZE, "\tFM %u counter: %c\n",
++ p_wrp_fm_dev->id, counter_value ? 'T' : 'F');
++
++ local_irq_restore(flags);
++
++ return n;
++}
++
++static ssize_t show_fm_stats(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++ unsigned long flags = 0;
++ unsigned n = 0, cnt_e = 0;
++ uint32_t cnt_val;
++ int err;
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
++ return -EIO;
++
++ cnt_e = fm_find_statistic_counter_by_name(
++ attr->attr.name,
++ fm_sysfs_stats, NULL);
++
++ err = fm_get_counter(p_wrp_fm_dev->h_Dev,
++ (e_FmCounters) cnt_e, &cnt_val);
++
++ if (err)
++ return err;
++
++ local_irq_save(flags);
++
++ n = snprintf(buf, PAGE_SIZE, "\tFM %d counter: %d\n",
++ p_wrp_fm_dev->id, cnt_val);
++
++ local_irq_restore(flags);
++
++ return n;
++}
++
++static ssize_t show_fm_muram_free_sz(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++ unsigned long flags = 0;
++ unsigned n = 0;
++ uint64_t muram_free_size = 0;
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
++ return -EIO;
++
++ muram_free_size = FM_MURAM_GetFreeMemSize(p_wrp_fm_dev->h_MuramDev);
++
++ local_irq_save(flags);
++
++	n = snprintf(buf, PAGE_SIZE, "\tFM %d muram_free_size: %llu\n",
++ p_wrp_fm_dev->id, muram_free_size);
++
++ local_irq_restore(flags);
++
++ return n;
++}
++
++static ssize_t show_fm_ctrl_code_ver(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++ unsigned long flags = 0;
++ unsigned n = 0;
++ t_FmCtrlCodeRevisionInfo rv_info;
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
++ return -EIO;
++
++ FM_GetFmanCtrlCodeRevision((t_Fm *)p_wrp_fm_dev->h_Dev, &rv_info);
++
++ local_irq_save(flags);
++
++ FM_DMP_LN(buf, n, "- FM %d ctrl code pkg info:\n", p_wrp_fm_dev->id);
++ FM_DMP_LN(buf, n, "Package rev: %d\n", rv_info.packageRev);
++ FM_DMP_LN(buf, n, "major rev: %d\n", rv_info.majorRev);
++ FM_DMP_LN(buf, n, "minor rev: %d\n", rv_info.minorRev);
++
++ local_irq_restore(flags);
++
++ return n;
++}
++
++static ssize_t show_fm_pcd_stats(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++ unsigned long flags = 0;
++ unsigned n = 0, counter = 0;
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev ||
++ !p_wrp_fm_dev->h_PcdDev)
++ return -EIO;
++
++ counter = fm_find_statistic_counter_by_name(
++ attr->attr.name,
++ fm_sysfs_stats, NULL);
++
++ local_irq_save(flags);
++
++ n = snprintf(buf, PAGE_SIZE, "\tFM %d counter: %d\n",
++ p_wrp_fm_dev->id,
++ FM_PCD_GetCounter(p_wrp_fm_dev->h_PcdDev,
++ (e_FmPcdCounters) counter));
++
++ local_irq_restore(flags);
++
++ return n;
++}
++
++static ssize_t show_fm_tnum_dbg(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ if (!p_wrp_fm_dev->active)
++ return -EIO;
++ else {
++ int tn_s;
++
++ if (!sscanf(attr->attr.name, "tnum_dbg_%d", &tn_s))
++ return -EINVAL;
++
++ n = fm_dump_tnum_dbg(p_wrp_fm_dev->h_Dev,
++ tn_s, tn_s + 15, buf, n);
++ }
++ local_irq_restore(flags);
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++static ssize_t show_fm_cls_plan(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ n = snprintf(buf, PAGE_SIZE, "\n FM-KG classification plan dump.\n");
++
++ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
++ return -EIO;
++ else {
++ int cpn;
++
++ if (!sscanf(attr->attr.name, "cls_plan_%d", &cpn))
++ return -EINVAL;
++
++ n = fm_dump_cls_plan(p_wrp_fm_dev->h_PcdDev, cpn, buf, n);
++ }
++ local_irq_restore(flags);
++#else
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++static ssize_t show_fm_profiles(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ n = snprintf(buf, PAGE_SIZE, "FM policer profile dump.\n");
++
++ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
++ return -EIO;
++ else {
++ int pn;
++
++ if (!sscanf(attr->attr.name, "profile_%d", &pn))
++ return -EINVAL;
++
++ n = fm_profile_dump_regs(p_wrp_fm_dev->h_PcdDev, pn, buf, n);
++ }
++ local_irq_restore(flags);
++#else
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++static ssize_t show_fm_schemes(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ n = snprintf(buf, PAGE_SIZE, "FM-KG driver schemes dump.\n");
++
++ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
++ return -EIO;
++ else {
++ int sn;
++
++ if (!sscanf(attr->attr.name, "scheme_%d", &sn))
++ return -EINVAL;
++
++ n = fm_dump_scheme(p_wrp_fm_dev->h_PcdDev, sn, buf, n);
++ }
++ local_irq_restore(flags);
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++/* FM */
++static DEVICE_ATTR(enq_total_frame, S_IRUGO, show_fm_stats, NULL);
++static DEVICE_ATTR(deq_total_frame, S_IRUGO, show_fm_stats, NULL);
++static DEVICE_ATTR(fm_risc_load_val, S_IRUGO, show_fm_risc_load, NULL);
++static DEVICE_ATTR(deq_0, S_IRUGO, show_fm_stats, NULL);
++static DEVICE_ATTR(deq_1, S_IRUGO, show_fm_stats, NULL);
++static DEVICE_ATTR(deq_2, S_IRUGO, show_fm_stats, NULL);
++static DEVICE_ATTR(deq_3, S_IRUGO, show_fm_stats, NULL);
++static DEVICE_ATTR(deq_from_default, S_IRUGO, show_fm_stats, NULL);
++static DEVICE_ATTR(deq_from_context, S_IRUGO, show_fm_stats, NULL);
++static DEVICE_ATTR(deq_from_fd, S_IRUGO, show_fm_stats, NULL);
++static DEVICE_ATTR(deq_confirm, S_IRUGO, show_fm_stats, NULL);
++/* FM:DMA */
++static DEVICE_ATTR(cmq_not_empty, S_IRUGO, show_fm_dma_stats, NULL);
++static DEVICE_ATTR(bus_error, S_IRUGO, show_fm_dma_stats, NULL);
++static DEVICE_ATTR(read_buf_ecc_error, S_IRUGO, show_fm_dma_stats, NULL);
++static DEVICE_ATTR(write_buf_ecc_sys_error, S_IRUGO, show_fm_dma_stats, NULL);
++static DEVICE_ATTR(write_buf_ecc_fm_error, S_IRUGO, show_fm_dma_stats, NULL);
++/* FM:PCD */
++static DEVICE_ATTR(pcd_kg_total, S_IRUGO, show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_plcr_yellow, S_IRUGO, show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_plcr_red, S_IRUGO, show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_plcr_recolored_to_red, S_IRUGO, show_fm_pcd_stats,
++ NULL);
++static DEVICE_ATTR(pcd_plcr_recolored_to_yellow, S_IRUGO, show_fm_pcd_stats,
++ NULL);
++static DEVICE_ATTR(pcd_plcr_total, S_IRUGO, show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_plcr_length_mismatch, S_IRUGO, show_fm_pcd_stats,
++ NULL);
++static DEVICE_ATTR(pcd_prs_parse_dispatch, S_IRUGO, show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_l2_parse_result_returned, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_l3_parse_result_returned, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_l4_parse_result_returned, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_shim_parse_result_returned, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_l2_parse_result_returned_with_err, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_l3_parse_result_returned_with_err, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_l4_parse_result_returned_with_err, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_shim_parse_result_returned_with_err, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_soft_prs_cycles, S_IRUGO, show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_soft_prs_stall_cycles, S_IRUGO, show_fm_pcd_stats,
++ NULL);
++static DEVICE_ATTR(pcd_prs_hard_prs_cycle_incl_stall_cycles, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_muram_read_cycles, S_IRUGO, show_fm_pcd_stats,
++ NULL);
++static DEVICE_ATTR(pcd_prs_muram_read_stall_cycles, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_muram_write_cycles, S_IRUGO, show_fm_pcd_stats,
++ NULL);
++static DEVICE_ATTR(pcd_prs_muram_write_stall_cycles, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++static DEVICE_ATTR(pcd_prs_fpm_command_stall_cycles, S_IRUGO,
++ show_fm_pcd_stats, NULL);
++
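++/* Each tnum_dbg_<N> attribute below dumps the 16-TNUM window starting at
++ * N (show_fm_tnum_dbg() parses N back out of the attribute name). */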
++static DEVICE_ATTR(tnum_dbg_0, S_IRUGO, show_fm_tnum_dbg, NULL);
++static DEVICE_ATTR(tnum_dbg_16, S_IRUGO, show_fm_tnum_dbg, NULL);
++static DEVICE_ATTR(tnum_dbg_32, S_IRUGO, show_fm_tnum_dbg, NULL);
++static DEVICE_ATTR(tnum_dbg_48, S_IRUGO, show_fm_tnum_dbg, NULL);
++static DEVICE_ATTR(tnum_dbg_64, S_IRUGO, show_fm_tnum_dbg, NULL);
++static DEVICE_ATTR(tnum_dbg_80, S_IRUGO, show_fm_tnum_dbg, NULL);
++static DEVICE_ATTR(tnum_dbg_96, S_IRUGO, show_fm_tnum_dbg, NULL);
++static DEVICE_ATTR(tnum_dbg_112, S_IRUGO, show_fm_tnum_dbg, NULL);
++
++static DEVICE_ATTR(cls_plan_0, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_1, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_2, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_3, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_4, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_5, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_6, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_7, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_8, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_9, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_10, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_11, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_12, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_13, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_14, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_15, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_16, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_17, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_18, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_19, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_20, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_21, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_22, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_23, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_24, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_25, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_26, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_27, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_28, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_29, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_30, S_IRUGO, show_fm_cls_plan, NULL);
++static DEVICE_ATTR(cls_plan_31, S_IRUGO, show_fm_cls_plan, NULL);
++
++static DEVICE_ATTR(profile_0, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_1, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_2, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_3, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_4, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_5, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_6, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_7, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_8, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_9, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_10, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_11, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_12, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_13, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_14, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_15, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_16, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_17, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_18, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_19, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_20, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_21, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_22, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_23, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_24, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_25, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_26, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_27, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_28, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_29, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_30, S_IRUGO, show_fm_profiles, NULL);
++static DEVICE_ATTR(profile_31, S_IRUGO, show_fm_profiles, NULL);
++
++static DEVICE_ATTR(scheme_0, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_1, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_2, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_3, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_4, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_5, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_6, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_7, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_8, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_9, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_10, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_11, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_12, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_13, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_14, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_15, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_16, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_17, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_18, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_19, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_20, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_21, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_22, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_23, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_24, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_25, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_26, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_27, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_28, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_29, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_30, S_IRUGO, show_fm_schemes, NULL);
++static DEVICE_ATTR(scheme_31, S_IRUGO, show_fm_schemes, NULL);
++
++
++static struct attribute *fm_dev_stats_attributes[] = {
++ &dev_attr_enq_total_frame.attr,
++ &dev_attr_deq_total_frame.attr,
++ &dev_attr_deq_0.attr,
++ &dev_attr_deq_1.attr,
++ &dev_attr_deq_2.attr,
++ &dev_attr_deq_3.attr,
++ &dev_attr_deq_from_default.attr,
++ &dev_attr_deq_from_context.attr,
++ &dev_attr_deq_from_fd.attr,
++ &dev_attr_deq_confirm.attr,
++ &dev_attr_cmq_not_empty.attr,
++ &dev_attr_bus_error.attr,
++ &dev_attr_read_buf_ecc_error.attr,
++ &dev_attr_write_buf_ecc_sys_error.attr,
++ &dev_attr_write_buf_ecc_fm_error.attr,
++ &dev_attr_pcd_kg_total.attr,
++ &dev_attr_pcd_plcr_yellow.attr,
++ &dev_attr_pcd_plcr_red.attr,
++ &dev_attr_pcd_plcr_recolored_to_red.attr,
++ &dev_attr_pcd_plcr_recolored_to_yellow.attr,
++ &dev_attr_pcd_plcr_total.attr,
++ &dev_attr_pcd_plcr_length_mismatch.attr,
++ &dev_attr_pcd_prs_parse_dispatch.attr,
++ &dev_attr_pcd_prs_l2_parse_result_returned.attr,
++ &dev_attr_pcd_prs_l3_parse_result_returned.attr,
++ &dev_attr_pcd_prs_l4_parse_result_returned.attr,
++ &dev_attr_pcd_prs_shim_parse_result_returned.attr,
++ &dev_attr_pcd_prs_l2_parse_result_returned_with_err.attr,
++ &dev_attr_pcd_prs_l3_parse_result_returned_with_err.attr,
++ &dev_attr_pcd_prs_l4_parse_result_returned_with_err.attr,
++ &dev_attr_pcd_prs_shim_parse_result_returned_with_err.attr,
++ &dev_attr_pcd_prs_soft_prs_cycles.attr,
++ &dev_attr_pcd_prs_soft_prs_stall_cycles.attr,
++ &dev_attr_pcd_prs_hard_prs_cycle_incl_stall_cycles.attr,
++ &dev_attr_pcd_prs_muram_read_cycles.attr,
++ &dev_attr_pcd_prs_muram_read_stall_cycles.attr,
++ &dev_attr_pcd_prs_muram_write_cycles.attr,
++ &dev_attr_pcd_prs_muram_write_stall_cycles.attr,
++ &dev_attr_pcd_prs_fpm_command_stall_cycles.attr,
++ NULL
++};
++
++static struct attribute *fm_dev_tnums_dbg_attributes[] = {
++ &dev_attr_tnum_dbg_0.attr,
++ &dev_attr_tnum_dbg_16.attr,
++ &dev_attr_tnum_dbg_32.attr,
++ &dev_attr_tnum_dbg_48.attr,
++ &dev_attr_tnum_dbg_64.attr,
++ &dev_attr_tnum_dbg_80.attr,
++ &dev_attr_tnum_dbg_96.attr,
++ &dev_attr_tnum_dbg_112.attr,
++ NULL
++};
++
++static struct attribute *fm_dev_cls_plans_attributes[] = {
++ &dev_attr_cls_plan_0.attr,
++ &dev_attr_cls_plan_1.attr,
++ &dev_attr_cls_plan_2.attr,
++ &dev_attr_cls_plan_3.attr,
++ &dev_attr_cls_plan_4.attr,
++ &dev_attr_cls_plan_5.attr,
++ &dev_attr_cls_plan_6.attr,
++ &dev_attr_cls_plan_7.attr,
++ &dev_attr_cls_plan_8.attr,
++ &dev_attr_cls_plan_9.attr,
++ &dev_attr_cls_plan_10.attr,
++ &dev_attr_cls_plan_11.attr,
++ &dev_attr_cls_plan_12.attr,
++ &dev_attr_cls_plan_13.attr,
++ &dev_attr_cls_plan_14.attr,
++ &dev_attr_cls_plan_15.attr,
++ &dev_attr_cls_plan_16.attr,
++ &dev_attr_cls_plan_17.attr,
++ &dev_attr_cls_plan_18.attr,
++ &dev_attr_cls_plan_19.attr,
++ &dev_attr_cls_plan_20.attr,
++ &dev_attr_cls_plan_21.attr,
++ &dev_attr_cls_plan_22.attr,
++ &dev_attr_cls_plan_23.attr,
++ &dev_attr_cls_plan_24.attr,
++ &dev_attr_cls_plan_25.attr,
++ &dev_attr_cls_plan_26.attr,
++ &dev_attr_cls_plan_27.attr,
++ &dev_attr_cls_plan_28.attr,
++ &dev_attr_cls_plan_29.attr,
++ &dev_attr_cls_plan_30.attr,
++ &dev_attr_cls_plan_31.attr,
++ NULL
++};
++
++static struct attribute *fm_dev_profiles_attributes[] = {
++ &dev_attr_profile_0.attr,
++ &dev_attr_profile_1.attr,
++ &dev_attr_profile_2.attr,
++ &dev_attr_profile_3.attr,
++ &dev_attr_profile_4.attr,
++ &dev_attr_profile_5.attr,
++ &dev_attr_profile_6.attr,
++ &dev_attr_profile_7.attr,
++ &dev_attr_profile_8.attr,
++ &dev_attr_profile_9.attr,
++ &dev_attr_profile_10.attr,
++ &dev_attr_profile_11.attr,
++ &dev_attr_profile_12.attr,
++ &dev_attr_profile_13.attr,
++ &dev_attr_profile_14.attr,
++ &dev_attr_profile_15.attr,
++ &dev_attr_profile_16.attr,
++ &dev_attr_profile_17.attr,
++ &dev_attr_profile_18.attr,
++ &dev_attr_profile_19.attr,
++ &dev_attr_profile_20.attr,
++ &dev_attr_profile_21.attr,
++ &dev_attr_profile_22.attr,
++ &dev_attr_profile_23.attr,
++ &dev_attr_profile_24.attr,
++ &dev_attr_profile_25.attr,
++ &dev_attr_profile_26.attr,
++ &dev_attr_profile_27.attr,
++ &dev_attr_profile_28.attr,
++ &dev_attr_profile_29.attr,
++ &dev_attr_profile_30.attr,
++ &dev_attr_profile_31.attr,
++ NULL
++};
++
++static struct attribute *fm_dev_schemes_attributes[] = {
++ &dev_attr_scheme_0.attr,
++ &dev_attr_scheme_1.attr,
++ &dev_attr_scheme_2.attr,
++ &dev_attr_scheme_3.attr,
++ &dev_attr_scheme_4.attr,
++ &dev_attr_scheme_5.attr,
++ &dev_attr_scheme_6.attr,
++ &dev_attr_scheme_7.attr,
++ &dev_attr_scheme_8.attr,
++ &dev_attr_scheme_9.attr,
++ &dev_attr_scheme_10.attr,
++ &dev_attr_scheme_11.attr,
++ &dev_attr_scheme_12.attr,
++ &dev_attr_scheme_13.attr,
++ &dev_attr_scheme_14.attr,
++ &dev_attr_scheme_15.attr,
++ &dev_attr_scheme_16.attr,
++ &dev_attr_scheme_17.attr,
++ &dev_attr_scheme_18.attr,
++ &dev_attr_scheme_19.attr,
++ &dev_attr_scheme_20.attr,
++ &dev_attr_scheme_21.attr,
++ &dev_attr_scheme_22.attr,
++ &dev_attr_scheme_23.attr,
++ &dev_attr_scheme_24.attr,
++ &dev_attr_scheme_25.attr,
++ &dev_attr_scheme_26.attr,
++ &dev_attr_scheme_27.attr,
++ &dev_attr_scheme_28.attr,
++ &dev_attr_scheme_29.attr,
++ &dev_attr_scheme_30.attr,
++ &dev_attr_scheme_31.attr,
++ NULL
++};
++
++static const struct attribute_group fm_dev_stats_attr_grp = {
++ .name = "statistics",
++ .attrs = fm_dev_stats_attributes
++};
++
++static const struct attribute_group fm_dev_tnums_dbg_attr_grp = {
++ .name = "tnums_dbg",
++ .attrs = fm_dev_tnums_dbg_attributes
++};
++
++static const struct attribute_group fm_dev_cls_plans_attr_grp = {
++ .name = "cls_plans",
++ .attrs = fm_dev_cls_plans_attributes
++};
++
++static const struct attribute_group fm_dev_schemes_attr_grp = {
++ .name = "schemes",
++ .attrs = fm_dev_schemes_attributes
++};
++
++static const struct attribute_group fm_dev_profiles_attr_grp = {
++ .name = "profiles",
++ .attrs = fm_dev_profiles_attributes
++};
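++
++/*
++ * Each group above becomes a subdirectory of the FM device's sysfs
++ * node ("statistics", "tnums_dbg", "cls_plans", "schemes" and
++ * "profiles"), with one read-only file per attribute.
++ */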
++
++static ssize_t show_fm_regs(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ n = snprintf(buf, PAGE_SIZE, "FM driver registers dump.\n");
++
++	if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev) {
++		local_irq_restore(flags);
++		return -EIO;
++	}
++	n = fm_dump_regs(p_wrp_fm_dev->h_Dev, buf, n);
++
++ local_irq_restore(flags);
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++static ssize_t show_fm_kg_pe_regs(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ n = snprintf(buf, PAGE_SIZE,
++ "\n FM-KG Port Partition Config registers dump.\n");
++
++	if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev) {
++		local_irq_restore(flags);
++		return -EIO;
++	}
++	n = fm_kg_pe_dump_regs(p_wrp_fm_dev->h_PcdDev, buf, n);
++
++ local_irq_restore(flags);
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++static ssize_t show_fm_kg_regs(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ n = snprintf(buf, PAGE_SIZE, "FM-KG registers dump.\n");
++
++	if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev) {
++		local_irq_restore(flags);
++		return -EIO;
++	}
++	n = fm_kg_dump_regs(p_wrp_fm_dev->h_PcdDev, buf, n);
++
++ local_irq_restore(flags);
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++
++static ssize_t show_fm_fpm_regs(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ n = snprintf(buf, PAGE_SIZE, "FM-FPM registers dump.\n");
++
++	if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev) {
++		local_irq_restore(flags);
++		return -EIO;
++	}
++	n = fm_fpm_dump_regs(p_wrp_fm_dev->h_Dev, buf, n);
++
++ local_irq_restore(flags);
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++static ssize_t show_prs_regs(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++	n = snprintf(buf, PAGE_SIZE, "FM Parser registers dump.\n");
++
++	if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev) {
++		local_irq_restore(flags);
++		return -EIO;
++	}
++	n = fm_prs_dump_regs(p_wrp_fm_dev->h_PcdDev, buf, n);
++
++ local_irq_restore(flags);
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++static ssize_t show_plcr_regs(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return -EINVAL;
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE, "FM Policer registers dump.\n");
++
++	if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev) {
++		local_irq_restore(flags);
++		return -EIO;
++	}
++	n = fm_plcr_dump_regs(p_wrp_fm_dev->h_PcdDev, buf, n);
++
++ local_irq_restore(flags);
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++
++#endif /* (defined(DEBUG_ERRORS) && ... */
++
++ return n;
++}
++
++static DEVICE_ATTR(fm_regs, S_IRUGO, show_fm_regs, NULL);
++static DEVICE_ATTR(fm_fpm_regs, S_IRUGO, show_fm_fpm_regs, NULL);
++static DEVICE_ATTR(fm_kg_regs, S_IRUGO, show_fm_kg_regs, NULL);
++static DEVICE_ATTR(fm_kg_pe_regs, S_IRUGO, show_fm_kg_pe_regs, NULL);
++static DEVICE_ATTR(fm_plcr_regs, S_IRUGO, show_plcr_regs, NULL);
++static DEVICE_ATTR(fm_prs_regs, S_IRUGO, show_prs_regs, NULL);
++static DEVICE_ATTR(fm_muram_free_size, S_IRUGO, show_fm_muram_free_sz, NULL);
++static DEVICE_ATTR(fm_ctrl_code_ver, S_IRUGO, show_fm_ctrl_code_ver, NULL);
++
++int fm_sysfs_create(struct device *dev)
++{
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++
++ if (dev == NULL)
++ return -EIO;
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++
++	/* store these so they can be removed when the module is disabled */
++ p_wrp_fm_dev->dev_attr_regs = &dev_attr_fm_regs;
++ p_wrp_fm_dev->dev_attr_risc_load = &dev_attr_fm_risc_load_val;
++ p_wrp_fm_dev->dev_fm_fpm_attr_regs = &dev_attr_fm_fpm_regs;
++ p_wrp_fm_dev->dev_fm_kg_attr_regs = &dev_attr_fm_kg_regs;
++ p_wrp_fm_dev->dev_fm_kg_pe_attr_regs = &dev_attr_fm_kg_pe_regs;
++ p_wrp_fm_dev->dev_plcr_attr_regs = &dev_attr_fm_plcr_regs;
++ p_wrp_fm_dev->dev_prs_attr_regs = &dev_attr_fm_prs_regs;
++ p_wrp_fm_dev->dev_attr_muram_free_size = &dev_attr_fm_muram_free_size;
++ p_wrp_fm_dev->dev_attr_fm_ctrl_code_ver = &dev_attr_fm_ctrl_code_ver;
++
++ /* Create sysfs statistics group for FM module */
++ if (sysfs_create_group(&dev->kobj, &fm_dev_stats_attr_grp) != 0)
++ return -EIO;
++
++ if (sysfs_create_group(&dev->kobj, &fm_dev_schemes_attr_grp) != 0)
++ return -EIO;
++
++ if (sysfs_create_group(&dev->kobj, &fm_dev_profiles_attr_grp) != 0)
++ return -EIO;
++
++ if (sysfs_create_group(&dev->kobj, &fm_dev_tnums_dbg_attr_grp) != 0)
++ return -EIO;
++
++ if (sysfs_create_group(&dev->kobj, &fm_dev_cls_plans_attr_grp) != 0)
++ return -EIO;
++
++ /* Registers dump entry - in future will be moved to debugfs */
++ if (device_create_file(dev, &dev_attr_fm_regs) != 0)
++ return -EIO;
++
++ if (device_create_file(dev, &dev_attr_fm_risc_load_val) != 0)
++ return -EIO;
++
++ if (device_create_file(dev, &dev_attr_fm_fpm_regs) != 0)
++ return -EIO;
++
++ if (device_create_file(dev, &dev_attr_fm_kg_regs) != 0)
++ return -EIO;
++
++ if (device_create_file(dev, &dev_attr_fm_kg_pe_regs) != 0)
++ return -EIO;
++
++ if (device_create_file(dev, &dev_attr_fm_plcr_regs) != 0)
++ return -EIO;
++
++ if (device_create_file(dev, &dev_attr_fm_prs_regs) != 0)
++ return -EIO;
++
++ /* muram free size */
++ if (device_create_file(dev, &dev_attr_fm_muram_free_size) != 0)
++ return -EIO;
++
++ /* fm ctrl code version */
++ if (device_create_file(dev, &dev_attr_fm_ctrl_code_ver) != 0)
++ return -EIO;
++
++ return 0;
++}
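++
++/*
++ * Illustrative use from user space (a sketch - the parent directory
++ * is an assumption, it depends on the FM platform device name):
++ *
++ *	cat .../statistics/enq_total_frame
++ *	cat .../schemes/scheme_0
++ *	cat .../fm_regs
++ */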
++
++void fm_sysfs_destroy(struct device *dev)
++{
++ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
++
++ if (WARN_ON(dev == NULL))
++ return;
++
++ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_wrp_fm_dev == NULL))
++ return;
++
++ sysfs_remove_group(&dev->kobj, &fm_dev_stats_attr_grp);
++ sysfs_remove_group(&dev->kobj, &fm_dev_schemes_attr_grp);
++ sysfs_remove_group(&dev->kobj, &fm_dev_profiles_attr_grp);
++ sysfs_remove_group(&dev->kobj, &fm_dev_cls_plans_attr_grp);
++ sysfs_remove_group(&dev->kobj, &fm_dev_tnums_dbg_attr_grp);
++	device_remove_file(dev, p_wrp_fm_dev->dev_attr_regs);
++	device_remove_file(dev, p_wrp_fm_dev->dev_attr_risc_load);
++ device_remove_file(dev, p_wrp_fm_dev->dev_fm_fpm_attr_regs);
++ device_remove_file(dev, p_wrp_fm_dev->dev_fm_kg_attr_regs);
++ device_remove_file(dev, p_wrp_fm_dev->dev_fm_kg_pe_attr_regs);
++ device_remove_file(dev, p_wrp_fm_dev->dev_plcr_attr_regs);
++ device_remove_file(dev, p_wrp_fm_dev->dev_prs_attr_regs);
++ device_remove_file(dev, p_wrp_fm_dev->dev_attr_muram_free_size);
++ device_remove_file(dev, p_wrp_fm_dev->dev_attr_fm_ctrl_code_ver);
++}
++
++int fm_dump_regs(void *h_fm, char *buf, int nn)
++{
++ t_Fm *p_Fm = (t_Fm *)h_fm;
++ uint8_t i = 0;
++ int n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ FM_DMP_TITLE(buf, n, p_Fm->p_FmDmaRegs, "FM-DMA Regs");
++
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmsr);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmemsr);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmmr);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmtr);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmhy);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmsetr);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmtah);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmtal);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmtcid);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmra);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmrd);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmwcr);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmebcr);
++ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmdcr);
++
++ FM_DMP_TITLE(buf, n, &p_Fm->p_FmDmaRegs->fmdmplr, "fmdmplr");
++
++ for (i = 0; i < FM_MAX_NUM_OF_HW_PORT_IDS / 2 ; ++i)
++ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmDmaRegs->fmdmplr[i]);
++
++ FM_DMP_TITLE(buf, n, p_Fm->p_FmBmiRegs, "FM-BMI COMMON Regs");
++ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_init);
++ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_cfg1);
++ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_cfg2);
++ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_ievr);
++ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_ier);
++
++ FM_DMP_TITLE(buf, n, &p_Fm->p_FmBmiRegs->fmbm_arb, "fmbm_arb");
++ for (i = 0; i < 8 ; ++i)
++ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmBmiRegs->fmbm_arb[i]);
++
++ FM_DMP_TITLE(buf, n, p_Fm->p_FmQmiRegs, "FM-QMI COMMON Regs");
++ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_gc);
++ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_eie);
++ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_eien);
++ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_eif);
++ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_ie);
++ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_ien);
++ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_if);
++ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_gs);
++ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_etfc);
++
++ return n;
++}
++
++int fm_dump_tnum_dbg(void *h_fm, int tn_s, int tn_e, char *buf, int nn)
++{
++ t_Fm *p_Fm = (t_Fm *)h_fm;
++ uint8_t i, j = 0;
++ int n = nn;
++
++ FM_DMP_TITLE(buf, n, NULL, "Tnums and Tnum dbg regs %d - %d",
++ tn_s, tn_e);
++
++ iowrite32be(tn_s << 24, &p_Fm->p_FmFpmRegs->fmfp_dra);
++
++ mb();
++
++ for (j = tn_s; j <= tn_e; j++) {
++ FM_DMP_LN(buf, n, "> fmfp_ts[%d]\n", j);
++ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmFpmRegs->fmfp_ts[j]);
++ FM_DMP_V32(buf, n, p_Fm->p_FmFpmRegs, fmfp_dra);
++ FM_DMP_LN(buf, n, "> fmfp_drd[0-3]\n");
++
++ for (i = 0; i < 4 ; ++i)
++ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmFpmRegs->fmfp_drd[i]);
++
++ FM_DMP_LN(buf, n, "\n");
++
++ }
++
++ return n;
++}
++
++int fm_dump_cls_plan(void *h_fm_pcd, int cpn, char *buf, int nn)
++{
++ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
++ int i = 0;
++ uint32_t tmp;
++ unsigned long i_flg;
++ int n = nn;
++ u_FmPcdKgIndirectAccessRegs *idac;
++ spinlock_t *p_lk;
++
++ p_lk = (spinlock_t *)p_pcd->p_FmPcdKg->h_HwSpinlock;
++ idac = p_pcd->p_FmPcdKg->p_IndirectAccessRegs;
++
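++	/*
++	 * KeyGen tables are read through a single indirect-access window:
++	 * write a GO | READ command for the wanted entry to the action
++	 * register, wait for GO to clear, then pick the result up from
++	 * the shadow registers. The spinlock serializes users of that
++	 * one window.
++	 */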
++ spin_lock_irqsave(p_lk, i_flg);
++
++ /* Read ClsPlan Block Action Regs */
++ tmp = (uint32_t)(FM_KG_KGAR_GO |
++ FM_KG_KGAR_READ |
++ FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
++ DUMMY_PORT_ID |
++ ((uint32_t)cpn << FM_PCD_KG_KGAR_NUM_SHIFT) |
++ FM_PCD_KG_KGAR_WSEL_MASK);
++
++ if (fman_kg_write_ar_wait(p_pcd->p_FmPcdKg->p_FmPcdKgRegs, tmp)) {
++		FM_DMP_LN(buf, nn, "Keygen cls plan access violation");
++ spin_unlock_irqrestore(p_lk, i_flg);
++ return nn;
++ }
++ FM_DMP_TITLE(buf, n, &idac->clsPlanRegs,
++ "ClsPlan %d Indirect Access Regs", cpn);
++
++ for (i = 0; i < 8; i++)
++ FM_DMP_MEM_32(buf, n, &idac->clsPlanRegs.kgcpe[i]);
++
++ spin_unlock_irqrestore(p_lk, i_flg);
++
++ return n;
++}
++
++int fm_profile_dump_regs(void *h_fm_pcd, int ppn, char *buf, int nn)
++{
++ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
++ t_FmPcdPlcrProfileRegs *p_prof_regs;
++ t_FmPcdPlcrRegs *p_plcr_regs;
++ t_FmPcdPlcr *p_plcr;
++ uint32_t tmp;
++ unsigned long i_flg;
++ int n = nn;
++ int toc = 10;
++ spinlock_t *p_lk;
++
++ p_plcr = p_pcd->p_FmPcdPlcr;
++ p_prof_regs = &p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->profileRegs;
++ p_plcr_regs = p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
++
++ p_lk = (spinlock_t *)((t_FmPcdPlcr *)p_plcr)->h_HwSpinlock;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_TITLE(buf, n, p_plcr_regs, "FM-PCD policer-profile regs");
++
++ tmp = (uint32_t)(FM_PCD_PLCR_PAR_GO |
++ FM_PCD_PLCR_PAR_R |
++ ((uint32_t)ppn << FM_PCD_PLCR_PAR_PNUM_SHIFT) |
++ FM_PCD_PLCR_PAR_PWSEL_MASK);
++
++ spin_lock_irqsave(p_lk, i_flg);
++
++ iowrite32be(tmp, &p_plcr_regs->fmpl_par);
++
++ mb();
++
++	/* wait for the profile regs to be present */
++ do {
++ --toc;
++ udelay(10);
++ if (!toc) {
++ /* looks like PLCR_PAR_GO refuses to clear */
++ spin_unlock_irqrestore(p_lk, i_flg);
++ FM_DMP_LN(buf, n, "Profile regs not accessible -");
++ FM_DMP_LN(buf, n, " check profile init process\n");
++ return n;
++ }
++ } while ((ioread32be(&p_plcr_regs->fmpl_par) & FM_PCD_PLCR_PAR_GO));
++
++ FM_DMP_TITLE(buf, n, p_prof_regs, "Profile %d regs", ppn);
++
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pemode);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pegnia);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_peynia);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pernia);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pecir);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pecbs);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pepepir_eir);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pepbs_ebs);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pelts);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pects);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pepts_ets);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pegpc);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_peypc);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_perpc);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_perypc);
++ FM_DMP_V32(buf, n, p_prof_regs, fmpl_perrpc);
++
++ spin_unlock_irqrestore(p_lk, i_flg);
++
++ return n;
++}
++
++int fm_dump_scheme(void *h_fm_pcd, int scnum, char *buf, int nn)
++{
++ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
++ uint32_t tmp_ar;
++ unsigned long i_flg;
++ int i, n = nn;
++ spinlock_t *p_lk;
++ u_FmPcdKgIndirectAccessRegs *idac;
++
++ idac = p_pcd->p_FmPcdKg->p_IndirectAccessRegs;
++ p_lk = (spinlock_t *)p_pcd->p_FmPcdKg->h_HwSpinlock;
++
++ spin_lock_irqsave(p_lk, i_flg);
++
++ tmp_ar = FmPcdKgBuildReadSchemeActionReg((uint8_t)scnum);
++ if (fman_kg_write_ar_wait(p_pcd->p_FmPcdKg->p_FmPcdKgRegs, tmp_ar)) {
++ FM_DMP_LN(buf, nn,
++ "Keygen scheme access violation or no such scheme");
++ spin_unlock_irqrestore(p_lk, i_flg);
++ return nn;
++ }
++
++ FM_DMP_TITLE(buf, n, &idac->schemeRegs,
++ "Scheme %d Indirect Access Regs", scnum);
++
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_mode);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_ekfc);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_ekdv);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_bmch);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_bmcl);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_fqb);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_hc);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_ppc);
++
++ FM_DMP_TITLE(buf, n, &idac->schemeRegs.kgse_gec, "kgse_gec");
++
++ for (i = 0; i < FM_KG_NUM_OF_GENERIC_REGS; i++)
++ FM_DMP_MEM_32(buf, n, &idac->schemeRegs.kgse_gec[i]);
++
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_spc);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_dv0);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_dv1);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_ccbs);
++ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_mv);
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ spin_unlock_irqrestore(p_lk, i_flg);
++
++ return n;
++}
++
++int fm_kg_pe_dump_regs(void *h_fm_pcd, char *buf, int nn)
++{
++ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
++ int i = 0;
++ uint8_t prt_id = 0;
++ uint32_t tmp_ar;
++ unsigned long i_flg;
++ int n = nn;
++ u_FmPcdKgIndirectAccessRegs *idac;
++ t_FmPcdKg *p_kg;
++ spinlock_t *p_lk;
++
++ p_kg = p_pcd->p_FmPcdKg;
++ idac = p_pcd->p_FmPcdKg->p_IndirectAccessRegs;
++ p_lk = (spinlock_t *)p_kg->h_HwSpinlock;
++
++ spin_lock_irqsave(p_lk, i_flg);
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ for (i = 0; i < FM_MAX_NUM_OF_PORTS; i++) {
++ SW_PORT_INDX_TO_HW_PORT_ID(prt_id, i);
++
++ tmp_ar = FmPcdKgBuildReadPortSchemeBindActionReg(prt_id);
++
++ if (fman_kg_write_ar_wait(p_kg->p_FmPcdKgRegs, tmp_ar)) {
++ FM_DMP_LN(buf, nn, "Keygen scheme access violation");
++ spin_unlock_irqrestore(p_lk, i_flg);
++ return nn;
++ }
++ FM_DMP_TITLE(buf, n, &idac->portRegs, "Port %d regs", prt_id);
++ FM_DMP_V32(buf, n, &idac->portRegs, fmkg_pe_sp);
++ FM_DMP_V32(buf, n, &idac->portRegs, fmkg_pe_cpp);
++ }
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ spin_unlock_irqrestore(p_lk, i_flg);
++
++ return n;
++}
++
++int fm_kg_dump_regs(void *h_fm_pcd, char *buf, int nn)
++{
++ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
++ int n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_TITLE(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs,
++ "FmPcdKgRegs Regs");
++
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_gcr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_eer);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_eeer);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_seer);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_seeer);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_gsr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_tpc);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_serc);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_fdor);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_gdv0r);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_gdv1r);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_feer);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_ar);
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ return n;
++}
++
++
++int fm_fpm_dump_regs(void *h_fm, char *buf, int nn)
++{
++ t_Fm *p_fm = (t_Fm *)h_fm;
++ uint8_t i;
++ int n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ FM_DMP_TITLE(buf, n, p_fm->p_FmFpmRegs, "FM-FPM Regs");
++
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tnc);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_prc);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_brkc);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_mxd);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_dist1);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_dist2);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_epi);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_rie);
++
++ FM_DMP_TITLE(buf, n, &p_fm->p_FmFpmRegs->fmfp_fcev, "fmfp_fcev");
++ for (i = 0; i < 4; ++i)
++ FM_DMP_MEM_32(buf, n, &p_fm->p_FmFpmRegs->fmfp_fcev[i]);
++
++ FM_DMP_TITLE(buf, n, &p_fm->p_FmFpmRegs->fmfp_cee, "fmfp_cee");
++ for (i = 0; i < 4; ++i)
++ FM_DMP_MEM_32(buf, n, &p_fm->p_FmFpmRegs->fmfp_cee[i]);
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tsc1);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tsc2);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tsp);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tsf);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_rcr);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_extc);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_ext1);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_ext2);
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_ip_rev_1);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_ip_rev_2);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_rstc);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_cld);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_npi);
++ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_ee);
++
++ FM_DMP_TITLE(buf, n, &p_fm->p_FmFpmRegs->fmfp_cev, "fmfp_cev");
++ for (i = 0; i < 4; ++i)
++ FM_DMP_MEM_32(buf, n, &p_fm->p_FmFpmRegs->fmfp_cev[i]);
++
++ FM_DMP_TITLE(buf, n, &p_fm->p_FmFpmRegs->fmfp_ps, "fmfp_ps");
++ for (i = 0; i < 64; ++i)
++ FM_DMP_MEM_32(buf, n, &p_fm->p_FmFpmRegs->fmfp_ps[i]);
++
++ return n;
++}
++
++int fm_prs_dump_regs(void *h_fm_pcd, char *buf, int nn)
++{
++ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
++ int n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ FM_DMP_TITLE(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs,
++ "FM-PCD parser regs");
++
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_rpclim);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_rpimac);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, pmeec);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_pevr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_pever);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_perr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_perer);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_ppsc);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_pds);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l2rrs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l3rrs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l4rrs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_srrs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l2rres);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l3rres);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l4rres);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_srres);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_spcs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_spscs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_hxscs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_mrcs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_mwcs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_mrscs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_mwscs);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_fcscs);
++
++ return n;
++}
++
++int fm_plcr_dump_regs(void *h_fm_pcd, char *buf, int nn)
++{
++ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
++ int i = 0;
++ int n = nn;
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ FM_DMP_TITLE(buf, n,
++ p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,
++ "FM policer regs");
++
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_gcr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_gsr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_evr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_ier);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_ifr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_eevr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_eier);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_eifr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_rpcnt);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_ypcnt);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_rrpcnt);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_rypcnt);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_tpcnt);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_flmcnt);
++
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_serc);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_upcr);
++ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_dpmr);
++
++ FM_DMP_TITLE(buf, n,
++ &p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_pmr,
++ "fmpl_pmr");
++
++ for (i = 0; i < 63; ++i)
++ FM_DMP_MEM_32(buf, n,
++ &p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_pmr[i]);
++
++ return n;
++}
++
++int fm_get_counter(void *h_fm, e_FmCounters cnt_e, uint32_t *cnt_val)
++{
++ t_Fm *p_fm = (t_Fm *)h_fm;
++
++ /* When applicable (when there is an "enable counters" bit),
++ check that counters are enabled */
++
++ switch (cnt_e) {
++ case (e_FM_COUNTERS_DEQ_1):
++ case (e_FM_COUNTERS_DEQ_2):
++ case (e_FM_COUNTERS_DEQ_3):
++ if (p_fm->p_FmStateStruct->revInfo.majorRev >= 6)
++ return -EINVAL; /* counter not available */
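++		/* fall through - DEQ_1..3 also require the QMI counters enabled */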
++
++ case (e_FM_COUNTERS_ENQ_TOTAL_FRAME):
++ case (e_FM_COUNTERS_DEQ_TOTAL_FRAME):
++ case (e_FM_COUNTERS_DEQ_0):
++ case (e_FM_COUNTERS_DEQ_FROM_DEFAULT):
++ case (e_FM_COUNTERS_DEQ_FROM_CONTEXT):
++ case (e_FM_COUNTERS_DEQ_FROM_FD):
++ case (e_FM_COUNTERS_DEQ_CONFIRM):
++ if (!(ioread32be(&p_fm->p_FmQmiRegs->fmqm_gc) &
++ QMI_CFG_EN_COUNTERS))
++ return -EINVAL; /* Requested counter not available */
++ break;
++ default:
++ break;
++ }
++
++ switch (cnt_e) {
++ case (e_FM_COUNTERS_ENQ_TOTAL_FRAME):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_etfc);
++ return 0;
++ case (e_FM_COUNTERS_DEQ_TOTAL_FRAME):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dtfc);
++ return 0;
++ case (e_FM_COUNTERS_DEQ_0):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dc0);
++ return 0;
++ case (e_FM_COUNTERS_DEQ_1):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dc1);
++ return 0;
++ case (e_FM_COUNTERS_DEQ_2):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dc2);
++ return 0;
++ case (e_FM_COUNTERS_DEQ_3):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dc3);
++ return 0;
++ case (e_FM_COUNTERS_DEQ_FROM_DEFAULT):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dfdc);
++ return 0;
++ case (e_FM_COUNTERS_DEQ_FROM_CONTEXT):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dfcc);
++ return 0;
++ case (e_FM_COUNTERS_DEQ_FROM_FD):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dffc);
++ return 0;
++ case (e_FM_COUNTERS_DEQ_CONFIRM):
++ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dcc);
++ return 0;
++ }
++ /* should never get here */
++ return -EINVAL; /* counter not available */
++}
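++
++/*
++ * Sketch of a caller (hypothetical, error handling elided):
++ *
++ *	uint32_t frames;
++ *
++ *	if (fm_get_counter(h_fm, e_FM_COUNTERS_ENQ_TOTAL_FRAME, &frames) == 0)
++ *		pr_info("enqueued frames: %u\n", frames);
++ */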
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h
+new file mode 100644
+index 00000000..137653e9
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h
+@@ -0,0 +1,136 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef LNXWRP_SYSFS_FM_H_
++#define LNXWRP_SYSFS_FM_H_
++
++#include "lnxwrp_sysfs.h"
++
++int fm_sysfs_create(struct device *dev);
++void fm_sysfs_destroy(struct device *dev);
++int fm_dump_regs(void *h_dev, char *buf, int nn);
++int fm_fpm_dump_regs(void *h_dev, char *buf, int nn);
++int fm_kg_dump_regs(void *h_pcd, char *buf, int nn);
++int fm_kg_pe_dump_regs(void *h_pcd, char *buf, int nn);
++int fm_dump_scheme(void *h_pcd, int scnum, char *buf, int nn);
++int fm_dump_tnum_dbg(void *h_fm, int tn_s, int tn_e, char *buf, int nn);
++int fm_dump_cls_plan(void *h_pcd, int cpn, char *buf, int nn);
++int fm_plcr_dump_regs(void *h_pcd, char *buf, int nn);
++int fm_prs_dump_regs(void *h_pcd, char *buf, int nn);
++int fm_profile_dump_regs(void *h_pcd, int ppnum, char *buf, int nn);
++
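++/*
++ * The FM_DMP_* helpers below append formatted text to a sysfs page
++ * buffer, advancing the caller's offset "n" and clamping near
++ * PAGE_SIZE so an over-long dump ends in an error note instead of
++ * overflowing the page. FM_DMP_V32(buf, n, regs, field), for example,
++ * prints the physical address and big-endian value of regs->field,
++ * tagged with the field name.
++ */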
++#define FM_DMP_PGSZ_ERR { \
++ snprintf(&buf[PAGE_SIZE - 80], 70, \
++ "\n Err: current sysfs buffer reached PAGE_SIZE\n");\
++ n = PAGE_SIZE - 2; \
++ }
++
++#define FM_DMP_LN(buf, n, ...) \
++ do { \
++ int k, m = n; \
++ m += k = snprintf(&buf[m], PAGE_SIZE - m, __VA_ARGS__); \
++ if (k < 0 || m > PAGE_SIZE - 90) \
++ FM_DMP_PGSZ_ERR \
++ n = m; \
++ } while (0)
++
++#define FM_DMP_TITLE(buf, n, addr, ...) \
++ do { \
++ int k, m = n; \
++ m += k = snprintf(&buf[m], PAGE_SIZE - m, "\n"); \
++ if (k < 0 || m > PAGE_SIZE - 90) \
++ FM_DMP_PGSZ_ERR \
++ m += k = snprintf(&buf[m], PAGE_SIZE - m, __VA_ARGS__); \
++ if (k < 0 || m > PAGE_SIZE - 90) \
++ FM_DMP_PGSZ_ERR \
++ if (addr) { \
++ phys_addr_t pa; \
++ pa = virt_to_phys(addr); \
++ m += k = \
++ snprintf(&buf[m], PAGE_SIZE - m, " (0x%lX)", \
++ (long unsigned int)(pa)); \
++ if (k < 0 || m > PAGE_SIZE - 90) \
++ FM_DMP_PGSZ_ERR \
++ } \
++ m += k = snprintf(&buf[m], PAGE_SIZE - m, \
++ "\n----------------------------------------\n\n"); \
++ if (k < 0 || m > PAGE_SIZE - 90) \
++ FM_DMP_PGSZ_ERR \
++ n = m; \
++ } while (0)
++
++#define FM_DMP_SUBTITLE(buf, n, ...) \
++ do { \
++ int k, m = n; \
++ m += k = snprintf(&buf[m], PAGE_SIZE - m, "------- "); \
++ if (k < 0 || m > PAGE_SIZE - 90) \
++ FM_DMP_PGSZ_ERR \
++ m += k = snprintf(&buf[m], PAGE_SIZE - m, __VA_ARGS__); \
++ if (k < 0 || m > PAGE_SIZE - 90) \
++ FM_DMP_PGSZ_ERR \
++ m += k = snprintf(&buf[m], PAGE_SIZE - m, "\n"); \
++ if (k < 0 || m > PAGE_SIZE - 90) \
++ FM_DMP_PGSZ_ERR \
++ n = m; \
++ } while (0)
++
++#define FM_DMP_MEM_32(buf, n, addr) \
++ { \
++ uint32_t val; \
++ phys_addr_t pa; \
++ int k, m = n; \
++ pa = virt_to_phys(addr); \
++ val = ioread32be((addr)); \
++ do { \
++			m += k = snprintf(&buf[m], \
++				PAGE_SIZE - m, "0x%010llX: 0x%08x\n", \
++				(unsigned long long)pa, val); \
++			if (k < 0 || m > PAGE_SIZE - 90) \
++				FM_DMP_PGSZ_ERR \
++			n += k; \
++		} while (0); \
++ }
++
++#define FM_DMP_V32(buf, n, st, phrase) \
++ do { \
++ int k, m = n; \
++ phys_addr_t pa = virt_to_phys(&((st)->phrase)); \
++ k = snprintf(&buf[m], PAGE_SIZE - m, \
++ "0x%010llX: 0x%08x%8s\t%s\n", (unsigned long long) pa, \
++ ioread32be((uint32_t *)&((st)->phrase)), "", #phrase); \
++ if (k < 0 || m > PAGE_SIZE - 90) \
++ FM_DMP_PGSZ_ERR \
++ n += k; \
++ } while (0)
++
++#endif /* LNXWRP_SYSFS_FM_H_ */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c
+new file mode 100644
+index 00000000..db8e824c
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c
+@@ -0,0 +1,1268 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "lnxwrp_sysfs.h"
++#include "lnxwrp_fm.h"
++#include "debug_ext.h"
++#include "lnxwrp_sysfs_fm_port.h"
++#include "lnxwrp_sysfs_fm.h"
++
++#include "../../sdk_fman/Peripherals/FM/Port/fm_port.h"
++#include "../../sdk_fman/Peripherals/FM/Port/fm_port_dsar.h"
++
++#if defined(__ERR_MODULE__)
++#undef __ERR_MODULE__
++#endif
++
++#include "../../sdk_fman/Peripherals/FM/fm.h"
++
++static const struct sysfs_stats_t portSysfsStats[] = {
++ /* RX/TX/OH common statistics */
++ {
++ .stat_name = "port_frame",
++ .stat_counter = e_FM_PORT_COUNTERS_FRAME,
++ },
++ {
++ .stat_name = "port_discard_frame",
++ .stat_counter = e_FM_PORT_COUNTERS_DISCARD_FRAME,
++ },
++ {
++ .stat_name = "port_dealloc_buf",
++ .stat_counter = e_FM_PORT_COUNTERS_DEALLOC_BUF,
++ },
++ {
++ .stat_name = "port_enq_total",
++ .stat_counter = e_FM_PORT_COUNTERS_ENQ_TOTAL,
++ },
++ /* TX/OH */
++ {
++ .stat_name = "port_length_err",
++ .stat_counter = e_FM_PORT_COUNTERS_LENGTH_ERR,
++ },
++ {
++ .stat_name = "port_unsupprted_format",
++ .stat_counter = e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT,
++ },
++ {
++ .stat_name = "port_deq_total",
++ .stat_counter = e_FM_PORT_COUNTERS_DEQ_TOTAL,
++ },
++ {
++ .stat_name = "port_deq_from_default",
++ .stat_counter = e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT,
++ },
++ {
++ .stat_name = "port_deq_confirm",
++ .stat_counter = e_FM_PORT_COUNTERS_DEQ_CONFIRM,
++ },
++ /* RX/OH */
++ {
++ .stat_name = "port_rx_bad_frame",
++ .stat_counter = e_FM_PORT_COUNTERS_RX_BAD_FRAME,
++ },
++ {
++ .stat_name = "port_rx_large_frame",
++ .stat_counter = e_FM_PORT_COUNTERS_RX_LARGE_FRAME,
++ },
++ {
++ .stat_name = "port_rx_out_of_buffers_discard",
++ .stat_counter = e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD,
++ },
++ {
++ .stat_name = "port_rx_filter_frame",
++ .stat_counter = e_FM_PORT_COUNTERS_RX_FILTER_FRAME,
++ },
++ /* TODO: Particular statistics for OH ports */
++ {}
++};
++
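++/*
++ * show_fm_port_stats() resolves the sysfs attribute name back to its
++ * counter enum through the table above, so one show routine serves
++ * every statistics file.
++ */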
++static ssize_t show_fm_port_stats(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++ t_LnxWrpFmDev *p_LnxWrpFmDev;
++ unsigned long flags;
++ int n = 0;
++ uint8_t counter = 0;
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_LnxWrpFmPortDev == NULL))
++ return -EINVAL;
++
++ p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
++ if (WARN_ON(p_LnxWrpFmDev == NULL))
++ return -EINVAL;
++
++ if (!p_LnxWrpFmDev->active || !p_LnxWrpFmDev->h_Dev)
++ return -EIO;
++
++ if (!p_LnxWrpFmPortDev->h_Dev) {
++ n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
++ return n;
++ }
++
++ counter = fm_find_statistic_counter_by_name(
++ attr->attr.name,
++ portSysfsStats, NULL);
++
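++	/* the RX list DMA error counter is absent on FMan rev 1; the
++	 * revision is read straight from the FPM IP revision register
++	 * (fixed offset from the FM base) to detect that case */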
++ if (counter == e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR) {
++ uint32_t fmRev = 0;
++ fmRev = 0xffff &
++ ioread32(UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr +
++ 0x000c30c4));
++
++ if (fmRev == 0x0100) {
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "counter not available for revision 1\n");
++ local_irq_restore(flags);
++ }
++ return n;
++ }
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE, "\t%s counter: %u\n",
++ p_LnxWrpFmPortDev->name,
++ FM_PORT_GetCounter(p_LnxWrpFmPortDev->h_Dev,
++ (e_FmPortCounters) counter));
++ local_irq_restore(flags);
++
++ return n;
++}
++
++/* FM PORT RX/TX/OH statistics */
++static DEVICE_ATTR(port_frame, S_IRUGO, show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_discard_frame, S_IRUGO, show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_dealloc_buf, S_IRUGO, show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_enq_total, S_IRUGO, show_fm_port_stats, NULL);
++/* FM PORT TX/OH statistics */
++static DEVICE_ATTR(port_length_err, S_IRUGO, show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_unsupprted_format, S_IRUGO, show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_deq_total, S_IRUGO, show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_deq_from_default, S_IRUGO, show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_deq_confirm, S_IRUGO, show_fm_port_stats, NULL);
++/* FM PORT RX/OH statistics */
++static DEVICE_ATTR(port_rx_bad_frame, S_IRUGO, show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_rx_large_frame, S_IRUGO, show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_rx_out_of_buffers_discard, S_IRUGO,
++ show_fm_port_stats, NULL);
++static DEVICE_ATTR(port_rx_filter_frame, S_IRUGO, show_fm_port_stats, NULL);
++
++/* FM PORT TX statistics */
++static struct attribute *fm_tx_port_dev_stats_attributes[] = {
++ &dev_attr_port_frame.attr,
++ &dev_attr_port_discard_frame.attr,
++ &dev_attr_port_dealloc_buf.attr,
++ &dev_attr_port_enq_total.attr,
++ &dev_attr_port_length_err.attr,
++ &dev_attr_port_unsupprted_format.attr,
++ &dev_attr_port_deq_total.attr,
++ &dev_attr_port_deq_from_default.attr,
++ &dev_attr_port_deq_confirm.attr,
++ NULL
++};
++
++static const struct attribute_group fm_tx_port_dev_stats_attr_grp = {
++ .name = "statistics",
++ .attrs = fm_tx_port_dev_stats_attributes
++};
++
++/* FM PORT RX statistics */
++static struct attribute *fm_rx_port_dev_stats_attributes[] = {
++ &dev_attr_port_frame.attr,
++ &dev_attr_port_discard_frame.attr,
++ &dev_attr_port_dealloc_buf.attr,
++ &dev_attr_port_enq_total.attr,
++ &dev_attr_port_rx_bad_frame.attr,
++ &dev_attr_port_rx_large_frame.attr,
++ &dev_attr_port_rx_out_of_buffers_discard.attr,
++ &dev_attr_port_rx_filter_frame.attr,
++ NULL
++};
++
++static const struct attribute_group fm_rx_port_dev_stats_attr_grp = {
++ .name = "statistics",
++ .attrs = fm_rx_port_dev_stats_attributes
++};
++
++/* TODO: add particular OH ports statistics */
++static struct attribute *fm_oh_port_dev_stats_attributes[] = {
++ &dev_attr_port_frame.attr,
++ &dev_attr_port_discard_frame.attr,
++ &dev_attr_port_dealloc_buf.attr,
++ &dev_attr_port_enq_total.attr,
++ /*TX*/ &dev_attr_port_length_err.attr,
++ &dev_attr_port_unsupprted_format.attr,
++ &dev_attr_port_deq_total.attr,
++ &dev_attr_port_deq_from_default.attr,
++ &dev_attr_port_deq_confirm.attr,
++ /* &dev_attr_port_rx_bad_frame.attr, */
++ /* &dev_attr_port_rx_large_frame.attr, */
++ &dev_attr_port_rx_out_of_buffers_discard.attr,
++ /*&dev_attr_port_rx_filter_frame.attr, */
++ NULL
++};
++
++static const struct attribute_group fm_oh_port_dev_stats_attr_grp = {
++ .name = "statistics",
++ .attrs = fm_oh_port_dev_stats_attributes
++};
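++
++/*
++ * All three groups share the name "statistics"; presumably only the
++ * one matching the port type (RX, TX or OH) is registered on a given
++ * port device.
++ */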
++
++static ssize_t show_fm_port_regs(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++#endif
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ p_LnxWrpFmPortDev =
++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
++
++
++ local_irq_save(flags);
++
++	if (!p_LnxWrpFmPortDev->h_Dev) {
++		n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
++	} else {
++ n = snprintf(buf, PAGE_SIZE,
++ "FM port driver registers dump.\n");
++ n = fm_port_dump_regs(p_LnxWrpFmPortDev->h_Dev, buf, n);
++ }
++
++ local_irq_restore(flags);
++
++ return n;
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++
++ return n;
++#endif
++}
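++
++/*
++ * DSAR (Deep Sleep Auto Response) keeps its descriptors in MURAM. The
++ * dump helpers below walk the chain by hand: read the port's
++ * fmbm_rgpr pointer, ioremap() each big-endian MURAM offset on the
++ * fly, print the tables, then iounmap() again.
++ */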
++static int fm_port_dsar_dump_mem(void *h_dev, char *buf, int nn)
++{
++ t_FmPort *p_FmPort;
++ t_Fm *p_Fm;
++ uint8_t hardwarePortId;
++ uint32_t *param_page;
++ t_ArCommonDesc *ArCommonDescPtr;
++ uint32_t *mem;
++ int i, n = nn;
++
++ p_FmPort = (t_FmPort *)h_dev;
++ hardwarePortId = p_FmPort->hardwarePortId;
++ p_Fm = (t_Fm *)p_FmPort->h_Fm;
++
++ if (!FM_PORT_IsInDsar(p_FmPort))
++ {
++ FM_DMP_LN(buf, n, "port %u is not a DSAR port\n",
++ hardwarePortId);
++ return n;
++ }
++ FM_DMP_LN(buf, n, "port %u DSAR mem\n", hardwarePortId);
++ FM_DMP_LN(buf, n, "========================\n");
++
++ /* do I need request_mem_region here? */
++	param_page = ioremap(p_FmPort->fmMuramPhysBaseAddr +
++		ioread32be(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr),
++		4);
++	ArCommonDescPtr = (t_ArCommonDesc *)ioremap(
++		p_FmPort->fmMuramPhysBaseAddr + ioread32be(param_page),
++		300 * 4); /* this should be changed */
++	mem = (uint32_t *)ArCommonDescPtr;
++	for (i = 0; i < 300; i += 4)
++		FM_DMP_LN(buf, n, "%08x: %08x %08x %08x %08x\n",
++			i * 4, mem[i], mem[i + 1], mem[i + 2], mem[i + 3]);
++ iounmap(ArCommonDescPtr);
++ iounmap(param_page);
++ return n;
++}
++
++static int fm_port_dsar_dump_regs(void *h_dev, char *buf, int nn)
++{
++ t_FmPort *p_FmPort;
++ t_Fm *p_Fm;
++ uint8_t hardwarePortId;
++ uint32_t *param_page;
++ t_ArCommonDesc *ArCommonDescPtr;
++ int i, n = nn;
++
++ p_FmPort = (t_FmPort *)h_dev;
++ hardwarePortId = p_FmPort->hardwarePortId;
++ p_Fm = (t_Fm *)p_FmPort->h_Fm;
++
++ if (!FM_PORT_IsInDsar(p_FmPort))
++ {
++ FM_DMP_LN(buf, n, "port %u is not a DSAR port\n",
++ hardwarePortId);
++ return n;
++ }
++ FM_DMP_LN(buf, n, "port %u DSAR information\n", hardwarePortId);
++ FM_DMP_LN(buf, n, "========================\n");
++
++ /* do I need request_mem_region here? */
++	param_page = ioremap(p_FmPort->fmMuramPhysBaseAddr +
++		ioread32be(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr),
++		4);
++	ArCommonDescPtr = (t_ArCommonDesc *)ioremap(
++		p_FmPort->fmMuramPhysBaseAddr + ioread32be(param_page),
++		sizeof(t_ArCommonDesc)); /* this should be changed */
++ FM_DMP_LN(buf, n, "Tx port: 0x%x\n", ArCommonDescPtr->arTxPort);
++ FM_DMP_LN(buf, n, "Active HPNIA: 0x%08x\n", ArCommonDescPtr->activeHPNIA);
++ FM_DMP_LN(buf, n, "Snmp port: 0x%x\n", ArCommonDescPtr->snmpPort);
++ FM_DMP_LN(buf, n, "MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", ArCommonDescPtr->macStationAddr[0],
++ ArCommonDescPtr->macStationAddr[1], ArCommonDescPtr->macStationAddr[2],
++ ArCommonDescPtr->macStationAddr[3], ArCommonDescPtr->macStationAddr[4],
++ ArCommonDescPtr->macStationAddr[5]);
++ FM_DMP_LN(buf, n, "filterControl: 0x%02x\n", ArCommonDescPtr->filterControl);
++ FM_DMP_LN(buf, n, "tcpControlPass: 0x%04x\n", ArCommonDescPtr->tcpControlPass);
++ FM_DMP_LN(buf, n, "ipProtocolTblSize: 0x%x\n", ArCommonDescPtr->ipProtocolTblSize);
++ FM_DMP_LN(buf, n, "udpPortTblSize: 0x%x\n", ArCommonDescPtr->udpPortTblSize);
++ FM_DMP_LN(buf, n, "tcpPortTblSize: 0x%x\n", ArCommonDescPtr->tcpPortTblSize);
++ if (ArCommonDescPtr->p_ArStats)
++ {
++ t_ArStatistics *arStatistics = (t_ArStatistics*)
++ ioremap(ioread32be(&ArCommonDescPtr->p_ArStats) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ sizeof (t_ArStatistics));
++ FM_DMP_LN(buf, n, "\nDSAR statistics\n");
++ FM_DMP_LN(buf, n, "DSAR_Discarded: 0x%x\n", arStatistics->dsarDiscarded);
++ FM_DMP_LN(buf, n, "DSAR_Err_Discarded: 0x%x\n", arStatistics->dsarErrDiscarded);
++ FM_DMP_LN(buf, n, "DSAR_Frag_Discarded: 0x%x\n", arStatistics->dsarFragDiscarded);
++ FM_DMP_LN(buf, n, "DSAR_Tunnel_Discarded: 0x%x\n", arStatistics->dsarTunnelDiscarded);
++ FM_DMP_LN(buf, n, "DSAR_ARP_Discarded: 0x%x\n", arStatistics->dsarArpDiscarded);
++ FM_DMP_LN(buf, n, "DSAR_IP_Discarded: 0x%x\n", arStatistics->dsarIpDiscarded);
++ FM_DMP_LN(buf, n, "DSAR_TCP_Discarded: 0x%x\n", arStatistics->dsarTcpDiscarded);
++ FM_DMP_LN(buf, n, "DSAR_UDP_Discarded: 0x%x\n", arStatistics->dsarUdpDiscarded);
++ FM_DMP_LN(buf, n, "DSAR_ICMPv6_Checksum_Err: 0x%x\n", arStatistics->dsarIcmpV6ChecksumErr);
++ FM_DMP_LN(buf, n, "DSAR_ICMPv6_Other_Type: 0x%x\n", arStatistics->dsarIcmpV6OtherType);
++ FM_DMP_LN(buf, n, "DSAR_ICMPv4_Other_Type: 0x%x\n", arStatistics->dsarIcmpV4OtherType);
++
++ iounmap(arStatistics);
++ }
++ if (ArCommonDescPtr->p_ArpDescriptor)
++ {
++ t_DsarArpDescriptor* ArpDescriptor = (t_DsarArpDescriptor*)
++ ioremap(ioread32be(&ArCommonDescPtr->p_ArpDescriptor) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ sizeof (t_DsarArpDescriptor));
++ FM_DMP_LN(buf, n, "\nARP\n");
++ FM_DMP_LN(buf, n, "===\n");
++ FM_DMP_LN(buf, n, "control bits 0x%04x\n", ArpDescriptor->control);
++ if (ArpDescriptor->numOfBindings)
++ {
++ char ip_str[100];
++ t_DsarArpBindingEntry* bindings = ioremap(
++ ioread32be(&ArpDescriptor->p_Bindings) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ ArpDescriptor->numOfBindings *
++ sizeof(t_DsarArpBindingEntry));
++ FM_DMP_LN(buf, n, " ip vlan id\n");
++ for (i = 0; i < ArpDescriptor->numOfBindings; i++)
++			{
++				uint8_t *ip_addr =
++					(uint8_t *)&bindings[i].ipv4Addr;
++				/* ip_str is scratch space; do not advance n */
++				snprintf(ip_str, 100, "%d.%d.%d.%d",
++					ip_addr[0], ip_addr[1],
++					ip_addr[2], ip_addr[3]);
++ FM_DMP_LN(buf, n, "%-15s 0x%x\n",
++					ip_str, bindings[i].vlanId);
++ }
++ iounmap(bindings);
++ }
++ if (ArpDescriptor->p_Statistics)
++ {
++ t_DsarArpStatistics* arpStats = ioremap(
++ ioread32be(&ArpDescriptor->p_Statistics) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ sizeof(t_DsarArpStatistics));
++ FM_DMP_LN(buf, n, "statistics\n");
++ FM_DMP_LN(buf, n, "INVAL_CNT: 0x%x\n", arpStats->invalCnt);
++ FM_DMP_LN(buf, n, "ECHO_CNT: 0x%x\n", arpStats->echoCnt);
++ FM_DMP_LN(buf, n, "CD_CNT: 0x%x\n", arpStats->cdCnt);
++ FM_DMP_LN(buf, n, "AR_CNT: 0x%x\n", arpStats->arCnt);
++ FM_DMP_LN(buf, n, "RATM_CNT: 0x%x\n", arpStats->ratmCnt);
++ FM_DMP_LN(buf, n, "UKOP_CNT: 0x%x\n", arpStats->ukopCnt);
++ FM_DMP_LN(buf, n, "NMTP_CNT: 0x%x\n", arpStats->nmtpCnt);
++ FM_DMP_LN(buf, n, "NMVLAN_CNT: 0x%x\n", arpStats->nmVlanCnt);
++ iounmap(arpStats);
++ }
++
++ iounmap(ArpDescriptor);
++ }
++ if (ArCommonDescPtr->p_IcmpV4Descriptor)
++ {
++ t_DsarIcmpV4Descriptor* ICMPV4Descriptor =
++ (t_DsarIcmpV4Descriptor*)ioremap(ioread32be(
++ &ArCommonDescPtr->p_IcmpV4Descriptor) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ sizeof (t_DsarIcmpV4Descriptor));
++ FM_DMP_LN(buf, n, "\nEcho ICMPv4\n");
++ FM_DMP_LN(buf, n, "===========\n");
++ FM_DMP_LN(buf, n, "control bits 0x%04x\n", ICMPV4Descriptor->control);
++ if (ICMPV4Descriptor->numOfBindings)
++ {
++ char ip_str[100];
++ t_DsarArpBindingEntry* bindings = ioremap(
++ ioread32be(&ICMPV4Descriptor->p_Bindings) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ ICMPV4Descriptor->numOfBindings *
++ sizeof(t_DsarArpBindingEntry));
++ FM_DMP_LN(buf, n, " ip vlan id\n");
++ for (i = 0; i < ICMPV4Descriptor->numOfBindings; i++)
++			{
++				uint8_t *ip_addr =
++					(uint8_t *)&bindings[i].ipv4Addr;
++				snprintf(ip_str, 100, "%d.%d.%d.%d",
++					ip_addr[0], ip_addr[1],
++					ip_addr[2], ip_addr[3]);
++ FM_DMP_LN(buf, n, "%-15s 0x%x\n",
++					ip_str, bindings[i].vlanId);
++ }
++ iounmap(bindings);
++ }
++ if (ICMPV4Descriptor->p_Statistics)
++ {
++ t_DsarIcmpV4Statistics* icmpv4Stats = ioremap(
++ ioread32be(&ICMPV4Descriptor->p_Statistics) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ sizeof(t_DsarIcmpV4Statistics));
++ FM_DMP_LN(buf, n, "statistics\n");
++ FM_DMP_LN(buf, n, "INVAL_CNT: 0x%x\n", icmpv4Stats->invalCnt);
++ FM_DMP_LN(buf, n, "NMVLAN_CNT: 0x%x\n", icmpv4Stats->nmVlanCnt);
++ FM_DMP_LN(buf, n, "NMIP_CNT: 0x%x\n", icmpv4Stats->nmIpCnt);
++ FM_DMP_LN(buf, n, "AR_CNT: 0x%x\n", icmpv4Stats->arCnt);
++ FM_DMP_LN(buf, n, "CSERR_CNT: 0x%x\n", icmpv4Stats->cserrCnt);
++ iounmap(icmpv4Stats);
++ }
++ iounmap(ICMPV4Descriptor);
++ }
++ if (ArCommonDescPtr->p_NdDescriptor)
++ {
++ t_DsarNdDescriptor *NDDescriptor =
++ (t_DsarNdDescriptor*)ioremap(ioread32be(
++ &ArCommonDescPtr->p_NdDescriptor) + p_FmPort->
++ fmMuramPhysBaseAddr, sizeof (t_DsarNdDescriptor));
++ FM_DMP_LN(buf, n, "\nNDP\n");
++ FM_DMP_LN(buf, n, "===\n");
++ FM_DMP_LN(buf, n, "control bits 0x%04x\n", NDDescriptor->control);
++ FM_DMP_LN(buf, n, "solicited address 0x%08x\n", NDDescriptor->solicitedAddr);
++ if (NDDescriptor->numOfBindings)
++ {
++ char ip_str[100];
++ t_DsarIcmpV6BindingEntry* bindings = ioremap(
++ ioread32be(&NDDescriptor->p_Bindings) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ NDDescriptor->numOfBindings *
++ sizeof(t_DsarIcmpV6BindingEntry));
++			uint16_t* ip_addr;
++ FM_DMP_LN(buf, n, " ip vlan id\n");
++			for (i = 0; i < NDDescriptor->numOfBindings; i++)
++			{
++				ip_addr = (uint16_t*)&bindings[i].ipv6Addr;
++				snprintf(ip_str, sizeof(ip_str),
++					"%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
++					ip_addr[0], ip_addr[1], ip_addr[2], ip_addr[3],
++					ip_addr[4], ip_addr[5], ip_addr[6], ip_addr[7]);
++				FM_DMP_LN(buf, n, "%s 0x%x\n", ip_str, bindings[i].vlanId);
++			}
++ iounmap(bindings);
++ }
++ if (NDDescriptor->p_Statistics)
++ {
++ t_NdStatistics* ndStats = ioremap(
++ ioread32be(&NDDescriptor->p_Statistics) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ sizeof(t_NdStatistics));
++ FM_DMP_LN(buf, n, "statistics\n");
++ FM_DMP_LN(buf, n, "INVAL_CNT: 0x%x\n", ndStats->invalCnt);
++ FM_DMP_LN(buf, n, "NMVLAN_CNT: 0x%x\n", ndStats->nmVlanCnt);
++ FM_DMP_LN(buf, n, "NMIP_CNT: 0x%x\n", ndStats->nmIpCnt);
++ FM_DMP_LN(buf, n, "AR_CNT: 0x%x\n", ndStats->arCnt);
++ FM_DMP_LN(buf, n, "USADVERT_CNT: 0x%x\n", ndStats->usadvertCnt);
++ FM_DMP_LN(buf, n, "NMMCAST_CNT: 0x%x\n", ndStats->nmmcastCnt);
++ FM_DMP_LN(buf, n, "NSLLA_CNT: 0x%x\n", ndStats->nsllaCnt);
++ iounmap(ndStats);
++ }
++ iounmap(NDDescriptor);
++ }
++ if (ArCommonDescPtr->p_IcmpV6Descriptor)
++ {
++ t_DsarIcmpV6Descriptor *ICMPV6Descriptor =
++ (t_DsarIcmpV6Descriptor*)ioremap(ioread32be(
++ &ArCommonDescPtr->p_IcmpV6Descriptor) + p_FmPort->
++ fmMuramPhysBaseAddr, sizeof (t_DsarIcmpV6Descriptor));
++ FM_DMP_LN(buf, n, "\nEcho ICMPv6\n");
++ FM_DMP_LN(buf, n, "===========\n");
++ FM_DMP_LN(buf, n, "control bits 0x%04x\n", ICMPV6Descriptor->control);
++ if (ICMPV6Descriptor->numOfBindings)
++ {
++ char ip_str[100];
++ t_DsarIcmpV6BindingEntry* bindings = ioremap(
++ ioread32be(&ICMPV6Descriptor->p_Bindings) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ ICMPV6Descriptor->numOfBindings *
++ sizeof(t_DsarIcmpV6BindingEntry));
++			uint16_t* ip_addr;
++ FM_DMP_LN(buf, n, " ip vlan id\n");
++			for (i = 0; i < ICMPV6Descriptor->numOfBindings; i++)
++			{
++				ip_addr = (uint16_t*)&bindings[i].ipv6Addr;
++				snprintf(ip_str, sizeof(ip_str),
++					"%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
++					ip_addr[0], ip_addr[1], ip_addr[2], ip_addr[3],
++					ip_addr[4], ip_addr[5], ip_addr[6], ip_addr[7]);
++				FM_DMP_LN(buf, n, "%s 0x%x\n", ip_str, bindings[i].vlanId);
++			}
++ iounmap(bindings);
++ }
++ if (ICMPV6Descriptor->p_Statistics)
++ {
++ t_DsarIcmpV6Statistics* icmpv6Stats = ioremap(
++ ioread32be(&ICMPV6Descriptor->p_Statistics) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ sizeof(t_DsarIcmpV6Statistics));
++ FM_DMP_LN(buf, n, "statistics\n");
++ FM_DMP_LN(buf, n, "INVAL_CNT: 0x%x\n", icmpv6Stats->invalCnt);
++ FM_DMP_LN(buf, n, "NMVLAN_CNT: 0x%x\n", icmpv6Stats->nmVlanCnt);
++ FM_DMP_LN(buf, n, "NMIP_CNT: 0x%x\n", icmpv6Stats->nmIpCnt);
++ FM_DMP_LN(buf, n, "AR_CNT: 0x%x\n", icmpv6Stats->arCnt);
++ iounmap(icmpv6Stats);
++ }
++ iounmap(ICMPV6Descriptor);
++ }
++ if (ArCommonDescPtr->p_SnmpDescriptor)
++ {
++ t_DsarSnmpDescriptor *SnmpDescriptor =
++ (t_DsarSnmpDescriptor*)ioremap(ioread32be(
++ &ArCommonDescPtr->p_SnmpDescriptor) + p_FmPort->
++ fmMuramPhysBaseAddr, sizeof (t_DsarSnmpDescriptor));
++ FM_DMP_LN(buf, n, "\nSNMP\n");
++ FM_DMP_LN(buf, n, "===========\n");
++ FM_DMP_LN(buf, n, "control bits 0x%04x\n", SnmpDescriptor->control);
++ FM_DMP_LN(buf, n, "max message length 0x%04x\n", SnmpDescriptor->maxSnmpMsgLength);
++ if (SnmpDescriptor->numOfIpv4Addresses)
++ {
++ char ip_str[100];
++ t_DsarSnmpIpv4AddrTblEntry* addrs = ioremap(
++ ioread32be(&SnmpDescriptor->p_Ipv4AddrTbl) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ SnmpDescriptor->numOfIpv4Addresses *
++ sizeof(t_DsarSnmpIpv4AddrTblEntry));
++			uint8_t* ip_addr;
++ FM_DMP_LN(buf, n, " ip vlan id\n");
++			for (i = 0; i < SnmpDescriptor->numOfIpv4Addresses; i++)
++			{
++				ip_addr = (uint8_t*)&addrs[i].ipv4Addr;
++				snprintf(ip_str, sizeof(ip_str), "%d.%d.%d.%d",
++					ip_addr[0], ip_addr[1],
++					ip_addr[2], ip_addr[3]);
++				FM_DMP_LN(buf, n, "%-15s 0x%x\n", ip_str, addrs[i].vlanId);
++			}
++ iounmap(addrs);
++ }
++ if (SnmpDescriptor->p_Statistics)
++ {
++ t_DsarSnmpStatistics* snmpStats = ioremap(
++ ioread32be(&SnmpDescriptor->p_Statistics) +
++ p_FmPort->fmMuramPhysBaseAddr,
++ sizeof(t_DsarSnmpStatistics));
++ FM_DMP_LN(buf, n, "statistics\n");
++ FM_DMP_LN(buf, n, "snmpErrCnt: 0x%x\n", snmpStats->snmpErrCnt);
++ FM_DMP_LN(buf, n, "snmpCommunityErrCnt: 0x%x\n", snmpStats->snmpCommunityErrCnt);
++ FM_DMP_LN(buf, n, "snmpTotalDiscardCnt: 0x%x\n", snmpStats->snmpTotalDiscardCnt);
++ FM_DMP_LN(buf, n, "snmpGetReqCnt: 0x%x\n", snmpStats->snmpGetReqCnt);
++ FM_DMP_LN(buf, n, "snmpGetNextReqCnt: 0x%x\n", snmpStats->snmpGetNextReqCnt);
++ iounmap(snmpStats);
++ }
++ iounmap(SnmpDescriptor);
++ }
++ iounmap(ArCommonDescPtr);
++ iounmap(param_page);
++ return n;
++}
++
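++/* The sysfs "show" callbacks below all share one pattern: when the driver is
++ * built with DEBUG_ERRORS > 0 they format the requested dump into the sysfs
++ * page buffer with local interrupts disabled around the register reads;
++ * otherwise they only report that the debug level is too low to dump. */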
++static ssize_t show_fm_port_dsar_mem(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++#endif
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ p_LnxWrpFmPortDev =
++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
++
++ local_irq_save(flags);
++
++	if (!p_LnxWrpFmPortDev->h_Dev) {
++		n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
++		/* restore interrupts before the early return */
++		local_irq_restore(flags);
++		return n;
++ } else {
++ n = snprintf(buf, PAGE_SIZE,
++			"FM port DSAR memory dump.\n");
++ n = fm_port_dsar_dump_mem(p_LnxWrpFmPortDev->h_Dev, buf, n);
++ }
++
++ local_irq_restore(flags);
++
++ return n;
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++
++ return n;
++#endif
++}
++
++static ssize_t show_fm_port_dsar_regs(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++#endif
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ p_LnxWrpFmPortDev =
++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
++
++ local_irq_save(flags);
++
++	if (!p_LnxWrpFmPortDev->h_Dev) {
++		n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
++		local_irq_restore(flags);
++		return n;
++ } else {
++ n = snprintf(buf, PAGE_SIZE,
++ "FM port driver registers dump.\n");
++ n = fm_port_dsar_dump_regs(p_LnxWrpFmPortDev->h_Dev, buf, n);
++ }
++
++ local_irq_restore(flags);
++
++ return n;
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++
++ return n;
++#endif
++}
++
++#if (DPAA_VERSION >= 11)
++static ssize_t show_fm_port_ipv4_options(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ p_LnxWrpFmPortDev =
++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
++
++ local_irq_save(flags);
++
++	if (!p_LnxWrpFmPortDev->h_Dev) {
++		n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
++		local_irq_restore(flags);
++		return n;
++	} else if (((t_FmPort *)p_LnxWrpFmPortDev->h_Dev)->p_ParamsPage
++		== NULL) {
++		n = snprintf(buf, PAGE_SIZE,
++			"\tPort: FMan-controller params page not set\n");
++		local_irq_restore(flags);
++		return n;
++ } else {
++ n = snprintf(buf, PAGE_SIZE,
++ "Counter for fragmented pkt with IP header options\n");
++ n = fm_port_dump_ipv4_opt(p_LnxWrpFmPortDev->h_Dev, buf, n);
++ }
++
++ local_irq_restore(flags);
++
++ return n;
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++
++ return n;
++#endif
++}
++
++#endif
++
++static ssize_t show_fm_port_bmi_regs(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ p_LnxWrpFmPortDev =
++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
++
++ local_irq_save(flags);
++
++	if (!p_LnxWrpFmPortDev->h_Dev) {
++		n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
++		local_irq_restore(flags);
++		return n;
++ } else {
++ n = snprintf(buf, PAGE_SIZE,
++ "FM port driver registers dump.\n");
++ n = fm_port_dump_regs_bmi(p_LnxWrpFmPortDev->h_Dev, buf, n);
++ }
++
++ local_irq_restore(flags);
++
++ return n;
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++
++ return n;
++#endif
++}
++
++static ssize_t show_fm_port_qmi_regs(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned long flags;
++ unsigned n = 0;
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++#endif
++
++ if (attr == NULL || buf == NULL || dev == NULL)
++ return -EINVAL;
++
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ p_LnxWrpFmPortDev =
++ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
++
++ local_irq_save(flags);
++
++	if (!p_LnxWrpFmPortDev->h_Dev) {
++		n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
++		local_irq_restore(flags);
++		return n;
++ } else {
++ n = snprintf(buf, PAGE_SIZE,
++ "FM port driver registers dump.\n");
++ n = fm_port_dump_regs_qmi(p_LnxWrpFmPortDev->h_Dev, buf, n);
++ }
++
++ local_irq_restore(flags);
++
++ return n;
++#else
++
++ local_irq_save(flags);
++ n = snprintf(buf, PAGE_SIZE,
++ "Debug level is too low to dump registers!!!\n");
++ local_irq_restore(flags);
++
++ return n;
++#endif
++}
++
++static DEVICE_ATTR(fm_port_regs, S_IRUGO | S_IRUSR, show_fm_port_regs, NULL);
++static DEVICE_ATTR(fm_port_qmi_regs, S_IRUGO | S_IRUSR, show_fm_port_qmi_regs, NULL);
++static DEVICE_ATTR(fm_port_bmi_regs, S_IRUGO | S_IRUSR, show_fm_port_bmi_regs, NULL);
++#if (DPAA_VERSION >= 11)
++static DEVICE_ATTR(fm_port_ipv4_opt, S_IRUGO | S_IRUSR, show_fm_port_ipv4_options, NULL);
++#endif
++static DEVICE_ATTR(fm_port_dsar_regs, S_IRUGO | S_IRUSR, show_fm_port_dsar_regs, NULL);
++static DEVICE_ATTR(fm_port_dsar_mem, S_IRUGO | S_IRUSR, show_fm_port_dsar_mem, NULL);
++
++int fm_port_sysfs_create(struct device *dev)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
++
++ if (dev == NULL)
++ return -EINVAL;
++
++ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_LnxWrpFmPortDev == NULL))
++ return -EINVAL;
++
++	/* keep the attribute pointers so they can be removed when the module is disabled */
++ p_LnxWrpFmPortDev->dev_attr_regs = &dev_attr_fm_port_regs;
++ p_LnxWrpFmPortDev->dev_attr_qmi_regs = &dev_attr_fm_port_qmi_regs;
++ p_LnxWrpFmPortDev->dev_attr_bmi_regs = &dev_attr_fm_port_bmi_regs;
++#if (DPAA_VERSION >= 11)
++ p_LnxWrpFmPortDev->dev_attr_ipv4_opt = &dev_attr_fm_port_ipv4_opt;
++#endif
++ p_LnxWrpFmPortDev->dev_attr_dsar_regs = &dev_attr_fm_port_dsar_regs;
++ p_LnxWrpFmPortDev->dev_attr_dsar_mem = &dev_attr_fm_port_dsar_mem;
++ /* Registers dump entry - in future will be moved to debugfs */
++ if (device_create_file(dev, &dev_attr_fm_port_regs) != 0)
++ return -EIO;
++ if (device_create_file(dev, &dev_attr_fm_port_qmi_regs) != 0)
++ return -EIO;
++ if (device_create_file(dev, &dev_attr_fm_port_bmi_regs) != 0)
++ return -EIO;
++#if (DPAA_VERSION >= 11)
++ if (device_create_file(dev, &dev_attr_fm_port_ipv4_opt) != 0)
++ return -EIO;
++#endif
++ if (device_create_file(dev, &dev_attr_fm_port_dsar_regs) != 0)
++ return -EIO;
++ if (device_create_file(dev, &dev_attr_fm_port_dsar_mem) != 0)
++ return -EIO;
++
++ /* FM Ports statistics */
++ switch (p_LnxWrpFmPortDev->settings.param.portType) {
++ case e_FM_PORT_TYPE_TX:
++ case e_FM_PORT_TYPE_TX_10G:
++ if (sysfs_create_group
++ (&dev->kobj, &fm_tx_port_dev_stats_attr_grp) != 0)
++ return -EIO;
++ break;
++ case e_FM_PORT_TYPE_RX:
++ case e_FM_PORT_TYPE_RX_10G:
++ if (sysfs_create_group
++ (&dev->kobj, &fm_rx_port_dev_stats_attr_grp) != 0)
++ return -EIO;
++ break;
++ case e_FM_PORT_TYPE_DUMMY:
++ case e_FM_PORT_TYPE_OH_OFFLINE_PARSING:
++ if (sysfs_create_group
++ (&dev->kobj, &fm_oh_port_dev_stats_attr_grp) != 0)
++ return -EIO;
++ break;
++ default:
++ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
++ __func__);
++ return -EINVAL;
++	}
++
++ return 0;
++}
++
++void fm_port_sysfs_destroy(struct device *dev)
++{
++ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = NULL;
++
++	/* NOTE: this function has never been tested! */
++
++ if (WARN_ON(dev == NULL))
++ return;
++
++ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
++ if (WARN_ON(p_LnxWrpFmPortDev == NULL))
++ return;
++
++	/* Will the name attribute also be freed by these two functions? */
++ switch (p_LnxWrpFmPortDev->settings.param.portType) {
++ case e_FM_PORT_TYPE_TX:
++ case e_FM_PORT_TYPE_TX_10G:
++ sysfs_remove_group(&dev->kobj, &fm_tx_port_dev_stats_attr_grp);
++ break;
++ case e_FM_PORT_TYPE_RX:
++ case e_FM_PORT_TYPE_RX_10G:
++ sysfs_remove_group(&dev->kobj, &fm_rx_port_dev_stats_attr_grp);
++ break;
++ case e_FM_PORT_TYPE_DUMMY:
++ case e_FM_PORT_TYPE_OH_OFFLINE_PARSING:
++ sysfs_remove_group(&dev->kobj, &fm_oh_port_dev_stats_attr_grp);
++ break;
++ default:
++ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
++ __func__);
++ break;
++	}
++
++ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_regs);
++ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_qmi_regs);
++ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_bmi_regs);
++#if (DPAA_VERSION >= 11)
++ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_ipv4_opt);
++#endif
++ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_dsar_regs);
++ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_dsar_mem);
++}
++
++
++int fm_port_dump_regs(void *h_dev, char *buf, int nn)
++{
++ t_FmPort *p_FmPort;
++ t_Fm *p_Fm;
++ uint8_t hardwarePortId;
++ int n = nn;
++
++ p_FmPort = (t_FmPort *)h_dev;
++ hardwarePortId = p_FmPort->hardwarePortId;
++ p_Fm = (t_Fm *)p_FmPort->h_Fm;
++
++ FM_DMP_TITLE(buf, n, &p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId - 1],
++ "fmbm_pp for port %u", hardwarePortId);
++ FM_DMP_MEM_32(buf, n,
++ &p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId - 1]);
++
++ FM_DMP_TITLE(buf, n, &p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId - 1],
++ "fmbm_pfs for port %u", hardwarePortId);
++ FM_DMP_MEM_32(buf, n,
++ &p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId - 1]);
++
++ FM_DMP_TITLE(buf, n,
++ &p_Fm->p_FmBmiRegs->fmbm_spliodn[hardwarePortId - 1],
++ "fmbm_spliodn for port %u", hardwarePortId);
++ FM_DMP_MEM_32(buf, n,
++ &p_Fm->p_FmBmiRegs->fmbm_spliodn[hardwarePortId - 1]);
++
++ FM_DMP_TITLE(buf, n, &p_Fm->p_FmFpmRegs->fmfp_ps[hardwarePortId],
++		"fmfp_ps for port %u", hardwarePortId);
++ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmFpmRegs->fmfp_ps[hardwarePortId]);
++
++ FM_DMP_TITLE(buf, n, &p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId / 2],
++		"fmdmplr for port %u", hardwarePortId);
++ FM_DMP_MEM_32(buf, n,
++ &p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId / 2]);
++ return n;
++}
++
++#if (DPAA_VERSION >= 11)
++
++int fm_port_dump_ipv4_opt(void *h_dev, char *buf, int nn)
++{
++ t_FmPort *p_FmPort;
++ int n = nn;
++
++ p_FmPort = (t_FmPort *)h_dev;
++
++ FM_DMP_V32(buf, n, p_FmPort->p_ParamsPage, ipfOptionsCounter);
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ return n;
++}
++#endif
++
++int fm_port_dump_regs_bmi(void *h_dev, char *buf, int nn)
++{
++ t_FmPort *p_FmPort;
++ u_FmPortBmiRegs *p_bmi;
++
++ char arr[20];
++ uint8_t flag;
++ int i = 0;
++ int n = nn;
++
++ p_FmPort = (t_FmPort *)h_dev;
++ p_bmi = p_FmPort->p_FmPortBmiRegs;
++
++ memset(arr, 0, sizeof(arr));
++ switch (p_FmPort->portType) {
++ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
++ strcpy(arr, "OFFLINE-PARSING");
++ flag = 0;
++ break;
++ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
++ strcpy(arr, "HOST-COMMAND");
++ flag = 0;
++ break;
++ case (e_FM_PORT_TYPE_RX):
++ strcpy(arr, "RX");
++ flag = 1;
++ break;
++ case (e_FM_PORT_TYPE_RX_10G):
++ strcpy(arr, "RX-10G");
++ flag = 1;
++ break;
++ case (e_FM_PORT_TYPE_TX):
++ strcpy(arr, "TX");
++ flag = 2;
++ break;
++ case (e_FM_PORT_TYPE_TX_10G):
++ strcpy(arr, "TX-10G");
++ flag = 2;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ FM_DMP_TITLE(buf, n, NULL,
++ "FMan-Port (%s #%d) registers:",
++ arr, p_FmPort->portId);
++
++ FM_DMP_TITLE(buf, n, p_bmi, "Bmi Port Regs");
++
++ switch (flag) {
++ case (0):
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ocfg);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ost);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oda);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oicp);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofdne);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofne);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofca);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofpne);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_opso);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_opp);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_occb);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oim);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofp);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofed);
++
++ FM_DMP_TITLE(buf, n,
++ &(p_bmi->ohPortBmiRegs.fmbm_oprai), "fmbm_oprai");
++ for (i = 0; i < FM_PORT_PRS_RESULT_NUM_OF_WORDS; ++i) {
++ FM_DMP_MEM_32(buf, n,
++ &(p_bmi->ohPortBmiRegs.fmbm_oprai[i]));
++ }
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofqid);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oefqid);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofsdm);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofsem);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofene);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_orlmts);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_orlmt);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ocmne);
++ {
++#ifndef FM_NO_OP_OBSERVED_POOLS
++ if (p_FmPort->fmRevInfo.majorRev == 4) {
++ FM_DMP_TITLE(buf, n,
++ &p_bmi->ohPortBmiRegs.fmbm_oebmpi,
++ "fmbm_oebmpi");
++
++ for (i = 0; i < FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS; ++i) {
++ FM_DMP_MEM_32(buf, n,
++ &(p_bmi->ohPortBmiRegs.fmbm_oebmpi[i]));
++ }
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ocgm);
++ }
++#endif /* !FM_NO_OP_OBSERVED_POOLS */
++ }
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ostc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofrc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofdc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofledc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofufdc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_offc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofwdc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofldec);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_opc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_opcp);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_occn);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_otuc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oduc);
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofuc);
++ FM_DMP_TITLE(buf, n, &(p_bmi->ohPortBmiRegs.fmbm_odcfg),
++ "fmbm_odcfg");
++ for (i = 0; i < 3; ++i) {
++ FM_DMP_MEM_32(buf, n,
++ &(p_bmi->ohPortBmiRegs.fmbm_odcfg[i]));
++ }
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ogpr);
++ break;
++ case (1):
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rcfg);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rst);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rda);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfp);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_reth);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfed);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_ricp);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rebm);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfne);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfca);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfpne);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpso);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpp);
++ FM_DMP_TITLE(buf, n, &(p_bmi->rxPortBmiRegs.fmbm_rprai),
++ "fmbm_rprai");
++ for (i = 0; i < FM_PORT_PRS_RESULT_NUM_OF_WORDS; ++i) {
++ FM_DMP_MEM_32(buf, n,
++ &(p_bmi->rxPortBmiRegs.fmbm_rprai[i]));
++ }
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfqid);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_refqid);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfsdm);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfsem);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfene);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rcmne);
++ FM_DMP_TITLE(buf, n, &p_bmi->rxPortBmiRegs.fmbm_ebmpi,
++ "fmbm_ebmpi");
++ for (i = 0; i < FM_PORT_MAX_NUM_OF_EXT_POOLS; ++i) {
++ FM_DMP_MEM_32(buf, n,
++ &(p_bmi->rxPortBmiRegs.fmbm_ebmpi[i]));
++ }
++ FM_DMP_TITLE(buf, n, &p_bmi->rxPortBmiRegs.fmbm_acnt,
++ "fmbm_acnt");
++ for (i = 0; i < FM_PORT_MAX_NUM_OF_EXT_POOLS; ++i) {
++ FM_DMP_MEM_32(buf, n,
++ &(p_bmi->rxPortBmiRegs.fmbm_acnt[i]));
++ }
++ FM_DMP_TITLE(buf, n, &p_bmi->rxPortBmiRegs.fmbm_rcgm,
++ "fmbm_rcgm");
++ for (i = 0; i < FM_PORT_NUM_OF_CONGESTION_GRPS / 32; ++i) {
++ FM_DMP_MEM_32(buf, n,
++ &(p_bmi->rxPortBmiRegs.fmbm_rcgm[i]));
++ }
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rmpd);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rstc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfrc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfbc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rlfc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rffc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfcd);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfldec);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rodc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpcp);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rccn);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rtuc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rrquc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rduc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfuc);
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpac);
++ FM_DMP_TITLE(buf, n, &(p_bmi->rxPortBmiRegs.fmbm_rdcfg),
++ "fmbm_rdcfg");
++ for (i = 0; i < 3; ++i) {
++ FM_DMP_MEM_32(buf, n,
++ &(p_bmi->rxPortBmiRegs.fmbm_rdcfg[i]));
++ }
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rgpr);
++ break;
++ case (2):
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tcfg);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tst);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tda);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfp);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfed);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_ticp);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfdne);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfca);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tcfqid);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfeqid);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfene);
++#if (DPAA_VERSION >= 11)
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfne);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tcmne);
++#endif /* (DPAA_VERSION >= 11) */
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_trlmts);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_trlmt);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tstc);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfrc);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfdc);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfledc);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfufdc);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tpc);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tpcp);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tccn);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_ttuc);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_ttcquc);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tduc);
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfuc);
++ FM_DMP_TITLE(buf, n, &(p_bmi->txPortBmiRegs.fmbm_tdcfg),
++ "fmbm_tdcfg");
++ for (i = 0; i < 3 ; ++i) {
++ FM_DMP_MEM_32(buf, n,
++ &(p_bmi->txPortBmiRegs.fmbm_tdcfg[i]));
++ }
++ FM_DMP_SUBTITLE(buf, n, "\n");
++ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tgpr);
++ break;
++ }
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ return n;
++}
++
++int fm_port_dump_regs_qmi(void *h_dev, char *buf, int nn)
++{
++ t_FmPort *p_FmPort;
++ int n = nn;
++
++ p_FmPort = (t_FmPort *)h_dev;
++
++ FM_DMP_TITLE(buf, n, p_FmPort->p_FmPortQmiRegs, "Qmi Port Regs");
++
++ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pnc);
++ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pns);
++ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pnts);
++ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pnen);
++ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pnetfc);
++ FM_DMP_V32(buf, n,
++ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndn);
++ FM_DMP_V32(buf, n,
++ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndc);
++ FM_DMP_V32(buf, n,
++ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndtfc);
++ FM_DMP_V32(buf, n,
++ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndfdc);
++ FM_DMP_V32(buf, n,
++ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndcc);
++
++ FM_DMP_SUBTITLE(buf, n, "\n");
++
++ return n;
++}
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h
+new file mode 100644
+index 00000000..1e7636f4
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h
+@@ -0,0 +1,56 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ @File lnxwrp_sysfs_fm_port.h
++
++ @Description FM port sysfs functions.
++
++*/
++
++#ifndef LNXWRP_SYSFS_FM_PORT_H_
++#define LNXWRP_SYSFS_FM_PORT_H_
++
++#include "lnxwrp_sysfs.h"
++
++int fm_port_sysfs_create(struct device *dev);
++void fm_port_sysfs_destroy(struct device *dev);
++
++int fm_port_dump_regs(void *h_dev, char *buf, int n);
++int fm_port_dump_regs_bmi(void *h_dev, char *buf, int n);
++int fm_port_dump_regs_qmi(void *h_dev, char *buf, int n);
++
++#if (DPAA_VERSION >= 11)
++int fm_port_dump_ipv4_opt(void *h_dev, char *buf, int n);
++#endif
++
++#endif /* LNXWRP_SYSFS_FM_PORT_H_ */
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/xx/Makefile b/drivers/net/ethernet/freescale/sdk_fman/src/xx/Makefile
+new file mode 100644
+index 00000000..1071c22a
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/Makefile
+@@ -0,0 +1,18 @@
++#
++# Makefile for the Freescale Ethernet controllers
++#
++ccflags-y += -DVERSION=\"\"
++#
++# Include netcomm SW specific definitions
++include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
++
++obj-y += fsl-ncsw-xx.o
++
++ifneq ($(CONFIG_FMAN_ARM),y)
++fsl-ncsw-xx-objs := xx_linux.o \
++ module_strings.o
++else
++fsl-ncsw-xx-objs := xx_arm_linux.o \
++ module_strings.o
++endif
++
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/xx/module_strings.c b/drivers/net/ethernet/freescale/sdk_fman/src/xx/module_strings.c
+new file mode 100644
+index 00000000..d7fed170
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/module_strings.c
+@@ -0,0 +1,46 @@
++/*
++ * Copyright 2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/* Module names for debug messages */
++const char *moduleStrings[] =
++{
++ "", /* MODULE_UNKNOWN */
++ "FM", /* MODULE_FM */
++ "FM-MURAM", /* MODULE_FM_MURAM */
++ "FM-PCD", /* MODULE_FM_PCD */
++ "FM-RTC", /* MODULE_FM_RTC */
++ "FM-MAC", /* MODULE_FM_MAC */
++ "FM-Port", /* MODULE_FM_PORT */
++ "MM", /* MODULE_MM */
++ "FM-SP", /* MODULE_FM_SP */
++ "FM-MACSEC" /* MODULE_FM_MACSEC */
++};
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_arm_linux.c b/drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_arm_linux.c
+new file mode 100644
+index 00000000..dd3e376e
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_arm_linux.c
+@@ -0,0 +1,905 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File xx_arm_linux.c
++
++ @Description XX routines implementation for Linux.
++*//***************************************************************************/
++#include <linux/version.h>
++
++#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++#ifdef MODVERSIONS
++#include <config/modversions.h>
++#endif /* MODVERSIONS */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/ptrace.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/fs.h>
++#include <linux/vmalloc.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <linux/proc_fs.h>
++#include <linux/smp.h>
++#include <linux/of.h>
++#include <linux/irqdomain.h>
++
++#include <linux/workqueue.h>
++
++#ifdef BIGPHYSAREA_ENABLE
++#include <linux/bigphysarea.h>
++#endif /* BIGPHYSAREA_ENABLE */
++
++//#include <sysdev/fsl_soc.h>
++#include <asm/pgtable.h>
++#include <asm/irq.h>
++#include <asm/bitops.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/string.h>
++#include <asm/byteorder.h>
++#include <asm/page.h>
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "list_ext.h"
++#include "mm_ext.h"
++#include "sys_io_ext.h"
++#include "xx.h"
++
++
++#define __ERR_MODULE__ MODULE_UNKNOWN
++
++#ifdef BIGPHYSAREA_ENABLE
++#define MAX_ALLOCATION_SIZE (128 * 1024) /* Maximum size allocated with kmalloc is 128K */
++
++
++/* TODO: large allocations => use big phys area */
++/******************************************************************************
++ * routine: get_nr_pages
++ *
++ * description:
++ * calculates the number of memory pages for a given size (in bytes)
++ *
++ * arguments:
++ * size - the number of bytes
++ *
++ * return code:
++ * The number of pages
++ *
++ *****************************************************************************/
++static __inline__ uint32_t get_nr_pages (uint32_t size)
++{
++    /* round up: one extra page when size is not a whole number of pages */
++    return (uint32_t)((size >> PAGE_SHIFT) + ((size & (PAGE_SIZE - 1)) ? 1 : 0));
++}
++
++static bool in_big_phys_area (uint32_t addr)
++{
++ uint32_t base, size;
++
++ bigphysarea_get_details (&base, &size);
++ return ((addr >= base) && (addr < base + size));
++}
++#endif /* BIGPHYSAREA_ENABLE */
++
++void * xx_Malloc(uint32_t n)
++{
++ void *a;
++ uint32_t flags;
++
++ flags = XX_DisableAllIntr();
++#ifdef BIGPHYSAREA_ENABLE
++ if (n >= MAX_ALLOCATION_SIZE)
++ a = (void*)bigphysarea_alloc_pages(get_nr_pages(n), 0, GFP_ATOMIC);
++ else
++#endif /* BIGPHYSAREA_ENABLE */
++ a = (void *)kmalloc((uint32_t)n, GFP_ATOMIC);
++ if (!a)
++ XX_Print("No memory for XX_Malloc\n");
++ XX_RestoreAllIntr(flags);
++
++ return a;
++}
++
++void xx_Free(void *p)
++{
++#ifdef BIGPHYSAREA_ENABLE
++ if (in_big_phys_area ((uint32_t)p))
++ bigphysarea_free_pages(p);
++ else
++#endif /* BIGPHYSAREA_ENABLE */
++ kfree(p);
++}
++
++void XX_Exit(int status)
++{
++ WARN(1, "\n\nFMD: fatal error, driver can't go on!!!\n\n");
++}
++
++#define BUF_SIZE 512
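++/* On SMP builds the message is formatted into a local buffer first, so the
++ * "cpu N:" prefix and the text reach the kernel log in a single printk(). */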
++void XX_Print(char *str, ...)
++{
++ va_list args;
++#ifdef CONFIG_SMP
++ char buf[BUF_SIZE];
++#endif /* CONFIG_SMP */
++
++ va_start(args, str);
++#ifdef CONFIG_SMP
++    if (vsnprintf(buf, BUF_SIZE, str, args) >= BUF_SIZE)
++        printk(KERN_WARNING "String to print exceeds %d characters; output was truncated.\n", BUF_SIZE);
++ printk(KERN_CRIT "cpu %d: %s", raw_smp_processor_id(), buf);
++#else
++ vprintk(str, args);
++#endif /* CONFIG_SMP */
++ va_end(args);
++}
++
++void XX_Fprint(void *file, char *str, ...)
++{
++ va_list args;
++#ifdef CONFIG_SMP
++ char buf[BUF_SIZE];
++#endif /* CONFIG_SMP */
++
++ va_start(args, str);
++#ifdef CONFIG_SMP
++    if (vsnprintf(buf, BUF_SIZE, str, args) >= BUF_SIZE)
++        printk(KERN_WARNING "String to print exceeds %d characters; output was truncated.\n", BUF_SIZE);
++    printk(KERN_CRIT "cpu %d: %s", raw_smp_processor_id(), buf);
++
++#else
++ vprintk(str, args);
++#endif /* CONFIG_SMP */
++ va_end(args);
++}
++
++#ifdef DEBUG_XX_MALLOC
++typedef void (*t_ffn)(void *);
++typedef struct {
++ t_ffn f_free;
++ void *mem;
++ char *fname;
++ int fline;
++ uint32_t size;
++ t_List node;
++} t_MemDebug;
++#define MEMDBG_OBJECT(p_List) LIST_OBJECT(p_List, t_MemDebug, node)
++
++LIST(memDbgLst);
++
++
++void * XX_MallocDebug(uint32_t size, char *fname, int line)
++{
++ void *mem;
++ t_MemDebug *p_MemDbg;
++
++ p_MemDbg = (t_MemDebug *)xx_Malloc(sizeof(t_MemDebug));
++ if (p_MemDbg == NULL)
++ return NULL;
++
++ mem = xx_Malloc(size);
++ if (mem == NULL)
++ {
++ XX_Free(p_MemDbg);
++ return NULL;
++ }
++
++ INIT_LIST(&p_MemDbg->node);
++ p_MemDbg->f_free = xx_Free;
++ p_MemDbg->mem = mem;
++ p_MemDbg->fname = fname;
++ p_MemDbg->fline = line;
++ p_MemDbg->size = size+sizeof(t_MemDebug);
++ LIST_AddToTail(&p_MemDbg->node, &memDbgLst);
++
++ return mem;
++}
++
++void * XX_MallocSmartDebug(uint32_t size,
++ int memPartitionId,
++ uint32_t align,
++ char *fname,
++ int line)
++{
++ void *mem;
++ t_MemDebug *p_MemDbg;
++
++ p_MemDbg = (t_MemDebug *)XX_Malloc(sizeof(t_MemDebug));
++ if (p_MemDbg == NULL)
++ return NULL;
++
++ mem = xx_MallocSmart((uint32_t)size, memPartitionId, align);
++ if (mem == NULL)
++ {
++ XX_Free(p_MemDbg);
++ return NULL;
++ }
++
++ INIT_LIST(&p_MemDbg->node);
++ p_MemDbg->f_free = xx_FreeSmart;
++ p_MemDbg->mem = mem;
++ p_MemDbg->fname = fname;
++ p_MemDbg->fline = line;
++ p_MemDbg->size = size+sizeof(t_MemDebug);
++ LIST_AddToTail(&p_MemDbg->node, &memDbgLst);
++
++ return mem;
++}
++
++static void debug_free(void *mem)
++{
++ t_List *p_MemDbgLh = NULL;
++ t_MemDebug *p_MemDbg;
++ bool found = FALSE;
++
++ if (LIST_IsEmpty(&memDbgLst))
++ {
++ REPORT_ERROR(MAJOR, E_ALREADY_FREE, ("Unbalanced free (0x%08x)", mem));
++ return;
++ }
++
++ LIST_FOR_EACH(p_MemDbgLh, &memDbgLst)
++ {
++ p_MemDbg = MEMDBG_OBJECT(p_MemDbgLh);
++ if (p_MemDbg->mem == mem)
++ {
++ found = TRUE;
++ break;
++ }
++ }
++
++ if (!found)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_FOUND,
++ ("Attempt to free unallocated address (0x%08x)",mem));
++ dump_stack();
++ return;
++ }
++
++ LIST_Del(p_MemDbgLh);
++ p_MemDbg->f_free(mem);
++ p_MemDbg->f_free(p_MemDbg);
++}
++
++void XX_FreeSmart(void *p)
++{
++ debug_free(p);
++}
++
++
++void XX_Free(void *p)
++{
++ debug_free(p);
++}
++
++#else /* not DEBUG_XX_MALLOC */
++void * XX_Malloc(uint32_t size)
++{
++ return xx_Malloc(size);
++}
++
++void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment)
++{
++ return xx_MallocSmart(size,memPartitionId, alignment);
++}
++
++void XX_FreeSmart(void *p)
++{
++ xx_FreeSmart(p);
++}
++
++
++void XX_Free(void *p)
++{
++ xx_Free(p);
++}
++#endif /* not DEBUG_XX_MALLOC */
++
++
++#if (defined(REPORT_EVENTS) && (REPORT_EVENTS > 0))
++void XX_EventById(uint32_t event, t_Handle appId, uint16_t flags, char *msg)
++{
++ e_Event eventCode = (e_Event)event;
++
++ UNUSED(eventCode);
++ UNUSED(appId);
++ UNUSED(flags);
++ UNUSED(msg);
++}
++#endif /* (defined(REPORT_EVENTS) && ... */
++
++
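++/* The *_nort variants exist only on kernels with the PREEMPT_RT patches
++ * applied, where they avoid hard interrupt disabling; plain kernels fall
++ * back to the usual local_irq_save()/local_irq_restore(). */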
++uint32_t XX_DisableAllIntr(void)
++{
++ unsigned long flags;
++
++#ifdef local_irq_save_nort
++ local_irq_save_nort(flags);
++#else
++ local_irq_save(flags);
++#endif
++
++ return (uint32_t)flags;
++}
++
++void XX_RestoreAllIntr(uint32_t flags)
++{
++#ifdef local_irq_restore_nort
++ local_irq_restore_nort((unsigned long)flags);
++#else
++ local_irq_restore((unsigned long)flags);
++#endif
++}
++
++t_Error XX_Call( uint32_t qid, t_Error (* f)(t_Handle), t_Handle id, t_Handle appId, uint16_t flags )
++{
++ UNUSED(qid);
++ UNUSED(appId);
++ UNUSED(flags);
++
++ return f(id);
++}
++
++int XX_IsICacheEnable(void)
++{
++ return TRUE;
++}
++
++int XX_IsDCacheEnable(void)
++{
++ return TRUE;
++}
++
++
++typedef struct {
++ t_Isr *f_Isr;
++ t_Handle handle;
++} t_InterruptHandler;
++
++
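++/* Maps the wrapper's logical interrupt ids to their t_InterruptHandler
++ * objects; entries are installed by XX_SetIntr() and cleared again by
++ * XX_FreeIntr(). */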
++t_Handle interruptHandlers[0x00010000];
++
++static irqreturn_t LinuxInterruptHandler (int irq, void *dev_id)
++{
++ t_InterruptHandler *p_IntrHndl = (t_InterruptHandler *)dev_id;
++ p_IntrHndl->f_Isr(p_IntrHndl->handle);
++ return IRQ_HANDLED;
++}
++
++t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle)
++{
++ const char *device;
++ t_InterruptHandler *p_IntrHndl;
++
++ device = GetDeviceName(irq);
++ if (device == NULL)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Interrupt source - %d", irq));
++
++ p_IntrHndl = (t_InterruptHandler *)XX_Malloc(sizeof(t_InterruptHandler));
++ if (p_IntrHndl == NULL)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++    p_IntrHndl->f_Isr = f_Isr;
++    p_IntrHndl->handle = handle;
++
++    if (request_irq(GetDeviceIrqNum(irq), LinuxInterruptHandler, 0, device, p_IntrHndl) < 0)
++    {
++        /* don't leak the handler object when the request fails */
++        XX_Free(p_IntrHndl);
++        RETURN_ERROR(MAJOR, E_BUSY, ("Can't get IRQ %s\n", device));
++    }
++    interruptHandlers[irq] = p_IntrHndl;
++    disable_irq(GetDeviceIrqNum(irq));
++
++ return E_OK;
++}
++
++t_Error XX_FreeIntr(int irq)
++{
++ t_InterruptHandler *p_IntrHndl = interruptHandlers[irq];
++ free_irq(GetDeviceIrqNum(irq), p_IntrHndl);
++ XX_Free(p_IntrHndl);
++ interruptHandlers[irq] = 0;
++ return E_OK;
++}
++
++t_Error XX_EnableIntr(int irq)
++{
++ enable_irq(GetDeviceIrqNum(irq));
++ return E_OK;
++}
++
++t_Error XX_DisableIntr(int irq)
++{
++ disable_irq(GetDeviceIrqNum(irq));
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* Tasklet Service Routines */
++/*****************************************************************************/
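++/* On kernels >= 2.6.20 "tasklets" are emulated with delayed work items: the
++ * t_Tasklet wrapper carries the user callback and its data, and
++ * GenericTaskletCallback() unpacks them when the work item actually runs. */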
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++typedef struct
++{
++ t_Handle h_Data;
++ void (*f_Callback) (void *);
++ struct delayed_work dwork;
++} t_Tasklet;
++
++static void GenericTaskletCallback(struct work_struct *p_Work)
++{
++ t_Tasklet *p_Task = container_of(p_Work, t_Tasklet, dwork.work);
++
++ p_Task->f_Callback(p_Task->h_Data);
++}
++#endif /* LINUX_VERSION_CODE */
++
++
++t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ struct work_struct *p_Task;
++ p_Task = (struct work_struct *)XX_Malloc(sizeof(struct work_struct));
++ INIT_WORK(p_Task, routine, data);
++#else
++    t_Tasklet *p_Task = (t_Tasklet *)XX_Malloc(sizeof(t_Tasklet));
++    if (!p_Task)
++        return NULL;
++ p_Task->h_Data = data;
++ p_Task->f_Callback = routine;
++ INIT_DELAYED_WORK(&p_Task->dwork, GenericTaskletCallback);
++#endif /* LINUX_VERSION_CODE */
++
++ return (t_TaskletHandle)p_Task;
++}
++
++
++void XX_FreeTasklet (t_TaskletHandle h_Tasklet)
++{
++ if (h_Tasklet)
++ XX_Free(h_Tasklet);
++}
++
++int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate)
++{
++ int ans;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ if (immediate)
++ ans = schedule_work(h_Tasklet);
++ else
++ ans = schedule_delayed_work(h_Tasklet, 1);
++#else
++ if (immediate)
++ ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, 0);
++ else
++ ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, HZ);
++#endif /* LINUX_VERSION_CODE */
++
++ return ans;
++}
++
++void XX_FlushScheduledTasks(void)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++ flush_scheduled_tasks();
++#else
++ flush_scheduled_work();
++#endif /* LINUX_VERSION_CODE */
++}
++
++int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ return (int)(((struct work_struct *)h_Tasklet)->pending);
++#else
++ return (int)delayed_work_pending(&((t_Tasklet *)h_Tasklet)->dwork);
++#endif /* LINUX_VERSION_CODE */
++}
++
++void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++ ((struct tq_struct *)h_Tasklet)->data = data;
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ ((struct work_struct *)h_Tasklet)->data = data;
++#else
++ ((t_Tasklet *)h_Tasklet)->h_Data = data;
++#endif /* LINUX_VERSION_CODE */
++}
++
++t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ return (t_Handle)(((struct work_struct *)h_Tasklet)->data);
++#else
++ return ((t_Tasklet *)h_Tasklet)->h_Data;
++#endif /* LINUX_VERSION_CODE */
++}
++
++
++/*****************************************************************************/
++/* Spinlock Service Routines */
++/*****************************************************************************/
++
++t_Handle XX_InitSpinlock(void)
++{
++ spinlock_t *p_Spinlock = (spinlock_t *)XX_Malloc(sizeof(spinlock_t));
++ if (!p_Spinlock)
++ return NULL;
++
++ spin_lock_init(p_Spinlock);
++
++ return (t_Handle)p_Spinlock;
++}
++
++void XX_FreeSpinlock(t_Handle h_Spinlock)
++{
++ if (h_Spinlock)
++ XX_Free(h_Spinlock);
++}
++
++void XX_LockSpinlock(t_Handle h_Spinlock)
++{
++ spin_lock((spinlock_t *)h_Spinlock);
++}
++
++void XX_UnlockSpinlock(t_Handle h_Spinlock)
++{
++ spin_unlock((spinlock_t *)h_Spinlock);
++}
++
++uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock)
++{
++ unsigned long intrFlags;
++ spin_lock_irqsave((spinlock_t *)h_Spinlock, intrFlags);
++ return intrFlags;
++}
++
++void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags)
++{
++ spin_unlock_irqrestore((spinlock_t *)h_Spinlock, (unsigned long)intrFlags);
++}
++
++
++/*****************************************************************************/
++/* Timers Service Routines */
++/*****************************************************************************/
++/* The current time, in millisecond resolution */
++uint32_t XX_CurrentTime(void)
++{
++    return jiffies_to_msecs(jiffies);
++}
++
++
++t_Handle XX_CreateTimer(void)
++{
++ struct timer_list *p_Timer = (struct timer_list *)XX_Malloc(sizeof(struct timer_list));
++ if (p_Timer)
++ {
++ memset(p_Timer, 0, sizeof(struct timer_list));
++ init_timer(p_Timer);
++ }
++ return (t_Handle)p_Timer;
++}
++
++void XX_FreeTimer(t_Handle h_Timer)
++{
++ if (h_Timer)
++ XX_Free(h_Timer);
++}
++
++void XX_StartTimer(t_Handle h_Timer,
++ uint32_t msecs,
++ bool periodic,
++ void (*f_TimerExpired)(t_Handle),
++ t_Handle h_Arg)
++{
++ int tmp_jiffies = (msecs*HZ)/1000;
++ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
++
++ SANITY_CHECK_RETURN((periodic == FALSE), E_NOT_SUPPORTED);
++
++ p_Timer->function = (void (*)(unsigned long))f_TimerExpired;
++ p_Timer->data = (unsigned long)h_Arg;
++ if ((msecs*HZ)%1000)
++ tmp_jiffies++;
++ p_Timer->expires = (jiffies + tmp_jiffies);
++
++ add_timer((struct timer_list *)h_Timer);
++}
++
++void XX_SetTimerData(t_Handle h_Timer, t_Handle data)
++{
++ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
++
++ p_Timer->data = (unsigned long)data;
++}
++
++t_Handle XX_GetTimerData(t_Handle h_Timer)
++{
++ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
++
++ return (t_Handle)p_Timer->data;
++}
++
++uint32_t XX_GetExpirationTime(t_Handle h_Timer)
++{
++ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
++
++ return (uint32_t)p_Timer->expires;
++}
++
++void XX_StopTimer(t_Handle h_Timer)
++{
++ del_timer((struct timer_list *)h_Timer);
++}
++
++void XX_ModTimer(t_Handle h_Timer, uint32_t msecs)
++{
++ int tmp_jiffies = (msecs*HZ)/1000;
++
++ if ((msecs*HZ)%1000)
++ tmp_jiffies++;
++ mod_timer((struct timer_list *)h_Timer, jiffies + tmp_jiffies);
++}
++
++int XX_TimerIsActive(t_Handle h_Timer)
++{
++ return timer_pending((struct timer_list *)h_Timer);
++}
++
++uint32_t XX_Sleep(uint32_t msecs)
++{
++ int tmp_jiffies = (msecs*HZ)/1000;
++
++ if ((msecs*HZ)%1000)
++ tmp_jiffies++;
++ return schedule_timeout(tmp_jiffies);
++}
++
++/* BEWARE: the UDelay routine is busy-waiting! */
++void XX_UDelay(uint32_t usecs)
++{
++ udelay(usecs);
++}
++
++/* TODO: verify that these are correct */
++#define MSG_BODY_SIZE 512
++typedef t_Error (t_MsgHandler) (t_Handle h_Mod, uint32_t msgId, uint8_t msgBody[MSG_BODY_SIZE]);
++typedef void (t_MsgCompletionCB) (t_Handle h_Arg, uint8_t msgBody[MSG_BODY_SIZE]);
++t_Error XX_SendMessage(char *p_DestAddr,
++ uint32_t msgId,
++ uint8_t msgBody[MSG_BODY_SIZE],
++ t_MsgCompletionCB *f_CompletionCB,
++ t_Handle h_CBArg);
++
++typedef struct {
++ char *p_Addr;
++ t_MsgHandler *f_MsgHandlerCB;
++ t_Handle h_Mod;
++ t_List node;
++} t_MsgHndlr;
++#define MSG_HNDLR_OBJECT(ptr) LIST_OBJECT(ptr, t_MsgHndlr, node)
++
++LIST(msgHndlrList);
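++/* Registry of message handlers, keyed by address string. XX_SendMessage()
++ * resolves the destination with FindMsgHndlr() (a substring match on the
++ * registered address) and invokes the handler callback synchronously. */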
++
++static void EnqueueMsgHndlr(t_MsgHndlr *p_MsgHndlr)
++{
++ uint32_t intFlags;
++
++ intFlags = XX_DisableAllIntr();
++ LIST_AddToTail(&p_MsgHndlr->node, &msgHndlrList);
++ XX_RestoreAllIntr(intFlags);
++}
++/* TODO: add this for multi-platform support
++static t_MsgHndlr * DequeueMsgHndlr(void)
++{
++ t_MsgHndlr *p_MsgHndlr = NULL;
++ uint32_t intFlags;
++
++ intFlags = XX_DisableAllIntr();
++ if (!LIST_IsEmpty(&msgHndlrList))
++ {
++ p_MsgHndlr = MSG_HNDLR_OBJECT(msgHndlrList.p_Next);
++ LIST_DelAndInit(&p_MsgHndlr->node);
++ }
++ XX_RestoreAllIntr(intFlags);
++
++ return p_MsgHndlr;
++}
++*/
++static t_MsgHndlr * FindMsgHndlr(char *p_Addr)
++{
++ t_MsgHndlr *p_MsgHndlr;
++ t_List *p_Pos;
++
++ LIST_FOR_EACH(p_Pos, &msgHndlrList)
++ {
++ p_MsgHndlr = MSG_HNDLR_OBJECT(p_Pos);
++ if (strstr(p_MsgHndlr->p_Addr, p_Addr))
++ return p_MsgHndlr;
++ }
++
++ return NULL;
++}
++
++t_Error XX_RegisterMessageHandler (char *p_Addr, t_MsgHandler *f_MsgHandlerCB, t_Handle h_Mod)
++{
++ t_MsgHndlr *p_MsgHndlr;
++ uint32_t len;
++
++ p_MsgHndlr = (t_MsgHndlr*)XX_Malloc(sizeof(t_MsgHndlr));
++ if (!p_MsgHndlr)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("message handler object!!!"));
++ memset(p_MsgHndlr, 0, sizeof(t_MsgHndlr));
++
++ len = strlen(p_Addr);
++    p_MsgHndlr->p_Addr = (char*)XX_Malloc(len+1);
++    if (!p_MsgHndlr->p_Addr)
++    {
++        XX_Free(p_MsgHndlr);
++        RETURN_ERROR(MINOR, E_NO_MEMORY, ("message handler address!!!"));
++    }
++    strncpy(p_MsgHndlr->p_Addr, p_Addr, (uint32_t)(len+1));
++
++ p_MsgHndlr->f_MsgHandlerCB = f_MsgHandlerCB;
++ p_MsgHndlr->h_Mod = h_Mod;
++ INIT_LIST(&p_MsgHndlr->node);
++ EnqueueMsgHndlr(p_MsgHndlr);
++
++ return E_OK;
++}
++
++t_Error XX_UnregisterMessageHandler (char *p_Addr)
++{
++ t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_Addr);
++ if (!p_MsgHndlr)
++ RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!"));
++
++ LIST_Del(&p_MsgHndlr->node);
++ XX_Free(p_MsgHndlr->p_Addr);
++ XX_Free(p_MsgHndlr);
++
++ return E_OK;
++}
++
++t_Error XX_SendMessage(char *p_DestAddr,
++ uint32_t msgId,
++ uint8_t msgBody[MSG_BODY_SIZE],
++ t_MsgCompletionCB *f_CompletionCB,
++ t_Handle h_CBArg)
++{
++ t_Error ans;
++ t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_DestAddr);
++ if (!p_MsgHndlr)
++ RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!"));
++
++ ans = p_MsgHndlr->f_MsgHandlerCB(p_MsgHndlr->h_Mod, msgId, msgBody);
++
++ if (f_CompletionCB)
++ f_CompletionCB(h_CBArg, msgBody);
++
++ return ans;
++}
++
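++/* The XX_Ipc* entry points below are stubs: they ignore their arguments and
++ * simply report success. */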
++t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH],
++ t_IpcMsgHandler *f_MsgHandler,
++ t_Handle h_Module,
++ uint32_t replyLength)
++{
++ UNUSED(addr);UNUSED(f_MsgHandler);UNUSED(h_Module);UNUSED(replyLength);
++ return E_OK;
++}
++
++t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH])
++{
++ UNUSED(addr);
++ return E_OK;
++}
++
++
++t_Error XX_IpcSendMessage(t_Handle h_Session,
++ uint8_t *p_Msg,
++ uint32_t msgLength,
++ uint8_t *p_Reply,
++ uint32_t *p_ReplyLength,
++ t_IpcMsgCompletion *f_Completion,
++ t_Handle h_Arg)
++{
++ UNUSED(h_Session); UNUSED(p_Msg); UNUSED(msgLength); UNUSED(p_Reply);
++ UNUSED(p_ReplyLength); UNUSED(f_Completion); UNUSED(h_Arg);
++ return E_OK;
++}
++
++t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH],
++ char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH])
++{
++ UNUSED(destAddr); UNUSED(srcAddr);
++ return E_OK;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++int GetDeviceIrqNum(int irq)
++{
++ struct device_node *iPar;
++ struct irq_domain *irqHost;
++ uint32_t hwIrq;
++
++ /* Get the interrupt controller */
++ iPar = of_find_node_by_name(NULL, "mpic");
++ hwIrq = 0;
++
++ ASSERT_COND(iPar != NULL);
++ /* Get the irq host */
++ irqHost = irq_find_host(iPar);
++ of_node_put(iPar);
++
++ /* Create irq mapping */
++ return irq_create_mapping(irqHost, hwIrq);
++}
++#else
++#error "kernel not supported!!!"
++#endif /* LINUX_VERSION_CODE */
++
++void * XX_PhysToVirt(physAddress_t addr)
++{
++ return UINT_TO_PTR(SYS_PhysToVirt((uint64_t)addr));
++}
++
++physAddress_t XX_VirtToPhys(void * addr)
++{
++ return (physAddress_t)SYS_VirtToPhys(PTR_TO_UINT(addr));
++}
++
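++/* Aligned allocation: over-allocate by 'alignment' plus one pointer, round
++ * the address up to the requested (power-of-two) alignment, and stash the
++ * original allocation address in the word just below the returned block so
++ * that xx_FreeSmart() can recover and free it. */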
++void * xx_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment)
++{
++ uintptr_t *returnCode, tmp;
++
++ if (alignment < sizeof(uintptr_t))
++ alignment = sizeof(uintptr_t);
++ size += alignment + sizeof(returnCode);
++ tmp = (uintptr_t)xx_Malloc(size);
++ if (tmp == 0)
++ return NULL;
++ returnCode = (uintptr_t*)((tmp + alignment + sizeof(returnCode)) & ~((uintptr_t)alignment - 1));
++ *(returnCode - 1) = tmp;
++
++ return (void*)returnCode;
++}
++
++void xx_FreeSmart(void *p)
++{
++ xx_Free((void*)(*((uintptr_t *)(p) - 1)));
++}
+diff --git a/drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_linux.c b/drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_linux.c
+new file mode 100644
+index 00000000..992757d4
+--- /dev/null
++++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_linux.c
+@@ -0,0 +1,918 @@
++/*
++ * Copyright 2008-2012 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File xx_linux.c
++
++ @Description XX routines implementation for Linux.
++*//***************************************************************************/
++#include <linux/version.h>
++
++#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++#ifdef MODVERSIONS
++#include <config/modversions.h>
++#endif /* MODVERSIONS */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/ptrace.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/fs.h>
++#include <linux/vmalloc.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <linux/proc_fs.h>
++#include <linux/smp.h>
++#include <linux/of.h>
++#ifdef CONFIG_FMAN_ARM
++#include <linux/irqdomain.h>
++#endif
++
++#include <linux/workqueue.h>
++
++#ifdef BIGPHYSAREA_ENABLE
++#include <linux/bigphysarea.h>
++#endif /* BIGPHYSAREA_ENABLE */
++
++#ifndef CONFIG_FMAN_ARM
++#include <sysdev/fsl_soc.h>
++#endif
++#include <asm/pgtable.h>
++#include <asm/irq.h>
++#include <asm/bitops.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/string.h>
++#include <asm/byteorder.h>
++#include <asm/page.h>
++
++#include "error_ext.h"
++#include "std_ext.h"
++#include "list_ext.h"
++#include "mm_ext.h"
++#include "sys_io_ext.h"
++#include "xx.h"
++
++
++#define __ERR_MODULE__ MODULE_UNKNOWN
++
++#ifdef BIGPHYSAREA_ENABLE
++#define MAX_ALLOCATION_SIZE (128 * 1024) /* Maximum size allocated with kmalloc is 128K */
++
++
++/* TODO: large allocations => use big phys area */
++/******************************************************************************
++ * routine: get_nr_pages
++ *
++ * description:
++ * calculates the number of memory pages for a given size (in bytes)
++ *
++ * arguments:
++ * size - the number of bytes
++ *
++ * return code:
++ * The number of pages
++ *
++ *****************************************************************************/
++static __inline__ uint32_t get_nr_pages (uint32_t size)
++{
++    return (uint32_t)((size >> PAGE_SHIFT) + ((size & (PAGE_SIZE - 1)) ? 1 : 0));
++}
++
++static bool in_big_phys_area (uint32_t addr)
++{
++ uint32_t base, size;
++
++ bigphysarea_get_details (&base, &size);
++ return ((addr >= base) && (addr < base + size));
++}
++#endif /* BIGPHYSAREA_ENABLE */
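
get_nr_pages() above rounds a byte count up to whole pages. A minimal standalone check of that arithmetic, assuming 4 KiB pages for the demo:

    /* Page round-up as in get_nr_pages(): one extra page on any remainder. */
    #include <assert.h>
    #include <stdint.h>

    #define DEMO_PAGE_SHIFT 12
    #define DEMO_PAGE_SIZE  (1u << DEMO_PAGE_SHIFT)

    static uint32_t nr_pages(uint32_t size)
    {
        return (size >> DEMO_PAGE_SHIFT) +
               ((size & (DEMO_PAGE_SIZE - 1)) ? 1 : 0);
    }

    int main(void)
    {
        assert(nr_pages(0) == 0);
        assert(nr_pages(1) == 1);
        assert(nr_pages(DEMO_PAGE_SIZE) == 1);
        assert(nr_pages(DEMO_PAGE_SIZE + 1) == 2);
        return 0;
    }
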
++
++void * xx_Malloc(uint32_t n)
++{
++ void *a;
++ uint32_t flags;
++
++ flags = XX_DisableAllIntr();
++#ifdef BIGPHYSAREA_ENABLE
++ if (n >= MAX_ALLOCATION_SIZE)
++ a = (void*)bigphysarea_alloc_pages(get_nr_pages(n), 0, GFP_ATOMIC);
++ else
++#endif /* BIGPHYSAREA_ENABLE */
++ a = (void *)kmalloc((uint32_t)n, GFP_ATOMIC);
++ if (!a)
++ XX_Print("No memory for XX_Malloc\n");
++ XX_RestoreAllIntr(flags);
++
++ return a;
++}
++
++void xx_Free(void *p)
++{
++#ifdef BIGPHYSAREA_ENABLE
++ if (in_big_phys_area ((uint32_t)p))
++ bigphysarea_free_pages(p);
++ else
++#endif /* BIGPHYSAREA_ENABLE */
++ kfree(p);
++}
++
++void XX_Exit(int status)
++{
++ WARN(1, "\n\nFMD: fatal error, driver can't go on!!!\n\n");
++}
++
++#define BUF_SIZE 512
++void XX_Print(char *str, ...)
++{
++ va_list args;
++#ifdef CONFIG_SMP
++ char buf[BUF_SIZE];
++#endif /* CONFIG_SMP */
++
++ va_start(args, str);
++#ifdef CONFIG_SMP
++ if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE)
++        printk(KERN_WARNING "String to print exceeds %d characters; it was not printed completely.\n", BUF_SIZE);
++ printk(KERN_CRIT "cpu%d/%d: %s", raw_smp_processor_id(), NR_CPUS, buf);
++#else
++ vprintk(str, args);
++#endif /* CONFIG_SMP */
++ va_end(args);
++}
++
++void XX_Fprint(void *file, char *str, ...)
++{
++ va_list args;
++#ifdef CONFIG_SMP
++ char buf[BUF_SIZE];
++#endif /* CONFIG_SMP */
++
++ va_start(args, str);
++#ifdef CONFIG_SMP
++ if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE)
++        printk(KERN_WARNING "String to print exceeds %d characters; it was not printed completely.\n", BUF_SIZE);
++ printk (KERN_CRIT "cpu%d/%d: %s", raw_smp_processor_id(), NR_CPUS, buf);
++
++#else
++ vprintk(str, args);
++#endif /* CONFIG_SMP */
++ va_end(args);
++}
++
++#ifdef DEBUG_XX_MALLOC
++typedef void (*t_ffn)(void *);
++typedef struct {
++ t_ffn f_free;
++ void *mem;
++ char *fname;
++ int fline;
++ uint32_t size;
++ t_List node;
++} t_MemDebug;
++#define MEMDBG_OBJECT(p_List) LIST_OBJECT(p_List, t_MemDebug, node)
++
++LIST(memDbgLst);
++
++
++void * XX_MallocDebug(uint32_t size, char *fname, int line)
++{
++ void *mem;
++ t_MemDebug *p_MemDbg;
++
++ p_MemDbg = (t_MemDebug *)xx_Malloc(sizeof(t_MemDebug));
++ if (p_MemDbg == NULL)
++ return NULL;
++
++ mem = xx_Malloc(size);
++ if (mem == NULL)
++ {
++ XX_Free(p_MemDbg);
++ return NULL;
++ }
++
++ INIT_LIST(&p_MemDbg->node);
++ p_MemDbg->f_free = xx_Free;
++ p_MemDbg->mem = mem;
++ p_MemDbg->fname = fname;
++ p_MemDbg->fline = line;
++ p_MemDbg->size = size+sizeof(t_MemDebug);
++ LIST_AddToTail(&p_MemDbg->node, &memDbgLst);
++
++ return mem;
++}
++
++void * XX_MallocSmartDebug(uint32_t size,
++ int memPartitionId,
++ uint32_t align,
++ char *fname,
++ int line)
++{
++ void *mem;
++ t_MemDebug *p_MemDbg;
++
++ p_MemDbg = (t_MemDebug *)XX_Malloc(sizeof(t_MemDebug));
++ if (p_MemDbg == NULL)
++ return NULL;
++
++ mem = xx_MallocSmart((uint32_t)size, memPartitionId, align);
++ if (mem == NULL)
++ {
++ XX_Free(p_MemDbg);
++ return NULL;
++ }
++
++ INIT_LIST(&p_MemDbg->node);
++ p_MemDbg->f_free = xx_FreeSmart;
++ p_MemDbg->mem = mem;
++ p_MemDbg->fname = fname;
++ p_MemDbg->fline = line;
++ p_MemDbg->size = size+sizeof(t_MemDebug);
++ LIST_AddToTail(&p_MemDbg->node, &memDbgLst);
++
++ return mem;
++}
++
++static void debug_free(void *mem)
++{
++ t_List *p_MemDbgLh = NULL;
++ t_MemDebug *p_MemDbg;
++ bool found = FALSE;
++
++ if (LIST_IsEmpty(&memDbgLst))
++ {
++ REPORT_ERROR(MAJOR, E_ALREADY_FREE, ("Unbalanced free (0x%08x)", mem));
++ return;
++ }
++
++ LIST_FOR_EACH(p_MemDbgLh, &memDbgLst)
++ {
++ p_MemDbg = MEMDBG_OBJECT(p_MemDbgLh);
++ if (p_MemDbg->mem == mem)
++ {
++ found = TRUE;
++ break;
++ }
++ }
++
++ if (!found)
++ {
++ REPORT_ERROR(MAJOR, E_NOT_FOUND,
++ ("Attempt to free unallocated address (0x%08x)",mem));
++ dump_stack();
++ return;
++ }
++
++ LIST_Del(p_MemDbgLh);
++ p_MemDbg->f_free(mem);
++ p_MemDbg->f_free(p_MemDbg);
++}
++
++void XX_FreeSmart(void *p)
++{
++ debug_free(p);
++}
++
++
++void XX_Free(void *p)
++{
++ debug_free(p);
++}
++
++#else /* not DEBUG_XX_MALLOC */
++void * XX_Malloc(uint32_t size)
++{
++ return xx_Malloc(size);
++}
++
++void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment)
++{
++ return xx_MallocSmart(size,memPartitionId, alignment);
++}
++
++void XX_FreeSmart(void *p)
++{
++ xx_FreeSmart(p);
++}
++
++
++void XX_Free(void *p)
++{
++ xx_Free(p);
++}
++#endif /* not DEBUG_XX_MALLOC */
++
++
++#if (defined(REPORT_EVENTS) && (REPORT_EVENTS > 0))
++void XX_EventById(uint32_t event, t_Handle appId, uint16_t flags, char *msg)
++{
++ e_Event eventCode = (e_Event)event;
++
++ UNUSED(eventCode);
++ UNUSED(appId);
++ UNUSED(flags);
++ UNUSED(msg);
++}
++#endif /* (defined(REPORT_EVENTS) && ... */
++
++
++uint32_t XX_DisableAllIntr(void)
++{
++ unsigned long flags;
++
++#ifdef local_irq_save_nort
++ local_irq_save_nort(flags);
++#else
++ local_irq_save(flags);
++#endif
++
++ return (uint32_t)flags;
++}
++
++void XX_RestoreAllIntr(uint32_t flags)
++{
++#ifdef local_irq_restore_nort
++ local_irq_restore_nort((unsigned long)flags);
++#else
++ local_irq_restore((unsigned long)flags);
++#endif
++}
++
++t_Error XX_Call( uint32_t qid, t_Error (* f)(t_Handle), t_Handle id, t_Handle appId, uint16_t flags )
++{
++ UNUSED(qid);
++ UNUSED(appId);
++ UNUSED(flags);
++
++ return f(id);
++}
++
++int XX_IsICacheEnable(void)
++{
++ return TRUE;
++}
++
++int XX_IsDCacheEnable(void)
++{
++ return TRUE;
++}
++
++
++typedef struct {
++ t_Isr *f_Isr;
++ t_Handle handle;
++} t_InterruptHandler;
++
++
++t_Handle interruptHandlers[0x00010000];
++
++#ifdef CONFIG_FMAN_ARM
++static irqreturn_t LinuxInterruptHandler (int irq, void *dev_id)
++{
++ t_InterruptHandler *p_IntrHndl = (t_InterruptHandler *)dev_id;
++ p_IntrHndl->f_Isr(p_IntrHndl->handle);
++ return IRQ_HANDLED;
++}
++#endif
++
++t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle)
++{
++#ifdef CONFIG_FMAN_ARM
++ const char *device;
++ t_InterruptHandler *p_IntrHndl;
++
++ device = GetDeviceName(irq);
++ if (device == NULL)
++ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Interrupt source - %d", irq));
++
++ p_IntrHndl = (t_InterruptHandler *)XX_Malloc(sizeof(t_InterruptHandler));
++ if (p_IntrHndl == NULL)
++ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
++ p_IntrHndl->f_Isr = f_Isr;
++ p_IntrHndl->handle = handle;
++ interruptHandlers[irq] = p_IntrHndl;
++
++ if (request_irq(GetDeviceIrqNum(irq), LinuxInterruptHandler, 0, device, p_IntrHndl) < 0)
++ RETURN_ERROR(MAJOR, E_BUSY, ("Can't get IRQ %s\n", device));
++ disable_irq(GetDeviceIrqNum(irq));
++#endif
++ return E_OK;
++}
++
++t_Error XX_FreeIntr(int irq)
++{
++ t_InterruptHandler *p_IntrHndl = interruptHandlers[irq];
++ free_irq(GetDeviceIrqNum(irq), p_IntrHndl);
++ XX_Free(p_IntrHndl);
++ interruptHandlers[irq] = 0;
++ return E_OK;
++}
++
++t_Error XX_EnableIntr(int irq)
++{
++ enable_irq(GetDeviceIrqNum(irq));
++ return E_OK;
++}
++
++t_Error XX_DisableIntr(int irq)
++{
++ disable_irq(GetDeviceIrqNum(irq));
++ return E_OK;
++}
++
++
++/*****************************************************************************/
++/* Tasklet Service Routines */
++/*****************************************************************************/
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++typedef struct
++{
++ t_Handle h_Data;
++ void (*f_Callback) (void *);
++ struct delayed_work dwork;
++} t_Tasklet;
++
++static void GenericTaskletCallback(struct work_struct *p_Work)
++{
++ t_Tasklet *p_Task = container_of(p_Work, t_Tasklet, dwork.work);
++
++ p_Task->f_Callback(p_Task->h_Data);
++}
++#endif /* LINUX_VERSION_CODE */
++
++
++t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ struct work_struct *p_Task;
++ p_Task = (struct work_struct *)XX_Malloc(sizeof(struct work_struct));
++ INIT_WORK(p_Task, routine, data);
++#else
++ t_Tasklet *p_Task = (t_Tasklet *)XX_Malloc(sizeof(t_Tasklet));
++ p_Task->h_Data = data;
++ p_Task->f_Callback = routine;
++ INIT_DELAYED_WORK(&p_Task->dwork, GenericTaskletCallback);
++#endif /* LINUX_VERSION_CODE */
++
++ return (t_TaskletHandle)p_Task;
++}
++
++
++void XX_FreeTasklet (t_TaskletHandle h_Tasklet)
++{
++ if (h_Tasklet)
++ XX_Free(h_Tasklet);
++}
++
++int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate)
++{
++ int ans;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ if (immediate)
++ ans = schedule_work(h_Tasklet);
++ else
++ ans = schedule_delayed_work(h_Tasklet, 1);
++#else
++ if (immediate)
++ ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, 0);
++ else
++ ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, HZ);
++#endif /* LINUX_VERSION_CODE */
++
++ return ans;
++}
++
++void XX_FlushScheduledTasks(void)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++ flush_scheduled_tasks();
++#else
++ flush_scheduled_work();
++#endif /* LINUX_VERSION_CODE */
++}
++
++int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ return (int)(((struct work_struct *)h_Tasklet)->pending);
++#else
++ return (int)delayed_work_pending(&((t_Tasklet *)h_Tasklet)->dwork);
++#endif /* LINUX_VERSION_CODE */
++}
++
++void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++ ((struct tq_struct *)h_Tasklet)->data = data;
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ ((struct work_struct *)h_Tasklet)->data = data;
++#else
++ ((t_Tasklet *)h_Tasklet)->h_Data = data;
++#endif /* LINUX_VERSION_CODE */
++}
++
++t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ return (t_Handle)(((struct work_struct *)h_Tasklet)->data);
++#else
++ return ((t_Tasklet *)h_Tasklet)->h_Data;
++#endif /* LINUX_VERSION_CODE */
++}
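
On kernels from 2.6.20 onward, the tasklet API above is emulated with delayed work items: the wrapper struct embeds a struct delayed_work, and a generic callback recovers the wrapper via container_of() before invoking the user routine. A minimal kernel-module sketch of the same pattern (my_task, my_cb and the demo_* names are invented for illustration):

    /* Delayed-work wrapper pattern, as used by XX_InitTasklet() above. */
    #include <linux/module.h>
    #include <linux/workqueue.h>

    struct my_task {
        void *data;
        void (*fn)(void *);
        struct delayed_work dwork;
    };

    static void my_cb(struct work_struct *work)
    {
        struct my_task *t = container_of(work, struct my_task, dwork.work);

        t->fn(t->data); /* dispatch to the user-supplied routine */
    }

    static void my_fn(void *data)
    {
        pr_info("tasklet emulation ran with %p\n", data);
    }

    static struct my_task task;

    static int __init demo_init(void)
    {
        task.fn = my_fn;
        task.data = NULL;
        INIT_DELAYED_WORK(&task.dwork, my_cb);
        /* delay 0 ~= "immediate", HZ ~= one second, as in XX_ScheduleTask() */
        schedule_delayed_work(&task.dwork, 0);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        cancel_delayed_work_sync(&task.dwork);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
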
++
++
++/*****************************************************************************/
++/* Spinlock Service Routines */
++/*****************************************************************************/
++
++t_Handle XX_InitSpinlock(void)
++{
++ spinlock_t *p_Spinlock = (spinlock_t *)XX_Malloc(sizeof(spinlock_t));
++ if (!p_Spinlock)
++ return NULL;
++
++ spin_lock_init(p_Spinlock);
++
++ return (t_Handle)p_Spinlock;
++}
++
++void XX_FreeSpinlock(t_Handle h_Spinlock)
++{
++ if (h_Spinlock)
++ XX_Free(h_Spinlock);
++}
++
++void XX_LockSpinlock(t_Handle h_Spinlock)
++{
++ spin_lock((spinlock_t *)h_Spinlock);
++}
++
++void XX_UnlockSpinlock(t_Handle h_Spinlock)
++{
++ spin_unlock((spinlock_t *)h_Spinlock);
++}
++
++uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock)
++{
++ unsigned long intrFlags;
++ spin_lock_irqsave((spinlock_t *)h_Spinlock, intrFlags);
++ return intrFlags;
++}
++
++void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags)
++{
++ spin_unlock_irqrestore((spinlock_t *)h_Spinlock, (unsigned long)intrFlags);
++}
++
++
++/*****************************************************************************/
++/* Timers Service Routines */
++/*****************************************************************************/
++/* The current time, in millisecond resolution */
++uint32_t XX_CurrentTime(void)
++{
++ return (jiffies*1000)/HZ;
++}
++
++
++t_Handle XX_CreateTimer(void)
++{
++ struct timer_list *p_Timer = (struct timer_list *)XX_Malloc(sizeof(struct timer_list));
++ if (p_Timer)
++ {
++ memset(p_Timer, 0, sizeof(struct timer_list));
++ init_timer(p_Timer);
++ }
++ return (t_Handle)p_Timer;
++}
++
++void XX_FreeTimer(t_Handle h_Timer)
++{
++ if (h_Timer)
++ XX_Free(h_Timer);
++}
++
++void XX_StartTimer(t_Handle h_Timer,
++ uint32_t msecs,
++ bool periodic,
++ void (*f_TimerExpired)(t_Handle),
++ t_Handle h_Arg)
++{
++ int tmp_jiffies = (msecs*HZ)/1000;
++ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
++
++ SANITY_CHECK_RETURN((periodic == FALSE), E_NOT_SUPPORTED);
++
++ p_Timer->function = (void (*)(unsigned long))f_TimerExpired;
++ p_Timer->data = (unsigned long)h_Arg;
++ if ((msecs*HZ)%1000)
++ tmp_jiffies++;
++ p_Timer->expires = (jiffies + tmp_jiffies);
++
++ add_timer((struct timer_list *)h_Timer);
++}
++
++void XX_SetTimerData(t_Handle h_Timer, t_Handle data)
++{
++ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
++
++ p_Timer->data = (unsigned long)data;
++}
++
++t_Handle XX_GetTimerData(t_Handle h_Timer)
++{
++ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
++
++ return (t_Handle)p_Timer->data;
++}
++
++uint32_t XX_GetExpirationTime(t_Handle h_Timer)
++{
++ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
++
++ return (uint32_t)p_Timer->expires;
++}
++
++void XX_StopTimer(t_Handle h_Timer)
++{
++ del_timer((struct timer_list *)h_Timer);
++}
++
++void XX_ModTimer(t_Handle h_Timer, uint32_t msecs)
++{
++ int tmp_jiffies = (msecs*HZ)/1000;
++
++ if ((msecs*HZ)%1000)
++ tmp_jiffies++;
++ mod_timer((struct timer_list *)h_Timer, jiffies + tmp_jiffies);
++}
++
++int XX_TimerIsActive(t_Handle h_Timer)
++{
++ return timer_pending((struct timer_list *)h_Timer);
++}
++
++uint32_t XX_Sleep(uint32_t msecs)
++{
++ int tmp_jiffies = (msecs*HZ)/1000;
++
++ if ((msecs*HZ)%1000)
++ tmp_jiffies++;
++ return schedule_timeout(tmp_jiffies);
++}
++
++/* BEWARE: the UDelay routine is BUSY WAITING!!! */
++void XX_UDelay(uint32_t usecs)
++{
++ udelay(usecs);
++}
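
The timer helpers above convert milliseconds to jiffies with open-coded division, adding one jiffy whenever there is a remainder so a nonzero timeout never truncates to zero ticks (mainline provides msecs_to_jiffies() for the same job). A userspace sketch of the arithmetic, with HZ = 250 assumed for the demo:

    /* Round-up ms -> jiffies conversion, as in XX_StartTimer()/XX_Sleep(). */
    #include <stdio.h>

    #define DEMO_HZ 250

    static int msecs_to_demo_jiffies(unsigned int msecs)
    {
        int j = (msecs * DEMO_HZ) / 1000;

        if ((msecs * DEMO_HZ) % 1000)
            j++; /* partial tick: round up rather than expire early */
        return j;
    }

    int main(void)
    {
        printf("3 ms  -> %d jiffies\n", msecs_to_demo_jiffies(3));  /* 1, not 0 */
        printf("4 ms  -> %d jiffies\n", msecs_to_demo_jiffies(4));  /* exactly 1 */
        printf("12 ms -> %d jiffies\n", msecs_to_demo_jiffies(12)); /* exactly 3 */
        return 0;
    }
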
++
++/* TODO: verify that these are correct */
++#define MSG_BODY_SIZE 512
++typedef t_Error (t_MsgHandler) (t_Handle h_Mod, uint32_t msgId, uint8_t msgBody[MSG_BODY_SIZE]);
++typedef void (t_MsgCompletionCB) (t_Handle h_Arg, uint8_t msgBody[MSG_BODY_SIZE]);
++t_Error XX_SendMessage(char *p_DestAddr,
++ uint32_t msgId,
++ uint8_t msgBody[MSG_BODY_SIZE],
++ t_MsgCompletionCB *f_CompletionCB,
++ t_Handle h_CBArg);
++
++typedef struct {
++ char *p_Addr;
++ t_MsgHandler *f_MsgHandlerCB;
++ t_Handle h_Mod;
++ t_List node;
++} t_MsgHndlr;
++#define MSG_HNDLR_OBJECT(ptr) LIST_OBJECT(ptr, t_MsgHndlr, node)
++
++LIST(msgHndlrList);
++
++static void EnqueueMsgHndlr(t_MsgHndlr *p_MsgHndlr)
++{
++ uint32_t intFlags;
++
++ intFlags = XX_DisableAllIntr();
++ LIST_AddToTail(&p_MsgHndlr->node, &msgHndlrList);
++ XX_RestoreAllIntr(intFlags);
++}
++/* TODO: add this for multi-platform support
++static t_MsgHndlr * DequeueMsgHndlr(void)
++{
++ t_MsgHndlr *p_MsgHndlr = NULL;
++ uint32_t intFlags;
++
++ intFlags = XX_DisableAllIntr();
++ if (!LIST_IsEmpty(&msgHndlrList))
++ {
++ p_MsgHndlr = MSG_HNDLR_OBJECT(msgHndlrList.p_Next);
++ LIST_DelAndInit(&p_MsgHndlr->node);
++ }
++ XX_RestoreAllIntr(intFlags);
++
++ return p_MsgHndlr;
++}
++*/
++static t_MsgHndlr * FindMsgHndlr(char *p_Addr)
++{
++ t_MsgHndlr *p_MsgHndlr;
++ t_List *p_Pos;
++
++ LIST_FOR_EACH(p_Pos, &msgHndlrList)
++ {
++ p_MsgHndlr = MSG_HNDLR_OBJECT(p_Pos);
++ if (strstr(p_MsgHndlr->p_Addr, p_Addr))
++ return p_MsgHndlr;
++ }
++
++ return NULL;
++}
++
++t_Error XX_RegisterMessageHandler (char *p_Addr, t_MsgHandler *f_MsgHandlerCB, t_Handle h_Mod)
++{
++ t_MsgHndlr *p_MsgHndlr;
++ uint32_t len;
++
++ p_MsgHndlr = (t_MsgHndlr*)XX_Malloc(sizeof(t_MsgHndlr));
++ if (!p_MsgHndlr)
++ RETURN_ERROR(MINOR, E_NO_MEMORY, ("message handler object!!!"));
++ memset(p_MsgHndlr, 0, sizeof(t_MsgHndlr));
++
++ len = strlen(p_Addr);
++    p_MsgHndlr->p_Addr = (char*)XX_Malloc(len+1);
++    if (!p_MsgHndlr->p_Addr)
++    {
++        XX_Free(p_MsgHndlr);
++        RETURN_ERROR(MINOR, E_NO_MEMORY, ("message handler address string!!!"));
++    }
++    strncpy(p_MsgHndlr->p_Addr, p_Addr, (uint32_t)(len+1));
++
++ p_MsgHndlr->f_MsgHandlerCB = f_MsgHandlerCB;
++ p_MsgHndlr->h_Mod = h_Mod;
++ INIT_LIST(&p_MsgHndlr->node);
++ EnqueueMsgHndlr(p_MsgHndlr);
++
++ return E_OK;
++}
++
++t_Error XX_UnregisterMessageHandler (char *p_Addr)
++{
++ t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_Addr);
++ if (!p_MsgHndlr)
++ RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!"));
++
++ LIST_Del(&p_MsgHndlr->node);
++ XX_Free(p_MsgHndlr->p_Addr);
++ XX_Free(p_MsgHndlr);
++
++ return E_OK;
++}
++
++t_Error XX_SendMessage(char *p_DestAddr,
++ uint32_t msgId,
++ uint8_t msgBody[MSG_BODY_SIZE],
++ t_MsgCompletionCB *f_CompletionCB,
++ t_Handle h_CBArg)
++{
++ t_Error ans;
++ t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_DestAddr);
++ if (!p_MsgHndlr)
++ RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!"));
++
++ ans = p_MsgHndlr->f_MsgHandlerCB(p_MsgHndlr->h_Mod, msgId, msgBody);
++
++ if (f_CompletionCB)
++ f_CompletionCB(h_CBArg, msgBody);
++
++ return ans;
++}
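
A hedged usage sketch for the message-handler registry above, building on the declarations in this file; MyHandler and the "fm0" address are invented for illustration. Dispatch is synchronous: the handler runs inside XX_SendMessage(), and the completion callback, when supplied, fires immediately afterwards.

    /* Invented example: register, send to, then unregister a handler. */
    static t_Error MyHandler(t_Handle h_Mod, uint32_t msgId,
                             uint8_t msgBody[MSG_BODY_SIZE])
    {
        XX_Print("module %p got msg %u\n", h_Mod, msgId);
        return E_OK;
    }

    static t_Error demo(void)
    {
        uint8_t body[MSG_BODY_SIZE] = { 0 };
        t_Error err;

        err = XX_RegisterMessageHandler("fm0", MyHandler, NULL);
        if (err != E_OK)
            return err;

        /* FindMsgHndlr() matches with strstr(), so any registered address
         * containing "fm0" satisfies this lookup. */
        err = XX_SendMessage("fm0", 1, body, NULL, NULL);

        XX_UnregisterMessageHandler("fm0");
        return err;
    }
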
++
++t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH],
++ t_IpcMsgHandler *f_MsgHandler,
++ t_Handle h_Module,
++ uint32_t replyLength)
++{
++ UNUSED(addr);UNUSED(f_MsgHandler);UNUSED(h_Module);UNUSED(replyLength);
++ return E_OK;
++}
++
++t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH])
++{
++ UNUSED(addr);
++ return E_OK;
++}
++
++
++t_Error XX_IpcSendMessage(t_Handle h_Session,
++ uint8_t *p_Msg,
++ uint32_t msgLength,
++ uint8_t *p_Reply,
++ uint32_t *p_ReplyLength,
++ t_IpcMsgCompletion *f_Completion,
++ t_Handle h_Arg)
++{
++ UNUSED(h_Session); UNUSED(p_Msg); UNUSED(msgLength); UNUSED(p_Reply);
++ UNUSED(p_ReplyLength); UNUSED(f_Completion); UNUSED(h_Arg);
++ return E_OK;
++}
++
++t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH],
++ char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH])
++{
++ UNUSED(destAddr); UNUSED(srcAddr);
++ return E_OK;
++}
++
++/* Introduced only because of the PRINT_FMT_PARAMS define */
++uint32_t E500_GetId(void)
++{
++ return raw_smp_processor_id();
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++int GetDeviceIrqNum(int irq)
++{
++ struct device_node *iPar;
++ struct irq_domain *irqHost;
++ uint32_t hwIrq;
++
++ /* Get the interrupt controller */
++ iPar = of_find_node_by_name(NULL, "mpic");
++ hwIrq = 0;
++
++ ASSERT_COND(iPar != NULL);
++ /* Get the irq host */
++ irqHost = irq_find_host(iPar);
++ of_node_put(iPar);
++
++ /* Create irq mapping */
++ return irq_create_mapping(irqHost, hwIrq);
++}
++#else
++#error "kernel not supported!!!"
++#endif /* LINUX_VERSION_CODE */
++
++void * XX_PhysToVirt(physAddress_t addr)
++{
++ return UINT_TO_PTR(SYS_PhysToVirt((uint64_t)addr));
++}
++
++physAddress_t XX_VirtToPhys(void * addr)
++{
++ return (physAddress_t)SYS_VirtToPhys(PTR_TO_UINT(addr));
++}
++
++void * xx_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment)
++{
++ uintptr_t *returnCode, tmp;
++
++ if (alignment < sizeof(uintptr_t))
++ alignment = sizeof(uintptr_t);
++ size += alignment + sizeof(returnCode);
++ tmp = (uintptr_t)xx_Malloc(size);
++ if (tmp == 0)
++ return NULL;
++ returnCode = (uintptr_t*)((tmp + alignment + sizeof(returnCode)) & ~((uintptr_t)alignment - 1));
++ *(returnCode - 1) = tmp;
++
++ return (void*)returnCode;
++}
++
++void xx_FreeSmart(void *p)
++{
++ xx_Free((void*)(*((uintptr_t *)(p) - 1)));
++}
+diff --git a/drivers/staging/fsl_qbman/Kconfig b/drivers/staging/fsl_qbman/Kconfig
+new file mode 100644
+index 00000000..93dcb7d3
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/Kconfig
+@@ -0,0 +1,228 @@
++config FSL_SDK_DPA
++ bool "Freescale Datapath Queue and Buffer management"
++ depends on !FSL_DPAA
++ select FSL_QMAN_FQ_LOOKUP if PPC64
++ select FSL_QMAN_FQ_LOOKUP if ARM64
++
++
++menu "Freescale Datapath QMan/BMan options"
++ depends on FSL_SDK_DPA
++
++config FSL_DPA_CHECKING
++ bool "additional driver checking"
++ default n
++ ---help---
++ Compiles in additional checks to sanity-check the drivers and any
++ use of it by other code. Not recommended for performance.
++
++config FSL_DPA_CAN_WAIT
++ bool
++ default y
++
++config FSL_DPA_CAN_WAIT_SYNC
++ bool
++ default y
++
++config FSL_DPA_PIRQ_FAST
++ bool
++ default y
++
++config FSL_DPA_PIRQ_SLOW
++ bool
++ default y
++
++config FSL_DPA_PORTAL_SHARE
++ bool
++ default y
++
++config FSL_SDK_BMAN
++ bool "Freescale Buffer Manager (BMan) support"
++ default y
++
++if FSL_SDK_BMAN
++
++config FSL_BMAN_CONFIG
++ bool "BMan device management"
++ default y
++ ---help---
++ If this linux image is running natively, you need this option. If this
++ linux image is running as a guest OS under the hypervisor, only one
++ guest OS ("the control plane") needs this option.
++
++config FSL_BMAN_TEST
++ tristate "BMan self-tests"
++ default n
++ ---help---
++ This option compiles self-test code for BMan.
++
++config FSL_BMAN_TEST_HIGH
++ bool "BMan high-level self-test"
++ depends on FSL_BMAN_TEST
++ default y
++ ---help---
++ This requires the presence of cpu-affine portals, and performs
++ high-level API testing with them (whichever portal(s) are affine to
++ the cpu(s) the test executes on).
++
++config FSL_BMAN_TEST_THRESH
++ bool "BMan threshold test"
++ depends on FSL_BMAN_TEST
++ default y
++ ---help---
++ Multi-threaded (SMP) test of BMan pool depletion. A pool is seeded
++ before multiple threads (one per cpu) create pool objects to track
++	  depletion state changes. The pool is then drained to empty by a
++	  "drainer" thread, and the other threads verify that they observe exactly
++ the depletion state changes that are expected.
++
++config FSL_BMAN_DEBUGFS
++ tristate "BMan debugfs interface"
++ depends on DEBUG_FS
++ default y
++ ---help---
++ This option compiles debugfs code for BMan.
++
++endif # FSL_SDK_BMAN
++
++config FSL_SDK_QMAN
++ bool "Freescale Queue Manager (QMan) support"
++ default y
++
++if FSL_SDK_QMAN
++
++config FSL_QMAN_POLL_LIMIT
++ int
++ default 32
++
++config FSL_QMAN_CONFIG
++ bool "QMan device management"
++ default y
++ ---help---
++ If this linux image is running natively, you need this option. If this
++ linux image is running as a guest OS under the hypervisor, only one
++ guest OS ("the control plane") needs this option.
++
++config FSL_QMAN_TEST
++ tristate "QMan self-tests"
++ default n
++ ---help---
++ This option compiles self-test code for QMan.
++
++config FSL_QMAN_TEST_STASH_POTATO
++ bool "QMan 'hot potato' data-stashing self-test"
++ depends on FSL_QMAN_TEST
++ default y
++ ---help---
++ This performs a "hot potato" style test enqueuing/dequeuing a frame
++ across a series of FQs scheduled to different portals (and cpus), with
++ DQRR, data and context stashing always on.
++
++config FSL_QMAN_TEST_HIGH
++ bool "QMan high-level self-test"
++ depends on FSL_QMAN_TEST
++ default y
++ ---help---
++ This requires the presence of cpu-affine portals, and performs
++ high-level API testing with them (whichever portal(s) are affine to
++ the cpu(s) the test executes on).
++
++config FSL_QMAN_DEBUGFS
++ tristate "QMan debugfs interface"
++ depends on DEBUG_FS
++ default y
++ ---help---
++ This option compiles debugfs code for QMan.
++
++# H/w settings that can be hard-coded for now.
++config FSL_QMAN_FQD_SZ
++ int "size of Frame Queue Descriptor region"
++ default 10
++ ---help---
++ This is the size of the FQD region defined as: PAGE_SIZE * (2^value)
++ ex: 10 => PAGE_SIZE * (2^10)
++ Note: Default device-trees now require minimum Kconfig setting of 10.
++
++config FSL_QMAN_PFDR_SZ
++ int "size of the PFDR pool"
++ default 13
++ ---help---
++ This is the size of the PFDR pool defined as: PAGE_SIZE * (2^value)
++ ex: 13 => PAGE_SIZE * (2^13)
++
++# Corenet initiator settings. Stash request queues are 4-deep to match cores'
++# ability to snarf. Stash priority is 3, other priorities are 2.
++config FSL_QMAN_CI_SCHED_CFG_SRCCIV
++ int
++ depends on FSL_QMAN_CONFIG
++ default 4
++config FSL_QMAN_CI_SCHED_CFG_SRQ_W
++ int
++ depends on FSL_QMAN_CONFIG
++ default 3
++config FSL_QMAN_CI_SCHED_CFG_RW_W
++ int
++ depends on FSL_QMAN_CONFIG
++ default 2
++config FSL_QMAN_CI_SCHED_CFG_BMAN_W
++ int
++ depends on FSL_QMAN_CONFIG
++ default 2
++
++# portal interrupt settings
++config FSL_QMAN_PIRQ_DQRR_ITHRESH
++ int
++ default 12
++config FSL_QMAN_PIRQ_MR_ITHRESH
++ int
++ default 4
++config FSL_QMAN_PIRQ_IPERIOD
++ int
++ default 100
++
++# 64 bit kernel support
++config FSL_QMAN_FQ_LOOKUP
++ bool
++ default n
++
++config QMAN_CEETM_UPDATE_PERIOD
++ int "Token update period for shaping, in nanoseconds"
++ default 1000
++ ---help---
++ Traffic shaping works by performing token calculations (using
++ credits) on shaper instances periodically. This update period
++ sets the granularity for how often those token rate credit
++ updates are performed, and thus determines the accuracy and
++ range of traffic rates that can be configured by users. The
++ reference manual recommends a 1 microsecond period as providing
++ a good balance between granularity and range.
++
++ Unless you know what you are doing, leave this value at its default.
++
++config FSL_QMAN_INIT_TIMEOUT
++ int "timeout for qman init stage, in seconds"
++ default 10
++ ---help---
++ The timeout setting to quit the initialization loop for non-control
++ partition in case the control partition fails to boot-up.
++
++endif # FSL_SDK_QMAN
++
++config FSL_USDPAA
++ bool "Freescale USDPAA process driver"
++ depends on FSL_SDK_DPA
++ default y
++ ---help---
++ This driver provides user-space access to kernel-managed
++ resource interfaces for USDPAA applications, on the assumption
++ that each process will open this device once. Specifically, this
++ device exposes functionality that would be awkward if exposed
++ via the portal devices - ie. this device exposes functionality
++ that is inherently process-wide rather than portal-specific.
++ This device is necessary for obtaining access to DMA memory and
++ for allocation of Qman and Bman resources. In short, if you wish
++ to use USDPAA applications, you need this.
++
++ If unsure, say Y.
++
++
++endmenu
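
For reference, an illustrative .config fragment selecting the options defined above; the values mirror the Kconfig defaults and are not a tested board configuration:

    # Illustrative fragment only -- not a validated configuration.
    CONFIG_FSL_SDK_DPA=y
    # CONFIG_FSL_DPA_CHECKING is not set
    CONFIG_FSL_SDK_BMAN=y
    CONFIG_FSL_BMAN_CONFIG=y
    # CONFIG_FSL_BMAN_TEST is not set
    CONFIG_FSL_BMAN_DEBUGFS=y
    CONFIG_FSL_SDK_QMAN=y
    CONFIG_FSL_QMAN_CONFIG=y
    # CONFIG_FSL_QMAN_TEST is not set
    CONFIG_FSL_QMAN_DEBUGFS=y
    CONFIG_FSL_QMAN_FQD_SZ=10
    CONFIG_FSL_QMAN_PFDR_SZ=13
    CONFIG_QMAN_CEETM_UPDATE_PERIOD=1000
    CONFIG_FSL_QMAN_INIT_TIMEOUT=10
    CONFIG_FSL_USDPAA=y
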
+diff --git a/drivers/staging/fsl_qbman/Makefile b/drivers/staging/fsl_qbman/Makefile
+new file mode 100644
+index 00000000..777d7d34
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/Makefile
+@@ -0,0 +1,28 @@
++subdir-ccflags-y := -Werror
++
++# Common
++obj-$(CONFIG_FSL_SDK_DPA) += dpa_alloc.o
++obj-$(CONFIG_FSL_SDK_DPA) += qbman_driver.o
++
++# Bman
++obj-$(CONFIG_FSL_SDK_BMAN) += bman_high.o
++obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o
++obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o
++obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o
++bman_tester-y = bman_test.o
++bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o
++bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o
++bman_debugfs_interface-y = bman_debugfs.o
++
++# Qman
++obj-$(CONFIG_FSL_SDK_QMAN) += qman_high.o qman_utility.o
++obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o
++obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o
++qman_tester-y = qman_test.o
++qman_tester-$(CONFIG_FSL_QMAN_TEST_STASH_POTATO) += qman_test_hotpotato.o
++qman_tester-$(CONFIG_FSL_QMAN_TEST_HIGH) += qman_test_high.o
++obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o
++qman_debugfs_interface-y = qman_debugfs.o
++
++# USDPAA
++obj-$(CONFIG_FSL_USDPAA) += fsl_usdpaa.o fsl_usdpaa_irq.o
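
The Makefile above relies on kbuild composite objects: obj-$(CONFIG_...) names the final object or module, while <name>-y and <name>-$(CONFIG_...) list the pieces linked into it, so optional test cases are compiled in only when their Kconfig switches are set. A minimal sketch of the pattern with invented names (demo_mod, CONFIG_DEMO_MOD*):

    # Composite kbuild object: demo_mod.o is linked from demo_core.o,
    # plus demo_extra.o when the (assumed) extra option is enabled.
    obj-$(CONFIG_DEMO_MOD) += demo_mod.o
    demo_mod-y := demo_core.o
    demo_mod-$(CONFIG_DEMO_MOD_EXTRA) += demo_extra.o
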
+diff --git a/drivers/staging/fsl_qbman/bman_config.c b/drivers/staging/fsl_qbman/bman_config.c
+new file mode 100644
+index 00000000..bb397730
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_config.c
+@@ -0,0 +1,720 @@
++/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <asm/cacheflush.h>
++#include "bman_private.h"
++#include <linux/of_reserved_mem.h>
++
++/* Last updated for v00.79 of the BG */
++
++struct bman;
++
++/* Register offsets */
++#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04))
++#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04))
++#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
++#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
++#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
++#define REG_FBPR_FPC 0x0800
++#define REG_STATE_IDLE 0x960
++#define REG_STATE_STOP 0x964
++#define REG_ECSR 0x0a00
++#define REG_ECIR 0x0a04
++#define REG_EADR 0x0a08
++#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
++#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
++#define REG_IP_REV_1 0x0bf8
++#define REG_IP_REV_2 0x0bfc
++#define REG_FBPR_BARE 0x0c00
++#define REG_FBPR_BAR 0x0c04
++#define REG_FBPR_AR 0x0c10
++#define REG_SRCIDR 0x0d04
++#define REG_LIODNR 0x0d08
++#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */
++
++/* Used by all error interrupt registers except 'inhibit' */
++#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
++#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
++#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
++#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
++#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
++
++/* BMAN_ECIR valid error bit */
++#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI)
++
++union bman_ecir {
++ u32 ecir_raw;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 __reserved1:4;
++ u32 portal_num:4;
++ u32 __reserved2:12;
++ u32 numb:4;
++ u32 __reserved3:2;
++ u32 pid:6;
++#else
++ u32 pid:6;
++ u32 __reserved3:2;
++ u32 numb:4;
++ u32 __reserved2:12;
++ u32 portal_num:4;
++ u32 __reserved1:4;
++#endif
++ } __packed info;
++};
++
++union bman_eadr {
++ u32 eadr_raw;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 __reserved1:5;
++ u32 memid:3;
++ u32 __reserved2:14;
++ u32 eadr:10;
++#else
++ u32 eadr:10;
++ u32 __reserved2:14;
++ u32 memid:3;
++ u32 __reserved1:5;
++#endif
++ } __packed info;
++};
++
++struct bman_hwerr_txt {
++ u32 mask;
++ const char *txt;
++};
++
++#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
++
++static const struct bman_hwerr_txt bman_hwerr_txts[] = {
++ BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
++ BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
++ BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
++ BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
++ BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
++};
++#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt))
++
++struct bman_error_info_mdata {
++ u16 addr_mask;
++ u16 bits;
++ const char *txt;
++};
++
++#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
++static const struct bman_error_info_mdata error_mdata[] = {
++ BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
++ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
++ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
++};
++#define BMAN_ERR_MDATA_COUNT \
++ (sizeof(error_mdata)/sizeof(struct bman_error_info_mdata))
++
++/* Add this in Kconfig */
++#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
++
++/**
++ * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
++ * @v: for accessors that write values, this is the 32-bit value
++ *
++ * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
++ * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
++ * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
++ * "write the enable register" rather than "enable the write register"!
++ */
++#define bm_err_isr_status_read(bm) \
++ __bm_err_isr_read(bm, bm_isr_status)
++#define bm_err_isr_status_clear(bm, m) \
++ __bm_err_isr_write(bm, bm_isr_status, m)
++#define bm_err_isr_enable_read(bm) \
++ __bm_err_isr_read(bm, bm_isr_enable)
++#define bm_err_isr_enable_write(bm, v) \
++ __bm_err_isr_write(bm, bm_isr_enable, v)
++#define bm_err_isr_disable_read(bm) \
++ __bm_err_isr_read(bm, bm_isr_disable)
++#define bm_err_isr_disable_write(bm, v) \
++ __bm_err_isr_write(bm, bm_isr_disable, v)
++#define bm_err_isr_inhibit(bm) \
++ __bm_err_isr_write(bm, bm_isr_inhibit, 1)
++#define bm_err_isr_uninhibit(bm) \
++ __bm_err_isr_write(bm, bm_isr_inhibit, 0)
++
++/*
++ * TODO: unimplemented registers
++ *
++ * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
++ * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
++ */
++
++/* Encapsulate "struct bman *" as a cast of the register space address. */
++
++static struct bman *bm_create(void *regs)
++{
++ return (struct bman *)regs;
++}
++
++static inline u32 __bm_in(struct bman *bm, u32 offset)
++{
++ return in_be32((void *)bm + offset);
++}
++static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
++{
++ out_be32((void *)bm + offset, val);
++}
++#define bm_in(reg) __bm_in(bm, REG_##reg)
++#define bm_out(reg, val) __bm_out(bm, REG_##reg, val)
++
++static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
++{
++ return __bm_in(bm, REG_ERR_ISR + (n << 2));
++}
++
++static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
++{
++ __bm_out(bm, REG_ERR_ISR + (n << 2), val);
++}
++
++static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
++{
++ u32 v = bm_in(IP_REV_1);
++ *id = (v >> 16);
++ *major = (v >> 8) & 0xff;
++ *minor = v & 0xff;
++}
++
++static u32 __generate_thresh(u32 val, int roundup)
++{
++	u32 e = 0; /* exponent; 'val' is reduced to the coefficient */
++ int oddbit = 0;
++ while (val > 0xff) {
++ oddbit = val & 1;
++ val >>= 1;
++ e++;
++ if (roundup && oddbit)
++ val++;
++ }
++ DPA_ASSERT(e < 0x10);
++ return val | (e << 8);
++}
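
__generate_thresh() above packs a threshold into an 8-bit coefficient plus a 4-bit exponent, halving the value until it fits in 8 bits; with roundup set, any odd bit shifted out bumps the coefficient so the encoded threshold never falls below the request. A standalone sketch, where the decode step (coefficient << exponent) is inferred from the encoder:

    /* Coefficient/exponent threshold encoding, as in __generate_thresh(). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t generate_thresh(uint32_t val, int roundup)
    {
        uint32_t e = 0; /* exponent; 'val' becomes the coefficient */
        int oddbit = 0;

        while (val > 0xff) {
            oddbit = val & 1;
            val >>= 1;
            e++;
            if (roundup && oddbit)
                val++;
        }
        return val | (e << 8);
    }

    static uint32_t decode_thresh(uint32_t v)
    {
        return (v & 0xff) << (v >> 8);
    }

    int main(void)
    {
        /* 301: one halving gives coefficient 150, exponent 1 */
        printf("301 rounded down -> %u\n",
               decode_thresh(generate_thresh(301, 0))); /* 300 */
        printf("301 rounded up   -> %u\n",
               decode_thresh(generate_thresh(301, 1))); /* 302 */
        return 0;
    }
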
++
++static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
++ u32 hwdet, u32 hwdxt)
++{
++ DPA_ASSERT(pool < bman_pool_max);
++ bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
++ bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
++ bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
++ bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
++}
++
++static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
++{
++ u32 exp = ilog2(size);
++ /* choke if size isn't within range */
++ DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
++ is_power_of_2(size));
++ /* choke if '[e]ba' has lower-alignment than 'size' */
++ DPA_ASSERT(!(ba & (size - 1)));
++ bm_out(FBPR_BARE, upper_32_bits(ba));
++ bm_out(FBPR_BAR, lower_32_bits(ba));
++ bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
++}
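
bm_set_memory() above stores the region size as a power-of-two exponent minus one in the FBPR_AR register. A quick standalone check of that arithmetic for the 16 MiB DEFAULT_FBPR_SZ used further down (PAGE_SIZE << 12 with 4 KiB pages):

    /* AR exponent math from bm_set_memory(): 16 MiB => ilog2 = 24 => field 23. */
    #include <assert.h>
    #include <stdint.h>

    static unsigned int ilog2_u32(uint32_t v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        uint32_t size = 4096u << 12; /* 16 MiB */

        assert(ilog2_u32(size) == 24);
        assert(ilog2_u32(size) - 1 == 23); /* value written to FBPR_AR */
        return 0;
    }
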
++
++/*****************/
++/* Config driver */
++/*****************/
++
++/* TODO: Kconfig these? */
++#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12)
++
++/* We support only one of these. */
++static struct bman *bm;
++static struct device_node *bm_node;
++
++/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used
++ * during bman_init_ccsr(). */
++static dma_addr_t fbpr_a;
++static size_t fbpr_sz = DEFAULT_FBPR_SZ;
++
++static int bman_fbpr(struct reserved_mem *rmem)
++{
++ fbpr_a = rmem->base;
++ fbpr_sz = rmem->size;
++
++ WARN_ON(!(fbpr_a && fbpr_sz));
++
++ return 0;
++}
++RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
++
++static int __init fsl_bman_init(struct device_node *node)
++{
++ struct resource res;
++ u32 __iomem *regs;
++ const char *s;
++ int ret, standby = 0;
++ u16 id;
++ u8 major, minor;
++
++ ret = of_address_to_resource(node, 0, &res);
++ if (ret) {
++ pr_err("Can't get %s property 'reg'\n",
++ node->full_name);
++ return ret;
++ }
++ s = of_get_property(node, "fsl,hv-claimable", &ret);
++ if (s && !strcmp(s, "standby"))
++ standby = 1;
++ /* Global configuration */
++ regs = ioremap(res.start, res.end - res.start + 1);
++ bm = bm_create(regs);
++ BUG_ON(!bm);
++ bm_node = node;
++ bm_get_version(bm, &id, &major, &minor);
++ pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor);
++ if ((major == 1) && (minor == 0)) {
++ bman_ip_rev = BMAN_REV10;
++ bman_pool_max = 64;
++ } else if ((major == 2) && (minor == 0)) {
++ bman_ip_rev = BMAN_REV20;
++ bman_pool_max = 8;
++ } else if ((major == 2) && (minor == 1)) {
++ bman_ip_rev = BMAN_REV21;
++ bman_pool_max = 64;
++ } else {
++ pr_warn("unknown Bman version, default to rev1.0\n");
++ }
++
++ if (standby) {
++ pr_info(" -> in standby mode\n");
++ return 0;
++ }
++ return 0;
++}
++
++int bman_have_ccsr(void)
++{
++ return bm ? 1 : 0;
++}
++
++int bm_pool_set(u32 bpid, const u32 *thresholds)
++{
++ if (!bm)
++ return -ENODEV;
++ bm_set_pool(bm, bpid, thresholds[0],
++ thresholds[1], thresholds[2],
++ thresholds[3]);
++ return 0;
++}
++EXPORT_SYMBOL(bm_pool_set);
++
++__init int bman_init_early(void)
++{
++ struct device_node *dn;
++ int ret;
++
++ for_each_compatible_node(dn, NULL, "fsl,bman") {
++ if (bm)
++ pr_err("%s: only one 'fsl,bman' allowed\n",
++ dn->full_name);
++ else {
++ if (!of_device_is_available(dn))
++ continue;
++
++ ret = fsl_bman_init(dn);
++ BUG_ON(ret);
++ }
++ }
++ return 0;
++}
++postcore_initcall_sync(bman_init_early);
++
++
++static void log_edata_bits(u32 bit_count)
++{
++ u32 i, j, mask = 0xffffffff;
++
++ pr_warn("Bman ErrInt, EDATA:\n");
++ i = bit_count/32;
++ if (bit_count%32) {
++ i++;
++ mask = ~(mask << bit_count%32);
++ }
++ j = 16-i;
++ pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask);
++ j++;
++ for (; j < 16; j++)
++ pr_warn(" 0x%08x\n", bm_in(EDATA(j)));
++}
++
++static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
++{
++ union bman_ecir ecir_val;
++ union bman_eadr eadr_val;
++
++ ecir_val.ecir_raw = bm_in(ECIR);
++ /* Is portal info valid */
++ if (ecsr_val & PORTAL_ECSR_ERR) {
++ pr_warn("Bman ErrInt: SWP id %d, numb %d, pid %d\n",
++ ecir_val.info.portal_num, ecir_val.info.numb,
++ ecir_val.info.pid);
++ }
++ if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
++ eadr_val.eadr_raw = bm_in(EADR);
++ pr_warn("Bman ErrInt: EADR Memory: %s, 0x%x\n",
++ error_mdata[eadr_val.info.memid].txt,
++ error_mdata[eadr_val.info.memid].addr_mask
++ & eadr_val.info.eadr);
++ log_edata_bits(error_mdata[eadr_val.info.memid].bits);
++ }
++}
++
++/* Bman interrupt handler */
++static irqreturn_t bman_isr(int irq, void *ptr)
++{
++ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
++
++ ier_val = bm_err_isr_enable_read(bm);
++ isr_val = bm_err_isr_status_read(bm);
++ ecsr_val = bm_in(ECSR);
++ isr_mask = isr_val & ier_val;
++
++ if (!isr_mask)
++ return IRQ_NONE;
++ for (i = 0; i < BMAN_HWE_COUNT; i++) {
++ if (bman_hwerr_txts[i].mask & isr_mask) {
++ pr_warn("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt);
++ if (bman_hwerr_txts[i].mask & ecsr_val) {
++ log_additional_error_info(isr_mask, ecsr_val);
++ /* Re-arm error capture registers */
++ bm_out(ECSR, ecsr_val);
++ }
++ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
++ pr_devel("Bman un-enabling error 0x%x\n",
++ bman_hwerr_txts[i].mask);
++ ier_val &= ~bman_hwerr_txts[i].mask;
++ bm_err_isr_enable_write(bm, ier_val);
++ }
++ }
++ }
++ bm_err_isr_status_clear(bm, isr_val);
++ return IRQ_HANDLED;
++}
++
++static int __bind_irq(void)
++{
++ int ret, err_irq;
++
++ err_irq = of_irq_to_resource(bm_node, 0, NULL);
++ if (err_irq == 0) {
++ pr_info("Can't get %s property '%s'\n", bm_node->full_name,
++ "interrupts");
++ return -ENODEV;
++ }
++ ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node);
++ if (ret) {
++ pr_err("request_irq() failed %d for '%s'\n", ret,
++ bm_node->full_name);
++ return -ENODEV;
++ }
++ /* Disable Buffer Pool State Change */
++ bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
++ /* Write-to-clear any stale bits, (eg. starvation being asserted prior
++ * to resource allocation during driver init). */
++ bm_err_isr_status_clear(bm, 0xffffffff);
++ /* Enable Error Interrupts */
++ bm_err_isr_enable_write(bm, 0xffffffff);
++ return 0;
++}
++
++int bman_init_ccsr(struct device_node *node)
++{
++ int ret;
++ if (!bman_have_ccsr())
++ return 0;
++ if (node != bm_node)
++ return -EINVAL;
++ /* FBPR memory */
++ bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
++ pr_info("bman-fbpr addr %pad size 0x%zx\n", &fbpr_a, fbpr_sz);
++
++ ret = __bind_irq();
++ if (ret)
++ return ret;
++ return 0;
++}
++
++u32 bm_pool_free_buffers(u32 bpid)
++{
++ return bm_in(POOL_CONTENT(bpid));
++}
++
++#ifdef CONFIG_SYSFS
++
++#define DRV_NAME "fsl-bman"
++#define SBEC_MAX_ID 1
++#define SBEC_MIN_ID 0
++
++static ssize_t show_fbpr_fpc(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
++};
++
++static ssize_t show_pool_count(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ u32 data;
++ int i;
++
++ if (!sscanf(dev_attr->attr.name, "%d", &i) || (i >= bman_pool_max))
++ return -EINVAL;
++ data = bm_in(POOL_CONTENT(i));
++ return snprintf(buf, PAGE_SIZE, "%d\n", data);
++};
++
++static ssize_t show_err_isr(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
++};
++
++static ssize_t show_sbec(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ int i;
++
++ if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
++ return -EINVAL;
++ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
++ return -EINVAL;
++ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
++};
++
++static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
++static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
++
++/* Didn't use DEVICE_ATTR as 64 of this would be required.
++ * Initialize them when needed. */
++static char *name_attrs_pool_count; /* "xx" + null-terminator */
++static struct device_attribute *dev_attr_buffer_pool_count;
++
++static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
++
++static struct attribute *bman_dev_attributes[] = {
++ &dev_attr_fbpr_fpc.attr,
++ &dev_attr_err_isr.attr,
++ NULL
++};
++
++static struct attribute *bman_dev_ecr_attributes[] = {
++ &dev_attr_sbec_0.attr,
++ &dev_attr_sbec_1.attr,
++ NULL
++};
++
++static struct attribute **bman_dev_pool_count_attributes;
++
++
++/* root level */
++static const struct attribute_group bman_dev_attr_grp = {
++ .name = NULL,
++ .attrs = bman_dev_attributes
++};
++static const struct attribute_group bman_dev_ecr_grp = {
++ .name = "error_capture",
++ .attrs = bman_dev_ecr_attributes
++};
++static struct attribute_group bman_dev_pool_countent_grp = {
++ .name = "pool_count",
++};
++
++static int of_fsl_bman_remove(struct platform_device *ofdev)
++{
++ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
++ return 0;
++};
++
++static int of_fsl_bman_probe(struct platform_device *ofdev)
++{
++ int ret, i;
++
++ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
++ if (ret)
++ goto done;
++ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
++ if (ret)
++ goto del_group_0;
++
++ name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3,
++ GFP_KERNEL);
++ if (!name_attrs_pool_count) {
++ pr_err("Can't alloc name_attrs_pool_count\n");
++ goto del_group_1;
++ }
++
++ dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) *
++ bman_pool_max, GFP_KERNEL);
++ if (!dev_attr_buffer_pool_count) {
++ pr_err("Can't alloc dev_attr-buffer_pool_count\n");
++ goto del_group_2;
++ }
++
++ bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) *
++ (bman_pool_max + 1), GFP_KERNEL);
++ if (!bman_dev_pool_count_attributes) {
++ pr_err("can't alloc bman_dev_pool_count_attributes\n");
++ goto del_group_3;
++ }
++
++ for (i = 0; i < bman_pool_max; i++) {
++ ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
++ if (!ret)
++ goto del_group_4;
++ dev_attr_buffer_pool_count[i].attr.name =
++ (name_attrs_pool_count + i * 3);
++ dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
++ dev_attr_buffer_pool_count[i].show = show_pool_count;
++ bman_dev_pool_count_attributes[i] =
++ &dev_attr_buffer_pool_count[i].attr;
++ sysfs_attr_init(bman_dev_pool_count_attributes[i]);
++ }
++ bman_dev_pool_count_attributes[bman_pool_max] = NULL;
++
++ bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes;
++
++ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp);
++ if (ret)
++ goto del_group_4;
++
++ goto done;
++
++del_group_4:
++ kfree(bman_dev_pool_count_attributes);
++del_group_3:
++ kfree(dev_attr_buffer_pool_count);
++del_group_2:
++ kfree(name_attrs_pool_count);
++del_group_1:
++ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
++del_group_0:
++ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
++done:
++ if (ret)
++ dev_err(&ofdev->dev,
++ "Cannot create dev attributes ret=%d\n", ret);
++ return ret;
++};
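
of_fsl_bman_probe() above builds its per-pool sysfs files at runtime instead of declaring 64 DEVICE_ATTR() entries. A hedged sketch of that dynamic-attribute pattern (N_ATTRS, demo_show and the "demo" group name are invented; error unwinding is elided for brevity):

    /* Runtime-built sysfs attribute group, as in of_fsl_bman_probe(). */
    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/sysfs.h>

    #define N_ATTRS 4

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
        return snprintf(buf, PAGE_SIZE, "%s\n", attr->attr.name);
    }

    static int demo_create_group(struct device *dev)
    {
        static struct attribute_group grp = { .name = "demo" };
        struct device_attribute *da;
        struct attribute **attrs;
        char *names;
        int i;

        da = kcalloc(N_ATTRS, sizeof(*da), GFP_KERNEL);
        attrs = kcalloc(N_ATTRS + 1, sizeof(*attrs), GFP_KERNEL);
        names = kmalloc(N_ATTRS * 3, GFP_KERNEL); /* "NN" + NUL per entry */
        if (!da || !attrs || !names)
            return -ENOMEM; /* a real driver would free what succeeded */

        for (i = 0; i < N_ATTRS; i++) {
            scnprintf(names + i * 3, 3, "%d", i);
            da[i].attr.name = names + i * 3;
            da[i].attr.mode = S_IRUSR;
            da[i].show = demo_show;
            sysfs_attr_init(&da[i].attr); /* lockdep key for dynamic attrs */
            attrs[i] = &da[i].attr;
        }
        attrs[N_ATTRS] = NULL; /* attribute arrays are NULL-terminated */
        grp.attrs = attrs;

        return sysfs_create_group(&dev->kobj, &grp);
    }
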
++
++static struct of_device_id of_fsl_bman_ids[] = {
++ {
++ .compatible = "fsl,bman",
++ },
++ {}
++};
++MODULE_DEVICE_TABLE(of, of_fsl_bman_ids);
++
++#ifdef CONFIG_SUSPEND
++static u32 saved_isdr;
++
++static int bman_pm_suspend_noirq(struct device *dev)
++{
++ uint32_t idle_state;
++
++ suspend_unused_bportal();
++ /* save isdr, disable all, clear isr */
++ saved_isdr = bm_err_isr_disable_read(bm);
++ bm_err_isr_disable_write(bm, 0xffffffff);
++ bm_err_isr_status_clear(bm, 0xffffffff);
++
++ if (bman_ip_rev < BMAN_REV21) {
++#ifdef CONFIG_PM_DEBUG
++ pr_info("Bman version doesn't have STATE_IDLE\n");
++#endif
++ return 0;
++ }
++ idle_state = bm_in(STATE_IDLE);
++ if (!(idle_state & 0x1)) {
++ pr_err("Bman not idle 0x%x aborting\n", idle_state);
++ bm_err_isr_disable_write(bm, saved_isdr);
++ resume_unused_bportal();
++ return -EBUSY;
++ }
++#ifdef CONFIG_PM_DEBUG
++ pr_info("Bman suspend code, IDLE_STAT = 0x%x\n", idle_state);
++#endif
++ return 0;
++}
++
++static int bman_pm_resume_noirq(struct device *dev)
++{
++ /* restore isdr */
++ bm_err_isr_disable_write(bm, saved_isdr);
++ resume_unused_bportal();
++ return 0;
++}
++#else
++#define bman_pm_suspend_noirq NULL
++#define bman_pm_resume_noirq NULL
++#endif
++
++static const struct dev_pm_ops bman_pm_ops = {
++ .suspend_noirq = bman_pm_suspend_noirq,
++ .resume_noirq = bman_pm_resume_noirq,
++};
++
++static struct platform_driver of_fsl_bman_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = DRV_NAME,
++ .of_match_table = of_fsl_bman_ids,
++ .pm = &bman_pm_ops,
++ },
++ .probe = of_fsl_bman_probe,
++ .remove = of_fsl_bman_remove,
++};
++
++static int bman_ctrl_init(void)
++{
++ return platform_driver_register(&of_fsl_bman_driver);
++}
++
++static void bman_ctrl_exit(void)
++{
++ platform_driver_unregister(&of_fsl_bman_driver);
++}
++
++module_init(bman_ctrl_init);
++module_exit(bman_ctrl_exit);
++
++#endif /* CONFIG_SYSFS */
+diff --git a/drivers/staging/fsl_qbman/bman_debugfs.c b/drivers/staging/fsl_qbman/bman_debugfs.c
+new file mode 100644
+index 00000000..96909348
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_debugfs.c
+@@ -0,0 +1,119 @@
++/* Copyright 2010-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/module.h>
++#include <linux/fsl_bman.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/uaccess.h>
++
++static struct dentry *dfs_root; /* debugfs root directory */
++
++/*******************************************************************************
++ * Query Buffer Pool State
++ ******************************************************************************/
++static int query_bp_state_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ struct bm_pool_state state;
++ int i, j;
++ u32 mask;
++
++ memset(&state, 0, sizeof(struct bm_pool_state));
++ ret = bman_query_pools(&state);
++ if (ret) {
++ seq_printf(file, "Error %d\n", ret);
++ return 0;
++ }
++ seq_puts(file, "bp_id free_buffers_avail bp_depleted\n");
++ for (i = 0; i < 2; i++) {
++ mask = 0x80000000;
++ for (j = 0; j < 32; j++) {
++ seq_printf(file,
++ " %-2u %-3s %-3s\n",
++ (i*32)+j,
++ (state.as.state.__state[i] & mask) ? "no" : "yes",
++ (state.ds.state.__state[i] & mask) ? "yes" : "no");
++ mask >>= 1;
++ }
++ }
++ return 0;
++}
++
++static int query_bp_state_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, query_bp_state_show, NULL);
++}
++
++static const struct file_operations query_bp_state_fops = {
++ .owner = THIS_MODULE,
++ .open = query_bp_state_open,
++ .read = seq_read,
++ .release = single_release,
++};
++
++static int __init bman_debugfs_module_init(void)
++{
++ int ret = 0;
++ struct dentry *d;
++
++ dfs_root = debugfs_create_dir("bman", NULL);
++
++ if (dfs_root == NULL) {
++ ret = -ENOMEM;
++ pr_err("Cannot create bman debugfs dir\n");
++ goto _return;
++ }
++ d = debugfs_create_file("query_bp_state",
++ S_IRUGO,
++ dfs_root,
++ NULL,
++ &query_bp_state_fops);
++ if (d == NULL) {
++ ret = -ENOMEM;
++ pr_err("Cannot create query_bp_state\n");
++ goto _return;
++ }
++ return 0;
++
++_return:
++ debugfs_remove_recursive(dfs_root);
++ return ret;
++}
++
++static void __exit bman_debugfs_module_exit(void)
++{
++ debugfs_remove_recursive(dfs_root);
++}
++
++
++module_init(bman_debugfs_module_init);
++module_exit(bman_debugfs_module_exit);
++MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/staging/fsl_qbman/bman_driver.c b/drivers/staging/fsl_qbman/bman_driver.c
+new file mode 100644
+index 00000000..86fabef6
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_driver.c
+@@ -0,0 +1,575 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "bman_low.h"
++#ifdef CONFIG_HOTPLUG_CPU
++#include <linux/cpu.h>
++#endif
++/*
++ * Global variables of the max portal/pool number this bman version supported
++ */
++u16 bman_ip_rev;
++EXPORT_SYMBOL(bman_ip_rev);
++u16 bman_pool_max;
++EXPORT_SYMBOL(bman_pool_max);
++static u16 bman_portal_max;
++
++/* After initialising cpus that own shared portal configs, we cache the
++ * resulting portals (ie. not just the configs) in this array. Then we
++ * initialise slave cpus that don't have their own portals, redirecting them to
++ * portals from this cache in a round-robin assignment. */
++static struct bman_portal *shared_portals[NR_CPUS];
++static int num_shared_portals;
++static int shared_portals_idx;
++static LIST_HEAD(unused_pcfgs);
++static DEFINE_SPINLOCK(unused_pcfgs_lock);
++static void *affine_bportals[NR_CPUS];
++
++static int __init fsl_bpool_init(struct device_node *node)
++{
++ int ret;
++ u32 *thresh, *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret);
++ if (!bpid || (ret != 4)) {
++ pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name);
++ return -ENODEV;
++ }
++ thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret);
++ if (thresh) {
++ if (ret != 16) {
++ pr_err("Invalid %s property '%s'\n",
++ node->full_name, "fsl,bpool-thresholds");
++ return -ENODEV;
++ }
++ }
++ if (thresh) {
++#ifdef CONFIG_FSL_BMAN_CONFIG
++ ret = bm_pool_set(be32_to_cpu(*bpid), thresh);
++ if (ret)
++ pr_err("No CCSR node for %s property '%s'\n",
++ node->full_name, "fsl,bpool-thresholds");
++ return ret;
++#else
++ pr_err("Ignoring %s property '%s', no CCSR support\n",
++ node->full_name, "fsl,bpool-thresholds");
++#endif
++ }
++ return 0;
++}
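++
++/* An illustrative device-tree node consumed by fsl_bpool_init(); the
++ * property values are hypothetical ('fsl,bpid' is one cell,
++ * 'fsl,bpool-thresholds' is four cells):
++ *
++ *	buffer-pool@0 {
++ *		compatible = "fsl,bpool";
++ *		fsl,bpid = <7>;
++ *		fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
++ *	};
++ */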
++
++static int __init fsl_bpid_range_init(struct device_node *node)
++{
++ int ret;
++ u32 *range = (u32 *)of_get_property(node, "fsl,bpid-range", &ret);
++ if (!range) {
++ pr_err("No 'fsl,bpid-range' property in node %s\n",
++ node->full_name);
++ return -EINVAL;
++ }
++ if (ret != 8) {
++ pr_err("'fsl,bpid-range' is not a 2-cell range in node %s\n",
++ node->full_name);
++ return -EINVAL;
++ }
++ bman_seed_bpid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ pr_info("Bman: BPID allocator includes range %d:%d\n",
++ be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ return 0;
++}
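++
++/* An illustrative companion node for fsl_bpid_range_init(); the two cells
++ * of 'fsl,bpid-range' (values hypothetical) are passed straight to
++ * bman_seed_bpid_range():
++ *
++ *	bpid-range@0 {
++ *		compatible = "fsl,bpid-range";
++ *		fsl,bpid-range = <32 32>;
++ *	};
++ */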
++
++static struct bm_portal_config * __init parse_pcfg(struct device_node *node)
++{
++ struct bm_portal_config *pcfg;
++ const u32 *index;
++ int irq, ret;
++ resource_size_t len;
++
++ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
++ if (!pcfg) {
++ pr_err("can't allocate portal config");
++ return NULL;
++ }
++
++ if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
++ of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
++ bman_ip_rev = BMAN_REV10;
++ bman_pool_max = 64;
++ bman_portal_max = 10;
++ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
++ of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
++ bman_ip_rev = BMAN_REV20;
++ bman_pool_max = 8;
++ bman_portal_max = 3;
++ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0")) {
++ bman_ip_rev = BMAN_REV21;
++ bman_pool_max = 64;
++ bman_portal_max = 50;
++ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.1")) {
++ bman_ip_rev = BMAN_REV21;
++ bman_pool_max = 64;
++ bman_portal_max = 25;
++ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.2")) {
++ bman_ip_rev = BMAN_REV21;
++ bman_pool_max = 64;
++ bman_portal_max = 18;
++ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
++ bman_ip_rev = BMAN_REV21;
++ bman_pool_max = 64;
++ bman_portal_max = 10;
++ } else {
++		pr_warn("unknown BMan version in portal node, "
++			"defaulting to rev1.0\n");
++ bman_ip_rev = BMAN_REV10;
++ bman_pool_max = 64;
++ bman_portal_max = 10;
++ }
++
++ ret = of_address_to_resource(node, DPA_PORTAL_CE,
++ &pcfg->addr_phys[DPA_PORTAL_CE]);
++ if (ret) {
++ pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
++ goto err;
++ }
++ ret = of_address_to_resource(node, DPA_PORTAL_CI,
++ &pcfg->addr_phys[DPA_PORTAL_CI]);
++ if (ret) {
++ pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
++ goto err;
++ }
++
++ index = of_get_property(node, "cell-index", &ret);
++ if (!index || (ret != 4)) {
++ pr_err("Can't get %s property '%s'\n", node->full_name,
++ "cell-index");
++ goto err;
++ }
++ if (be32_to_cpu(*index) >= bman_portal_max) {
++ pr_err("BMan portal cell index %d out of range, max %d\n",
++ be32_to_cpu(*index), bman_portal_max);
++ goto err;
++ }
++
++ pcfg->public_cfg.cpu = -1;
++
++ irq = irq_of_parse_and_map(node, 0);
++ if (irq == 0) {
++ pr_err("Can't get %s property 'interrupts'\n", node->full_name);
++ goto err;
++ }
++ pcfg->public_cfg.irq = irq;
++ pcfg->public_cfg.index = be32_to_cpu(*index);
++ bman_depletion_fill(&pcfg->public_cfg.mask);
++
++ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
++ if (len != (unsigned long)len)
++ goto err;
++
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
++ pcfg->addr_phys[DPA_PORTAL_CE].start,
++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
++ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
++ pcfg->addr_phys[DPA_PORTAL_CI].start,
++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
++
++#else
++ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
++ pcfg->addr_phys[DPA_PORTAL_CE].start,
++ (unsigned long)len,
++ 0);
++ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
++ pcfg->addr_phys[DPA_PORTAL_CI].start,
++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
++ _PAGE_GUARDED | _PAGE_NO_CACHE);
++#endif
++ /* disable bp depletion */
++ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(0));
++ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(1));
++ return pcfg;
++err:
++ kfree(pcfg);
++ return NULL;
++}
++
++static struct bm_portal_config *get_pcfg(struct list_head *list)
++{
++ struct bm_portal_config *pcfg;
++ if (list_empty(list))
++ return NULL;
++ pcfg = list_entry(list->prev, struct bm_portal_config, list);
++ list_del(&pcfg->list);
++ return pcfg;
++}
++
++static struct bm_portal_config *get_pcfg_idx(struct list_head *list,
++ uint32_t idx)
++{
++ struct bm_portal_config *pcfg;
++ if (list_empty(list))
++ return NULL;
++ list_for_each_entry(pcfg, list, list) {
++ if (pcfg->public_cfg.index == idx) {
++ list_del(&pcfg->list);
++ return pcfg;
++ }
++ }
++ return NULL;
++}
++
++struct bm_portal_config *bm_get_unused_portal(void)
++{
++ return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
++}
++
++struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx)
++{
++ struct bm_portal_config *ret;
++ spin_lock(&unused_pcfgs_lock);
++ if (idx == QBMAN_ANY_PORTAL_IDX)
++ ret = get_pcfg(&unused_pcfgs);
++ else
++ ret = get_pcfg_idx(&unused_pcfgs, idx);
++ spin_unlock(&unused_pcfgs_lock);
++ return ret;
++}
++
++void bm_put_unused_portal(struct bm_portal_config *pcfg)
++{
++ spin_lock(&unused_pcfgs_lock);
++ list_add(&pcfg->list, &unused_pcfgs);
++ spin_unlock(&unused_pcfgs_lock);
++}
++
++static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
++{
++ struct bman_portal *p;
++ p = bman_create_affine_portal(pcfg);
++ if (p) {
++#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
++ bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
++#endif
++ pr_info("Bman portal %sinitialised, cpu %d\n",
++ pcfg->public_cfg.is_shared ? "(shared) " : "",
++ pcfg->public_cfg.cpu);
++ affine_bportals[pcfg->public_cfg.cpu] = p;
++ } else
++ pr_crit("Bman portal failure on cpu %d\n",
++ pcfg->public_cfg.cpu);
++ return p;
++}
++
++static void init_slave(int cpu)
++{
++ struct bman_portal *p;
++ p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
++ if (!p)
++ pr_err("Bman slave portal failure on cpu %d\n", cpu);
++ else
++ pr_info("Bman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
++ if (shared_portals_idx >= num_shared_portals)
++ shared_portals_idx = 0;
++ affine_bportals[cpu] = p;
++}
++
++/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
++ * parsing is in dpa_sys.h. The syntax is a comma-separated list of indexes
++ * and/or ranges of indexes, with each being optionally prefixed by "s" to
++ * explicitly mark it or them for sharing.
++ * E.g.:
++ * bportals=s0,1-3,s4
++ * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
++ * portals, and any remaining cpus share the portals that are assigned to cpus 0
++ * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
++ * cpu 0's portal, cpu 6 would share cpu 4's portal, and cpu 7 would share cpu
++ * 0's portal.) */
++static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
++static struct cpumask want_shared __initdata; /* cpus requested with "s" */
++
++static int __init parse_bportals(char *str)
++{
++ return parse_portals_bootarg(str, &want_shared, &want_unshared,
++ "bportals");
++}
++__setup("bportals=", parse_bportals);
++
++static int bman_offline_cpu(unsigned int cpu)
++{
++ struct bman_portal *p;
++ const struct bm_portal_config *pcfg;
++ p = (struct bman_portal *)affine_bportals[cpu];
++ if (p) {
++ pcfg = bman_get_bm_portal_config(p);
++ if (pcfg)
++ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static int bman_online_cpu(unsigned int cpu)
++{
++ struct bman_portal *p;
++ const struct bm_portal_config *pcfg;
++ p = (struct bman_portal *)affine_bportals[cpu];
++ if (p) {
++ pcfg = bman_get_bm_portal_config(p);
++ if (pcfg)
++ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
++ }
++ return 0;
++}
++static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (unsigned long)hcpu;
++
++ switch (action) {
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ bman_online_cpu(cpu);
++ break;
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ bman_offline_cpu(cpu);
++ default:
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block bman_hotplug_cpu_notifier = {
++ .notifier_call = bman_hotplug_cpu_callback,
++};
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/* Initialise the Bman driver. The meat of this function deals with portals. The
++ * following describes the flow of portal-handling, the code "steps" refer to
++ * this description;
++ * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
++ * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
++ * bound).
++ * 2. The "want_shared" and "want_unshared" lists (as filled by the
++ * "bportals=[...]" bootarg) are processed, allocating portals and assigning
++ * them to cpus, placing them in the relevant list and setting ::cpu as
++ *    appropriate. If no "bportals" bootarg was present, the default is to try to
++ * assign portals to all online cpus at the time of driver initialisation.
++ * Any failure to allocate portals (when parsing the "want" lists or when
++ * using default behaviour) will be silently tolerated (the "fixup" logic in
++ * step 3 will determine what happens in this case).
++ * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
++ * sharing and sharing is required (because not all cpus have been assigned
++ *    portals), then one portal will be marked for sharing. Conversely, if no
++ * sharing is required, any portals marked for sharing will not be shared. It
++ * may be that sharing occurs when it wasn't expected, if portal allocation
++ * failed to honour all the requested assignments (including the default
++ * assignments if no bootarg is present).
++ * 4. Unshared portals are initialised on their respective cpus.
++ * 5. Shared portals are initialised on their respective cpus.
++ * 6. Each remaining cpu is initialised to slave to one of the shared portals,
++ * which are selected in a round-robin fashion.
++ * Any portal configs left unused are available for USDPAA allocation.
++ */
++__init int bman_init(void)
++{
++ struct cpumask slave_cpus;
++ struct cpumask unshared_cpus = *cpu_none_mask;
++ struct cpumask shared_cpus = *cpu_none_mask;
++ LIST_HEAD(unshared_pcfgs);
++ LIST_HEAD(shared_pcfgs);
++ struct device_node *dn;
++ struct bm_portal_config *pcfg;
++ struct bman_portal *p;
++ int cpu, ret;
++ struct cpumask offline_cpus;
++
++ /* Initialise the Bman (CCSR) device */
++ for_each_compatible_node(dn, NULL, "fsl,bman") {
++ if (!bman_init_ccsr(dn))
++ pr_info("Bman err interrupt handler present\n");
++ else
++ pr_err("Bman CCSR setup failed\n");
++ }
++ /* Initialise any declared buffer pools */
++ for_each_compatible_node(dn, NULL, "fsl,bpool") {
++ ret = fsl_bpool_init(dn);
++ if (ret)
++ return ret;
++ }
++ /* Step 1. See comments at the beginning of the file. */
++ for_each_compatible_node(dn, NULL, "fsl,bman-portal") {
++ if (!of_device_is_available(dn))
++ continue;
++ pcfg = parse_pcfg(dn);
++ if (pcfg)
++ list_add_tail(&pcfg->list, &unused_pcfgs);
++ }
++ /* Step 2. */
++ for_each_possible_cpu(cpu) {
++ if (cpumask_test_cpu(cpu, &want_shared)) {
++ pcfg = get_pcfg(&unused_pcfgs);
++ if (!pcfg)
++ break;
++ pcfg->public_cfg.cpu = cpu;
++ list_add_tail(&pcfg->list, &shared_pcfgs);
++ cpumask_set_cpu(cpu, &shared_cpus);
++ }
++ if (cpumask_test_cpu(cpu, &want_unshared)) {
++ if (cpumask_test_cpu(cpu, &shared_cpus))
++ continue;
++ pcfg = get_pcfg(&unused_pcfgs);
++ if (!pcfg)
++ break;
++ pcfg->public_cfg.cpu = cpu;
++ list_add_tail(&pcfg->list, &unshared_pcfgs);
++ cpumask_set_cpu(cpu, &unshared_cpus);
++ }
++ }
++ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
++ /* Default, give an unshared portal to each online cpu */
++ for_each_online_cpu(cpu) {
++ pcfg = get_pcfg(&unused_pcfgs);
++ if (!pcfg)
++ break;
++ pcfg->public_cfg.cpu = cpu;
++ list_add_tail(&pcfg->list, &unshared_pcfgs);
++ cpumask_set_cpu(cpu, &unshared_cpus);
++ }
++ }
++ /* Step 3. */
++ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
++ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
++ if (cpumask_empty(&slave_cpus)) {
++ /* No sharing required */
++ if (!list_empty(&shared_pcfgs)) {
++ /* Migrate "shared" to "unshared" */
++ cpumask_or(&unshared_cpus, &unshared_cpus,
++ &shared_cpus);
++ cpumask_clear(&shared_cpus);
++ list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
++ INIT_LIST_HEAD(&shared_pcfgs);
++ }
++ } else {
++ /* Sharing required */
++ if (list_empty(&shared_pcfgs)) {
++ /* Migrate one "unshared" to "shared" */
++ pcfg = get_pcfg(&unshared_pcfgs);
++ if (!pcfg) {
++ pr_crit("No BMan portals available!\n");
++ return 0;
++ }
++ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
++ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
++ list_add_tail(&pcfg->list, &shared_pcfgs);
++ }
++ }
++ /* Step 4. */
++ list_for_each_entry(pcfg, &unshared_pcfgs, list) {
++ pcfg->public_cfg.is_shared = 0;
++ p = init_pcfg(pcfg);
++ if (!p) {
++			pr_crit("Unable to initialise bman portal\n");
++ return 0;
++ }
++ }
++ /* Step 5. */
++ list_for_each_entry(pcfg, &shared_pcfgs, list) {
++ pcfg->public_cfg.is_shared = 1;
++ p = init_pcfg(pcfg);
++ if (p)
++ shared_portals[num_shared_portals++] = p;
++ }
++ /* Step 6. */
++ if (!cpumask_empty(&slave_cpus))
++ for_each_cpu(cpu, &slave_cpus)
++ init_slave(cpu);
++ pr_info("Bman portals initialised\n");
++ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
++ for_each_cpu(cpu, &offline_cpus)
++ bman_offline_cpu(cpu);
++#ifdef CONFIG_HOTPLUG_CPU
++ register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
++#endif
++ return 0;
++}
++
++__init int bman_resource_init(void)
++{
++ struct device_node *dn;
++ int ret;
++
++ /* Initialise BPID allocation ranges */
++ for_each_compatible_node(dn, NULL, "fsl,bpid-range") {
++ ret = fsl_bpid_range_init(dn);
++ if (ret)
++ return ret;
++ }
++ return 0;
++}
++
++#ifdef CONFIG_SUSPEND
++void suspend_unused_bportal(void)
++{
++ struct bm_portal_config *pcfg;
++
++ if (list_empty(&unused_pcfgs))
++ return;
++
++ list_for_each_entry(pcfg, &unused_pcfgs, list) {
++#ifdef CONFIG_PM_DEBUG
++ pr_info("Need to save bportal %d\n", pcfg->public_cfg.index);
++#endif
++ /* save isdr, disable all via isdr, clear isr */
++ pcfg->saved_isdr =
++ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
++ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
++ 0xe08);
++ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
++ 0xe00);
++ }
++ return;
++}
++
++void resume_unused_bportal(void)
++{
++ struct bm_portal_config *pcfg;
++
++ if (list_empty(&unused_pcfgs))
++ return;
++
++ list_for_each_entry(pcfg, &unused_pcfgs, list) {
++#ifdef CONFIG_PM_DEBUG
++ pr_info("Need to resume bportal %d\n", pcfg->public_cfg.index);
++#endif
++ /* restore isdr */
++ __raw_writel(pcfg->saved_isdr,
++ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
++ }
++ return;
++}
++#endif
+diff --git a/drivers/staging/fsl_qbman/bman_high.c b/drivers/staging/fsl_qbman/bman_high.c
+new file mode 100644
+index 00000000..c066602d
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_high.c
+@@ -0,0 +1,1145 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "bman_low.h"
++
++/* Compilation constants */
++#define RCR_THRESH 2 /* reread h/w CI when running out of space */
++#define IRQNAME "BMan portal %d"
++#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
++
++struct bman_portal {
++ struct bm_portal p;
++ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
++ struct bman_depletion *pools;
++ int thresh_set;
++ unsigned long irq_sources;
++ u32 slowpoll; /* only used when interrupts are off */
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
++#endif
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ raw_spinlock_t sharing_lock; /* only used if is_shared */
++ int is_shared;
++ struct bman_portal *sharing_redirect;
++#endif
++ /* When the cpu-affine portal is activated, this is non-NULL */
++ const struct bm_portal_config *config;
++ /* This is needed for power management */
++ struct platform_device *pdev;
++ /* 64-entry hash-table of pool objects that are tracking depletion
++ * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
++ * we're not fussy about cache-misses and so forth - whereas the above
++ * members should all fit in one cacheline.
++ * BTW, with 64 entries in the hash table and 64 buffer pools to track,
++ * you'll never guess the hash-function ... */
++ struct bman_pool *cb[64];
++ char irqname[MAX_IRQNAME];
++ /* Track if the portal was alloced by the driver */
++ u8 alloced;
++ /* power management data */
++ u32 save_isdr;
++};
++
++/* For an explanation of the locking, redirection, or affine-portal logic,
++ * please consult the Qman driver for details. This is the same, only simpler
++ * (no fiddly Qman-specific bits.) */
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++#define PORTAL_IRQ_LOCK(p, irqflags) \
++ do { \
++ if ((p)->is_shared) \
++ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
++ else \
++ local_irq_save(irqflags); \
++ } while (0)
++#define PORTAL_IRQ_UNLOCK(p, irqflags) \
++ do { \
++ if ((p)->is_shared) \
++ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
++ irqflags); \
++ else \
++ local_irq_restore(irqflags); \
++ } while (0)
++#else
++#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
++#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
++#endif
++
++static cpumask_t affine_mask;
++static DEFINE_SPINLOCK(affine_mask_lock);
++static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
++static inline struct bman_portal *get_raw_affine_portal(void)
++{
++ return &get_cpu_var(bman_affine_portal);
++}
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++static inline struct bman_portal *get_affine_portal(void)
++{
++ struct bman_portal *p = get_raw_affine_portal();
++ if (p->sharing_redirect)
++ return p->sharing_redirect;
++ return p;
++}
++#else
++#define get_affine_portal() get_raw_affine_portal()
++#endif
++static inline void put_affine_portal(void)
++{
++ put_cpu_var(bman_affine_portal);
++}
++static inline struct bman_portal *get_poll_portal(void)
++{
++ return &get_cpu_var(bman_affine_portal);
++}
++#define put_poll_portal()
++
++/* GOTCHA: this object type refers to a pool; it isn't *the* pool. There may be
++ * more than one such object per Bman buffer pool, eg. if different users of the
++ * pool are operating via different portals. */
++struct bman_pool {
++ struct bman_pool_params params;
++ /* Used for hash-table admin when using depletion notifications. */
++ struct bman_portal *portal;
++ struct bman_pool *next;
++ /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
++ struct bm_buffer *sp;
++ unsigned int sp_fill;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ atomic_t in_use;
++#endif
++};
++
++/* (De)Registration of depletion notification callbacks */
++static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
++{
++ __maybe_unused unsigned long irqflags;
++ pool->portal = portal;
++ PORTAL_IRQ_LOCK(portal, irqflags);
++ pool->next = portal->cb[pool->params.bpid];
++ portal->cb[pool->params.bpid] = pool;
++ if (!pool->next)
++ /* First object for that bpid on this portal, enable the BSCN
++ * mask bit. */
++ bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
++ PORTAL_IRQ_UNLOCK(portal, irqflags);
++}
++static void depletion_unlink(struct bman_pool *pool)
++{
++ struct bman_pool *it, *last = NULL;
++ struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
++ __maybe_unused unsigned long irqflags;
++ PORTAL_IRQ_LOCK(pool->portal, irqflags);
++ it = *base; /* <-- gotcha, don't do this prior to the irq_save */
++ while (it != pool) {
++ last = it;
++ it = it->next;
++ }
++ if (!last)
++ *base = pool->next;
++ else
++ last->next = pool->next;
++ if (!last && !pool->next) {
++ /* Last object for that bpid on this portal, disable the BSCN
++ * mask bit. */
++ bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
++ /* And "forget" that we last saw this pool as depleted */
++ bman_depletion_unset(&pool->portal->pools[1],
++ pool->params.bpid);
++ }
++ PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
++}
++
++/* In the case that the application's core loop calls qman_poll() and
++ * bman_poll(), we ought to balance how often we incur the overheads of the
++ * slow-path poll. We'll use two decrementer constants. The idle decrementer
++ * constant is used when the last slow-poll detected no work to do, and the busy
++ * decrementer constant when the last slow-poll had work to do. */
++#define SLOW_POLL_IDLE 1000
++#define SLOW_POLL_BUSY 10
++static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
++
++/* Portal interrupt handler */
++static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
++{
++ struct bman_portal *p = ptr;
++ u32 clear = p->irq_sources;
++ u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
++ clear |= __poll_portal_slow(p, is);
++ bm_isr_status_clear(&p->p, clear);
++ return IRQ_HANDLED;
++}
++
++#ifdef CONFIG_SUSPEND
++static int _bman_portal_suspend_noirq(struct device *dev)
++{
++ struct bman_portal *p = (struct bman_portal *)dev->platform_data;
++#ifdef CONFIG_PM_DEBUG
++ struct platform_device *pdev = to_platform_device(dev);
++#endif
++ p->save_isdr = bm_isr_disable_read(&p->p);
++ bm_isr_disable_write(&p->p, 0xffffffff);
++ bm_isr_status_clear(&p->p, 0xffffffff);
++#ifdef CONFIG_PM_DEBUG
++ pr_info("Suspend for %s\n", pdev->name);
++#endif
++ return 0;
++}
++
++static int _bman_portal_resume_noirq(struct device *dev)
++{
++ struct bman_portal *p = (struct bman_portal *)dev->platform_data;
++
++ /* restore isdr */
++ bm_isr_disable_write(&p->p, p->save_isdr);
++ return 0;
++}
++#else
++#define _bman_portal_suspend_noirq NULL
++#define _bman_portal_resume_noirq NULL
++#endif
++
++struct dev_pm_domain bman_portal_device_pm_domain = {
++ .ops = {
++ USE_PLATFORM_PM_SLEEP_OPS
++ .suspend_noirq = _bman_portal_suspend_noirq,
++ .resume_noirq = _bman_portal_resume_noirq,
++ }
++};
++
++struct bman_portal *bman_create_portal(
++ struct bman_portal *portal,
++ const struct bm_portal_config *config)
++{
++ struct bm_portal *__p;
++ const struct bman_depletion *pools = &config->public_cfg.mask;
++ int ret;
++ u8 bpid = 0;
++ char buf[16];
++
++ if (!portal) {
++ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
++ if (!portal)
++ return portal;
++ portal->alloced = 1;
++ } else
++ portal->alloced = 0;
++
++ __p = &portal->p;
++
++	/* prep the low-level portal struct with the mapped addresses from the
++	 * config; everything that follows depends on it, and "config" is more
++	 * for (de)reference... */
++ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
++ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
++ if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
++ pr_err("Bman RCR initialisation failed\n");
++ goto fail_rcr;
++ }
++ if (bm_mc_init(__p)) {
++ pr_err("Bman MC initialisation failed\n");
++ goto fail_mc;
++ }
++ if (bm_isr_init(__p)) {
++ pr_err("Bman ISR initialisation failed\n");
++ goto fail_isr;
++ }
++ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
++ if (!portal->pools)
++ goto fail_pools;
++ portal->pools[0] = *pools;
++ bman_depletion_init(portal->pools + 1);
++ while (bpid < bman_pool_max) {
++		/* Default to all BPIDs disabled; we enable as required at
++ * run-time. */
++ bm_isr_bscn_mask(__p, bpid, 0);
++ bpid++;
++ }
++ portal->slowpoll = 0;
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ portal->rcri_owned = NULL;
++#endif
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ raw_spin_lock_init(&portal->sharing_lock);
++ portal->is_shared = config->public_cfg.is_shared;
++ portal->sharing_redirect = NULL;
++#endif
++ sprintf(buf, "bportal-%u", config->public_cfg.index);
++ portal->pdev = platform_device_alloc(buf, -1);
++ if (!portal->pdev)
++ goto fail_devalloc;
++ portal->pdev->dev.pm_domain = &bman_portal_device_pm_domain;
++ portal->pdev->dev.platform_data = portal;
++ ret = platform_device_add(portal->pdev);
++ if (ret)
++ goto fail_devadd;
++ memset(&portal->cb, 0, sizeof(portal->cb));
++ /* Write-to-clear any stale interrupt status bits */
++ bm_isr_disable_write(__p, 0xffffffff);
++ portal->irq_sources = 0;
++ bm_isr_enable_write(__p, portal->irq_sources);
++ bm_isr_status_clear(__p, 0xffffffff);
++ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
++ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
++ portal)) {
++ pr_err("request_irq() failed\n");
++ goto fail_irq;
++ }
++ if ((config->public_cfg.cpu != -1) &&
++ irq_can_set_affinity(config->public_cfg.irq) &&
++ irq_set_affinity(config->public_cfg.irq,
++ cpumask_of(config->public_cfg.cpu))) {
++ pr_err("irq_set_affinity() failed %s\n", portal->irqname);
++ goto fail_affinity;
++ }
++
++ /* Need RCR to be empty before continuing */
++ ret = bm_rcr_get_fill(__p);
++ if (ret) {
++ pr_err("Bman RCR unclean\n");
++ goto fail_rcr_empty;
++ }
++ /* Success */
++ portal->config = config;
++
++ bm_isr_disable_write(__p, 0);
++ bm_isr_uninhibit(__p);
++ return portal;
++fail_rcr_empty:
++fail_affinity:
++ free_irq(config->public_cfg.irq, portal);
++fail_irq:
++ platform_device_del(portal->pdev);
++fail_devadd:
++ platform_device_put(portal->pdev);
++fail_devalloc:
++ kfree(portal->pools);
++fail_pools:
++ bm_isr_finish(__p);
++fail_isr:
++ bm_mc_finish(__p);
++fail_mc:
++ bm_rcr_finish(__p);
++fail_rcr:
++ if (portal->alloced)
++ kfree(portal);
++ return NULL;
++}
++
++struct bman_portal *bman_create_affine_portal(
++ const struct bm_portal_config *config)
++{
++ struct bman_portal *portal;
++
++ portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
++ portal = bman_create_portal(portal, config);
++ if (portal) {
++ spin_lock(&affine_mask_lock);
++ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
++ spin_unlock(&affine_mask_lock);
++ }
++ return portal;
++}
++
++
++struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
++ int cpu)
++{
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ struct bman_portal *p;
++ p = &per_cpu(bman_affine_portal, cpu);
++ BUG_ON(p->config);
++ BUG_ON(p->is_shared);
++ BUG_ON(!redirect->config->public_cfg.is_shared);
++ p->irq_sources = 0;
++ p->sharing_redirect = redirect;
++ return p;
++#else
++ BUG();
++ return NULL;
++#endif
++}
++
++void bman_destroy_portal(struct bman_portal *bm)
++{
++ const struct bm_portal_config *pcfg;
++ pcfg = bm->config;
++ bm_rcr_cce_update(&bm->p);
++ bm_rcr_cce_update(&bm->p);
++
++ free_irq(pcfg->public_cfg.irq, bm);
++
++ kfree(bm->pools);
++ bm_isr_finish(&bm->p);
++ bm_mc_finish(&bm->p);
++ bm_rcr_finish(&bm->p);
++ bm->config = NULL;
++ if (bm->alloced)
++ kfree(bm);
++}
++
++const struct bm_portal_config *bman_destroy_affine_portal(void)
++{
++ struct bman_portal *bm = get_raw_affine_portal();
++ const struct bm_portal_config *pcfg;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (bm->sharing_redirect) {
++ bm->sharing_redirect = NULL;
++ put_affine_portal();
++ return NULL;
++ }
++ bm->is_shared = 0;
++#endif
++ pcfg = bm->config;
++ bman_destroy_portal(bm);
++ spin_lock(&affine_mask_lock);
++ cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
++ spin_unlock(&affine_mask_lock);
++ put_affine_portal();
++ return pcfg;
++}
++
++/* When release logic waits on available RCR space, we need a global waitqueue
++ * in the case of "affine" use (as the waits wake on different cpus which means
++ * different portals - so we can't wait on any per-portal waitqueue). */
++static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
++
++static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
++{
++ struct bman_depletion tmp;
++ u32 ret = is;
++
++ /* There is a gotcha to be aware of. If we do the query before clearing
++ * the status register, we may miss state changes that occur between the
++ * two. If we write to clear the status register before the query, the
++ * cache-enabled query command may overtake the status register write
++ * unless we use a heavyweight sync (which we don't want). Instead, we
++ * write-to-clear the status register then *read it back* before doing
++ * the query, hence the odd while loop with the 'is' accumulation. */
++ if (is & BM_PIRQ_BSCN) {
++ struct bm_mc_result *mcr;
++ __maybe_unused unsigned long irqflags;
++ unsigned int i, j;
++ u32 __is;
++ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
++ while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
++ is |= __is;
++ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
++ }
++ is &= ~BM_PIRQ_BSCN;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ bm_mc_start(&p->p);
++ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
++ while (!(mcr = bm_mc_result(&p->p)))
++ cpu_relax();
++ tmp = mcr->query.ds.state;
++ tmp.__state[0] = be32_to_cpu(tmp.__state[0]);
++ tmp.__state[1] = be32_to_cpu(tmp.__state[1]);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ for (i = 0; i < 2; i++) {
++ int idx = i * 32;
++ /* tmp is a mask of currently-depleted pools.
++ * pools[0] is mask of those we care about.
++ * pools[1] is our previous view (we only want to
++ * be told about changes). */
++ tmp.__state[i] &= p->pools[0].__state[i];
++ if (tmp.__state[i] == p->pools[1].__state[i])
++ /* fast-path, nothing to see, move along */
++ continue;
++ for (j = 0; j <= 31; j++, idx++) {
++ struct bman_pool *pool = p->cb[idx];
++ int b4 = bman_depletion_get(&p->pools[1], idx);
++ int af = bman_depletion_get(&tmp, idx);
++ if (b4 == af)
++ continue;
++ while (pool) {
++ pool->params.cb(p, pool,
++ pool->params.cb_ctx, af);
++ pool = pool->next;
++ }
++ }
++ }
++ p->pools[1] = tmp;
++ }
++
++ if (is & BM_PIRQ_RCRI) {
++ __maybe_unused unsigned long irqflags;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ bm_rcr_cce_update(&p->p);
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ /* If waiting for sync, we only cancel the interrupt threshold
++ * when the ring utilisation hits zero. */
++ if (p->rcri_owned) {
++ if (!bm_rcr_get_fill(&p->p)) {
++ p->rcri_owned = NULL;
++ bm_rcr_set_ithresh(&p->p, 0);
++ }
++ } else
++#endif
++ bm_rcr_set_ithresh(&p->p, 0);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ wake_up(&affine_queue);
++ bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
++ is &= ~BM_PIRQ_RCRI;
++ }
++
++ /* There should be no status register bits left undefined */
++ DPA_ASSERT(!is);
++ return ret;
++}
++
++const struct bman_portal_config *bman_get_portal_config(void)
++{
++ struct bman_portal *p = get_affine_portal();
++ const struct bman_portal_config *ret = &p->config->public_cfg;
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(bman_get_portal_config);
++
++u32 bman_irqsource_get(void)
++{
++ struct bman_portal *p = get_raw_affine_portal();
++ u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(bman_irqsource_get);
++
++int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
++{
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (p->sharing_redirect)
++ return -EINVAL;
++ else
++#endif
++ {
++ __maybe_unused unsigned long irqflags;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
++ bm_isr_enable_write(&p->p, p->irq_sources);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(bman_p_irqsource_add);
++
++int bman_irqsource_add(__maybe_unused u32 bits)
++{
++ struct bman_portal *p = get_raw_affine_portal();
++ int ret = 0;
++ ret = bman_p_irqsource_add(p, bits);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(bman_irqsource_add);
++
++int bman_irqsource_remove(u32 bits)
++{
++ struct bman_portal *p = get_raw_affine_portal();
++ __maybe_unused unsigned long irqflags;
++ u32 ier;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (p->sharing_redirect) {
++ put_affine_portal();
++ return -EINVAL;
++ }
++#endif
++ /* Our interrupt handler only processes+clears status register bits that
++ * are in p->irq_sources. As we're trimming that mask, if one of them
++ * were to assert in the status register just before we remove it from
++ * the enable register, there would be an interrupt-storm when we
++ * release the IRQ lock. So we wait for the enable register update to
++ * take effect in h/w (by reading it back) and then clear all other bits
++ * in the status register. Ie. we clear them from ISR once it's certain
++ * IER won't allow them to reassert. */
++ PORTAL_IRQ_LOCK(p, irqflags);
++ bits &= BM_PIRQ_VISIBLE;
++ clear_bits(bits, &p->irq_sources);
++ bm_isr_enable_write(&p->p, p->irq_sources);
++ ier = bm_isr_enable_read(&p->p);
++ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
++ * data-dependency, ie. to protect against re-ordering. */
++ bm_isr_status_clear(&p->p, ~ier);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return 0;
++}
++EXPORT_SYMBOL(bman_irqsource_remove);
++
++const cpumask_t *bman_affine_cpus(void)
++{
++ return &affine_mask;
++}
++EXPORT_SYMBOL(bman_affine_cpus);
++
++u32 bman_poll_slow(void)
++{
++ struct bman_portal *p = get_poll_portal();
++ u32 ret;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (unlikely(p->sharing_redirect))
++ ret = (u32)-1;
++ else
++#endif
++ {
++ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
++ ret = __poll_portal_slow(p, is);
++ bm_isr_status_clear(&p->p, ret);
++ }
++ put_poll_portal();
++ return ret;
++}
++EXPORT_SYMBOL(bman_poll_slow);
++
++/* Legacy wrapper */
++void bman_poll(void)
++{
++ struct bman_portal *p = get_poll_portal();
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (unlikely(p->sharing_redirect))
++ goto done;
++#endif
++ if (!(p->slowpoll--)) {
++ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
++ u32 active = __poll_portal_slow(p, is);
++ if (active)
++ p->slowpoll = SLOW_POLL_BUSY;
++ else
++ p->slowpoll = SLOW_POLL_IDLE;
++ }
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++done:
++#endif
++ put_poll_portal();
++}
++EXPORT_SYMBOL(bman_poll);
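++
++/* Illustrative sketch of the application core loop referred to by the
++ * SLOW_POLL_* comment above (hypothetical client code; qman_poll() is the
++ * sibling Qman driver's equivalent, and do_application_work() stands in for
++ * the client's own processing):
++ *
++ *	for (;;) {
++ *		qman_poll();
++ *		bman_poll();
++ *		do_application_work();
++ *	}
++ */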
++
++static const u32 zero_thresholds[4] = {0, 0, 0, 0};
++
++struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
++{
++ struct bman_pool *pool = NULL;
++ u32 bpid;
++
++ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
++ int ret = bman_alloc_bpid(&bpid);
++ if (ret)
++ return NULL;
++ } else {
++ if (params->bpid >= bman_pool_max)
++ return NULL;
++ bpid = params->bpid;
++ }
++#ifdef CONFIG_FSL_BMAN_CONFIG
++ if (params->flags & BMAN_POOL_FLAG_THRESH) {
++ int ret = bm_pool_set(bpid, params->thresholds);
++ if (ret)
++ goto err;
++ }
++#else
++ if (params->flags & BMAN_POOL_FLAG_THRESH)
++ goto err;
++#endif
++ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
++ if (!pool)
++ goto err;
++ pool->sp = NULL;
++ pool->sp_fill = 0;
++ pool->params = *params;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ atomic_set(&pool->in_use, 1);
++#endif
++ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
++ pool->params.bpid = bpid;
++ if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
++ pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
++ GFP_KERNEL);
++ if (!pool->sp)
++ goto err;
++ }
++ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
++ struct bman_portal *p = get_affine_portal();
++ if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
++ pr_err("Depletion events disabled for bpid %d\n", bpid);
++ goto err;
++ }
++ depletion_link(p, pool);
++ put_affine_portal();
++ }
++ return pool;
++err:
++#ifdef CONFIG_FSL_BMAN_CONFIG
++ if (params->flags & BMAN_POOL_FLAG_THRESH)
++ bm_pool_set(bpid, zero_thresholds);
++#endif
++ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
++ bman_release_bpid(bpid);
++ if (pool) {
++ kfree(pool->sp);
++ kfree(pool);
++ }
++ return NULL;
++}
++EXPORT_SYMBOL(bman_new_pool);
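++
++/* Illustrative usage sketch (hypothetical client code): create a pool with
++ * a dynamically-allocated BPID and depletion notifications. The callback
++ * signature matches the invocation in __poll_portal_slow() above.
++ *
++ *	static void my_bp_cb(struct bman_portal *p, struct bman_pool *pool,
++ *			     void *cb_ctx, int depleted)
++ *	{
++ *		pr_info("bpid %u %s\n", bman_get_params(pool)->bpid,
++ *			depleted ? "depleted" : "replenished");
++ *	}
++ *
++ *	struct bman_pool_params params = {
++ *		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID |
++ *			 BMAN_POOL_FLAG_DEPLETION,
++ *		.cb = my_bp_cb,
++ *	};
++ *	struct bman_pool *pool = bman_new_pool(&params);
++ */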
++
++void bman_free_pool(struct bman_pool *pool)
++{
++#ifdef CONFIG_FSL_BMAN_CONFIG
++ if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
++ bm_pool_set(pool->params.bpid, zero_thresholds);
++#endif
++ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
++ depletion_unlink(pool);
++ if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
++ if (pool->sp_fill)
++ pr_err("Stockpile not flushed, has %u in bpid %u.\n",
++ pool->sp_fill, pool->params.bpid);
++ kfree(pool->sp);
++ pool->sp = NULL;
++ pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
++ }
++ if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
++ bman_release_bpid(pool->params.bpid);
++ kfree(pool);
++}
++EXPORT_SYMBOL(bman_free_pool);
++
++const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
++{
++ return &pool->params;
++}
++EXPORT_SYMBOL(bman_get_params);
++
++static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
++{
++ if (avail)
++ bm_rcr_cce_prefetch(&p->p);
++ else
++ bm_rcr_cce_update(&p->p);
++}
++
++int bman_rcr_is_empty(void)
++{
++ __maybe_unused unsigned long irqflags;
++ struct bman_portal *p = get_affine_portal();
++ u8 avail;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ update_rcr_ci(p, 0);
++ avail = bm_rcr_get_fill(&p->p);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return avail == 0;
++}
++EXPORT_SYMBOL(bman_rcr_is_empty);
++
++static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ __maybe_unused struct bman_pool *pool,
++#endif
++ __maybe_unused unsigned long *irqflags,
++ __maybe_unused u32 flags)
++{
++ struct bm_rcr_entry *r;
++ u8 avail;
++
++ *p = get_affine_portal();
++ PORTAL_IRQ_LOCK(*p, (*irqflags));
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
++ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
++ if ((*p)->rcri_owned) {
++ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
++ put_affine_portal();
++ return NULL;
++ }
++ (*p)->rcri_owned = pool;
++ }
++#endif
++ avail = bm_rcr_get_avail(&(*p)->p);
++ if (avail < 2)
++ update_rcr_ci(*p, avail);
++ r = bm_rcr_start(&(*p)->p);
++ if (unlikely(!r)) {
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
++ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
++ (*p)->rcri_owned = NULL;
++#endif
++ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
++ put_affine_portal();
++ }
++ return r;
++}
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
++ struct bman_pool *pool,
++ __maybe_unused unsigned long *irqflags,
++ u32 flags)
++{
++ struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
++ if (!rcr)
++ bm_rcr_set_ithresh(&(*p)->p, 1);
++ return rcr;
++}
++
++static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
++ struct bman_pool *pool,
++ __maybe_unused unsigned long *irqflags,
++ u32 flags)
++{
++ struct bm_rcr_entry *rcr;
++#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ pool = NULL;
++#endif
++ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
++ /* NB: return NULL if signal occurs before completion. Signal
++ * can occur during return. Caller must check for signal */
++ wait_event_interruptible(affine_queue,
++ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
++ else
++ wait_event(affine_queue,
++ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
++ return rcr;
++}
++#endif
++
++static inline int __bman_release(struct bman_pool *pool,
++ const struct bm_buffer *bufs, u8 num, u32 flags)
++{
++ struct bman_portal *p;
++ struct bm_rcr_entry *r;
++ __maybe_unused unsigned long irqflags;
++ u32 i = num - 1;
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & BMAN_RELEASE_FLAG_WAIT)
++ r = wait_rel_start(&p, pool, &irqflags, flags);
++ else
++ r = try_rel_start(&p, pool, &irqflags, flags);
++#else
++ r = try_rel_start(&p, &irqflags, flags);
++#endif
++ if (!r)
++ return -EBUSY;
++	/* We can copy all but the first entry directly; writing the first
++	 * entry's verb byte prematurely can trigger badness with the
++	 * valid-bit, so use the overlay to mask it. */
++ r->bufs[0].opaque =
++ ((cpu_to_be64((bufs[0].opaque |
++ ((u64)pool->params.bpid<<48))
++ & 0x00ffffffffffffff)));
++ if (i) {
++ for (i = 1; i < num; i++)
++ r->bufs[i].opaque =
++ cpu_to_be64(bufs[i].opaque);
++ }
++
++ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
++ (num & BM_RCR_VERB_BUFCOUNT_MASK));
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ /* if we wish to sync we need to set the threshold after h/w sees the
++ * new ring entry. As we're mixing cache-enabled and cache-inhibited
++ * accesses, this requires a heavy-weight sync. */
++ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
++ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
++ hwsync();
++ bm_rcr_set_ithresh(&p->p, 1);
++ }
++#endif
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
++ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
++ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
++ /* NB: return success even if signal occurs before
++ * condition is true. pvb_commit guarantees success */
++ wait_event_interruptible(affine_queue,
++ (p->rcri_owned != pool));
++ else
++ wait_event(affine_queue, (p->rcri_owned != pool));
++ }
++#endif
++ return 0;
++}
++
++int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
++ u32 flags)
++{
++ int ret;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (!num || (num > 8))
++ return -EINVAL;
++ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
++ return -EINVAL;
++#endif
++ /* Without stockpile, this API is a pass-through to the h/w operation */
++ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
++ return __bman_release(pool, bufs, num, flags);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (!atomic_dec_and_test(&pool->in_use)) {
++		pr_crit("Parallel attempts to enter bman_release() detected.");
++		panic("only one instance of bman_release/acquire allowed");
++ }
++#endif
++ /* Two movements of buffers are possible, and can occur in either order.
++ * A: moving buffers from the caller to the stockpile.
++ * B: moving buffers from the stockpile to hardware.
++ * Order 1: if there is already enough space in the stockpile for A
++ * then we want to do A first, and only do B if we trigger the
++ * stockpile-high threshold.
++ * Order 2: if there is not enough space in the stockpile for A, then
++ * we want to do B first, then do A if B had succeeded. However in this
++ * case B is dependent on how many buffers the user needs to release,
++ * not the stockpile-high threshold.
++ * Due to the different handling of B between the two cases, putting A
++ * and B in a while() loop would require quite obscure logic, so handle
++ * the different sequences explicitly. */
++ if ((pool->sp_fill + num) <= BMAN_STOCKPILE_SZ) {
++ /* Order 1: do A */
++ copy_words(pool->sp + pool->sp_fill, bufs,
++ sizeof(struct bm_buffer) * num);
++ pool->sp_fill += num;
++ /* do B relative to STOCKPILE_HIGH */
++ while (pool->sp_fill >= BMAN_STOCKPILE_HIGH) {
++ ret = __bman_release(pool,
++ pool->sp + (pool->sp_fill - 8), 8,
++ flags);
++ if (ret >= 0)
++ pool->sp_fill -= 8;
++ }
++ } else {
++ /* Order 2: do B relative to 'num' */
++ do {
++ ret = __bman_release(pool,
++ pool->sp + (pool->sp_fill - 8), 8,
++ flags);
++ if (ret < 0)
++ /* failure */
++ goto release_done;
++ pool->sp_fill -= 8;
++ } while ((pool->sp_fill + num) > BMAN_STOCKPILE_SZ);
++ /* do A */
++ copy_words(pool->sp + pool->sp_fill, bufs,
++ sizeof(struct bm_buffer) * num);
++ pool->sp_fill += num;
++ }
++ /* success */
++ ret = 0;
++release_done:
++#ifdef CONFIG_FSL_DPA_CHECKING
++ atomic_inc(&pool->in_use);
++#endif
++ return ret;
++}
++EXPORT_SYMBOL(bman_release);
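++
++/* Illustrative release sketch (hypothetical buffer addresses): buffers are
++ * described by their 64-bit 'opaque' member, as in __bman_release() above.
++ *
++ *	struct bm_buffer bufs[2];
++ *	bufs[0].opaque = buf0_phys;
++ *	bufs[1].opaque = buf1_phys;
++ *	if (bman_release(pool, bufs, 2, 0))
++ *		pr_err("release failed\n");
++ */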
++
++static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
++ u8 num)
++{
++ struct bman_portal *p = get_affine_portal();
++ struct bm_mc_command *mcc;
++ struct bm_mc_result *mcr;
++ __maybe_unused unsigned long irqflags;
++ int ret, i;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ mcc = bm_mc_start(&p->p);
++ mcc->acquire.bpid = pool->params.bpid;
++ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
++ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
++ while (!(mcr = bm_mc_result(&p->p)))
++ cpu_relax();
++ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
++ if (bufs) {
++ for (i = 0; i < num; i++)
++ bufs[i].opaque =
++ be64_to_cpu(mcr->acquire.bufs[i].opaque);
++ }
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (ret != num)
++ ret = -ENOMEM;
++ return ret;
++}
++
++int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
++ u32 flags)
++{
++ int ret;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (!num || (num > 8))
++ return -EINVAL;
++ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
++ return -EINVAL;
++#endif
++ /* Without stockpile, this API is a pass-through to the h/w operation */
++ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
++ return __bman_acquire(pool, bufs, num);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (!atomic_dec_and_test(&pool->in_use)) {
++ pr_crit("Parallel attempts to enter bman_acquire() detected.");
++		panic("only one instance of bman_release/acquire allowed");
++ }
++#endif
++ /* Two movements of buffers are possible, and can occur in either order.
++ * A: moving buffers from stockpile to the caller.
++ * B: moving buffers from hardware to the stockpile.
++ * Order 1: if there are already enough buffers in the stockpile for A
++ * then we want to do A first, and only do B if we trigger the
++ * stockpile-low threshold.
++ * Order 2: if there are not enough buffers in the stockpile for A,
++ * then we want to do B first, then do A if B had succeeded. However in
++ * this case B is dependent on how many buffers the user needs, not the
++ * stockpile-low threshold.
++ * Due to the different handling of B between the two cases, putting A
++ * and B in a while() loop would require quite obscure logic, so handle
++ * the different sequences explicitly. */
++ if (num <= pool->sp_fill) {
++ /* Order 1: do A */
++ copy_words(bufs, pool->sp + (pool->sp_fill - num),
++ sizeof(struct bm_buffer) * num);
++ pool->sp_fill -= num;
++ /* do B relative to STOCKPILE_LOW */
++ while (pool->sp_fill <= BMAN_STOCKPILE_LOW) {
++ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
++ if (ret < 0)
++ ret = __bman_acquire(pool,
++ pool->sp + pool->sp_fill, 1);
++ if (ret < 0)
++ break;
++ pool->sp_fill += ret;
++ }
++ } else {
++ /* Order 2: do B relative to 'num' */
++ do {
++ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
++ if (ret < 0)
++ ret = __bman_acquire(pool,
++ pool->sp + pool->sp_fill, 1);
++ if (ret < 0)
++ /* failure */
++ goto acquire_done;
++ pool->sp_fill += ret;
++ } while (pool->sp_fill < num);
++ /* do A */
++ copy_words(bufs, pool->sp + (pool->sp_fill - num),
++ sizeof(struct bm_buffer) * num);
++ pool->sp_fill -= num;
++ }
++ /* success */
++ ret = num;
++acquire_done:
++#ifdef CONFIG_FSL_DPA_CHECKING
++ atomic_inc(&pool->in_use);
++#endif
++ return ret;
++}
++EXPORT_SYMBOL(bman_acquire);
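++
++/* Illustrative acquire sketch: request up to eight buffers per call; on
++ * success the requested count is returned, otherwise a negative errno (see
++ * __bman_acquire() above).
++ *
++ *	struct bm_buffer bufs[4];
++ *	int n = bman_acquire(pool, bufs, 4, 0);
++ *	if (n < 0)
++ *		pr_err("acquire failed\n");
++ */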
++
++int bman_flush_stockpile(struct bman_pool *pool, u32 flags)
++{
++ u8 num;
++ int ret;
++
++ while (pool->sp_fill) {
++ num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill);
++ ret = __bman_release(pool, pool->sp + (pool->sp_fill - num),
++ num, flags);
++ if (ret)
++ return ret;
++ pool->sp_fill -= num;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(bman_flush_stockpile);
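++
++/* Illustrative teardown sketch: a stockpiling client should flush before
++ * freeing, otherwise bman_free_pool() complains about unflushed buffers.
++ *
++ *	if (bman_flush_stockpile(pool, 0))
++ *		pr_err("stockpile flush failed\n");
++ *	bman_free_pool(pool);
++ */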
++
++int bman_query_pools(struct bm_pool_state *state)
++{
++ struct bman_portal *p = get_affine_portal();
++ struct bm_mc_result *mcr;
++ __maybe_unused unsigned long irqflags;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ bm_mc_start(&p->p);
++ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
++ while (!(mcr = bm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY);
++ *state = mcr->query;
++ state->as.state.__state[0] = be32_to_cpu(state->as.state.__state[0]);
++ state->as.state.__state[1] = be32_to_cpu(state->as.state.__state[1]);
++ state->ds.state.__state[0] = be32_to_cpu(state->ds.state.__state[0]);
++ state->ds.state.__state[1] = be32_to_cpu(state->ds.state.__state[1]);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return 0;
++}
++EXPORT_SYMBOL(bman_query_pools);
++
++#ifdef CONFIG_FSL_BMAN_CONFIG
++u32 bman_query_free_buffers(struct bman_pool *pool)
++{
++ return bm_pool_free_buffers(pool->params.bpid);
++}
++EXPORT_SYMBOL(bman_query_free_buffers);
++
++int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
++{
++ u32 bpid;
++
++ bpid = bman_get_params(pool)->bpid;
++
++ return bm_pool_set(bpid, thresholds);
++}
++EXPORT_SYMBOL(bman_update_pool_thresholds);
++#endif
++
++int bman_shutdown_pool(u32 bpid)
++{
++ struct bman_portal *p = get_affine_portal();
++ __maybe_unused unsigned long irqflags;
++ int ret;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ ret = bm_shutdown_pool(&p->p, bpid);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(bman_shutdown_pool);
++
++const struct bm_portal_config *bman_get_bm_portal_config(
++ struct bman_portal *portal)
++{
++ return portal->sharing_redirect ? NULL : portal->config;
++}
+diff --git a/drivers/staging/fsl_qbman/bman_low.h b/drivers/staging/fsl_qbman/bman_low.h
+new file mode 100644
+index 00000000..3da70571
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_low.h
+@@ -0,0 +1,565 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "bman_private.h"
++
++/***************************/
++/* Portal register assists */
++/***************************/
++
++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
++
++/* Cache-inhibited register offsets */
++#define BM_REG_RCR_PI_CINH 0x0000
++#define BM_REG_RCR_CI_CINH 0x0004
++#define BM_REG_RCR_ITR 0x0008
++#define BM_REG_CFG 0x0100
++#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
++#define BM_REG_ISR 0x0e00
++#define BM_REG_IIR 0x0e0c
++
++/* Cache-enabled register offsets */
++#define BM_CL_CR 0x0000
++#define BM_CL_RR0 0x0100
++#define BM_CL_RR1 0x0140
++#define BM_CL_RCR 0x1000
++#define BM_CL_RCR_PI_CENA 0x3000
++#define BM_CL_RCR_CI_CENA 0x3100
++
++#endif
++
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++
++/* Cache-inhibited register offsets */
++#define BM_REG_RCR_PI_CINH 0x3000
++#define BM_REG_RCR_CI_CINH 0x3100
++#define BM_REG_RCR_ITR 0x3200
++#define BM_REG_CFG 0x3300
++#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
++#define BM_REG_ISR 0x3e00
++#define BM_REG_IIR 0x3ec0
++
++/* Cache-enabled register offsets */
++#define BM_CL_CR 0x0000
++#define BM_CL_RR0 0x0100
++#define BM_CL_RR1 0x0140
++#define BM_CL_RCR 0x1000
++#define BM_CL_RCR_PI_CENA 0x3000
++#define BM_CL_RCR_CI_CENA 0x3100
++
++#endif
++
++/* BTW, the drivers (and h/w programming model) already obtain the required
++ * synchronisation for portal accesses via lwsync(), hwsync(), and
++ * data-dependencies. Use of barrier()s or other order-preserving primitives
++ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
++ * simply ensure that the compiler treats the portal registers as volatile (ie.
++ * non-coherent). */
++
++/* Cache-inhibited register access. */
++#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ci + (o)))
++#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
++ (bm)->addr_ci + (o));
++#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
++#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
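++
++/* Illustrative usage (a sketch, not the driver's own logic): within a
++ * function that has a 'portal' pointer in scope, eg.
++ *   u32 cfg = bm_in(CFG);        reads BM_REG_CFG via __raw_readl()
++ *   bm_out(CFG, cfg | 0x1);      writes it back via __raw_writel()
++ * The names expand against the cache-inhibited register map above. */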
++
++/* Cache-enabled (index) register access */
++#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
++#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
++#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ce + (o)))
++#define __bm_cl_out(bm, o, val) \
++ do { \
++ u32 *__tmpclout = (bm)->addr_ce + (o); \
++ __raw_writel(cpu_to_be32(val), __tmpclout); \
++ dcbf(__tmpclout); \
++ } while (0)
++#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
++#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
++#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
++#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
++#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
++#define bm_cl_invalidate(reg)\
++ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
++
++/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
++ * analysis, look at using the "extra" bit in the ring index registers to avoid
++ * cyclic issues. */
++static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
++{
++ /* 'first' is included, 'last' is excluded */
++ if (first <= last)
++ return last - first;
++ return ringsize + last - first;
++}
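++
++/* Worked example (illustrative): with ringsize=8, first=6 and last=2 the
++ * ring has wrapped, so the diff is 8 + 2 - 6 = 4 outstanding entries. */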
++
++/* Portal modes.
++ * Enum types:
++ * pmode == production mode
++ * cmode == consumption mode
++ * Enum values use 3 letter codes. First letter matches the portal mode,
++ * remaining two letters indicate:
++ * ci == cache-inhibited portal register
++ * ce == cache-enabled portal register
++ * vb == in-band valid-bit (cache-enabled)
++ */
++enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
++ bm_rcr_pci = 0, /* PI index, cache-inhibited */
++ bm_rcr_pce = 1, /* PI index, cache-enabled */
++ bm_rcr_pvb = 2 /* valid-bit */
++};
++enum bm_rcr_cmode { /* s/w-only */
++ bm_rcr_cci, /* CI index, cache-inhibited */
++ bm_rcr_cce /* CI index, cache-enabled */
++};
++
++
++/* ------------------------- */
++/* --- Portal structures --- */
++
++#define BM_RCR_SIZE 8
++
++struct bm_rcr {
++ struct bm_rcr_entry *ring, *cursor;
++ u8 ci, available, ithresh, vbit;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ u32 busy;
++ enum bm_rcr_pmode pmode;
++ enum bm_rcr_cmode cmode;
++#endif
++};
++
++struct bm_mc {
++ struct bm_mc_command *cr;
++ struct bm_mc_result *rr;
++ u8 rridx, vbit;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ enum {
++ /* Can only be _mc_start()ed */
++ mc_idle,
++ /* Can only be _mc_commit()ed or _mc_abort()ed */
++ mc_user,
++ /* Can only be _mc_retry()ed */
++ mc_hw
++ } state;
++#endif
++};
++
++struct bm_addr {
++ void __iomem *addr_ce; /* cache-enabled */
++ void __iomem *addr_ci; /* cache-inhibited */
++};
++
++struct bm_portal {
++ struct bm_addr addr;
++ struct bm_rcr rcr;
++ struct bm_mc mc;
++ struct bm_portal_config config;
++} ____cacheline_aligned;
++
++
++/* --------------- */
++/* --- RCR API --- */
++
++/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
++#define RCR_CARRYCLEAR(p) \
++ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
++
++/* Bit-wise logic to convert a ring pointer to a ring index */
++static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
++{
++ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
++}
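++
++/* Worked example (illustrative): BM_RCR_SIZE is 8 and each entry is 64
++ * bytes, so the ring spans 8 << 6 = 0x200 bytes. Incrementing past the
++ * last entry sets the 0x200 "carry" bit, which RCR_CARRYCLEAR() masks off
++ * to wrap the pointer back to the ring base; RCR_PTR2IDX() maps a pointer
++ * to its 0..7 index at the same 64-byte granularity. */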
++
++/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
++static inline void RCR_INC(struct bm_rcr *rcr)
++{
++ /* NB: this is odd-looking, but experiments show that it generates
++ * fast code with essentially no branching overheads. We increment to
++ * the next RCR pointer and handle overflow and 'vbit'. */
++ struct bm_rcr_entry *partial = rcr->cursor + 1;
++ rcr->cursor = RCR_CARRYCLEAR(partial);
++ if (partial != rcr->cursor)
++ rcr->vbit ^= BM_RCR_VERB_VBIT;
++}
++
++static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
++ __maybe_unused enum bm_rcr_cmode cmode)
++{
++ /* This use of 'register', as well as all other occurrences, is because
++ * it has been observed to generate much faster code with gcc than is
++ * otherwise the case. */
++ register struct bm_rcr *rcr = &portal->rcr;
++ u32 cfg;
++ u8 pi;
++
++ rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
++ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
++
++ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
++ rcr->cursor = rcr->ring + pi;
++ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
++ rcr->available = BM_RCR_SIZE - 1
++ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
++ rcr->ithresh = bm_in(RCR_ITR);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ rcr->busy = 0;
++ rcr->pmode = pmode;
++ rcr->cmode = cmode;
++#endif
++ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
++ bm_out(CFG, cfg);
++ return 0;
++}
++
++static inline void bm_rcr_finish(struct bm_portal *portal)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
++ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
++ DPA_ASSERT(!rcr->busy);
++ if (pi != RCR_PTR2IDX(rcr->cursor))
++		pr_crit("losing uncommitted RCR entries\n");
++ if (ci != rcr->ci)
++ pr_crit("missing existing RCR completions\n");
++ if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
++ pr_crit("RCR destroyed unquiesced\n");
++}
++
++static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ DPA_ASSERT(!rcr->busy);
++ if (!rcr->available)
++ return NULL;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ rcr->busy = 1;
++#endif
++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
++ dcbz_64(rcr->cursor);
++#endif
++ return rcr->cursor;
++}
++
++static inline void bm_rcr_abort(struct bm_portal *portal)
++{
++ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
++ DPA_ASSERT(rcr->busy);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ rcr->busy = 0;
++#endif
++}
++
++static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
++ struct bm_portal *portal, u8 myverb)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ DPA_ASSERT(rcr->busy);
++ DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
++ if (rcr->available == 1)
++ return NULL;
++ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
++ dcbf_64(rcr->cursor);
++ RCR_INC(rcr);
++ rcr->available--;
++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
++ dcbz_64(rcr->cursor);
++#endif
++ return rcr->cursor;
++}
++
++static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ DPA_ASSERT(rcr->busy);
++ DPA_ASSERT(rcr->pmode == bm_rcr_pci);
++ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
++ RCR_INC(rcr);
++ rcr->available--;
++ hwsync();
++ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
++#ifdef CONFIG_FSL_DPA_CHECKING
++ rcr->busy = 0;
++#endif
++}
++
++static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
++{
++ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
++ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
++ bm_cl_invalidate(RCR_PI);
++ bm_cl_touch_rw(RCR_PI);
++}
++
++static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ DPA_ASSERT(rcr->busy);
++ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
++ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
++ RCR_INC(rcr);
++ rcr->available--;
++ lwsync();
++ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
++#ifdef CONFIG_FSL_DPA_CHECKING
++ rcr->busy = 0;
++#endif
++}
++
++static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ struct bm_rcr_entry *rcursor;
++ DPA_ASSERT(rcr->busy);
++ DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
++ lwsync();
++ rcursor = rcr->cursor;
++ rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
++ dcbf_64(rcursor);
++ RCR_INC(rcr);
++ rcr->available--;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ rcr->busy = 0;
++#endif
++}
++
++static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ u8 diff, old_ci = rcr->ci;
++ DPA_ASSERT(rcr->cmode == bm_rcr_cci);
++ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
++ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
++ rcr->available += diff;
++ return diff;
++}
++
++static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
++{
++ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
++ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
++ bm_cl_touch_ro(RCR_CI);
++}
++
++static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ u8 diff, old_ci = rcr->ci;
++ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
++ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
++ bm_cl_invalidate(RCR_CI);
++ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
++ rcr->available += diff;
++ return diff;
++}
++
++static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ return rcr->ithresh;
++}
++
++static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ rcr->ithresh = ithresh;
++ bm_out(RCR_ITR, ithresh);
++}
++
++static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ return rcr->available;
++}
++
++static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
++{
++ register struct bm_rcr *rcr = &portal->rcr;
++ return BM_RCR_SIZE - 1 - rcr->available;
++}
++
++
++/* ------------------------------ */
++/* --- Management command API --- */
++
++static inline int bm_mc_init(struct bm_portal *portal)
++{
++ register struct bm_mc *mc = &portal->mc;
++ mc->cr = portal->addr.addr_ce + BM_CL_CR;
++ mc->rr = portal->addr.addr_ce + BM_CL_RR0;
++ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
++ BM_MCC_VERB_VBIT) ? 0 : 1;
++ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = mc_idle;
++#endif
++ return 0;
++}
++
++static inline void bm_mc_finish(struct bm_portal *portal)
++{
++ __maybe_unused register struct bm_mc *mc = &portal->mc;
++ DPA_ASSERT(mc->state == mc_idle);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (mc->state != mc_idle)
++ pr_crit("Losing incomplete MC command\n");
++#endif
++}
++
++static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
++{
++ register struct bm_mc *mc = &portal->mc;
++ DPA_ASSERT(mc->state == mc_idle);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = mc_user;
++#endif
++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
++ dcbz_64(mc->cr);
++#endif
++ return mc->cr;
++}
++
++static inline void bm_mc_abort(struct bm_portal *portal)
++{
++ __maybe_unused register struct bm_mc *mc = &portal->mc;
++ DPA_ASSERT(mc->state == mc_user);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = mc_idle;
++#endif
++}
++
++static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
++{
++ register struct bm_mc *mc = &portal->mc;
++ struct bm_mc_result *rr = mc->rr + mc->rridx;
++ DPA_ASSERT(mc->state == mc_user);
++ lwsync();
++ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
++ dcbf(mc->cr);
++ dcbit_ro(rr);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = mc_hw;
++#endif
++}
++
++static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
++{
++ register struct bm_mc *mc = &portal->mc;
++ struct bm_mc_result *rr = mc->rr + mc->rridx;
++ DPA_ASSERT(mc->state == mc_hw);
++ /* The inactive response register's verb byte always returns zero until
++ * its command is submitted and completed. This includes the valid-bit,
++ * in case you were wondering... */
++ if (!__raw_readb(&rr->verb)) {
++ dcbit_ro(rr);
++ return NULL;
++ }
++ mc->rridx ^= 1;
++ mc->vbit ^= BM_MCC_VERB_VBIT;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = mc_idle;
++#endif
++ return rr;
++}
++
++
++/* ------------------------------------- */
++/* --- Portal interrupt register API --- */
++
++static inline int bm_isr_init(__always_unused struct bm_portal *portal)
++{
++ return 0;
++}
++
++static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
++{
++}
++
++#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
++#define SCN_BIT(bpid)	(0x80000000 >> ((bpid) & 31))
++static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
++ int enable)
++{
++ u32 val;
++ DPA_ASSERT(bpid < bman_pool_max);
++ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
++ val = __bm_in(&portal->addr, SCN_REG(bpid));
++ if (enable)
++ val |= SCN_BIT(bpid);
++ else
++ val &= ~SCN_BIT(bpid);
++ __bm_out(&portal->addr, SCN_REG(bpid), val);
++}
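++
++/* Worked example (illustrative): for bpid = 33, SCN_REG(33) selects
++ * BM_REG_SCN(1) (the word covering pools 32..63) and SCN_BIT(33) is
++ * 0x80000000 >> 1 = 0x40000000, the pool's state-change notification bit. */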
++
++static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
++{
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
++#else
++ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
++#endif
++}
++
++static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
++ u32 val)
++{
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
++#else
++ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
++#endif
++}
++
++/* Buffer Pool Cleanup */
++static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
++{
++ struct bm_mc_command *bm_cmd;
++ struct bm_mc_result *bm_res;
++
++ int aq_count = 0;
++ bool stop = false;
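++	/* Illustrative note: the "| 1" in the acquire verb below requests one
++	 * buffer per management command; the loop repeats until the BUFCOUNT
++	 * field of the result verb reads zero, ie. the pool is drained. */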
++ while (!stop) {
++ /* Acquire buffers until empty */
++ bm_cmd = bm_mc_start(p);
++ bm_cmd->acquire.bpid = bpid;
++ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
++ while (!(bm_res = bm_mc_result(p)))
++ cpu_relax();
++ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
++ /* Pool is empty */
++			/* TBD: Should we do a few extra iterations in
++			 * case some other blocks keep buffers 'on deck',
++			 * which may also be problematic? */
++ stop = true;
++ } else
++ ++aq_count;
++ }
++ return 0;
++}
+diff --git a/drivers/staging/fsl_qbman/bman_private.h b/drivers/staging/fsl_qbman/bman_private.h
+new file mode 100644
+index 00000000..64eefe7d
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_private.h
+@@ -0,0 +1,166 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "dpa_sys.h"
++#include <linux/fsl_bman.h>
++
++/* Revision info (for errata and feature handling) */
++#define BMAN_REV10 0x0100
++#define BMAN_REV20 0x0200
++#define BMAN_REV21 0x0201
++#define QBMAN_ANY_PORTAL_IDX 0xffffffff
++extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
++
++/*
++ * Global variables of the max portal/pool number this bman version supported
++ */
++extern u16 bman_pool_max;
++
++/* used by CCSR and portal interrupt code */
++enum bm_isr_reg {
++ bm_isr_status = 0,
++ bm_isr_enable = 1,
++ bm_isr_disable = 2,
++ bm_isr_inhibit = 3
++};
++
++struct bm_portal_config {
++ /* Corenet portal addresses;
++ * [0]==cache-enabled, [1]==cache-inhibited. */
++ __iomem void *addr_virt[2];
++ struct resource addr_phys[2];
++ /* Allow these to be joined in lists */
++ struct list_head list;
++ /* User-visible portal configuration settings */
++ struct bman_portal_config public_cfg;
++ /* power management saved data */
++ u32 saved_isdr;
++};
++
++#ifdef CONFIG_FSL_BMAN_CONFIG
++/* Hooks from bman_driver.c to bman_config.c */
++int bman_init_ccsr(struct device_node *node);
++#endif
++
++/* Hooks from bman_driver.c in to bman_high.c */
++struct bman_portal *bman_create_portal(
++ struct bman_portal *portal,
++ const struct bm_portal_config *config);
++struct bman_portal *bman_create_affine_portal(
++ const struct bm_portal_config *config);
++struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
++ int cpu);
++void bman_destroy_portal(struct bman_portal *bm);
++
++const struct bm_portal_config *bman_destroy_affine_portal(void);
++
++/* Hooks from fsl_usdpaa.c to bman_driver.c */
++struct bm_portal_config *bm_get_unused_portal(void);
++struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
++void bm_put_unused_portal(struct bm_portal_config *pcfg);
++void bm_set_liodns(struct bm_portal_config *pcfg);
++
++/* Pool logic in the portal driver, during initialisation, needs to know if
++ * there's access to CCSR or not (if not, it'll cripple the pool allocator). */
++#ifdef CONFIG_FSL_BMAN_CONFIG
++int bman_have_ccsr(void);
++#else
++#define bman_have_ccsr() 0
++#endif
++
++/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
++ * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
++ * might fail (if the buffer pool is depleted). So this value provides some
++ * "stagger" in that the bman_acquire() function will only fail if lots of bufs
++ * are requested at once or if h/w has been tested a couple of times without
++ * luck. The _HIGH value: when bman_release() is called and the stockpile
++ * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
++ * the release ring is full). So this value provides some "stagger" so that
++ * ring-access is retried a couple of times prior to the API returning a
++ * failure. The following *must* be true:
++ * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
++ * (to avoid thrashing)
++ * BMAN_STOCKPILE_SZ >= 16
++ * (as the release logic expects to either send 8 buffers to hw prior to
++ * adding the given buffers to the stockpile or add the buffers to the
++ * stockpile before sending 8 to hw, as the API must be an all-or-nothing
++ * success/fail.)
++ */
++#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
++#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
++#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
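++
++/* Illustrative check of the chosen values: HIGH - LOW = 14 - 2 = 12, which
++ * satisfies the "> 8" anti-thrash rule above, and SZ = 16 meets the minimum
++ * required by the all-or-nothing release logic. */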
++
++/*************************************************/
++/* BMan s/w corenet portal, low-level i/face */
++/*************************************************/
++
++/* Used by all portal interrupt registers except 'inhibit'
++ * This mask contains all the "irqsource" bits visible to API users
++ */
++#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
++
++/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
++ * the disable register" rather than "disable the ability to write". */
++#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
++#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
++#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
++#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
++#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
++#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
++#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
++#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
++
++#ifdef CONFIG_FSL_BMAN_CONFIG
++/* Set depletion thresholds associated with a buffer pool. Requires that the
++ * operating system have access to Bman CCSR (ie. compiled in support and
++ * run-time access courtesy of the device-tree). */
++int bm_pool_set(u32 bpid, const u32 *thresholds);
++#define BM_POOL_THRESH_SW_ENTER 0
++#define BM_POOL_THRESH_SW_EXIT 1
++#define BM_POOL_THRESH_HW_ENTER 2
++#define BM_POOL_THRESH_HW_EXIT 3
++
++/* Read the free buffer count for a given buffer pool */
++u32 bm_pool_free_buffers(u32 bpid);
++
++__init int bman_init(void);
++__init int bman_resource_init(void);
++
++const struct bm_portal_config *bman_get_bm_portal_config(
++ struct bman_portal *portal);
++
++/* power management */
++#ifdef CONFIG_SUSPEND
++void suspend_unused_bportal(void);
++void resume_unused_bportal(void);
++#endif
++
++#endif /* CONFIG_FSL_BMAN_CONFIG */
+diff --git a/drivers/staging/fsl_qbman/bman_test.c b/drivers/staging/fsl_qbman/bman_test.c
+new file mode 100644
+index 00000000..db5b7fd3
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_test.c
+@@ -0,0 +1,56 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "bman_test.h"
++
++MODULE_AUTHOR("Geoff Thorpe");
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("Bman testing");
++
++static int test_init(void)
++{
++#ifdef CONFIG_FSL_BMAN_TEST_HIGH
++ int loop = 1;
++ while (loop--)
++ bman_test_high();
++#endif
++#ifdef CONFIG_FSL_BMAN_TEST_THRESH
++ bman_test_thresh();
++#endif
++ return 0;
++}
++
++static void test_exit(void)
++{
++}
++
++module_init(test_init);
++module_exit(test_exit);
+diff --git a/drivers/staging/fsl_qbman/bman_test.h b/drivers/staging/fsl_qbman/bman_test.h
+new file mode 100644
+index 00000000..fcd65056
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_test.h
+@@ -0,0 +1,44 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/kthread.h>
++
++#include <linux/fsl_bman.h>
++
++void bman_test_high(void);
++void bman_test_thresh(void);
+diff --git a/drivers/staging/fsl_qbman/bman_test_high.c b/drivers/staging/fsl_qbman/bman_test_high.c
+new file mode 100644
+index 00000000..1617a531
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_test_high.c
+@@ -0,0 +1,183 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "bman_test.h"
++#include "bman_private.h"
++
++/*************/
++/* constants */
++/*************/
++
++#define PORTAL_OPAQUE ((void *)0xf00dbeef)
++#define POOL_OPAQUE ((void *)0xdeadabba)
++#define NUM_BUFS 93
++#define LOOPS 3
++#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
++
++/***************/
++/* global vars */
++/***************/
++
++static struct bman_pool *pool;
++static int depleted;
++static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
++static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
++static int bufs_received;
++
++/* Predeclare the callback so we can instantiate pool parameters */
++static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
++
++/**********************/
++/* internal functions */
++/**********************/
++
++static void bufs_init(void)
++{
++ int i;
++ for (i = 0; i < NUM_BUFS; i++)
++ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
++ bufs_received = 0;
++}
++
++static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
++{
++ if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
++
++		/* On SoCs with Bman revision 2.0, Bman only respects the 40
++ * LS-bits of buffer addresses, masking off the upper 8-bits on
++ * release commands. The API provides for 48-bit addresses
++ * because some SoCs support all 48-bits. When generating
++ * garbage addresses for testing, we either need to zero the
++ * upper 8-bits when releasing to Bman (otherwise we'll be
++ * disappointed when the buffers we acquire back from Bman
++ * don't match), or we need to mask the upper 8-bits off when
++ * comparing. We do the latter.
++ */
++ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
++ < (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
++ return -1;
++ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
++ > (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
++ return 1;
++ } else {
++ if (bm_buffer_get64(a) < bm_buffer_get64(b))
++ return -1;
++ if (bm_buffer_get64(a) > bm_buffer_get64(b))
++ return 1;
++ }
++
++ return 0;
++}
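++
++/* Worked example (illustrative): BMAN_TOKEN_MASK keeps the 40 LS-bits, so
++ * on rev-2.0 parts 0xAB0123456789 and 0xCD0123456789 both mask down to
++ * 0x0123456789 and compare equal, mirroring what the hardware returns. */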
++
++static void bufs_confirm(void)
++{
++ int i, j;
++ for (i = 0; i < NUM_BUFS; i++) {
++ int matches = 0;
++ for (j = 0; j < NUM_BUFS; j++)
++ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
++ matches++;
++ BUG_ON(matches != 1);
++ }
++}
++
++/********/
++/* test */
++/********/
++
++static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
++ void *pool_ctx, int __depleted)
++{
++ BUG_ON(__pool != pool);
++ BUG_ON(pool_ctx != POOL_OPAQUE);
++ depleted = __depleted;
++}
++
++void bman_test_high(void)
++{
++ struct bman_pool_params pparams = {
++ .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
++ .cb = depletion_cb,
++ .cb_ctx = POOL_OPAQUE,
++ };
++ int i, loops = LOOPS;
++ struct bm_buffer tmp_buf;
++
++ bufs_init();
++
++ pr_info("BMAN: --- starting high-level test ---\n");
++
++ pool = bman_new_pool(&pparams);
++ BUG_ON(!pool);
++
++ /*******************/
++ /* Release buffers */
++ /*******************/
++do_loop:
++ i = 0;
++ while (i < NUM_BUFS) {
++ u32 flags = BMAN_RELEASE_FLAG_WAIT;
++ int num = 8;
++ if ((i + num) > NUM_BUFS)
++ num = NUM_BUFS - i;
++ if ((i + num) == NUM_BUFS)
++ flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
++ if (bman_release(pool, bufs_in + i, num, flags))
++ panic("bman_release() failed\n");
++ i += num;
++ }
++
++ /*******************/
++ /* Acquire buffers */
++ /*******************/
++ while (i > 0) {
++ int tmp, num = 8;
++ if (num > i)
++ num = i;
++ tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
++ BUG_ON(tmp != num);
++ i -= num;
++ }
++
++ i = bman_acquire(pool, &tmp_buf, 1, 0);
++ BUG_ON(i > 0);
++
++ bufs_confirm();
++
++ if (--loops)
++ goto do_loop;
++
++ /************/
++ /* Clean up */
++ /************/
++ bman_free_pool(pool);
++ pr_info("BMAN: --- finished high-level test ---\n");
++}
+diff --git a/drivers/staging/fsl_qbman/bman_test_thresh.c b/drivers/staging/fsl_qbman/bman_test_thresh.c
+new file mode 100644
+index 00000000..67093693
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/bman_test_thresh.c
+@@ -0,0 +1,196 @@
++/* Copyright 2010-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "bman_test.h"
++
++/* Test constants */
++#define TEST_NUMBUFS 129728
++#define TEST_EXIT 129536
++#define TEST_ENTRY 129024
++
++struct affine_test_data {
++ struct task_struct *t;
++ int cpu;
++ int expect_affinity;
++ int drain;
++ int num_enter;
++ int num_exit;
++ struct list_head node;
++ struct completion wakethread;
++ struct completion wakeparent;
++};
++
++static void cb_depletion(struct bman_portal *portal,
++ struct bman_pool *pool,
++ void *opaque,
++ int depleted)
++{
++ struct affine_test_data *data = opaque;
++ int c = smp_processor_id();
++ pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n",
++ bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
++	/* We should be executing on the CPU of the thread that owns the pool,
++	 * provided that CPU has an affine portal (ie. it isn't slaved). */
++ BUG_ON((c != data->cpu) && data->expect_affinity);
++ BUG_ON((c == data->cpu) && !data->expect_affinity);
++ if (depleted)
++ data->num_enter++;
++ else
++ data->num_exit++;
++}
++
++/* Params used to set up a pool, this also dynamically allocates a BPID */
++static const struct bman_pool_params params_nocb = {
++ .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
++ .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
++};
++
++/* Params used to set up each cpu's pool with callbacks enabled */
++static struct bman_pool_params params_cb = {
++ .bpid = 0, /* will be replaced to match pool_nocb */
++ .flags = BMAN_POOL_FLAG_DEPLETION,
++ .cb = cb_depletion
++};
++
++static struct bman_pool *pool_nocb;
++static LIST_HEAD(threads);
++
++static int affine_test(void *__data)
++{
++ struct bman_pool *pool;
++ struct affine_test_data *data = __data;
++ struct bman_pool_params my_params = params_cb;
++
++ pr_info("thread %d: starting\n", data->cpu);
++ /* create the pool */
++ my_params.cb_ctx = data;
++ pool = bman_new_pool(&my_params);
++ BUG_ON(!pool);
++ complete(&data->wakeparent);
++ wait_for_completion(&data->wakethread);
++ init_completion(&data->wakethread);
++
++ /* if we're the drainer, we get signalled for that */
++ if (data->drain) {
++ struct bm_buffer buf;
++ int ret;
++ pr_info("thread %d: draining...\n", data->cpu);
++ do {
++ ret = bman_acquire(pool, &buf, 1, 0);
++ } while (ret > 0);
++ pr_info("thread %d: draining done.\n", data->cpu);
++ complete(&data->wakeparent);
++ wait_for_completion(&data->wakethread);
++ init_completion(&data->wakethread);
++ }
++
++ /* cleanup */
++ bman_free_pool(pool);
++ while (!kthread_should_stop())
++ cpu_relax();
++ pr_info("thread %d: exiting\n", data->cpu);
++ return 0;
++}
++
++static struct affine_test_data *start_affine_test(int cpu, int drain)
++{
++ struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
++
++ if (!data)
++ return NULL;
++ data->cpu = cpu;
++ data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
++ data->drain = drain;
++ data->num_enter = 0;
++ data->num_exit = 0;
++ init_completion(&data->wakethread);
++ init_completion(&data->wakeparent);
++ list_add_tail(&data->node, &threads);
++ data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
++ BUG_ON(IS_ERR(data->t));
++ kthread_bind(data->t, cpu);
++ wake_up_process(data->t);
++ return data;
++}
++
++void bman_test_thresh(void)
++{
++ int loop = TEST_NUMBUFS;
++ int ret, num_cpus = 0;
++ struct affine_test_data *data, *drainer = NULL;
++
++ pr_info("bman_test_thresh: start\n");
++
++ /* allocate a BPID and seed it */
++ pool_nocb = bman_new_pool(&params_nocb);
++ BUG_ON(!pool_nocb);
++ while (loop--) {
++ struct bm_buffer buf;
++ bm_buffer_set64(&buf, 0x0badbeef + loop);
++ ret = bman_release(pool_nocb, &buf, 1,
++ BMAN_RELEASE_FLAG_WAIT);
++ BUG_ON(ret);
++ }
++ while (!bman_rcr_is_empty())
++ cpu_relax();
++ pr_info("bman_test_thresh: buffers are in\n");
++
++ /* create threads and wait for them to create pools */
++ params_cb.bpid = bman_get_params(pool_nocb)->bpid;
++ for_each_cpu(loop, cpu_online_mask) {
++ data = start_affine_test(loop, drainer ? 0 : 1);
++ BUG_ON(!data);
++ if (!drainer)
++ drainer = data;
++ num_cpus++;
++ wait_for_completion(&data->wakeparent);
++ }
++
++ /* signal the drainer to start draining */
++ complete(&drainer->wakethread);
++ wait_for_completion(&drainer->wakeparent);
++ init_completion(&drainer->wakeparent);
++
++ /* tear down */
++ list_for_each_entry_safe(data, drainer, &threads, node) {
++ complete(&data->wakethread);
++ ret = kthread_stop(data->t);
++ BUG_ON(ret);
++ list_del(&data->node);
++ /* check that we get the expected callbacks (and no others) */
++ BUG_ON(data->num_enter != 1);
++ BUG_ON(data->num_exit != 0);
++ kfree(data);
++ }
++ bman_free_pool(pool_nocb);
++
++ pr_info("bman_test_thresh: done\n");
++}
+diff --git a/drivers/staging/fsl_qbman/dpa_alloc.c b/drivers/staging/fsl_qbman/dpa_alloc.c
+new file mode 100644
+index 00000000..44db3e1e
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/dpa_alloc.c
+@@ -0,0 +1,706 @@
++/* Copyright 2009-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "dpa_sys.h"
++#include <linux/fsl_qman.h>
++#include <linux/fsl_bman.h>
++
++/* Qman and Bman APIs are front-ends to the common code; */
++
++static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */
++static DECLARE_DPA_ALLOC(fqalloc); /* FQID allocator */
++static DECLARE_DPA_ALLOC(qpalloc); /* pool-channel allocator */
++static DECLARE_DPA_ALLOC(cgralloc); /* CGR ID allocator */
++static DECLARE_DPA_ALLOC(ceetm0_challoc); /* CEETM Channel ID allocator */
++static DECLARE_DPA_ALLOC(ceetm0_lfqidalloc); /* CEETM LFQID allocator */
++static DECLARE_DPA_ALLOC(ceetm1_challoc); /* CEETM Channel ID allocator */
++static DECLARE_DPA_ALLOC(ceetm1_lfqidalloc); /* CEETM LFQID allocator */
++
++/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. when releasing
++ * FQIDs (probably from user-space), it can filter out those that aren't in the
++ * OOS state (better to leak a h/w resource than to crash). This function
++ * returns the number of invalid IDs that were not released. */
++static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
++ int (*is_valid)(u32 id))
++{
++ int valid_mode = 0;
++ u32 loop = id, total_invalid = 0;
++ while (loop < (id + count)) {
++ int isvalid = is_valid ? is_valid(loop) : 1;
++ if (!valid_mode) {
++ /* We're looking for a valid ID to terminate an invalid
++ * range */
++ if (isvalid) {
++ /* We finished a range of invalid IDs, a valid
++ * range is now underway */
++ valid_mode = 1;
++ count -= (loop - id);
++ id = loop;
++ } else
++ total_invalid++;
++ } else {
++ /* We're looking for an invalid ID to terminate a
++ * valid range */
++ if (!isvalid) {
++				/* Release the range of valid IDs, an invalid
++ * range is now underway */
++ if (loop > id)
++ dpa_alloc_free(alloc, id, loop - id);
++ valid_mode = 0;
++ }
++ }
++ loop++;
++ }
++ /* Release any unterminated range of valid IDs */
++ if (valid_mode && count)
++ dpa_alloc_free(alloc, id, count);
++ return total_invalid;
++}
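++
++/* Worked example (illustrative): releasing IDs 10..14 when is_valid()
++ * rejects 10 and 11 skips those two, frees the valid run [12..14] back to
++ * the allocator, and returns 2 for the IDs left unreleased. */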
++
++/* BPID allocator front-end */
++
++int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
++{
++ return dpa_alloc_new(&bpalloc, result, count, align, partial);
++}
++EXPORT_SYMBOL(bman_alloc_bpid_range);
++
++static int bp_cleanup(u32 bpid)
++{
++ return bman_shutdown_pool(bpid) == 0;
++}
++void bman_release_bpid_range(u32 bpid, u32 count)
++{
++ u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
++ if (total_invalid)
++ pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
++ bpid, bpid + count - 1, count, total_invalid);
++}
++EXPORT_SYMBOL(bman_release_bpid_range);
++
++void bman_seed_bpid_range(u32 bpid, u32 count)
++{
++ dpa_alloc_seed(&bpalloc, bpid, count);
++}
++EXPORT_SYMBOL(bman_seed_bpid_range);
++
++int bman_reserve_bpid_range(u32 bpid, u32 count)
++{
++ return dpa_alloc_reserve(&bpalloc, bpid, count);
++}
++EXPORT_SYMBOL(bman_reserve_bpid_range);
++
++
++/* FQID allocator front-end */
++
++int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
++{
++ return dpa_alloc_new(&fqalloc, result, count, align, partial);
++}
++EXPORT_SYMBOL(qman_alloc_fqid_range);
++
++static int fq_cleanup(u32 fqid)
++{
++ return qman_shutdown_fq(fqid) == 0;
++}
++void qman_release_fqid_range(u32 fqid, u32 count)
++{
++ u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_cleanup);
++ if (total_invalid)
++ pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
++ fqid, fqid + count - 1, count, total_invalid);
++}
++EXPORT_SYMBOL(qman_release_fqid_range);
++
++int qman_reserve_fqid_range(u32 fqid, u32 count)
++{
++ return dpa_alloc_reserve(&fqalloc, fqid, count);
++}
++EXPORT_SYMBOL(qman_reserve_fqid_range);
++
++void qman_seed_fqid_range(u32 fqid, u32 count)
++{
++ dpa_alloc_seed(&fqalloc, fqid, count);
++}
++EXPORT_SYMBOL(qman_seed_fqid_range);
++
++/* Pool-channel allocator front-end */
++
++int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
++{
++ return dpa_alloc_new(&qpalloc, result, count, align, partial);
++}
++EXPORT_SYMBOL(qman_alloc_pool_range);
++
++static int qpool_cleanup(u32 qp)
++{
++ /* We query all FQDs starting from
++ * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
++ * whose destination channel is the pool-channel being released.
++ * When a non-OOS FQD is found we attempt to clean it up */
++ struct qman_fq fq = {
++ .fqid = 1
++ };
++ int err;
++ do {
++ struct qm_mcr_queryfq_np np;
++ err = qman_query_fq_np(&fq, &np);
++ if (err)
++ /* FQID range exceeded, found no problems */
++ return 1;
++ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
++ struct qm_fqd fqd;
++ err = qman_query_fq(&fq, &fqd);
++ BUG_ON(err);
++ if (fqd.dest.channel == qp) {
++ /* The channel is the FQ's target, clean it */
++ if (qman_shutdown_fq(fq.fqid) != 0)
++ /* Couldn't shut down the FQ
++ so the pool must be leaked */
++ return 0;
++ }
++ }
++ /* Move to the next FQID */
++ fq.fqid++;
++ } while (1);
++}
++void qman_release_pool_range(u32 qp, u32 count)
++{
++ u32 total_invalid = release_id_range(&qpalloc, qp,
++ count, qpool_cleanup);
++ if (total_invalid) {
++ /* Pool channels are almost always used individually */
++ if (count == 1)
++ pr_err("Pool channel 0x%x had %d leaks\n",
++ qp, total_invalid);
++ else
++ pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
++ qp, qp + count - 1, count, total_invalid);
++ }
++}
++EXPORT_SYMBOL(qman_release_pool_range);
++
++
++void qman_seed_pool_range(u32 poolid, u32 count)
++{
++ dpa_alloc_seed(&qpalloc, poolid, count);
++
++}
++EXPORT_SYMBOL(qman_seed_pool_range);
++
++int qman_reserve_pool_range(u32 poolid, u32 count)
++{
++ return dpa_alloc_reserve(&qpalloc, poolid, count);
++}
++EXPORT_SYMBOL(qman_reserve_pool_range);
++
++
++/* CGR ID allocator front-end */
++
++int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
++{
++ return dpa_alloc_new(&cgralloc, result, count, align, partial);
++}
++EXPORT_SYMBOL(qman_alloc_cgrid_range);
++
++static int cqr_cleanup(u32 cgrid)
++{
++ /* We query all FQDs starting from
++ * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
++ * whose CGR is the CGR being released.
++ */
++ struct qman_fq fq = {
++ .fqid = 1
++ };
++ int err;
++ do {
++ struct qm_mcr_queryfq_np np;
++ err = qman_query_fq_np(&fq, &np);
++ if (err)
++ /* FQID range exceeded, found no problems */
++ return 1;
++ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
++ struct qm_fqd fqd;
++ err = qman_query_fq(&fq, &fqd);
++ BUG_ON(err);
++ if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
++ (fqd.cgid == cgrid)) {
++				pr_err("CGRID 0x%x is being used by FQID 0x%x,"
++ " CGR will be leaked\n",
++ cgrid, fq.fqid);
++ return 1;
++ }
++ }
++ /* Move to the next FQID */
++ fq.fqid++;
++ } while (1);
++}
++
++void qman_release_cgrid_range(u32 cgrid, u32 count)
++{
++ u32 total_invalid = release_id_range(&cgralloc, cgrid,
++ count, cqr_cleanup);
++ if (total_invalid)
++ pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
++ cgrid, cgrid + count - 1, count, total_invalid);
++}
++EXPORT_SYMBOL(qman_release_cgrid_range);
++
++void qman_seed_cgrid_range(u32 cgrid, u32 count)
++{
++ dpa_alloc_seed(&cgralloc, cgrid, count);
++
++}
++EXPORT_SYMBOL(qman_seed_cgrid_range);
++
++/* CEETM CHANNEL ID allocator front-end */
++int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
++ int partial)
++{
++ return dpa_alloc_new(&ceetm0_challoc, result, count, align, partial);
++}
++EXPORT_SYMBOL(qman_alloc_ceetm0_channel_range);
++
++int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
++ int partial)
++{
++ return dpa_alloc_new(&ceetm1_challoc, result, count, align, partial);
++}
++EXPORT_SYMBOL(qman_alloc_ceetm1_channel_range);
++
++void qman_release_ceetm0_channel_range(u32 channelid, u32 count)
++{
++ u32 total_invalid;
++
++ total_invalid = release_id_range(&ceetm0_challoc, channelid, count,
++ NULL);
++ if (total_invalid)
++ pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
++ channelid, channelid + count - 1, count, total_invalid);
++}
++EXPORT_SYMBOL(qman_release_ceetm0_channel_range);
++
++void qman_seed_ceetm0_channel_range(u32 channelid, u32 count)
++{
++ dpa_alloc_seed(&ceetm0_challoc, channelid, count);
++
++}
++EXPORT_SYMBOL(qman_seed_ceetm0_channel_range);
++
++void qman_release_ceetm1_channel_range(u32 channelid, u32 count)
++{
++ u32 total_invalid;
++ total_invalid = release_id_range(&ceetm1_challoc, channelid, count,
++ NULL);
++ if (total_invalid)
++ pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
++ channelid, channelid + count - 1, count, total_invalid);
++}
++EXPORT_SYMBOL(qman_release_ceetm1_channel_range);
++
++void qman_seed_ceetm1_channel_range(u32 channelid, u32 count)
++{
++ dpa_alloc_seed(&ceetm1_challoc, channelid, count);
++
++}
++EXPORT_SYMBOL(qman_seed_ceetm1_channel_range);
++
++/* CEETM LFQID allocator front-end */
++int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
++ int partial)
++{
++ return dpa_alloc_new(&ceetm0_lfqidalloc, result, count, align, partial);
++}
++EXPORT_SYMBOL(qman_alloc_ceetm0_lfqid_range);
++
++int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
++ int partial)
++{
++ return dpa_alloc_new(&ceetm1_lfqidalloc, result, count, align, partial);
++}
++EXPORT_SYMBOL(qman_alloc_ceetm1_lfqid_range);
++
++void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count)
++{
++ u32 total_invalid;
++
++ total_invalid = release_id_range(&ceetm0_lfqidalloc, lfqid, count,
++ NULL);
++ if (total_invalid)
++ pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
++ lfqid, lfqid + count - 1, count, total_invalid);
++}
++EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range);
++
++void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count)
++{
++ dpa_alloc_seed(&ceetm0_lfqidalloc, lfqid, count);
++
++}
++EXPORT_SYMBOL(qman_seed_ceetm0_lfqid_range);
++
++void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count)
++{
++ u32 total_invalid;
++
++ total_invalid = release_id_range(&ceetm1_lfqidalloc, lfqid, count,
++ NULL);
++ if (total_invalid)
++ pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
++ lfqid, lfqid + count - 1, count, total_invalid);
++}
++EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range);
++
++void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count)
++{
++ dpa_alloc_seed(&ceetm1_lfqidalloc, lfqid, count);
++
++}
++EXPORT_SYMBOL(qman_seed_ceetm1_lfqid_range);
++
++
++/* Everything else is the common backend to all the allocators */
++
++/* The allocator is a (possibly-empty) list of these; */
++struct alloc_node {
++ struct list_head list;
++ u32 base;
++ u32 num;
++ /* refcount and is_alloced are only set
++ when the node is in the used list */
++ unsigned int refcount;
++ int is_alloced;
++};
++
++/* #define DPA_ALLOC_DEBUG */
++
++#ifdef DPA_ALLOC_DEBUG
++#define DPRINT pr_info
++static void DUMP(struct dpa_alloc *alloc)
++{
++ int off = 0;
++ char buf[256];
++ struct alloc_node *p;
++ pr_info("Free Nodes\n");
++ list_for_each_entry(p, &alloc->free, list) {
++ if (off < 255)
++ off += snprintf(buf + off, 255-off, "{%d,%d}",
++ p->base, p->base + p->num - 1);
++ }
++ pr_info("%s\n", buf);
++
++ off = 0;
++ pr_info("Used Nodes\n");
++ list_for_each_entry(p, &alloc->used, list) {
++ if (off < 255)
++ off += snprintf(buf + off, 255-off, "{%d,%d}",
++ p->base, p->base + p->num - 1);
++ }
++ pr_info("%s\n", buf);
++}
++#else
++#define DPRINT(x...)
++#define DUMP(a)
++#endif
++
++int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
++ int partial)
++{
++ struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL;
++ u32 base, next_best_base = 0, num = 0, next_best_num = 0;
++ struct alloc_node *margin_left, *margin_right;
++
++ *result = (u32)-1;
++ DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
++ DUMP(alloc);
++ /* If 'align' is 0, it should behave as though it was 1 */
++ if (!align)
++ align = 1;
++ margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
++ if (!margin_left)
++ goto err;
++ margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
++ if (!margin_right) {
++ kfree(margin_left);
++ goto err;
++ }
++ spin_lock_irq(&alloc->lock);
++ list_for_each_entry(i, &alloc->free, list) {
++ base = (i->base + align - 1) / align;
++ base *= align;
++ if ((base - i->base) >= i->num)
++ /* alignment is impossible, regardless of count */
++ continue;
++ num = i->num - (base - i->base);
++ if (num >= count) {
++ /* this one will do nicely */
++ num = count;
++ goto done;
++ }
++ if (num > next_best_num) {
++ next_best = i;
++ next_best_base = base;
++ next_best_num = num;
++ }
++ }
++ if (partial && next_best) {
++ i = next_best;
++ base = next_best_base;
++ num = next_best_num;
++ } else
++ i = NULL;
++done:
++ if (i) {
++ if (base != i->base) {
++ margin_left->base = i->base;
++ margin_left->num = base - i->base;
++ list_add_tail(&margin_left->list, &i->list);
++ } else
++ kfree(margin_left);
++ if ((base + num) < (i->base + i->num)) {
++ margin_right->base = base + num;
++ margin_right->num = (i->base + i->num) -
++ (base + num);
++ list_add(&margin_right->list, &i->list);
++ } else
++ kfree(margin_right);
++ list_del(&i->list);
++ kfree(i);
++ *result = base;
++ } else {
++ spin_unlock_irq(&alloc->lock);
++ kfree(margin_left);
++ kfree(margin_right);
++ }
++
++err:
++ DPRINT("returning %d\n", i ? num : -ENOMEM);
++ DUMP(alloc);
++ if (!i)
++ return -ENOMEM;
++
++ /* Add the allocation to the used list with a refcount of 1 */
++ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
++ if (!used_node) {
++ spin_unlock_irq(&alloc->lock);
++ return -ENOMEM;
++ }
++ used_node->base = *result;
++ used_node->num = num;
++ used_node->refcount = 1;
++ used_node->is_alloced = 1;
++ list_add_tail(&used_node->list, &alloc->used);
++ spin_unlock_irq(&alloc->lock);
++ return (int)num;
++}
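++
++/* Worked example (illustrative): with a single free node spanning 5..20,
++ * dpa_alloc_new(&a, &r, 4, 8, 0) rounds the base up to the 8-boundary,
++ * carves out 8..11, and leaves margin nodes 5..7 and 12..20 on the free
++ * list; *result is 8 and the return value is 4. */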
++
++/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
++ * forcing error-handling onto users in the deallocation path. */
++static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
++{
++ struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
++ BUG_ON(!node);
++ DPRINT("release_range(%d,%d)\n", base_id, count);
++ DUMP(alloc);
++ BUG_ON(!count);
++ spin_lock_irq(&alloc->lock);
++
++
++ node->base = base_id;
++ node->num = count;
++ list_for_each_entry(i, &alloc->free, list) {
++ if (i->base >= node->base) {
++ /* BUG_ON(any overlapping) */
++ BUG_ON(i->base < (node->base + node->num));
++ list_add_tail(&node->list, &i->list);
++ goto done;
++ }
++ }
++ list_add_tail(&node->list, &alloc->free);
++done:
++ /* Merge to the left */
++ i = list_entry(node->list.prev, struct alloc_node, list);
++ if (node->list.prev != &alloc->free) {
++ BUG_ON((i->base + i->num) > node->base);
++ if ((i->base + i->num) == node->base) {
++ node->base = i->base;
++ node->num += i->num;
++ list_del(&i->list);
++ kfree(i);
++ }
++ }
++ /* Merge to the right */
++ i = list_entry(node->list.next, struct alloc_node, list);
++ if (node->list.next != &alloc->free) {
++ BUG_ON((node->base + node->num) > i->base);
++ if ((node->base + node->num) == i->base) {
++ node->num += i->num;
++ list_del(&i->list);
++ kfree(i);
++ }
++ }
++ spin_unlock_irq(&alloc->lock);
++ DUMP(alloc);
++}
++
++
++void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
++{
++ struct alloc_node *i = NULL;
++ spin_lock_irq(&alloc->lock);
++
++ /* First find the node in the used list and decrement its ref count */
++ list_for_each_entry(i, &alloc->used, list) {
++ if (i->base == base_id && i->num == count) {
++ --i->refcount;
++ if (i->refcount == 0) {
++ list_del(&i->list);
++ spin_unlock_irq(&alloc->lock);
++ if (i->is_alloced)
++ _dpa_alloc_free(alloc, base_id, count);
++ kfree(i);
++ return;
++ }
++ spin_unlock_irq(&alloc->lock);
++ return;
++ }
++ }
++ /* Couldn't find the allocation */
++ pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
++ base_id, count);
++ spin_unlock_irq(&alloc->lock);
++}
++
++void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count)
++{
++ /* Same as free but no previous allocation checking is needed */
++ _dpa_alloc_free(alloc, base_id, count);
++}
++
++
++int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
++{
++ struct alloc_node *i = NULL, *used_node;
++
++ DPRINT("alloc_reserve(%d,%d)\n", base, num);
++ DUMP(alloc);
++
++ spin_lock_irq(&alloc->lock);
++
++	/* Check for the node in the used list.
++	   If found, increase its refcount */
++ list_for_each_entry(i, &alloc->used, list) {
++ if ((i->base == base) && (i->num == num)) {
++ ++i->refcount;
++ spin_unlock_irq(&alloc->lock);
++ return 0;
++ }
++ if ((base >= i->base) && (base < (i->base + i->num))) {
++ /* This is an attempt to reserve a region that was
++ already reserved or alloced with a different
++ base or num */
++ pr_err("Cannot reserve %d - %d, it overlaps with"
++ " existing reservation from %d - %d\n",
++ base, base + num - 1, i->base,
++ i->base + i->num - 1);
++ spin_unlock_irq(&alloc->lock);
++ return -1;
++ }
++ }
++ /* Check to make sure this ID isn't in the free list */
++ list_for_each_entry(i, &alloc->free, list) {
++ if ((base >= i->base) && (base < (i->base + i->num))) {
++ /* yep, the reservation is within this node */
++ pr_err("Cannot reserve %d - %d, it overlaps with"
++ " free range %d - %d and must be alloced\n",
++ base, base + num - 1,
++ i->base, i->base + i->num - 1);
++ spin_unlock_irq(&alloc->lock);
++ return -1;
++ }
++ }
++ /* Add the allocation to the used list with a refcount of 1 */
++ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
++ if (!used_node) {
++ spin_unlock_irq(&alloc->lock);
++ return -ENOMEM;
++	}
++ used_node->base = base;
++ used_node->num = num;
++ used_node->refcount = 1;
++ used_node->is_alloced = 0;
++ list_add_tail(&used_node->list, &alloc->used);
++ spin_unlock_irq(&alloc->lock);
++ return 0;
++}
++
++
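++/* Pop the first range off the free list, returning its base and size via
++ * 'result' and 'count'; fails with -ENOMEM if the free list is empty. */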
++int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
++{
++ struct alloc_node *i = NULL;
++ DPRINT("alloc_pop()\n");
++ DUMP(alloc);
++ spin_lock_irq(&alloc->lock);
++ if (!list_empty(&alloc->free)) {
++ i = list_entry(alloc->free.next, struct alloc_node, list);
++ list_del(&i->list);
++ }
++ spin_unlock_irq(&alloc->lock);
++ DPRINT("returning %d\n", i ? 0 : -ENOMEM);
++ DUMP(alloc);
++ if (!i)
++ return -ENOMEM;
++ *result = i->base;
++ *count = i->num;
++ kfree(i);
++ return 0;
++}
++
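++/* Return 1 if 'item' falls within any range on the free list, else 0. */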
++int dpa_alloc_check(struct dpa_alloc *list_head, u32 item)
++{
++ struct alloc_node *i = NULL;
++ int res = 0;
++ DPRINT("alloc_check()\n");
++ spin_lock_irq(&list_head->lock);
++
++ list_for_each_entry(i, &list_head->free, list) {
++ if ((item >= i->base) && (item < (i->base + i->num))) {
++ res = 1;
++ break;
++ }
++ }
++ spin_unlock_irq(&list_head->lock);
++ return res;
++}
+diff --git a/drivers/staging/fsl_qbman/dpa_sys.h b/drivers/staging/fsl_qbman/dpa_sys.h
+new file mode 100644
+index 00000000..e144f5a4
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/dpa_sys.h
+@@ -0,0 +1,259 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef DPA_SYS_H
++#define DPA_SYS_H
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/dma-mapping.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/kthread.h>
++#include <linux/memblock.h>
++#include <linux/completion.h>
++#include <linux/log2.h>
++#include <linux/types.h>
++#include <linux/ioctl.h>
++#include <linux/miscdevice.h>
++#include <linux/uaccess.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/device.h>
++#include <linux/uio_driver.h>
++#include <linux/smp.h>
++#include <linux/fsl_hypervisor.h>
++#include <linux/vmalloc.h>
++#include <linux/ctype.h>
++#include <linux/math64.h>
++#include <linux/bitops.h>
++
++#include <linux/fsl_usdpaa.h>
++
++/* When copying aligned words or shorts, try to avoid memcpy() */
++#define CONFIG_TRY_BETTER_MEMCPY
++
++/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
++#define DPA_PORTAL_CE 0
++#define DPA_PORTAL_CI 1
++
++/***********************/
++/* Misc inline assists */
++/***********************/
++
++#if defined CONFIG_PPC32
++#include "dpa_sys_ppc32.h"
++#elif defined CONFIG_PPC64
++#include "dpa_sys_ppc64.h"
++#elif defined CONFIG_ARM
++#include "dpa_sys_arm.h"
++#elif defined CONFIG_ARM64
++#include "dpa_sys_arm64.h"
++#endif
++
++
++#ifdef CONFIG_FSL_DPA_CHECKING
++#define DPA_ASSERT(x) \
++ do { \
++ if (!(x)) { \
++ pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \
++ __stringify_1(x)); \
++ dump_stack(); \
++ panic("assertion failure"); \
++ } \
++ } while (0)
++#else
++#define DPA_ASSERT(x)
++#endif
++
++/* memcpy() stuff - when you know alignments in advance */
++#ifdef CONFIG_TRY_BETTER_MEMCPY
++static inline void copy_words(void *dest, const void *src, size_t sz)
++{
++ u32 *__dest = dest;
++ const u32 *__src = src;
++ size_t __sz = sz >> 2;
++ BUG_ON((unsigned long)dest & 0x3);
++ BUG_ON((unsigned long)src & 0x3);
++ BUG_ON(sz & 0x3);
++ while (__sz--)
++ *(__dest++) = *(__src++);
++}
++static inline void copy_shorts(void *dest, const void *src, size_t sz)
++{
++ u16 *__dest = dest;
++ const u16 *__src = src;
++ size_t __sz = sz >> 1;
++ BUG_ON((unsigned long)dest & 0x1);
++ BUG_ON((unsigned long)src & 0x1);
++ BUG_ON(sz & 0x1);
++ while (__sz--)
++ *(__dest++) = *(__src++);
++}
++static inline void copy_bytes(void *dest, const void *src, size_t sz)
++{
++ u8 *__dest = dest;
++ const u8 *__src = src;
++ while (sz--)
++ *(__dest++) = *(__src++);
++}
++#else
++#define copy_words memcpy
++#define copy_shorts memcpy
++#define copy_bytes memcpy
++#endif
++
++/************/
++/* RB-trees */
++/************/
++
++/* We encapsulate RB-trees so that it's easier to use non-linux forms in
++ * non-linux systems. This also encapsulates the extra plumbing that linux code
++ * usually provides when using RB-trees. This encapsulation assumes that the
++ * data type held by the tree is u32. */
++
++struct dpa_rbtree {
++ struct rb_root root;
++};
++#define DPA_RBTREE { .root = RB_ROOT }
++
++static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
++{
++ tree->root = RB_ROOT;
++}
++
++#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
++static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
++{ \
++ struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
++ while (*p) { \
++ u32 item; \
++ parent = *p; \
++ item = rb_entry(parent, type, node_field)->val_field; \
++ if (obj->val_field < item) \
++ p = &parent->rb_left; \
++ else if (obj->val_field > item) \
++ p = &parent->rb_right; \
++ else \
++ return -EBUSY; \
++ } \
++ rb_link_node(&obj->node_field, parent, p); \
++ rb_insert_color(&obj->node_field, &tree->root); \
++ return 0; \
++} \
++static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
++{ \
++ rb_erase(&obj->node_field, &tree->root); \
++} \
++static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
++{ \
++ type *ret; \
++ struct rb_node *p = tree->root.rb_node; \
++ while (p) { \
++ ret = rb_entry(p, type, node_field); \
++ if (val < ret->val_field) \
++ p = p->rb_left; \
++ else if (val > ret->val_field) \
++ p = p->rb_right; \
++ else \
++ return ret; \
++ } \
++ return NULL; \
++}
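++/* Usage sketch (hypothetical names): given
++ *	struct foo { struct rb_node node; u32 fqid; };
++ *	IMPLEMENT_DPA_RBTREE(footree, struct foo, node, fqid);
++ * the macro generates footree_push(), footree_del() and footree_find(),
++ * keyed on the 'fqid' member. */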
++
++/************/
++/* Bootargs */
++/************/
++
++/* Qman has "qportals=" and Bman has "bportals=", they use the same syntax
++ * though; a comma-separated list of items, each item being a cpu index and/or a
++ * range of cpu indices, and each item optionally prefixed by "s" to indicate
++ * that the portal associated with that cpu should be shared. See bman_driver.c
++ * for more specifics. */
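++/* For example, "qportals=s0,2-3" requests a shared portal on cpu 0 and
++ * unshared portals on cpus 2 and 3. */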
++static int __parse_portals_cpu(const char **s, unsigned int *cpu)
++{
++ *cpu = 0;
++ if (!isdigit(**s))
++ return -EINVAL;
++ while (isdigit(**s))
++ *cpu = *cpu * 10 + (*((*s)++) - '0');
++ return 0;
++}
++static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
++ struct cpumask *want_unshared,
++ const char *argname)
++{
++ const char *s = str;
++ unsigned int shared, cpu1, cpu2, loop;
++
++keep_going:
++ if (*s == 's') {
++ shared = 1;
++ s++;
++ } else
++ shared = 0;
++ if (__parse_portals_cpu(&s, &cpu1))
++ goto err;
++ if (*s == '-') {
++ s++;
++ if (__parse_portals_cpu(&s, &cpu2))
++ goto err;
++ if (cpu2 < cpu1)
++ goto err;
++ } else
++ cpu2 = cpu1;
++ for (loop = cpu1; loop <= cpu2; loop++)
++ cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
++ if (*s == ',') {
++ s++;
++ goto keep_going;
++ } else if ((*s == '\0') || isspace(*s))
++ return 0;
++err:
++ pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
++ (unsigned long)s - (unsigned long)str);
++ return -EINVAL;
++}
++#ifdef CONFIG_FSL_USDPAA
++/* Hooks from fsl_usdpaa_irq.c to fsl_usdpaa.c */
++int usdpaa_get_portal_config(struct file *filp, void *cinh,
++ enum usdpaa_portal_type ptype, unsigned int *irq,
++ void **iir_reg);
++#endif
++#endif /* DPA_SYS_H */
+diff --git a/drivers/staging/fsl_qbman/dpa_sys_arm.h b/drivers/staging/fsl_qbman/dpa_sys_arm.h
+new file mode 100644
+index 00000000..17c5500e
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/dpa_sys_arm.h
+@@ -0,0 +1,95 @@
++/* Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef DPA_SYS_ARM_H
++#define DPA_SYS_ARM_H
++
++#include <asm/cacheflush.h>
++#include <asm/barrier.h>
++
++/* Implementation of ARM specific routines */
++
++/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
++ * barriers and that dcb*() won't fall victim to compiler or execution
++ * reordering with respect to other code/instructions that manipulate the same
++ * cacheline. */
++#define hwsync() { asm volatile("dmb st" : : : "memory"); }
++#define lwsync() { asm volatile("dmb st" : : : "memory"); }
++#define dcbf(p) { asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p) : "memory"); }
++#define dcbt_ro(p) { asm volatile("pld [%0, #64];": : "r" (p)); }
++#define dcbt_rw(p) { asm volatile("pldw [%0, #64];": : "r" (p)); }
++#define dcbi(p) { asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (p) : "memory"); }
++
++#define dcbz_64(p) { memset(p, 0, sizeof(*p)); }
++
++#define dcbf_64(p) \
++ do { \
++ dcbf((u32)p); \
++ } while (0)
++/* Commonly used combo */
++#define dcbit_ro(p) \
++ do { \
++ dcbi((u32)p); \
++ dcbt_ro((u32)p); \
++ } while (0)
++
++static inline u64 mfatb(void)
++{
++ return get_cycles();
++}
++
++static inline u32 in_be32(volatile void *addr)
++{
++ return be32_to_cpu(*((volatile u32 *) addr));
++}
++
++static inline void out_be32(void *addr, u32 val)
++{
++ *((u32 *) addr) = cpu_to_be32(val);
++}
++
++
++static inline void set_bits(unsigned long mask, volatile unsigned long *p)
++{
++ *p |= mask;
++}
++static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
++{
++ *p &= ~mask;
++}
++
++static inline void flush_dcache_range(unsigned long start, unsigned long stop)
++{
++ __cpuc_flush_dcache_area((void *) start, stop - start);
++}
++
++#define hard_smp_processor_id() raw_smp_processor_id()
++#endif
+diff --git a/drivers/staging/fsl_qbman/dpa_sys_arm64.h b/drivers/staging/fsl_qbman/dpa_sys_arm64.h
+new file mode 100644
+index 00000000..247c8d97
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/dpa_sys_arm64.h
+@@ -0,0 +1,102 @@
++/* Copyright 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef DPA_SYS_ARM64_H
++#define DPA_SYS_ARM64_H
++
++#include <asm/cacheflush.h>
++#include <asm/barrier.h>
++
++/* Implementation of ARM 64 bit specific routines */
++
++/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
++ * barriers and that dcb*() won't fall victim to compiler or execution
++ * reordering with respect to other code/instructions that manipulate the same
++ * cacheline. */
++#define hwsync() { asm volatile("dmb st" : : : "memory"); }
++#define lwsync() { asm volatile("dmb st" : : : "memory"); }
++#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
++#define dcbt_ro(p) { asm volatile("prfm pldl1keep, [%0, #0]" : : "r" (p)); }
++#define dcbt_rw(p) { asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p)); }
++#define dcbi(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
++#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
++
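++/* Note: dcbz_64() assumes the CPU's DC ZVA zeroing block size is 64 bytes */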
++#define dcbz_64(p) \
++ do { \
++ dcbz(p); \
++ } while (0)
++
++#define dcbf_64(p) \
++ do { \
++ dcbf(p); \
++ } while (0)
++/* Commonly used combo */
++#define dcbit_ro(p) \
++ do { \
++ dcbi(p); \
++ dcbt_ro(p); \
++ } while (0)
++
++static inline u64 mfatb(void)
++{
++ return get_cycles();
++}
++
++static inline u32 in_be32(volatile void *addr)
++{
++ return be32_to_cpu(*((volatile u32 *) addr));
++}
++
++static inline void out_be32(void *addr, u32 val)
++{
++ *((u32 *) addr) = cpu_to_be32(val);
++}
++
++
++static inline void set_bits(unsigned long mask, volatile unsigned long *p)
++{
++ *p |= mask;
++}
++static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
++{
++ *p &= ~mask;
++}
++
++static inline void flush_dcache_range(unsigned long start, unsigned long stop)
++{
++ __flush_dcache_area((void *) start, stop - start);
++}
++
++#define hard_smp_processor_id() raw_smp_processor_id()
++
++#endif
+diff --git a/drivers/staging/fsl_qbman/dpa_sys_ppc32.h b/drivers/staging/fsl_qbman/dpa_sys_ppc32.h
+new file mode 100644
+index 00000000..874616df
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/dpa_sys_ppc32.h
+@@ -0,0 +1,70 @@
++/* Copyright 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef DPA_SYS_PPC32_H
++#define DPA_SYS_PPC32_H
++
++/* Implementation of PowerPC 32 bit specific routines */
++
++/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
++ * barriers and that dcb*() won't fall victim to compiler or execution
++ * reordering with respect to other code/instructions that manipulate the same
++ * cacheline. */
++#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
++#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
++#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
++#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
++#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
++#define dcbi(p) dcbf(p)
++
++#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
++#define dcbz_64(p) dcbzl(p)
++#define dcbf_64(p) dcbf(p)
++
++/* Commonly used combo */
++#define dcbit_ro(p) \
++ do { \
++ dcbi(p); \
++ dcbt_ro(p); \
++ } while (0)
++
++static inline u64 mfatb(void)
++{
++ u32 hi, lo, chk;
++ do {
++ hi = mfspr(SPRN_ATBU);
++ lo = mfspr(SPRN_ATBL);
++ chk = mfspr(SPRN_ATBU);
++ } while (unlikely(hi != chk));
++ return ((u64)hi << 32) | (u64)lo;
++}
++
++#endif
+diff --git a/drivers/staging/fsl_qbman/dpa_sys_ppc64.h b/drivers/staging/fsl_qbman/dpa_sys_ppc64.h
+new file mode 100644
+index 00000000..d9803199
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/dpa_sys_ppc64.h
+@@ -0,0 +1,79 @@
++/* Copyright 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef DPA_SYS_PPC64_H
++#define DPA_SYS_PPC64_H
++
++/* Implementation of PowerPC 64 bit specific routines */
++
++/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
++ * barriers and that dcb*() won't fall victim to compiler or execution
++ * reordering with respect to other code/instructions that manipulate the same
++ * cacheline. */
++#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
++#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
++#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
++#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
++#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
++#define dcbi(p) dcbf(p)
++
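++/* The 64-byte helpers below issue two operations, one per 32-byte line */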
++#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
++#define dcbz_64(p) \
++ do { \
++ dcbz((void*)p + 32); \
++ dcbz(p); \
++ } while (0)
++#define dcbf_64(p) \
++ do { \
++ dcbf((void*)p + 32); \
++ dcbf(p); \
++ } while (0)
++/* Commonly used combo */
++#define dcbit_ro(p) \
++ do { \
++ dcbi(p); \
++ dcbi((void*)p + 32); \
++ dcbt_ro(p); \
++ dcbt_ro((void*)p + 32); \
++ } while (0)
++
++static inline u64 mfatb(void)
++{
++ u32 hi, lo, chk;
++ do {
++ hi = mfspr(SPRN_ATBU);
++ lo = mfspr(SPRN_ATBL);
++ chk = mfspr(SPRN_ATBU);
++ } while (unlikely(hi != chk));
++ return ((u64)hi << 32) | (u64)lo;
++}
++
++#endif
+diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c
+new file mode 100644
+index 00000000..3a6d3722
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
+@@ -0,0 +1,1983 @@
++/* Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
++ * Authors: Andy Fleming <afleming@freescale.com>
++ * Timur Tabi <timur@freescale.com>
++ * Geoff Thorpe <Geoff.Thorpe@freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/mm.h>
++#include <linux/of.h>
++#include <linux/memblock.h>
++#include <linux/slab.h>
++#include <linux/mman.h>
++#include <linux/of_reserved_mem.h>
++
++#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
++#include <mm/mmu_decl.h>
++#endif
++
++#include "dpa_sys.h"
++#include <linux/fsl_usdpaa.h>
++#include "bman_low.h"
++#include "qman_low.h"
++
++/* Physical address range of the memory reservation, exported for mm/mem.c */
++static u64 phys_start;
++static u64 phys_size;
++static u64 arg_phys_size;
++
++/* PFN versions of the above */
++static unsigned long pfn_start;
++static unsigned long pfn_size;
++
++/* Memory reservations are manipulated under this spinlock (which is why 'refs'
++ * isn't atomic_t). */
++static DEFINE_SPINLOCK(mem_lock);
++
++/* The range of TLB1 indices */
++static unsigned int first_tlb;
++static unsigned int num_tlb = 1;
++static unsigned int current_tlb; /* loops around for fault handling */
++
++/* Memory reservation is represented as a list of 'mem_fragment's, some of which
++ * may be mapped. Unmapped fragments are always merged where possible. */
++static LIST_HEAD(mem_list);
++
++struct mem_mapping;
++
++/* Memory fragments are in 'mem_list'. */
++struct mem_fragment {
++ u64 base;
++ u64 len;
++ unsigned long pfn_base; /* PFN version of 'base' */
++ unsigned long pfn_len; /* PFN version of 'len' */
++ unsigned int refs; /* zero if unmapped */
++	u64 root_len; /* Size of the original fragment */
++	unsigned long root_pfn; /* PFN of the original fragment */
++ struct list_head list;
++ /* if mapped, flags+name captured at creation time */
++ u32 flags;
++ char name[USDPAA_DMA_NAME_MAX];
++ u64 map_len;
++ /* support multi-process locks per-memory-fragment. */
++ int has_locking;
++ wait_queue_head_t wq;
++ struct mem_mapping *owner;
++};
++
++/* Mappings of memory fragments in 'struct ctx'. These are created from
++ * ioctl(USDPAA_IOCTL_DMA_MAP), though the actual mapping then happens via a
++ * mmap(). */
++struct mem_mapping {
++ struct mem_fragment *root_frag;
++ u32 frag_count;
++ u64 total_size;
++ struct list_head list;
++ int refs;
++ void *virt_addr;
++};
++
++struct portal_mapping {
++ struct usdpaa_ioctl_portal_map user;
++ union {
++ struct qm_portal_config *qportal;
++ struct bm_portal_config *bportal;
++ };
++ /* Declare space for the portals in case the process
++ exits unexpectedly and needs to be cleaned by the kernel */
++ union {
++ struct qm_portal qman_portal_low;
++ struct bm_portal bman_portal_low;
++ };
++ struct list_head list;
++ struct resource *phys;
++ struct iommu_domain *iommu_domain;
++};
++
++/* Track the DPAA resources the process is using */
++struct active_resource {
++ struct list_head list;
++ u32 id;
++ u32 num;
++ unsigned int refcount;
++};
++
++/* Per-FD state (which should also be per-process but we don't enforce that) */
++struct ctx {
++ /* Lock to protect the context */
++ spinlock_t lock;
++ /* Allocated resources get put here for accounting */
++ struct list_head resources[usdpaa_id_max];
++ /* list of DMA maps */
++ struct list_head maps;
++ /* list of portal maps */
++ struct list_head portals;
++};
++
++/* Different resource classes */
++static const struct alloc_backend {
++ enum usdpaa_id_type id_type;
++ int (*alloc)(u32 *, u32, u32, int);
++ void (*release)(u32 base, unsigned int count);
++ int (*reserve)(u32 base, unsigned int count);
++ const char *acronym;
++} alloc_backends[] = {
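++	/* Note: .reserve is optional; ID types without it (the CGRID and
++	 * CEETM ranges below) cannot be reserved and the reserve ioctl
++	 * returns -EINVAL for them. */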
++ {
++ .id_type = usdpaa_id_fqid,
++ .alloc = qman_alloc_fqid_range,
++ .release = qman_release_fqid_range,
++ .reserve = qman_reserve_fqid_range,
++ .acronym = "FQID"
++ },
++ {
++ .id_type = usdpaa_id_bpid,
++ .alloc = bman_alloc_bpid_range,
++ .release = bman_release_bpid_range,
++ .reserve = bman_reserve_bpid_range,
++ .acronym = "BPID"
++ },
++ {
++ .id_type = usdpaa_id_qpool,
++ .alloc = qman_alloc_pool_range,
++ .release = qman_release_pool_range,
++ .reserve = qman_reserve_pool_range,
++ .acronym = "QPOOL"
++ },
++ {
++ .id_type = usdpaa_id_cgrid,
++ .alloc = qman_alloc_cgrid_range,
++ .release = qman_release_cgrid_range,
++ .acronym = "CGRID"
++ },
++ {
++ .id_type = usdpaa_id_ceetm0_lfqid,
++ .alloc = qman_alloc_ceetm0_lfqid_range,
++ .release = qman_release_ceetm0_lfqid_range,
++ .acronym = "CEETM0_LFQID"
++ },
++ {
++ .id_type = usdpaa_id_ceetm0_channelid,
++ .alloc = qman_alloc_ceetm0_channel_range,
++ .release = qman_release_ceetm0_channel_range,
++		.acronym = "CEETM0_CHANNELID"
++ },
++ {
++ .id_type = usdpaa_id_ceetm1_lfqid,
++ .alloc = qman_alloc_ceetm1_lfqid_range,
++ .release = qman_release_ceetm1_lfqid_range,
++ .acronym = "CEETM1_LFQID"
++ },
++ {
++ .id_type = usdpaa_id_ceetm1_channelid,
++ .alloc = qman_alloc_ceetm1_channel_range,
++ .release = qman_release_ceetm1_channel_range,
++		.acronym = "CEETM1_CHANNELID"
++ },
++ {
++ /* This terminates the array */
++ .id_type = usdpaa_id_max
++ }
++};
++
++/* Determines the largest acceptable page size for a given size.
++   The candidates are the page sizes that TLB1 accepts */
++static u32 largest_page_size(u32 size)
++{
++ int shift = 30; /* Start at 1G size */
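++	/* Candidate sizes step down in powers of 4 (1G, 256M, ..., 16K, 4K);
++	 * e.g. a 5 MB request yields 4 MB */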
++ if (size < 4096)
++ return 0;
++ do {
++ if (size >= (1<<shift))
++ return 1<<shift;
++ shift -= 2;
++ } while (shift >= 12); /* Up to 4k */
++ return 0;
++}
++
++/* Determine if value is power of 4 */
++static inline bool is_power_of_4(u64 x)
++{
++ if (x == 0 || ((x & (x - 1)) != 0))
++ return false;
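++	/* x is now a power of 2; it is also a power of 4 iff its single set
++	 * bit is at an even position, which the 0x5555... mask selects */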
++ return !!(x & 0x5555555555555555ull);
++}
++
++/* Helper for ioctl_dma_map() when we have a larger fragment than we need. This
++ * splits the fragment into 4 and returns the upper-most. (The caller can loop
++ * until it has a suitable fragment size.) */
++static struct mem_fragment *split_frag(struct mem_fragment *frag)
++{
++ struct mem_fragment *x[3];
++
++ x[0] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
++ x[1] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
++ x[2] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
++ if (!x[0] || !x[1] || !x[2]) {
++ kfree(x[0]);
++ kfree(x[1]);
++ kfree(x[2]);
++ return NULL;
++ }
++ BUG_ON(frag->refs);
++ frag->len >>= 2;
++ frag->pfn_len >>= 2;
++ x[0]->base = frag->base + frag->len;
++ x[1]->base = x[0]->base + frag->len;
++ x[2]->base = x[1]->base + frag->len;
++ x[0]->len = x[1]->len = x[2]->len = frag->len;
++ x[0]->pfn_base = frag->pfn_base + frag->pfn_len;
++ x[1]->pfn_base = x[0]->pfn_base + frag->pfn_len;
++ x[2]->pfn_base = x[1]->pfn_base + frag->pfn_len;
++ x[0]->pfn_len = x[1]->pfn_len = x[2]->pfn_len = frag->pfn_len;
++ x[0]->refs = x[1]->refs = x[2]->refs = 0;
++ x[0]->root_len = x[1]->root_len = x[2]->root_len = frag->root_len;
++ x[0]->root_pfn = x[1]->root_pfn = x[2]->root_pfn = frag->root_pfn;
++ x[0]->name[0] = x[1]->name[0] = x[2]->name[0] = 0;
++ list_add_tail(&x[0]->list, &frag->list);
++ list_add_tail(&x[1]->list, &x[0]->list);
++ list_add_tail(&x[2]->list, &x[1]->list);
++ return x[2];
++}
++
++static __maybe_unused void dump_frags(void)
++{
++ struct mem_fragment *frag;
++ int i = 0;
++ list_for_each_entry(frag, &mem_list, list) {
++ pr_info("FRAG %d: base 0x%llx pfn_base 0x%lx len 0x%llx root_len 0x%llx root_pfn 0x%lx refs %d name %s\n",
++ i, frag->base, frag->pfn_base,
++ frag->len, frag->root_len, frag->root_pfn,
++ frag->refs, frag->name);
++ ++i;
++ }
++}
++
++/* Walk the list of fragments and adjoin neighbouring segments if possible */
++static void compress_frags(void)
++{
++ /* Walk the fragment list and combine fragments */
++ struct mem_fragment *frag, *nxtfrag;
++	u64 len = 0;
++	int i, numfrags;
++
++	frag = list_entry(mem_list.next, struct mem_fragment, list);
++
++ while (&frag->list != &mem_list) {
++		/* Must combine consecutive fragments with the same
++		   root_pfn such that the combined size is a power of 4 */
++ if (frag->refs != 0) {
++ frag = list_entry(frag->list.next,
++ struct mem_fragment, list);
++ continue; /* Not this window */
++ }
++ len = frag->len;
++ numfrags = 0;
++ nxtfrag = list_entry(frag->list.next,
++ struct mem_fragment, list);
++ while (true) {
++ if (&nxtfrag->list == &mem_list) {
++ numfrags = 0;
++ break; /* End of list */
++ }
++ if (nxtfrag->refs) {
++ numfrags = 0;
++ break; /* In use still */
++ }
++ if (nxtfrag->root_pfn != frag->root_pfn) {
++ numfrags = 0;
++ break; /* Crosses root fragment boundary */
++ }
++ len += nxtfrag->len;
++ numfrags++;
++ if (is_power_of_4(len)) {
++ /* These fragments can be combined */
++ break;
++ }
++ nxtfrag = list_entry(nxtfrag->list.next,
++ struct mem_fragment, list);
++ }
++ if (numfrags == 0) {
++ frag = list_entry(frag->list.next,
++ struct mem_fragment, list);
++ continue; /* try the next window */
++ }
++ for (i = 0; i < numfrags; i++) {
++ struct mem_fragment *todel =
++ list_entry(nxtfrag->list.prev,
++ struct mem_fragment, list);
++ nxtfrag->len += todel->len;
++ nxtfrag->pfn_len += todel->pfn_len;
++ list_del(&todel->list);
++ }
++		/* Re-evaluate the list, things may merge now */
++ frag = list_entry(mem_list.next, struct mem_fragment, list);
++ }
++}
++
++/* Hook from arch/powerpc/mm/mem.c */
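++/* Returns a TLB1 slot index for the faulting mapping, cycling through the
++ * range [first_tlb, first_tlb + num_tlb), or -1 if 'pfn' is outside the
++ * USDPAA reservation. */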
++int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size)
++{
++ struct mem_fragment *frag;
++ int idx = -1;
++ if ((pfn < pfn_start) || (pfn >= (pfn_start + pfn_size)))
++ return -1;
++ /* It's in-range, we need to find the fragment */
++ spin_lock(&mem_lock);
++ list_for_each_entry(frag, &mem_list, list) {
++ if ((pfn >= frag->pfn_base) && (pfn < (frag->pfn_base +
++ frag->pfn_len))) {
++ *phys_addr = frag->base;
++ *size = frag->len;
++ idx = current_tlb++;
++ if (current_tlb >= (first_tlb + num_tlb))
++ current_tlb = first_tlb;
++ break;
++ }
++ }
++ spin_unlock(&mem_lock);
++ return idx;
++}
++
++static int usdpaa_open(struct inode *inode, struct file *filp)
++{
++ const struct alloc_backend *backend = &alloc_backends[0];
++ struct ctx *ctx = kmalloc(sizeof(struct ctx), GFP_KERNEL);
++ if (!ctx)
++ return -ENOMEM;
++ filp->private_data = ctx;
++
++ while (backend->id_type != usdpaa_id_max) {
++ INIT_LIST_HEAD(&ctx->resources[backend->id_type]);
++ backend++;
++ }
++
++ INIT_LIST_HEAD(&ctx->maps);
++ INIT_LIST_HEAD(&ctx->portals);
++ spin_lock_init(&ctx->lock);
++
++ //filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi;
++
++ return 0;
++}
++
++#define DQRR_MAXFILL 15
++
++/* Reset a QMan portal to its default state */
++static int init_qm_portal(struct qm_portal_config *config,
++ struct qm_portal *portal)
++{
++ const struct qm_dqrr_entry *dqrr = NULL;
++ int i;
++
++ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
++ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
++
++ /* Make sure interrupts are inhibited */
++ qm_out(IIR, 1);
++
++ /* Initialize the DQRR. This will stop any dequeue
++ commands that are in progress */
++ if (qm_dqrr_init(portal, config, qm_dqrr_dpush, qm_dqrr_pvb,
++ qm_dqrr_cdc, DQRR_MAXFILL)) {
++ pr_err("qm_dqrr_init() failed when trying to"
++ " recover portal, portal will be leaked\n");
++ return 1;
++ }
++
++ /* Discard any entries on the DQRR */
++ /* If we consume the ring twice something is wrong */
++ for (i = 0; i < DQRR_MAXFILL * 2; i++) {
++ qm_dqrr_pvb_update(portal);
++ dqrr = qm_dqrr_current(portal);
++ if (!dqrr)
++ break;
++ qm_dqrr_cdc_consume_1ptr(portal, dqrr, 0);
++ qm_dqrr_pvb_update(portal);
++ qm_dqrr_next(portal);
++ }
++ /* Initialize the EQCR */
++ if (qm_eqcr_init(portal, qm_eqcr_pvb,
++ qm_eqcr_get_ci_stashing(portal), 1)) {
++ pr_err("Qman EQCR initialisation failed\n");
++ return 1;
++ }
++ /* initialize the MR */
++ if (qm_mr_init(portal, qm_mr_pvb, qm_mr_cci)) {
++ pr_err("Qman MR initialisation failed\n");
++ return 1;
++ }
++ qm_mr_pvb_update(portal);
++ while (qm_mr_current(portal)) {
++ qm_mr_next(portal);
++ qm_mr_cci_consume_to_current(portal);
++ qm_mr_pvb_update(portal);
++ }
++
++ if (qm_mc_init(portal)) {
++ pr_err("Qman MC initialisation failed\n");
++ return 1;
++ }
++ return 0;
++}
++
++static int init_bm_portal(struct bm_portal_config *config,
++ struct bm_portal *portal)
++{
++ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
++ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
++
++ if (bm_rcr_init(portal, bm_rcr_pvb, bm_rcr_cce)) {
++ pr_err("Bman RCR initialisation failed\n");
++ return 1;
++ }
++ if (bm_mc_init(portal)) {
++ pr_err("Bman MC initialisation failed\n");
++ return 1;
++ }
++ return 0;
++}
++
++/* Function that will scan all FQs in the system. For each FQ that is not
++ OOS it will call the check_channel helper to determine if the FQ should
++ be torn down. If the check_channel helper returns true the FQ will be
++ transitioned to the OOS state */
++static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
++ bool (*check_channel)(void*, u32))
++{
++ u32 fq_id = 0;
++ while (1) {
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ u8 state;
++ u32 channel;
++
++ /* Determine the channel for the FQID */
++ mcc = qm_mc_start(portal);
++ mcc->queryfq.fqid = fq_id;
++ qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);
++ while (!(mcr = qm_mc_result(portal)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
++ == QM_MCR_VERB_QUERYFQ);
++ if (mcr->result != QM_MCR_RESULT_OK)
++ break; /* End of valid FQIDs */
++
++ channel = mcr->queryfq.fqd.dest.channel;
++ /* Determine the state of the FQID */
++ mcc = qm_mc_start(portal);
++ mcc->queryfq_np.fqid = fq_id;
++ qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
++ while (!(mcr = qm_mc_result(portal)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
++ == QM_MCR_VERB_QUERYFQ_NP);
++ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
++ if (state == QM_MCR_NP_STATE_OOS)
++			/* Already OOS, no need to do any more checks */
++ goto next;
++
++ if (check_channel(ctx, channel))
++ qm_shutdown_fq(&portal, 1, fq_id);
++ next:
++ ++fq_id;
++ }
++ return 0;
++}
++
++static bool check_channel_device(void *_ctx, u32 channel)
++{
++ struct ctx *ctx = _ctx;
++ struct portal_mapping *portal, *tmpportal;
++ struct active_resource *res;
++
++ /* See if the FQ is destined for one of the portals we're cleaning up */
++ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
++ if (portal->user.type == usdpaa_portal_qman) {
++ if (portal->qportal->public_cfg.channel == channel) {
++				/* This FQ's destination is a portal
++				   we're cleaning, send a retire */
++ return true;
++ }
++ }
++ }
++
++ /* Check the pool channels that will be released as well */
++ list_for_each_entry(res, &ctx->resources[usdpaa_id_qpool], list) {
++		if ((channel >= res->id) &&
++		    (channel <= (res->id + res->num - 1)))
++ return true;
++ }
++ return false;
++}
++
++static bool check_portal_channel(void *ctx, u32 channel)
++{
++ u32 portal_channel = *(u32 *)ctx;
++ if (portal_channel == channel) {
++		/* This FQ's destination is a portal
++		   we're cleaning, send a retire */
++ return true;
++ }
++ return false;
++}
++
++static int usdpaa_release(struct inode *inode, struct file *filp)
++{
++ struct ctx *ctx = filp->private_data;
++ struct mem_mapping *map, *tmpmap;
++ struct portal_mapping *portal, *tmpportal;
++ const struct alloc_backend *backend = &alloc_backends[0];
++ struct active_resource *res;
++ struct qm_portal *qm_cleanup_portal = NULL;
++ struct bm_portal *bm_cleanup_portal = NULL;
++ struct qm_portal_config *qm_alloced_portal = NULL;
++ struct bm_portal_config *bm_alloced_portal = NULL;
++
++ struct qm_portal *portal_array[qman_portal_max];
++ int portal_count = 0;
++
++ /* Ensure the release operation cannot be migrated to another
++ CPU as CPU specific variables may be needed during cleanup */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_disable();
++#endif
++ /* The following logic is used to recover resources that were not
++ correctly released by the process that is closing the FD.
++	   Step 1: synchronize the HW with the qm_portal/bm_portal structures
++ in the kernel
++ */
++
++ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
++ /* Try to recover any portals that weren't shut down */
++ if (portal->user.type == usdpaa_portal_qman) {
++ portal_array[portal_count] = &portal->qman_portal_low;
++ ++portal_count;
++ init_qm_portal(portal->qportal,
++ &portal->qman_portal_low);
++ if (!qm_cleanup_portal) {
++ qm_cleanup_portal = &portal->qman_portal_low;
++ } else {
++ /* Clean FQs on the dedicated channel */
++ u32 chan = portal->qportal->public_cfg.channel;
++ qm_check_and_destroy_fqs(
++ &portal->qman_portal_low, &chan,
++ check_portal_channel);
++ }
++ } else {
++ /* BMAN */
++ init_bm_portal(portal->bportal,
++ &portal->bman_portal_low);
++ if (!bm_cleanup_portal)
++ bm_cleanup_portal = &portal->bman_portal_low;
++ }
++ }
++ /* If no portal was found, allocate one for cleanup */
++ if (!qm_cleanup_portal) {
++ qm_alloced_portal = qm_get_unused_portal();
++ if (!qm_alloced_portal) {
++			pr_crit("No QMan portal available for cleanup\n");
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_enable();
++#endif
++ return -1;
++ }
++ qm_cleanup_portal = kmalloc(sizeof(struct qm_portal),
++ GFP_KERNEL);
++ if (!qm_cleanup_portal) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_enable();
++#endif
++ return -ENOMEM;
++ }
++ init_qm_portal(qm_alloced_portal, qm_cleanup_portal);
++ portal_array[portal_count] = qm_cleanup_portal;
++ ++portal_count;
++ }
++ if (!bm_cleanup_portal) {
++ bm_alloced_portal = bm_get_unused_portal();
++ if (!bm_alloced_portal) {
++			pr_crit("No BMan portal available for cleanup\n");
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_enable();
++#endif
++ return -1;
++ }
++ bm_cleanup_portal = kmalloc(sizeof(struct bm_portal),
++ GFP_KERNEL);
++ if (!bm_cleanup_portal) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_enable();
++#endif
++ return -ENOMEM;
++ }
++ init_bm_portal(bm_alloced_portal, bm_cleanup_portal);
++ }
++
++ /* OOS the FQs associated with this process */
++ qm_check_and_destroy_fqs(qm_cleanup_portal, ctx, check_channel_device);
++
++ while (backend->id_type != usdpaa_id_max) {
++ int leaks = 0;
++ list_for_each_entry(res, &ctx->resources[backend->id_type],
++ list) {
++ if (backend->id_type == usdpaa_id_fqid) {
++ int i = 0;
++ for (; i < res->num; i++) {
++ /* Clean FQs with the cleanup portal */
++ qm_shutdown_fq(portal_array,
++ portal_count,
++ res->id + i);
++ }
++ }
++ leaks += res->num;
++ backend->release(res->id, res->num);
++ }
++ if (leaks)
++ pr_crit("USDPAA process leaking %d %s%s\n", leaks,
++ backend->acronym, (leaks > 1) ? "s" : "");
++ backend++;
++ }
++ /* Release any DMA regions */
++ spin_lock(&mem_lock);
++ list_for_each_entry_safe(map, tmpmap, &ctx->maps, list) {
++ struct mem_fragment *current_frag = map->root_frag;
++ int i;
++ if (map->root_frag->has_locking &&
++ (map->root_frag->owner == map)) {
++ map->root_frag->owner = NULL;
++ wake_up(&map->root_frag->wq);
++ }
++ /* Check each fragment and merge if the ref count is 0 */
++ for (i = 0; i < map->frag_count; i++) {
++ --current_frag->refs;
++ current_frag = list_entry(current_frag->list.prev,
++ struct mem_fragment, list);
++ }
++
++ compress_frags();
++ list_del(&map->list);
++ kfree(map);
++ }
++ spin_unlock(&mem_lock);
++
++ /* Return portals */
++ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
++ if (portal->user.type == usdpaa_portal_qman) {
++ /* Give the portal back to the allocator */
++ init_qm_portal(portal->qportal,
++ &portal->qman_portal_low);
++ qm_put_unused_portal(portal->qportal);
++ } else {
++ init_bm_portal(portal->bportal,
++ &portal->bman_portal_low);
++ bm_put_unused_portal(portal->bportal);
++ }
++ list_del(&portal->list);
++ kfree(portal);
++ }
++ if (qm_alloced_portal) {
++ qm_put_unused_portal(qm_alloced_portal);
++ kfree(qm_cleanup_portal);
++ }
++ if (bm_alloced_portal) {
++ bm_put_unused_portal(bm_alloced_portal);
++ kfree(bm_cleanup_portal);
++ }
++
++ kfree(ctx);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_enable();
++#endif
++ return 0;
++}
++
++static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma,
++ int *match, unsigned long *pfn)
++{
++ struct mem_mapping *map;
++
++ list_for_each_entry(map, &ctx->maps, list) {
++ int i;
++ struct mem_fragment *frag = map->root_frag;
++
++ for (i = 0; i < map->frag_count; i++) {
++ if (frag->pfn_base == vma->vm_pgoff) {
++ *match = 1;
++ *pfn = frag->pfn_base;
++ return 0;
++ }
++ frag = list_entry(frag->list.next, struct mem_fragment,
++ list);
++ }
++ }
++ *match = 0;
++ return 0;
++}
++
++static int check_mmap_resource(struct resource *res, struct vm_area_struct *vma,
++ int *match, unsigned long *pfn)
++{
++ *pfn = res->start >> PAGE_SHIFT;
++ if (*pfn == vma->vm_pgoff) {
++ *match = 1;
++ if ((vma->vm_end - vma->vm_start) != resource_size(res))
++ return -EINVAL;
++ } else
++ *match = 0;
++ return 0;
++}
++
++static int check_mmap_portal(struct ctx *ctx, struct vm_area_struct *vma,
++ int *match, unsigned long *pfn)
++{
++ struct portal_mapping *portal;
++ int ret;
++
++ list_for_each_entry(portal, &ctx->portals, list) {
++ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CE], vma,
++ match, pfn);
++ if (*match) {
++ vma->vm_page_prot =
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ pgprot_cached_ns(vma->vm_page_prot);
++#else
++ pgprot_cached_noncoherent(vma->vm_page_prot);
++#endif
++ return ret;
++ }
++ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CI], vma,
++ match, pfn);
++ if (*match) {
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++ return ret;
++ }
++ }
++ *match = 0;
++ return 0;
++}
++
++static int usdpaa_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ struct ctx *ctx = filp->private_data;
++ unsigned long pfn = 0;
++ int match, ret;
++
++ spin_lock(&mem_lock);
++ ret = check_mmap_dma(ctx, vma, &match, &pfn);
++ if (!match)
++ ret = check_mmap_portal(ctx, vma, &match, &pfn);
++ spin_unlock(&mem_lock);
++ if (!match)
++ return -EINVAL;
++ if (!ret)
++ ret = remap_pfn_range(vma, vma->vm_start, pfn,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
++ return ret;
++}
++
++/* Return the nearest rounded-up address >= 'addr' that is 'sz'-aligned. 'sz'
++ * must be a power of 2, but both 'addr' and 'sz' can be expressions. */
++#define USDPAA_MEM_ROUNDUP(addr, sz) \
++ ({ \
++ unsigned long foo_align = (sz) - 1; \
++ ((addr) + foo_align) & ~foo_align; \
++ })
++/* Searching for a size-aligned virtual address range starting from 'addr' */
++static unsigned long usdpaa_get_unmapped_area(struct file *file,
++ unsigned long addr,
++ unsigned long len,
++ unsigned long pgoff,
++ unsigned long flags)
++{
++ struct vm_area_struct *vma;
++
++ if (len % PAGE_SIZE)
++ return -EINVAL;
++ if (!len)
++ return -EINVAL;
++
++	/* Need to align the address to the largest page size of the mapping
++ * because the MMU requires the virtual address to have the same
++ * alignment as the physical address */
++ addr = USDPAA_MEM_ROUNDUP(addr, largest_page_size(len));
++ vma = find_vma(current->mm, addr);
++ /* Keep searching until we reach the end of currently-used virtual
++ * address-space or we find a big enough gap. */
++ while (vma) {
++ if ((addr + len) < vma->vm_start)
++ return addr;
++
++ addr = USDPAA_MEM_ROUNDUP(vma->vm_end, largest_page_size(len));
++ vma = vma->vm_next;
++ }
++ if ((TASK_SIZE - len) < addr)
++ return -ENOMEM;
++ return addr;
++}
++
++static long ioctl_id_alloc(struct ctx *ctx, void __user *arg)
++{
++ struct usdpaa_ioctl_id_alloc i;
++ const struct alloc_backend *backend;
++ struct active_resource *res;
++ int ret = copy_from_user(&i, arg, sizeof(i));
++ if (ret)
++ return ret;
++ if ((i.id_type >= usdpaa_id_max) || !i.num)
++ return -EINVAL;
++ backend = &alloc_backends[i.id_type];
++ /* Allocate the required resource type */
++ ret = backend->alloc(&i.base, i.num, i.align, i.partial);
++ if (ret < 0)
++ return ret;
++ i.num = ret;
++ /* Copy the result to user-space */
++ ret = copy_to_user(arg, &i, sizeof(i));
++ if (ret) {
++ backend->release(i.base, i.num);
++ return ret;
++ }
++ /* Assign the allocated range to the FD accounting */
++ res = kmalloc(sizeof(*res), GFP_KERNEL);
++ if (!res) {
++ backend->release(i.base, i.num);
++ return -ENOMEM;
++ }
++ spin_lock(&ctx->lock);
++ res->id = i.base;
++ res->num = i.num;
++ res->refcount = 1;
++ list_add(&res->list, &ctx->resources[i.id_type]);
++ spin_unlock(&ctx->lock);
++ return 0;
++}
++
++static long ioctl_id_release(struct ctx *ctx, void __user *arg)
++{
++ struct usdpaa_ioctl_id_release i;
++ const struct alloc_backend *backend;
++ struct active_resource *tmp, *pos;
++
++ int ret = copy_from_user(&i, arg, sizeof(i));
++ if (ret)
++ return ret;
++ if ((i.id_type >= usdpaa_id_max) || !i.num)
++ return -EINVAL;
++ backend = &alloc_backends[i.id_type];
++ /* Pull the range out of the FD accounting - the range is valid iff this
++ * succeeds. */
++ spin_lock(&ctx->lock);
++ list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
++ if (pos->id == i.base && pos->num == i.num) {
++ pos->refcount--;
++ if (pos->refcount) {
++ spin_unlock(&ctx->lock);
++ return 0; /* Still being used */
++ }
++ list_del(&pos->list);
++ kfree(pos);
++ spin_unlock(&ctx->lock);
++ goto found;
++ }
++ }
++ /* Failed to find the resource */
++ spin_unlock(&ctx->lock);
++ pr_err("Couldn't find resource type %d base 0x%x num %d\n",
++ i.id_type, i.base, i.num);
++ return -EINVAL;
++found:
++ /* Release the resource to the backend */
++ backend->release(i.base, i.num);
++ return 0;
++}
++
++static long ioctl_id_reserve(struct ctx *ctx, void __user *arg)
++{
++ struct usdpaa_ioctl_id_reserve i;
++ const struct alloc_backend *backend;
++ struct active_resource *tmp, *pos;
++
++ int ret = copy_from_user(&i, arg, sizeof(i));
++ if (ret)
++ return ret;
++ if ((i.id_type >= usdpaa_id_max) || !i.num)
++ return -EINVAL;
++ backend = &alloc_backends[i.id_type];
++ if (!backend->reserve)
++ return -EINVAL;
++ /* Pull the range out of the FD accounting - the range is valid iff this
++ * succeeds. */
++ spin_lock(&ctx->lock);
++ list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
++ if (pos->id == i.base && pos->num == i.num) {
++ pos->refcount++;
++ spin_unlock(&ctx->lock);
++ return 0;
++ }
++ }
++
++ /* Failed to find the resource */
++ spin_unlock(&ctx->lock);
++
++ /* Reserve the resource in the backend */
++ ret = backend->reserve(i.base, i.num);
++ if (ret)
++ return ret;
++ /* Assign the reserved range to the FD accounting */
++ pos = kmalloc(sizeof(*pos), GFP_KERNEL);
++ if (!pos) {
++ backend->release(i.base, i.num);
++ return -ENOMEM;
++ }
++ spin_lock(&ctx->lock);
++ pos->id = i.base;
++ pos->num = i.num;
++ pos->refcount = 1;
++ list_add(&pos->list, &ctx->resources[i.id_type]);
++ spin_unlock(&ctx->lock);
++ return 0;
++}
++
++static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
++ struct usdpaa_ioctl_dma_map *i)
++{
++ struct mem_fragment *frag, *start_frag, *next_frag;
++ struct mem_mapping *map, *tmp;
++ int ret = 0;
++ u32 largest_page, so_far = 0;
++ int frag_count = 0;
++ unsigned long next_addr = PAGE_SIZE, populate;
++
++ /* error checking to ensure values copied from user space are valid */
++ if (i->len % PAGE_SIZE)
++ return -EINVAL;
++
++ map = kmalloc(sizeof(*map), GFP_KERNEL);
++ if (!map)
++ return -ENOMEM;
++
++ spin_lock(&mem_lock);
++ if (i->flags & USDPAA_DMA_FLAG_SHARE) {
++ list_for_each_entry(frag, &mem_list, list) {
++ if (frag->refs && (frag->flags &
++ USDPAA_DMA_FLAG_SHARE) &&
++ !strncmp(i->name, frag->name,
++ USDPAA_DMA_NAME_MAX)) {
++ /* Matching entry */
++ if ((i->flags & USDPAA_DMA_FLAG_CREATE) &&
++ !(i->flags & USDPAA_DMA_FLAG_LAZY)) {
++ ret = -EBUSY;
++ goto out;
++ }
++
++ /* Check to ensure size matches record */
++				if (i->len != frag->map_len && i->len) {
++					pr_err("ioctl_dma_map() Size requested does not match %s and is non-zero\n",
++					       frag->name);
++					ret = -EINVAL;
++					goto out;
++				}
++
++ /* Check if this has already been mapped
++ to this process */
++ list_for_each_entry(tmp, &ctx->maps, list)
++ if (tmp->root_frag == frag) {
++ /* Already mapped, just need to
++ inc ref count */
++ tmp->refs++;
++ kfree(map);
++ i->did_create = 0;
++ i->len = tmp->total_size;
++ i->phys_addr = frag->base;
++ i->ptr = tmp->virt_addr;
++ spin_unlock(&mem_lock);
++ return 0;
++ }
++ /* Matching entry - just need to map */
++ i->has_locking = frag->has_locking;
++ i->did_create = 0;
++ i->len = frag->map_len;
++ start_frag = frag;
++ goto do_map;
++ }
++ }
++ /* No matching entry */
++ if (!(i->flags & USDPAA_DMA_FLAG_CREATE)) {
++ pr_err("ioctl_dma_map() No matching entry\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++ }
++ /* New fragment required, size must be provided. */
++ if (!i->len) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++	/* Find one or more contiguous fragments that satisfy the total
++	   length, trying to minimize the number of fragments; compute the
++	   largest page size that the allocation could use */
++ largest_page = largest_page_size(i->len);
++ start_frag = NULL;
++ while (largest_page &&
++ largest_page <= largest_page_size(phys_size) &&
++ start_frag == NULL) {
++ /* Search the list for a frag of that size */
++ list_for_each_entry(frag, &mem_list, list) {
++ if (!frag->refs && (frag->len == largest_page)) {
++				/* See if the next x fragments are free
++				   and can accommodate the size */
++ u32 found_size = largest_page;
++ next_frag = list_entry(frag->list.prev,
++ struct mem_fragment,
++ list);
++				/* If the fragment is too small, check
++				   if the neighbours can support it */
++ while (found_size < i->len) {
++ if (&mem_list == &next_frag->list)
++ break; /* End of list */
++ if (next_frag->refs != 0 ||
++ next_frag->len == 0)
++ break; /* not enough space */
++ found_size += next_frag->len;
++ next_frag = list_entry(
++ next_frag->list.prev,
++ struct mem_fragment,
++ list);
++ }
++ if (found_size >= i->len) {
++					/* Success! There is enough contiguous
++					   free space */
++ start_frag = frag;
++ break;
++ }
++ }
++ } /* next frag loop */
++		/* Couldn't satisfy the request at this page size,
++		   try the next larger one */
++		largest_page <<= 2;
++ }
++ if (start_frag == NULL) {
++ /* Couldn't find proper amount of space */
++ ret = -ENOMEM;
++ goto out;
++ }
++ i->did_create = 1;
++do_map:
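++	/* 'start_frag' now heads the fragment run backing this mapping;
++	 * reserve a size-aligned virtual range, then take a reference on
++	 * (splitting as needed) each fragment in the run. */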
++ /* Verify there is sufficient space to do the mapping */
++ down_write(&current->mm->mmap_sem);
++ next_addr = usdpaa_get_unmapped_area(fp, next_addr, i->len, 0, 0);
++ up_write(&current->mm->mmap_sem);
++
++ if (next_addr & ~PAGE_MASK) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++	/* We may need to divide the final fragment to accommodate the mapping */
++ next_frag = start_frag;
++ while (so_far != i->len) {
++ BUG_ON(next_frag->len == 0);
++ while ((next_frag->len + so_far) > i->len) {
++ /* Split frag until they match */
++ split_frag(next_frag);
++ }
++ so_far += next_frag->len;
++ next_frag->refs++;
++ ++frag_count;
++ next_frag = list_entry(next_frag->list.prev,
++ struct mem_fragment, list);
++ }
++ if (i->did_create) {
++ size_t name_len = 0;
++ start_frag->flags = i->flags;
++ strncpy(start_frag->name, i->name, USDPAA_DMA_NAME_MAX);
++ name_len = strnlen(start_frag->name, USDPAA_DMA_NAME_MAX);
++ if (name_len >= USDPAA_DMA_NAME_MAX) {
++ ret = -EFAULT;
++ goto out;
++ }
++ start_frag->map_len = i->len;
++ start_frag->has_locking = i->has_locking;
++ init_waitqueue_head(&start_frag->wq);
++ start_frag->owner = NULL;
++ }
++
++ /* Setup the map entry */
++ map->root_frag = start_frag;
++ map->total_size = i->len;
++ map->frag_count = frag_count;
++ map->refs = 1;
++ list_add(&map->list, &ctx->maps);
++ i->phys_addr = start_frag->base;
++out:
++ spin_unlock(&mem_lock);
++
++ if (!ret) {
++ unsigned long longret;
++ down_write(&current->mm->mmap_sem);
++ longret = do_mmap_pgoff(fp, next_addr, map->total_size,
++ PROT_READ |
++ (i->flags &
++ USDPAA_DMA_FLAG_RDONLY ? 0
++ : PROT_WRITE),
++ MAP_SHARED,
++ start_frag->pfn_base,
++ &populate);
++ up_write(&current->mm->mmap_sem);
++ if (longret & ~PAGE_MASK) {
++ ret = (int)longret;
++ } else {
++ i->ptr = (void *)longret;
++ map->virt_addr = i->ptr;
++ }
++ } else
++ kfree(map);
++ return ret;
++}
++
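++/* Undo an ioctl_dma_map(): drop this process's reference to the mapping,
++   releasing the map lock if we hold it; once the last reference is gone,
++   decrement each fragment's refcount, coalesce free neighbours and tear
++   down the VMA. */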
++static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg)
++{
++ struct mem_mapping *map;
++ struct vm_area_struct *vma;
++ int ret, i;
++ struct mem_fragment *current_frag;
++ size_t sz;
++ unsigned long base;
++ unsigned long vaddr;
++
++ down_write(&current->mm->mmap_sem);
++ vma = find_vma(current->mm, (unsigned long)arg);
++ if (!vma || (vma->vm_start > (unsigned long)arg)) {
++ up_write(&current->mm->mmap_sem);
++ return -EFAULT;
++ }
++ spin_lock(&mem_lock);
++ list_for_each_entry(map, &ctx->maps, list) {
++ if (map->root_frag->pfn_base == vma->vm_pgoff) {
++ /* Drop the map lock if we hold it */
++ if (map->root_frag->has_locking &&
++ (map->root_frag->owner == map)) {
++ map->root_frag->owner = NULL;
++ wake_up(&map->root_frag->wq);
++ }
++ goto map_match;
++ }
++ }
++ /* Failed to find a matching mapping for this process */
++ ret = -EFAULT;
++ spin_unlock(&mem_lock);
++ goto out;
++map_match:
++ map->refs--;
++ if (map->refs != 0) {
++		/* Another call to dma_map is referencing this */
++ ret = 0;
++ spin_unlock(&mem_lock);
++ goto out;
++ }
++
++ current_frag = map->root_frag;
++ vaddr = (unsigned long) map->virt_addr;
++ for (i = 0; i < map->frag_count; i++) {
++ DPA_ASSERT(current_frag->refs > 0);
++ --current_frag->refs;
++#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
++ /*
++ * Make sure we invalidate the TLB entry for
++ * this fragment, otherwise a remap of a different
++		 * page to this vaddr would give access to an
++ * incorrect piece of memory
++ */
++ cleartlbcam(vaddr, mfspr(SPRN_PID));
++#endif
++ vaddr += current_frag->len;
++ current_frag = list_entry(current_frag->list.prev,
++ struct mem_fragment, list);
++ }
++ map->root_frag->name[0] = 0;
++ list_del(&map->list);
++ compress_frags();
++ spin_unlock(&mem_lock);
++
++ base = vma->vm_start;
++ sz = vma->vm_end - vma->vm_start;
++ do_munmap(current->mm, base, sz);
++ ret = 0;
++ out:
++ up_write(&current->mm->mmap_sem);
++ return ret;
++}
++
++static long ioctl_dma_stats(struct ctx *ctx, void __user *arg)
++{
++ struct mem_fragment *frag;
++ struct usdpaa_ioctl_dma_used result;
++
++ result.free_bytes = 0;
++ result.total_bytes = phys_size;
++
++ list_for_each_entry(frag, &mem_list, list) {
++ if (frag->refs == 0)
++ result.free_bytes += frag->len;
++ }
++
++	/* copy_to_user() returns the number of bytes left uncopied, not an errno */
++	return copy_to_user(arg, &result, sizeof(result)) ? -EFAULT : 0;
++}
++
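++/* Predicate for the wait in ioctl_dma_lock(): atomically claim ownership of
++   the root fragment if it is currently unowned. */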
++static int test_lock(struct mem_mapping *map)
++{
++ int ret = 0;
++ spin_lock(&mem_lock);
++ if (!map->root_frag->owner) {
++ map->root_frag->owner = map;
++ ret = 1;
++ }
++ spin_unlock(&mem_lock);
++ return ret;
++}
++
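++/* Block until the calling process owns the lock on the mapped region; the
++   lock is released by DMA_UNLOCK or implicitly when the region is unmapped. */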
++static long ioctl_dma_lock(struct ctx *ctx, void __user *arg)
++{
++ struct mem_mapping *map;
++ struct vm_area_struct *vma;
++
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(current->mm, (unsigned long)arg);
++ if (!vma || (vma->vm_start > (unsigned long)arg)) {
++ up_read(&current->mm->mmap_sem);
++ return -EFAULT;
++ }
++ spin_lock(&mem_lock);
++ list_for_each_entry(map, &ctx->maps, list) {
++ if (map->root_frag->pfn_base == vma->vm_pgoff)
++ goto map_match;
++ }
++ map = NULL;
++map_match:
++ spin_unlock(&mem_lock);
++ up_read(&current->mm->mmap_sem);
++
++ if (!map)
++ return -EFAULT;
++ if (!map->root_frag->has_locking)
++ return -ENODEV;
++ return wait_event_interruptible(map->root_frag->wq, test_lock(map));
++}
++
++static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg)
++{
++ struct mem_mapping *map;
++ struct vm_area_struct *vma;
++ int ret;
++
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(current->mm, (unsigned long)arg);
++ if (!vma || (vma->vm_start > (unsigned long)arg))
++ ret = -EFAULT;
++ else {
++ spin_lock(&mem_lock);
++ list_for_each_entry(map, &ctx->maps, list) {
++ if (map->root_frag->pfn_base == vma->vm_pgoff) {
++ if (!map->root_frag->has_locking)
++ ret = -ENODEV;
++ else if (map->root_frag->owner == map) {
++ map->root_frag->owner = NULL;
++ wake_up(&map->root_frag->wq);
++ ret = 0;
++ } else
++ ret = -EBUSY;
++ goto map_match;
++ }
++ }
++ ret = -EINVAL;
++map_match:
++ spin_unlock(&mem_lock);
++ }
++ up_read(&current->mm->mmap_sem);
++ return ret;
++}
++
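++/* Map one of a portal's register regions (cache-enabled or cache-inhibited)
++   into the caller; do_mmap_pgoff() signals failure with a return value that
++   is not page aligned. */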
++static int portal_mmap(struct file *fp, struct resource *res, void **ptr)
++{
++ unsigned long longret = 0, populate;
++ resource_size_t len;
++
++ down_write(&current->mm->mmap_sem);
++ len = resource_size(res);
++ if (len != (unsigned long)len)
++ return -EINVAL;
++ longret = do_mmap_pgoff(fp, PAGE_SIZE, (unsigned long)len,
++ PROT_READ | PROT_WRITE, MAP_SHARED,
++ res->start >> PAGE_SHIFT, &populate);
++ up_write(&current->mm->mmap_sem);
++
++ if (longret & ~PAGE_MASK)
++ return (int)longret;
++
++ *ptr = (void *) longret;
++ return 0;
++}
++
++static void portal_munmap(struct resource *res, void *ptr)
++{
++ down_write(&current->mm->mmap_sem);
++ do_munmap(current->mm, (unsigned long)ptr, resource_size(res));
++ up_write(&current->mm->mmap_sem);
++}
++
++static long ioctl_portal_map(struct file *fp, struct ctx *ctx,
++ struct usdpaa_ioctl_portal_map *arg)
++{
++ struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
++ int ret;
++
++ if (!mapping)
++ return -ENOMEM;
++
++ mapping->user = *arg;
++ mapping->iommu_domain = NULL;
++
++ if (mapping->user.type == usdpaa_portal_qman) {
++ mapping->qportal =
++ qm_get_unused_portal_idx(mapping->user.index);
++ if (!mapping->qportal) {
++ ret = -ENODEV;
++ goto err_get_portal;
++ }
++ mapping->phys = &mapping->qportal->addr_phys[0];
++ mapping->user.channel = mapping->qportal->public_cfg.channel;
++ mapping->user.pools = mapping->qportal->public_cfg.pools;
++ mapping->user.index = mapping->qportal->public_cfg.index;
++ } else if (mapping->user.type == usdpaa_portal_bman) {
++ mapping->bportal =
++ bm_get_unused_portal_idx(mapping->user.index);
++ if (!mapping->bportal) {
++ ret = -ENODEV;
++ goto err_get_portal;
++ }
++ mapping->phys = &mapping->bportal->addr_phys[0];
++ mapping->user.index = mapping->bportal->public_cfg.index;
++ } else {
++ ret = -EINVAL;
++ goto err_copy_from_user;
++ }
++	/* Need to put the mapping in ctx's list before the mmaps because the
++	 * mmap handlers look it up. */
++ spin_lock(&mem_lock);
++ list_add(&mapping->list, &ctx->portals);
++ spin_unlock(&mem_lock);
++ ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CE],
++ &mapping->user.addr.cena);
++ if (ret)
++ goto err_mmap_cena;
++ ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CI],
++ &mapping->user.addr.cinh);
++ if (ret)
++ goto err_mmap_cinh;
++ *arg = mapping->user;
++ return ret;
++
++err_mmap_cinh:
++ portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
++err_mmap_cena:
++ if ((mapping->user.type == usdpaa_portal_qman) && mapping->qportal)
++ qm_put_unused_portal(mapping->qportal);
++ else if ((mapping->user.type == usdpaa_portal_bman) && mapping->bportal)
++ bm_put_unused_portal(mapping->bportal);
++ spin_lock(&mem_lock);
++ list_del(&mapping->list);
++ spin_unlock(&mem_lock);
++err_get_portal:
++err_copy_from_user:
++ kfree(mapping);
++ return ret;
++}
++
++static long ioctl_portal_unmap(struct ctx *ctx, struct usdpaa_portal_map *i)
++{
++ struct portal_mapping *mapping;
++ struct vm_area_struct *vma;
++ unsigned long pfn;
++ u32 channel;
++
++ /* Get the PFN corresponding to one of the virt addresses */
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(current->mm, (unsigned long)i->cinh);
++ if (!vma || (vma->vm_start > (unsigned long)i->cinh)) {
++ up_read(&current->mm->mmap_sem);
++ return -EFAULT;
++ }
++ pfn = vma->vm_pgoff;
++ up_read(&current->mm->mmap_sem);
++
++ /* Find the corresponding portal */
++ spin_lock(&mem_lock);
++ list_for_each_entry(mapping, &ctx->portals, list) {
++ if (pfn == (mapping->phys[DPA_PORTAL_CI].start >> PAGE_SHIFT))
++ goto found;
++ }
++ mapping = NULL;
++found:
++ if (mapping)
++ list_del(&mapping->list);
++ spin_unlock(&mem_lock);
++ if (!mapping)
++ return -ENODEV;
++ portal_munmap(&mapping->phys[DPA_PORTAL_CI], mapping->user.addr.cinh);
++ portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
++ if (mapping->user.type == usdpaa_portal_qman) {
++ init_qm_portal(mapping->qportal,
++ &mapping->qman_portal_low);
++
++ /* Tear down any FQs this portal is referencing */
++ channel = mapping->qportal->public_cfg.channel;
++ qm_check_and_destroy_fqs(&mapping->qman_portal_low,
++ &channel,
++ check_portal_channel);
++ qm_put_unused_portal(mapping->qportal);
++ } else if (mapping->user.type == usdpaa_portal_bman) {
++ init_bm_portal(mapping->bportal,
++ &mapping->bman_portal_low);
++ bm_put_unused_portal(mapping->bportal);
++ }
++ kfree(mapping);
++ return 0;
++}
++
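++/* Configure the PAMU (platform IOMMU) for a raw portal: set up a domain with
++   a single window covering a 36-bit aperture, apply the requested stashing
++   attributes and attach the portal device. If any step fails, the portal is
++   still usable, just without stashing. */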
++static void portal_config_pamu(struct qm_portal_config *pcfg, uint8_t sdest,
++ uint32_t cpu, uint32_t cache, uint32_t window)
++{
++#ifdef CONFIG_FSL_PAMU
++ int ret;
++ int window_count = 1;
++ struct iommu_domain_geometry geom_attr;
++ struct pamu_stash_attribute stash_attr;
++
++ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
++ if (!pcfg->iommu_domain) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
++ __func__);
++ goto _no_iommu;
++ }
++ geom_attr.aperture_start = 0;
++ geom_attr.aperture_end =
++ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
++ geom_attr.force_aperture = true;
++ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
++ &geom_attr);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
++ &window_count);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ stash_attr.cpu = cpu;
++ stash_attr.cache = cache;
++ /* set stash information for the window */
++ stash_attr.window = 0;
++
++ ret = iommu_domain_set_attr(pcfg->iommu_domain,
++ DOMAIN_ATTR_FSL_PAMU_STASH,
++ &stash_attr);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
++ IOMMU_READ | IOMMU_WRITE);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ ret = iommu_domain_set_attr(pcfg->iommu_domain,
++ DOMAIN_ATTR_FSL_PAMU_ENABLE,
++ &window_count);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
++ __func__, ret);
++ goto _iommu_detach_device;
++ }
++_no_iommu:
++#endif
++
++#ifdef CONFIG_FSL_QMAN_CONFIG
++ if (qman_set_sdest(pcfg->public_cfg.channel, sdest))
++#endif
++ pr_warn("Failed to set QMan portal's stash request queue\n");
++
++ return;
++
++#ifdef CONFIG_FSL_PAMU
++_iommu_detach_device:
++ iommu_detach_device(pcfg->iommu_domain, NULL);
++_iommu_domain_free:
++ iommu_domain_free(pcfg->iommu_domain);
++#endif
++}
++
++static long ioctl_allocate_raw_portal(struct file *fp, struct ctx *ctx,
++ struct usdpaa_ioctl_raw_portal *arg)
++{
++ struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
++ int ret;
++
++ if (!mapping)
++ return -ENOMEM;
++
++ mapping->user.type = arg->type;
++ mapping->iommu_domain = NULL;
++ if (arg->type == usdpaa_portal_qman) {
++ mapping->qportal = qm_get_unused_portal_idx(arg->index);
++ if (!mapping->qportal) {
++ ret = -ENODEV;
++ goto err;
++ }
++ mapping->phys = &mapping->qportal->addr_phys[0];
++ arg->index = mapping->qportal->public_cfg.index;
++ arg->cinh = mapping->qportal->addr_phys[DPA_PORTAL_CI].start;
++ arg->cena = mapping->qportal->addr_phys[DPA_PORTAL_CE].start;
++ if (arg->enable_stash) {
++ /* Setup the PAMU with the supplied parameters */
++ portal_config_pamu(mapping->qportal, arg->sdest,
++ arg->cpu, arg->cache, arg->window);
++ }
++ } else if (mapping->user.type == usdpaa_portal_bman) {
++ mapping->bportal =
++ bm_get_unused_portal_idx(arg->index);
++ if (!mapping->bportal) {
++ ret = -ENODEV;
++ goto err;
++ }
++ mapping->phys = &mapping->bportal->addr_phys[0];
++ arg->index = mapping->bportal->public_cfg.index;
++ arg->cinh = mapping->bportal->addr_phys[DPA_PORTAL_CI].start;
++ arg->cena = mapping->bportal->addr_phys[DPA_PORTAL_CE].start;
++ } else {
++ ret = -EINVAL;
++ goto err;
++ }
++	/* Need to put the mapping in ctx's list before the mmaps because the
++	 * mmap handlers look it up. */
++ spin_lock(&mem_lock);
++ list_add(&mapping->list, &ctx->portals);
++ spin_unlock(&mem_lock);
++ return 0;
++err:
++ kfree(mapping);
++ return ret;
++}
++
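++/* Release a raw portal handed out by ioctl_allocate_raw_portal(), matching
++   on the cache-inhibited physical address supplied by userspace. */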
++static long ioctl_free_raw_portal(struct file *fp, struct ctx *ctx,
++ struct usdpaa_ioctl_raw_portal *arg)
++{
++ struct portal_mapping *mapping;
++ u32 channel;
++
++ /* Find the corresponding portal */
++ spin_lock(&mem_lock);
++ list_for_each_entry(mapping, &ctx->portals, list) {
++ if (mapping->phys[DPA_PORTAL_CI].start == arg->cinh)
++ goto found;
++ }
++ mapping = NULL;
++found:
++ if (mapping)
++ list_del(&mapping->list);
++ spin_unlock(&mem_lock);
++ if (!mapping)
++ return -ENODEV;
++ if (mapping->user.type == usdpaa_portal_qman) {
++ init_qm_portal(mapping->qportal,
++ &mapping->qman_portal_low);
++
++ /* Tear down any FQs this portal is referencing */
++ channel = mapping->qportal->public_cfg.channel;
++ qm_check_and_destroy_fqs(&mapping->qman_portal_low,
++ &channel,
++ check_portal_channel);
++ qm_put_unused_portal(mapping->qportal);
++ } else if (mapping->user.type == usdpaa_portal_bman) {
++ init_bm_portal(mapping->bportal,
++ &mapping->bman_portal_low);
++ bm_put_unused_portal(mapping->bportal);
++ }
++ kfree(mapping);
++ return 0;
++}
++
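++/* Top-level ioctl dispatcher: commands whose payloads carry results are
++   copied in from userspace, handled, then copied back so updated fields
++   (addresses, sizes, indices) reach the caller. */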
++static long usdpaa_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ctx *ctx = fp->private_data;
++ void __user *a = (void __user *)arg;
++ switch (cmd) {
++ case USDPAA_IOCTL_ID_ALLOC:
++ return ioctl_id_alloc(ctx, a);
++ case USDPAA_IOCTL_ID_RELEASE:
++ return ioctl_id_release(ctx, a);
++ case USDPAA_IOCTL_ID_RESERVE:
++ return ioctl_id_reserve(ctx, a);
++ case USDPAA_IOCTL_DMA_MAP:
++ {
++ struct usdpaa_ioctl_dma_map input;
++ int ret;
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ ret = ioctl_dma_map(fp, ctx, &input);
++ if (copy_to_user(a, &input, sizeof(input)))
++ return -EFAULT;
++ return ret;
++ }
++ case USDPAA_IOCTL_DMA_UNMAP:
++ return ioctl_dma_unmap(ctx, a);
++ case USDPAA_IOCTL_DMA_LOCK:
++ return ioctl_dma_lock(ctx, a);
++ case USDPAA_IOCTL_DMA_UNLOCK:
++ return ioctl_dma_unlock(ctx, a);
++ case USDPAA_IOCTL_PORTAL_MAP:
++ {
++ struct usdpaa_ioctl_portal_map input;
++ int ret;
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ ret = ioctl_portal_map(fp, ctx, &input);
++ if (copy_to_user(a, &input, sizeof(input)))
++ return -EFAULT;
++ return ret;
++ }
++ case USDPAA_IOCTL_PORTAL_UNMAP:
++ {
++ struct usdpaa_portal_map input;
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ return ioctl_portal_unmap(ctx, &input);
++ }
++ case USDPAA_IOCTL_DMA_USED:
++ return ioctl_dma_stats(ctx, a);
++ case USDPAA_IOCTL_ALLOC_RAW_PORTAL:
++ {
++ struct usdpaa_ioctl_raw_portal input;
++ int ret;
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ ret = ioctl_allocate_raw_portal(fp, ctx, &input);
++ if (copy_to_user(a, &input, sizeof(input)))
++ return -EFAULT;
++ return ret;
++ }
++ case USDPAA_IOCTL_FREE_RAW_PORTAL:
++ {
++ struct usdpaa_ioctl_raw_portal input;
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ return ioctl_free_raw_portal(fp, ctx, &input);
++ }
++ }
++ return -EINVAL;
++}
++
++static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd,
++ unsigned long arg)
++{
++#ifdef CONFIG_COMPAT
++ struct ctx *ctx = fp->private_data;
++ void __user *a = (void __user *)arg;
++#endif
++ switch (cmd) {
++#ifdef CONFIG_COMPAT
++ case USDPAA_IOCTL_DMA_MAP_COMPAT:
++ {
++ int ret;
++ struct usdpaa_ioctl_dma_map_compat input;
++ struct usdpaa_ioctl_dma_map converted;
++
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++
++ converted.ptr = compat_ptr(input.ptr);
++ converted.phys_addr = input.phys_addr;
++ converted.len = input.len;
++ converted.flags = input.flags;
++ strncpy(converted.name, input.name, USDPAA_DMA_NAME_MAX);
++ converted.has_locking = input.has_locking;
++ converted.did_create = input.did_create;
++
++ ret = ioctl_dma_map(fp, ctx, &converted);
++ input.ptr = ptr_to_compat(converted.ptr);
++ input.phys_addr = converted.phys_addr;
++ input.len = converted.len;
++ input.flags = converted.flags;
++ strncpy(input.name, converted.name, USDPAA_DMA_NAME_MAX);
++ input.has_locking = converted.has_locking;
++ input.did_create = converted.did_create;
++ if (copy_to_user(a, &input, sizeof(input)))
++ return -EFAULT;
++ return ret;
++ }
++ case USDPAA_IOCTL_PORTAL_MAP_COMPAT:
++ {
++ int ret;
++ struct compat_usdpaa_ioctl_portal_map input;
++ struct usdpaa_ioctl_portal_map converted;
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ converted.type = input.type;
++ converted.index = input.index;
++ ret = ioctl_portal_map(fp, ctx, &converted);
++ input.addr.cinh = ptr_to_compat(converted.addr.cinh);
++ input.addr.cena = ptr_to_compat(converted.addr.cena);
++ input.channel = converted.channel;
++ input.pools = converted.pools;
++ input.index = converted.index;
++ if (copy_to_user(a, &input, sizeof(input)))
++ return -EFAULT;
++ return ret;
++ }
++ case USDPAA_IOCTL_PORTAL_UNMAP_COMPAT:
++ {
++ struct usdpaa_portal_map_compat input;
++ struct usdpaa_portal_map converted;
++
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ converted.cinh = compat_ptr(input.cinh);
++ converted.cena = compat_ptr(input.cena);
++ return ioctl_portal_unmap(ctx, &converted);
++ }
++ case USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT:
++ {
++ int ret;
++ struct usdpaa_ioctl_raw_portal converted;
++ struct compat_ioctl_raw_portal input;
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ converted.type = input.type;
++ converted.index = input.index;
++ converted.enable_stash = input.enable_stash;
++ converted.cpu = input.cpu;
++ converted.cache = input.cache;
++ converted.window = input.window;
++ converted.sdest = input.sdest;
++ ret = ioctl_allocate_raw_portal(fp, ctx, &converted);
++
++ input.cinh = converted.cinh;
++ input.cena = converted.cena;
++ input.index = converted.index;
++
++ if (copy_to_user(a, &input, sizeof(input)))
++ return -EFAULT;
++ return ret;
++ }
++ case USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT:
++ {
++ struct usdpaa_ioctl_raw_portal converted;
++ struct compat_ioctl_raw_portal input;
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ converted.type = input.type;
++ converted.index = input.index;
++ converted.cinh = input.cinh;
++ converted.cena = input.cena;
++ return ioctl_free_raw_portal(fp, ctx, &converted);
++ }
++#endif
++ default:
++ return usdpaa_ioctl(fp, cmd, arg);
++ }
++ return -EINVAL;
++}
++
++int usdpaa_get_portal_config(struct file *filp, void *cinh,
++ enum usdpaa_portal_type ptype, unsigned int *irq,
++ void **iir_reg)
++{
++ /* Walk the list of portals for filp and return the config
++ for the portal that matches the hint */
++ struct ctx *context;
++ struct portal_mapping *portal;
++
++ /* First sanitize the filp */
++ if (filp->f_op->open != usdpaa_open)
++ return -ENODEV;
++ context = filp->private_data;
++ spin_lock(&context->lock);
++ list_for_each_entry(portal, &context->portals, list) {
++ if (portal->user.type == ptype &&
++ portal->user.addr.cinh == cinh) {
++ if (ptype == usdpaa_portal_qman) {
++ *irq = portal->qportal->public_cfg.irq;
++ *iir_reg = portal->qportal->addr_virt[1] +
++ QM_REG_IIR;
++ } else {
++ *irq = portal->bportal->public_cfg.irq;
++ *iir_reg = portal->bportal->addr_virt[1] +
++ BM_REG_IIR;
++ }
++ spin_unlock(&context->lock);
++ return 0;
++ }
++ }
++ spin_unlock(&context->lock);
++ return -EINVAL;
++}
++
++static const struct file_operations usdpaa_fops = {
++ .open = usdpaa_open,
++ .release = usdpaa_release,
++ .mmap = usdpaa_mmap,
++ .get_unmapped_area = usdpaa_get_unmapped_area,
++ .unlocked_ioctl = usdpaa_ioctl,
++ .compat_ioctl = usdpaa_ioctl_compat
++};
++
++static struct miscdevice usdpaa_miscdev = {
++ .name = "fsl-usdpaa",
++ .fops = &usdpaa_fops,
++ .minor = MISC_DYNAMIC_MINOR,
++};
++
++/* Early-boot memory allocation. The boot-arg "usdpaa_mem=<x>" is used to
++ * indicate how much memory (if any) to allocate during early boot. If the
++ * format "usdpaa_mem=<x>,<y>" is used, then <y> will be interpreted as the
++ * number of TLB1 entries to reserve (default is 1). If there are more mappings
++ * than there are TLB1 entries, fault-handling will occur. */
++
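++/* e.g. "usdpaa_mem=256M,4" would request 256MB backed by 4 TLB1 entries */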
++static __init int usdpaa_mem(char *arg)
++{
++ pr_warn("uspdaa_mem argument is depracated\n");
++ arg_phys_size = memparse(arg, &arg);
++ num_tlb = 1;
++ if (*arg == ',') {
++ unsigned long ul;
++ int err = kstrtoul(arg + 1, 0, &ul);
++ if (err < 0) {
++ num_tlb = 1;
++ pr_warn("ERROR, usdpaa_mem arg is invalid\n");
++ } else
++ num_tlb = (unsigned int)ul;
++ }
++ return 0;
++}
++early_param("usdpaa_mem", usdpaa_mem);
++
++static int usdpaa_mem_init(struct reserved_mem *rmem)
++{
++ phys_start = rmem->base;
++ phys_size = rmem->size;
++
++ WARN_ON(!(phys_start && phys_size));
++
++ return 0;
++}
++RESERVEDMEM_OF_DECLARE(usdpaa_mem_init, "fsl,usdpaa-mem", usdpaa_mem_init);
++
++__init int fsl_usdpaa_init_early(void)
++{
++ if (!phys_size || !phys_start) {
++ pr_info("No USDPAA memory, no 'fsl,usdpaa-mem' in device-tree\n");
++ return 0;
++ }
++ if (phys_size % PAGE_SIZE) {
++ pr_err("'fsl,usdpaa-mem' size must be a multiple of page size\n");
++ phys_size = 0;
++ return 0;
++ }
++ if (arg_phys_size && phys_size != arg_phys_size) {
++ pr_err("'usdpaa_mem argument size (0x%llx) does not match device tree size (0x%llx)\n",
++ arg_phys_size, phys_size);
++ phys_size = 0;
++ return 0;
++ }
++ pfn_start = phys_start >> PAGE_SHIFT;
++ pfn_size = phys_size >> PAGE_SHIFT;
++#ifdef CONFIG_PPC
++ first_tlb = current_tlb = tlbcam_index;
++ tlbcam_index += num_tlb;
++#endif
++ pr_info("USDPAA region at %llx:%llx(%lx:%lx), %d TLB1 entries)\n",
++ phys_start, phys_size, pfn_start, pfn_size, num_tlb);
++ return 0;
++}
++subsys_initcall(fsl_usdpaa_init_early);
++
++
++static int __init usdpaa_init(void)
++{
++ struct mem_fragment *frag;
++ int ret;
++ u64 tmp_size = phys_size;
++ u64 tmp_start = phys_start;
++ u64 tmp_pfn_size = pfn_size;
++ u64 tmp_pfn_start = pfn_start;
++
++ pr_info("Freescale USDPAA process driver\n");
++ if (!phys_start) {
++ pr_warn("fsl-usdpaa: no region found\n");
++ return 0;
++ }
++
++ while (tmp_size != 0) {
++ u32 frag_size = largest_page_size(tmp_size);
++ frag = kmalloc(sizeof(*frag), GFP_KERNEL);
++ if (!frag) {
++ pr_err("Failed to setup USDPAA memory accounting\n");
++ return -ENOMEM;
++ }
++ frag->base = tmp_start;
++ frag->len = frag->root_len = frag_size;
++ frag->root_pfn = tmp_pfn_start;
++ frag->pfn_base = tmp_pfn_start;
++ frag->pfn_len = frag_size / PAGE_SIZE;
++ frag->refs = 0;
++ init_waitqueue_head(&frag->wq);
++ frag->owner = NULL;
++ list_add(&frag->list, &mem_list);
++
++ /* Adjust for this frag */
++ tmp_start += frag_size;
++ tmp_size -= frag_size;
++ tmp_pfn_start += frag_size / PAGE_SIZE;
++ tmp_pfn_size -= frag_size / PAGE_SIZE;
++ }
++ ret = misc_register(&usdpaa_miscdev);
++ if (ret)
++ pr_err("fsl-usdpaa: failed to register misc device\n");
++ return ret;
++}
++
++static void __exit usdpaa_exit(void)
++{
++ misc_deregister(&usdpaa_miscdev);
++}
++
++module_init(usdpaa_init);
++module_exit(usdpaa_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Freescale Semiconductor");
++MODULE_DESCRIPTION("Freescale USDPAA process driver");
+diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
+new file mode 100644
+index 00000000..914c7471
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
+@@ -0,0 +1,289 @@
++/* Copyright (c) 2013 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/* Define a device that allows USDPAA processes to open a file
++   descriptor and specify which IRQ they want to monitor using an ioctl().
++   When an IRQ is received, the device becomes readable so that a process
++   can use read() or select() type calls to monitor for IRQs. */
++
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/poll.h>
++#include <linux/uaccess.h>
++#include <linux/fsl_usdpaa.h>
++#include <linux/module.h>
++#include <linux/fdtable.h>
++#include <linux/file.h>
++
++#include "qman_low.h"
++#include "bman_low.h"
++
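++/* Per-file-descriptor state; irq_count versus last_irq_count lets read() and
++   poll() report how many interrupts have fired since the last read. */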
++struct usdpaa_irq_ctx {
++ int irq_set; /* Set to true once the irq is set via ioctl */
++ unsigned int irq_num;
++ u32 last_irq_count; /* Last value returned from read */
++ u32 irq_count; /* Number of irqs since last read */
++ wait_queue_head_t wait_queue; /* Waiting processes */
++ spinlock_t lock;
++ void *inhibit_addr; /* inhibit register address */
++ struct file *usdpaa_filp;
++ char irq_name[128];
++};
++
++static int usdpaa_irq_open(struct inode *inode, struct file *filp)
++{
++ struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
++ if (!ctx)
++ return -ENOMEM;
++ ctx->irq_set = 0;
++ ctx->irq_count = 0;
++ ctx->last_irq_count = 0;
++ init_waitqueue_head(&ctx->wait_queue);
++ spin_lock_init(&ctx->lock);
++ filp->private_data = ctx;
++ return 0;
++}
++
++static int usdpaa_irq_release(struct inode *inode, struct file *filp)
++{
++ struct usdpaa_irq_ctx *ctx = filp->private_data;
++ if (ctx->irq_set) {
++ /* Inhibit the IRQ */
++ out_be32(ctx->inhibit_addr, 0x1);
++ irq_set_affinity_hint(ctx->irq_num, NULL);
++ free_irq(ctx->irq_num, ctx);
++ ctx->irq_set = 0;
++ fput(ctx->usdpaa_filp);
++ }
++ kfree(filp->private_data);
++ return 0;
++}
++
++static irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
++{
++ unsigned long flags;
++ struct usdpaa_irq_ctx *ctx = _ctx;
++ spin_lock_irqsave(&ctx->lock, flags);
++ ++ctx->irq_count;
++ spin_unlock_irqrestore(&ctx->lock, flags);
++ wake_up_all(&ctx->wait_queue);
++	/* Set the inhibit register. This will be re-enabled
++	   once the USDPAA code handles the IRQ */
++	out_be32(ctx->inhibit_addr, 0x1);
++	pr_info("Inhibit at %p count %d\n", ctx->inhibit_addr, ctx->irq_count);
++ return IRQ_HANDLED;
++}
++
++static int map_irq(struct file *fp, struct usdpaa_ioctl_irq_map *irq_map)
++{
++ struct usdpaa_irq_ctx *ctx = fp->private_data;
++ int ret;
++
++ if (ctx->irq_set) {
++ pr_debug("Setting USDPAA IRQ when it was already set!\n");
++ return -EBUSY;
++ }
++
++ ctx->usdpaa_filp = fget(irq_map->fd);
++ if (!ctx->usdpaa_filp) {
++ pr_debug("USDPAA fget(%d) returned NULL\n", irq_map->fd);
++ return -EINVAL;
++ }
++
++ ret = usdpaa_get_portal_config(ctx->usdpaa_filp, irq_map->portal_cinh,
++ irq_map->type, &ctx->irq_num,
++ &ctx->inhibit_addr);
++ if (ret) {
++ pr_debug("USDPAA IRQ couldn't identify portal\n");
++ fput(ctx->usdpaa_filp);
++ return ret;
++ }
++
++ ctx->irq_set = 1;
++
++ snprintf(ctx->irq_name, sizeof(ctx->irq_name),
++ "usdpaa_irq %d", ctx->irq_num);
++
++ ret = request_irq(ctx->irq_num, usdpaa_irq_handler, 0,
++ ctx->irq_name, ctx);
++ if (ret) {
++ pr_err("USDPAA request_irq(%d) failed, ret= %d\n",
++ ctx->irq_num, ret);
++ ctx->irq_set = 0;
++ fput(ctx->usdpaa_filp);
++ return ret;
++ }
++ ret = irq_set_affinity(ctx->irq_num, &current->cpus_allowed);
++ if (ret)
++ pr_err("USDPAA irq_set_affinity() failed, ret= %d\n", ret);
++
++ ret = irq_set_affinity_hint(ctx->irq_num, &current->cpus_allowed);
++ if (ret)
++ pr_err("USDPAA irq_set_affinity_hint() failed, ret= %d\n", ret);
++
++ return 0;
++}
++
++static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
++ unsigned long arg)
++{
++ int ret;
++ struct usdpaa_ioctl_irq_map irq_map;
++
++ if (cmd != USDPAA_IOCTL_PORTAL_IRQ_MAP) {
++ pr_debug("USDPAA IRQ unknown command 0x%x\n", cmd);
++ return -EINVAL;
++ }
++
++ ret = copy_from_user(&irq_map, (void __user *)arg,
++ sizeof(irq_map));
++ if (ret)
++ return ret;
++ return map_irq(fp, &irq_map);
++}
++
++static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
++ size_t count, loff_t *offp)
++{
++ struct usdpaa_irq_ctx *ctx = filp->private_data;
++ int ret;
++
++ if (!ctx->irq_set) {
++ pr_debug("Reading USDPAA IRQ before it was set\n");
++ return -EINVAL;
++ }
++
++ if (count < sizeof(ctx->irq_count)) {
++ pr_debug("USDPAA IRQ Read too small\n");
++ return -EINVAL;
++ }
++ if (ctx->irq_count == ctx->last_irq_count) {
++ if (filp->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ ret = wait_event_interruptible(ctx->wait_queue,
++ ctx->irq_count != ctx->last_irq_count);
++ if (ret == -ERESTARTSYS)
++ return ret;
++ }
++
++ ctx->last_irq_count = ctx->irq_count;
++
++ if (copy_to_user(buff, &ctx->last_irq_count,
++ sizeof(ctx->last_irq_count)))
++ return -EFAULT;
++ return sizeof(ctx->irq_count);
++}
++
++static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
++{
++ struct usdpaa_irq_ctx *ctx = filp->private_data;
++ unsigned int ret = 0;
++ unsigned long flags;
++
++ if (!ctx->irq_set)
++ return POLLHUP;
++
++ poll_wait(filp, &ctx->wait_queue, wait);
++
++ spin_lock_irqsave(&ctx->lock, flags);
++ if (ctx->irq_count != ctx->last_irq_count)
++ ret |= POLLIN | POLLRDNORM;
++ spin_unlock_irqrestore(&ctx->lock, flags);
++ return ret;
++}
++
++static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
++ unsigned long arg)
++{
++#ifdef CONFIG_COMPAT
++ void __user *a = (void __user *)arg;
++#endif
++ switch (cmd) {
++#ifdef CONFIG_COMPAT
++ case USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT:
++ {
++ struct compat_ioctl_irq_map input;
++ struct usdpaa_ioctl_irq_map converted;
++ if (copy_from_user(&input, a, sizeof(input)))
++ return -EFAULT;
++ converted.type = input.type;
++ converted.fd = input.fd;
++ converted.portal_cinh = compat_ptr(input.portal_cinh);
++ return map_irq(fp, &converted);
++ }
++#endif
++ default:
++ return usdpaa_irq_ioctl(fp, cmd, arg);
++ }
++}
++
++static const struct file_operations usdpaa_irq_fops = {
++ .open = usdpaa_irq_open,
++ .release = usdpaa_irq_release,
++ .unlocked_ioctl = usdpaa_irq_ioctl,
++ .compat_ioctl = usdpaa_irq_ioctl_compat,
++ .read = usdpaa_irq_read,
++ .poll = usdpaa_irq_poll
++};
++
++static struct miscdevice usdpaa_miscdev = {
++ .name = "fsl-usdpaa-irq",
++ .fops = &usdpaa_irq_fops,
++ .minor = MISC_DYNAMIC_MINOR,
++};
++
++static int __init usdpaa_irq_init(void)
++{
++ int ret;
++
++ pr_info("Freescale USDPAA process IRQ driver\n");
++ ret = misc_register(&usdpaa_miscdev);
++ if (ret)
++ pr_err("fsl-usdpaa-irq: failed to register misc device\n");
++ return ret;
++}
++
++static void __exit usdpaa_irq_exit(void)
++{
++ misc_deregister(&usdpaa_miscdev);
++}
++
++module_init(usdpaa_irq_init);
++module_exit(usdpaa_irq_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Freescale Semiconductor");
++MODULE_DESCRIPTION("Freescale USDPAA process IRQ driver");
+diff --git a/drivers/staging/fsl_qbman/qbman_driver.c b/drivers/staging/fsl_qbman/qbman_driver.c
+new file mode 100644
+index 00000000..ab487d5f
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qbman_driver.c
+@@ -0,0 +1,88 @@
++/* Copyright 2013 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/time.h>
++#include "qman_private.h"
++#include "bman_private.h"
++__init void qman_init_early(void);
++__init void bman_init_early(void);
++
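++/* Bring up BMan then QMan. If portals exist but we have no CCSR access (e.g.
++   a guest partition), poll FQID 1 until the control-plane has configured
++   QMan, giving up after roughly CONFIG_FSL_QMAN_INIT_TIMEOUT seconds. */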
++static __init int qbman_init(void)
++{
++ struct device_node *dn;
++ u32 is_portal_available;
++
++ bman_init();
++ qman_init();
++
++ is_portal_available = 0;
++ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
++ if (!of_device_is_available(dn))
++ continue;
++ else
++ is_portal_available = 1;
++ }
++
++ if (!qman_have_ccsr() && is_portal_available) {
++ struct qman_fq fq = {
++ .fqid = 1
++ };
++ struct qm_mcr_queryfq_np np;
++ int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
++ struct timespec nowts, diffts, startts = current_kernel_time();
++		/* Loop until querying the given FQID succeeds or we time out */
++ while (1) {
++ err = qman_query_fq_np(&fq, &np);
++ if (!err) {
++ /* success, control-plane has configured QMan */
++ break;
++ } else if (err != -ERANGE) {
++ pr_err("QMan: I/O error, continuing anyway\n");
++ break;
++ }
++ nowts = current_kernel_time();
++ diffts = timespec_sub(nowts, startts);
++ if (diffts.tv_sec > 0) {
++ if (!retry--) {
++ pr_err("QMan: time out, control-plane"
++ " dead?\n");
++ break;
++ }
++ pr_warn("QMan: polling for the control-plane"
++ " (%d)\n", retry);
++ }
++ }
++ }
++ bman_resource_init();
++ qman_resource_init();
++ return 0;
++}
++subsys_initcall(qbman_init);
+diff --git a/drivers/staging/fsl_qbman/qman_config.c b/drivers/staging/fsl_qbman/qman_config.c
+new file mode 100644
+index 00000000..9bb1e11a
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_config.c
+@@ -0,0 +1,1224 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <asm/cacheflush.h>
++#include "qman_private.h"
++#include <linux/highmem.h>
++#include <linux/of_reserved_mem.h>
++
++/* Last updated for v00.800 of the BG */
++
++/* Register offsets */
++#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
++#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
++#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
++#define REG_DD_CFG 0x0200
++#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
++#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
++#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
++#define REG_PFDR_FPC 0x0400
++#define REG_PFDR_FP_HEAD 0x0404
++#define REG_PFDR_FP_TAIL 0x0408
++#define REG_PFDR_FP_LWIT 0x0410
++#define REG_PFDR_CFG 0x0414
++#define REG_SFDR_CFG 0x0500
++#define REG_SFDR_IN_USE 0x0504
++#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
++#define REG_WQ_DEF_ENC_WQID 0x0630
++#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
++#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
++#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
++#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
++#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
++#define REG_CM_CFG 0x0800
++#define REG_ECSR 0x0a00
++#define REG_ECIR 0x0a04
++#define REG_EADR 0x0a08
++#define REG_ECIR2 0x0a0c
++#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
++#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
++#define REG_MCR 0x0b00
++#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
++#define REG_MISC_CFG 0x0be0
++#define REG_HID_CFG 0x0bf0
++#define REG_IDLE_STAT 0x0bf4
++#define REG_IP_REV_1 0x0bf8
++#define REG_IP_REV_2 0x0bfc
++#define REG_FQD_BARE 0x0c00
++#define REG_PFDR_BARE 0x0c20
++#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
++#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
++#define REG_QCSP_BARE 0x0c80
++#define REG_QCSP_BAR 0x0c84
++#define REG_CI_SCHED_CFG 0x0d00
++#define REG_SRCIDR 0x0d04
++#define REG_LIODNR 0x0d08
++#define REG_CI_RLM_AVG 0x0d14
++#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */
++#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
++#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
++#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
++#define REG_CEETM_CFG_IDX 0x900
++#define REG_CEETM_CFG_PRES 0x904
++#define REG_CEETM_XSFDR_IN_USE 0x908
++
++/* Assists for QMAN_MCR */
++#define MCR_INIT_PFDR 0x01000000
++#define MCR_get_rslt(v) (u8)((v) >> 24)
++#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
++#define MCR_rslt_ok(r) ((r) == 0xf0)
++#define MCR_rslt_eaccess(r) ((r) == 0xf8)
++#define MCR_rslt_inval(r) ((r) == 0xff)
++
++struct qman;
++
++/* Follows WQ_CS_CFG0-5 */
++enum qm_wq_class {
++ qm_wq_portal = 0,
++ qm_wq_pool = 1,
++ qm_wq_fman0 = 2,
++ qm_wq_fman1 = 3,
++ qm_wq_caam = 4,
++ qm_wq_pme = 5,
++ qm_wq_first = qm_wq_portal,
++ qm_wq_last = qm_wq_pme
++};
++
++/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
++enum qm_memory {
++ qm_memory_fqd,
++ qm_memory_pfdr
++};
++
++/* Used by all error interrupt registers except 'inhibit' */
++#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
++#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
++#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
++#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
++#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
++#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
++#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
++#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
++#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
++#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
++#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
++#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
++#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
++#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
++#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
++#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
++#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
++#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
++
++/* QMAN_ECIR valid error bit */
++#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
++ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
++ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
++#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
++ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
++ QM_EIRQ_IFSI)
++
++union qman_ecir {
++ u32 ecir_raw;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 __reserved:2;
++ u32 portal_type:1;
++ u32 portal_num:5;
++ u32 fqid:24;
++#else
++ u32 fqid:24;
++ u32 portal_num:5;
++ u32 portal_type:1;
++ u32 __reserved:2;
++#endif
++ } __packed info;
++};
++
++union qman_ecir2 {
++ u32 ecir2_raw;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 portal_type:1;
++ u32 __reserved:21;
++ u32 portal_num:10;
++#else
++ u32 portal_num:10;
++ u32 __reserved:21;
++ u32 portal_type:1;
++#endif
++ } __packed info;
++};
++
++union qman_eadr {
++ u32 eadr_raw;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 __reserved1:4;
++ u32 memid:4;
++ u32 __reserved2:12;
++ u32 eadr:12;
++#else
++ u32 eadr:12;
++ u32 __reserved2:12;
++ u32 memid:4;
++ u32 __reserved1:4;
++#endif
++ } __packed info;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 __reserved1:3;
++ u32 memid:5;
++ u32 __reserved:8;
++ u32 eadr:16;
++#else
++ u32 eadr:16;
++ u32 __reserved:8;
++ u32 memid:5;
++ u32 __reserved1:3;
++#endif
++ } __packed info_rev3;
++};
++
++struct qman_hwerr_txt {
++ u32 mask;
++ const char *txt;
++};
++
++#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b }
++
++static const struct qman_hwerr_txt qman_hwerr_txts[] = {
++ QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"),
++ QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"),
++ QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"),
++ QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"),
++ QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
++ QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
++ QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"),
++ QMAN_HWE_TXT(ICVI, "Invalid Command Verb"),
++ QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"),
++ QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"),
++ QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"),
++ QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"),
++ QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"),
++ QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"),
++ QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"),
++ QMAN_HWE_TXT(IESI, "Invalid Enqueue State"),
++ QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"),
++ QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue")
++};
++#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt))
++
++struct qman_error_info_mdata {
++ u16 addr_mask;
++ u16 bits;
++ const char *txt;
++};
++
++#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
++static const struct qman_error_info_mdata error_mdata[] = {
++ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"),
++ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"),
++ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"),
++ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"),
++ QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"),
++ QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"),
++ QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"),
++ QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"),
++ QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"),
++ QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"),
++ QMAN_ERR_MDATA(0x07FF, 181, "CEETM class queue descriptor memory"),
++ QMAN_ERR_MDATA(0x0FFF, 140, "CEETM extended SFDR memory"),
++ QMAN_ERR_MDATA(0x0FFF, 25, "CEETM logical FQ mapping memory"),
++ QMAN_ERR_MDATA(0x0FFF, 96, "CEETM dequeue context memory"),
++ QMAN_ERR_MDATA(0x07FF, 396, "CEETM ccgr memory"),
++ QMAN_ERR_MDATA(0x00FF, 146, "CEETM CQ channel shaping memory"),
++ QMAN_ERR_MDATA(0x007F, 256, "CEETM CQ channel scheduling memory"),
++ QMAN_ERR_MDATA(0x01FF, 88, "CEETM dequeue statistics memory"),
++};
++#define QMAN_ERR_MDATA_COUNT \
++ (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata))
++
++/* Add this in Kconfig */
++#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
++
++/**
++ * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
++ * @v: for accessors that write values, this is the 32-bit value
++ *
++ * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All
++ * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of
++ * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means
++ * "write the enable register" rather than "enable the write register"!
++ */
++#define qm_err_isr_status_read(qm) \
++ __qm_err_isr_read(qm, qm_isr_status)
++#define qm_err_isr_status_clear(qm, m) \
++ __qm_err_isr_write(qm, qm_isr_status, m)
++#define qm_err_isr_enable_read(qm) \
++ __qm_err_isr_read(qm, qm_isr_enable)
++#define qm_err_isr_enable_write(qm, v) \
++ __qm_err_isr_write(qm, qm_isr_enable, v)
++#define qm_err_isr_disable_read(qm) \
++ __qm_err_isr_read(qm, qm_isr_disable)
++#define qm_err_isr_disable_write(qm, v) \
++ __qm_err_isr_write(qm, qm_isr_disable, v)
++#define qm_err_isr_inhibit(qm) \
++ __qm_err_isr_write(qm, qm_isr_inhibit, 1)
++#define qm_err_isr_uninhibit(qm) \
++ __qm_err_isr_write(qm, qm_isr_inhibit, 0)
++
++/*
++ * TODO: unimplemented registers
++ *
++ * Keeping a list here of Qman registers I have not yet covered:
++ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
++ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
++ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
++ */
++
++/* Encapsulate "struct qman *" as a cast of the register space address. */
++
++static struct qman *qm_create(void *regs)
++{
++ return (struct qman *)regs;
++}
++
++static inline u32 __qm_in(struct qman *qm, u32 offset)
++{
++ return in_be32((void *)qm + offset);
++}
++static inline void __qm_out(struct qman *qm, u32 offset, u32 val)
++{
++ out_be32((void *)qm + offset, val);
++}
++#define qm_in(reg) __qm_in(qm, REG_##reg)
++#define qm_out(reg, val) __qm_out(qm, REG_##reg, val)
++
++static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n)
++{
++ return __qm_in(qm, REG_ERR_ISR + (n << 2));
++}
++
++static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val)
++{
++ __qm_out(qm, REG_ERR_ISR + (n << 2), val);
++}
++
++static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal,
++ int ed, u8 sernd)
++{
++ DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) ||
++ (portal == qm_dc_portal_fman1));
++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++ qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff));
++ else
++ qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f));
++}
++
++static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class,
++ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5,
++ u8 csw6, u8 csw7)
++{
++ qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
++ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
++ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
++ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
++}
++
++static void qm_set_hid(struct qman *qm)
++{
++ qm_out(HID_CFG, 0);
++}
++
++static void qm_set_corenet_initiator(struct qman *qm)
++{
++ qm_out(CI_SCHED_CFG,
++ 0x80000000 | /* write srcciv enable */
++ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) |
++ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) |
++ (CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W << 4) |
++ CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W);
++}
++
++static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor,
++ u8 *cfg)
++{
++ u32 v = qm_in(IP_REV_1);
++ u32 v2 = qm_in(IP_REV_2);
++ *id = (v >> 16);
++ *major = (v >> 8) & 0xff;
++ *minor = v & 0xff;
++ *cfg = v2 & 0xff;
++}
++
++static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
++ int enable, int prio, int stash, u32 size)
++{
++ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
++ u32 exp = ilog2(size);
++ /* choke if size isn't within range */
++ DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
++ is_power_of_2(size));
++ /* choke if 'ba' has lower-alignment than 'size' */
++ DPA_ASSERT(!(ba & (size - 1)));
++ __qm_out(qm, offset, upper_32_bits(ba));
++ __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
++ __qm_out(qm, offset + REG_offset_AR,
++ (enable ? 0x80000000 : 0) |
++ (prio ? 0x40000000 : 0) |
++ (stash ? 0x20000000 : 0) |
++ (exp - 1));
++}
++
++static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
++{
++ qm_out(PFDR_FP_LWIT, th & 0xffffff);
++ qm_out(PFDR_CFG, k);
++}
++
++static void qm_set_sfdr_threshold(struct qman *qm, u16 th)
++{
++ qm_out(SFDR_CFG, th & 0x3ff);
++}
++
++static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num)
++{
++ u8 rslt = MCR_get_rslt(qm_in(MCR));
++
++ DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
++ /* Make sure the command interface is 'idle' */
++ if (!MCR_rslt_idle(rslt))
++ panic("QMAN_MCR isn't idle");
++
++ /* Write the MCR command params then the verb */
++ qm_out(MCP(0), pfdr_start);
++ /* TODO: remove this - it's a workaround for a model bug that is
++ * corrected in more recent versions. We use the workaround until
++ * everyone has upgraded. */
++ qm_out(MCP(1), (pfdr_start + num - 16));
++ lwsync();
++ qm_out(MCR, MCR_INIT_PFDR);
++ /* Poll for the result */
++ do {
++ rslt = MCR_get_rslt(qm_in(MCR));
++ } while (!MCR_rslt_idle(rslt));
++ if (MCR_rslt_ok(rslt))
++ return 0;
++ if (MCR_rslt_eaccess(rslt))
++ return -EACCES;
++ if (MCR_rslt_inval(rslt))
++ return -EINVAL;
++ pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
++ return -ENOSYS;
++}
++
++/*****************/
++/* Config driver */
++/*****************/
++
++#define DEFAULT_FQD_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ)
++#define DEFAULT_PFDR_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_PFDR_SZ)
++
++/* We support only one of these */
++static struct qman *qm;
++static struct device_node *qm_node;
++
++/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
++ * during qman_init_ccsr(). */
++static dma_addr_t fqd_a, pfdr_a;
++static size_t fqd_sz = DEFAULT_FQD_SZ, pfdr_sz = DEFAULT_PFDR_SZ;
++
++static int qman_fqd(struct reserved_mem *rmem)
++{
++ fqd_a = rmem->base;
++ fqd_sz = rmem->size;
++
++ WARN_ON(!(fqd_a && fqd_sz));
++
++ return 0;
++}
++RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
++
++static int qman_pfdr(struct reserved_mem *rmem)
++{
++ pfdr_a = rmem->base;
++ pfdr_sz = rmem->size;
++
++ WARN_ON(!(pfdr_a && pfdr_sz));
++
++ return 0;
++}
++RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
++
++size_t get_qman_fqd_size()
++{
++ return fqd_sz;
++}
++
++/* Zero the memory region supplied for <name> (when requested) and flush it
++ * from the data cache so that QMan-originated transactions for this memory
++ * region can be marked non-coherent.
++ */
++static __init int parse_mem_property(struct device_node *node, const char *name,
++ dma_addr_t *addr, size_t *sz, int zero)
++{
++ int ret;
++
++ /* If using a "zero-pma", don't try to zero it, even if you asked */
++ if (zero && of_find_property(node, "zero-pma", &ret)) {
++ pr_info(" it's a 'zero-pma', not zeroing from s/w\n");
++ zero = 0;
++ }
++
++ if (zero) {
++ /* map as cacheable, non-guarded */
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ void __iomem *tmpp = ioremap_cache(*addr, *sz);
++#else
++ void __iomem *tmpp = ioremap(*addr, *sz);
++#endif
++
++ if (!tmpp)
++ return -ENOMEM;
++ memset_io(tmpp, 0, *sz);
++ flush_dcache_range((unsigned long)tmpp,
++ (unsigned long)tmpp + *sz);
++ iounmap(tmpp);
++ }
++
++ return 0;
++}
++
++/* TODO:
++ * - there is obviously no handling of errors,
++ * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
++ * both memory resources to zero.
++ */
++static int __init fsl_qman_init(struct device_node *node)
++{
++ struct resource res;
++ resource_size_t len;
++ u32 __iomem *regs;
++ const char *s;
++ int ret, standby = 0;
++ u16 id;
++ u8 major, minor, cfg;
++ ret = of_address_to_resource(node, 0, &res);
++ if (ret) {
++ pr_err("Can't get %s property '%s'\n", node->full_name, "reg");
++ return ret;
++ }
++ s = of_get_property(node, "fsl,hv-claimable", &ret);
++ if (s && !strcmp(s, "standby"))
++ standby = 1;
++ if (!standby) {
++ ret = parse_mem_property(node, "fsl,qman-fqd",
++ &fqd_a, &fqd_sz, 1);
++ pr_info("qman-fqd addr %pad size 0x%zx\n", &fqd_a, fqd_sz);
++ BUG_ON(ret);
++ ret = parse_mem_property(node, "fsl,qman-pfdr",
++ &pfdr_a, &pfdr_sz, 0);
++ pr_info("qman-pfdr addr %pad size 0x%zx\n", &pfdr_a, pfdr_sz);
++ BUG_ON(ret);
++ }
++ /* Global configuration */
++ len = resource_size(&res);
++ if (len != (unsigned long)len)
++ return -EINVAL;
++ regs = ioremap(res.start, (unsigned long)len);
++ qm = qm_create(regs);
++ qm_node = node;
++ qm_get_version(qm, &id, &major, &minor, &cfg);
++ pr_info("Qman ver:%04x,%02x,%02x,%02x\n", id, major, minor, cfg);
++ if (!qman_ip_rev) {
++ if ((major == 1) && (minor == 0)) {
++ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
++ iounmap(regs);
++ return -ENODEV;
++ } else if ((major == 1) && (minor == 1))
++ qman_ip_rev = QMAN_REV11;
++ else if ((major == 1) && (minor == 2))
++ qman_ip_rev = QMAN_REV12;
++ else if ((major == 2) && (minor == 0))
++ qman_ip_rev = QMAN_REV20;
++ else if ((major == 3) && (minor == 0))
++ qman_ip_rev = QMAN_REV30;
++ else if ((major == 3) && (minor == 1))
++ qman_ip_rev = QMAN_REV31;
++ else if ((major == 3) && (minor == 2))
++ qman_ip_rev = QMAN_REV32;
++ else {
++ pr_warn("unknown Qman version, default to rev1.1\n");
++ qman_ip_rev = QMAN_REV11;
++ }
++ qman_ip_cfg = cfg;
++ }
++
++ if (standby) {
++ pr_info(" -> in standby mode\n");
++ return 0;
++ }
++ return 0;
++}
++
++int qman_have_ccsr(void)
++{
++ return qm ? 1 : 0;
++}
++
++__init int qman_init_early(void)
++{
++ struct device_node *dn;
++ int ret;
++
++ for_each_compatible_node(dn, NULL, "fsl,qman") {
++ if (qm)
++ pr_err("%s: only one 'fsl,qman' allowed\n",
++ dn->full_name);
++ else {
++ if (!of_device_is_available(dn))
++ continue;
++
++ ret = fsl_qman_init(dn);
++ BUG_ON(ret);
++ }
++ }
++ return 0;
++}
++postcore_initcall_sync(qman_init_early);
++
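++
++/* Dump the extended error-capture data (EDATA) registers that hold
++ * 'bit_count' valid bits; the first (partially used) word is masked down
++ * to its valid bits before printing. */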
++static void log_edata_bits(u32 bit_count)
++{
++ u32 i, j, mask = 0xffffffff;
++
++ pr_warn("Qman ErrInt, EDATA:\n");
++ i = bit_count/32;
++ if (bit_count%32) {
++ i++;
++ mask = ~(mask << bit_count%32);
++ }
++ j = 16-i;
++ pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask);
++ j++;
++ for (; j < 16; j++)
++ pr_warn(" 0x%08x\n", qm_in(EDATA(j)));
++}
++
++static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
++{
++ union qman_ecir ecir_val;
++ union qman_eadr eadr_val;
++
++ ecir_val.ecir_raw = qm_in(ECIR);
++ /* Is portal info valid */
++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
++ union qman_ecir2 ecir2_val;
++ ecir2_val.ecir2_raw = qm_in(ECIR2);
++ if (ecsr_val & PORTAL_ECSR_ERR) {
++ pr_warn("Qman ErrInt: %s id %d\n",
++ (ecir2_val.info.portal_type) ?
++ "DCP" : "SWP", ecir2_val.info.portal_num);
++ }
++ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) {
++ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
++ ecir_val.info.fqid);
++ }
++ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
++ eadr_val.eadr_raw = qm_in(EADR);
++ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
++ error_mdata[eadr_val.info_rev3.memid].txt,
++ error_mdata[eadr_val.info_rev3.memid].addr_mask
++ & eadr_val.info_rev3.eadr);
++ log_edata_bits(
++ error_mdata[eadr_val.info_rev3.memid].bits);
++ }
++ } else {
++ if (ecsr_val & PORTAL_ECSR_ERR) {
++ pr_warn("Qman ErrInt: %s id %d\n",
++ (ecir_val.info.portal_type) ?
++ "DCP" : "SWP", ecir_val.info.portal_num);
++ }
++ if (ecsr_val & FQID_ECSR_ERR) {
++ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
++ ecir_val.info.fqid);
++ }
++ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
++ eadr_val.eadr_raw = qm_in(EADR);
++ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
++ error_mdata[eadr_val.info.memid].txt,
++ error_mdata[eadr_val.info.memid].addr_mask
++ & eadr_val.info.eadr);
++ log_edata_bits(error_mdata[eadr_val.info.memid].bits);
++ }
++ }
++}
++
++/* Qman interrupt handler */
++static irqreturn_t qman_isr(int irq, void *ptr)
++{
++ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
++
++ ier_val = qm_err_isr_enable_read(qm);
++ isr_val = qm_err_isr_status_read(qm);
++ ecsr_val = qm_in(ECSR);
++ isr_mask = isr_val & ier_val;
++
++ if (!isr_mask)
++ return IRQ_NONE;
++ for (i = 0; i < QMAN_HWE_COUNT; i++) {
++ if (qman_hwerr_txts[i].mask & isr_mask) {
++ pr_warn("Qman ErrInt: %s\n", qman_hwerr_txts[i].txt);
++ if (qman_hwerr_txts[i].mask & ecsr_val) {
++ log_additional_error_info(isr_mask, ecsr_val);
++ /* Re-arm error capture registers */
++ qm_out(ECSR, ecsr_val);
++ }
++ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) {
++ pr_devel("Qman un-enabling error 0x%x\n",
++ qman_hwerr_txts[i].mask);
++ ier_val &= ~qman_hwerr_txts[i].mask;
++ qm_err_isr_enable_write(qm, ier_val);
++ }
++ }
++ }
++ qm_err_isr_status_clear(qm, isr_val);
++ return IRQ_HANDLED;
++}
++
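++
++/* Bind the QMan error interrupt: write-to-clear any stale status first,
++ * then enable all error interrupt sources. */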
++static int __bind_irq(void)
++{
++ int ret, err_irq;
++
++ err_irq = of_irq_to_resource(qm_node, 0, NULL);
++ if (err_irq == 0) {
++ pr_info("Can't get %s property '%s'\n", qm_node->full_name,
++ "interrupts");
++ return -ENODEV;
++ }
++ ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node);
++ if (ret) {
++ pr_err("request_irq() failed %d for '%s'\n", ret,
++ qm_node->full_name);
++ return -ENODEV;
++ }
++	/* Write-to-clear any stale bits (e.g. starvation asserted prior to
++	 * resource allocation during driver init). */
++ qm_err_isr_status_clear(qm, 0xffffffff);
++ /* Enable Error Interrupts */
++ qm_err_isr_enable_write(qm, 0xffffffff);
++ return 0;
++}
++
++int qman_init_ccsr(struct device_node *node)
++{
++ int ret;
++ if (!qman_have_ccsr())
++ return 0;
++ if (node != qm_node)
++ return -EINVAL;
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++	/* TEMP for LS1043: should be done in U-Boot */
++ qm_out(QCSP_BARE, 0x5);
++ qm_out(QCSP_BAR, 0x0);
++#endif
++ /* FQD memory */
++ qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
++ /* PFDR memory */
++ qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
++ qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
++ /* thresholds */
++ qm_set_pfdr_threshold(qm, 512, 64);
++ qm_set_sfdr_threshold(qm, 128);
++ /* clear stale PEBI bit from interrupt status register */
++ qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
++ /* corenet initiator settings */
++ qm_set_corenet_initiator(qm);
++ /* HID settings */
++ qm_set_hid(qm);
++ /* Set scheduling weights to defaults */
++ for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
++ qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
++ /* We are not prepared to accept ERNs for hardware enqueues */
++ qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
++ qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
++ /* Initialise Error Interrupt Handler */
++ ret = __bind_irq();
++ if (ret)
++ return ret;
++ return 0;
++}
++
++#define LIO_CFG_LIODN_MASK 0x0fff0000
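++/* Record the LIODN offset of the first software portal seen, then rewrite
++ * the LIODN field of every subsequently probed portal so that all portals
++ * share the same offset. */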
++void qman_liodn_fixup(u16 channel)
++{
++ static int done;
++ static u32 liodn_offset;
++ u32 before, after;
++ int idx = channel - QM_CHANNEL_SWPORTAL0;
++
++ if (!qman_have_ccsr())
++ return;
++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++ before = qm_in(REV3_QCSP_LIO_CFG(idx));
++ else
++ before = qm_in(QCSP_LIO_CFG(idx));
++ if (!done) {
++ liodn_offset = before & LIO_CFG_LIODN_MASK;
++ done = 1;
++ return;
++ }
++ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++ qm_out(REV3_QCSP_LIO_CFG(idx), after);
++ else
++ qm_out(QCSP_LIO_CFG(idx), after);
++}
++
++#define IO_CFG_SDEST_MASK 0x00ff0000
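++/* Program the stash destination (SDEST) for a software portal channel. On
++ * rev3.x hardware two vcpus share one SDEST, and LS1043A (rev3.1) has a
++ * single L2 cache, so the index is forced to 0 there. */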
++int qman_set_sdest(u16 channel, unsigned int cpu_idx)
++{
++ int idx = channel - QM_CHANNEL_SWPORTAL0;
++ u32 before, after;
++
++ if (!qman_have_ccsr())
++ return -ENODEV;
++ if ((qman_ip_rev & 0xFF00) == QMAN_REV31) {
++ /* LS1043A - only one L2 cache */
++ cpu_idx = 0;
++ }
++
++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
++ before = qm_in(REV3_QCSP_IO_CFG(idx));
++		/* Each pair of vcpus shares the same SRQ (SDEST) */
++ cpu_idx /= 2;
++ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
++ qm_out(REV3_QCSP_IO_CFG(idx), after);
++ } else {
++ before = qm_in(QCSP_IO_CFG(idx));
++ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
++ qm_out(QCSP_IO_CFG(idx), after);
++ }
++ return 0;
++}
++
++#define MISC_CFG_WPM_MASK 0x00000002
++int qm_set_wpm(int wpm)
++{
++ u32 before;
++ u32 after;
++
++ if (!qman_have_ccsr())
++ return -ENODEV;
++
++ before = qm_in(MISC_CFG);
++ after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
++ qm_out(MISC_CFG, after);
++ return 0;
++}
++
++int qm_get_wpm(int *wpm)
++{
++ u32 before;
++
++ if (!qman_have_ccsr())
++ return -ENODEV;
++
++ before = qm_in(MISC_CFG);
++ *wpm = (before & MISC_CFG_WPM_MASK) >> 1;
++ return 0;
++}
++
++/* The CEETM_CFG_PRES register's PRES field is calculated as:
++ * PRES = (2^22 / credit update reference period) * QMan clock period
++ *      = ((2^22 * 10^9) / CONFIG_QMAN_CEETM_UPDATE_PERIOD) / qman_clk
++ */
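++/* Illustrative example (values assumed, not taken from any datasheet): with
++ * CONFIG_QMAN_CEETM_UPDATE_PERIOD = 1000 and qman_clk = 600000000 (600 MHz),
++ * PRES = (2^22 * 10^9) / 1000 / 600000000 = 6990, which fits the u16 field.
++ */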
++
++int qman_ceetm_set_prescaler(enum qm_dc_portal portal)
++{
++ u64 temp;
++ u16 pres;
++
++ if (!qman_have_ccsr())
++ return -ENODEV;
++
++ temp = 0x400000 * 100;
++ do_div(temp, CONFIG_QMAN_CEETM_UPDATE_PERIOD);
++ temp *= 10000000;
++ do_div(temp, qman_clk);
++ pres = (u16) temp;
++ qm_out(CEETM_CFG_IDX, portal);
++ qm_out(CEETM_CFG_PRES, pres);
++ return 0;
++}
++
++int qman_ceetm_get_prescaler(u16 *pres)
++{
++ if (!qman_have_ccsr())
++ return -ENODEV;
++ *pres = (u16)qm_in(CEETM_CFG_PRES);
++ return 0;
++}
++
++#define DCP_CFG_CEETME_MASK 0xFFFF0000
++#define QM_SP_ENABLE_CEETM(n) (0x80000000 >> (n))
++int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
++{
++ u32 dcp_cfg;
++
++ if (!qman_have_ccsr())
++ return -ENODEV;
++
++ dcp_cfg = qm_in(DCP_CFG(portal));
++ dcp_cfg |= QM_SP_ENABLE_CEETM(sub_portal);
++ qm_out(DCP_CFG(portal), dcp_cfg);
++ return 0;
++}
++
++int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
++{
++ u32 dcp_cfg;
++
++ if (!qman_have_ccsr())
++ return -ENODEV;
++ dcp_cfg = qm_in(DCP_CFG(portal));
++ dcp_cfg &= ~(QM_SP_ENABLE_CEETM(sub_portal));
++ qm_out(DCP_CFG(portal), dcp_cfg);
++ return 0;
++}
++
++int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num)
++{
++ if (!qman_have_ccsr())
++ return -ENODEV;
++ *num = qm_in(CEETM_XSFDR_IN_USE);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_get_xsfdr);
++
++#ifdef CONFIG_SYSFS
++
++#define DRV_NAME "fsl-qman"
++#define DCP_MAX_ID 3
++#define DCP_MIN_ID 0
++
++static ssize_t show_pfdr_fpc(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC));
++};
++
++static ssize_t show_dlm_avg(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ u32 data;
++ int i;
++
++ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
++ return -EINVAL;
++ if (i < DCP_MIN_ID || i > DCP_MAX_ID)
++ return -EINVAL;
++ data = qm_in(DCP_DLM_AVG(i));
++ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
++ (data & 0x000000ff)*390625);
++};
++
++static ssize_t set_dlm_avg(struct device *dev,
++ struct device_attribute *dev_attr, const char *buf, size_t count)
++{
++ unsigned long val;
++ int i;
++
++ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
++ return -EINVAL;
++ if (i < DCP_MIN_ID || i > DCP_MAX_ID)
++ return -EINVAL;
++ if (kstrtoul(buf, 0, &val)) {
++ dev_dbg(dev, "invalid input %s\n", buf);
++ return -EINVAL;
++ }
++ qm_out(DCP_DLM_AVG(i), val);
++ return count;
++};
++
++static ssize_t show_pfdr_cfg(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG));
++};
++
++static ssize_t set_pfdr_cfg(struct device *dev,
++ struct device_attribute *dev_attr, const char *buf, size_t count)
++{
++ unsigned long val;
++
++ if (kstrtoul(buf, 0, &val)) {
++ dev_dbg(dev, "invalid input %s\n", buf);
++ return -EINVAL;
++ }
++ qm_out(PFDR_CFG, val);
++ return count;
++};
++
++static ssize_t show_sfdr_in_use(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE));
++};
++
++static ssize_t show_idle_stat(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT));
++};
++
++static ssize_t show_ci_rlm_avg(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ u32 data = qm_in(CI_RLM_AVG);
++ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
++ (data & 0x000000ff)*390625);
++};
++
++static ssize_t set_ci_rlm_avg(struct device *dev,
++ struct device_attribute *dev_attr, const char *buf, size_t count)
++{
++ unsigned long val;
++
++ if (kstrtoul(buf, 0, &val)) {
++ dev_dbg(dev, "invalid input %s\n", buf);
++ return -EINVAL;
++ }
++ qm_out(CI_RLM_AVG, val);
++ return count;
++};
++
++static ssize_t show_err_isr(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR));
++};
++
++#define SBEC_MAX_ID 14
++#define SBEC_MIN_ID 0
++
++static ssize_t show_sbec(struct device *dev,
++ struct device_attribute *dev_attr, char *buf)
++{
++ int i;
++
++ if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
++ return -EINVAL;
++ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
++ return -EINVAL;
++ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i)));
++};
++
++static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL);
++static DEVICE_ATTR(pfdr_cfg, (S_IRUSR|S_IWUSR), show_pfdr_cfg, set_pfdr_cfg);
++static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL);
++static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR),
++ show_ci_rlm_avg, set_ci_rlm_avg);
++static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
++static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL);
++
++static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
++static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
++static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
++static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
++
++static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL);
++static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL);
++
++static struct attribute *qman_dev_attributes[] = {
++ &dev_attr_pfdr_fpc.attr,
++ &dev_attr_pfdr_cfg.attr,
++ &dev_attr_idle_stat.attr,
++ &dev_attr_ci_rlm_avg.attr,
++ &dev_attr_err_isr.attr,
++ &dev_attr_dcp0_dlm_avg.attr,
++ &dev_attr_dcp1_dlm_avg.attr,
++ &dev_attr_dcp2_dlm_avg.attr,
++ &dev_attr_dcp3_dlm_avg.attr,
++ /* sfdr_in_use will be added if necessary */
++ NULL
++};
++
++static struct attribute *qman_dev_ecr_attributes[] = {
++ &dev_attr_sbec_0.attr,
++ &dev_attr_sbec_1.attr,
++ &dev_attr_sbec_2.attr,
++ &dev_attr_sbec_3.attr,
++ &dev_attr_sbec_4.attr,
++ &dev_attr_sbec_5.attr,
++ &dev_attr_sbec_6.attr,
++ &dev_attr_sbec_7.attr,
++ &dev_attr_sbec_8.attr,
++ &dev_attr_sbec_9.attr,
++ &dev_attr_sbec_10.attr,
++ &dev_attr_sbec_11.attr,
++ &dev_attr_sbec_12.attr,
++ &dev_attr_sbec_13.attr,
++ &dev_attr_sbec_14.attr,
++ NULL
++};
++
++/* root level */
++static const struct attribute_group qman_dev_attr_grp = {
++ .name = NULL,
++ .attrs = qman_dev_attributes
++};
++static const struct attribute_group qman_dev_ecr_grp = {
++ .name = "error_capture",
++ .attrs = qman_dev_ecr_attributes
++};
++
++static int of_fsl_qman_remove(struct platform_device *ofdev)
++{
++ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
++ return 0;
++};
++
++static int of_fsl_qman_probe(struct platform_device *ofdev)
++{
++ int ret;
++
++ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
++ if (ret)
++ goto done;
++ ret = sysfs_add_file_to_group(&ofdev->dev.kobj,
++ &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name);
++ if (ret)
++ goto del_group_0;
++ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_ecr_grp);
++ if (ret)
++ goto del_group_0;
++
++ goto done;
++
++del_group_0:
++ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
++done:
++ if (ret)
++ dev_err(&ofdev->dev,
++ "Cannot create dev attributes ret=%d\n", ret);
++ return ret;
++};
++
++static const struct of_device_id of_fsl_qman_ids[] = {
++ {
++ .compatible = "fsl,qman",
++ },
++ {}
++};
++MODULE_DEVICE_TABLE(of, of_fsl_qman_ids);
++
++#ifdef CONFIG_SUSPEND
++
++static u32 saved_isdr;
++static int qman_pm_suspend_noirq(struct device *dev)
++{
++ uint32_t idle_state;
++
++ suspend_unused_qportal();
++ /* save isdr, disable all, clear isr */
++ saved_isdr = qm_err_isr_disable_read(qm);
++ qm_err_isr_disable_write(qm, 0xffffffff);
++ qm_err_isr_status_clear(qm, 0xffffffff);
++ idle_state = qm_in(IDLE_STAT);
++ if (!(idle_state & 0x1)) {
++ pr_err("Qman not idle 0x%x aborting\n", idle_state);
++ qm_err_isr_disable_write(qm, saved_isdr);
++ resume_unused_qportal();
++ return -EBUSY;
++ }
++#ifdef CONFIG_PM_DEBUG
++ pr_info("Qman suspend code, IDLE_STAT = 0x%x\n", idle_state);
++#endif
++ return 0;
++}
++
++static int qman_pm_resume_noirq(struct device *dev)
++{
++ /* restore isdr */
++ qm_err_isr_disable_write(qm, saved_isdr);
++ resume_unused_qportal();
++ return 0;
++}
++#else
++#define qman_pm_suspend_noirq NULL
++#define qman_pm_resume_noirq NULL
++#endif
++
++static const struct dev_pm_ops qman_pm_ops = {
++ .suspend_noirq = qman_pm_suspend_noirq,
++ .resume_noirq = qman_pm_resume_noirq,
++};
++
++static struct platform_driver of_fsl_qman_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = DRV_NAME,
++ .of_match_table = of_fsl_qman_ids,
++ .pm = &qman_pm_ops,
++ },
++ .probe = of_fsl_qman_probe,
++ .remove = of_fsl_qman_remove,
++};
++
++static int qman_ctrl_init(void)
++{
++ return platform_driver_register(&of_fsl_qman_driver);
++}
++
++static void qman_ctrl_exit(void)
++{
++ platform_driver_unregister(&of_fsl_qman_driver);
++}
++
++module_init(qman_ctrl_init);
++module_exit(qman_ctrl_exit);
++
++#endif /* CONFIG_SYSFS */
+diff --git a/drivers/staging/fsl_qbman/qman_debugfs.c b/drivers/staging/fsl_qbman/qman_debugfs.c
+new file mode 100644
+index 00000000..fb8ecba1
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_debugfs.c
+@@ -0,0 +1,1594 @@
++/* Copyright 2010-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "qman_private.h"
++
++#define MAX_FQID (0x00ffffff)
++#define QM_FQD_BLOCK_SIZE 64
++#define QM_FQD_AR (0xC10)
++
++static u32 fqid_max;
++static u64 qman_ccsr_start;
++static u64 qman_ccsr_size;
++
++static const char * const state_txt[] = {
++ "Out of Service",
++ "Retired",
++ "Tentatively Scheduled",
++ "Truly Scheduled",
++ "Parked",
++ "Active, Active Held or Held Suspended",
++ "Unknown State 6",
++ "Unknown State 7",
++ NULL,
++};
++
++static const u8 fqd_states[] = {
++ QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED,
++ QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED,
++ QM_MCR_NP_STATE_ACTIVE};
++
++struct mask_to_text {
++ u16 mask;
++ const char *txt;
++};
++
++struct mask_filter_s {
++ u16 mask;
++ u8 filter;
++};
++
++static const struct mask_filter_s mask_filter[] = {
++ {QM_FQCTRL_PREFERINCACHE, 0},
++ {QM_FQCTRL_PREFERINCACHE, 1},
++ {QM_FQCTRL_HOLDACTIVE, 0},
++ {QM_FQCTRL_HOLDACTIVE, 1},
++ {QM_FQCTRL_AVOIDBLOCK, 0},
++ {QM_FQCTRL_AVOIDBLOCK, 1},
++ {QM_FQCTRL_FORCESFDR, 0},
++ {QM_FQCTRL_FORCESFDR, 1},
++ {QM_FQCTRL_CPCSTASH, 0},
++ {QM_FQCTRL_CPCSTASH, 1},
++ {QM_FQCTRL_CTXASTASHING, 0},
++ {QM_FQCTRL_CTXASTASHING, 1},
++ {QM_FQCTRL_ORP, 0},
++ {QM_FQCTRL_ORP, 1},
++ {QM_FQCTRL_TDE, 0},
++ {QM_FQCTRL_TDE, 1},
++ {QM_FQCTRL_CGE, 0},
++ {QM_FQCTRL_CGE, 1}
++};
++
++static const struct mask_to_text fq_ctrl_text_list[] = {
++ {
++ .mask = QM_FQCTRL_PREFERINCACHE,
++ .txt = "Prefer in cache",
++ },
++ {
++ .mask = QM_FQCTRL_HOLDACTIVE,
++ .txt = "Hold active in portal",
++ },
++ {
++ .mask = QM_FQCTRL_AVOIDBLOCK,
++ .txt = "Avoid Blocking",
++ },
++ {
++ .mask = QM_FQCTRL_FORCESFDR,
++ .txt = "High-priority SFDRs",
++ },
++ {
++ .mask = QM_FQCTRL_CPCSTASH,
++ .txt = "CPC Stash Enable",
++ },
++ {
++ .mask = QM_FQCTRL_CTXASTASHING,
++ .txt = "Context-A stashing",
++ },
++ {
++ .mask = QM_FQCTRL_ORP,
++ .txt = "ORP Enable",
++ },
++ {
++ .mask = QM_FQCTRL_TDE,
++ .txt = "Tail-Drop Enable",
++ },
++ {
++ .mask = QM_FQCTRL_CGE,
++ .txt = "Congestion Group Enable",
++ },
++ {
++ .mask = 0,
++ .txt = NULL,
++ }
++};
++
++static const char *get_fqd_ctrl_text(u16 mask)
++{
++ int i = 0;
++
++ while (fq_ctrl_text_list[i].txt != NULL) {
++ if (fq_ctrl_text_list[i].mask == mask)
++ return fq_ctrl_text_list[i].txt;
++ i++;
++ }
++ return NULL;
++}
++
++static const struct mask_to_text stashing_text_list[] = {
++ {
++ .mask = QM_STASHING_EXCL_CTX,
++ .txt = "FQ Ctx Stash"
++ },
++ {
++ .mask = QM_STASHING_EXCL_DATA,
++ .txt = "Frame Data Stash",
++ },
++ {
++ .mask = QM_STASHING_EXCL_ANNOTATION,
++ .txt = "Frame Annotation Stash",
++ },
++ {
++ .mask = 0,
++ .txt = NULL,
++ },
++};
++
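++/* Copy a short numeric token from user space into a bounded local buffer
++ * and convert it with kstrtoul(); input longer than 11 characters is
++ * rejected. */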
++static int user_input_convert(const char __user *user_buf, size_t count,
++ unsigned long *val)
++{
++ char buf[12];
++
++ if (count > sizeof(buf) - 1)
++ return -EINVAL;
++ if (copy_from_user(buf, user_buf, count))
++ return -EFAULT;
++ buf[count] = '\0';
++ if (kstrtoul(buf, 0, val))
++ return -EINVAL;
++ return 0;
++}
++
++struct line_buffer_fq {
++ u32 buf[8];
++ u32 buf_cnt;
++ int line_cnt;
++};
++
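++
++/* Accumulate FQIDs and print them eight per line; flush_line_buffer()
++ * below emits any partially filled final line. */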
++static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid,
++ struct seq_file *file)
++{
++ line_buf->buf[line_buf->buf_cnt] = fqid;
++ line_buf->buf_cnt++;
++ if (line_buf->buf_cnt == 8) {
++ /* Buffer is full, flush it */
++ if (line_buf->line_cnt != 0)
++ seq_puts(file, ",\n");
++ seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x,"
++ "0x%06x,0x%06x,0x%06x",
++ line_buf->buf[0], line_buf->buf[1], line_buf->buf[2],
++ line_buf->buf[3], line_buf->buf[4], line_buf->buf[5],
++ line_buf->buf[6], line_buf->buf[7]);
++ line_buf->buf_cnt = 0;
++ line_buf->line_cnt++;
++ }
++}
++
++static void flush_line_buffer(struct line_buffer_fq *line_buf,
++ struct seq_file *file)
++{
++ if (line_buf->buf_cnt) {
++ int y = 0;
++ if (line_buf->line_cnt != 0)
++ seq_puts(file, ",\n");
++ while (y != line_buf->buf_cnt) {
++ if (y+1 == line_buf->buf_cnt)
++ seq_printf(file, "0x%06x", line_buf->buf[y]);
++ else
++ seq_printf(file, "0x%06x,", line_buf->buf[y]);
++ y++;
++ }
++ line_buf->line_cnt++;
++ }
++ if (line_buf->line_cnt)
++ seq_putc(file, '\n');
++}
++
++static struct dentry *dfs_root; /* debugfs root directory */
++
++/*******************************************************************************
++ * Query Frame Queue Non Programmable Fields
++ ******************************************************************************/
++struct query_fq_np_fields_data_s {
++ u32 fqid;
++};
++static struct query_fq_np_fields_data_s query_fq_np_fields_data = {
++ .fqid = 1,
++};
++
++static int query_fq_np_fields_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ struct qm_mcr_queryfq_np np;
++ struct qman_fq fq;
++
++ fq.fqid = query_fq_np_fields_data.fqid;
++ ret = qman_query_fq_np(&fq, &np);
++ if (ret)
++ return ret;
++ /* Print state */
++ seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n",
++ fq.fqid);
++ seq_printf(file, " force eligible pending: %s\n",
++ (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no");
++ seq_printf(file, " retirement pending: %s\n",
++ (np.state & QM_MCR_NP_STATE_R) ? "yes" : "no");
++ seq_printf(file, " state: %s\n",
++ state_txt[np.state & QM_MCR_NP_STATE_MASK]);
++ seq_printf(file, " fq_link: 0x%x\n", np.fqd_link);
++ seq_printf(file, " odp_seq: %u\n", np.odp_seq);
++ seq_printf(file, " orp_nesn: %u\n", np.orp_nesn);
++ seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq);
++ seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq);
++ seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr);
++ seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr);
++ seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr);
++ seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr);
++ seq_printf(file, " is: ics_surp contains a %s\n",
++ (np.is) ? "deficit" : "surplus");
++ seq_printf(file, " ics_surp: %u\n", np.ics_surp);
++ seq_printf(file, " byte_cnt: %u\n", np.byte_cnt);
++ seq_printf(file, " frm_cnt: %u\n", np.frm_cnt);
++ seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr);
++ seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr);
++ seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr);
++ seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr);
++ seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr);
++ return 0;
++}
++
++static int query_fq_np_fields_open(struct inode *inode,
++ struct file *file)
++{
++ return single_open(file, query_fq_np_fields_show, NULL);
++}
++
++static ssize_t query_fq_np_fields_write(struct file *f,
++ const char __user *buf, size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ if (val > MAX_FQID)
++ return -EINVAL;
++ query_fq_np_fields_data.fqid = (u32)val;
++ return count;
++}
++
++static const struct file_operations query_fq_np_fields_fops = {
++ .owner = THIS_MODULE,
++ .open = query_fq_np_fields_open,
++ .read = seq_read,
++ .write = query_fq_np_fields_write,
++ .release = single_release,
++};
++
++/*******************************************************************************
++ * Frame Queue Programmable Fields
++ ******************************************************************************/
++struct query_fq_fields_data_s {
++ u32 fqid;
++};
++
++static struct query_fq_fields_data_s query_fq_fields_data = {
++ .fqid = 1,
++};
++
++static int query_fq_fields_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ struct qm_fqd fqd;
++ struct qman_fq fq;
++ int i = 0;
++
++ memset(&fqd, 0, sizeof(struct qm_fqd));
++ fq.fqid = query_fq_fields_data.fqid;
++ ret = qman_query_fq(&fq, &fqd);
++ if (ret)
++ return ret;
++ seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n",
++ fq.fqid);
++ seq_printf(file, " orprws: %u\n", fqd.orprws);
++ seq_printf(file, " oa: %u\n", fqd.oa);
++ seq_printf(file, " olws: %u\n", fqd.olws);
++
++ seq_printf(file, " cgid: %u\n", fqd.cgid);
++
++ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0)
++ seq_puts(file, " fq_ctrl: None\n");
++ else {
++ i = 0;
++ seq_puts(file, " fq_ctrl:\n");
++ while (fq_ctrl_text_list[i].txt != NULL) {
++ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
++ fq_ctrl_text_list[i].mask)
++ seq_printf(file, " %s\n",
++ fq_ctrl_text_list[i].txt);
++ i++;
++ }
++ }
++ seq_printf(file, " dest_channel: %u\n", fqd.dest.channel);
++ seq_printf(file, " dest_wq: %u\n", fqd.dest.wq);
++ seq_printf(file, " ics_cred: %u\n", fqd.ics_cred);
++ seq_printf(file, " td_mant: %u\n", fqd.td.mant);
++ seq_printf(file, " td_exp: %u\n", fqd.td.exp);
++
++ seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b);
++
++ seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd));
++ /* Any stashing configured */
++ if ((fqd.context_a.stashing.exclusive & 0x7) == 0)
++ seq_puts(file, " ctx_a_stash_exclusive: None\n");
++ else {
++ seq_puts(file, " ctx_a_stash_exclusive:\n");
++ i = 0;
++ while (stashing_text_list[i].txt != NULL) {
++ if ((fqd.fq_ctrl & 0x7) & stashing_text_list[i].mask)
++ seq_printf(file, " %s\n",
++ stashing_text_list[i].txt);
++ i++;
++ }
++ }
++ seq_printf(file, " ctx_a_stash_annotation_cl: %u\n",
++ fqd.context_a.stashing.annotation_cl);
++ seq_printf(file, " ctx_a_stash_data_cl: %u\n",
++ fqd.context_a.stashing.data_cl);
++ seq_printf(file, " ctx_a_stash_context_cl: %u\n",
++ fqd.context_a.stashing.context_cl);
++ return 0;
++}
++
++static int query_fq_fields_open(struct inode *inode,
++ struct file *file)
++{
++ return single_open(file, query_fq_fields_show, NULL);
++}
++
++static ssize_t query_fq_fields_write(struct file *f,
++ const char __user *buf, size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ if (val > MAX_FQID)
++ return -EINVAL;
++ query_fq_fields_data.fqid = (u32)val;
++ return count;
++}
++
++static const struct file_operations query_fq_fields_fops = {
++ .owner = THIS_MODULE,
++ .open = query_fq_fields_open,
++ .read = seq_read,
++ .write = query_fq_fields_write,
++ .release = single_release,
++};
++
++/*******************************************************************************
++ * Query WQ lengths
++ ******************************************************************************/
++struct query_wq_lengths_data_s {
++ union {
++ u16 channel_wq; /* ignores wq (3 lsbits) */
++ struct {
++ u16 id:13; /* qm_channel */
++ u16 __reserved:3;
++ } __packed channel;
++ };
++};
++static struct query_wq_lengths_data_s query_wq_lengths_data;
++static int query_wq_lengths_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ struct qm_mcr_querywq wq;
++ int i;
++
++ memset(&wq, 0, sizeof(struct qm_mcr_querywq));
++ wq.channel.id = query_wq_lengths_data.channel.id;
++ ret = qman_query_wq(0, &wq);
++ if (ret)
++ return ret;
++ seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id);
++ for (i = 0; i < 8; i++)
++ /* mask out upper 4 bits since they are not part of length */
++ seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff);
++ return 0;
++}
++
++static int query_wq_lengths_open(struct inode *inode,
++ struct file *file)
++{
++ return single_open(file, query_wq_lengths_show, NULL);
++}
++
++static ssize_t query_wq_lengths_write(struct file *f,
++ const char __user *buf, size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ if (val > 0xfff8)
++ return -EINVAL;
++ query_wq_lengths_data.channel.id = (u16)val;
++ return count;
++}
++
++static const struct file_operations query_wq_lengths_fops = {
++ .owner = THIS_MODULE,
++ .open = query_wq_lengths_open,
++ .read = seq_read,
++ .write = query_wq_lengths_write,
++ .release = single_release,
++};
++
++/*******************************************************************************
++ * Query CGR
++ ******************************************************************************/
++struct query_cgr_s {
++ u8 cgid;
++};
++static struct query_cgr_s query_cgr_data;
++
++static int query_cgr_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ struct qm_mcr_querycgr cgrd;
++ struct qman_cgr cgr;
++ int i, j;
++ u32 mask;
++
++ memset(&cgr, 0, sizeof(cgr));
++ memset(&cgrd, 0, sizeof(cgrd));
++ cgr.cgrid = query_cgr_data.cgid;
++ ret = qman_query_cgr(&cgr, &cgrd);
++ if (ret)
++ return ret;
++ seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid);
++ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
++ cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn,
++ cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn,
++ cgrd.cgr.wr_parm_g.Pn);
++
++ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
++ cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn,
++ cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn,
++ cgrd.cgr.wr_parm_y.Pn);
++
++ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
++ cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn,
++ cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn,
++ cgrd.cgr.wr_parm_r.Pn);
++
++ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
++ cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r);
++
++ seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en);
++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
++ seq_puts(file, " cscn_targ_dcp:\n");
++ mask = 0x80000000;
++ for (i = 0; i < 32; i++) {
++ if (cgrd.cgr.cscn_targ & mask)
++ seq_printf(file, " send CSCN to dcp %u\n",
++ (31 - i));
++ mask >>= 1;
++ }
++
++ seq_puts(file, " cscn_targ_swp:\n");
++ for (i = 0; i < 4; i++) {
++ mask = 0x80000000;
++ for (j = 0; j < 32; j++) {
++ if (cgrd.cscn_targ_swp[i] & mask)
++ seq_printf(file, " send CSCN to swp"
++ " %u\n", (127 - (i * 32) - j));
++ mask >>= 1;
++ }
++ }
++ } else {
++ seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ);
++ }
++ seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en);
++ seq_printf(file, " cs: %u\n", cgrd.cgr.cs);
++
++ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
++ cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn);
++
++ seq_printf(file, " mode: %s\n",
++ (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ?
++ "frame count" : "byte count");
++ seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd));
++ seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd));
++
++ return 0;
++}
++
++static int query_cgr_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, query_cgr_show, NULL);
++}
++
++static ssize_t query_cgr_write(struct file *f, const char __user *buf,
++ size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ if (val > 0xff)
++ return -EINVAL;
++ query_cgr_data.cgid = (u8)val;
++ return count;
++}
++
++static const struct file_operations query_cgr_fops = {
++ .owner = THIS_MODULE,
++ .open = query_cgr_open,
++ .read = seq_read,
++ .write = query_cgr_write,
++ .release = single_release,
++};
++
++/*******************************************************************************
++ * Test Write CGR
++ ******************************************************************************/
++struct test_write_cgr_s {
++ u64 i_bcnt;
++ u8 cgid;
++};
++static struct test_write_cgr_s test_write_cgr_data;
++
++static int testwrite_cgr_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ struct qm_mcr_cgrtestwrite result;
++ struct qman_cgr cgr;
++ u64 i_bcnt;
++
++ memset(&cgr, 0, sizeof(struct qman_cgr));
++ memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite));
++ cgr.cgrid = test_write_cgr_data.cgid;
++ i_bcnt = test_write_cgr_data.i_bcnt;
++ ret = qman_testwrite_cgr(&cgr, i_bcnt, &result);
++ if (ret)
++ return ret;
++ seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid);
++ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
++ result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn,
++ result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn,
++ result.cgr.wr_parm_g.Pn);
++ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
++ result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn,
++ result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn,
++ result.cgr.wr_parm_y.Pn);
++ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
++ result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn,
++ result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn,
++ result.cgr.wr_parm_r.Pn);
++ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
++ result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r);
++ seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en);
++ seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ);
++ seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en);
++ seq_printf(file, " cs: %u\n", result.cgr.cs);
++ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
++ result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn);
++
++ /* Add Mode for Si 2 */
++ seq_printf(file, " mode: %s\n",
++ (result.cgr.mode & QMAN_CGR_MODE_FRAME) ?
++ "frame count" : "byte count");
++
++ seq_printf(file, " i_bcnt: %llu\n",
++ qm_mcr_cgrtestwrite_i_get64(&result));
++ seq_printf(file, " a_bcnt: %llu\n",
++ qm_mcr_cgrtestwrite_a_get64(&result));
++ seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g);
++ seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y);
++ seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r);
++ return 0;
++}
++
++static int testwrite_cgr_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, testwrite_cgr_show, NULL);
++}
++
++static const struct file_operations testwrite_cgr_fops = {
++ .owner = THIS_MODULE,
++ .open = testwrite_cgr_open,
++ .read = seq_read,
++ .release = single_release,
++};
++
++
++static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset)
++{
++ seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt);
++ return 0;
++}
++static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, testwrite_cgr_ibcnt_show, NULL);
++}
++
++static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf,
++ size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ test_write_cgr_data.i_bcnt = val;
++ return count;
++}
++
++static const struct file_operations teswrite_cgr_ibcnt_fops = {
++ .owner = THIS_MODULE,
++ .open = testwrite_cgr_ibcnt_open,
++ .read = seq_read,
++ .write = testwrite_cgr_ibcnt_write,
++ .release = single_release,
++};
++
++static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset)
++{
++ seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid);
++ return 0;
++}
++static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, testwrite_cgr_cgrid_show, NULL);
++}
++
++static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf,
++ size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ if (val > 0xff)
++ return -EINVAL;
++ test_write_cgr_data.cgid = (u8)val;
++ return count;
++}
++
++static const struct file_operations teswrite_cgr_cgrid_fops = {
++ .owner = THIS_MODULE,
++ .open = testwrite_cgr_cgrid_open,
++ .read = seq_read,
++ .write = testwrite_cgr_cgrid_write,
++ .release = single_release,
++};
++
++/*******************************************************************************
++ * Query Congestion State
++ ******************************************************************************/
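++/* Walk the 256-bit congestion state vector (eight 32-bit words, one bit per
++ * congestion group) and list every group currently in congestion. */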
++static int query_congestion_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ struct qm_mcr_querycongestion cs;
++ int i, j, in_cong = 0;
++ u32 mask;
++
++ memset(&cs, 0, sizeof(struct qm_mcr_querycongestion));
++ ret = qman_query_congestion(&cs);
++ if (ret)
++ return ret;
++ seq_puts(file, "Query Congestion Result\n");
++ for (i = 0; i < 8; i++) {
++ mask = 0x80000000;
++ for (j = 0; j < 32; j++) {
++ if (cs.state.__state[i] & mask) {
++ in_cong = 1;
++ seq_printf(file, " cg %u: %s\n", (i*32)+j,
++ "in congestion");
++ }
++ mask >>= 1;
++ }
++ }
++ if (!in_cong)
++ seq_puts(file, " All congestion groups not congested.\n");
++ return 0;
++}
++
++static int query_congestion_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, query_congestion_show, NULL);
++}
++
++static const struct file_operations query_congestion_fops = {
++ .owner = THIS_MODULE,
++ .open = query_congestion_open,
++ .read = seq_read,
++ .release = single_release,
++};
++
++/*******************************************************************************
++ * Query CCGR
++ ******************************************************************************/
++struct query_ccgr_s {
++ u32 ccgid;
++};
++static struct query_ccgr_s query_ccgr_data;
++
++static int query_ccgr_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ struct qm_mcr_ceetm_ccgr_query ccgr_query;
++ struct qm_mcc_ceetm_ccgr_query query_opts;
++ int i, j;
++ u32 mask;
++
++ memset(&ccgr_query, 0, sizeof(struct qm_mcr_ceetm_ccgr_query));
++ memset(&query_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_query));
++
++ if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
++ return -EINVAL;
++
++ seq_printf(file, "Query CCGID %x\n", query_ccgr_data.ccgid);
++ query_opts.dcpid = ((query_ccgr_data.ccgid & 0xFF000000) >> 24);
++ query_opts.ccgrid = query_ccgr_data.ccgid & 0x000001FF;
++ ret = qman_ceetm_query_ccgr(&query_opts, &ccgr_query);
++ if (ret)
++ return ret;
++ seq_printf(file, "Query CCGR id %x in DCP %d\n", query_opts.ccgrid,
++ query_opts.dcpid);
++ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
++ ccgr_query.cm_query.wr_parm_g.MA,
++ ccgr_query.cm_query.wr_parm_g.Mn,
++ ccgr_query.cm_query.wr_parm_g.SA,
++ ccgr_query.cm_query.wr_parm_g.Sn,
++ ccgr_query.cm_query.wr_parm_g.Pn);
++
++ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
++ ccgr_query.cm_query.wr_parm_y.MA,
++ ccgr_query.cm_query.wr_parm_y.Mn,
++ ccgr_query.cm_query.wr_parm_y.SA,
++ ccgr_query.cm_query.wr_parm_y.Sn,
++ ccgr_query.cm_query.wr_parm_y.Pn);
++
++ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
++ ccgr_query.cm_query.wr_parm_r.MA,
++ ccgr_query.cm_query.wr_parm_r.Mn,
++ ccgr_query.cm_query.wr_parm_r.SA,
++ ccgr_query.cm_query.wr_parm_r.Sn,
++ ccgr_query.cm_query.wr_parm_r.Pn);
++
++ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
++ ccgr_query.cm_query.ctl_wr_en_g,
++ ccgr_query.cm_query.ctl_wr_en_y,
++ ccgr_query.cm_query.ctl_wr_en_r);
++
++ seq_printf(file, " cscn_en: %u\n", ccgr_query.cm_query.ctl_cscn_en);
++ seq_puts(file, " cscn_targ_dcp:\n");
++ mask = 0x80000000;
++ for (i = 0; i < 32; i++) {
++ if (ccgr_query.cm_query.cscn_targ_dcp & mask)
++ seq_printf(file, " send CSCN to dcp %u\n", (31 - i));
++ mask >>= 1;
++ }
++
++ seq_puts(file, " cscn_targ_swp:\n");
++ for (i = 0; i < 4; i++) {
++ mask = 0x80000000;
++ for (j = 0; j < 32; j++) {
++ if (ccgr_query.cm_query.cscn_targ_swp[i] & mask)
++ seq_printf(file, " send CSCN to swp"
++ "%u\n", (127 - (i * 32) - j));
++ mask >>= 1;
++ }
++ }
++
++ seq_printf(file, " td_en: %u\n", ccgr_query.cm_query.ctl_td_en);
++
++ seq_printf(file, " cs_thresh_in_TA: %u, cs_thresh_in_Tn: %u\n",
++ ccgr_query.cm_query.cs_thres.TA,
++ ccgr_query.cm_query.cs_thres.Tn);
++
++ seq_printf(file, " cs_thresh_out_TA: %u, cs_thresh_out_Tn: %u\n",
++ ccgr_query.cm_query.cs_thres_x.TA,
++ ccgr_query.cm_query.cs_thres_x.Tn);
++
++ seq_printf(file, " td_thresh_TA: %u, td_thresh_Tn: %u\n",
++ ccgr_query.cm_query.td_thres.TA,
++ ccgr_query.cm_query.td_thres.Tn);
++
++ seq_printf(file, " mode: %s\n",
++ (ccgr_query.cm_query.ctl_mode &
++ QMAN_CGR_MODE_FRAME) ?
++ "frame count" : "byte count");
++ seq_printf(file, " i_cnt: %llu\n", (u64)ccgr_query.cm_query.i_cnt);
++ seq_printf(file, " a_cnt: %llu\n", (u64)ccgr_query.cm_query.a_cnt);
++
++ return 0;
++}
++
++static int query_ccgr_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, query_ccgr_show, NULL);
++}
++
++static ssize_t query_ccgr_write(struct file *f, const char __user *buf,
++ size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ query_ccgr_data.ccgid = val;
++ return count;
++}
++
++static const struct file_operations query_ccgr_fops = {
++ .owner = THIS_MODULE,
++ .open = query_ccgr_open,
++ .read = seq_read,
++ .write = query_ccgr_write,
++ .release = single_release,
++};
++/*******************************************************************************
++ * QMan register
++ ******************************************************************************/
++struct qman_register_s {
++ u32 val;
++};
++static struct qman_register_s qman_register_data;
++
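++/* Cache the physical base and size of the QMan CCSR window from the device
++ * tree so qman_ccsrmempeek() can ioremap() individual registers on demand. */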
++static void init_ccsrmempeek(void)
++{
++ struct device_node *dn;
++ const u32 *regaddr_p;
++
++ dn = of_find_compatible_node(NULL, NULL, "fsl,qman");
++ if (!dn) {
++ pr_info("No fsl,qman node\n");
++ return;
++ }
++ regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL);
++ if (!regaddr_p) {
++ of_node_put(dn);
++ return;
++ }
++ qman_ccsr_start = of_translate_address(dn, regaddr_p);
++ of_node_put(dn);
++}
++/* This function provides access to QMan ccsr memory map */
++static int qman_ccsrmempeek(u32 *val, u32 offset)
++{
++ void __iomem *addr;
++ u64 phys_addr;
++
++ if (!qman_ccsr_start)
++ return -EINVAL;
++
++ if (offset > (qman_ccsr_size - sizeof(u32)))
++ return -EINVAL;
++
++ phys_addr = qman_ccsr_start + offset;
++ addr = ioremap(phys_addr, sizeof(u32));
++ if (!addr) {
++ pr_err("ccsrmempeek, ioremap failed\n");
++ return -EINVAL;
++ }
++ *val = in_be32(addr);
++ iounmap(addr);
++ return 0;
++}
++
++static int qman_ccsrmempeek_show(struct seq_file *file, void *offset)
++{
++ u32 b;
++
++ qman_ccsrmempeek(&b, qman_register_data.val);
++ seq_printf(file, "QMan register offset = 0x%x\n",
++ qman_register_data.val);
++ seq_printf(file, "value = 0x%08x\n", b);
++
++ return 0;
++}
++
++static int qman_ccsrmempeek_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, qman_ccsrmempeek_show, NULL);
++}
++
++static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf,
++ size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++	if (val > (qman_ccsr_size - sizeof(u32))) {
++		pr_info("Input 0x%lx > 0x%llx\n",
++			val, (qman_ccsr_size - sizeof(u32)));
++		return -EINVAL;
++	}
++	/* offset must be a multiple of 4 */
++	if (val & 0x3) {
++ pr_info("Input 0x%lx not multiple of 4\n", val);
++ return -EINVAL;
++ }
++ qman_register_data.val = val;
++ return count;
++}
++
++static const struct file_operations qman_ccsrmempeek_fops = {
++ .owner = THIS_MODULE,
++ .open = qman_ccsrmempeek_open,
++ .read = seq_read,
++ .write = qman_ccsrmempeek_write,
++};
++
++/*******************************************************************************
++ * QMan state
++ ******************************************************************************/
++static int qman_fqd_state_show(struct seq_file *file, void *offset)
++{
++ struct qm_mcr_queryfq_np np;
++ struct qman_fq fq;
++ struct line_buffer_fq line_buf;
++ int ret, i;
++ u8 *state = file->private;
++ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
++
++ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
++ memset(&line_buf, 0, sizeof(line_buf));
++
++ seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]);
++
++ for (i = 1; i < fqid_max; i++) {
++ fq.fqid = i;
++ ret = qman_query_fq_np(&fq, &np);
++ if (ret)
++ return ret;
++ if (*state == (np.state & QM_MCR_NP_STATE_MASK))
++ add_to_line_buffer(&line_buf, fq.fqid, file);
++ /* Keep a summary count of all states */
++ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
++ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
++ }
++ flush_line_buffer(&line_buf, file);
++
++ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
++ seq_printf(file, "%s count = %u\n", state_txt[i],
++ qm_fq_state_cnt[i]);
++ }
++ return 0;
++}
++
++static int qman_fqd_state_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, qman_fqd_state_show, inode->i_private);
++}
++
++static const struct file_operations qman_fqd_state_fops = {
++ .owner = THIS_MODULE,
++ .open = qman_fqd_state_open,
++ .read = seq_read,
++};
++
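++/* List the FQIDs whose fq_ctrl bit (passed via file->private) matches the
++ * requested filter, then print enabled/disabled totals. */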
++static int qman_fqd_ctrl_show(struct seq_file *file, void *offset)
++{
++ struct qm_fqd fqd;
++ struct qman_fq fq;
++ u32 fq_en_cnt = 0, fq_di_cnt = 0;
++ int ret, i;
++ struct mask_filter_s *data = file->private;
++ const char *ctrl_txt = get_fqd_ctrl_text(data->mask);
++ struct line_buffer_fq line_buf;
++
++ memset(&line_buf, 0, sizeof(line_buf));
++ seq_printf(file, "List of fq ids with: %s :%s\n",
++ ctrl_txt, (data->filter) ? "enabled" : "disabled");
++ for (i = 1; i < fqid_max; i++) {
++ fq.fqid = i;
++ memset(&fqd, 0, sizeof(struct qm_fqd));
++ ret = qman_query_fq(&fq, &fqd);
++ if (ret)
++ return ret;
++ if (data->filter) {
++ if (fqd.fq_ctrl & data->mask)
++ add_to_line_buffer(&line_buf, fq.fqid, file);
++ } else {
++ if (!(fqd.fq_ctrl & data->mask))
++ add_to_line_buffer(&line_buf, fq.fqid, file);
++ }
++ if (fqd.fq_ctrl & data->mask)
++ fq_en_cnt++;
++ else
++ fq_di_cnt++;
++ }
++ flush_line_buffer(&line_buf, file);
++
++ seq_printf(file, "Total FQD with: %s : enabled = %u\n",
++ ctrl_txt, fq_en_cnt);
++ seq_printf(file, "Total FQD with: %s : disabled = %u\n",
++ ctrl_txt, fq_di_cnt);
++ return 0;
++}
++
++/*******************************************************************************
++ * QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE
++ ******************************************************************************/
++static int qman_fqd_ctrl_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, qman_fqd_ctrl_show, inode->i_private);
++}
++
++static const struct file_operations qman_fqd_ctrl_fops = {
++ .owner = THIS_MODULE,
++ .open = qman_fqd_ctrl_open,
++ .read = seq_read,
++};
++
++/*******************************************************************************
++ * QMan ctrl and state summary
++ ******************************************************************************/
++static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset)
++{
++ struct qm_mcr_queryfq_np np;
++ struct qman_fq fq;
++ int ret, i;
++ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
++
++ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
++
++ for (i = 1; i < fqid_max; i++) {
++ fq.fqid = i;
++ ret = qman_query_fq_np(&fq, &np);
++ if (ret)
++ return ret;
++ /* Keep a summary count of all states */
++ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
++ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
++ seq_printf(file, "%s count = %u\n", state_txt[i],
++ qm_fq_state_cnt[i]);
++ }
++ return 0;
++}
++
++static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset)
++{
++ struct qm_fqd fqd;
++ struct qman_fq fq;
++	int ret, i, j;
++ u32 qm_prog_cnt[ARRAY_SIZE(mask_filter)/2];
++
++ memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt));
++
++ for (i = 1; i < fqid_max; i++) {
++ memset(&fqd, 0, sizeof(struct qm_fqd));
++ fq.fqid = i;
++ ret = qman_query_fq(&fq, &fqd);
++ if (ret)
++ return ret;
++ /* Keep a summary count of all states */
++ for (j = 0; j < ARRAY_SIZE(mask_filter); j += 2)
++ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
++ mask_filter[j].mask)
++ qm_prog_cnt[j/2]++;
++ }
++ for (i = 0; i < ARRAY_SIZE(mask_filter) / 2; i++) {
++ seq_printf(file, "%s count = %u\n",
++ get_fqd_ctrl_text(mask_filter[i*2].mask),
++ qm_prog_cnt[i]);
++ }
++ return 0;
++}
++
++static int qman_fqd_summary_show(struct seq_file *file, void *offset)
++{
++ int ret;
++
++ /* Display summary of non programmable fields */
++ ret = qman_fqd_non_prog_summary_show(file, offset);
++ if (ret)
++ return ret;
++ seq_puts(file, "-----------------------------------------\n");
++ /* Display programmable fields */
++ ret = qman_fqd_prog_summary_show(file, offset);
++ if (ret)
++ return ret;
++ return 0;
++}
++
++static int qman_fqd_summary_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, qman_fqd_summary_show, NULL);
++}
++
++static const struct file_operations qman_fqd_summary_fops = {
++ .owner = THIS_MODULE,
++ .open = qman_fqd_summary_open,
++ .read = seq_read,
++};
++
++/*******************************************************************************
++ * QMan destination work queue
++ ******************************************************************************/
++struct qman_dest_wq_s {
++ u16 wq_id;
++};
++static struct qman_dest_wq_s qman_dest_wq_data = {
++ .wq_id = 0,
++};
++
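++/* dest_wq is a 16-bit encoding: a 13-bit channel number in the upper bits
++ * and a 3-bit work queue index in the lower bits, hence the i >> 3 and
++ * i & 0x7 decoding below. */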
++static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset)
++{
++ struct qm_fqd fqd;
++ struct qman_fq fq;
++ int ret, i;
++ u16 *wq, wq_id = qman_dest_wq_data.wq_id;
++ struct line_buffer_fq line_buf;
++
++ memset(&line_buf, 0, sizeof(line_buf));
++	/* Use vmalloc: the region is large and does not need to be
++	 * physically contiguous. */
++ wq = vzalloc(sizeof(u16) * (0xFFFF+1));
++ if (!wq)
++ return -ENOMEM;
++
++ seq_printf(file, "List of fq ids with destination work queue id"
++ " = 0x%x\n", wq_id);
++
++ for (i = 1; i < fqid_max; i++) {
++ fq.fqid = i;
++ memset(&fqd, 0, sizeof(struct qm_fqd));
++ ret = qman_query_fq(&fq, &fqd);
++ if (ret) {
++ vfree(wq);
++ return ret;
++ }
++ if (wq_id == fqd.dest_wq)
++ add_to_line_buffer(&line_buf, fq.fqid, file);
++ wq[fqd.dest_wq]++;
++ }
++ flush_line_buffer(&line_buf, file);
++
++ seq_puts(file, "Summary of all FQD destination work queue values\n");
++	for (i = 0; i <= 0xFFFF; i++) {
++		if (wq[i])
++			seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, "
++				"count = %u\n", i >> 3, i & 0x7, i, wq[i]);
++ }
++ vfree(wq);
++ return 0;
++}
++
++static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf,
++ size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ if (val > 0xFFFF)
++ return -EINVAL;
++ qman_dest_wq_data.wq_id = val;
++ return count;
++}
++
++static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, qman_fqd_dest_wq_show, NULL);
++}
++
++static const struct file_operations qman_fqd_dest_wq_fops = {
++ .owner = THIS_MODULE,
++ .open = qman_fqd_dest_wq_open,
++ .read = seq_read,
++ .write = qman_fqd_dest_wq_write,
++};
++
++/*******************************************************************************
++ * QMan Intra-Class Scheduling Credit
++ ******************************************************************************/
++static int qman_fqd_cred_show(struct seq_file *file, void *offset)
++{
++ struct qm_fqd fqd;
++ struct qman_fq fq;
++ int ret, i;
++ u32 fq_cnt = 0;
++ struct line_buffer_fq line_buf;
++
++ memset(&line_buf, 0, sizeof(line_buf));
++ seq_puts(file, "List of fq ids with Intra-Class Scheduling Credit > 0"
++ "\n");
++
++ for (i = 1; i < fqid_max; i++) {
++ fq.fqid = i;
++ memset(&fqd, 0, sizeof(struct qm_fqd));
++ ret = qman_query_fq(&fq, &fqd);
++ if (ret)
++ return ret;
++ if (fqd.ics_cred > 0) {
++ add_to_line_buffer(&line_buf, fq.fqid, file);
++ fq_cnt++;
++ }
++ }
++ flush_line_buffer(&line_buf, file);
++
++ seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt);
++ return 0;
++}
++
++static int qman_fqd_cred_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, qman_fqd_cred_show, NULL);
++}
++
++static const struct file_operations qman_fqd_cred_fops = {
++ .owner = THIS_MODULE,
++ .open = qman_fqd_cred_open,
++ .read = seq_read,
++};
++
++/*******************************************************************************
++ * Class Queue Fields
++ ******************************************************************************/
++struct query_cq_fields_data_s {
++ u32 cqid;
++};
++
++static struct query_cq_fields_data_s query_cq_fields_data = {
++ .cqid = 1,
++};
++
++static int query_cq_fields_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ struct qm_mcr_ceetm_cq_query query_result;
++ unsigned int cqid;
++ unsigned int portal;
++
++ if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
++ return -EINVAL;
++
++ cqid = query_cq_fields_data.cqid & 0x00FFFFFF;
++ portal = query_cq_fields_data.cqid >> 24;
++ if (portal > qm_dc_portal_fman1)
++ return -EINVAL;
++
++ ret = qman_ceetm_query_cq(cqid, portal, &query_result);
++ if (ret)
++ return ret;
++ seq_printf(file, "Query CQ Fields Result cqid 0x%x on DCP %d\n",
++ cqid, portal);
++ seq_printf(file, " ccgid: %u\n", query_result.ccgid);
++ seq_printf(file, " state: %u\n", query_result.state);
++ seq_printf(file, " pfdr_hptr: %u\n", query_result.pfdr_hptr);
++ seq_printf(file, " pfdr_tptr: %u\n", query_result.pfdr_tptr);
++ seq_printf(file, " od1_xsfdr: %u\n", query_result.od1_xsfdr);
++ seq_printf(file, " od2_xsfdr: %u\n", query_result.od2_xsfdr);
++ seq_printf(file, " od3_xsfdr: %u\n", query_result.od3_xsfdr);
++ seq_printf(file, " od4_xsfdr: %u\n", query_result.od4_xsfdr);
++ seq_printf(file, " od5_xsfdr: %u\n", query_result.od5_xsfdr);
++ seq_printf(file, " od6_xsfdr: %u\n", query_result.od6_xsfdr);
++ seq_printf(file, " ra1_xsfdr: %u\n", query_result.ra1_xsfdr);
++ seq_printf(file, " ra2_xsfdr: %u\n", query_result.ra2_xsfdr);
++ seq_printf(file, " frame_count: %u\n", query_result.frm_cnt);
++
++ return 0;
++}
++
++static int query_cq_fields_open(struct inode *inode,
++ struct file *file)
++{
++ return single_open(file, query_cq_fields_show, NULL);
++}
++
++static ssize_t query_cq_fields_write(struct file *f,
++ const char __user *buf, size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ query_cq_fields_data.cqid = (u32)val;
++ return count;
++}
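++
++/*
++ * The value written above packs the target DCP portal index into bits 31:24
++ * and the CQ id into bits 23:0, matching the decode at the top of
++ * query_cq_fields_show().
++ */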
++
++static const struct file_operations query_cq_fields_fops = {
++ .owner = THIS_MODULE,
++ .open = query_cq_fields_open,
++ .read = seq_read,
++ .write = query_cq_fields_write,
++ .release = single_release,
++};
++
++/*******************************************************************************
++ * READ CEETM_XSFDR_IN_USE
++ ******************************************************************************/
++struct query_ceetm_xsfdr_data_s {
++ enum qm_dc_portal dcp_portal;
++};
++
++static struct query_ceetm_xsfdr_data_s query_ceetm_xsfdr_data;
++
++static int query_ceetm_xsfdr_show(struct seq_file *file, void *offset)
++{
++ int ret;
++ unsigned int xsfdr_in_use;
++ enum qm_dc_portal portal;
++
++ if (qman_ip_rev < QMAN_REV31)
++ return -EINVAL;
++
++ portal = query_ceetm_xsfdr_data.dcp_portal;
++ ret = qman_ceetm_get_xsfdr(portal, &xsfdr_in_use);
++ if (ret) {
++ seq_printf(file, "Read CEETM_XSFDR_IN_USE on DCP %d failed\n",
++ portal);
++ return ret;
++ }
++
++ seq_printf(file, "DCP%d: CEETM_XSFDR_IN_USE number is %u\n", portal,
++ (xsfdr_in_use & 0x1FFF));
++ return 0;
++}
++
++static int query_ceetm_xsfdr_open(struct inode *inode,
++ struct file *file)
++{
++ return single_open(file, query_ceetm_xsfdr_show, NULL);
++}
++
++static ssize_t query_ceetm_xsfdr_write(struct file *f,
++ const char __user *buf, size_t count, loff_t *off)
++{
++ int ret;
++ unsigned long val;
++
++ ret = user_input_convert(buf, count, &val);
++ if (ret)
++ return ret;
++ if (val > qm_dc_portal_fman1)
++ return -EINVAL;
++ query_ceetm_xsfdr_data.dcp_portal = (u32)val;
++ return count;
++}
++
++static const struct file_operations query_ceetm_xsfdr_fops = {
++ .owner = THIS_MODULE,
++ .open = query_ceetm_xsfdr_open,
++ .read = seq_read,
++ .write = query_ceetm_xsfdr_write,
++ .release = single_release,
++};
++
++/* helper macros used in qman_debugfs_module_init */
++#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \
++ do { \
++ d = debugfs_create_file(name, \
++ mode, parent, \
++ data, \
++ fops); \
++ if (d == NULL) { \
++ ret = -ENOMEM; \
++ goto _return; \
++ } \
++ } while (0)
++
++/* dfs_root as parent */
++#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \
++ QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops)
++
++/* fqd_root as parent */
++#define QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \
++ QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops)
++
++/* fqd state */
++#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \
++ QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \
++ (void *)&mask_filter[index], &qman_fqd_ctrl_fops)
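++
++/*
++ * The FQD control entries created with this macro come in enable/disable
++ * pairs: the enable file uses mask_filter index 2n + 1 and its disable
++ * counterpart uses 2n (e.g. cge_enable/cge_disable at indices 17/16).
++ */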
++
++static int __init qman_debugfs_module_init(void)
++{
++ int ret = 0;
++ struct dentry *d, *fqd_root;
++ u32 reg;
++
++ fqid_max = 0;
++ init_ccsrmempeek();
++ if (qman_ccsr_start) {
++ if (!qman_ccsrmempeek(&reg, QM_FQD_AR)) {
++ /* extract the size of the FQD window */
++ reg = reg & 0x3f;
++ /* calculate valid frame queue descriptor range */
++ fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE;
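++			/*
++			 * Illustration (assuming the architectural 64-byte
++			 * FQD block size): an AR size field of 25 denotes a
++			 * 2^26-byte (64 MiB) window, giving 0x100000 valid
++			 * FQIDs.
++			 */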
++ }
++ }
++ dfs_root = debugfs_create_dir("qman", NULL);
++ fqd_root = debugfs_create_dir("fqd", dfs_root);
++ if (dfs_root == NULL || fqd_root == NULL) {
++ ret = -ENOMEM;
++ pr_err("Cannot create qman/fqd debugfs dir\n");
++ goto _return;
++ }
++ if (fqid_max) {
++ QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO,
++ NULL, &qman_ccsrmempeek_fops);
++ }
++ QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO,
++ &query_fq_np_fields_data, &query_fq_np_fields_fops);
++
++ QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO,
++ &query_fq_fields_data, &query_fq_fields_fops);
++
++ QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO,
++ &query_wq_lengths_data, &query_wq_lengths_fops);
++
++ QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO,
++ &query_cgr_data, &query_cgr_fops);
++
++ QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO,
++ NULL, &query_congestion_fops);
++
++ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO,
++ NULL, &testwrite_cgr_fops);
++
++ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO,
++ NULL, &teswrite_cgr_cgrid_fops);
++
++ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO,
++ NULL, &teswrite_cgr_ibcnt_fops);
++
++ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_ccgr", S_IRUGO | S_IWUGO,
++ &query_ccgr_data, &query_ccgr_fops);
++ /* Create files with fqd_root as parent */
++
++ QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO,
++ (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops);
++
++ QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO,
++ (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED],
++ &qman_fqd_state_fops);
++
++ QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO,
++ (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED],
++ &qman_fqd_state_fops);
++
++ QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO,
++ (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED],
++ &qman_fqd_state_fops);
++
++ QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO,
++ (void *)&fqd_states[QM_MCR_NP_STATE_PARKED],
++ &qman_fqd_state_fops);
++
++ QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO,
++ (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE],
++ &qman_fqd_state_fops);
++ QMAN_DBGFS_ENTRY_ROOT("query_cq_fields", S_IRUGO | S_IWUGO,
++ &query_cq_fields_data, &query_cq_fields_fops);
++ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_xsfdr_in_use", S_IRUGO | S_IWUGO,
++ &query_ceetm_xsfdr_data, &query_ceetm_xsfdr_fops);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1);
++
++ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0);
++
++ QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO,
++ NULL, &qman_fqd_summary_fops);
++
++ QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO,
++ NULL, &qman_fqd_dest_wq_fops);
++
++ QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO,
++ NULL, &qman_fqd_cred_fops);
++
++ return 0;
++
++_return:
++ debugfs_remove_recursive(dfs_root);
++ return ret;
++}
++
++static void __exit qman_debugfs_module_exit(void)
++{
++ debugfs_remove_recursive(dfs_root);
++}
++
++module_init(qman_debugfs_module_init);
++module_exit(qman_debugfs_module_exit);
++MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/staging/fsl_qbman/qman_driver.c b/drivers/staging/fsl_qbman/qman_driver.c
+new file mode 100644
+index 00000000..857ecd62
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_driver.c
+@@ -0,0 +1,977 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qman_private.h"
++
++#include <asm/smp.h> /* hard_smp_processor_id() if !CONFIG_SMP */
++#ifdef CONFIG_HOTPLUG_CPU
++#include <linux/cpu.h>
++#endif
++
++/* Global variable containing revision id (even on non-control plane systems
++ * where CCSR isn't available) */
++u16 qman_ip_rev;
++EXPORT_SYMBOL(qman_ip_rev);
++u8 qman_ip_cfg;
++EXPORT_SYMBOL(qman_ip_cfg);
++u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
++EXPORT_SYMBOL(qm_channel_pool1);
++u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
++EXPORT_SYMBOL(qm_channel_caam);
++u16 qm_channel_pme = QMAN_CHANNEL_PME;
++EXPORT_SYMBOL(qm_channel_pme);
++u16 qm_channel_dce = QMAN_CHANNEL_DCE;
++EXPORT_SYMBOL(qm_channel_dce);
++u16 qman_portal_max;
++EXPORT_SYMBOL(qman_portal_max);
++
++u32 qman_clk;
++struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
++/* the qman ceetm instances on the given SoC */
++u8 num_ceetms;
++
++/* For these variables, and the portal-initialisation logic, the
++ * comments in bman_driver.c apply here so won't be repeated. */
++static struct qman_portal *shared_portals[NR_CPUS];
++static int num_shared_portals;
++static int shared_portals_idx;
++static LIST_HEAD(unused_pcfgs);
++static DEFINE_SPINLOCK(unused_pcfgs_lock);
++
++/* A SDQCR mask comprising all the available/visible pool channels */
++static u32 pools_sdqcr;
++
++#define STR_ERR_NOPROP "No '%s' property in node %s\n"
++#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
++#define STR_FQID_RANGE "fsl,fqid-range"
++#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
++#define STR_CGRID_RANGE "fsl,cgrid-range"
++
++/* A "fsl,fqid-range" node; release the given range to the allocator */
++static __init int fsl_fqid_range_init(struct device_node *node)
++{
++ int ret;
++ const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
++ if (!range) {
++ pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
++ return -EINVAL;
++ }
++ if (ret != 8) {
++ pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
++ return -EINVAL;
++ }
++ qman_seed_fqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ pr_info("Qman: FQID allocator includes range %d:%d\n",
++ be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ return 0;
++}
++
++/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
++static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
++{
++ int ret;
++ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
++ if (!chanid) {
++ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
++ return -EINVAL;
++ }
++ if (ret != 8) {
++		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
++ return -EINVAL;
++ }
++ for (ret = 0; ret < be32_to_cpu(chanid[1]); ret++)
++ pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(be32_to_cpu(chanid[0]) + ret);
++ return 0;
++}
++
++/* A "fsl,pool-channel-range" node; release the given range to the allocator */
++static __init int fsl_pool_channel_range_init(struct device_node *node)
++{
++ int ret;
++ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
++ if (!chanid) {
++ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
++ return -EINVAL;
++ }
++ if (ret != 8) {
++		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
++ return -EINVAL;
++ }
++ qman_seed_pool_range(be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
++ pr_info("Qman: pool channel allocator includes range %d:%d\n",
++ be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
++ return 0;
++}
++
++/* A "fsl,cgrid-range" node; release the given range to the allocator */
++static __init int fsl_cgrid_range_init(struct device_node *node)
++{
++ struct qman_cgr cgr;
++ int ret, errors = 0;
++ const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
++ if (!range) {
++ pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
++ return -EINVAL;
++ }
++ if (ret != 8) {
++ pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
++ return -EINVAL;
++ }
++ qman_seed_cgrid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ pr_info("Qman: CGRID allocator includes range %d:%d\n",
++ be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
++ ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
++ if (ret)
++ errors++;
++ }
++ if (errors)
++ pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
++			errors, (errors > 1) ? "s" : "",
++			be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ return 0;
++}
++
++static __init int fsl_ceetm_init(struct device_node *node)
++{
++ enum qm_dc_portal dcp_portal;
++ struct qm_ceetm_sp *sp;
++ struct qm_ceetm_lni *lni;
++ int ret, i;
++ const u32 *range;
++
++ /* Find LFQID range */
++ range = of_get_property(node, "fsl,ceetm-lfqid-range", &ret);
++ if (!range) {
++ pr_err("No fsl,ceetm-lfqid-range in node %s\n",
++ node->full_name);
++ return -EINVAL;
++ }
++ if (ret != 8) {
++ pr_err("fsl,ceetm-lfqid-range is not a 2-cell range in node"
++ " %s\n", node->full_name);
++ return -EINVAL;
++ }
++
++ dcp_portal = (be32_to_cpu(range[0]) & 0x0F0000) >> 16;
++ if (dcp_portal > qm_dc_portal_fman1) {
++ pr_err("The DCP portal %d doesn't support CEETM\n", dcp_portal);
++ return -EINVAL;
++ }
++
++ if (dcp_portal == qm_dc_portal_fman0)
++ qman_seed_ceetm0_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ if (dcp_portal == qm_dc_portal_fman1)
++ qman_seed_ceetm1_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ pr_debug("Qman: The lfqid allocator of CEETM %d includes range"
++ " 0x%x:0x%x\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++
++ qman_ceetms[dcp_portal].idx = dcp_portal;
++ INIT_LIST_HEAD(&qman_ceetms[dcp_portal].sub_portals);
++ INIT_LIST_HEAD(&qman_ceetms[dcp_portal].lnis);
++
++ /* Find Sub-portal range */
++ range = of_get_property(node, "fsl,ceetm-sp-range", &ret);
++ if (!range) {
++ pr_err("No fsl,ceetm-sp-range in node %s\n", node->full_name);
++ return -EINVAL;
++ }
++ if (ret != 8) {
++ pr_err("fsl,ceetm-sp-range is not a 2-cell range in node %s\n",
++ node->full_name);
++ return -EINVAL;
++ }
++
++ for (i = 0; i < be32_to_cpu(range[1]); i++) {
++ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
++ if (!sp) {
++ pr_err("Can't alloc memory for sub-portal %d\n",
++				be32_to_cpu(range[0]) + i);
++ return -ENOMEM;
++ }
++ sp->idx = be32_to_cpu(range[0]) + i;
++ sp->dcp_idx = dcp_portal;
++ sp->is_claimed = 0;
++ list_add_tail(&sp->node, &qman_ceetms[dcp_portal].sub_portals);
++ }
++ pr_debug("Qman: Reserve sub-portal %d:%d for CEETM %d\n",
++ be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
++ qman_ceetms[dcp_portal].sp_range[0] = be32_to_cpu(range[0]);
++ qman_ceetms[dcp_portal].sp_range[1] = be32_to_cpu(range[1]);
++
++ /* Find LNI range */
++ range = of_get_property(node, "fsl,ceetm-lni-range", &ret);
++ if (!range) {
++ pr_err("No fsl,ceetm-lni-range in node %s\n", node->full_name);
++ return -EINVAL;
++ }
++ if (ret != 8) {
++ pr_err("fsl,ceetm-lni-range is not a 2-cell range in node %s\n",
++ node->full_name);
++ return -EINVAL;
++ }
++
++ for (i = 0; i < be32_to_cpu(range[1]); i++) {
++ lni = kzalloc(sizeof(*lni), GFP_KERNEL);
++ if (!lni) {
++ pr_err("Can't alloc memory for LNI %d\n",
++				be32_to_cpu(range[0]) + i);
++ return -ENOMEM;
++ }
++ lni->idx = be32_to_cpu(range[0]) + i;
++ lni->dcp_idx = dcp_portal;
++ lni->is_claimed = 0;
++ INIT_LIST_HEAD(&lni->channels);
++ list_add_tail(&lni->node, &qman_ceetms[dcp_portal].lnis);
++ }
++ pr_debug("Qman: Reserve LNI %d:%d for CEETM %d\n",
++ be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
++ qman_ceetms[dcp_portal].lni_range[0] = be32_to_cpu(range[0]);
++ qman_ceetms[dcp_portal].lni_range[1] = be32_to_cpu(range[1]);
++
++ /* Find CEETM channel range */
++ range = of_get_property(node, "fsl,ceetm-channel-range", &ret);
++ if (!range) {
++ pr_err("No fsl,ceetm-channel-range in node %s\n",
++ node->full_name);
++ return -EINVAL;
++ }
++ if (ret != 8) {
++ pr_err("fsl,ceetm-channel-range is not a 2-cell range in node"
++			" %s\n", node->full_name);
++ return -EINVAL;
++ }
++
++ if (dcp_portal == qm_dc_portal_fman0)
++ qman_seed_ceetm0_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ if (dcp_portal == qm_dc_portal_fman1)
++ qman_seed_ceetm1_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++ pr_debug("Qman: The channel allocator of CEETM %d includes"
++ " range %d:%d\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
++
++ /* Set CEETM PRES register */
++ ret = qman_ceetm_set_prescaler(dcp_portal);
++ if (ret)
++ return ret;
++ return 0;
++}
++
++static void qman_get_ip_revision(struct device_node *dn)
++{
++ u16 ip_rev = 0;
++ u8 ip_cfg = QMAN_REV_CFG_0;
++ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
++ if (!of_device_is_available(dn))
++ continue;
++ if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
++ of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
++ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
++ BUG_ON(1);
++ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
++ of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
++ ip_rev = QMAN_REV11;
++ qman_portal_max = 10;
++ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
++ of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
++ ip_rev = QMAN_REV12;
++ qman_portal_max = 10;
++ } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
++ of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
++ ip_rev = QMAN_REV20;
++ qman_portal_max = 3;
++ } else if (of_device_is_compatible(dn,
++ "fsl,qman-portal-3.0.0")) {
++ ip_rev = QMAN_REV30;
++ qman_portal_max = 50;
++ } else if (of_device_is_compatible(dn,
++ "fsl,qman-portal-3.0.1")) {
++ ip_rev = QMAN_REV30;
++ qman_portal_max = 25;
++ ip_cfg = QMAN_REV_CFG_1;
++ } else if (of_device_is_compatible(dn,
++ "fsl,qman-portal-3.1.0")) {
++ ip_rev = QMAN_REV31;
++ qman_portal_max = 50;
++ } else if (of_device_is_compatible(dn,
++ "fsl,qman-portal-3.1.1")) {
++ ip_rev = QMAN_REV31;
++ qman_portal_max = 25;
++ ip_cfg = QMAN_REV_CFG_1;
++ } else if (of_device_is_compatible(dn,
++ "fsl,qman-portal-3.1.2")) {
++ ip_rev = QMAN_REV31;
++ qman_portal_max = 18;
++ ip_cfg = QMAN_REV_CFG_2;
++ } else if (of_device_is_compatible(dn,
++ "fsl,qman-portal-3.1.3")) {
++ ip_rev = QMAN_REV31;
++ qman_portal_max = 10;
++ ip_cfg = QMAN_REV_CFG_3;
++ } else if (of_device_is_compatible(dn,
++ "fsl,qman-portal-3.2.0")) {
++ ip_rev = QMAN_REV32;
++ qman_portal_max = 10;
++			ip_cfg = QMAN_REV_CFG_3; /* TODO: Verify for ls1043 */
++ } else if (of_device_is_compatible(dn,
++ "fsl,qman-portal-3.2.1")) {
++ ip_rev = QMAN_REV32;
++ qman_portal_max = 10;
++ ip_cfg = QMAN_REV_CFG_3;
++ } else {
++ pr_warn("unknown QMan version in portal node,"
++				" default to rev1.1\n");
++ ip_rev = QMAN_REV11;
++ qman_portal_max = 10;
++ }
++
++ if (!qman_ip_rev) {
++ if (ip_rev) {
++ qman_ip_rev = ip_rev;
++ qman_ip_cfg = ip_cfg;
++ } else {
++ pr_warn("unknown Qman version,"
++ " default to rev1.1\n");
++ qman_ip_rev = QMAN_REV11;
++ qman_ip_cfg = QMAN_REV_CFG_0;
++ }
++ } else if (ip_rev && (qman_ip_rev != ip_rev))
++ pr_warn("Revision=0x%04x, but portal '%s' has"
++ " 0x%04x\n",
++ qman_ip_rev, dn->full_name, ip_rev);
++ if (qman_ip_rev == ip_rev)
++ break;
++ }
++}
++
++/* Parse a portal node, perform generic mapping duties and return the config. It
++ * is not known at this stage for what purpose (or even if) the portal will be
++ * used. */
++static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
++{
++ struct qm_portal_config *pcfg;
++ const u32 *index_p;
++ u32 index, channel;
++ int irq, ret;
++ resource_size_t len;
++
++ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
++ if (!pcfg) {
++ pr_err("can't allocate portal config");
++ return NULL;
++ }
++
++ /*
++ * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
++ * 'struct device' in order to get the PAMU stashing setup and the QMan
++ * portal [driver] won't function at all without ring stashing
++ *
++ * Making the QMan portal driver nice and proper is part of the
++ * upstreaming effort
++ */
++ pcfg->dev.bus = &platform_bus_type;
++ pcfg->dev.of_node = node;
++#ifdef CONFIG_FSL_PAMU
++ pcfg->dev.archdata.iommu_domain = NULL;
++#endif
++
++ ret = of_address_to_resource(node, DPA_PORTAL_CE,
++ &pcfg->addr_phys[DPA_PORTAL_CE]);
++ if (ret) {
++ pr_err("Can't get %s property '%s'\n", node->full_name,
++ "reg::CE");
++ goto err;
++ }
++ ret = of_address_to_resource(node, DPA_PORTAL_CI,
++ &pcfg->addr_phys[DPA_PORTAL_CI]);
++ if (ret) {
++ pr_err("Can't get %s property '%s'\n", node->full_name,
++ "reg::CI");
++ goto err;
++ }
++ index_p = of_get_property(node, "cell-index", &ret);
++ if (!index_p || (ret != 4)) {
++ pr_err("Can't get %s property '%s'\n", node->full_name,
++ "cell-index");
++ goto err;
++ }
++ index = be32_to_cpu(*index_p);
++ if (index >= qman_portal_max) {
++ pr_err("QMan portal index %d is beyond max (%d)\n",
++ index, qman_portal_max);
++ goto err;
++ }
++
++ channel = index + QM_CHANNEL_SWPORTAL0;
++ pcfg->public_cfg.channel = channel;
++ pcfg->public_cfg.cpu = -1;
++ irq = irq_of_parse_and_map(node, 0);
++ if (irq == 0) {
++ pr_err("Can't get %s property '%s'\n", node->full_name,
++ "interrupts");
++ goto err;
++ }
++ pcfg->public_cfg.irq = irq;
++ pcfg->public_cfg.index = index;
++#ifdef CONFIG_FSL_QMAN_CONFIG
++ /* We need the same LIODN offset for all portals */
++ qman_liodn_fixup(pcfg->public_cfg.channel);
++#endif
++
++ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
++ if (len != (unsigned long)len)
++ goto err;
++
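++	/*
++	 * The CE ("cache-enabled") portal region is mapped cacheable so ring
++	 * accesses can be stashed; the CI ("cache-inhibited") region is
++	 * mapped as uncached device memory.
++	 */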
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
++ pcfg->addr_phys[DPA_PORTAL_CE].start,
++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
++
++ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
++ pcfg->addr_phys[DPA_PORTAL_CI].start,
++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
++#else
++ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
++ pcfg->addr_phys[DPA_PORTAL_CE].start,
++ (unsigned long)len,
++ 0);
++ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
++ pcfg->addr_phys[DPA_PORTAL_CI].start,
++ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
++ _PAGE_GUARDED | _PAGE_NO_CACHE);
++#endif
++ return pcfg;
++err:
++ kfree(pcfg);
++ return NULL;
++}
++
++static struct qm_portal_config *get_pcfg(struct list_head *list)
++{
++ struct qm_portal_config *pcfg;
++ if (list_empty(list))
++ return NULL;
++ pcfg = list_entry(list->prev, struct qm_portal_config, list);
++ list_del(&pcfg->list);
++ return pcfg;
++}
++
++static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx)
++{
++ struct qm_portal_config *pcfg;
++ if (list_empty(list))
++ return NULL;
++ list_for_each_entry(pcfg, list, list) {
++ if (pcfg->public_cfg.index == idx) {
++ list_del(&pcfg->list);
++ return pcfg;
++ }
++ }
++ return NULL;
++}
++
++static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
++{
++#ifdef CONFIG_FSL_PAMU
++ int ret;
++ int window_count = 1;
++ struct iommu_domain_geometry geom_attr;
++ struct pamu_stash_attribute stash_attr;
++
++ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
++ if (!pcfg->iommu_domain) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
++ __func__);
++ goto _no_iommu;
++ }
++ geom_attr.aperture_start = 0;
++ geom_attr.aperture_end =
++ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
++ geom_attr.force_aperture = true;
++ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
++ &geom_attr);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
++ &window_count);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ stash_attr.cpu = cpu;
++ stash_attr.cache = PAMU_ATTR_CACHE_L1;
++ /* set stash information for the window */
++ stash_attr.window = 0;
++ ret = iommu_domain_set_attr(pcfg->iommu_domain,
++ DOMAIN_ATTR_FSL_PAMU_STASH,
++ &stash_attr);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
++ IOMMU_READ | IOMMU_WRITE);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
++ __func__, ret);
++ goto _iommu_domain_free;
++ }
++ ret = iommu_domain_set_attr(pcfg->iommu_domain,
++ DOMAIN_ATTR_FSL_PAMU_ENABLE,
++ &window_count);
++ if (ret < 0) {
++ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
++ __func__, ret);
++ goto _iommu_detach_device;
++ }
++
++_no_iommu:
++#endif
++#ifdef CONFIG_FSL_QMAN_CONFIG
++	if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
++		pr_warn("Failed to set QMan portal's stash request queue\n");
++#endif
++
++ return;
++
++#ifdef CONFIG_FSL_PAMU
++_iommu_detach_device:
++ iommu_detach_device(pcfg->iommu_domain, NULL);
++_iommu_domain_free:
++ iommu_domain_free(pcfg->iommu_domain);
++#endif
++}
++
++struct qm_portal_config *qm_get_unused_portal_idx(u32 idx)
++{
++ struct qm_portal_config *ret;
++ spin_lock(&unused_pcfgs_lock);
++ if (idx == QBMAN_ANY_PORTAL_IDX)
++ ret = get_pcfg(&unused_pcfgs);
++ else
++ ret = get_pcfg_idx(&unused_pcfgs, idx);
++ spin_unlock(&unused_pcfgs_lock);
++ /* Bind stashing LIODNs to the CPU we are currently executing on, and
++	 * set the portal to use the stashing request queue corresponding to the
++ * cpu as well. The user-space driver assumption is that the pthread has
++ * to already be affine to one cpu only before opening a portal. If that
++ * check is circumvented, the only risk is a performance degradation -
++ * stashing will go to whatever cpu they happened to be running on when
++ * opening the device file, and if that isn't the cpu they subsequently
++ * bind to and do their polling on, tough. */
++ if (ret)
++ portal_set_cpu(ret, hard_smp_processor_id());
++ return ret;
++}
++
++struct qm_portal_config *qm_get_unused_portal(void)
++{
++ return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
++}
++
++void qm_put_unused_portal(struct qm_portal_config *pcfg)
++{
++ spin_lock(&unused_pcfgs_lock);
++ list_add(&pcfg->list, &unused_pcfgs);
++ spin_unlock(&unused_pcfgs_lock);
++}
++
++static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
++{
++ struct qman_portal *p;
++
++ pcfg->iommu_domain = NULL;
++ portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
++ p = qman_create_affine_portal(pcfg, NULL);
++ if (p) {
++ u32 irq_sources = 0;
++ /* Determine what should be interrupt-vs-poll driven */
++#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
++ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
++ QM_PIRQ_CSCI | QM_PIRQ_CCSCI;
++#endif
++#ifdef CONFIG_FSL_DPA_PIRQ_FAST
++ irq_sources |= QM_PIRQ_DQRI;
++#endif
++ qman_p_irqsource_add(p, irq_sources);
++ pr_info("Qman portal %sinitialised, cpu %d\n",
++ pcfg->public_cfg.is_shared ? "(shared) " : "",
++ pcfg->public_cfg.cpu);
++ } else
++ pr_crit("Qman portal failure on cpu %d\n",
++ pcfg->public_cfg.cpu);
++ return p;
++}
++
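++/*
++ * A slave portal owns no portal hardware of its own; it redirects its work
++ * to one of the shared portals, chosen round-robin across the shared set.
++ */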
++static void init_slave(int cpu)
++{
++ struct qman_portal *p;
++ struct cpumask oldmask = current->cpus_allowed;
++ set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
++ p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
++ if (!p)
++ pr_err("Qman slave portal failure on cpu %d\n", cpu);
++ else
++ pr_info("Qman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
++ set_cpus_allowed_ptr(current, &oldmask);
++ if (shared_portals_idx >= num_shared_portals)
++ shared_portals_idx = 0;
++}
++
++static struct cpumask want_unshared __initdata;
++static struct cpumask want_shared __initdata;
++
++static int __init parse_qportals(char *str)
++{
++ return parse_portals_bootarg(str, &want_shared, &want_unshared,
++ "qportals");
++}
++__setup("qportals=", parse_qportals);
++
++static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
++ unsigned int cpu)
++{
++#ifdef CONFIG_FSL_PAMU
++ struct pamu_stash_attribute stash_attr;
++ int ret;
++
++ if (pcfg->iommu_domain) {
++ stash_attr.cpu = cpu;
++ stash_attr.cache = PAMU_ATTR_CACHE_L1;
++ /* set stash information for the window */
++ stash_attr.window = 0;
++ ret = iommu_domain_set_attr(pcfg->iommu_domain,
++ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
++ if (ret < 0) {
++ pr_err("Failed to update pamu stash setting\n");
++ return;
++ }
++ }
++#endif
++#ifdef CONFIG_FSL_QMAN_CONFIG
++ if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
++ pr_warn("Failed to update portal's stash request queue\n");
++#endif
++}
++
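++/*
++ * When a CPU goes offline, retarget its affine portal's interrupt and stash
++ * destination at CPU 0 so the portal can still be serviced.
++ */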
++static int qman_offline_cpu(unsigned int cpu)
++{
++ struct qman_portal *p;
++ const struct qm_portal_config *pcfg;
++ p = (struct qman_portal *)affine_portals[cpu];
++ if (p) {
++ pcfg = qman_get_qm_portal_config(p);
++ if (pcfg) {
++ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
++ qman_portal_update_sdest(pcfg, 0);
++ }
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static int qman_online_cpu(unsigned int cpu)
++{
++ struct qman_portal *p;
++ const struct qm_portal_config *pcfg;
++ p = (struct qman_portal *)affine_portals[cpu];
++ if (p) {
++ pcfg = qman_get_qm_portal_config(p);
++ if (pcfg) {
++ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
++ qman_portal_update_sdest(pcfg, cpu);
++ }
++ }
++ return 0;
++}
++
++static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (unsigned long)hcpu;
++
++ switch (action) {
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ qman_online_cpu(cpu);
++ break;
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++		qman_offline_cpu(cpu);
++		break;
++	default:
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block qman_hotplug_cpu_notifier = {
++ .notifier_call = qman_hotplug_cpu_callback,
++};
++#endif /* CONFIG_HOTPLUG_CPU */
++
++__init int qman_init(void)
++{
++ struct cpumask slave_cpus;
++ struct cpumask unshared_cpus = *cpu_none_mask;
++ struct cpumask shared_cpus = *cpu_none_mask;
++ LIST_HEAD(unshared_pcfgs);
++ LIST_HEAD(shared_pcfgs);
++ struct device_node *dn;
++ struct qm_portal_config *pcfg;
++ struct qman_portal *p;
++ int cpu, ret;
++ const u32 *clk;
++ struct cpumask offline_cpus;
++
++ /* Initialise the Qman (CCSR) device */
++ for_each_compatible_node(dn, NULL, "fsl,qman") {
++ if (!qman_init_ccsr(dn))
++ pr_info("Qman err interrupt handler present\n");
++ else
++ pr_err("Qman CCSR setup failed\n");
++
++ clk = of_get_property(dn, "clock-frequency", NULL);
++ if (!clk)
++ pr_warn("Can't find Qman clock frequency\n");
++ else
++ qman_clk = be32_to_cpu(*clk);
++ }
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ /* Setup lookup table for FQ demux */
++ ret = qman_setup_fq_lookup_table(get_qman_fqd_size()/64);
++ if (ret)
++ return ret;
++#endif
++
++ /* Get qman ip revision */
++ qman_get_ip_revision(dn);
++ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
++ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
++ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
++ qm_channel_pme = QMAN_CHANNEL_PME_REV3;
++ }
++
++ if ((qman_ip_rev == QMAN_REV31) && (qman_ip_cfg == QMAN_REV_CFG_2))
++ qm_channel_dce = QMAN_CHANNEL_DCE_QMANREV312;
++
++ /*
++ * Parse the ceetm node to get how many ceetm instances are supported
++ * on the current silicon. num_ceetms must be confirmed before portals
++	 * are initialised.
++ */
++ num_ceetms = 0;
++ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm")
++ num_ceetms++;
++
++ /* Parse pool channels into the SDQCR mask. (Must happen before portals
++ * are initialised.) */
++ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
++ ret = fsl_pool_channel_range_sdqcr(dn);
++ if (ret)
++ return ret;
++ }
++
++ memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
++ /* Initialise portals. See bman_driver.c for comments */
++ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
++ if (!of_device_is_available(dn))
++ continue;
++ pcfg = parse_pcfg(dn);
++ if (pcfg) {
++ pcfg->public_cfg.pools = pools_sdqcr;
++ list_add_tail(&pcfg->list, &unused_pcfgs);
++ }
++ }
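++	/*
++	 * Distribute portals: CPUs named in the "qportals=" bootarg get a
++	 * shared or unshared portal as requested; if the bootarg selected
++	 * nothing, every online CPU gets an unshared portal. Any CPU left
++	 * without a portal is made a slave of the shared set further down.
++	 */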
++ for_each_possible_cpu(cpu) {
++ if (cpumask_test_cpu(cpu, &want_shared)) {
++ pcfg = get_pcfg(&unused_pcfgs);
++ if (!pcfg)
++ break;
++ pcfg->public_cfg.cpu = cpu;
++ list_add_tail(&pcfg->list, &shared_pcfgs);
++ cpumask_set_cpu(cpu, &shared_cpus);
++ }
++ if (cpumask_test_cpu(cpu, &want_unshared)) {
++ if (cpumask_test_cpu(cpu, &shared_cpus))
++ continue;
++ pcfg = get_pcfg(&unused_pcfgs);
++ if (!pcfg)
++ break;
++ pcfg->public_cfg.cpu = cpu;
++ list_add_tail(&pcfg->list, &unshared_pcfgs);
++ cpumask_set_cpu(cpu, &unshared_cpus);
++ }
++ }
++ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
++ for_each_online_cpu(cpu) {
++ pcfg = get_pcfg(&unused_pcfgs);
++ if (!pcfg)
++ break;
++ pcfg->public_cfg.cpu = cpu;
++ list_add_tail(&pcfg->list, &unshared_pcfgs);
++ cpumask_set_cpu(cpu, &unshared_cpus);
++ }
++ }
++ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
++ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
++ if (cpumask_empty(&slave_cpus)) {
++ if (!list_empty(&shared_pcfgs)) {
++ cpumask_or(&unshared_cpus, &unshared_cpus,
++ &shared_cpus);
++ cpumask_clear(&shared_cpus);
++ list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
++ INIT_LIST_HEAD(&shared_pcfgs);
++ }
++ } else {
++ if (list_empty(&shared_pcfgs)) {
++ pcfg = get_pcfg(&unshared_pcfgs);
++ if (!pcfg) {
++ pr_crit("No QMan portals available!\n");
++ return 0;
++ }
++ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
++ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
++ list_add_tail(&pcfg->list, &shared_pcfgs);
++ }
++ }
++ list_for_each_entry(pcfg, &unshared_pcfgs, list) {
++ pcfg->public_cfg.is_shared = 0;
++ p = init_pcfg(pcfg);
++ if (!p) {
++ pr_crit("Unable to configure portals\n");
++ return 0;
++ }
++ }
++ list_for_each_entry(pcfg, &shared_pcfgs, list) {
++ pcfg->public_cfg.is_shared = 1;
++ p = init_pcfg(pcfg);
++ if (p)
++ shared_portals[num_shared_portals++] = p;
++ }
++ if (!cpumask_empty(&slave_cpus))
++ for_each_cpu(cpu, &slave_cpus)
++ init_slave(cpu);
++ pr_info("Qman portals initialised\n");
++ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
++ for_each_cpu(cpu, &offline_cpus)
++ qman_offline_cpu(cpu);
++#ifdef CONFIG_HOTPLUG_CPU
++ register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
++#endif
++ return 0;
++}
++
++__init int qman_resource_init(void)
++{
++ struct device_node *dn;
++ int ret;
++
++ /* Initialise FQID allocation ranges */
++ for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
++ ret = fsl_fqid_range_init(dn);
++ if (ret)
++ return ret;
++ }
++ /* Initialise CGRID allocation ranges */
++ for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
++ ret = fsl_cgrid_range_init(dn);
++ if (ret)
++ return ret;
++ }
++ /* Parse pool channels into the allocator. (Must happen after portals
++ * are initialised.) */
++ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
++ ret = fsl_pool_channel_range_init(dn);
++ if (ret)
++ return ret;
++ }
++
++ /* Parse CEETM */
++ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") {
++ ret = fsl_ceetm_init(dn);
++ if (ret)
++ return ret;
++ }
++ return 0;
++}
++
++#ifdef CONFIG_SUSPEND
++void suspend_unused_qportal(void)
++{
++ struct qm_portal_config *pcfg;
++
++ if (list_empty(&unused_pcfgs))
++ return;
++
++ list_for_each_entry(pcfg, &unused_pcfgs, list) {
++#ifdef CONFIG_PM_DEBUG
++ pr_info("Need to save qportal %d\n", pcfg->public_cfg.index);
++#endif
++ /* save isdr, disable all via isdr, clear isr */
++ pcfg->saved_isdr =
++ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
++ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
++ 0xe08);
++ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
++ 0xe00);
++ }
++ return;
++}
++
++void resume_unused_qportal(void)
++{
++ struct qm_portal_config *pcfg;
++
++ if (list_empty(&unused_pcfgs))
++ return;
++
++ list_for_each_entry(pcfg, &unused_pcfgs, list) {
++#ifdef CONFIG_PM_DEBUG
++ pr_info("Need to resume qportal %d\n", pcfg->public_cfg.index);
++#endif
++ /* restore isdr */
++ __raw_writel(pcfg->saved_isdr,
++ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
++ }
++ return;
++}
++#endif
+diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c
+new file mode 100644
+index 00000000..1651e62c
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_high.c
+@@ -0,0 +1,5669 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qman_low.h"
++
++/* Compilation constants */
++#define DQRR_MAXFILL 15
++#define EQCR_ITHRESH	4	/* interrupt threshold if the EQCR congests */
++#define IRQNAME "QMan portal %d"
++#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
++
++/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's
++ * positive, and rounding to the closest value if it's zero. NB, this macro
++ * implicitly upgrades parameters to unsigned 64-bit, so feed it with types
++ * that are compatible with this. NB, these arguments should not be expressions
++ * unless it is safe for them to be evaluated multiple times. E.g. do not pass
++ * in "some_value++" as a parameter to the macro! */
++#define ROUNDING(n, d, r) \
++ (((r) < 0) ? div64_u64((n), (d)) : \
++ (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \
++ div64_u64(((n) + ((d) / 2)), (d))))
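++
++/* Examples: ROUNDING(10, 4, -1) == 2 (down), ROUNDING(9, 4, 1) == 3 (up),
++ * ROUNDING(9, 4, 0) == 2 and ROUNDING(10, 4, 0) == 3 (nearest, ties up). */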
++
++/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
++ * inter-processor locking only. Note, FQLOCK() is always called either under a
++ * local_irq_save() or from interrupt context - hence there's no need for irq
++ * protection (and indeed, attempting to nest irq-protection doesn't work, as
++ * the "irq en/disable" machinery isn't recursive...). */
++#define FQLOCK(fq) \
++ do { \
++ struct qman_fq *__fq478 = (fq); \
++ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
++ spin_lock(&__fq478->fqlock); \
++ } while (0)
++#define FQUNLOCK(fq) \
++ do { \
++ struct qman_fq *__fq478 = (fq); \
++ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
++ spin_unlock(&__fq478->fqlock); \
++ } while (0)
++
++static inline void fq_set(struct qman_fq *fq, u32 mask)
++{
++ set_bits(mask, &fq->flags);
++}
++static inline void fq_clear(struct qman_fq *fq, u32 mask)
++{
++ clear_bits(mask, &fq->flags);
++}
++static inline int fq_isset(struct qman_fq *fq, u32 mask)
++{
++ return fq->flags & mask;
++}
++static inline int fq_isclear(struct qman_fq *fq, u32 mask)
++{
++ return !(fq->flags & mask);
++}
++
++struct qman_portal {
++ struct qm_portal p;
++ unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */
++ unsigned long irq_sources;
++ u32 use_eqcr_ci_stashing;
++ u32 slowpoll; /* only used when interrupts are off */
++ struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */
++#endif
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ raw_spinlock_t sharing_lock; /* only used if is_shared */
++ int is_shared;
++ struct qman_portal *sharing_redirect;
++#endif
++ u32 sdqcr;
++ int dqrr_disable_ref;
++ /* A portal-specific handler for DCP ERNs. If this is NULL, the global
++ * handler is called instead. */
++ qman_cb_dc_ern cb_dc_ern;
++ /* When the cpu-affine portal is activated, this is non-NULL */
++ const struct qm_portal_config *config;
++ /* This is needed for providing a non-NULL device to dma_map_***() */
++ struct platform_device *pdev;
++ struct dpa_rbtree retire_table;
++ char irqname[MAX_IRQNAME];
++ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
++ struct qman_cgrs *cgrs;
++ /* linked-list of CSCN handlers. */
++ struct list_head cgr_cbs;
++ /* list lock */
++ spinlock_t cgr_lock;
++ /* 2-element array. ccgrs[0] is mask, ccgrs[1] is snapshot. */
++ struct qman_ccgrs *ccgrs[QMAN_CEETM_MAX];
++ /* 256-element array, each is a linked-list of CCSCN handlers. */
++ struct list_head ccgr_cbs[QMAN_CEETM_MAX];
++ /* list lock */
++ spinlock_t ccgr_lock;
++ /* track if memory was allocated by the driver */
++ u8 alloced;
++ /* power management data */
++ u32 save_isdr;
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++ /* Keep a shadow copy of the DQRR on LE systems as the SW needs to
++	 * byte-swap the DQRR's read-only memory. The first entry must be
++	 * aligned to 2 ** 10 so that DQRR index calculations based on the
++	 * shadow copy address work (6 bits for the address shift + 4 bits
++	 * for the DQRR size).
++ */
++ struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE] __aligned(1024);
++#endif
++};
++
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++#define PORTAL_IRQ_LOCK(p, irqflags) \
++ do { \
++ if ((p)->is_shared) \
++ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
++ else \
++ local_irq_save(irqflags); \
++ } while (0)
++#define PORTAL_IRQ_UNLOCK(p, irqflags) \
++ do { \
++ if ((p)->is_shared) \
++ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
++ irqflags); \
++ else \
++ local_irq_restore(irqflags); \
++ } while (0)
++#else
++#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
++#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
++#endif
++
++/* Global handler for DCP ERNs. Used when the portal receiving the message does
++ * not have a portal-specific handler. */
++static qman_cb_dc_ern cb_dc_ern;
++
++static cpumask_t affine_mask;
++static DEFINE_SPINLOCK(affine_mask_lock);
++static u16 affine_channels[NR_CPUS];
++static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
++void *affine_portals[NR_CPUS];
++
++/* "raw" gets the cpu-local struct whether it's a redirect or not. */
++static inline struct qman_portal *get_raw_affine_portal(void)
++{
++ return &get_cpu_var(qman_affine_portal);
++}
++/* For ops that can redirect, this obtains the portal to use */
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++static inline struct qman_portal *get_affine_portal(void)
++{
++ struct qman_portal *p = get_raw_affine_portal();
++ if (p->sharing_redirect)
++ return p->sharing_redirect;
++ return p;
++}
++#else
++#define get_affine_portal() get_raw_affine_portal()
++#endif
++/* For every "get", there must be a "put" */
++static inline void put_affine_portal(void)
++{
++ put_cpu_var(qman_affine_portal);
++}
++/* Exception: poll functions assume the caller is cpu-affine and in no risk of
++ * re-entrance, which are the two reasons we usually use the get/put_cpu_var()
++ * semantic - ie. to disable pre-emption. Some use-cases expect the execution
++ * context to remain as non-atomic during poll-triggered callbacks as it was
++ * when the poll API was first called (eg. NAPI), so we go out of our way in
++ * this case to not disable pre-emption. */
++static inline struct qman_portal *get_poll_portal(void)
++{
++ return &get_cpu_var(qman_affine_portal);
++}
++#define put_poll_portal()
++
++/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
++ * retirement notifications (the fact they are sometimes h/w-consumed means that
++ * contextB isn't always a s/w demux - and as we can't know which case it is
++ * when looking at the notification, we have to use the slow lookup for all of
++ * them). NB, it's possible to have multiple FQ objects refer to the same FQID
++ * (though at most one of them should be the consumer), so this table isn't for
++ * all FQs - FQs are added when retirement commands are issued, and removed when
++ * they complete, which also massively reduces the size of this table. */
++IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
++
++/* This is what everything can wait on, even if it migrates to a different cpu
++ * to the one whose affine portal it is waiting on. */
++static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
++
++static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
++{
++ int ret = fqtree_push(&p->retire_table, fq);
++ if (ret)
++ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
++ return ret;
++}
++
++static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
++{
++ fqtree_del(&p->retire_table, fq);
++}
++
++static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
++{
++ return fqtree_find(&p->retire_table, fqid);
++}
++
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++static void **qman_fq_lookup_table;
++static size_t qman_fq_lookup_table_size;
++
++int qman_setup_fq_lookup_table(size_t num_entries)
++{
++	/* Allocate 1 more entry since the first entry is not used */
++	num_entries++;
++	qman_fq_lookup_table = vzalloc(num_entries * sizeof(void *));
++ if (!qman_fq_lookup_table) {
++ pr_err("QMan: Could not allocate fq lookup table\n");
++ return -ENOMEM;
++ }
++ qman_fq_lookup_table_size = num_entries;
++ pr_info("QMan: Allocated lookup table at %p, entry count %lu\n",
++ qman_fq_lookup_table,
++ (unsigned long)qman_fq_lookup_table_size);
++ return 0;
++}
++
++/* global structure that maintains fq object mapping */
++static DEFINE_SPINLOCK(fq_hash_table_lock);
++
++static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
++{
++ u32 i;
++
++ spin_lock(&fq_hash_table_lock);
++ /* Can't use index zero because this has special meaning
++ * in context_b field. */
++ for (i = 1; i < qman_fq_lookup_table_size; i++) {
++ if (qman_fq_lookup_table[i] == NULL) {
++ *entry = i;
++ qman_fq_lookup_table[i] = fq;
++ spin_unlock(&fq_hash_table_lock);
++ return 0;
++ }
++ }
++ spin_unlock(&fq_hash_table_lock);
++ return -ENOMEM;
++}
++
++static void clear_fq_table_entry(u32 entry)
++{
++ spin_lock(&fq_hash_table_lock);
++ BUG_ON(entry >= qman_fq_lookup_table_size);
++ qman_fq_lookup_table[entry] = NULL;
++ spin_unlock(&fq_hash_table_lock);
++}
++
++static inline struct qman_fq *get_fq_table_entry(u32 entry)
++{
++ BUG_ON(entry >= qman_fq_lookup_table_size);
++ return qman_fq_lookup_table[entry];
++}
++#endif
++
++static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
++{
++ /* Byteswap the FQD to HW format */
++ fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
++ fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
++ fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
++ fqd->context_b = cpu_to_be32(fqd->context_b);
++ fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
++}
++
++static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
++{
++ /* Byteswap the FQD to CPU format */
++ fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
++ fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
++ fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
++ fqd->context_b = be32_to_cpu(fqd->context_b);
++ fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
++}
++
++/* Swap a 40 bit address */
++static inline u64 cpu_to_be40(u64 in)
++{
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ return in;
++#else
++ u64 out = 0;
++ u8 *p = (u8 *) &out;
++ p[0] = in >> 32;
++ p[1] = in >> 24;
++ p[2] = in >> 16;
++ p[3] = in >> 8;
++ p[4] = in >> 0;
++ return out;
++#endif
++}
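++
++/* e.g. cpu_to_be40(0x1122334455ULL) places bytes 11 22 33 44 55 at the start
++ * of the returned u64's storage, most significant byte first. */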
++static inline u64 be40_to_cpu(u64 in)
++{
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ return in;
++#else
++ u64 out = 0;
++ u8 *pout = (u8 *) &out;
++ u8 *pin = (u8 *) &in;
++ pout[0] = pin[4];
++ pout[1] = pin[3];
++ pout[2] = pin[2];
++ pout[3] = pin[1];
++ pout[4] = pin[0];
++ return out;
++#endif
++}
++
++/* Swap a 24 bit value */
++static inline u32 cpu_to_be24(u32 in)
++{
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ return in;
++#else
++ u32 out = 0;
++ u8 *p = (u8 *) &out;
++ p[0] = in >> 16;
++ p[1] = in >> 8;
++ p[2] = in >> 0;
++ return out;
++#endif
++}
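++
++/* e.g. cpu_to_be24(0x123456) lays the three bytes out as 12 34 56. */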
++
++static inline u32 be24_to_cpu(u32 in)
++{
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ return in;
++#else
++ u32 out = 0;
++ u8 *pout = (u8 *) &out;
++ u8 *pin = (u8 *) &in;
++ pout[0] = pin[2];
++ pout[1] = pin[1];
++ pout[2] = pin[0];
++ return out;
++#endif
++}
++
++static inline u64 be48_to_cpu(u64 in)
++{
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ return in;
++#else
++ u64 out = 0;
++ u8 *pout = (u8 *) &out;
++ u8 *pin = (u8 *) &in;
++
++ pout[0] = pin[5];
++ pout[1] = pin[4];
++ pout[2] = pin[3];
++ pout[3] = pin[2];
++ pout[4] = pin[1];
++ pout[5] = pin[0];
++ return out;
++#endif
++}
++static inline void cpu_to_hw_fd(struct qm_fd *fd)
++{
++ fd->opaque_addr = cpu_to_be64(fd->opaque_addr);
++ fd->status = cpu_to_be32(fd->status);
++ fd->opaque = cpu_to_be32(fd->opaque);
++}
++
++static inline void hw_fd_to_cpu(struct qm_fd *fd)
++{
++ fd->opaque_addr = be64_to_cpu(fd->opaque_addr);
++ fd->status = be32_to_cpu(fd->status);
++ fd->opaque = be32_to_cpu(fd->opaque);
++}
++
++static inline void hw_cq_query_to_cpu(struct qm_mcr_ceetm_cq_query *cq_query)
++{
++ cq_query->ccgid = be16_to_cpu(cq_query->ccgid);
++ cq_query->state = be16_to_cpu(cq_query->state);
++ cq_query->pfdr_hptr = be24_to_cpu(cq_query->pfdr_hptr);
++ cq_query->pfdr_tptr = be24_to_cpu(cq_query->pfdr_tptr);
++ cq_query->od1_xsfdr = be16_to_cpu(cq_query->od1_xsfdr);
++ cq_query->od2_xsfdr = be16_to_cpu(cq_query->od2_xsfdr);
++ cq_query->od3_xsfdr = be16_to_cpu(cq_query->od3_xsfdr);
++ cq_query->od4_xsfdr = be16_to_cpu(cq_query->od4_xsfdr);
++ cq_query->od5_xsfdr = be16_to_cpu(cq_query->od5_xsfdr);
++ cq_query->od6_xsfdr = be16_to_cpu(cq_query->od6_xsfdr);
++ cq_query->ra1_xsfdr = be16_to_cpu(cq_query->ra1_xsfdr);
++ cq_query->ra2_xsfdr = be16_to_cpu(cq_query->ra2_xsfdr);
++ cq_query->frm_cnt = be24_to_cpu(cq_query->frm_cnt);
++}
++
++static inline void hw_ccgr_query_to_cpu(struct qm_mcr_ceetm_ccgr_query *ccgr_q)
++{
++ int i;
++
++ ccgr_q->cm_query.cs_thres.hword =
++ be16_to_cpu(ccgr_q->cm_query.cs_thres.hword);
++ ccgr_q->cm_query.cs_thres_x.hword =
++ be16_to_cpu(ccgr_q->cm_query.cs_thres_x.hword);
++ ccgr_q->cm_query.td_thres.hword =
++ be16_to_cpu(ccgr_q->cm_query.td_thres.hword);
++ ccgr_q->cm_query.wr_parm_g.word =
++ be32_to_cpu(ccgr_q->cm_query.wr_parm_g.word);
++ ccgr_q->cm_query.wr_parm_y.word =
++ be32_to_cpu(ccgr_q->cm_query.wr_parm_y.word);
++ ccgr_q->cm_query.wr_parm_r.word =
++ be32_to_cpu(ccgr_q->cm_query.wr_parm_r.word);
++ ccgr_q->cm_query.cscn_targ_dcp =
++ be16_to_cpu(ccgr_q->cm_query.cscn_targ_dcp);
++ ccgr_q->cm_query.i_cnt = be40_to_cpu(ccgr_q->cm_query.i_cnt);
++ ccgr_q->cm_query.a_cnt = be40_to_cpu(ccgr_q->cm_query.a_cnt);
++ for (i = 0; i < ARRAY_SIZE(ccgr_q->cm_query.cscn_targ_swp); i++)
++ ccgr_q->cm_query.cscn_targ_swp[i] =
++ be32_to_cpu(ccgr_q->cm_query.cscn_targ_swp[i]);
++}
++
++/* In the case that slow- and fast-path handling are both done by qman_poll()
++ * (ie. because there is no interrupt handling), we ought to balance how often
++ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
++ * sources, so we call the fast poll 'n' times before calling the slow poll
++ * once. The idle decrementer constant is used when the last slow-poll detected
++ * no work to do, and the busy decrementer constant when the last slow-poll had
++ * work to do. */
++#define SLOW_POLL_IDLE 1000
++#define SLOW_POLL_BUSY 10
++static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
++static inline unsigned int __poll_portal_fast(struct qman_portal *p,
++ unsigned int poll_limit);
++
++/* Portal interrupt handler */
++static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
++{
++ struct qman_portal *p = ptr;
++ /*
++ * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
++ * it could race against a Query Congestion State command also given
++ * as part of the handling of this interrupt source. We mustn't
++ * clear it a second time in this top-level function.
++ */
++ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
++ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
++ u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
++ /* DQRR-handling if it's interrupt-driven */
++ if (is & QM_PIRQ_DQRI)
++ __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
++ /* Handling of anything else that's interrupt-driven */
++ clear |= __poll_portal_slow(p, is);
++ qm_isr_status_clear(&p->p, clear);
++ return IRQ_HANDLED;
++}
++
++/* This inner version is used privately by qman_create_affine_portal(), as well
++ * as by the exported qman_stop_dequeues(). */
++static inline void qman_stop_dequeues_ex(struct qman_portal *p)
++{
++ unsigned long irqflags __maybe_unused;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ if (!(p->dqrr_disable_ref++))
++ qm_dqrr_set_maxfill(&p->p, 0);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++}
++
++static int drain_mr_fqrni(struct qm_portal *p)
++{
++ const struct qm_mr_entry *msg;
++loop:
++ msg = qm_mr_current(p);
++ if (!msg) {
++ /* if MR was full and h/w had other FQRNI entries to produce, we
++ * need to allow it time to produce those entries once the
++ * existing entries are consumed. A worst-case situation
++ * (fully-loaded system) means h/w sequencers may have to do 3-4
++ * other things before servicing the portal's MR pump, each of
++ * which (if slow) may take ~50 qman cycles (which is ~200
++ * processor cycles). So rounding up and then multiplying this
++ * worst-case estimate by a factor of 10, just to be
++ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
++ * one entry at a time, so h/w has an opportunity to produce new
++ * entries well before the ring has been fully consumed, so
++ * we're being *really* paranoid here. */
++ u64 now, then = mfatb();
++ do {
++ now = mfatb();
++ } while ((then + 10000) > now);
++ msg = qm_mr_current(p);
++ if (!msg)
++ return 0;
++ }
++ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
++ /* We aren't draining anything but FQRNIs */
++ pr_err("QMan found verb 0x%x in MR\n", msg->verb);
++ return -1;
++ }
++ qm_mr_next(p);
++ qm_mr_cci_consume(p, 1);
++ goto loop;
++}
++
++#ifdef CONFIG_SUSPEND
++static int _qman_portal_suspend_noirq(struct device *dev)
++{
++ struct qman_portal *p = (struct qman_portal *)dev->platform_data;
++#ifdef CONFIG_PM_DEBUG
++ struct platform_device *pdev = to_platform_device(dev);
++#endif
++
++ p->save_isdr = qm_isr_disable_read(&p->p);
++ qm_isr_disable_write(&p->p, 0xffffffff);
++ qm_isr_status_clear(&p->p, 0xffffffff);
++#ifdef CONFIG_PM_DEBUG
++ pr_info("Suspend for %s\n", pdev->name);
++#endif
++ return 0;
++}
++
++static int _qman_portal_resume_noirq(struct device *dev)
++{
++ struct qman_portal *p = (struct qman_portal *)dev->platform_data;
++
++ /* restore isdr */
++ qm_isr_disable_write(&p->p, p->save_isdr);
++ return 0;
++}
++#else
++#define _qman_portal_suspend_noirq NULL
++#define _qman_portal_resume_noirq NULL
++#endif
++
++struct dev_pm_domain qman_portal_device_pm_domain = {
++ .ops = {
++ USE_PLATFORM_PM_SLEEP_OPS
++ .suspend_noirq = _qman_portal_suspend_noirq,
++ .resume_noirq = _qman_portal_resume_noirq,
++ }
++};
++
++struct qman_portal *qman_create_portal(
++ struct qman_portal *portal,
++ const struct qm_portal_config *config,
++ const struct qman_cgrs *cgrs)
++{
++ struct qm_portal *__p;
++ char buf[16];
++ int ret;
++ u32 isdr;
++
++ if (!portal) {
++ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
++ if (!portal)
++ return portal;
++ portal->alloced = 1;
++ } else
++ portal->alloced = 0;
++
++ __p = &portal->p;
++
++#if (defined CONFIG_PPC || defined CONFIG_PPC64) && defined CONFIG_FSL_PAMU
++ /* PAMU is required for stashing */
++ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
++ 1 : 0);
++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ portal->use_eqcr_ci_stashing = 1;
++#else
++ portal->use_eqcr_ci_stashing = 0;
++#endif
++
++ /* Prep the low-level portal struct with the mapped addresses from the
++ * config; everything that follows depends on it, and "config" is mostly
++ * kept for (de)reference. */
++ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
++ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
++ /*
++ * If CI-stashing is used, the current defaults use a threshold of 3,
++ * and stash with higher-than-DQRR priority.
++ */
++ if (qm_eqcr_init(__p, qm_eqcr_pvb,
++ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
++ pr_err("Qman EQCR initialisation failed\n");
++ goto fail_eqcr;
++ }
++ if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb,
++ qm_dqrr_cdc, DQRR_MAXFILL)) {
++ pr_err("Qman DQRR initialisation failed\n");
++ goto fail_dqrr;
++ }
++ if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) {
++ pr_err("Qman MR initialisation failed\n");
++ goto fail_mr;
++ }
++ if (qm_mc_init(__p)) {
++ pr_err("Qman MC initialisation failed\n");
++ goto fail_mc;
++ }
++ if (qm_isr_init(__p)) {
++ pr_err("Qman ISR initialisation failed\n");
++ goto fail_isr;
++ }
++ /* static interrupt-gating controls */
++ qm_dqrr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH);
++ qm_mr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH);
++ qm_isr_set_iperiod(__p, CONFIG_FSL_QMAN_PIRQ_IPERIOD);
++ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
++ if (!portal->cgrs)
++ goto fail_cgrs;
++ /* initial snapshot is no-depletion */
++ qman_cgrs_init(&portal->cgrs[1]);
++ if (cgrs)
++ portal->cgrs[0] = *cgrs;
++ else
++ /* if the given mask is NULL, assume all CGRs can be seen */
++ qman_cgrs_fill(&portal->cgrs[0]);
++ INIT_LIST_HEAD(&portal->cgr_cbs);
++ spin_lock_init(&portal->cgr_lock);
++ if (num_ceetms) {
++ for (ret = 0; ret < num_ceetms; ret++) {
++ portal->ccgrs[ret] = kmalloc(2 *
++ sizeof(struct qman_ccgrs), GFP_KERNEL);
++ if (!portal->ccgrs[ret])
++ goto fail_ccgrs;
++ qman_ccgrs_init(&portal->ccgrs[ret][1]);
++ qman_ccgrs_fill(&portal->ccgrs[ret][0]);
++ INIT_LIST_HEAD(&portal->ccgr_cbs[ret]);
++ }
++ }
++ spin_lock_init(&portal->ccgr_lock);
++ portal->bits = 0;
++ portal->slowpoll = 0;
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ portal->eqci_owned = NULL;
++#endif
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ raw_spin_lock_init(&portal->sharing_lock);
++ portal->is_shared = config->public_cfg.is_shared;
++ portal->sharing_redirect = NULL;
++#endif
++ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
++ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
++ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
++ portal->dqrr_disable_ref = 0;
++ portal->cb_dc_ern = NULL;
++ sprintf(buf, "qportal-%d", config->public_cfg.channel);
++ portal->pdev = platform_device_alloc(buf, -1);
++ if (!portal->pdev) {
++ pr_err("qman_portal - platform_device_alloc() failed\n");
++ goto fail_devalloc;
++ }
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ portal->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
++ portal->pdev->dev.dma_mask = &portal->pdev->dev.coherent_dma_mask;
++#else
++ if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) {
++ pr_err("qman_portal - dma_set_mask() failed\n");
++ goto fail_devadd;
++ }
++#endif
++ portal->pdev->dev.pm_domain = &qman_portal_device_pm_domain;
++ portal->pdev->dev.platform_data = portal;
++ ret = platform_device_add(portal->pdev);
++ if (ret) {
++ pr_err("qman_portal - platform_device_add() failed\n");
++ goto fail_devadd;
++ }
++ dpa_rbtree_init(&portal->retire_table);
++ isdr = 0xffffffff;
++ qm_isr_disable_write(__p, isdr);
++ portal->irq_sources = 0;
++ qm_isr_enable_write(__p, portal->irq_sources);
++ qm_isr_status_clear(__p, 0xffffffff);
++ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
++ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
++ portal)) {
++ pr_err("request_irq() failed\n");
++ goto fail_irq;
++ }
++ if ((config->public_cfg.cpu != -1) &&
++ irq_can_set_affinity(config->public_cfg.irq) &&
++ irq_set_affinity(config->public_cfg.irq,
++ cpumask_of(config->public_cfg.cpu))) {
++ pr_err("irq_set_affinity() failed\n");
++ goto fail_affinity;
++ }
++
++ /* Need EQCR to be empty before continuing */
++ isdr ^= QM_PIRQ_EQCI;
++ qm_isr_disable_write(__p, isdr);
++ ret = qm_eqcr_get_fill(__p);
++ if (ret) {
++ pr_err("Qman EQCR unclean\n");
++ goto fail_eqcr_empty;
++ }
++ isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
++ qm_isr_disable_write(__p, isdr);
++ if (qm_dqrr_current(__p) != NULL) {
++ pr_err("Qman DQRR unclean\n");
++ qm_dqrr_cdc_consume_n(__p, 0xffff);
++ }
++ if (qm_mr_current(__p) != NULL) {
++ /* special handling, drain just in case it's a few FQRNIs */
++ if (drain_mr_fqrni(__p)) {
++ const struct qm_mr_entry *e = qm_mr_current(__p);
++ /*
++ * The message ring cannot be empty here, so there is
++ * no need to check that qm_mr_current() returned a
++ * valid entry.
++ */
++ pr_err("Qman MR unclean, MR VERB 0x%x, rc 0x%x, addr 0x%x\n",
++ e->verb, e->ern.rc, e->ern.fd.addr_lo);
++ goto fail_dqrr_mr_empty;
++ }
++ }
++ /* Success */
++ portal->config = config;
++ qm_isr_disable_write(__p, 0);
++ qm_isr_uninhibit(__p);
++ /* Write a sane SDQCR */
++ qm_dqrr_sdqcr_set(__p, portal->sdqcr);
++ return portal;
++fail_dqrr_mr_empty:
++fail_eqcr_empty:
++fail_affinity:
++ free_irq(config->public_cfg.irq, portal);
++fail_irq:
++ platform_device_del(portal->pdev);
++fail_devadd:
++ platform_device_put(portal->pdev);
++fail_devalloc:
++ if (num_ceetms)
++ for (ret = 0; ret < num_ceetms; ret++)
++ kfree(portal->ccgrs[ret]);
++fail_ccgrs:
++ kfree(portal->cgrs);
++fail_cgrs:
++ qm_isr_finish(__p);
++fail_isr:
++ qm_mc_finish(__p);
++fail_mc:
++ qm_mr_finish(__p);
++fail_mr:
++ qm_dqrr_finish(__p);
++fail_dqrr:
++ qm_eqcr_finish(__p);
++fail_eqcr:
++ if (portal->alloced)
++ kfree(portal);
++ return NULL;
++}
++
++struct qman_portal *qman_create_affine_portal(
++ const struct qm_portal_config *config,
++ const struct qman_cgrs *cgrs)
++{
++ struct qman_portal *res;
++ struct qman_portal *portal;
++
++ portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
++ res = qman_create_portal(portal, config, cgrs);
++ if (res) {
++ spin_lock(&affine_mask_lock);
++ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
++ affine_channels[config->public_cfg.cpu] =
++ config->public_cfg.channel;
++ affine_portals[config->public_cfg.cpu] = portal;
++ spin_unlock(&affine_mask_lock);
++ }
++ return res;
++}
++
++/* These checks are BUG_ON()s because the driver is already supposed to avoid
++ * these cases. */
++struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
++ int cpu)
++{
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ struct qman_portal *p;
++ p = &per_cpu(qman_affine_portal, cpu);
++ /* Check that we don't already have our own portal */
++ BUG_ON(p->config);
++ /* Check that we aren't already slaving to another portal */
++ BUG_ON(p->is_shared);
++ /* Check that 'redirect' is prepared to have us */
++ BUG_ON(!redirect->config->public_cfg.is_shared);
++ /* These are the only elements to initialise when redirecting */
++ p->irq_sources = 0;
++ p->sharing_redirect = redirect;
++ affine_portals[cpu] = p;
++ return p;
++#else
++ BUG();
++ return NULL;
++#endif
++}
++
++void qman_destroy_portal(struct qman_portal *qm)
++{
++ const struct qm_portal_config *pcfg;
++ int i;
++
++ /* Stop dequeues on the portal */
++ qm_dqrr_sdqcr_set(&qm->p, 0);
++
++ /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
++ * something related to QM_PIRQ_EQCI, this may need fixing.
++ * Also, due to the prefetching model used for CI updates in the enqueue
++ * path, this update will only invalidate the CI cacheline *after*
++ * working on it, so we need to call this twice to ensure a full update
++ * irrespective of where the enqueue processing was at when the teardown
++ * began. */
++ qm_eqcr_cce_update(&qm->p);
++ qm_eqcr_cce_update(&qm->p);
++ pcfg = qm->config;
++
++ free_irq(pcfg->public_cfg.irq, qm);
++
++ kfree(qm->cgrs);
++ if (num_ceetms)
++ for (i = 0; i < num_ceetms; i++)
++ kfree(qm->ccgrs[i]);
++ qm_isr_finish(&qm->p);
++ qm_mc_finish(&qm->p);
++ qm_mr_finish(&qm->p);
++ qm_dqrr_finish(&qm->p);
++ qm_eqcr_finish(&qm->p);
++
++ platform_device_del(qm->pdev);
++ platform_device_put(qm->pdev);
++
++ qm->config = NULL;
++ if (qm->alloced)
++ kfree(qm);
++}
++
++const struct qm_portal_config *qman_destroy_affine_portal(void)
++{
++ /* We don't want to redirect if we're a slave, use "raw" */
++ struct qman_portal *qm = get_raw_affine_portal();
++ const struct qm_portal_config *pcfg;
++ int cpu;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (qm->sharing_redirect) {
++ qm->sharing_redirect = NULL;
++ put_affine_portal();
++ return NULL;
++ }
++ qm->is_shared = 0;
++#endif
++ pcfg = qm->config;
++ cpu = pcfg->public_cfg.cpu;
++
++ qman_destroy_portal(qm);
++
++ spin_lock(&affine_mask_lock);
++ cpumask_clear_cpu(cpu, &affine_mask);
++ spin_unlock(&affine_mask_lock);
++ put_affine_portal();
++ return pcfg;
++}
++
++const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
++{
++ return &p->config->public_cfg;
++}
++EXPORT_SYMBOL(qman_p_get_portal_config);
++
++const struct qman_portal_config *qman_get_portal_config(void)
++{
++ struct qman_portal *p = get_affine_portal();
++ const struct qman_portal_config *ret = qman_p_get_portal_config(p);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_get_portal_config);
++
++/* Inline helper to reduce nesting in __poll_portal_slow() */
++static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
++ const struct qm_mr_entry *msg, u8 verb)
++{
++ FQLOCK(fq);
++ switch (verb) {
++ case QM_MR_VERB_FQRL:
++ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
++ fq_clear(fq, QMAN_FQ_STATE_ORL);
++ table_del_fq(p, fq);
++ break;
++ case QM_MR_VERB_FQRN:
++ DPA_ASSERT((fq->state == qman_fq_state_parked) ||
++ (fq->state == qman_fq_state_sched));
++ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
++ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
++ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
++ fq_set(fq, QMAN_FQ_STATE_NE);
++ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
++ fq_set(fq, QMAN_FQ_STATE_ORL);
++ else
++ table_del_fq(p, fq);
++ fq->state = qman_fq_state_retired;
++ break;
++ case QM_MR_VERB_FQPN:
++ DPA_ASSERT(fq->state == qman_fq_state_sched);
++ DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
++ fq->state = qman_fq_state_parked;
++ }
++ FQUNLOCK(fq);
++}
++
++static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
++{
++ const struct qm_mr_entry *msg;
++ struct qm_mr_entry swapped_msg;
++ int k;
++
++ if (is & QM_PIRQ_CSCI) {
++ struct qman_cgrs rr, c;
++ struct qm_mc_result *mcr;
++ struct qman_cgr *cgr;
++ unsigned long irqflags __maybe_unused;
++
++ spin_lock_irqsave(&p->cgr_lock, irqflags);
++ /*
++ * The CSCI bit must be cleared _before_ issuing the
++ * Query Congestion State command, to ensure that a long
++ * CGR State Change callback cannot miss an intervening
++ * state change.
++ */
++ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
++ qm_mc_start(&p->p);
++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ for (k = 0; k < 8; k++)
++ mcr->querycongestion.state.__state[k] = be32_to_cpu(
++ mcr->querycongestion.state.__state[k]);
++ /* mask out the ones I'm not interested in */
++ qman_cgrs_and(&rr, (const struct qman_cgrs *)
++ &mcr->querycongestion.state, &p->cgrs[0]);
++ /* check previous snapshot for delta, enter/exit congestion */
++ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
++ /* update snapshot */
++ qman_cgrs_cp(&p->cgrs[1], &rr);
++ /* Invoke callback */
++ list_for_each_entry(cgr, &p->cgr_cbs, node)
++ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
++ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
++ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++ }
++ if (is & QM_PIRQ_CCSCI) {
++ struct qman_ccgrs rr, c, congestion_result;
++ struct qm_mc_result *mcr;
++ struct qm_mc_command *mcc;
++ struct qm_ceetm_ccg *ccg;
++ unsigned long irqflags __maybe_unused;
++ int i, j;
++
++ spin_lock_irqsave(&p->ccgr_lock, irqflags);
++ /*
++ * The CCSCI bit must be cleared _before_ issuing the
++ * Query Congestion State command, to ensure that a long
++ * CCGR State Change callback cannot miss an intervening
++ * state change.
++ */
++ qm_isr_status_clear(&p->p, QM_PIRQ_CCSCI);
++
++ for (i = 0; i < num_ceetms; i++) {
++ for (j = 0; j < 2; j++) {
++ mcc = qm_mc_start(&p->p);
++ mcc->ccgr_query.ccgrid = cpu_to_be16(
++ CEETM_QUERY_CONGESTION_STATE | j);
++ mcc->ccgr_query.dcpid = i;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ for (k = 0; k < 8; k++)
++ mcr->ccgr_query.congestion_state.state.
++ __state[k] = be32_to_cpu(
++ mcr->ccgr_query.
++ congestion_state.state.
++ __state[k]);
++ congestion_result.q[j] =
++ mcr->ccgr_query.congestion_state.state;
++ }
++ /* mask out the ones I'm not interested in */
++ qman_ccgrs_and(&rr, &congestion_result,
++ &p->ccgrs[i][0]);
++ /*
++ * check previous snapshot for delta, enter/exit
++ * congestion.
++ */
++ qman_ccgrs_xor(&c, &rr, &p->ccgrs[i][1]);
++ /* update snapshot */
++ qman_ccgrs_cp(&p->ccgrs[i][1], &rr);
++ /* Invoke callback */
++ list_for_each_entry(ccg, &p->ccgr_cbs[i], cb_node)
++ if (ccg->cb && qman_ccgrs_get(&c,
++ (ccg->parent->idx << 4) | ccg->idx))
++ ccg->cb(ccg, ccg->cb_ctx,
++ qman_ccgrs_get(&rr,
++ (ccg->parent->idx << 4)
++ | ccg->idx));
++ }
++ spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
++ }
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (is & QM_PIRQ_EQCI) {
++ unsigned long irqflags;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ p->eqci_owned = NULL;
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ wake_up(&affine_queue);
++ }
++#endif
++
++ if (is & QM_PIRQ_EQRI) {
++ unsigned long irqflags __maybe_unused;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ qm_eqcr_cce_update(&p->p);
++ qm_eqcr_set_ithresh(&p->p, 0);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ wake_up(&affine_queue);
++ }
++
++ if (is & QM_PIRQ_MRI) {
++ struct qman_fq *fq;
++ u8 verb, num = 0;
++mr_loop:
++ qm_mr_pvb_update(&p->p);
++ msg = qm_mr_current(&p->p);
++ if (!msg)
++ goto mr_done;
++ swapped_msg = *msg;
++ hw_fd_to_cpu(&swapped_msg.ern.fd);
++ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
++ /* The message is a software ERN iff the 0x20 bit is clear */
++ if (verb & 0x20) {
++ switch (verb) {
++ case QM_MR_VERB_FQRNI:
++ /* nada, we drop FQRNIs on the floor */
++ break;
++ case QM_MR_VERB_FQRN:
++ case QM_MR_VERB_FQRL:
++ /* Lookup in the retirement table */
++ fq = table_find_fq(p, be32_to_cpu(msg->fq.fqid));
++ BUG_ON(!fq);
++ fq_state_change(p, fq, &swapped_msg, verb);
++ if (fq->cb.fqs)
++ fq->cb.fqs(p, fq, &swapped_msg);
++ break;
++ case QM_MR_VERB_FQPN:
++ /* Parked */
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ fq = get_fq_table_entry(
++ be32_to_cpu(msg->fq.contextB));
++#else
++ fq = (void *)(uintptr_t)
++ be32_to_cpu(msg->fq.contextB);
++#endif
++ fq_state_change(p, fq, msg, verb);
++ if (fq->cb.fqs)
++ fq->cb.fqs(p, fq, &swapped_msg);
++ break;
++ case QM_MR_VERB_DC_ERN:
++ /* DCP ERN */
++ if (p->cb_dc_ern)
++ p->cb_dc_ern(p, msg);
++ else if (cb_dc_ern)
++ cb_dc_ern(p, msg);
++ else {
++ static int warn_once;
++ if (!warn_once) {
++ pr_crit("Leaking DCP ERNs!\n");
++ warn_once = 1;
++ }
++ }
++ break;
++ default:
++ pr_crit("Invalid MR verb 0x%02x\n", verb);
++ }
++ } else {
++ /* It's a software ERN */
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
++#else
++ fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
++#endif
++ fq->cb.ern(p, fq, &swapped_msg);
++ }
++ num++;
++ qm_mr_next(&p->p);
++ goto mr_loop;
++mr_done:
++ qm_mr_cci_consume(&p->p, num);
++ }
++ /*
++ * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
++ * processing. If that interrupt source has meanwhile been re-asserted,
++ * we mustn't clear it here (or in the top-level interrupt handler).
++ */
++ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
++}
++
++/* remove some slowish-path stuff from the "fast path" and make sure it isn't
++ * inlined. */
++static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
++{
++ p->vdqcr_owned = NULL;
++ FQLOCK(fq);
++ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
++ FQUNLOCK(fq);
++ wake_up(&affine_queue);
++}
++
++/* Copy a DQRR entry ensuring reads reach QBMan in order */
++static inline void safe_copy_dqrr(struct qm_dqrr_entry *dst,
++ const struct qm_dqrr_entry *src)
++{
++ int i = 0;
++ const u64 *s64 = (const u64 *)src;
++ u64 *d64 = (u64 *)dst;
++
++ /* A DQRR entry only has 32 bytes of valid data, so we only need
++ * to copy four 64-bit values */
++ *d64 = *s64;
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ {
++ u32 res, zero = 0;
++ /* Creating a dependency after copying the first bytes ensures no
++ wrap transaction is generated to QBMan */
++ /* Logical AND the value pointed to by s64 with 0x0 and
++ store the result in res */
++ asm volatile("and %[result], %[in1], %[in2]"
++ : [result] "=r" (res)
++ : [in1] "r" (zero), [in2] "r" (*s64)
++ : "memory");
++ /* Add res to s64 - this creates a dependency on the result of
++ reading the value of s64 before the next read. The side
++ effect is that the core must stall until the first aligned
++ read is complete, thereby preventing a WRAP transaction from
++ being seen by QBMan */
++ asm volatile("add %[result], %[in1], %[in2]"
++ : [result] "=r" (s64)
++ : [in1] "r" (res), [in2] "r" (s64)
++ : "memory");
++ }
++#endif
++ /* Copy the remaining three 64-bit words */
++ d64++; s64++;
++ for (; i < 3; i++)
++ *d64++ = *s64++;
++}
++
++/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
++ * that would conflict with other things if they ran at the same time on the
++ * same cpu are:
++ *
++ * (i) setting/clearing vdqcr_owned, and
++ * (ii) clearing the NE (Not Empty) flag.
++ *
++ * Both are safe because:
++ *
++ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
++ * vdqcr_owned field (which it does before setting VDQCR), and
++ * qman_volatile_dequeue() blocks interrupts and preemption while this is
++ * done so that we can't interfere.
++ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
++ * with (i) that API prevents us from interfering until it's safe.
++ *
++ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
++ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
++ * advantage comes from this function not having to "lock" anything at all.
++ *
++ * Note also that the callbacks are invoked at points which are safe against the
++ * above potential conflicts, but that this function itself is not re-entrant
++ * (this is because the function tracks one end of each FIFO in the portal and
++ * we do *not* want to lock that). So the consequence is that it is safe for
++ * user callbacks to call into any Qman API *except* qman_poll() (as that's the
++ * sole API that could be invoking the callback through this function).
++ */
++static inline unsigned int __poll_portal_fast(struct qman_portal *p,
++ unsigned int poll_limit)
++{
++ const struct qm_dqrr_entry *dq;
++ struct qman_fq *fq;
++ enum qman_cb_dqrr_result res;
++ unsigned int limit = 0;
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++ struct qm_dqrr_entry *shadow;
++ const struct qm_dqrr_entry *orig_dq;
++#endif
++loop:
++ qm_dqrr_pvb_update(&p->p);
++ dq = qm_dqrr_current(&p->p);
++ if (!dq)
++ goto done;
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++ /* If running on an LE system, the fields of the
++ dequeue entry must be byte-swapped. Because the
++ QMan HW ignores writes to the ring, the DQRR
++ entry is copied to a shadow (kept at the same
++ ring index) and the swap is done on the copy */
++ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
++ /* Use safe copy here to avoid WRAP transaction */
++ safe_copy_dqrr(shadow, dq);
++ orig_dq = dq;
++ dq = shadow;
++ shadow->fqid = be32_to_cpu(shadow->fqid);
++ shadow->contextB = be32_to_cpu(shadow->contextB);
++ shadow->seqnum = be16_to_cpu(shadow->seqnum);
++ hw_fd_to_cpu(&shadow->fd);
++#endif
++ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
++ /* VDQCR: don't trust contextB as the FQ may have been
++ * configured for h/w consumption and we're draining it
++ * post-retirement. */
++ fq = p->vdqcr_owned;
++ /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
++ * to check for clearing it when doing volatile dequeues. It's
++ * one less thing to check in the critical path (SDQCR). */
++ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
++ fq_clear(fq, QMAN_FQ_STATE_NE);
++ /* this is duplicated from the SDQCR code, but we have stuff to
++ * do before *and* after this callback, and we don't want
++ * multiple if()s in the critical path (SDQCR). */
++ res = fq->cb.dqrr(p, fq, dq);
++ if (res == qman_cb_dqrr_stop)
++ goto done;
++ /* Check for VDQCR completion */
++ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
++ clear_vdqcr(p, fq);
++ } else {
++ /* SDQCR: contextB points to the FQ */
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ fq = get_fq_table_entry(dq->contextB);
++#else
++ fq = (void *)(uintptr_t)dq->contextB;
++#endif
++ /* Now let the callback do its stuff */
++ res = fq->cb.dqrr(p, fq, dq);
++
++ /* The callback can request that we exit without consuming this
++ * entry or advancing */
++ if (res == qman_cb_dqrr_stop)
++ goto done;
++ }
++ /* Interpret 'dq' from a driver perspective. */
++ /* Parking isn't possible unless HELDACTIVE was set. NB,
++ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
++ * check for HELDACTIVE to cover both. */
++ DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
++ (res != qman_cb_dqrr_park));
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++ if (res != qman_cb_dqrr_defer)
++ qm_dqrr_cdc_consume_1ptr(&p->p, orig_dq,
++ (res == qman_cb_dqrr_park));
++#else
++ /* Defer just means "skip it, I'll consume it myself later on" */
++ if (res != qman_cb_dqrr_defer)
++ qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park));
++#endif
++ /* Move forward */
++ qm_dqrr_next(&p->p);
++ /* Entry processed and consumed, increment our counter. The callback can
++ * request that we exit after consuming the entry, and we also exit if
++ * we reach our processing limit, so loop back only if neither of these
++ * conditions is met. */
++ if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop))
++ goto loop;
++done:
++ return limit;
++}
++
++u32 qman_irqsource_get(void)
++{
++ /* "irqsource" and "poll" APIs mustn't redirect when sharing, they
++ * should shut the user out if they are not the primary CPU hosting the
++ * portal. That's why we use the "raw" interface. */
++ struct qman_portal *p = get_raw_affine_portal();
++ u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_irqsource_get);
++
++int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
++{
++ __maybe_unused unsigned long irqflags;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (p->sharing_redirect)
++ return -EINVAL;
++ else
++#endif
++ {
++ bits = bits & QM_PIRQ_VISIBLE;
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ /* Clear any previously remaining interrupt conditions in
++ * QCSP_ISR. This prevents raising a false interrupt when
++ * interrupt conditions are enabled in QCSP_IER.
++ */
++ qm_isr_status_clear(&p->p, bits);
++ set_bits(bits, &p->irq_sources);
++ qm_isr_enable_write(&p->p, p->irq_sources);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(qman_p_irqsource_add);
++
++int qman_irqsource_add(u32 bits __maybe_unused)
++{
++ struct qman_portal *p = get_raw_affine_portal();
++ int ret;
++ ret = qman_p_irqsource_add(p, bits);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_irqsource_add);
++
++int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
++{
++ __maybe_unused unsigned long irqflags;
++ u32 ier;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (p->sharing_redirect) {
++ put_affine_portal();
++ return -EINVAL;
++ }
++#endif
++ /* Our interrupt handler only processes+clears status register bits that
++ * are in p->irq_sources. As we're trimming that mask, if one of them
++ * were to assert in the status register just before we remove it from
++ * the enable register, there would be an interrupt-storm when we
++ * release the IRQ lock. So we wait for the enable register update to
++ * take effect in h/w (by reading it back) and then clear all other bits
++ * in the status register. Ie. we clear them from ISR once it's certain
++ * IER won't allow them to reassert. */
++ PORTAL_IRQ_LOCK(p, irqflags);
++ bits &= QM_PIRQ_VISIBLE;
++ clear_bits(bits, &p->irq_sources);
++ qm_isr_enable_write(&p->p, p->irq_sources);
++
++ ier = qm_isr_enable_read(&p->p);
++ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
++ * data-dependency, ie. to protect against re-ordering. */
++ qm_isr_status_clear(&p->p, ~ier);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ return 0;
++}
++EXPORT_SYMBOL(qman_p_irqsource_remove);
++
++int qman_irqsource_remove(u32 bits)
++{
++ struct qman_portal *p = get_raw_affine_portal();
++ int ret;
++ ret = qman_p_irqsource_remove(p, bits);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_irqsource_remove);
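++
++/* Illustrative sketch only (not part of this driver): an interrupt-driven
++ * user of the affine portal typically enables its sources once from a
++ * cpu-local init path, e.g.
++ *
++ *	qman_irqsource_add(QM_PIRQ_DQRI | QM_PIRQ_MRI);
++ *
++ * and mirrors that with qman_irqsource_remove() on teardown; anything left
++ * out of the mask must instead be serviced via the qman_poll*() APIs. */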
++
++const cpumask_t *qman_affine_cpus(void)
++{
++ return &affine_mask;
++}
++EXPORT_SYMBOL(qman_affine_cpus);
++
++u16 qman_affine_channel(int cpu)
++{
++ if (cpu < 0) {
++ struct qman_portal *portal = get_raw_affine_portal();
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ BUG_ON(portal->sharing_redirect);
++#endif
++ cpu = portal->config->public_cfg.cpu;
++ put_affine_portal();
++ }
++ BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
++ return affine_channels[cpu];
++}
++EXPORT_SYMBOL(qman_affine_channel);
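++
++/* Illustrative sketch only (not part of this driver): callers usually pair
++ * these two APIs to set up per-cpu resources, e.g.
++ *
++ *	int cpu;
++ *	for_each_cpu(cpu, qman_affine_cpus())
++ *		my_setup_rx(cpu, qman_affine_channel(cpu));
++ *
++ * where my_setup_rx() is a hypothetical helper that targets FQs at the
++ * channel dedicated to that cpu's portal. */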
++
++void *qman_get_affine_portal(int cpu)
++{
++ return affine_portals[cpu];
++}
++EXPORT_SYMBOL(qman_get_affine_portal);
++
++int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
++{
++ int ret;
++
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (unlikely(p->sharing_redirect))
++ ret = -EINVAL;
++ else
++#endif
++ {
++ BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
++ ret = __poll_portal_fast(p, limit);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(qman_p_poll_dqrr);
++
++int qman_poll_dqrr(unsigned int limit)
++{
++ struct qman_portal *p = get_poll_portal();
++ int ret;
++ ret = qman_p_poll_dqrr(p, limit);
++ put_poll_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_poll_dqrr);
++
++u32 qman_p_poll_slow(struct qman_portal *p)
++{
++ u32 ret;
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (unlikely(p->sharing_redirect))
++ ret = (u32)-1;
++ else
++#endif
++ {
++ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
++ ret = __poll_portal_slow(p, is);
++ qm_isr_status_clear(&p->p, ret);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(qman_p_poll_slow);
++
++u32 qman_poll_slow(void)
++{
++ struct qman_portal *p = get_poll_portal();
++ u32 ret;
++ ret = qman_p_poll_slow(p);
++ put_poll_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_poll_slow);
++
++/* Legacy wrapper */
++void qman_p_poll(struct qman_portal *p)
++{
++#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
++ if (unlikely(p->sharing_redirect))
++ return;
++#endif
++ if ((~p->irq_sources) & QM_PIRQ_SLOW) {
++ if (!(p->slowpoll--)) {
++ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
++ u32 active = __poll_portal_slow(p, is);
++ if (active) {
++ qm_isr_status_clear(&p->p, active);
++ p->slowpoll = SLOW_POLL_BUSY;
++ } else
++ p->slowpoll = SLOW_POLL_IDLE;
++ }
++ }
++ if ((~p->irq_sources) & QM_PIRQ_DQRI)
++ __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
++}
++EXPORT_SYMBOL(qman_p_poll);
++
++void qman_poll(void)
++{
++ struct qman_portal *p = get_poll_portal();
++ qman_p_poll(p);
++ put_poll_portal();
++}
++EXPORT_SYMBOL(qman_poll);
++
++void qman_p_stop_dequeues(struct qman_portal *p)
++{
++ qman_stop_dequeues_ex(p);
++}
++EXPORT_SYMBOL(qman_p_stop_dequeues);
++
++void qman_stop_dequeues(void)
++{
++ struct qman_portal *p = get_affine_portal();
++ qman_p_stop_dequeues(p);
++ put_affine_portal();
++}
++EXPORT_SYMBOL(qman_stop_dequeues);
++
++void qman_p_start_dequeues(struct qman_portal *p)
++{
++ unsigned long irqflags __maybe_unused;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ DPA_ASSERT(p->dqrr_disable_ref > 0);
++ if (!(--p->dqrr_disable_ref))
++ qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++}
++EXPORT_SYMBOL(qman_p_start_dequeues);
++
++void qman_start_dequeues(void)
++{
++ struct qman_portal *p = get_affine_portal();
++ qman_p_start_dequeues(p);
++ put_affine_portal();
++}
++EXPORT_SYMBOL(qman_start_dequeues);
++
++void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
++{
++ unsigned long irqflags __maybe_unused;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ pools &= p->config->public_cfg.pools;
++ p->sdqcr |= pools;
++ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++}
++EXPORT_SYMBOL(qman_p_static_dequeue_add);
++
++void qman_static_dequeue_add(u32 pools)
++{
++ struct qman_portal *p = get_affine_portal();
++ qman_p_static_dequeue_add(p, pools);
++ put_affine_portal();
++}
++EXPORT_SYMBOL(qman_static_dequeue_add);
++
++void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
++{
++ unsigned long irqflags __maybe_unused;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ pools &= p->config->public_cfg.pools;
++ p->sdqcr &= ~pools;
++ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++}
++EXPORT_SYMBOL(qman_p_static_dequeue_del);
++
++void qman_static_dequeue_del(u32 pools)
++{
++ struct qman_portal *p = get_affine_portal();
++ qman_p_static_dequeue_del(p, pools);
++ put_affine_portal();
++}
++EXPORT_SYMBOL(qman_static_dequeue_del);
++
++u32 qman_p_static_dequeue_get(struct qman_portal *p)
++{
++ return p->sdqcr;
++}
++EXPORT_SYMBOL(qman_p_static_dequeue_get);
++
++u32 qman_static_dequeue_get(void)
++{
++ struct qman_portal *p = get_affine_portal();
++ u32 ret = qman_p_static_dequeue_get(p);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_static_dequeue_get);
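++
++/* Illustrative sketch only (not part of this driver): to have this cpu's
++ * portal also service pool channel 'n' in addition to its dedicated
++ * channel, a caller would typically do
++ *
++ *	qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL(n));
++ *
++ * (QM_SDQCR_CHANNELS_POOL() is assumed to be the pool-channel conversion
++ * helper exported by the QMan headers this patch builds against.) */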
++
++void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
++ int park_request)
++{
++ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
++}
++EXPORT_SYMBOL(qman_p_dca);
++
++void qman_dca(struct qm_dqrr_entry *dq, int park_request)
++{
++ struct qman_portal *p = get_affine_portal();
++ qman_p_dca(p, dq, park_request);
++ put_affine_portal();
++}
++EXPORT_SYMBOL(qman_dca);
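++
++/* Illustrative sketch only (not part of this driver): a DQRR callback that
++ * returned qman_cb_dqrr_defer must consume the entry itself later via this
++ * API, e.g. qman_dca(dq, 0) to consume without parking the FQ, or
++ * qman_dca(dq, 1) to consume and park it. */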
++
++/*******************/
++/* Frame queue API */
++/*******************/
++
++static const char *mcr_result_str(u8 result)
++{
++ switch (result) {
++ case QM_MCR_RESULT_NULL:
++ return "QM_MCR_RESULT_NULL";
++ case QM_MCR_RESULT_OK:
++ return "QM_MCR_RESULT_OK";
++ case QM_MCR_RESULT_ERR_FQID:
++ return "QM_MCR_RESULT_ERR_FQID";
++ case QM_MCR_RESULT_ERR_FQSTATE:
++ return "QM_MCR_RESULT_ERR_FQSTATE";
++ case QM_MCR_RESULT_ERR_NOTEMPTY:
++ return "QM_MCR_RESULT_ERR_NOTEMPTY";
++ case QM_MCR_RESULT_PENDING:
++ return "QM_MCR_RESULT_PENDING";
++ case QM_MCR_RESULT_ERR_BADCOMMAND:
++ return "QM_MCR_RESULT_ERR_BADCOMMAND";
++ }
++ return "<unknown MCR result>";
++}
++
++int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
++{
++ struct qm_fqd fqd;
++ struct qm_mcr_queryfq_np np;
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++
++ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
++ int ret = qman_alloc_fqid(&fqid);
++ if (ret)
++ return ret;
++ }
++ spin_lock_init(&fq->fqlock);
++ fq->fqid = fqid;
++ fq->flags = flags;
++ fq->state = qman_fq_state_oos;
++ fq->cgr_groupid = 0;
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
++ return -ENOMEM;
++#endif
++ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
++ return 0;
++ /* Everything else is AS_IS support */
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++ mcc = qm_mc_start(&p->p);
++ mcc->queryfq.fqid = cpu_to_be32(fqid);
++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
++ if (mcr->result != QM_MCR_RESULT_OK) {
++ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
++ goto err;
++ }
++ fqd = mcr->queryfq.fqd;
++ hw_fqd_to_cpu(&fqd);
++ mcc = qm_mc_start(&p->p);
++ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
++ if (mcr->result != QM_MCR_RESULT_OK) {
++ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
++ goto err;
++ }
++ np = mcr->queryfq_np;
++ /* Phew, have queryfq and queryfq_np results, stitch together
++ * the FQ object from those. */
++ fq->cgr_groupid = fqd.cgid;
++ switch (np.state & QM_MCR_NP_STATE_MASK) {
++ case QM_MCR_NP_STATE_OOS:
++ break;
++ case QM_MCR_NP_STATE_RETIRED:
++ fq->state = qman_fq_state_retired;
++ if (np.frm_cnt)
++ fq_set(fq, QMAN_FQ_STATE_NE);
++ break;
++ case QM_MCR_NP_STATE_TEN_SCHED:
++ case QM_MCR_NP_STATE_TRU_SCHED:
++ case QM_MCR_NP_STATE_ACTIVE:
++ fq->state = qman_fq_state_sched;
++ if (np.state & QM_MCR_NP_STATE_R)
++ fq_set(fq, QMAN_FQ_STATE_CHANGING);
++ break;
++ case QM_MCR_NP_STATE_PARKED:
++ fq->state = qman_fq_state_parked;
++ break;
++ default:
++ DPA_ASSERT(NULL == "invalid FQ state");
++ }
++ if (fqd.fq_ctrl & QM_FQCTRL_CGE)
++ fq->state |= QMAN_FQ_STATE_CGR_EN;
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return 0;
++err:
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
++ qman_release_fqid(fqid);
++ return -EIO;
++}
++EXPORT_SYMBOL(qman_create_fq);
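++
++/* Illustrative sketch only (not part of this driver): typical FQ bring-up
++ * pairs qman_create_fq() with qman_init_fq() (defined below), e.g.
++ *
++ *	struct qm_mcc_initfq opts = { .we_mask = 0 };
++ *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb };
++ *	int err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq);
++ *	if (!err)
++ *		err = qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, &opts);
++ *
++ * 'my_dqrr_cb' is a hypothetical callback; an empty we_mask accepts the
++ * FQD defaults, as the qman_init_fq() logic below shows. */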
++
++void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
++{
++
++ /* We don't need to lock the FQ as it is a pre-condition that the FQ be
++ * quiesced. Instead, run some checks. */
++ switch (fq->state) {
++ case qman_fq_state_parked:
++ DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
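++ /* fall through */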
++ case qman_fq_state_oos:
++ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
++ qman_release_fqid(fq->fqid);
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ clear_fq_table_entry(fq->key);
++#endif
++ return;
++ default:
++ break;
++ }
++ DPA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
++}
++EXPORT_SYMBOL(qman_destroy_fq);
++
++u32 qman_fq_fqid(struct qman_fq *fq)
++{
++ return fq->fqid;
++}
++EXPORT_SYMBOL(qman_fq_fqid);
++
++void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
++{
++ if (state)
++ *state = fq->state;
++ if (flags)
++ *flags = fq->flags;
++}
++EXPORT_SYMBOL(qman_fq_state);
++
++int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
++ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
++
++ if ((fq->state != qman_fq_state_oos) &&
++ (fq->state != qman_fq_state_parked))
++ return -EINVAL;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
++ return -EINVAL;
++#endif
++ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
++ /* OAC can't be set at the same time as TDTHRESH */
++ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
++ return -EINVAL;
++ }
++ /* Issue an INITFQ_[PARKED|SCHED] management command */
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++ FQLOCK(fq);
++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
++ ((fq->state != qman_fq_state_oos) &&
++ (fq->state != qman_fq_state_parked)))) {
++ FQUNLOCK(fq);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return -EBUSY;
++ }
++ mcc = qm_mc_start(&p->p);
++ if (opts)
++ mcc->initfq = *opts;
++ mcc->initfq.fqid = cpu_to_be32(fq->fqid);
++ mcc->initfq.count = 0;
++
++ /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
++ * demux pointer. Otherwise, the caller-provided value is allowed to
++ * stand, don't overwrite it. */
++ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
++ dma_addr_t phys_fq;
++ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ mcc->initfq.fqd.context_b = fq->key;
++#else
++ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
++#endif
++ /* and the physical address - NB, if the user wasn't trying to
++ * set CONTEXTA, clear the stashing settings. */
++ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
++ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
++ memset(&mcc->initfq.fqd.context_a, 0,
++ sizeof(mcc->initfq.fqd.context_a));
++ } else {
++ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
++ DMA_TO_DEVICE);
++ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
++ }
++ }
++ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
++ mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel;
++ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
++ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
++ mcc->initfq.fqd.dest.wq = 4;
++ }
++ }
++ mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
++ cpu_to_hw_fqd(&mcc->initfq.fqd);
++ qm_mc_commit(&p->p, myverb);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
++ res = mcr->result;
++ if (res != QM_MCR_RESULT_OK) {
++ FQUNLOCK(fq);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return -EIO;
++ }
++ if (opts) {
++ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
++ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
++ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
++ else
++ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
++ }
++ if (opts->we_mask & QM_INITFQ_WE_CGID)
++ fq->cgr_groupid = opts->fqd.cgid;
++ }
++ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
++ qman_fq_state_sched : qman_fq_state_parked;
++ FQUNLOCK(fq);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return 0;
++}
++EXPORT_SYMBOL(qman_init_fq);
++
++int qman_schedule_fq(struct qman_fq *fq)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ int ret = 0;
++ u8 res;
++
++ if (fq->state != qman_fq_state_parked)
++ return -EINVAL;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
++ return -EINVAL;
++#endif
++ /* Issue a ALTERFQ_SCHED management command */
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++ FQLOCK(fq);
++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
++ (fq->state != qman_fq_state_parked))) {
++ ret = -EBUSY;
++ goto out;
++ }
++ mcc = qm_mc_start(&p->p);
++ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
++ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
++ res = mcr->result;
++ if (res != QM_MCR_RESULT_OK) {
++ ret = -EIO;
++ goto out;
++ }
++ fq->state = qman_fq_state_sched;
++out:
++ FQUNLOCK(fq);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_schedule_fq);
++
++int qman_retire_fq(struct qman_fq *fq, u32 *flags)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ int rval;
++ u8 res;
++
++ if ((fq->state != qman_fq_state_parked) &&
++ (fq->state != qman_fq_state_sched))
++ return -EINVAL;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
++ return -EINVAL;
++#endif
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++ FQLOCK(fq);
++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
++ (fq->state == qman_fq_state_retired) ||
++ (fq->state == qman_fq_state_oos))) {
++ rval = -EBUSY;
++ goto out;
++ }
++ rval = table_push_fq(p, fq);
++ if (rval)
++ goto out;
++ mcc = qm_mc_start(&p->p);
++ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
++ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
++ res = mcr->result;
++ /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
++ * and defer the flags until FQRNI or FQRN (respectively) show up. But
++ * "Friendly" is to process OK immediately, and not set CHANGING. We do
++ * friendly, otherwise the caller doesn't necessarily have a fully
++ * "retired" FQ on return even if the retirement was immediate. However
++ * this does mean some code duplication between here and
++ * fq_state_change(). */
++ if (likely(res == QM_MCR_RESULT_OK)) {
++ rval = 0;
++ /* Process 'fq' right away, we'll ignore FQRNI */
++ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
++ fq_set(fq, QMAN_FQ_STATE_NE);
++ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
++ fq_set(fq, QMAN_FQ_STATE_ORL);
++ else
++ table_del_fq(p, fq);
++ if (flags)
++ *flags = fq->flags;
++ fq->state = qman_fq_state_retired;
++ if (fq->cb.fqs) {
++ /* Another issue with supporting "immediate" retirement
++ * is that we're forced to drop FQRNIs, because by the
++ * time they're seen it may already be "too late" (the
++ * fq may have been OOS'd and free()'d already). But if
++ * the upper layer wants a callback whether it's
++ * immediate or not, we have to fake a "MR" entry to
++ * look like an FQRNI... */
++ struct qm_mr_entry msg;
++ msg.verb = QM_MR_VERB_FQRNI;
++ msg.fq.fqs = mcr->alterfq.fqs;
++ msg.fq.fqid = fq->fqid;
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ msg.fq.contextB = fq->key;
++#else
++ msg.fq.contextB = (u32)(uintptr_t)fq;
++#endif
++ fq->cb.fqs(p, fq, &msg);
++ }
++ } else if (res == QM_MCR_RESULT_PENDING) {
++ rval = 1;
++ fq_set(fq, QMAN_FQ_STATE_CHANGING);
++ } else {
++ rval = -EIO;
++ table_del_fq(p, fq);
++ }
++out:
++ FQUNLOCK(fq);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return rval;
++}
++EXPORT_SYMBOL(qman_retire_fq);
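++
++/* Illustrative sketch only (not part of this driver): the usual teardown
++ * order built from these APIs is retire, drain, then OOS, e.g.
++ *
++ *	ret = qman_retire_fq(fq, &flags);	(1 means retirement pending)
++ *	qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
++ *			      QMAN_VOLATILE_FLAG_FINISH,
++ *			      QM_VDQCR_NUMFRAMES_TILLEMPTY);
++ *	qman_oos_fq(fq);
++ *	qman_destroy_fq(fq, 0);
++ *
++ * (QM_VDQCR_NUMFRAMES_TILLEMPTY is assumed to come from the QMan headers.
++ * A pending retirement must also complete, via the FQRN message handled in
++ * __poll_portal_slow(), before the volatile-dequeue drain can start.) */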
++
++int qman_oos_fq(struct qman_fq *fq)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ int ret = 0;
++ u8 res;
++
++ if (fq->state != qman_fq_state_retired)
++ return -EINVAL;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
++ return -EINVAL;
++#endif
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++ FQLOCK(fq);
++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
++ (fq->state != qman_fq_state_retired))) {
++ ret = -EBUSY;
++ goto out;
++ }
++ mcc = qm_mc_start(&p->p);
++ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
++ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
++ res = mcr->result;
++ if (res != QM_MCR_RESULT_OK) {
++ ret = -EIO;
++ goto out;
++ }
++ fq->state = qman_fq_state_oos;
++out:
++ FQUNLOCK(fq);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_oos_fq);
++
++int qman_fq_flow_control(struct qman_fq *fq, int xon)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ int ret = 0;
++ u8 res;
++ u8 myverb;
++
++ if ((fq->state == qman_fq_state_oos) ||
++ (fq->state == qman_fq_state_retired) ||
++ (fq->state == qman_fq_state_parked))
++ return -EINVAL;
++
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
++ return -EINVAL;
++#endif
++ /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++ FQLOCK(fq);
++ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
++ (fq->state == qman_fq_state_parked) ||
++ (fq->state == qman_fq_state_oos) ||
++ (fq->state == qman_fq_state_retired))) {
++ ret = -EBUSY;
++ goto out;
++ }
++ mcc = qm_mc_start(&p->p);
++ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
++ mcc->alterfq.count = 0;
++ myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
++
++ qm_mc_commit(&p->p, myverb);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
++
++ res = mcr->result;
++ if (res != QM_MCR_RESULT_OK) {
++ ret = -EIO;
++ goto out;
++ }
++out:
++ FQUNLOCK(fq);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_fq_flow_control);
++
++int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p = get_affine_portal();
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ mcc = qm_mc_start(&p->p);
++ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK) {
++ *fqd = mcr->queryfq.fqd;
++ hw_fqd_to_cpu(fqd);
++ }
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (res != QM_MCR_RESULT_OK)
++ return -EIO;
++ return 0;
++}
++EXPORT_SYMBOL(qman_query_fq);
++
++int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p = get_affine_portal();
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ mcc = qm_mc_start(&p->p);
++ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK) {
++ *np = mcr->queryfq_np;
++ np->fqd_link = be24_to_cpu(np->fqd_link);
++ np->odp_seq = be16_to_cpu(np->odp_seq);
++ np->orp_nesn = be16_to_cpu(np->orp_nesn);
++ np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
++ np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
++ np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
++ np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
++ np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
++ np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
++ np->ics_surp = be16_to_cpu(np->ics_surp);
++ np->byte_cnt = be32_to_cpu(np->byte_cnt);
++ np->frm_cnt = be24_to_cpu(np->frm_cnt);
++ np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
++ np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
++ np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
++ np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
++ np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
++ }
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (res == QM_MCR_RESULT_ERR_FQID)
++ return -ERANGE;
++ else if (res != QM_MCR_RESULT_OK)
++ return -EIO;
++ return 0;
++}
++EXPORT_SYMBOL(qman_query_fq_np);
++
++int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p = get_affine_portal();
++ unsigned long irqflags __maybe_unused;
++ u8 res, myverb;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
++ QM_MCR_VERB_QUERYWQ;
++ mcc = qm_mc_start(&p->p);
++ mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
++ qm_mc_commit(&p->p, myverb);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK) {
++ int i, array_len;
++ wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
++ array_len = ARRAY_SIZE(mcr->querywq.wq_len);
++ for (i = 0; i < array_len; i++)
++ wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
++ }
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
++ return -EIO;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(qman_query_wq);
++
++int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
++ struct qm_mcr_cgrtestwrite *result)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p = get_affine_portal();
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ mcc = qm_mc_start(&p->p);
++ mcc->cgrtestwrite.cgid = cgr->cgrid;
++ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
++ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
++ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK)
++ *result = mcr->cgrtestwrite;
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
++ return -EIO;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(qman_testwrite_cgr);
++
++int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p = get_affine_portal();
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++ int i;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ mcc = qm_mc_start(&p->p);
++ mcc->querycgr.cgid = cgr->cgrid;
++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK)
++ *cgrd = mcr->querycgr;
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
++ return -EIO;
++ }
++ cgrd->cgr.wr_parm_g.word =
++ be32_to_cpu(cgrd->cgr.wr_parm_g.word);
++ cgrd->cgr.wr_parm_y.word =
++ be32_to_cpu(cgrd->cgr.wr_parm_y.word);
++ cgrd->cgr.wr_parm_r.word =
++ be32_to_cpu(cgrd->cgr.wr_parm_r.word);
++ cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
++ cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
++ for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
++ be32_to_cpus(&cgrd->cscn_targ_swp[i]);
++ return 0;
++}
++EXPORT_SYMBOL(qman_query_cgr);
++
++int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
++{
++ struct qm_mc_result *mcr;
++ struct qman_portal *p = get_affine_portal();
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++ int i;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ qm_mc_start(&p->p);
++ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_MCC_VERB_QUERYCONGESTION);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK)
++ memcpy_fromio(congestion, &mcr->querycongestion,
++ sizeof(*congestion));
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
++ return -EIO;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(congestion->state.__state); i++)
++ be32_to_cpus(&congestion->state.__state[i]);
++ return 0;
++}
++EXPORT_SYMBOL(qman_query_congestion);
++
++/* internal function used as a wait_event() expression */
++static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
++{
++ unsigned long irqflags __maybe_unused;
++ int ret = -EBUSY;
++ PORTAL_IRQ_LOCK(p, irqflags);
++ if (!p->vdqcr_owned) {
++ FQLOCK(fq);
++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
++ goto escape;
++ fq_set(fq, QMAN_FQ_STATE_VDQCR);
++ FQUNLOCK(fq);
++ p->vdqcr_owned = fq;
++ ret = 0;
++ }
++escape:
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ if (!ret)
++ qm_dqrr_vdqcr_set(&p->p, vdqcr);
++ return ret;
++}
++
++static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
++{
++ int ret;
++ *p = get_affine_portal();
++ ret = set_p_vdqcr(*p, fq, vdqcr);
++ put_affine_portal();
++ return ret;
++}
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
++ u32 vdqcr, u32 flags)
++{
++ int ret = 0;
++ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
++ ret = wait_event_interruptible(affine_queue,
++ !(ret = set_p_vdqcr(p, fq, vdqcr)));
++ else
++ wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
++ return ret;
++}
++
++static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
++ u32 vdqcr, u32 flags)
++{
++ int ret = 0;
++ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
++ ret = wait_event_interruptible(affine_queue,
++ !(ret = set_vdqcr(p, fq, vdqcr)));
++ else
++ wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr)));
++ return ret;
++}
++#endif
++
++int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
++ u32 flags __maybe_unused, u32 vdqcr)
++{
++ int ret;
++
++ if ((fq->state != qman_fq_state_parked) &&
++ (fq->state != qman_fq_state_retired))
++ return -EINVAL;
++ if (vdqcr & QM_VDQCR_FQID_MASK)
++ return -EINVAL;
++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
++ return -EBUSY;
++ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_VOLATILE_FLAG_WAIT)
++ ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
++ else
++#endif
++ ret = set_p_vdqcr(p, fq, vdqcr);
++ if (ret)
++ return ret;
++ /* VDQCR is set */
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
++ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
++ /* NB: don't propagate any error - the caller wouldn't
++ * know whether the VDQCR was issued or not. A signal
++ * could arrive after returning anyway, so the caller
++ * can check signal_pending() if that's an issue. */
++ wait_event_interruptible(affine_queue,
++ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
++ else
++ wait_event(affine_queue,
++ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL(qman_p_volatile_dequeue);
++
++int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
++ u32 vdqcr)
++{
++ struct qman_portal *p;
++ int ret;
++
++ if ((fq->state != qman_fq_state_parked) &&
++ (fq->state != qman_fq_state_retired))
++ return -EINVAL;
++ if (vdqcr & QM_VDQCR_FQID_MASK)
++ return -EINVAL;
++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
++ return -EBUSY;
++ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_VOLATILE_FLAG_WAIT)
++ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
++ else
++#endif
++ ret = set_vdqcr(&p, fq, vdqcr);
++ if (ret)
++ return ret;
++ /* VDQCR is set */
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
++ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
++ /* NB: don't propagate any error - the caller wouldn't
++ * know whether the VDQCR was issued or not. A signal
++ * could arrive after returning anyway, so the caller
++ * can check signal_pending() if that's an issue. */
++ wait_event_interruptible(affine_queue,
++ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
++ else
++ wait_event(affine_queue,
++ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL(qman_volatile_dequeue);
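++
++/*
++ * Illustrative usage sketch (not part of this patch's API surface):
++ * drain a parked or retired FQ and block until the volatile dequeue
++ * completes. The vdqcr encoding of the frame count/precedence is left
++ * as a placeholder, since those helpers live outside this hunk.
++ *
++ *	u32 vdqcr = ...;	// NUMFRAMES/PRECEDENCE bits, FQID left 0
++ *	int err = qman_volatile_dequeue(fq,
++ *			QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
++ *			vdqcr);
++ *
++ * The FQID bits of vdqcr must be zero on entry; the function fills
++ * them in from fq->fqid and rejects a non-zero FQID with -EINVAL.
++ */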
++
++static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
++{
++ if (avail)
++ qm_eqcr_cce_prefetch(&p->p);
++ else
++ qm_eqcr_cce_update(&p->p);
++}
++
++int qman_eqcr_is_empty(void)
++{
++ unsigned long irqflags __maybe_unused;
++ struct qman_portal *p = get_affine_portal();
++ u8 avail;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ update_eqcr_ci(p, 0);
++ avail = qm_eqcr_get_fill(&p->p);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return avail == 0;
++}
++EXPORT_SYMBOL(qman_eqcr_is_empty);
++
++void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
++{
++ if (affine) {
++ unsigned long irqflags __maybe_unused;
++ struct qman_portal *p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++ p->cb_dc_ern = handler;
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ } else
++ cb_dc_ern = handler;
++}
++EXPORT_SYMBOL(qman_set_dc_ern);
++
++static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
++ unsigned long *irqflags __maybe_unused,
++ struct qman_fq *fq,
++ const struct qm_fd *fd,
++ u32 flags)
++{
++ struct qm_eqcr_entry *eq;
++ u8 avail;
++ PORTAL_IRQ_LOCK(p, (*irqflags));
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
++ if (p->eqci_owned) {
++ PORTAL_IRQ_UNLOCK(p, (*irqflags));
++ return NULL;
++ }
++ p->eqci_owned = fq;
++ }
++#endif
++ if (p->use_eqcr_ci_stashing) {
++ /*
++ * The stashing case is easy, only update if we need to in
++ * order to try and liberate ring entries.
++ */
++ eq = qm_eqcr_start_stash(&p->p);
++ } else {
++ /*
++ * The non-stashing case is harder, need to prefetch ahead of
++ * time.
++ */
++ avail = qm_eqcr_get_avail(&p->p);
++ if (avail < 2)
++ update_eqcr_ci(p, avail);
++ eq = qm_eqcr_start_no_stash(&p->p);
++ }
++
++ if (unlikely(!eq)) {
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
++ p->eqci_owned = NULL;
++#endif
++ PORTAL_IRQ_UNLOCK(p, (*irqflags));
++ return NULL;
++ }
++ if (flags & QMAN_ENQUEUE_FLAG_DCA)
++ eq->dca = QM_EQCR_DCA_ENABLE |
++ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
++ QM_EQCR_DCA_PARK : 0) |
++ ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
++ eq->fqid = cpu_to_be32(fq->fqid);
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ eq->tag = cpu_to_be32(fq->key);
++#else
++ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
++#endif
++ eq->fd = *fd;
++ cpu_to_hw_fd(&eq->fd);
++ return eq;
++}
++
++static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
++ unsigned long *irqflags __maybe_unused,
++ struct qman_fq *fq,
++ const struct qm_fd *fd,
++ u32 flags)
++{
++ struct qm_eqcr_entry *eq;
++ *p = get_affine_portal();
++ eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
++ if (!eq)
++ put_affine_portal();
++ return eq;
++}
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
++ unsigned long *irqflags __maybe_unused,
++ struct qman_fq *fq,
++ const struct qm_fd *fd,
++ u32 flags)
++{
++ struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags);
++ if (!eq)
++ qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH);
++ return eq;
++}
++static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
++ unsigned long *irqflags __maybe_unused,
++ struct qman_fq *fq,
++ const struct qm_fd *fd,
++ u32 flags)
++{
++ struct qm_eqcr_entry *eq;
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
++ /* NB: return NULL if signal occurs before completion. Signal
++ * can occur during return. Caller must check for signal */
++ wait_event_interruptible(affine_queue,
++ (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
++ else
++ wait_event(affine_queue,
++ (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
++ return eq;
++}
++static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
++ unsigned long *irqflags __maybe_unused,
++ struct qman_fq *fq,
++ const struct qm_fd *fd,
++ u32 flags)
++{
++ struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
++ if (!eq)
++ qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
++ return eq;
++}
++static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
++ unsigned long *irqflags __maybe_unused,
++ struct qman_fq *fq,
++ const struct qm_fd *fd,
++ u32 flags)
++{
++ struct qm_eqcr_entry *eq;
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
++ /* NB: return NULL if signal occurs before completion. Signal
++ * can occur during return. Caller must check for signal */
++ wait_event_interruptible(affine_queue,
++ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
++ else
++ wait_event(affine_queue,
++ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
++ return eq;
++}
++#endif
++
++int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
++ const struct qm_fd *fd, u32 flags)
++{
++ struct qm_eqcr_entry *eq;
++ unsigned long irqflags __maybe_unused;
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
++ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
++ else
++#endif
++ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
++ if (!eq)
++ return -EBUSY;
++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
++ /* Factor the below out, it's used from qman_enqueue_orp() too */
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
++ /* NB: return success even if signal occurs before
++ * condition is true. pvb_commit guarantees success */
++ wait_event_interruptible(affine_queue,
++ (p->eqci_owned != fq));
++ else
++ wait_event(affine_queue, (p->eqci_owned != fq));
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL(qman_p_enqueue);
++
++int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
++{
++ struct qman_portal *p;
++ struct qm_eqcr_entry *eq;
++ unsigned long irqflags __maybe_unused;
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
++ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
++ else
++#endif
++ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
++ if (!eq)
++ return -EBUSY;
++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
++ /* Factor the below out, it's used from qman_enqueue_orp() too */
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
++ /* NB: return success even if signal occurs before
++ * condition is true. pvb_commit guarantees success */
++ wait_event_interruptible(affine_queue,
++ (p->eqci_owned != fq));
++ else
++ wait_event(affine_queue, (p->eqci_owned != fq));
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL(qman_enqueue);
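++
++/*
++ * Illustrative usage sketch: enqueue one frame descriptor and busy-wait
++ * when the EQCR ring is full. With QMAN_ENQUEUE_FLAG_WAIT the call
++ * sleeps instead of returning -EBUSY, and QMAN_ENQUEUE_FLAG_WAIT_SYNC
++ * additionally waits until hardware has consumed the EQCR entry.
++ *
++ *	while (qman_enqueue(fq, &fd, 0) == -EBUSY)
++ *		cpu_relax();
++ */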
++
++int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
++ const struct qm_fd *fd, u32 flags,
++ struct qman_fq *orp, u16 orp_seqnum)
++{
++ struct qm_eqcr_entry *eq;
++ unsigned long irqflags __maybe_unused;
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
++ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
++ else
++#endif
++ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
++ if (!eq)
++ return -EBUSY;
++ /* Process ORP-specifics here */
++ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
++ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
++ else {
++ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
++ if (flags & QMAN_ENQUEUE_FLAG_NESN)
++ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
++ else
++			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
++ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
++ }
++ eq->seqnum = cpu_to_be16(orp_seqnum);
++ eq->orp = cpu_to_be32(orp->fqid);
++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
++ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
++ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
++ /* NB: return success even if signal occurs before
++ * condition is true. pvb_commit guarantees success */
++ wait_event_interruptible(affine_queue,
++ (p->eqci_owned != fq));
++ else
++ wait_event(affine_queue, (p->eqci_owned != fq));
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL(qman_p_enqueue_orp);
++
++int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
++ struct qman_fq *orp, u16 orp_seqnum)
++{
++ struct qman_portal *p;
++ struct qm_eqcr_entry *eq;
++ unsigned long irqflags __maybe_unused;
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
++ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
++ else
++#endif
++ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
++ if (!eq)
++ return -EBUSY;
++ /* Process ORP-specifics here */
++ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
++ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
++ else {
++ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
++ if (flags & QMAN_ENQUEUE_FLAG_NESN)
++ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
++ else
++			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
++ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
++ }
++ eq->seqnum = cpu_to_be16(orp_seqnum);
++ eq->orp = cpu_to_be32(orp->fqid);
++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
++ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
++ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
++ /* NB: return success even if signal occurs before
++ * condition is true. pvb_commit guarantees success */
++ wait_event_interruptible(affine_queue,
++ (p->eqci_owned != fq));
++ else
++ wait_event(affine_queue, (p->eqci_owned != fq));
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL(qman_enqueue_orp);
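++
++/*
++ * Note on the ORP variants above: the caller supplies the
++ * order-restoration FQ and a 14-bit sequence number.
++ * QMAN_ENQUEUE_FLAG_NLIS marks the fragment as "not last in sequence",
++ * while QMAN_ENQUEUE_FLAG_NESN/_HOLE advance the ORP sequence window
++ * without enqueuing a frame - which is why the ENQUEUE verb bit is
++ * dropped when either of those flags is set.
++ */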
++
++int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
++ const struct qm_fd *fd, u32 flags,
++ qman_cb_precommit cb, void *cb_arg)
++{
++ struct qm_eqcr_entry *eq;
++ unsigned long irqflags __maybe_unused;
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
++ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
++ else
++#endif
++ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
++ if (!eq)
++ return -EBUSY;
++	/* invoke the user-supplied callback before writing the commit verb */
++ if (cb(cb_arg)) {
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ return -EINVAL;
++ }
++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
++ /* Factor the below out, it's used from qman_enqueue_orp() too */
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
++ /* NB: return success even if signal occurs before
++ * condition is true. pvb_commit guarantees success */
++ wait_event_interruptible(affine_queue,
++ (p->eqci_owned != fq));
++ else
++ wait_event(affine_queue, (p->eqci_owned != fq));
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL(qman_p_enqueue_precommit);
++
++int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
++ u32 flags, qman_cb_precommit cb, void *cb_arg)
++{
++ struct qman_portal *p;
++ struct qm_eqcr_entry *eq;
++ unsigned long irqflags __maybe_unused;
++
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
++ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
++ else
++#endif
++ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
++ if (!eq)
++ return -EBUSY;
++	/* invoke the user-supplied callback before writing the commit verb */
++ if (cb(cb_arg)) {
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return -EINVAL;
++ }
++ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
++ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
++ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
++ /* Factor the below out, it's used from qman_enqueue_orp() too */
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
++ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
++ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
++ /* NB: return success even if signal occurs before
++ * condition is true. pvb_commit guarantees success */
++ wait_event_interruptible(affine_queue,
++ (p->eqci_owned != fq));
++ else
++ wait_event(affine_queue, (p->eqci_owned != fq));
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL(qman_enqueue_precommit);
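++
++/*
++ * Note on the precommit variants above: the EQCR slot is reserved
++ * first, the caller's cb(cb_arg) runs with the portal still locked,
++ * and only a zero return lets the commit verb be written; a non-zero
++ * return makes the enqueue fail with -EINVAL without committing.
++ */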
++
++int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
++ struct qm_mcc_initcgr *opts)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p = get_affine_portal();
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++ u8 verb = QM_MCC_VERB_MODIFYCGR;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++ mcc = qm_mc_start(&p->p);
++ if (opts)
++ mcc->initcgr = *opts;
++ mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
++ mcc->initcgr.cgr.wr_parm_g.word =
++ cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
++ mcc->initcgr.cgr.wr_parm_y.word =
++ cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
++ mcc->initcgr.cgr.wr_parm_r.word =
++ cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
++ mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
++ mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
++
++ mcc->initcgr.cgid = cgr->cgrid;
++ if (flags & QMAN_CGR_FLAG_USE_INIT)
++ verb = QM_MCC_VERB_INITCGR;
++ qm_mc_commit(&p->p, verb);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
++ res = mcr->result;
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
++}
++EXPORT_SYMBOL(qman_modify_cgr);
++
++#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
++ QM_CHANNEL_SWPORTAL0))
++#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
++#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
++
++static u8 qman_cgr_cpus[__CGR_NUM];
++
++int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
++ struct qm_mcc_initcgr *opts)
++{
++ unsigned long irqflags __maybe_unused;
++ struct qm_mcr_querycgr cgr_state;
++ struct qm_mcc_initcgr local_opts;
++ int ret;
++ struct qman_portal *p;
++
++	/* We have to check that the provided CGRID is within the limits of
++	 * the data-structures, for obvious reasons. However, we'll let h/w
++	 * take care of determining whether it's within the limits of what
++	 * exists on the SoC. */
++ if (cgr->cgrid >= __CGR_NUM)
++ return -EINVAL;
++
++ preempt_disable();
++ p = get_affine_portal();
++ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
++ preempt_enable();
++
++ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
++ cgr->chan = p->config->public_cfg.channel;
++ spin_lock_irqsave(&p->cgr_lock, irqflags);
++
++ /* if no opts specified, just add it to the list */
++ if (!opts)
++ goto add_list;
++
++ ret = qman_query_cgr(cgr, &cgr_state);
++ if (ret)
++ goto release_lock;
++ if (opts)
++ local_opts = *opts;
++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++ local_opts.cgr.cscn_targ_upd_ctrl =
++ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
++ else
++ /* Overwrite TARG */
++ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
++ TARG_MASK(p);
++ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
++
++ /* send init if flags indicate so */
++ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
++ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
++ else
++ ret = qman_modify_cgr(cgr, 0, &local_opts);
++ if (ret)
++ goto release_lock;
++add_list:
++ list_add(&cgr->node, &p->cgr_cbs);
++
++ /* Determine if newly added object requires its callback to be called */
++ ret = qman_query_cgr(cgr, &cgr_state);
++ if (ret) {
++		/* we can't go back, so proceed and return success, but
++		 * scream and wail to the log file */
++ pr_crit("CGR HW state partially modified\n");
++ ret = 0;
++ goto release_lock;
++ }
++ if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
++ cgr->cgrid))
++ cgr->cb(p, cgr, 1);
++release_lock:
++ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_create_cgr);
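++
++/*
++ * Illustrative usage sketch (my_cscn_cb and init_opts are caller
++ * supplied, shown here only as assumptions): register a CGR with a
++ * congestion-state callback on the current CPU's affine portal.
++ *
++ *	static struct qman_cgr my_cgr = {
++ *		.cgrid = 5,
++ *		.cb = my_cscn_cb,
++ *	};
++ *	err = qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &init_opts);
++ *
++ * The CPU executing this call is recorded in qman_cgr_cpus[], which is
++ * what qman_delete_cgr_safe() later uses to find the owning portal.
++ */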
++
++int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
++ struct qm_mcc_initcgr *opts)
++{
++ unsigned long irqflags __maybe_unused;
++ struct qm_mcc_initcgr local_opts;
++ struct qm_mcr_querycgr cgr_state;
++ int ret;
++
++ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
++		pr_warn("This QMan version doesn't support sending CSCN to a DCP portal\n");
++ return -EINVAL;
++ }
++ /* We have to check that the provided CGRID is within the limits of the
++ * data-structures, for obvious reasons. However we'll let h/w take
++ * care of determining whether it's within the limits of what exists on
++ * the SoC.
++ */
++ if (cgr->cgrid >= __CGR_NUM)
++ return -EINVAL;
++
++ ret = qman_query_cgr(cgr, &cgr_state);
++ if (ret)
++ return ret;
++
++ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
++ if (opts)
++ local_opts = *opts;
++
++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++ local_opts.cgr.cscn_targ_upd_ctrl =
++ QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
++ QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
++ else
++ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
++ TARG_DCP_MASK(dcp_portal);
++ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
++
++ /* send init if flags indicate so */
++ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
++ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
++ &local_opts);
++ else
++ ret = qman_modify_cgr(cgr, 0, &local_opts);
++
++ return ret;
++}
++EXPORT_SYMBOL(qman_create_cgr_to_dcp);
++
++int qman_delete_cgr(struct qman_cgr *cgr)
++{
++ unsigned long irqflags __maybe_unused;
++ struct qm_mcr_querycgr cgr_state;
++ struct qm_mcc_initcgr local_opts;
++ int ret = 0;
++ struct qman_cgr *i;
++ struct qman_portal *p = get_affine_portal();
++
++ if (cgr->chan != p->config->public_cfg.channel) {
++		pr_crit("Attempting to delete cgr from a different portal "
++			"than it was created on: create 0x%x, delete 0x%x\n",
++			cgr->chan, p->config->public_cfg.channel);
++ ret = -EINVAL;
++ goto put_portal;
++ }
++ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
++ spin_lock_irqsave(&p->cgr_lock, irqflags);
++ list_del(&cgr->node);
++ /*
++ * If there are no other CGR objects for this CGRID in the list, update
++ * CSCN_TARG accordingly
++ */
++ list_for_each_entry(i, &p->cgr_cbs, node)
++ if ((i->cgrid == cgr->cgrid) && i->cb)
++ goto release_lock;
++ ret = qman_query_cgr(cgr, &cgr_state);
++ if (ret) {
++ /* add back to the list */
++ list_add(&cgr->node, &p->cgr_cbs);
++ goto release_lock;
++ }
++ /* Overwrite TARG */
++ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
++ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
++ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
++ else
++ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
++ ~(TARG_MASK(p));
++ ret = qman_modify_cgr(cgr, 0, &local_opts);
++ if (ret)
++ /* add back to the list */
++ list_add(&cgr->node, &p->cgr_cbs);
++release_lock:
++ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++put_portal:
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_delete_cgr);
++
++struct cgr_comp {
++ struct qman_cgr *cgr;
++ struct completion completion;
++};
++
++static int qman_delete_cgr_thread(void *p)
++{
++	struct cgr_comp *cgr_comp = p;
++	int res;
++
++	res = qman_delete_cgr(cgr_comp->cgr);
++ complete(&cgr_comp->completion);
++
++ return res;
++}
++
++void qman_delete_cgr_safe(struct qman_cgr *cgr)
++{
++ struct task_struct *thread;
++ struct cgr_comp cgr_comp;
++
++ preempt_disable();
++ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
++ init_completion(&cgr_comp.completion);
++ cgr_comp.cgr = cgr;
++ thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
++ "cgr_del");
++
++ if (likely(!IS_ERR(thread))) {
++ kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
++ wake_up_process(thread);
++ wait_for_completion(&cgr_comp.completion);
++ preempt_enable();
++ return;
++ }
++ }
++ qman_delete_cgr(cgr);
++ preempt_enable();
++}
++EXPORT_SYMBOL(qman_delete_cgr_safe);
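++
++/*
++ * Design note: qman_delete_cgr() must run on the CPU whose affine
++ * portal the CGR was created on (the cgr_cbs list and CSCN target are
++ * per-portal state). When called from any other CPU,
++ * qman_delete_cgr_safe() therefore spawns a kthread, binds it to
++ * qman_cgr_cpus[cgrid] and waits on a completion for the result.
++ */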
++
++int qm_get_clock(u64 *clock_hz)
++{
++ if (!qman_clk) {
++ pr_warn("Qman clock speed is unknown\n");
++ return -EINVAL;
++ }
++ *clock_hz = (u64)qman_clk;
++ return 0;
++}
++EXPORT_SYMBOL(qm_get_clock);
++
++int qm_set_clock(u64 clock_hz)
++{
++ if (qman_clk)
++ return -1;
++ qman_clk = (u32)clock_hz;
++ return 0;
++}
++EXPORT_SYMBOL(qm_set_clock);
++
++/* CEETM management command */
++static int qman_ceetm_configure_lfqmt(struct qm_mcc_ceetm_lfqmt_config *opts)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->lfqmt_config = *opts;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_CONFIG);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_CEETM_VERB_LFQMT_CONFIG);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ res = mcr->result;
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: CONFIGURE LFQMT failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++int qman_ceetm_query_lfqmt(int lfqid,
++ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->lfqmt_query.lfqid = lfqid;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_QUERY);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_LFQMT_QUERY);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK)
++ *lfqmt_query = mcr->lfqmt_query;
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: QUERY LFQMT failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_query_lfqmt);
++
++static int qman_ceetm_configure_cq(struct qm_mcc_ceetm_cq_config *opts)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->cq_config = *opts;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_CONFIG);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ res = mcr->result;
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_CONFIG);
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: CONFIGURE CQ failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
++ struct qm_mcr_ceetm_cq_query *cq_query)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->cq_query.cqid = cpu_to_be16(cqid);
++ mcc->cq_query.dcpid = dcpid;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_QUERY);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_QUERY);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK) {
++ *cq_query = mcr->cq_query;
++ hw_cq_query_to_cpu(cq_query);
++ }
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: QUERY CQ failed\n");
++ return -EIO;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_query_cq);
++
++static int qman_ceetm_configure_dct(struct qm_mcc_ceetm_dct_config *opts)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->dct_config = *opts;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_CONFIG);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_CONFIG);
++ res = mcr->result;
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: CONFIGURE DCT failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++static int qman_ceetm_query_dct(struct qm_mcc_ceetm_dct_query *opts,
++ struct qm_mcr_ceetm_dct_query *dct_query)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p = get_affine_portal();
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->dct_query = *opts;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_QUERY);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_QUERY);
++ res = mcr->result;
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: QUERY DCT failed\n");
++ return -EIO;
++ }
++
++ *dct_query = mcr->dct_query;
++ return 0;
++}
++
++static int qman_ceetm_configure_class_scheduler(
++ struct qm_mcc_ceetm_class_scheduler_config *opts)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->csch_config = *opts;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
++ res = mcr->result;
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: CONFIGURE CLASS SCHEDULER failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++static int qman_ceetm_query_class_scheduler(struct qm_ceetm_channel *channel,
++ struct qm_mcr_ceetm_class_scheduler_query *query)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->csch_query.cqcid = cpu_to_be16(channel->idx);
++ mcc->csch_query.dcpid = channel->dcp_idx;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
++ res = mcr->result;
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: QUERY CLASS SCHEDULER failed\n");
++ return -EIO;
++ }
++ *query = mcr->csch_query;
++ return 0;
++}
++
++static int qman_ceetm_configure_mapping_shaper_tcfc(
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config *opts)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->mst_config = *opts;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
++ res = mcr->result;
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: CONFIGURE CHANNEL MAPPING failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++static int qman_ceetm_query_mapping_shaper_tcfc(
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query *opts,
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query *response)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->mst_query = *opts;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
++ res = mcr->result;
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: QUERY CHANNEL MAPPING failed\n");
++ return -EIO;
++ }
++
++ *response = mcr->mst_query;
++ return 0;
++}
++
++static int qman_ceetm_configure_ccgr(struct qm_mcc_ceetm_ccgr_config *opts)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->ccgr_config = *opts;
++
++ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_CONFIG);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_CONFIG);
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ res = mcr->result;
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: CONFIGURE CCGR failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
++ struct qm_mcr_ceetm_ccgr_query *response)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->ccgr_query.ccgrid = cpu_to_be16(ccgr_query->ccgrid);
++ mcc->ccgr_query.dcpid = ccgr_query->dcpid;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
++
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_QUERY);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK) {
++ *response = mcr->ccgr_query;
++ hw_ccgr_query_to_cpu(response);
++ }
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: QUERY CCGR failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_query_ccgr);
++
++static int qman_ceetm_cq_peek_pop_xsfdrread(struct qm_ceetm_cq *cq,
++ u8 command_type, u16 xsfdr,
++ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread *cq_ppxr)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ switch (command_type) {
++ case 0:
++ case 1:
++ mcc->cq_ppxr.cqid = (cq->parent->idx << 4) | cq->idx;
++ break;
++ case 2:
++ mcc->cq_ppxr.xsfdr = xsfdr;
++ break;
++ default:
++ break;
++ }
++ mcc->cq_ppxr.ct = command_type;
++ mcc->cq_ppxr.dcpid = cq->parent->dcp_idx;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ res = mcr->result;
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: CQ PEEK/POP/XSFDR READ failed\n");
++ return -EIO;
++ }
++ *cq_ppxr = mcr->cq_ppxr;
++ return 0;
++}
++
++static int qman_ceetm_query_statistics(u16 cid,
++ enum qm_dc_portal dcp_idx,
++ u16 command_type,
++ struct qm_mcr_ceetm_statistics_query *query_result)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->stats_query_write.cid = cid;
++ mcc->stats_query_write.dcpid = dcp_idx;
++ mcc->stats_query_write.ct = command_type;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
++
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ res = mcr->result;
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: STATISTICS QUERY failed\n");
++ return -EIO;
++ }
++ *query_result = mcr->stats_query;
++ return 0;
++}
++
++int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
++ u16 command_type, u64 frame_count,
++ u64 byte_count)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ mcc->stats_query_write.cid = cid;
++ mcc->stats_query_write.dcpid = dcp_idx;
++ mcc->stats_query_write.ct = command_type;
++ mcc->stats_query_write.frm_cnt = frame_count;
++ mcc->stats_query_write.byte_cnt = byte_count;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
++
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
++
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++
++ res = mcr->result;
++ if (res != QM_MCR_RESULT_OK) {
++ pr_err("CEETM: STATISTICS WRITE failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_query_write_statistics);
++
++int qman_ceetm_bps2tokenrate(u64 bps, struct qm_ceetm_rate *token_rate,
++ int rounding)
++{
++ u16 pres;
++ u64 temp;
++ u64 qman_freq;
++ int ret;
++
++ /* Read PRES from CEET_CFG_PRES register */
++ ret = qman_ceetm_get_prescaler(&pres);
++ if (ret)
++ return -EINVAL;
++
++ ret = qm_get_clock(&qman_freq);
++ if (ret)
++ return -EINVAL;
++
++	/* token-rate = bytes-per-second * update-reference-period
++	 *
++	 * Where token-rate is N/8192 for an integer N, and
++	 * update-reference-period is (2^22)/(PRES*QHz), where PRES
++	 * is the prescaler value and QHz is the QMan clock frequency.
++	 * So:
++	 *
++	 * token-rate = (bytes-per-second*2^22)/(PRES*QHz)
++	 *
++	 * Converting to bits-per-second gives:
++	 *
++	 *	token-rate = (bps*2^19) / (PRES*QHz)
++	 *	N = (bps*2^32) / (PRES*QHz)
++	 *
++	 * And to avoid 64-bit overflow if 'bps' is larger than 4Gbps
++	 * (yet minimise rounding error if 'bps' is small), we reorganise
++	 * the formula to use two 16-bit shifts rather than one 32-bit shift:
++	 *	N = (((bps*2^16)/PRES)*2^16)/QHz
++	 */
++ temp = ROUNDING((bps << 16), pres, rounding);
++ temp = ROUNDING((temp << 16), qman_freq, rounding);
++ token_rate->whole = temp >> 13;
++ token_rate->fraction = temp & (((u64)1 << 13) - 1);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_bps2tokenrate);
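++
++/*
++ * Worked example for the conversion above (numbers are illustrative
++ * only): with a 500 MHz QMan clock and PRES = 1000, bps = 10^9 gives
++ *
++ *	N        = (10^9 * 2^32) / (1000 * 5*10^8) ~= 8589935
++ *	whole    = N >> 13      = 1048
++ *	fraction = N & 0x1FFF   = 4719
++ *
++ * i.e. roughly 1048.58 tokens per update-reference-period.
++ */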
++
++int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate, u64 *bps,
++ int rounding)
++{
++ u16 pres;
++ u64 temp;
++ u64 qman_freq;
++ int ret;
++
++ /* Read PRES from CEET_CFG_PRES register */
++ ret = qman_ceetm_get_prescaler(&pres);
++ if (ret)
++ return -EINVAL;
++
++ ret = qm_get_clock(&qman_freq);
++ if (ret)
++ return -EINVAL;
++
++	/* bytes-per-second = token-rate / update-reference-period
++	 *
++	 * where "token-rate" is N/8192 for an integer N, and
++	 * "update-reference-period" is (2^22)/(PRES*QHz), where PRES is
++	 * the prescaler value and QHz is the QMan clock frequency. So:
++	 *
++	 * bytes-per-second = (N/8192) / (4194304/(PRES*QHz))
++	 *		    = N*PRES*QHz / (4194304*8192)
++	 *		    = N*PRES*QHz / (2^35)
++	 *
++	 * Converting to bits-per-second gives:
++	 *
++	 *		bps = N*PRES*QHz / (2^32)
++	 *
++	 * Note, the numerator has a maximum width of 72 bits! So to
++	 * avoid 64-bit overflow errors, we calculate PRES*QHz (maximum
++	 * width 48 bits) divided by 2^16 (reducing to maximum 32 bits),
++	 * before multiplying by N and dividing by the remaining 2^16:
++	 *
++	 *	temp = PRES*QHz / (2^16)
++	 *	bps  = temp*N / (2^16)
++	 */
++	temp = ROUNDING(qman_freq * pres, (u64)1 << 16, rounding);
++ temp *= ((token_rate->whole << 13) + token_rate->fraction);
++ *bps = ROUNDING(temp, (u64)(1) << 16, rounding);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_tokenrate2bps);
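++
++/*
++ * Continuing the worked example: feeding whole = 1048, fraction = 4719
++ * back through qman_ceetm_tokenrate2bps() with the same PRES and clock
++ * reproduces bps ~= 10^9, so the two conversions round-trip to within
++ * the rounding error of the two 16-bit shifts.
++ */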
++
++int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp, enum qm_dc_portal dcp_idx,
++ unsigned int sp_idx)
++{
++ struct qm_ceetm_sp *p;
++
++ DPA_ASSERT((dcp_idx == qm_dc_portal_fman0) ||
++ (dcp_idx == qm_dc_portal_fman1));
++
++ if ((sp_idx < qman_ceetms[dcp_idx].sp_range[0]) ||
++ (sp_idx >= (qman_ceetms[dcp_idx].sp_range[0] +
++ qman_ceetms[dcp_idx].sp_range[1]))) {
++ pr_err("Sub-portal index doesn't exist\n");
++ return -EINVAL;
++ }
++
++ list_for_each_entry(p, &qman_ceetms[dcp_idx].sub_portals, node) {
++ if ((p->idx == sp_idx) && (p->is_claimed == 0)) {
++ p->is_claimed = 1;
++ *sp = p;
++ return 0;
++ }
++ }
++ pr_err("The sub-portal#%d is not available!\n", sp_idx);
++ return -ENODEV;
++}
++EXPORT_SYMBOL(qman_ceetm_sp_claim);
++
++int qman_ceetm_sp_release(struct qm_ceetm_sp *sp)
++{
++ struct qm_ceetm_sp *p;
++
++ if (sp->lni && sp->lni->is_claimed == 1) {
++		pr_err("The LNI attached to this sub-portal has not been released!\n");
++ return -EBUSY;
++ }
++
++ list_for_each_entry(p, &qman_ceetms[sp->dcp_idx].sub_portals, node) {
++ if (p->idx == sp->idx) {
++ p->is_claimed = 0;
++ p->lni = NULL;
++ }
++ }
++ /* Disable CEETM mode of this sub-portal */
++ qman_sp_disable_ceetm_mode(sp->dcp_idx, sp->idx);
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_sp_release);
++
++int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni, enum qm_dc_portal dcp_idx,
++ unsigned int lni_idx)
++{
++ struct qm_ceetm_lni *p;
++
++ if ((lni_idx < qman_ceetms[dcp_idx].lni_range[0]) ||
++ (lni_idx >= (qman_ceetms[dcp_idx].lni_range[0] +
++ qman_ceetms[dcp_idx].lni_range[1]))) {
++ pr_err("The lni index is out of range\n");
++ return -EINVAL;
++ }
++
++ list_for_each_entry(p, &qman_ceetms[dcp_idx].lnis, node) {
++ if ((p->idx == lni_idx) && (p->is_claimed == 0)) {
++ *lni = p;
++ p->is_claimed = 1;
++ return 0;
++ }
++ }
++
++ pr_err("The LNI#%d is not available!\n", lni_idx);
++ return -EINVAL;
++}
++EXPORT_SYMBOL(qman_ceetm_lni_claim);
++
++int qman_ceetm_lni_release(struct qm_ceetm_lni *lni)
++{
++ struct qm_ceetm_lni *p;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++
++ if (!list_empty(&lni->channels)) {
++		pr_err("The LNI still has unreleased channels!\n");
++ return -EBUSY;
++ }
++
++ list_for_each_entry(p, &qman_ceetms[lni->dcp_idx].lnis, node) {
++ if (p->idx == lni->idx) {
++ p->shaper_enable = 0;
++ p->shaper_couple = 0;
++ p->cr_token_rate.whole = 0;
++ p->cr_token_rate.fraction = 0;
++ p->er_token_rate.whole = 0;
++ p->er_token_rate.fraction = 0;
++ p->cr_token_bucket_limit = 0;
++ p->er_token_bucket_limit = 0;
++ p->is_claimed = 0;
++ }
++ }
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
++ config_opts.dcpid = lni->dcp_idx;
++ memset(&config_opts.shaper_config, 0,
++ sizeof(config_opts.shaper_config));
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_release);
++
++int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp, struct qm_ceetm_lni *lni)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
++ config_opts.dcpid = sp->dcp_idx;
++ config_opts.sp_mapping.map_lni_id = lni->idx;
++ sp->lni = lni;
++
++ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts))
++ return -EINVAL;
++
++ /* Enable CEETM mode for this sub-portal */
++ return qman_sp_enable_ceetm_mode(sp->dcp_idx, sp->idx);
++}
++EXPORT_SYMBOL(qman_ceetm_sp_set_lni);
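++
++/*
++ * Bring-up sketch (illustrative ordering, indices are placeholders):
++ * claim a sub-portal and an LNI on the same DCP portal, then map them
++ * before claiming channels.
++ *
++ *	struct qm_ceetm_sp *sp;
++ *	struct qm_ceetm_lni *lni;
++ *
++ *	if (qman_ceetm_sp_claim(&sp, qm_dc_portal_fman0, 0) ||
++ *	    qman_ceetm_lni_claim(&lni, qm_dc_portal_fman0, 0) ||
++ *	    qman_ceetm_sp_set_lni(sp, lni))
++ *		goto teardown;	/* release in the reverse order */
++ */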
++
++int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp, unsigned int *lni_idx)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
++ query_opts.dcpid = sp->dcp_idx;
++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
++ pr_err("Can't get SP <-> LNI mapping\n");
++ return -EINVAL;
++ }
++	*lni_idx = query_result.sp_mapping_query.map_lni_id;
++	if (sp->lni)
++		sp->lni->idx = query_result.sp_mapping_query.map_lni_id;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_sp_get_lni);
++
++int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
++ int oal)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++
++ if (lni->shaper_enable) {
++ pr_err("The shaper has already been enabled\n");
++ return -EINVAL;
++ }
++ lni->shaper_enable = 1;
++ lni->shaper_couple = coupled;
++ lni->oal = oal;
++
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
++ config_opts.dcpid = lni->dcp_idx;
++ config_opts.shaper_config.cpl = coupled;
++ config_opts.shaper_config.oal = oal;
++ config_opts.shaper_config.crtcr = cpu_to_be24((lni->cr_token_rate.whole
++ << 13) | lni->cr_token_rate.fraction);
++ config_opts.shaper_config.ertcr = cpu_to_be24((lni->er_token_rate.whole
++ << 13) | lni->er_token_rate.fraction);
++ config_opts.shaper_config.crtbl =
++ cpu_to_be16(lni->cr_token_bucket_limit);
++ config_opts.shaper_config.ertbl =
++ cpu_to_be16(lni->er_token_bucket_limit);
++ config_opts.shaper_config.mps = 60;
++
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_enable_shaper);
++
++int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++
++ if (!lni->shaper_enable) {
++		pr_err("The shaper is already disabled\n");
++ return -EINVAL;
++ }
++
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
++ config_opts.dcpid = lni->dcp_idx;
++ config_opts.shaper_config.cpl = lni->shaper_couple;
++ config_opts.shaper_config.oal = lni->oal;
++ config_opts.shaper_config.crtbl =
++ cpu_to_be16(lni->cr_token_bucket_limit);
++ config_opts.shaper_config.ertbl =
++ cpu_to_be16(lni->er_token_bucket_limit);
++	/* Set the CR/ER rate to all 1's to configure an infinite rate,
++	 * effectively disabling shaping.
++	 */
++ config_opts.shaper_config.crtcr = 0xFFFFFF;
++ config_opts.shaper_config.ertcr = 0xFFFFFF;
++ config_opts.shaper_config.mps = 60;
++ lni->shaper_enable = 0;
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_disable_shaper);
++
++int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni)
++{
++ return lni->shaper_enable;
++}
++EXPORT_SYMBOL(qman_ceetm_lni_is_shaper_enabled);
++
++int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
++ const struct qm_ceetm_rate *token_rate,
++ u16 token_limit)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ int ret;
++
++ lni->cr_token_rate.whole = token_rate->whole;
++ lni->cr_token_rate.fraction = token_rate->fraction;
++ lni->cr_token_bucket_limit = token_limit;
++ if (!lni->shaper_enable)
++ return 0;
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
++ query_opts.dcpid = lni->dcp_idx;
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
++ &query_result);
++ if (ret) {
++		pr_err("Failed to get the current LNI shaper setting\n");
++ return -EINVAL;
++ }
++
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
++ config_opts.dcpid = lni->dcp_idx;
++ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole << 13)
++ | (token_rate->fraction));
++ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
++ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
++ config_opts.shaper_config.oal = query_result.shaper_query.oal;
++ config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
++ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
++ config_opts.shaper_config.mps = query_result.shaper_query.mps;
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate);
++
++int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
++ u64 bps,
++ u16 token_limit)
++{
++ struct qm_ceetm_rate token_rate;
++ int ret;
++
++ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
++ if (ret) {
++		pr_err("Cannot convert bps to token rate\n");
++ return -EINVAL;
++ }
++
++ return qman_ceetm_lni_set_commit_rate(lni, &token_rate, token_limit);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate_bps);
++
++int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
++	/* We have to check that the provided CGRID is within the limits of the
++	 * data-structures, for obvious reasons. However, we'll let h/w take
++	 * care of determining whether it's within the limits of what exists on
++	 * the SoC.
++	 */
++ int ret;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
++ query_opts.dcpid = lni->dcp_idx;
++
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
++ if (ret) {
++ pr_err("The LNI CR rate or limit is not set\n");
++ return -EINVAL;
++ }
++ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
++ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
++ 0x1FFF;
++ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate);
++
++int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
++ u64 *bps, u16 *token_limit)
++{
++ struct qm_ceetm_rate token_rate;
++ int ret;
++
++ ret = qman_ceetm_lni_get_commit_rate(lni, &token_rate, token_limit);
++ if (ret) {
++ pr_err("The LNI CR rate or limit is not available\n");
++ return -EINVAL;
++ }
++
++ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate_bps);
++
++int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
++ const struct qm_ceetm_rate *token_rate,
++ u16 token_limit)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ int ret;
++
++ lni->er_token_rate.whole = token_rate->whole;
++ lni->er_token_rate.fraction = token_rate->fraction;
++ lni->er_token_bucket_limit = token_limit;
++ if (!lni->shaper_enable)
++ return 0;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
++ query_opts.dcpid = lni->dcp_idx;
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
++ &query_result);
++ if (ret) {
++		pr_err("Failed to get the current LNI shaper setting\n");
++ return -EINVAL;
++ }
++
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
++ config_opts.dcpid = lni->dcp_idx;
++ config_opts.shaper_config.ertcr = cpu_to_be24(
++ (token_rate->whole << 13) | (token_rate->fraction));
++ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
++ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
++ config_opts.shaper_config.oal = query_result.shaper_query.oal;
++ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
++ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
++ config_opts.shaper_config.mps = query_result.shaper_query.mps;
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate);
++
++int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
++ u64 bps,
++ u16 token_limit)
++{
++ struct qm_ceetm_rate token_rate;
++ int ret;
++
++ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
++ if (ret) {
++		pr_err("Cannot convert bps to token rate\n");
++ return -EINVAL;
++ }
++ return qman_ceetm_lni_set_excess_rate(lni, &token_rate, token_limit);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate_bps);
++
++int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
++ struct qm_ceetm_rate *token_rate,
++ u16 *token_limit)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ int ret;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
++ query_opts.dcpid = lni->dcp_idx;
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
++ if (ret) {
++ pr_err("The LNI ER rate or limit is not set\n");
++ return -EINVAL;
++ }
++ token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
++ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
++ 0x1FFF;
++ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate);
++
++int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
++ u64 *bps, u16 *token_limit)
++{
++ struct qm_ceetm_rate token_rate;
++ int ret;
++
++ ret = qman_ceetm_lni_get_excess_rate(lni, &token_rate, token_limit);
++ if (ret) {
++ pr_err("The LNI ER rate or limit is not available\n");
++ return -EINVAL;
++ }
++
++ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate_bps);
++
++#define QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(n) ((15 - n) * 4)
++#define QMAN_CEETM_LNITCFCC_ENABLE 0x8
++int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
++ unsigned int cq_level,
++ int traffic_class)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ u64 lnitcfcc;
++
++	if ((cq_level > 15) || (traffic_class > 7)) {
++ pr_err("The CQ or traffic class id is out of range\n");
++ return -EINVAL;
++ }
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
++ query_opts.dcpid = lni->dcp_idx;
++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
++		pr_err("Failed to query tcfcc\n");
++ return -EINVAL;
++ }
++
++ lnitcfcc = be64_to_cpu(query_result.tcfc_query.lnitcfcc);
++ if (traffic_class == -1) {
++ /* disable tcfc for this CQ */
++ lnitcfcc &= ~((u64)QMAN_CEETM_LNITCFCC_ENABLE <<
++ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
++ } else {
++ lnitcfcc &= ~((u64)0xF <<
++ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
++ lnitcfcc |= ((u64)(QMAN_CEETM_LNITCFCC_ENABLE |
++ traffic_class)) <<
++ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level);
++ }
++ config_opts.tcfc_config.lnitcfcc = cpu_to_be64(lnitcfcc);
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
++ config_opts.dcpid = lni->dcp_idx;
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_lni_set_tcfcc);
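++
++/*
++ * Layout of the 64-bit LNITCFCC word manipulated above: one 4-bit
++ * nibble per CQ level, with CQ#0 in the most significant nibble
++ * (hence the (15 - n) * 4 shift). Bit 3 of each nibble is the enable
++ * flag and bits 2:0 hold the traffic class, so passing
++ * traffic_class == -1 simply clears the enable bit for that level.
++ */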
++
++#define QMAN_CEETM_LNITCFCC_TC_MASK 0x7
++int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni, unsigned int cq_level,
++ int *traffic_class)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ int ret;
++ u8 lnitcfcc;
++
++ if (cq_level > 15) {
++ pr_err("the CQ level is out of range\n");
++ return -EINVAL;
++ }
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
++ query_opts.dcpid = lni->dcp_idx;
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
++ if (ret)
++ return ret;
++	lnitcfcc = (u8)(be64_to_cpu(query_result.tcfc_query.lnitcfcc) >>
++		   QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
++ if (lnitcfcc & QMAN_CEETM_LNITCFCC_ENABLE)
++ *traffic_class = lnitcfcc & QMAN_CEETM_LNITCFCC_TC_MASK;
++ else
++ *traffic_class = -1;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_lni_get_tcfcc);
++
++int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
++ struct qm_ceetm_lni *lni)
++{
++ struct qm_ceetm_channel *p;
++ u32 channel_idx;
++ int ret = 0;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++
++ if (lni->dcp_idx == qm_dc_portal_fman0) {
++ ret = qman_alloc_ceetm0_channel(&channel_idx);
++ } else if (lni->dcp_idx == qm_dc_portal_fman1) {
++ ret = qman_alloc_ceetm1_channel(&channel_idx);
++ } else {
++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
++ lni->dcp_idx);
++ return -EINVAL;
++ }
++
++ if (ret) {
++		pr_err("There is no channel available for LNI#%d\n", lni->idx);
++ return -ENODEV;
++ }
++
++ p = kzalloc(sizeof(*p), GFP_KERNEL);
++ if (!p)
++ return -ENOMEM;
++ p->idx = channel_idx;
++ p->dcp_idx = lni->dcp_idx;
++ p->lni_idx = lni->idx;
++ list_add_tail(&p->node, &lni->channels);
++ INIT_LIST_HEAD(&p->class_queues);
++ INIT_LIST_HEAD(&p->ccgs);
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
++ channel_idx);
++ config_opts.dcpid = lni->dcp_idx;
++ config_opts.channel_mapping.map_lni_id = lni->idx;
++ config_opts.channel_mapping.map_shaped = 0;
++	if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
++		pr_err("Can't map channel#%d for LNI#%d\n",
++			channel_idx, lni->idx);
++		list_del(&p->node);
++		kfree(p);
++		return -EINVAL;
++	}
++ *channel = p;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_claim);
++
++int qman_ceetm_channel_release(struct qm_ceetm_channel *channel)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++ if (!list_empty(&channel->class_queues)) {
++		pr_err("CEETM channel#%d still has unreleased class queues!\n",
++			channel->idx);
++ return -EBUSY;
++ }
++ if (!list_empty(&channel->ccgs)) {
++		pr_err("CEETM channel#%d still has unreleased CCGs!\n",
++			channel->idx);
++ return -EBUSY;
++ }
++
++	/* Validate that channel->dcp_idx corresponds to a known FMan */
++ if ((channel->dcp_idx != qm_dc_portal_fman0) &&
++ (channel->dcp_idx != qm_dc_portal_fman1)) {
++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
++ channel->dcp_idx);
++ return -EINVAL;
++ }
++
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ config_opts.dcpid = channel->dcp_idx;
++ memset(&config_opts.shaper_config, 0,
++ sizeof(config_opts.shaper_config));
++ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
++		pr_err("Can't reset channel shaping parameters\n");
++ return -EINVAL;
++ }
++
++ if (channel->dcp_idx == qm_dc_portal_fman0) {
++ qman_release_ceetm0_channelid(channel->idx);
++ } else if (channel->dcp_idx == qm_dc_portal_fman1) {
++ qman_release_ceetm1_channelid(channel->idx);
++ } else {
++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
++ channel->dcp_idx);
++ return -EINVAL;
++ }
++ list_del(&channel->node);
++ kfree(channel);
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_release);
++
++int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
++ int coupled)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++
++ if (channel->shaper_enable == 1) {
++		pr_err("This channel's shaper is already enabled!\n");
++ return -EINVAL;
++ }
++
++ channel->shaper_enable = 1;
++ channel->shaper_couple = coupled;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
++ channel->idx);
++ query_opts.dcpid = channel->dcp_idx;
++
++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
++ pr_err("Can't query channel mapping\n");
++ return -EINVAL;
++ }
++
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
++ channel->idx);
++ config_opts.dcpid = channel->dcp_idx;
++ config_opts.channel_mapping.map_lni_id =
++ query_result.channel_mapping_query.map_lni_id;
++ config_opts.channel_mapping.map_shaped = 1;
++ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
++ pr_err("Can't enable shaper for channel #%d\n", channel->idx);
++ return -EINVAL;
++ }
++
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ config_opts.shaper_config.cpl = coupled;
++ config_opts.shaper_config.crtcr =
++ cpu_to_be24((channel->cr_token_rate.whole
++ << 13) |
++ channel->cr_token_rate.fraction);
++ config_opts.shaper_config.ertcr =
++ cpu_to_be24(channel->er_token_rate.whole
++ << 13 |
++ channel->er_token_rate.fraction);
++ config_opts.shaper_config.crtbl =
++ cpu_to_be16(channel->cr_token_bucket_limit);
++ config_opts.shaper_config.ertbl =
++ cpu_to_be16(channel->er_token_bucket_limit);
++
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_enable_shaper);
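++
++/* The CR/ER token rates above are packed into 24-bit fields with the
++ * integer part in the upper bits and a 13-bit fraction in the lower bits,
++ * i.e. crtcr = (whole << 13) | fraction; for example whole=2, fraction=0
++ * encodes as 2 << 13 = 0x4000. The 'coupled' (cpl) flag presumably allows
++ * unused CR credit to spill into the ER bucket, per the usual CEETM
++ * dual-rate shaper model.
++ */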
++
++int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
++ channel->idx);
++ query_opts.dcpid = channel->dcp_idx;
++
++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
++ pr_err("Can't query channel mapping\n");
++ return -EINVAL;
++ }
++
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
++ channel->idx);
++ config_opts.dcpid = channel->dcp_idx;
++ config_opts.channel_mapping.map_shaped = 0;
++ config_opts.channel_mapping.map_lni_id =
++ query_result.channel_mapping_query.map_lni_id;
++
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_disable_shaper);
++
++int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
++ channel->idx);
++ query_opts.dcpid = channel->dcp_idx;
++
++ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
++ pr_err("Can't query channel mapping\n");
++ return -EINVAL;
++ }
++
++ return query_result.channel_mapping_query.map_shaped;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_is_shaper_enabled);
++
++int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
++ const struct qm_ceetm_rate *token_rate,
++ u16 token_limit)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ int ret;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ query_opts.dcpid = channel->dcp_idx;
++
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
++ if (ret) {
++		pr_err("Failed to get the current channel shaper setting\n");
++ return -EINVAL;
++ }
++
++ channel->cr_token_rate.whole = token_rate->whole;
++ channel->cr_token_rate.fraction = token_rate->fraction;
++ channel->cr_token_bucket_limit = token_limit;
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ config_opts.dcpid = channel->dcp_idx;
++ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole
++ << 13) | (token_rate->fraction));
++ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
++ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
++ config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
++ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate);
++
++int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
++ u64 bps, u16 token_limit)
++{
++ struct qm_ceetm_rate token_rate;
++ int ret;
++
++ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
++ if (ret) {
++		pr_err("Cannot convert bps to token rate\n");
++ return -EINVAL;
++ }
++ return qman_ceetm_channel_set_commit_rate(channel, &token_rate,
++ token_limit);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate_bps);
++
++int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
++ struct qm_ceetm_rate *token_rate,
++ u16 *token_limit)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ int ret;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ query_opts.dcpid = channel->dcp_idx;
++
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
++	if (ret || !query_result.shaper_query.crtcr ||
++			!query_result.shaper_query.crtbl) {
++ pr_err("The channel commit rate or limit is not set\n");
++ return -EINVAL;
++ }
++ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
++ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
++ 0x1FFF;
++ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate);
++
++int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
++ u64 *bps, u16 *token_limit)
++{
++ struct qm_ceetm_rate token_rate;
++ int ret;
++
++ ret = qman_ceetm_channel_get_commit_rate(channel, &token_rate,
++ token_limit);
++ if (ret) {
++ pr_err("The channel CR rate or limit is not available\n");
++ return -EINVAL;
++ }
++
++ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate_bps);
++
++int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
++ const struct qm_ceetm_rate *token_rate,
++ u16 token_limit)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ int ret;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ query_opts.dcpid = channel->dcp_idx;
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
++ if (ret) {
++		pr_err("Failed to get the current channel shaper setting\n");
++ return -EINVAL;
++ }
++
++ channel->er_token_rate.whole = token_rate->whole;
++ channel->er_token_rate.fraction = token_rate->fraction;
++ channel->er_token_bucket_limit = token_limit;
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ config_opts.dcpid = channel->dcp_idx;
++ config_opts.shaper_config.ertcr = cpu_to_be24(
++ (token_rate->whole << 13) | (token_rate->fraction));
++ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
++ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
++ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
++ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate);
++
++int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
++ u64 bps, u16 token_limit)
++{
++ struct qm_ceetm_rate token_rate;
++ int ret;
++
++ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
++ if (ret) {
++		pr_err("Cannot convert bps to token rate\n");
++ return -EINVAL;
++ }
++ return qman_ceetm_channel_set_excess_rate(channel, &token_rate,
++ token_limit);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate_bps);
++
++int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
++ struct qm_ceetm_rate *token_rate,
++ u16 *token_limit)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ int ret;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ query_opts.dcpid = channel->dcp_idx;
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
++	if (ret || !query_result.shaper_query.ertcr ||
++			!query_result.shaper_query.ertbl) {
++ pr_err("The channel excess rate or limit is not set\n");
++ return -EINVAL;
++ }
++ token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
++ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
++ 0x1FFF;
++ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate);
++
++int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
++ u64 *bps, u16 *token_limit)
++{
++ struct qm_ceetm_rate token_rate;
++ int ret;
++
++ ret = qman_ceetm_channel_get_excess_rate(channel, &token_rate,
++ token_limit);
++ if (ret) {
++ pr_err("The channel ER rate or limit is not available\n");
++ return -EINVAL;
++ }
++
++ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate_bps);
++
++int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
++ u16 token_limit)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
++
++ if (channel->shaper_enable) {
++		pr_err("Cannot set a weight on a shaped channel\n");
++ return -EINVAL;
++ }
++
++ channel->cr_token_bucket_limit = token_limit;
++ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ config_opts.dcpid = channel->dcp_idx;
++ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
++ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_weight);
++
++int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
++ u16 *token_limit)
++{
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
++ int ret;
++
++ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
++ channel->idx);
++ query_opts.dcpid = channel->dcp_idx;
++ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
++	if (ret || !query_result.shaper_query.crtbl) {
++		pr_err("This unshaped channel's uFQ weight is unavailable\n");
++ return -EINVAL;
++ }
++ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_get_weight);
++
++int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, int group_b,
++ unsigned int prio_a, unsigned int prio_b)
++{
++ struct qm_mcc_ceetm_class_scheduler_config config_opts;
++ struct qm_mcr_ceetm_class_scheduler_query query_result;
++ int i;
++
++ if (prio_a > 7) {
++ pr_err("The priority of group A is out of range\n");
++ return -EINVAL;
++ }
++ if (group_b && (prio_b > 7)) {
++ pr_err("The priority of group B is out of range\n");
++ return -EINVAL;
++ }
++
++ if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
++ pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
++ return -EINVAL;
++ }
++
++ config_opts.cqcid = cpu_to_be16(channel->idx);
++ config_opts.dcpid = channel->dcp_idx;
++ config_opts.gpc_combine_flag = !group_b;
++ config_opts.gpc_prio_a = prio_a;
++ config_opts.gpc_prio_b = prio_b;
++
++ for (i = 0; i < 8; i++)
++ config_opts.w[i] = query_result.w[i];
++ config_opts.crem = query_result.crem;
++ config_opts.erem = query_result.erem;
++
++ return qman_ceetm_configure_class_scheduler(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_group);
++
++int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, int *group_b,
++ unsigned int *prio_a, unsigned int *prio_b)
++{
++ struct qm_mcr_ceetm_class_scheduler_query query_result;
++
++ if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
++ pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
++ return -EINVAL;
++ }
++ *group_b = !query_result.gpc_combine_flag;
++ *prio_a = query_result.gpc_prio_a;
++ *prio_b = query_result.gpc_prio_b;
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_get_group);
++
++#define GROUP_A_ELIGIBILITY_SET (1 << 8)
++#define GROUP_B_ELIGIBILITY_SET (1 << 9)
++#define CQ_ELIGIBILITY_SET(n) (1 << (7 - (n)))
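++/* The crem/erem fields are eligibility bitmasks: bit (7 - n) covers
++ * independent CQ#n (n = 0..7), while bits 8 and 9 select group A and
++ * group B respectively, as the masks above encode.
++ */
++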
++int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
++ *channel, int group_b, int cre)
++{
++ struct qm_mcc_ceetm_class_scheduler_config csch_config;
++ struct qm_mcr_ceetm_class_scheduler_query csch_query;
++ int i;
++
++ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
++ pr_err("Cannot get the channel %d scheduler setting.\n",
++ channel->idx);
++ return -EINVAL;
++ }
++ csch_config.cqcid = cpu_to_be16(channel->idx);
++ csch_config.dcpid = channel->dcp_idx;
++ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
++ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
++ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
++
++ for (i = 0; i < 8; i++)
++ csch_config.w[i] = csch_query.w[i];
++ csch_config.erem = csch_query.erem;
++ if (group_b)
++ csch_config.crem = (be16_to_cpu(csch_query.crem)
++ & ~GROUP_B_ELIGIBILITY_SET)
++ | (cre ? GROUP_B_ELIGIBILITY_SET : 0);
++ else
++ csch_config.crem = (be16_to_cpu(csch_query.crem)
++ & ~GROUP_A_ELIGIBILITY_SET)
++ | (cre ? GROUP_A_ELIGIBILITY_SET : 0);
++
++ csch_config.crem = cpu_to_be16(csch_config.crem);
++
++ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
++		pr_err("Cannot config channel %d's scheduler with group_%c's cr eligibility\n",
++			channel->idx, group_b ? 'b' : 'a');
++ return -EINVAL;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_group_cr_eligibility);
++
++int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
++ *channel, int group_b, int ere)
++{
++ struct qm_mcc_ceetm_class_scheduler_config csch_config;
++ struct qm_mcr_ceetm_class_scheduler_query csch_query;
++ int i;
++
++ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
++ pr_err("Cannot get the channel %d scheduler setting.\n",
++ channel->idx);
++ return -EINVAL;
++ }
++ csch_config.cqcid = cpu_to_be16(channel->idx);
++ csch_config.dcpid = channel->dcp_idx;
++ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
++ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
++ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
++
++ for (i = 0; i < 8; i++)
++ csch_config.w[i] = csch_query.w[i];
++ csch_config.crem = csch_query.crem;
++ if (group_b)
++ csch_config.erem = (be16_to_cpu(csch_query.erem)
++ & ~GROUP_B_ELIGIBILITY_SET)
++ | (ere ? GROUP_B_ELIGIBILITY_SET : 0);
++ else
++ csch_config.erem = (be16_to_cpu(csch_query.erem)
++ & ~GROUP_A_ELIGIBILITY_SET)
++ | (ere ? GROUP_A_ELIGIBILITY_SET : 0);
++
++ csch_config.erem = cpu_to_be16(csch_config.erem);
++
++ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
++		pr_err("Cannot config channel %d's scheduler with group_%c's er eligibility\n",
++			channel->idx, group_b ? 'b' : 'a');
++ return -EINVAL;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_group_er_eligibility);
++
++int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
++ unsigned int idx, int cre)
++{
++ struct qm_mcc_ceetm_class_scheduler_config csch_config;
++ struct qm_mcr_ceetm_class_scheduler_query csch_query;
++ int i;
++
++ if (idx > 7) {
++ pr_err("CQ index is out of range\n");
++ return -EINVAL;
++ }
++ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
++ pr_err("Cannot get the channel %d scheduler setting.\n",
++ channel->idx);
++ return -EINVAL;
++ }
++ csch_config.cqcid = cpu_to_be16(channel->idx);
++ csch_config.dcpid = channel->dcp_idx;
++ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
++ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
++ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
++ for (i = 0; i < 8; i++)
++ csch_config.w[i] = csch_query.w[i];
++ csch_config.erem = csch_query.erem;
++ csch_config.crem = (be16_to_cpu(csch_query.crem)
++ & ~CQ_ELIGIBILITY_SET(idx)) |
++ (cre ? CQ_ELIGIBILITY_SET(idx) : 0);
++ csch_config.crem = cpu_to_be16(csch_config.crem);
++ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
++		pr_err("Cannot config channel scheduler to set cr eligibility mask for CQ#%d\n",
++			idx);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_cq_cr_eligibility);
++
++int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
++ unsigned int idx, int ere)
++{
++ struct qm_mcc_ceetm_class_scheduler_config csch_config;
++ struct qm_mcr_ceetm_class_scheduler_query csch_query;
++ int i;
++
++ if (idx > 7) {
++ pr_err("CQ index is out of range\n");
++ return -EINVAL;
++ }
++ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
++ pr_err("Cannot get the channel %d scheduler setting.\n",
++ channel->idx);
++ return -EINVAL;
++ }
++ csch_config.cqcid = cpu_to_be16(channel->idx);
++ csch_config.dcpid = channel->dcp_idx;
++ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
++ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
++ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
++ for (i = 0; i < 8; i++)
++ csch_config.w[i] = csch_query.w[i];
++ csch_config.crem = csch_query.crem;
++ csch_config.erem = (be16_to_cpu(csch_query.erem)
++ & ~CQ_ELIGIBILITY_SET(idx)) |
++ (ere ? CQ_ELIGIBILITY_SET(idx) : 0);
++ csch_config.erem = cpu_to_be16(csch_config.erem);
++ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
++		pr_err("Cannot config channel scheduler to set er eligibility mask for CQ#%d\n",
++			idx);
++ return -EINVAL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_channel_set_cq_er_eligibility);
++
++int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
++ struct qm_ceetm_channel *channel, unsigned int idx,
++ struct qm_ceetm_ccg *ccg)
++{
++ struct qm_ceetm_cq *p;
++ struct qm_mcc_ceetm_cq_config cq_config;
++
++ if (idx > 7) {
++ pr_err("The independent class queue id is out of range\n");
++ return -EINVAL;
++ }
++
++ list_for_each_entry(p, &channel->class_queues, node) {
++ if (p->idx == idx) {
++ pr_err("The CQ#%d has been claimed!\n", idx);
++ return -EINVAL;
++ }
++ }
++
++ p = kmalloc(sizeof(*p), GFP_KERNEL);
++ if (!p) {
++ pr_err("Can't allocate memory for CQ#%d!\n", idx);
++ return -ENOMEM;
++ }
++
++ list_add_tail(&p->node, &channel->class_queues);
++ p->idx = idx;
++ p->is_claimed = 1;
++ p->parent = channel;
++ INIT_LIST_HEAD(&p->bound_lfqids);
++
++ if (ccg) {
++ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
++ cq_config.dcpid = channel->dcp_idx;
++ cq_config.ccgid = cpu_to_be16(ccg->idx);
++ if (qman_ceetm_configure_cq(&cq_config)) {
++ pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
++ idx, ccg->idx);
++ list_del(&p->node);
++ kfree(p);
++ return -EINVAL;
++ }
++ }
++
++ *cq = p;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_cq_claim);
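++
++/* Note the CQID encoding used throughout: cqid = (channel->idx << 4) | idx,
++ * i.e. each channel owns 16 class queues - 8 independent ones (0..7) plus
++ * grouped ones (8..15), claimed via the _claim_A/_claim_B variants below.
++ */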
++
++int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
++ struct qm_ceetm_channel *channel, unsigned int idx,
++ struct qm_ceetm_ccg *ccg)
++{
++ struct qm_ceetm_cq *p;
++ struct qm_mcc_ceetm_cq_config cq_config;
++
++ if ((idx < 8) || (idx > 15)) {
++ pr_err("This grouped class queue id is out of range\n");
++ return -EINVAL;
++ }
++
++ list_for_each_entry(p, &channel->class_queues, node) {
++ if (p->idx == idx) {
++ pr_err("The CQ#%d has been claimed!\n", idx);
++ return -EINVAL;
++ }
++ }
++
++ p = kmalloc(sizeof(*p), GFP_KERNEL);
++ if (!p) {
++ pr_err("Can't allocate memory for CQ#%d!\n", idx);
++ return -ENOMEM;
++ }
++
++ list_add_tail(&p->node, &channel->class_queues);
++ p->idx = idx;
++ p->is_claimed = 1;
++ p->parent = channel;
++ INIT_LIST_HEAD(&p->bound_lfqids);
++
++ if (ccg) {
++ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
++ cq_config.dcpid = channel->dcp_idx;
++ cq_config.ccgid = cpu_to_be16(ccg->idx);
++ if (qman_ceetm_configure_cq(&cq_config)) {
++ pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
++ idx, ccg->idx);
++ list_del(&p->node);
++ kfree(p);
++ return -EINVAL;
++ }
++ }
++ *cq = p;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_cq_claim_A);
++
++int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
++ struct qm_ceetm_channel *channel, unsigned int idx,
++ struct qm_ceetm_ccg *ccg)
++{
++ struct qm_ceetm_cq *p;
++ struct qm_mcc_ceetm_cq_config cq_config;
++
++ if ((idx < 12) || (idx > 15)) {
++ pr_err("This grouped class queue id is out of range\n");
++ return -EINVAL;
++ }
++
++ list_for_each_entry(p, &channel->class_queues, node) {
++ if (p->idx == idx) {
++ pr_err("The CQ#%d has been claimed!\n", idx);
++ return -EINVAL;
++ }
++ }
++
++ p = kmalloc(sizeof(*p), GFP_KERNEL);
++ if (!p) {
++ pr_err("Can't allocate memory for CQ#%d!\n", idx);
++ return -ENOMEM;
++ }
++
++ list_add_tail(&p->node, &channel->class_queues);
++ p->idx = idx;
++ p->is_claimed = 1;
++ p->parent = channel;
++ INIT_LIST_HEAD(&p->bound_lfqids);
++
++ if (ccg) {
++ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
++ cq_config.dcpid = channel->dcp_idx;
++ cq_config.ccgid = cpu_to_be16(ccg->idx);
++ if (qman_ceetm_configure_cq(&cq_config)) {
++ pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
++ idx, ccg->idx);
++ list_del(&p->node);
++ kfree(p);
++ return -EINVAL;
++ }
++ }
++ *cq = p;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_cq_claim_B);
++
++int qman_ceetm_cq_release(struct qm_ceetm_cq *cq)
++{
++ if (!list_empty(&cq->bound_lfqids)) {
++ pr_err("The CQ#%d has unreleased LFQID\n", cq->idx);
++ return -EBUSY;
++ }
++ list_del(&cq->node);
++ qman_ceetm_drain_cq(cq);
++ kfree(cq);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_cq_release);
++
++int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
++ struct qm_ceetm_weight_code *weight_code)
++{
++ struct qm_mcc_ceetm_class_scheduler_config config_opts;
++ struct qm_mcr_ceetm_class_scheduler_query query_result;
++ int i;
++
++ if (cq->idx < 8) {
++		pr_err("Cannot set a weight for an ungrouped class queue\n");
++ return -EINVAL;
++ }
++
++ if (qman_ceetm_query_class_scheduler(cq->parent, &query_result)) {
++ pr_err("Can't query channel#%d's scheduler!\n",
++ cq->parent->idx);
++ return -EINVAL;
++ }
++
++ config_opts.cqcid = cpu_to_be16(cq->parent->idx);
++ config_opts.dcpid = cq->parent->dcp_idx;
++ config_opts.crem = query_result.crem;
++ config_opts.erem = query_result.erem;
++ config_opts.gpc_combine_flag = query_result.gpc_combine_flag;
++ config_opts.gpc_prio_a = query_result.gpc_prio_a;
++ config_opts.gpc_prio_b = query_result.gpc_prio_b;
++
++ for (i = 0; i < 8; i++)
++ config_opts.w[i] = query_result.w[i];
++ config_opts.w[cq->idx - 8] = ((weight_code->y << 3) |
++ (weight_code->x & 0x7));
++ return qman_ceetm_configure_class_scheduler(&config_opts);
++}
++EXPORT_SYMBOL(qman_ceetm_set_queue_weight);
++
++int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
++ struct qm_ceetm_weight_code *weight_code)
++{
++ struct qm_mcr_ceetm_class_scheduler_query query_result;
++
++ if (cq->idx < 8) {
++		pr_err("Cannot get a weight for an ungrouped class queue\n");
++ return -EINVAL;
++ }
++
++ if (qman_ceetm_query_class_scheduler(cq->parent,
++ &query_result)) {
++ pr_err("Can't get the weight code for CQ#%d!\n", cq->idx);
++ return -EINVAL;
++ }
++ weight_code->y = query_result.w[cq->idx - 8] >> 3;
++ weight_code->x = query_result.w[cq->idx - 8] & 0x7;
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_get_queue_weight);
++
++/* The WBFS code is represented as {x,y}; the effective weight can be
++ * calculated as:
++ *	effective weight = 2^x / (1 - (y/64))
++ *			 = 2^(x+6) / (64 - y)
++ */
++static void reduce_fraction(u32 *n, u32 *d)
++{
++ u32 factor = 2;
++ u32 lesser = (*n < *d) ? *n : *d;
++ /* If factor exceeds the square-root of the lesser of *n and *d,
++ * then there's no point continuing. Proof: if there was a factor
++ * bigger than the square root, that would imply there exists
++ * another factor smaller than the square-root with which it
++ * multiplies to give 'lesser' - but that's a contradiction
++ * because the other factor would have already been found and
++ * divided out.
++ */
++ while ((factor * factor) <= lesser) {
++ /* If 'factor' is a factor of *n and *d, divide them both
++ * by 'factor' as many times as possible.
++ */
++ while (!(*n % factor) && !(*d % factor)) {
++ *n /= factor;
++ *d /= factor;
++ lesser /= factor;
++ }
++ if (factor == 2)
++ factor = 3;
++ else
++ factor += 2;
++ }
++}
++
++int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
++ u32 *numerator,
++ u32 *denominator)
++{
++ *numerator = (u32) 1 << (weight_code->x + 6);
++ *denominator = 64 - weight_code->y;
++ reduce_fraction(numerator, denominator);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_wbfs2ratio);
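++
++/* Worked example: weight_code {x=2, y=48} yields numerator = 1 << 8 = 256
++ * and denominator = 64 - 48 = 16, which reduce_fraction() lowers to 16/1,
++ * i.e. an effective weight of 16.
++ */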
++
++/* For a given x, the weight is between 2^x (inclusive) and 2^(x+1) (exclusive).
++ * So find 'x' by range, and then estimate 'y' using:
++ * 64 - y = 2^(x + 6) / weight
++ * = 2^(x + 6) / (n/d)
++ * = d * 2^(x+6) / n
++ * y = 64 - (d * 2^(x+6) / n)
++ */
++int qman_ceetm_ratio2wbfs(u32 numerator,
++ u32 denominator,
++ struct qm_ceetm_weight_code *weight_code,
++ int rounding)
++{
++ unsigned int y, x = 0;
++ /* search incrementing 'x' until:
++ * weight < 2^(x+1)
++ * n/d < 2^(x+1)
++ * n < d * 2^(x+1)
++ */
++ while ((x < 8) && (numerator >= (denominator << (x + 1))))
++ x++;
++ if (x >= 8)
++ return -ERANGE;
++ /* because of the subtraction, use '-rounding' */
++ y = 64 - ROUNDING(denominator << (x + 6), numerator, -rounding);
++ if (y >= 32)
++ return -ERANGE;
++ weight_code->x = x;
++ weight_code->y = y;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_ratio2wbfs);
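++
++/* Worked example: for a 3/1 ratio, the search gives x = 1 (3 >= 1 << 1 but
++ * 3 < 1 << 2), then y = 64 - (1 << 7)/3, approximately 64 - 43 = 21; the
++ * effective weight is then 2^7 / (64 - 21) = 128/43, roughly 2.98 (the
++ * exact value depends on the rounding mode passed in).
++ */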
++
++int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio)
++{
++ struct qm_ceetm_weight_code weight_code;
++
++ if (qman_ceetm_ratio2wbfs(ratio, 100, &weight_code, 0)) {
++ pr_err("Cannot get wbfs code for cq %x\n", cq->idx);
++ return -EINVAL;
++ }
++ return qman_ceetm_set_queue_weight(cq, &weight_code);
++}
++EXPORT_SYMBOL(qman_ceetm_set_queue_weight_in_ratio);
++
++int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio)
++{
++ struct qm_ceetm_weight_code weight_code;
++ u32 n, d;
++
++ if (qman_ceetm_get_queue_weight(cq, &weight_code)) {
++ pr_err("Cannot query the weight code for cq%x\n", cq->idx);
++ return -EINVAL;
++ }
++
++ if (qman_ceetm_wbfs2ratio(&weight_code, &n, &d)) {
++ pr_err("Cannot get the ratio with wbfs code\n");
++ return -EINVAL;
++ }
++
++ *ratio = (n * 100) / d;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_get_queue_weight_in_ratio);
++
++int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
++ u64 *frame_count, u64 *byte_count)
++{
++ struct qm_mcr_ceetm_statistics_query result;
++ u16 cid, command_type;
++ enum qm_dc_portal dcp_idx;
++ int ret;
++
++ cid = cpu_to_be16((cq->parent->idx << 4) | cq->idx);
++ dcp_idx = cq->parent->dcp_idx;
++ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
++ command_type = CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS;
++ else
++ command_type = CEETM_QUERY_DEQUEUE_STATISTICS;
++
++ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
++ if (ret) {
++ pr_err("Can't query the statistics of CQ#%d!\n", cq->idx);
++ return -EINVAL;
++ }
++
++ *frame_count = be40_to_cpu(result.frm_cnt);
++ *byte_count = be48_to_cpu(result.byte_cnt);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_cq_get_dequeue_statistics);
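++
++/* The hardware statistics are wider than a machine word: frame counts are
++ * 40-bit and byte counts 48-bit fields, hence the be40_to_cpu()/
++ * be48_to_cpu() conversions here and in
++ * qman_ceetm_ccg_get_reject_statistics() below.
++ */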
++
++int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq)
++{
++ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread ppxr;
++ int ret;
++
++ do {
++ ret = qman_ceetm_cq_peek_pop_xsfdrread(cq, 1, 0, &ppxr);
++ if (ret) {
++ pr_err("Failed to pop frame from CQ\n");
++ return -EINVAL;
++ }
++ } while (!(ppxr.stat & 0x2));
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_drain_cq);
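++
++/* The drain loop pops one frame per iteration and stops once bit 0x2 is
++ * set in the returned stat field - taken here to be the peek/pop
++ * command's "CQ empty" indication.
++ */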
++
++#define CEETM_LFQMT_LFQID_MSB 0xF00000
++#define CEETM_LFQMT_LFQID_LSB 0x000FFF
++int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
++ struct qm_ceetm_cq *cq)
++{
++ struct qm_ceetm_lfq *p;
++ u32 lfqid;
++ int ret = 0;
++ struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
++
++ if (cq->parent->dcp_idx == qm_dc_portal_fman0) {
++ ret = qman_alloc_ceetm0_lfqid(&lfqid);
++ } else if (cq->parent->dcp_idx == qm_dc_portal_fman1) {
++ ret = qman_alloc_ceetm1_lfqid(&lfqid);
++ } else {
++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
++ cq->parent->dcp_idx);
++ return -EINVAL;
++ }
++
++ if (ret) {
++		pr_err("There is no lfqid available for CQ#%d!\n", cq->idx);
++ return -ENODEV;
++ }
++ p = kmalloc(sizeof(*p), GFP_KERNEL);
++ if (!p)
++ return -ENOMEM;
++ p->idx = lfqid;
++ p->dctidx = (u16)(lfqid & CEETM_LFQMT_LFQID_LSB);
++ p->parent = cq->parent;
++ list_add_tail(&p->node, &cq->bound_lfqids);
++
++ lfqmt_config.lfqid = cpu_to_be24(CEETM_LFQMT_LFQID_MSB |
++ (cq->parent->dcp_idx << 16) |
++ (lfqid & CEETM_LFQMT_LFQID_LSB));
++ lfqmt_config.cqid = cpu_to_be16((cq->parent->idx << 4) | (cq->idx));
++ lfqmt_config.dctidx = cpu_to_be16(p->dctidx);
++ if (qman_ceetm_configure_lfqmt(&lfqmt_config)) {
++ pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n",
++ lfqid, cq->idx);
++ list_del(&p->node);
++ kfree(p);
++ return -EINVAL;
++ }
++ *lfq = p;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_lfq_claim);
++
++int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq)
++{
++ if (lfq->parent->dcp_idx == qm_dc_portal_fman0) {
++ qman_release_ceetm0_lfqid(lfq->idx);
++ } else if (lfq->parent->dcp_idx == qm_dc_portal_fman1) {
++ qman_release_ceetm1_lfqid(lfq->idx);
++ } else {
++ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
++ lfq->parent->dcp_idx);
++ return -EINVAL;
++ }
++ list_del(&lfq->node);
++ kfree(lfq);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_lfq_release);
++
++int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq, u64 context_a,
++ u32 context_b)
++{
++ struct qm_mcc_ceetm_dct_config dct_config;
++ lfq->context_a = context_a;
++ lfq->context_b = context_b;
++ dct_config.dctidx = cpu_to_be16((u16)lfq->dctidx);
++ dct_config.dcpid = lfq->parent->dcp_idx;
++ dct_config.context_b = cpu_to_be32(context_b);
++ dct_config.context_a = cpu_to_be64(context_a);
++
++ return qman_ceetm_configure_dct(&dct_config);
++}
++EXPORT_SYMBOL(qman_ceetm_lfq_set_context);
++
++int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq, u64 *context_a,
++ u32 *context_b)
++{
++ struct qm_mcc_ceetm_dct_query dct_query;
++ struct qm_mcr_ceetm_dct_query query_result;
++
++ dct_query.dctidx = cpu_to_be16(lfq->dctidx);
++ dct_query.dcpid = lfq->parent->dcp_idx;
++ if (qman_ceetm_query_dct(&dct_query, &query_result)) {
++ pr_err("Can't query LFQID#%d's context!\n", lfq->idx);
++ return -EINVAL;
++ }
++ *context_a = be64_to_cpu(query_result.context_a);
++ *context_b = be32_to_cpu(query_result.context_b);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_lfq_get_context);
++
++int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq)
++{
++ spin_lock_init(&fq->fqlock);
++ fq->fqid = lfq->idx;
++ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
++ if (lfq->ern)
++ fq->cb.ern = lfq->ern;
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
++ return -ENOMEM;
++#endif
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_create_fq);
++
++#define MAX_CCG_IDX 0x000F
++int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
++ struct qm_ceetm_channel *channel,
++ unsigned int idx,
++ void (*cscn)(struct qm_ceetm_ccg *,
++ void *cb_ctx,
++ int congested),
++ void *cb_ctx)
++{
++ struct qm_ceetm_ccg *p;
++
++ if (idx > MAX_CCG_IDX) {
++ pr_err("The given ccg index is out of range\n");
++ return -EINVAL;
++ }
++
++ list_for_each_entry(p, &channel->ccgs, node) {
++ if (p->idx == idx) {
++ pr_err("The CCG#%d has been claimed\n", idx);
++ return -EINVAL;
++ }
++ }
++
++ p = kmalloc(sizeof(*p), GFP_KERNEL);
++ if (!p) {
++ pr_err("Can't allocate memory for CCG#%d!\n", idx);
++ return -ENOMEM;
++ }
++
++ list_add_tail(&p->node, &channel->ccgs);
++
++ p->idx = idx;
++ p->parent = channel;
++ p->cb = cscn;
++ p->cb_ctx = cb_ctx;
++ INIT_LIST_HEAD(&p->cb_node);
++
++ *ccg = p;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_ccg_claim);
++
++int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg)
++{
++ unsigned long irqflags __maybe_unused;
++ struct qm_mcc_ceetm_ccgr_config config_opts;
++ int ret = 0;
++ struct qman_portal *p = get_affine_portal();
++
++ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
++ spin_lock_irqsave(&p->ccgr_lock, irqflags);
++ if (!list_empty(&ccg->cb_node))
++ list_del(&ccg->cb_node);
++ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
++ (ccg->parent->idx << 4) | ccg->idx);
++ config_opts.dcpid = ccg->parent->dcp_idx;
++ config_opts.we_mask = cpu_to_be16(QM_CCGR_WE_CSCN_TUPD);
++ config_opts.cm_config.cscn_tupd = cpu_to_be16(PORTAL_IDX(p));
++ ret = qman_ceetm_configure_ccgr(&config_opts);
++ spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
++ put_affine_portal();
++
++ list_del(&ccg->node);
++ kfree(ccg);
++ return ret;
++}
++EXPORT_SYMBOL(qman_ceetm_ccg_release);
++
++int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg, u16 we_mask,
++ const struct qm_ceetm_ccg_params *params)
++{
++ struct qm_mcc_ceetm_ccgr_config config_opts;
++ unsigned long irqflags __maybe_unused;
++ int ret;
++ struct qman_portal *p;
++
++ if (((ccg->parent->idx << 4) | ccg->idx) >= (2 * __CGR_NUM))
++ return -EINVAL;
++
++ p = get_affine_portal();
++
++ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
++ spin_lock_irqsave(&p->ccgr_lock, irqflags);
++
++ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
++ (ccg->parent->idx << 4) | ccg->idx);
++ config_opts.dcpid = ccg->parent->dcp_idx;
++ config_opts.we_mask = we_mask;
++ if (we_mask & QM_CCGR_WE_CSCN_EN) {
++ config_opts.we_mask |= QM_CCGR_WE_CSCN_TUPD;
++ config_opts.cm_config.cscn_tupd = cpu_to_be16(
++ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p));
++ }
++ config_opts.we_mask = cpu_to_be16(config_opts.we_mask);
++ config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
++ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
++ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
++ config_opts.cm_config.ctl_td_en = params->td_en;
++ config_opts.cm_config.ctl_td_mode = params->td_mode;
++ config_opts.cm_config.ctl_cscn_en = params->cscn_en;
++ config_opts.cm_config.ctl_mode = params->mode;
++ config_opts.cm_config.oal = params->oal;
++ config_opts.cm_config.cs_thres.hword =
++ cpu_to_be16(params->cs_thres_in.hword);
++ config_opts.cm_config.cs_thres_x.hword =
++ cpu_to_be16(params->cs_thres_out.hword);
++ config_opts.cm_config.td_thres.hword =
++ cpu_to_be16(params->td_thres.hword);
++ config_opts.cm_config.wr_parm_g.word =
++ cpu_to_be32(params->wr_parm_g.word);
++ config_opts.cm_config.wr_parm_y.word =
++ cpu_to_be32(params->wr_parm_y.word);
++ config_opts.cm_config.wr_parm_r.word =
++ cpu_to_be32(params->wr_parm_r.word);
++ ret = qman_ceetm_configure_ccgr(&config_opts);
++ if (ret) {
++ pr_err("Configure CCGR CM failed!\n");
++ goto release_lock;
++ }
++
++ if (we_mask & QM_CCGR_WE_CSCN_EN)
++ if (list_empty(&ccg->cb_node))
++ list_add(&ccg->cb_node,
++ &p->ccgr_cbs[ccg->parent->dcp_idx]);
++release_lock:
++ spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
++ put_affine_portal();
++ return ret;
++}
++EXPORT_SYMBOL(qman_ceetm_ccg_set);
++
++#define CEETM_CCGR_CTL_MASK 0x01
++int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
++ struct qm_ceetm_ccg_params *params)
++{
++ struct qm_mcc_ceetm_ccgr_query query_opts;
++ struct qm_mcr_ceetm_ccgr_query query_result;
++
++ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
++ (ccg->parent->idx << 4) | ccg->idx);
++ query_opts.dcpid = ccg->parent->dcp_idx;
++
++ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
++ pr_err("Can't query CCGR#%d\n", ccg->idx);
++ return -EINVAL;
++ }
++
++ params->wr_parm_r.word = query_result.cm_query.wr_parm_r.word;
++ params->wr_parm_y.word = query_result.cm_query.wr_parm_y.word;
++ params->wr_parm_g.word = query_result.cm_query.wr_parm_g.word;
++ params->td_thres.hword = query_result.cm_query.td_thres.hword;
++ params->cs_thres_out.hword = query_result.cm_query.cs_thres_x.hword;
++ params->cs_thres_in.hword = query_result.cm_query.cs_thres.hword;
++ params->oal = query_result.cm_query.oal;
++ params->wr_en_g = query_result.cm_query.ctl_wr_en_g;
++ params->wr_en_y = query_result.cm_query.ctl_wr_en_y;
++ params->wr_en_r = query_result.cm_query.ctl_wr_en_r;
++ params->td_en = query_result.cm_query.ctl_td_en;
++ params->td_mode = query_result.cm_query.ctl_td_mode;
++ params->cscn_en = query_result.cm_query.ctl_cscn_en;
++ params->mode = query_result.cm_query.ctl_mode;
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_ccg_get);
++
++int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
++ u64 *frame_count, u64 *byte_count)
++{
++ struct qm_mcr_ceetm_statistics_query result;
++ u16 cid, command_type;
++ enum qm_dc_portal dcp_idx;
++ int ret;
++
++ cid = cpu_to_be16((ccg->parent->idx << 4) | ccg->idx);
++ dcp_idx = ccg->parent->dcp_idx;
++ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
++ command_type = CEETM_QUERY_REJECT_CLEAR_STATISTICS;
++ else
++ command_type = CEETM_QUERY_REJECT_STATISTICS;
++
++ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
++ if (ret) {
++ pr_err("Can't query the statistics of CCG#%d!\n", ccg->idx);
++ return -EINVAL;
++ }
++
++ *frame_count = be40_to_cpu(result.frm_cnt);
++ *byte_count = be48_to_cpu(result.byte_cnt);
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_ccg_get_reject_statistics);
++
++int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
++ u16 swp_idx,
++ unsigned int *cscn_enabled)
++{
++ struct qm_mcc_ceetm_ccgr_query query_opts;
++ struct qm_mcr_ceetm_ccgr_query query_result;
++ int i;
++
++ DPA_ASSERT(swp_idx < 127);
++ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
++ (ccg->parent->idx << 4) | ccg->idx);
++ query_opts.dcpid = ccg->parent->dcp_idx;
++
++ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
++ pr_err("Can't query CCGR#%d\n", ccg->idx);
++ return -EINVAL;
++ }
++
++	i = 3 - (swp_idx / 32);
++	*cscn_enabled = (query_result.cm_query.cscn_targ_swp[i] >>
++				(31 - swp_idx % 32)) & 0x1;
++
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_cscn_swp_get);
++
++int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
++ u16 dcp_idx,
++ u8 vcgid,
++ unsigned int cscn_enabled,
++ u16 we_mask,
++ const struct qm_ceetm_ccg_params *params)
++{
++ struct qm_mcc_ceetm_ccgr_config config_opts;
++ int ret;
++
++ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
++ (ccg->parent->idx << 4) | ccg->idx);
++ config_opts.dcpid = ccg->parent->dcp_idx;
++ config_opts.we_mask = cpu_to_be16(we_mask | QM_CCGR_WE_CSCN_TUPD |
++ QM_CCGR_WE_CDV);
++ config_opts.cm_config.cdv = vcgid;
++ config_opts.cm_config.cscn_tupd = cpu_to_be16((cscn_enabled << 15) |
++ QM_CGR_TARG_UDP_CTRL_DCP | dcp_idx);
++ config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
++ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
++ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
++ config_opts.cm_config.ctl_td_en = params->td_en;
++ config_opts.cm_config.ctl_td_mode = params->td_mode;
++ config_opts.cm_config.ctl_cscn_en = params->cscn_en;
++ config_opts.cm_config.ctl_mode = params->mode;
++ config_opts.cm_config.cs_thres.hword =
++ cpu_to_be16(params->cs_thres_in.hword);
++ config_opts.cm_config.cs_thres_x.hword =
++ cpu_to_be16(params->cs_thres_out.hword);
++ config_opts.cm_config.td_thres.hword =
++ cpu_to_be16(params->td_thres.hword);
++ config_opts.cm_config.wr_parm_g.word =
++ cpu_to_be32(params->wr_parm_g.word);
++ config_opts.cm_config.wr_parm_y.word =
++ cpu_to_be32(params->wr_parm_y.word);
++ config_opts.cm_config.wr_parm_r.word =
++ cpu_to_be32(params->wr_parm_r.word);
++
++ ret = qman_ceetm_configure_ccgr(&config_opts);
++ if (ret) {
++ pr_err("Configure CSCN_TARG_DCP failed!\n");
++ return -EINVAL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_cscn_dcp_set);
++
++int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
++ u16 dcp_idx,
++ u8 *vcgid,
++ unsigned int *cscn_enabled)
++{
++ struct qm_mcc_ceetm_ccgr_query query_opts;
++ struct qm_mcr_ceetm_ccgr_query query_result;
++
++ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
++ (ccg->parent->idx << 4) | ccg->idx);
++ query_opts.dcpid = ccg->parent->dcp_idx;
++
++ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
++ pr_err("Can't query CCGR#%d\n", ccg->idx);
++ return -EINVAL;
++ }
++
++ *vcgid = query_result.cm_query.cdv;
++ *cscn_enabled = (query_result.cm_query.cscn_targ_dcp >> dcp_idx) & 0x1;
++ return 0;
++}
++EXPORT_SYMBOL(qman_ceetm_cscn_dcp_get);
++
++int qman_ceetm_querycongestion(struct __qm_mcr_querycongestion *ccg_state,
++ unsigned int dcp_idx)
++{
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ u8 res;
++ int i, j;
++
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++
++ mcc = qm_mc_start(&p->p);
++ for (i = 0; i < 2; i++) {
++ mcc->ccgr_query.ccgrid =
++ cpu_to_be16(CEETM_QUERY_CONGESTION_STATE | i);
++ mcc->ccgr_query.dcpid = dcp_idx;
++ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
++
++ while (!(mcr = qm_mc_result(&p->p)))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_CEETM_VERB_CCGR_QUERY);
++ res = mcr->result;
++ if (res == QM_MCR_RESULT_OK) {
++ for (j = 0; j < 8; j++)
++ mcr->ccgr_query.congestion_state.state.
++ __state[j] = be32_to_cpu(mcr->ccgr_query.
++ congestion_state.state.__state[j]);
++ *(ccg_state + i) =
++ mcr->ccgr_query.congestion_state.state;
++ } else {
++ pr_err("QUERY CEETM CONGESTION STATE failed\n");
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ return -EIO;
++ }
++ }
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return 0;
++}
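++
++/* Each CCGR congestion-state query returns a 256-bit bitmap (8 x u32), so
++ * the two iterations above cover the 2 * __CGR_NUM CEETM CCGRs allowed for
++ * by the bounds check in qman_ceetm_ccg_set().
++ */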
++
++int qman_set_wpm(int wpm_enable)
++{
++ return qm_set_wpm(wpm_enable);
++}
++EXPORT_SYMBOL(qman_set_wpm);
++
++int qman_get_wpm(int *wpm_enable)
++{
++ return qm_get_wpm(wpm_enable);
++}
++EXPORT_SYMBOL(qman_get_wpm);
++
++int qman_shutdown_fq(u32 fqid)
++{
++ struct qman_portal *p;
++ unsigned long irqflags __maybe_unused;
++ int ret;
++ struct qm_portal *low_p;
++ p = get_affine_portal();
++ PORTAL_IRQ_LOCK(p, irqflags);
++ low_p = &p->p;
++ ret = qm_shutdown_fq(&low_p, 1, fqid);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return ret;
++}
++
++const struct qm_portal_config *qman_get_qm_portal_config(
++ struct qman_portal *portal)
++{
++ return portal->sharing_redirect ? NULL : portal->config;
++}
+diff --git a/drivers/staging/fsl_qbman/qman_low.h b/drivers/staging/fsl_qbman/qman_low.h
+new file mode 100644
+index 00000000..547b5fa2
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_low.h
+@@ -0,0 +1,1427 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qman_private.h"
++
++/***************************/
++/* Portal register assists */
++/***************************/
++
++/* Cache-inhibited register offsets */
++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
++
++#define QM_REG_EQCR_PI_CINH 0x0000
++#define QM_REG_EQCR_CI_CINH 0x0004
++#define QM_REG_EQCR_ITR 0x0008
++#define QM_REG_DQRR_PI_CINH 0x0040
++#define QM_REG_DQRR_CI_CINH 0x0044
++#define QM_REG_DQRR_ITR 0x0048
++#define QM_REG_DQRR_DCAP 0x0050
++#define QM_REG_DQRR_SDQCR 0x0054
++#define QM_REG_DQRR_VDQCR 0x0058
++#define QM_REG_DQRR_PDQCR 0x005c
++#define QM_REG_MR_PI_CINH 0x0080
++#define QM_REG_MR_CI_CINH 0x0084
++#define QM_REG_MR_ITR 0x0088
++#define QM_REG_CFG 0x0100
++#define QM_REG_ISR 0x0e00
++#define QM_REG_IIR 0x0e0c
++#define QM_REG_ITPR 0x0e14
++
++/* Cache-enabled register offsets */
++#define QM_CL_EQCR 0x0000
++#define QM_CL_DQRR 0x1000
++#define QM_CL_MR 0x2000
++#define QM_CL_EQCR_PI_CENA 0x3000
++#define QM_CL_EQCR_CI_CENA 0x3100
++#define QM_CL_DQRR_PI_CENA 0x3200
++#define QM_CL_DQRR_CI_CENA 0x3300
++#define QM_CL_MR_PI_CENA 0x3400
++#define QM_CL_MR_CI_CENA 0x3500
++#define QM_CL_CR 0x3800
++#define QM_CL_RR0 0x3900
++#define QM_CL_RR1 0x3940
++
++#endif
++
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++
++#define QM_REG_EQCR_PI_CINH 0x3000
++#define QM_REG_EQCR_CI_CINH 0x3040
++#define QM_REG_EQCR_ITR 0x3080
++#define QM_REG_DQRR_PI_CINH 0x3100
++#define QM_REG_DQRR_CI_CINH 0x3140
++#define QM_REG_DQRR_ITR 0x3180
++#define QM_REG_DQRR_DCAP 0x31C0
++#define QM_REG_DQRR_SDQCR 0x3200
++#define QM_REG_DQRR_VDQCR 0x3240
++#define QM_REG_DQRR_PDQCR 0x3280
++#define QM_REG_MR_PI_CINH 0x3300
++#define QM_REG_MR_CI_CINH 0x3340
++#define QM_REG_MR_ITR 0x3380
++#define QM_REG_CFG 0x3500
++#define QM_REG_ISR 0x3600
++#define QM_REG_IIR 0x36C0
++#define QM_REG_ITPR 0x3740
++
++/* Cache-enabled register offsets */
++#define QM_CL_EQCR 0x0000
++#define QM_CL_DQRR 0x1000
++#define QM_CL_MR 0x2000
++#define QM_CL_EQCR_PI_CENA 0x3000
++#define QM_CL_EQCR_CI_CENA 0x3040
++#define QM_CL_DQRR_PI_CENA 0x3100
++#define QM_CL_DQRR_CI_CENA 0x3140
++#define QM_CL_MR_PI_CENA 0x3300
++#define QM_CL_MR_CI_CENA 0x3340
++#define QM_CL_CR 0x3800
++#define QM_CL_RR0 0x3900
++#define QM_CL_RR1 0x3940
++
++#endif
++
++
++/* BTW, the drivers (and h/w programming model) already obtain the required
++ * synchronisation for portal accesses via lwsync(), hwsync(), and
++ * data-dependencies. Use of barrier()s or other order-preserving primitives
++ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
++ * simply ensure that the compiler treats the portal registers as volatile (ie.
++ * non-coherent). */
++
++/* Cache-inhibited register access. */
++#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ci + (o)))
++#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \
++ (qm)->addr_ci + (o));
++#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
++#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
++
++/* Cache-enabled (index) register access */
++#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o))
++#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o))
++#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ce + (o)))
++#define __qm_cl_out(qm, o, val) \
++ do { \
++ u32 *__tmpclout = (qm)->addr_ce + (o); \
++ __raw_writel(cpu_to_be32(val), __tmpclout); \
++ dcbf(__tmpclout); \
++ } while (0)
++#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o))
++#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
++#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
++#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
++#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
++#define qm_cl_invalidate(reg)\
++ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
++
++/* Cache-enabled ring access */
++#define qm_cl(base, idx) ((void *)(base) + ((idx) << 6))
++
++/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
++ * analysis, look at using the "extra" bit in the ring index registers to avoid
++ * cyclic issues. */
++static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
++{
++ /* 'first' is included, 'last' is excluded */
++ if (first <= last)
++ return last - first;
++ return ringsize + last - first;
++}
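++
++/* Example: with ringsize = 8, first = 6 and last = 2 the ring has wrapped,
++ * so the cyclic distance is 8 + 2 - 6 = 4 entries.
++ */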
++
++/* Portal modes.
++ * Enum types:
++ * pmode == production mode
++ * cmode == consumption mode
++ * dmode == h/w dequeue mode
++ * Enum values use 3 letter codes. First letter matches the portal mode,
++ * remaining two letters indicate:
++ * ci == cache-inhibited portal register
++ * ce == cache-enabled portal register
++ * vb == in-band valid-bit (cache-enabled)
++ * dc == DCA (Discrete Consumption Acknowledgement), DQRR-only
++ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
++ */
++enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
++ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
++ qm_eqcr_pce = 1, /* PI index, cache-enabled */
++ qm_eqcr_pvb = 2 /* valid-bit */
++};
++enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
++ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
++ qm_dqrr_dpull = 1 /* PDQCR */
++};
++enum qm_dqrr_pmode { /* s/w-only */
++ qm_dqrr_pci, /* reads DQRR_PI_CINH */
++ qm_dqrr_pce, /* reads DQRR_PI_CENA */
++ qm_dqrr_pvb /* reads valid-bit */
++};
++enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
++ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
++ qm_dqrr_cce = 1, /* CI index, cache-enabled */
++ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgement */
++};
++enum qm_mr_pmode { /* s/w-only */
++ qm_mr_pci, /* reads MR_PI_CINH */
++ qm_mr_pce, /* reads MR_PI_CENA */
++ qm_mr_pvb /* reads valid-bit */
++};
++enum qm_mr_cmode { /* matches QCSP_CFG::MM */
++ qm_mr_cci = 0, /* CI index, cache-inhibited */
++ qm_mr_cce = 1 /* CI index, cache-enabled */
++};
++
++
++/* ------------------------- */
++/* --- Portal structures --- */
++
++#define QM_EQCR_SIZE 8
++#define QM_DQRR_SIZE 16
++#define QM_MR_SIZE 8
++
++struct qm_eqcr {
++ struct qm_eqcr_entry *ring, *cursor;
++ u8 ci, available, ithresh, vbit;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ u32 busy;
++ enum qm_eqcr_pmode pmode;
++#endif
++};
++
++struct qm_dqrr {
++ const struct qm_dqrr_entry *ring, *cursor;
++ u8 pi, ci, fill, ithresh, vbit;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ enum qm_dqrr_dmode dmode;
++ enum qm_dqrr_pmode pmode;
++ enum qm_dqrr_cmode cmode;
++#endif
++};
++
++struct qm_mr {
++ const struct qm_mr_entry *ring, *cursor;
++ u8 pi, ci, fill, ithresh, vbit;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ enum qm_mr_pmode pmode;
++ enum qm_mr_cmode cmode;
++#endif
++};
++
++struct qm_mc {
++ struct qm_mc_command *cr;
++ struct qm_mc_result *rr;
++ u8 rridx, vbit;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ enum {
++ /* Can be _mc_start()ed */
++ qman_mc_idle,
++ /* Can be _mc_commit()ed or _mc_abort()ed */
++ qman_mc_user,
++ /* Can only be _mc_retry()ed */
++ qman_mc_hw
++ } state;
++#endif
++};
++
++#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
++
++struct qm_addr {
++ void __iomem *addr_ce; /* cache-enabled */
++ void __iomem *addr_ci; /* cache-inhibited */
++};
++
++struct qm_portal {
++ /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to
++ * and including 'mc' fits within a cacheline (yay!). The 'config' part
++ * is setup-only, so isn't a cause for a concern. In other words, don't
++ * rearrange this structure on a whim, there be dragons ... */
++ struct qm_addr addr;
++ struct qm_eqcr eqcr;
++ struct qm_dqrr dqrr;
++ struct qm_mr mr;
++ struct qm_mc mc;
++} QM_PORTAL_ALIGNMENT;
++
++
++/* ---------------- */
++/* --- EQCR API --- */
++
++/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
++#define EQCR_CARRYCLEAR(p) \
++ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
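++
++/* The EQCR is QM_EQCR_SIZE entries of 64 bytes each (8 * 64 = 512 bytes),
++ * so a cursor incremented off the end carries into the QM_EQCR_SIZE << 6
++ * bit; masking that bit off wraps the pointer back to the ring base. This
++ * relies on the ring being naturally (512-byte) aligned.
++ */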
++
++/* Bit-wise logic to convert a ring pointer to a ring index */
++static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
++{
++ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
++}
++
++/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
++static inline void EQCR_INC(struct qm_eqcr *eqcr)
++{
++ /* NB: this is odd-looking, but experiments show that it generates fast
++ * code with essentially no branching overheads. We increment to the
++ * next EQCR pointer and handle overflow and 'vbit'. */
++ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
++ eqcr->cursor = EQCR_CARRYCLEAR(partial);
++ if (partial != eqcr->cursor)
++ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
++}
++
++static inline int qm_eqcr_init(struct qm_portal *portal,
++ enum qm_eqcr_pmode pmode,
++ unsigned int eq_stash_thresh,
++ int eq_stash_prio)
++{
++ /* This use of 'register', as well as all other occurrences, is because
++ * it has been observed to generate much faster code with gcc than is
++ * otherwise the case. */
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ u32 cfg;
++ u8 pi;
++
++ eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR;
++ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
++ qm_cl_invalidate(EQCR_CI);
++ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
++ eqcr->cursor = eqcr->ring + pi;
++ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
++ QM_EQCR_VERB_VBIT : 0;
++ eqcr->available = QM_EQCR_SIZE - 1 -
++ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
++ eqcr->ithresh = qm_in(EQCR_ITR);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ eqcr->busy = 0;
++ eqcr->pmode = pmode;
++#endif
++ cfg = (qm_in(CFG) & 0x00ffffff) |
++ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
++ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
++ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
++ qm_out(CFG, cfg);
++ return 0;
++}
++
++static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
++{
++ return (qm_in(CFG) >> 28) & 0x7;
++}
++
++static inline void qm_eqcr_finish(struct qm_portal *portal)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ u8 pi, ci;
++ u32 cfg;
++
++ /*
++ * Disable EQCI stashing because the QMan only
++ * presents the value it previously stashed to
++ * maintain coherency. Setting the stash threshold
++	 * to 1 then 0 ensures that QMan has resynchronized
++ * its internal copy so that the portal is clean
++ * when it is reinitialized in the future
++ */
++ cfg = (qm_in(CFG) & 0x0fffffff) |
++ (1 << 28); /* QCSP_CFG: EST */
++ qm_out(CFG, cfg);
++ cfg &= 0x0fffffff; /* stash threshold = 0 */
++ qm_out(CFG, cfg);
++
++ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
++ ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
++
++ /* Refresh EQCR CI cache value */
++ qm_cl_invalidate(EQCR_CI);
++ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
++
++ DPA_ASSERT(!eqcr->busy);
++ if (pi != EQCR_PTR2IDX(eqcr->cursor))
++		pr_crit("losing uncommitted EQCR entries\n");
++ if (ci != eqcr->ci)
++ pr_crit("missing existing EQCR completions\n");
++ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
++ pr_crit("EQCR destroyed unquiesced\n");
++}
++
++static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
++ *portal)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ DPA_ASSERT(!eqcr->busy);
++ if (!eqcr->available)
++ return NULL;
++
++#ifdef CONFIG_FSL_DPA_CHECKING
++ eqcr->busy = 1;
++#endif
++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
++ dcbz_64(eqcr->cursor);
++#endif
++ return eqcr->cursor;
++}
++
++static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
++ *portal)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ u8 diff, old_ci;
++
++ DPA_ASSERT(!eqcr->busy);
++ if (!eqcr->available) {
++ old_ci = eqcr->ci;
++ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
++ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
++ eqcr->available += diff;
++ if (!diff)
++ return NULL;
++ }
++#ifdef CONFIG_FSL_DPA_CHECKING
++ eqcr->busy = 1;
++#endif
++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
++ dcbz_64(eqcr->cursor);
++#endif
++ return eqcr->cursor;
++}
++
++static inline void qm_eqcr_abort(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
++ DPA_ASSERT(eqcr->busy);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ eqcr->busy = 0;
++#endif
++}
++
++static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
++ struct qm_portal *portal, u8 myverb)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ DPA_ASSERT(eqcr->busy);
++ DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
++ if (eqcr->available == 1)
++ return NULL;
++ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
++ dcbf(eqcr->cursor);
++ EQCR_INC(eqcr);
++ eqcr->available--;
++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
++ dcbz_64(eqcr->cursor);
++#endif
++ return eqcr->cursor;
++}
++
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++#define EQCR_COMMIT_CHECKS(eqcr) \
++do { \
++ DPA_ASSERT(eqcr->busy); \
++ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0xffffff00)); \
++ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0xffffff00)); \
++} while (0)
++#else
++#define EQCR_COMMIT_CHECKS(eqcr) \
++do { \
++ DPA_ASSERT(eqcr->busy); \
++ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & \
++ cpu_to_be32(0x00ffffff))); \
++ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & \
++ cpu_to_be32(0x00ffffff))); \
++} while (0)
++#endif
++
++static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ EQCR_COMMIT_CHECKS(eqcr);
++ DPA_ASSERT(eqcr->pmode == qm_eqcr_pci);
++ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
++ EQCR_INC(eqcr);
++ eqcr->available--;
++ dcbf(eqcr->cursor);
++ hwsync();
++ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
++#ifdef CONFIG_FSL_DPA_CHECKING
++ eqcr->busy = 0;
++#endif
++}
++
++static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
++ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
++ qm_cl_invalidate(EQCR_PI);
++ qm_cl_touch_rw(EQCR_PI);
++}
++
++static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ EQCR_COMMIT_CHECKS(eqcr);
++ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
++ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
++ EQCR_INC(eqcr);
++ eqcr->available--;
++ dcbf(eqcr->cursor);
++ lwsync();
++ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
++#ifdef CONFIG_FSL_DPA_CHECKING
++ eqcr->busy = 0;
++#endif
++}
++
++static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ struct qm_eqcr_entry *eqcursor;
++ EQCR_COMMIT_CHECKS(eqcr);
++ DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
++ lwsync();
++ eqcursor = eqcr->cursor;
++ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
++ dcbf(eqcursor);
++ EQCR_INC(eqcr);
++ eqcr->available--;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ eqcr->busy = 0;
++#endif
++}
++
++static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ u8 diff, old_ci = eqcr->ci;
++ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
++ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
++ eqcr->available += diff;
++ return diff;
++}
++
++static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
++ qm_cl_touch_ro(EQCR_CI);
++}
++
++static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ u8 diff, old_ci = eqcr->ci;
++ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
++ qm_cl_invalidate(EQCR_CI);
++ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
++ eqcr->available += diff;
++ return diff;
++}
++
++static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ return eqcr->ithresh;
++}
++
++static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ eqcr->ithresh = ithresh;
++ qm_out(EQCR_ITR, ithresh);
++}
++
++static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ return eqcr->available;
++}
++
++static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
++{
++ register struct qm_eqcr *eqcr = &portal->eqcr;
++ return QM_EQCR_SIZE - 1 - eqcr->available;
++}
++
++
++/* ---------------- */
++/* --- DQRR API --- */
++
++/* FIXME: many possible improvements:
++ * - look at changing the API to use pointer rather than index parameters now
++ *   that 'cursor' is a pointer,
++ * - consider moving other parameters to pointers if it could help (ci)
++ */
++
++#define DQRR_CARRYCLEAR(p) \
++ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
++
++static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
++{
++ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
++}
++
++static inline const struct qm_dqrr_entry *DQRR_INC(
++ const struct qm_dqrr_entry *e)
++{
++ return DQRR_CARRYCLEAR(e + 1);
++}
++
++static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
++{
++ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
++ ((mf & (QM_DQRR_SIZE - 1)) << 20));
++}
++
++static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
++ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
++ qm_out(DQRR_CI_CINH, dqrr->ci);
++}
++
++static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
++ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
++ qm_cl_out(DQRR_CI, dqrr->ci);
++}
++
++static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
++ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
++ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
++ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
++ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
++}
++
++static inline int qm_dqrr_init(struct qm_portal *portal,
++ const struct qm_portal_config *config,
++ enum qm_dqrr_dmode dmode,
++ __maybe_unused enum qm_dqrr_pmode pmode,
++ enum qm_dqrr_cmode cmode, u8 max_fill)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ u32 cfg;
++
++ /* Make sure the DQRR will be idle when we enable */
++ qm_out(DQRR_SDQCR, 0);
++ qm_out(DQRR_VDQCR, 0);
++ qm_out(DQRR_PDQCR, 0);
++ dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR;
++ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
++ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
++ dqrr->cursor = dqrr->ring + dqrr->ci;
++ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
++ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
++ QM_DQRR_VERB_VBIT : 0;
++ dqrr->ithresh = qm_in(DQRR_ITR);
++
++ /* Free up pending DQRR entries if any as per current DCM */
++ if (dqrr->fill) {
++ enum qm_dqrr_cmode dcm = (qm_in(CFG) >> 16) & 3;
++
++#ifdef CONFIG_FSL_DPA_CHECKING
++ dqrr->cmode = dcm;
++#endif
++ switch (dcm) {
++ case qm_dqrr_cci:
++ qm_dqrr_cci_consume(portal, dqrr->fill);
++ break;
++ case qm_dqrr_cce:
++ qm_dqrr_cce_consume(portal, dqrr->fill);
++ break;
++ case qm_dqrr_cdc:
++ qm_dqrr_cdc_consume_n(portal, (QM_DQRR_SIZE - 1));
++ break;
++ default:
++ DPA_ASSERT(0);
++ }
++ }
++
++#ifdef CONFIG_FSL_DPA_CHECKING
++ dqrr->dmode = dmode;
++ dqrr->pmode = pmode;
++ dqrr->cmode = cmode;
++#endif
++ /* Invalidate every ring entry before beginning */
++ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
++ dcbi(qm_cl(dqrr->ring, cfg));
++ cfg = (qm_in(CFG) & 0xff000f00) |
++ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
++ ((dmode & 1) << 18) | /* DP */
++ ((cmode & 3) << 16) | /* DCM */
++ 0xa0 | /* RE+SE */
++ (0 ? 0x40 : 0) | /* Ignore RP */
++ (0 ? 0x10 : 0); /* Ignore SP */
++ qm_out(CFG, cfg);
++ qm_dqrr_set_maxfill(portal, max_fill);
++ return 0;
++}
++
++static inline void qm_dqrr_finish(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if ((dqrr->cmode != qm_dqrr_cdc) &&
++ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
++ pr_crit("Ignoring completed DQRR entries\n");
++#endif
++}
++
++static inline const struct qm_dqrr_entry *qm_dqrr_current(
++ struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ if (!dqrr->fill)
++ return NULL;
++ return dqrr->cursor;
++}
++
++static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ return DQRR_PTR2IDX(dqrr->cursor);
++}
++
++static inline u8 qm_dqrr_next(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->fill);
++ dqrr->cursor = DQRR_INC(dqrr->cursor);
++ return --dqrr->fill;
++}
++
++static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ u8 diff, old_pi = dqrr->pi;
++ DPA_ASSERT(dqrr->pmode == qm_dqrr_pci);
++ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
++ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
++ dqrr->fill += diff;
++ return diff;
++}
++
++static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
++ qm_cl_invalidate(DQRR_PI);
++ qm_cl_touch_ro(DQRR_PI);
++}
++
++static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ u8 diff, old_pi = dqrr->pi;
++ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
++ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
++ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
++ dqrr->fill += diff;
++ return diff;
++}
++
++static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
++ DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
++#if (defined CONFIG_PPC || defined CONFIG_PPC64) && !defined CONFIG_FSL_PAMU
++ /*
++ * On PowerPC platforms if PAMU is not available we need to
++ * manually invalidate the cache. When PAMU is available the
++ * cache is updated by stashing operations generated by QMan
++ */
++ dcbi(res);
++ dcbt_ro(res);
++#endif
++
++ /* when accessing 'verb', use __raw_readb() to ensure that compiler
++ * inlining doesn't try to optimise out "excess reads". */
++ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
++ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
++ if (!dqrr->pi)
++ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
++ dqrr->fill++;
++ }
++}
++
++
++static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
++ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
++ qm_out(DQRR_CI_CINH, dqrr->ci);
++}
++
++static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
++ qm_cl_invalidate(DQRR_CI);
++ qm_cl_touch_rw(DQRR_CI);
++}
++
++static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
++ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
++ qm_cl_out(DQRR_CI, dqrr->ci);
++}
++
++static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
++ int park)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
++ DPA_ASSERT(idx < QM_DQRR_SIZE);
++ qm_out(DQRR_DCAP, (0 << 8) | /* S */
++ ((park ? 1 : 0) << 6) | /* PK */
++ idx); /* DCAP_CI */
++}
++
++static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
++ const struct qm_dqrr_entry *dq,
++ int park)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++ u8 idx = DQRR_PTR2IDX(dq);
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
++ DPA_ASSERT((dqrr->ring + idx) == dq);
++ DPA_ASSERT(idx < QM_DQRR_SIZE);
++ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
++ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
++ idx); /* DQRR_DCAP::DCAP_CI */
++}
++
++static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
++ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
++}
++
++static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
++ qm_cl_invalidate(DQRR_CI);
++ qm_cl_touch_ro(DQRR_CI);
++}
++
++static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
++ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
++}
++
++static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
++ return dqrr->ci;
++}
++
++static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
++{
++ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
++ qm_out(DQRR_DCAP, (0 << 8) | /* S */
++ (1 << 6) | /* PK */
++ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
++}
++
++static inline void qm_dqrr_park_current(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
++ qm_out(DQRR_DCAP, (0 << 8) | /* S */
++ (1 << 6) | /* PK */
++ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
++}
++
++static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
++{
++ qm_out(DQRR_SDQCR, sdqcr);
++}
++
++static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
++{
++ return qm_in(DQRR_SDQCR);
++}
++
++static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
++{
++ qm_out(DQRR_VDQCR, vdqcr);
++}
++
++static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
++{
++ return qm_in(DQRR_VDQCR);
++}
++
++static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr)
++{
++ qm_out(DQRR_PDQCR, pdqcr);
++}
++
++static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal)
++{
++ return qm_in(DQRR_PDQCR);
++}
++
++static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
++{
++ register struct qm_dqrr *dqrr = &portal->dqrr;
++ return dqrr->ithresh;
++}
++
++static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
++{
++ qm_out(DQRR_ITR, ithresh);
++}
++
++static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
++{
++ return (qm_in(CFG) & 0x00f00000) >> 20;
++}
++
++
++/* -------------- */
++/* --- MR API --- */
++
++#define MR_CARRYCLEAR(p) \
++ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
++
++static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
++{
++ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
++}
++
++static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
++{
++ return MR_CARRYCLEAR(e + 1);
++}
++
++static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
++ enum qm_mr_cmode cmode)
++{
++ register struct qm_mr *mr = &portal->mr;
++ u32 cfg;
++
++ mr->ring = portal->addr.addr_ce + QM_CL_MR;
++ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
++ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
++ mr->cursor = mr->ring + mr->ci;
++ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
++ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
++ mr->ithresh = qm_in(MR_ITR);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mr->pmode = pmode;
++ mr->cmode = cmode;
++#endif
++ cfg = (qm_in(CFG) & 0xfffff0ff) |
++ ((cmode & 1) << 8); /* QCSP_CFG:MM */
++ qm_out(CFG, cfg);
++ return 0;
++}
++
++static inline void qm_mr_finish(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ if (mr->ci != MR_PTR2IDX(mr->cursor))
++ pr_crit("Ignoring completed MR entries\n");
++}
++
++static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ if (!mr->fill)
++ return NULL;
++ return mr->cursor;
++}
++
++static inline u8 qm_mr_cursor(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ return MR_PTR2IDX(mr->cursor);
++}
++
++static inline u8 qm_mr_next(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ DPA_ASSERT(mr->fill);
++ mr->cursor = MR_INC(mr->cursor);
++ return --mr->fill;
++}
++
++static inline u8 qm_mr_pci_update(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ u8 diff, old_pi = mr->pi;
++ DPA_ASSERT(mr->pmode == qm_mr_pci);
++ mr->pi = qm_in(MR_PI_CINH);
++ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
++ mr->fill += diff;
++ return diff;
++}
++
++static inline void qm_mr_pce_prefetch(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_mr *mr = &portal->mr;
++ DPA_ASSERT(mr->pmode == qm_mr_pce);
++ qm_cl_invalidate(MR_PI);
++ qm_cl_touch_ro(MR_PI);
++}
++
++static inline u8 qm_mr_pce_update(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ u8 diff, old_pi = mr->pi;
++ DPA_ASSERT(mr->pmode == qm_mr_pce);
++ mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1);
++ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
++ mr->fill += diff;
++ return diff;
++}
++
++static inline void qm_mr_pvb_update(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
++ DPA_ASSERT(mr->pmode == qm_mr_pvb);
++ /* when accessing 'verb', use __raw_readb() to ensure that compiler
++ * inlining doesn't try to optimise out "excess reads". */
++ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
++ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
++ if (!mr->pi)
++ mr->vbit ^= QM_MR_VERB_VBIT;
++ mr->fill++;
++ res = MR_INC(res);
++ }
++ dcbit_ro(res);
++}
++
++static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
++{
++ register struct qm_mr *mr = &portal->mr;
++ DPA_ASSERT(mr->cmode == qm_mr_cci);
++ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
++ qm_out(MR_CI_CINH, mr->ci);
++}
++
++static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ DPA_ASSERT(mr->cmode == qm_mr_cci);
++ mr->ci = MR_PTR2IDX(mr->cursor);
++ qm_out(MR_CI_CINH, mr->ci);
++}
++
++static inline void qm_mr_cce_prefetch(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_mr *mr = &portal->mr;
++ DPA_ASSERT(mr->cmode == qm_mr_cce);
++ qm_cl_invalidate(MR_CI);
++ qm_cl_touch_rw(MR_CI);
++}
++
++static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num)
++{
++ register struct qm_mr *mr = &portal->mr;
++ DPA_ASSERT(mr->cmode == qm_mr_cce);
++ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
++ qm_cl_out(MR_CI, mr->ci);
++}
++
++static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ DPA_ASSERT(mr->cmode == qm_mr_cce);
++ mr->ci = MR_PTR2IDX(mr->cursor);
++ qm_cl_out(MR_CI, mr->ci);
++}
++
++static inline u8 qm_mr_get_ci(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ return mr->ci;
++}
++
++static inline u8 qm_mr_get_ithresh(struct qm_portal *portal)
++{
++ register struct qm_mr *mr = &portal->mr;
++ return mr->ithresh;
++}
++
++static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
++{
++ qm_out(MR_ITR, ithresh);
++}
++
++
++/* ------------------------------ */
++/* --- Management command API --- */
++
++static inline int qm_mc_init(struct qm_portal *portal)
++{
++ register struct qm_mc *mc = &portal->mc;
++ mc->cr = portal->addr.addr_ce + QM_CL_CR;
++ mc->rr = portal->addr.addr_ce + QM_CL_RR0;
++ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
++ QM_MCC_VERB_VBIT) ? 0 : 1;
++ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = qman_mc_idle;
++#endif
++ return 0;
++}
++
++static inline void qm_mc_finish(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_mc *mc = &portal->mc;
++ DPA_ASSERT(mc->state == qman_mc_idle);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ if (mc->state != qman_mc_idle)
++ pr_crit("Losing incomplete MC command\n");
++#endif
++}
++
++static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
++{
++ register struct qm_mc *mc = &portal->mc;
++ DPA_ASSERT(mc->state == qman_mc_idle);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = qman_mc_user;
++#endif
++#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
++ dcbz_64(mc->cr);
++#endif
++ return mc->cr;
++}
++
++static inline void qm_mc_abort(struct qm_portal *portal)
++{
++ __maybe_unused register struct qm_mc *mc = &portal->mc;
++ DPA_ASSERT(mc->state == qman_mc_user);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = qman_mc_idle;
++#endif
++}
++
++static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
++{
++ register struct qm_mc *mc = &portal->mc;
++ struct qm_mc_result *rr = mc->rr + mc->rridx;
++ DPA_ASSERT(mc->state == qman_mc_user);
++ lwsync();
++ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
++ dcbf(mc->cr);
++ dcbit_ro(rr);
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = qman_mc_hw;
++#endif
++}
++
++static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
++{
++ register struct qm_mc *mc = &portal->mc;
++ struct qm_mc_result *rr = mc->rr + mc->rridx;
++ DPA_ASSERT(mc->state == qman_mc_hw);
++ /* The inactive response register's verb byte always returns zero until
++ * its command is submitted and completed. This includes the valid-bit,
++ * in case you were wondering... */
++ if (!__raw_readb(&rr->verb)) {
++ dcbit_ro(rr);
++ return NULL;
++ }
++ mc->rridx ^= 1;
++ mc->vbit ^= QM_MCC_VERB_VBIT;
++#ifdef CONFIG_FSL_DPA_CHECKING
++ mc->state = qman_mc_idle;
++#endif
++ return rr;
++}
++
++
++/* ------------------------------------- */
++/* --- Portal interrupt register API --- */
++
++static inline int qm_isr_init(__always_unused struct qm_portal *portal)
++{
++ return 0;
++}
++
++static inline void qm_isr_finish(__always_unused struct qm_portal *portal)
++{
++}
++
++static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
++{
++ qm_out(ITPR, iperiod);
++}
++
++static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
++{
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ return __qm_in(&portal->addr, QM_REG_ISR + (n << 6));
++#else
++ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
++#endif
++}
++
++static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
++ u32 val)
++{
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val);
++#else
++ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
++#endif
++}
++
++/* Cleanup FQs */
++static inline int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
++ u32 fqid)
++{
++
++ struct qm_mc_command *mcc;
++ struct qm_mc_result *mcr;
++ u8 state;
++ int orl_empty, fq_empty, i, drain = 0;
++ u32 result;
++ u32 channel, wq;
++ u16 dest_wq;
++
++ /* Determine the state of the FQID */
++ mcc = qm_mc_start(portal[0]);
++ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
++ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
++ while (!(mcr = qm_mc_result(portal[0])))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
++ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
++ if (state == QM_MCR_NP_STATE_OOS)
++		return 0; /* Already OOS, no need to do any more checks */
++
++ /* Query which channel the FQ is using */
++ mcc = qm_mc_start(portal[0]);
++ mcc->queryfq.fqid = cpu_to_be32(fqid);
++ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
++ while (!(mcr = qm_mc_result(portal[0])))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
++
++ /* Need to store these since the MCR gets reused */
++ dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
++ wq = dest_wq & 0x7;
++ channel = dest_wq>>3;
++
++ switch (state) {
++ case QM_MCR_NP_STATE_TEN_SCHED:
++ case QM_MCR_NP_STATE_TRU_SCHED:
++ case QM_MCR_NP_STATE_ACTIVE:
++ case QM_MCR_NP_STATE_PARKED:
++ orl_empty = 0;
++ mcc = qm_mc_start(portal[0]);
++ mcc->alterfq.fqid = cpu_to_be32(fqid);
++ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
++ while (!(mcr = qm_mc_result(portal[0])))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_MCR_VERB_ALTER_RETIRE);
++ result = mcr->result; /* Make a copy as we reuse MCR below */
++
++ if (result == QM_MCR_RESULT_PENDING) {
++			/* Need to wait for the FQRN in the message ring, which
++			   will only occur once the FQ has been drained. In
++			   order for the FQ to drain, the portal needs to be
++			   set to dequeue from the channel the FQ is scheduled on */
++ const struct qm_mr_entry *msg;
++ const struct qm_dqrr_entry *dqrr = NULL;
++ int found_fqrn = 0;
++ u16 dequeue_wq = 0;
++
++ /* Flag that we need to drain FQ */
++ drain = 1;
++
++ if (channel >= qm_channel_pool1 &&
++ channel < (qm_channel_pool1 + 15)) {
++ /* Pool channel, enable the bit in the portal */
++ dequeue_wq = (channel -
++ qm_channel_pool1 + 1)<<4 | wq;
++ } else if (channel < qm_channel_pool1) {
++ /* Dedicated channel */
++ dequeue_wq = wq;
++ } else {
++				pr_info("Cannot recover FQ 0x%x, it is scheduled on channel 0x%x\n",
++					fqid, channel);
++ return -EBUSY;
++ }
++ /* Set the sdqcr to drain this channel */
++ if (channel < qm_channel_pool1)
++ for (i = 0; i < portal_count; i++)
++ qm_dqrr_sdqcr_set(portal[i],
++ QM_SDQCR_TYPE_ACTIVE |
++ QM_SDQCR_CHANNELS_DEDICATED);
++ else
++ for (i = 0; i < portal_count; i++)
++ qm_dqrr_sdqcr_set(
++ portal[i],
++ QM_SDQCR_TYPE_ACTIVE |
++ QM_SDQCR_CHANNELS_POOL_CONV
++ (channel));
++ while (!found_fqrn) {
++ /* Keep draining DQRR while checking the MR*/
++ for (i = 0; i < portal_count; i++) {
++ qm_dqrr_pvb_update(portal[i]);
++ dqrr = qm_dqrr_current(portal[i]);
++ while (dqrr) {
++ qm_dqrr_cdc_consume_1ptr(
++ portal[i], dqrr, 0);
++ qm_dqrr_pvb_update(portal[i]);
++ qm_dqrr_next(portal[i]);
++ dqrr = qm_dqrr_current(
++ portal[i]);
++ }
++ /* Process message ring too */
++ qm_mr_pvb_update(portal[i]);
++ msg = qm_mr_current(portal[i]);
++ while (msg) {
++ if ((msg->verb &
++ QM_MR_VERB_TYPE_MASK)
++ == QM_MR_VERB_FQRN)
++ found_fqrn = 1;
++ qm_mr_next(portal[i]);
++ qm_mr_cci_consume_to_current(
++ portal[i]);
++ qm_mr_pvb_update(portal[i]);
++ msg = qm_mr_current(portal[i]);
++ }
++ cpu_relax();
++ }
++ }
++ }
++ if (result != QM_MCR_RESULT_OK &&
++ result != QM_MCR_RESULT_PENDING) {
++ /* error */
++ pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n",
++ fqid, result);
++ return -1;
++ }
++ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
++ /* ORL had no entries, no need to wait until the
++ ERNs come in */
++ orl_empty = 1;
++ }
++ /* Retirement succeeded, check to see if FQ needs
++ to be drained */
++ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
++ /* FQ is Not Empty, drain using volatile DQ commands */
++ fq_empty = 0;
++ do {
++ const struct qm_dqrr_entry *dqrr = NULL;
++ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
++ qm_dqrr_vdqcr_set(portal[0], vdqcr);
++
++ /* Wait for a dequeue to occur */
++ while (dqrr == NULL) {
++ qm_dqrr_pvb_update(portal[0]);
++ dqrr = qm_dqrr_current(portal[0]);
++ if (!dqrr)
++ cpu_relax();
++ }
++ /* Process the dequeues, making sure to
++ empty the ring completely */
++ while (dqrr) {
++ if (be32_to_cpu(dqrr->fqid) == fqid &&
++ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
++ fq_empty = 1;
++ qm_dqrr_cdc_consume_1ptr(portal[0],
++ dqrr, 0);
++ qm_dqrr_pvb_update(portal[0]);
++ qm_dqrr_next(portal[0]);
++ dqrr = qm_dqrr_current(portal[0]);
++ }
++ } while (fq_empty == 0);
++ }
++ for (i = 0; i < portal_count; i++)
++ qm_dqrr_sdqcr_set(portal[i], 0);
++
++ /* Wait for the ORL to have been completely drained */
++ while (orl_empty == 0) {
++ const struct qm_mr_entry *msg;
++ qm_mr_pvb_update(portal[0]);
++ msg = qm_mr_current(portal[0]);
++ while (msg) {
++ if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
++ QM_MR_VERB_FQRL)
++ orl_empty = 1;
++ qm_mr_next(portal[0]);
++ qm_mr_cci_consume_to_current(portal[0]);
++ qm_mr_pvb_update(portal[0]);
++ msg = qm_mr_current(portal[0]);
++ }
++ cpu_relax();
++ }
++ mcc = qm_mc_start(portal[0]);
++ mcc->alterfq.fqid = cpu_to_be32(fqid);
++ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
++ while (!(mcr = qm_mc_result(portal[0])))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_MCR_VERB_ALTER_OOS);
++ if (mcr->result != QM_MCR_RESULT_OK) {
++		pr_err("OOS after drain failed on FQID 0x%x, result 0x%x\n",
++ fqid, mcr->result);
++ return -1;
++ }
++ return 0;
++ case QM_MCR_NP_STATE_RETIRED:
++ /* Send OOS Command */
++ mcc = qm_mc_start(portal[0]);
++ mcc->alterfq.fqid = cpu_to_be32(fqid);
++ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
++ while (!(mcr = qm_mc_result(portal[0])))
++ cpu_relax();
++ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
++ QM_MCR_VERB_ALTER_OOS);
++ if (mcr->result) {
++			pr_err("OOS failed on FQID 0x%x\n", fqid);
++ return -1;
++ }
++ return 0;
++ }
++ return -1;
++}
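++
++/*
++ * Illustrative caller sketch -- not part of the original patch. It shows the
++ * minimal way to drive qm_shutdown_fq() from a single portal: the function
++ * takes an array of portal pointers so the drain can be spread across several
++ * portals, and a one-element array is the degenerate case. The function name
++ * is hypothetical.
++ */
++static inline int qm_shutdown_fq_one_portal(struct qm_portal *p, u32 fqid)
++{
++	struct qm_portal *portals[1] = { p };
++
++	return qm_shutdown_fq(portals, 1, fqid);
++}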
+diff --git a/drivers/staging/fsl_qbman/qman_private.h b/drivers/staging/fsl_qbman/qman_private.h
+new file mode 100644
+index 00000000..ee025cff
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_private.h
+@@ -0,0 +1,398 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "dpa_sys.h"
++#include <linux/fsl_qman.h>
++#include <linux/iommu.h>
++
++#if defined(CONFIG_FSL_PAMU)
++#include <asm/fsl_pamu_stash.h>
++#endif
++
++#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64)
++#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP"
++#endif
++
++#define QBMAN_ANY_PORTAL_IDX 0xffffffff
++ /* ----------------- */
++ /* Congestion Groups */
++ /* ----------------- */
++/* This wrapper represents a bit-array for the state of the 256 Qman congestion
++ * groups. It is also used as a *mask* for congestion groups, e.g. so we can
++ * ignore those that don't concern us. We reuse the structure and accessor
++ * details already used in the management command to query congestion groups. */
++struct qman_cgrs {
++ struct __qm_mcr_querycongestion q;
++};
++static inline void qman_cgrs_init(struct qman_cgrs *c)
++{
++ memset(c, 0, sizeof(*c));
++}
++static inline void qman_cgrs_fill(struct qman_cgrs *c)
++{
++ memset(c, 0xff, sizeof(*c));
++}
++static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
++{
++ return QM_MCR_QUERYCONGESTION(&c->q, num);
++}
++static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
++{
++ c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
++}
++static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
++{
++ c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
++}
++static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
++{
++ while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
++ ;
++ return num;
++}
++static inline void qman_cgrs_cp(struct qman_cgrs *dest,
++ const struct qman_cgrs *src)
++{
++ *dest = *src;
++}
++static inline void qman_cgrs_and(struct qman_cgrs *dest,
++ const struct qman_cgrs *a, const struct qman_cgrs *b)
++{
++ int ret;
++ u32 *_d = dest->q.__state;
++ const u32 *_a = a->q.__state;
++ const u32 *_b = b->q.__state;
++ for (ret = 0; ret < 8; ret++)
++ *(_d++) = *(_a++) & *(_b++);
++}
++static inline void qman_cgrs_xor(struct qman_cgrs *dest,
++ const struct qman_cgrs *a, const struct qman_cgrs *b)
++{
++ int ret;
++ u32 *_d = dest->q.__state;
++ const u32 *_a = a->q.__state;
++ const u32 *_b = b->q.__state;
++ for (ret = 0; ret < 8; ret++)
++ *(_d++) = *(_a++) ^ *(_b++);
++}
++
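++/*
++ * Illustrative sketch -- not part of the original patch: walking every
++ * congestion group that is set in 'c' with the accessors above. The walk
++ * starts from -1 because qman_cgrs_next() pre-increments, and it ends when
++ * __CGR_NUM is returned (no further bits set).
++ */
++static inline void qman_cgrs_walk_example(struct qman_cgrs *c)
++{
++	int i;
++
++	for (i = qman_cgrs_next(c, -1); i < __CGR_NUM; i = qman_cgrs_next(c, i))
++		pr_debug("congestion group %d is set\n", i);
++}
++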
++ /* ----------------------- */
++ /* CEETM Congestion Groups */
++ /* ----------------------- */
++/* This wrapper represents a bit-array for the state of the 512 Qman CEETM
++ * congestion groups.
++ */
++struct qman_ccgrs {
++ struct __qm_mcr_querycongestion q[2];
++};
++static inline void qman_ccgrs_init(struct qman_ccgrs *c)
++{
++ memset(c, 0, sizeof(*c));
++}
++static inline void qman_ccgrs_fill(struct qman_ccgrs *c)
++{
++ memset(c, 0xff, sizeof(*c));
++}
++static inline int qman_ccgrs_get(struct qman_ccgrs *c, int num)
++{
++ if (num < __CGR_NUM)
++ return QM_MCR_QUERYCONGESTION(&c->q[0], num);
++ else
++ return QM_MCR_QUERYCONGESTION(&c->q[1], (num - __CGR_NUM));
++}
++static inline int qman_ccgrs_next(struct qman_ccgrs *c, int num)
++{
++ while ((++num < __CGR_NUM) && !qman_ccgrs_get(c, num))
++ ;
++ return num;
++}
++static inline void qman_ccgrs_cp(struct qman_ccgrs *dest,
++ const struct qman_ccgrs *src)
++{
++ *dest = *src;
++}
++static inline void qman_ccgrs_and(struct qman_ccgrs *dest,
++ const struct qman_ccgrs *a, const struct qman_ccgrs *b)
++{
++ int ret, i;
++ u32 *_d;
++ const u32 *_a, *_b;
++ for (i = 0; i < 2; i++) {
++ _d = dest->q[i].__state;
++ _a = a->q[i].__state;
++ _b = b->q[i].__state;
++ for (ret = 0; ret < 8; ret++)
++ *(_d++) = *(_a++) & *(_b++);
++ }
++}
++static inline void qman_ccgrs_xor(struct qman_ccgrs *dest,
++ const struct qman_ccgrs *a, const struct qman_ccgrs *b)
++{
++ int ret, i;
++ u32 *_d;
++ const u32 *_a, *_b;
++ for (i = 0; i < 2; i++) {
++ _d = dest->q[i].__state;
++ _a = a->q[i].__state;
++ _b = b->q[i].__state;
++ for (ret = 0; ret < 8; ret++)
++ *(_d++) = *(_a++) ^ *(_b++);
++ }
++}
++
++/* used by CCSR and portal interrupt code */
++enum qm_isr_reg {
++ qm_isr_status = 0,
++ qm_isr_enable = 1,
++ qm_isr_disable = 2,
++ qm_isr_inhibit = 3
++};
++
++struct qm_portal_config {
++ /* Corenet portal addresses;
++ * [0]==cache-enabled, [1]==cache-inhibited. */
++ __iomem void *addr_virt[2];
++ struct resource addr_phys[2];
++ struct device dev;
++ struct iommu_domain *iommu_domain;
++ /* Allow these to be joined in lists */
++ struct list_head list;
++ /* User-visible portal configuration settings */
++ struct qman_portal_config public_cfg;
++ /* power management saved data */
++ u32 saved_isdr;
++};
++
++/* Revision info (for errata and feature handling) */
++#define QMAN_REV11 0x0101
++#define QMAN_REV12 0x0102
++#define QMAN_REV20 0x0200
++#define QMAN_REV30 0x0300
++#define QMAN_REV31 0x0301
++#define QMAN_REV32 0x0302
++
++/* QMan REV_2 register contains the Cfg option */
++#define QMAN_REV_CFG_0 0x0
++#define QMAN_REV_CFG_1 0x1
++#define QMAN_REV_CFG_2 0x2
++#define QMAN_REV_CFG_3 0x3
++
++extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
++extern u8 qman_ip_cfg;
++extern u32 qman_clk;
++extern u16 qman_portal_max;
++
++#ifdef CONFIG_FSL_QMAN_CONFIG
++/* Hooks from qman_driver.c to qman_config.c */
++int qman_init_ccsr(struct device_node *node);
++void qman_liodn_fixup(u16 channel);
++int qman_set_sdest(u16 channel, unsigned int cpu_idx);
++size_t get_qman_fqd_size(void);
++#else
++static inline size_t get_qman_fqd_size(void)
++{
++ return (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ);
++}
++#endif
++
++int qm_set_wpm(int wpm);
++int qm_get_wpm(int *wpm);
++
++/* Hooks from qman_driver.c in to qman_high.c */
++struct qman_portal *qman_create_portal(
++ struct qman_portal *portal,
++ const struct qm_portal_config *config,
++ const struct qman_cgrs *cgrs);
++
++struct qman_portal *qman_create_affine_portal(
++ const struct qm_portal_config *config,
++ const struct qman_cgrs *cgrs);
++struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
++ int cpu);
++const struct qm_portal_config *qman_destroy_affine_portal(void);
++void qman_destroy_portal(struct qman_portal *qm);
++
++/* Hooks from fsl_usdpaa.c to qman_driver.c */
++struct qm_portal_config *qm_get_unused_portal(void);
++struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
++
++void qm_put_unused_portal(struct qm_portal_config *pcfg);
++void qm_set_liodns(struct qm_portal_config *pcfg);
++
++/* This CGR feature is supported by h/w and required by unit-tests and the
++ * debugfs hooks, so is implemented in the driver. However, it allows an explicit
++ * corruption of h/w fields by s/w that are usually incorruptible (because the
++ * counters are usually maintained entirely within h/w). As such, we declare
++ * this API internally. */
++int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
++ struct qm_mcr_cgrtestwrite *result);
++
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++/* If the fq object pointer is larger than the context_b field, then a
++ * lookup table is required. */
++int qman_setup_fq_lookup_table(size_t num_entries);
++#endif
++
++
++/*************************************************/
++/* QMan s/w corenet portal, low-level i/face */
++/*************************************************/
++
++/* Note: most functions are only used by the high-level interface, so are
++ * inlined from qman_low.h. The stuff below is for use by other parts of the
++ * driver. */
++
++/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
++ * dequeue TYPE. Choose TOKEN (8-bit).
++ * If SOURCE == CHANNELS,
++ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
++ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
++ * priority.
++ * If SOURCE == SPECIFICWQ,
++ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
++ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
++ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
++ * same value.
++ */
++#define QM_SDQCR_SOURCE_CHANNELS 0x0
++#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
++#define QM_SDQCR_COUNT_EXACT1 0x0
++#define QM_SDQCR_COUNT_UPTO3 0x20000000
++#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
++#define QM_SDQCR_TYPE_MASK 0x03000000
++#define QM_SDQCR_TYPE_NULL 0x0
++#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
++#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
++#define QM_SDQCR_TYPE_ACTIVE 0x03000000
++#define QM_SDQCR_TOKEN_MASK 0x00ff0000
++#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
++#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
++#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
++#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
++#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
++#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
++#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
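++
++/*
++ * Hypothetical example (not from the original driver): composing an SDQCR
++ * word per the rules above -- dequeue up to 3 frames of ACTIVE type from the
++ * portal's dedicated channel, tagging DQRR entries with token 0x5a. It would
++ * be installed with qm_dqrr_sdqcr_set(portal, EXAMPLE_SDQCR); the macro name
++ * is illustrative only.
++ */
++#define EXAMPLE_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
++		       QM_SDQCR_COUNT_UPTO3 | \
++		       QM_SDQCR_TYPE_ACTIVE | \
++		       QM_SDQCR_TOKEN_SET(0x5a) | \
++		       QM_SDQCR_CHANNELS_DEDICATED)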
++
++/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
++#define QM_VDQCR_FQID_MASK 0x00ffffff
++#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
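++
++/*
++ * Hypothetical example: a VDQCR word requesting up to 3 frames from FQID
++ * 0x42. QM_VDQCR_NUMFRAMES_SET() comes from the public <linux/fsl_qman.h>
++ * header, exactly as qm_shutdown_fq() uses it; EXAMPLE_VDQCR is an
++ * illustrative name only.
++ */
++#define EXAMPLE_VDQCR (QM_VDQCR_FQID(0x42) | QM_VDQCR_NUMFRAMES_SET(3))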
++
++/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
++ * If MODE==SCHEDULED
++ * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
++ * If CHANNELS,
++ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
++ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
++ * priority.
++ * If SPECIFICWQ,
++ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
++ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
++ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
++ * same value.
++ * If MODE==UNSCHEDULED
++ * Choose FQID().
++ */
++#define QM_PDQCR_MODE_SCHEDULED 0x0
++#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
++#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
++#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
++#define QM_PDQCR_COUNT_EXACT1 0x0
++#define QM_PDQCR_COUNT_UPTO3 0x20000000
++#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
++#define QM_PDQCR_TYPE_MASK 0x03000000
++#define QM_PDQCR_TYPE_NULL 0x0
++#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
++#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
++#define QM_PDQCR_TYPE_ACTIVE 0x03000000
++#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
++#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
++#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
++#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
++#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
++#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
++#define QM_PDQCR_FQID(n) ((n) & 0xffffff)
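++
++/*
++ * Hypothetical example: a PDQCR word for an unscheduled (direct) dequeue of
++ * exactly one frame from FQID 0x42, following the MODE==UNSCHEDULED rule
++ * above. EXAMPLE_PDQCR is an illustrative name only.
++ */
++#define EXAMPLE_PDQCR (QM_PDQCR_MODE_UNSCHEDULED | \
++		       QM_PDQCR_COUNT_EXACT1 | \
++		       QM_PDQCR_FQID(0x42))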
++
++/* Used by all portal interrupt registers except 'inhibit'
++ * Channels with frame availability
++ */
++#define QM_PIRQ_DQAVAIL 0x0000ffff
++
++/* The DQAVAIL interrupt fields break down into these bits; */
++#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
++#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
++#define QM_DQAVAIL_MASK 0xffff
++/* This mask contains all the "irqsource" bits visible to API users */
++#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
++
++/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
++ * the disable register" rather than "disable the ability to write". */
++#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
++#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
++#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
++#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
++#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
++#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
++/* TODO: unfortunate name-clash here, reword? */
++#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
++#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
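++
++/*
++ * Hypothetical usage of the accessors above, assuming the qman_low.h
++ * implementations of __qm_isr_read()/__qm_isr_write() are in scope: read the
++ * pending interrupt sources and acknowledge them all by writing the value
++ * back to the status register, as the rest of the driver does.
++ */
++static inline void qm_isr_ack_all_example(struct qm_portal *qm)
++{
++	u32 status = qm_isr_status_read(qm);
++
++	qm_isr_status_clear(qm, status);
++}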
++
++#ifdef CONFIG_FSL_QMAN_CONFIG
++int qman_have_ccsr(void);
++#else
++#define qman_have_ccsr 0
++#endif
++
++__init int qman_init(void);
++__init int qman_resource_init(void);
++
++/* CEETM related */
++#define QMAN_CEETM_MAX 2
++extern u8 num_ceetms;
++extern struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
++int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
++int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
++int qman_ceetm_set_prescaler(enum qm_dc_portal portal);
++int qman_ceetm_get_prescaler(u16 *pres);
++int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
++ struct qm_mcr_ceetm_cq_query *cq_query);
++int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
++ struct qm_mcr_ceetm_ccgr_query *response);
++int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num);
++
++extern void *affine_portals[NR_CPUS];
++const struct qm_portal_config *qman_get_qm_portal_config(
++ struct qman_portal *portal);
++
++/* power management */
++#ifdef CONFIG_SUSPEND
++void suspend_unused_qportal(void);
++void resume_unused_qportal(void);
++#endif
+diff --git a/drivers/staging/fsl_qbman/qman_test.c b/drivers/staging/fsl_qbman/qman_test.c
+new file mode 100644
+index 00000000..7995dd8c
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_test.c
+@@ -0,0 +1,57 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qman_test.h"
++
++MODULE_AUTHOR("Geoff Thorpe");
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("Qman testing");
++
++static int test_init(void)
++{
++ int loop = 1;
++ while (loop--) {
++#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO
++ qman_test_hotpotato();
++#endif
++#ifdef CONFIG_FSL_QMAN_TEST_HIGH
++ qman_test_high();
++#endif
++ }
++ return 0;
++}
++
++static void test_exit(void)
++{
++}
++
++module_init(test_init);
++module_exit(test_exit);
+diff --git a/drivers/staging/fsl_qbman/qman_test.h b/drivers/staging/fsl_qbman/qman_test.h
+new file mode 100644
+index 00000000..8c4181c7
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_test.h
+@@ -0,0 +1,45 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++
++#include <linux/fsl_qman.h>
++
++void qman_test_hotpotato(void);
++void qman_test_high(void);
++
+diff --git a/drivers/staging/fsl_qbman/qman_test_high.c b/drivers/staging/fsl_qbman/qman_test_high.c
+new file mode 100644
+index 00000000..65ee270e
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_test_high.c
+@@ -0,0 +1,216 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qman_test.h"
++
++/*************/
++/* constants */
++/*************/
++
++#define CGR_ID 27
++#define POOL_ID 2
++#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
++#define NUM_ENQUEUES 10
++#define NUM_PARTIAL 4
++#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
++ QM_SDQCR_TYPE_PRIO_QOS | \
++ QM_SDQCR_TOKEN_SET(0x98) | \
++ QM_SDQCR_CHANNELS_DEDICATED | \
++ QM_SDQCR_CHANNELS_POOL(POOL_ID))
++#define PORTAL_OPAQUE ((void *)0xf00dbeef)
++#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
++
++/*************************************/
++/* Predeclarations (eg. for fq_base) */
++/*************************************/
++
++static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
++ struct qman_fq *,
++ const struct qm_dqrr_entry *);
++static void cb_ern(struct qman_portal *, struct qman_fq *,
++ const struct qm_mr_entry *);
++static void cb_fqs(struct qman_portal *, struct qman_fq *,
++ const struct qm_mr_entry *);
++
++/***************/
++/* global vars */
++/***************/
++
++static struct qm_fd fd, fd_dq;
++static struct qman_fq fq_base = {
++ .cb.dqrr = cb_dqrr,
++ .cb.ern = cb_ern,
++ .cb.fqs = cb_fqs
++};
++static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
++static int retire_complete, sdqcr_complete;
++
++/**********************/
++/* internal functions */
++/**********************/
++
++/* Helpers for initialising and "incrementing" a frame descriptor */
++static void fd_init(struct qm_fd *__fd)
++{
++ qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
++ __fd->format = qm_fd_contig_big;
++ __fd->length29 = 0x0000ffff;
++ __fd->cmd = 0xfeedf00d;
++}
++
++static void fd_inc(struct qm_fd *__fd)
++{
++ u64 t = qm_fd_addr_get64(__fd);
++ int z = t >> 40;
++ t <<= 1;
++ if (z)
++ t |= 1;
++ qm_fd_addr_set64(__fd, t);
++ __fd->length29--;
++ __fd->cmd++;
++}
++
++/* The only part of the 'fd' we can't memcmp() is the ppid */
++static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
++{
++ int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
++ if (!r)
++ r = a->format - b->format;
++ if (!r)
++ r = a->opaque - b->opaque;
++ if (!r)
++ r = a->cmd - b->cmd;
++ return r;
++}
++
++/********/
++/* test */
++/********/
++
++static void do_enqueues(struct qman_fq *fq)
++{
++ unsigned int loop;
++ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
++ if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
++ (((loop + 1) == NUM_ENQUEUES) ?
++ QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
++ panic("qman_enqueue() failed\n");
++ fd_inc(&fd);
++ }
++}
++
++void qman_test_high(void)
++{
++ unsigned int flags;
++ int res;
++ struct qman_fq *fq = &fq_base;
++
++ pr_info("qman_test_high starting\n");
++ fd_init(&fd);
++ fd_init(&fd_dq);
++
++ /* Initialise (parked) FQ */
++ if (qman_create_fq(0, FQ_FLAGS, fq))
++ panic("qman_create_fq() failed\n");
++ if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
++ panic("qman_init_fq() failed\n");
++
++ /* Do enqueues + VDQCR, twice. (Parked FQ) */
++ do_enqueues(fq);
++ pr_info("VDQCR (till-empty);\n");
++ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
++ QM_VDQCR_NUMFRAMES_TILLEMPTY))
++ panic("qman_volatile_dequeue() failed\n");
++ do_enqueues(fq);
++ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
++ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
++ QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
++ panic("qman_volatile_dequeue() failed\n");
++ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
++ NUM_ENQUEUES);
++ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
++ QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
++ panic("qman_volatile_dequeue() failed\n");
++
++ do_enqueues(fq);
++ pr_info("scheduled dequeue (till-empty)\n");
++ if (qman_schedule_fq(fq))
++ panic("qman_schedule_fq() failed\n");
++ wait_event(waitqueue, sdqcr_complete);
++
++ /* Retire and OOS the FQ */
++ res = qman_retire_fq(fq, &flags);
++ if (res < 0)
++ panic("qman_retire_fq() failed\n");
++ wait_event(waitqueue, retire_complete);
++ if (flags & QMAN_FQ_STATE_BLOCKOOS)
++ panic("leaking frames\n");
++ if (qman_oos_fq(fq))
++ panic("qman_oos_fq() failed\n");
++ qman_destroy_fq(fq, 0);
++ pr_info("qman_test_high finished\n");
++}
++
++static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dq)
++{
++ if (fd_cmp(&fd_dq, &dq->fd)) {
++ pr_err("BADNESS: dequeued frame doesn't match;\n");
++ pr_err("Expected 0x%llx, got 0x%llx\n",
++ (unsigned long long)fd_dq.length29,
++ (unsigned long long)dq->fd.length29);
++ BUG();
++ }
++ fd_inc(&fd_dq);
++ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
++ sdqcr_complete = 1;
++ wake_up(&waitqueue);
++ }
++ return qman_cb_dqrr_consume;
++}
++
++static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
++ const struct qm_mr_entry *msg)
++{
++ panic("cb_ern() unimplemented");
++}
++
++static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
++ const struct qm_mr_entry *msg)
++{
++ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
++ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
++ panic("unexpected FQS message");
++ pr_info("Retirement message received\n");
++ retire_complete = 1;
++ wake_up(&waitqueue);
++}
+diff --git a/drivers/staging/fsl_qbman/qman_test_hotpotato.c b/drivers/staging/fsl_qbman/qman_test_hotpotato.c
+new file mode 100644
+index 00000000..899d2aa9
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_test_hotpotato.c
+@@ -0,0 +1,502 @@
++/* Copyright 2009-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kthread.h>
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#include "qman_test.h"
++
++/* Algorithm:
++ *
++ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
++ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
++ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
++ * shuttle a "hot potato" frame around them such that every forwarding action
++ * moves it from one cpu to another. (The use of more than one handler per cpu
++ * is to allow enough handlers/FQs to truly test the significance of caching -
++ * ie. when cache-expiries are occurring.)
++ *
++ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
++ * first and last words of the frame data will undergo a transformation step on
++ * each forwarding action. To achieve this, each handler will be assigned a
++ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
++ * received by a handler, the mixer of the expected sender is XOR'd into all
++ * words of the entire frame, which is then validated against the original
++ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
++ * the current handler. Apart from validating that the frame is taking the
++ * expected path, this also provides some quasi-realistic overheads to each
++ * forwarding action - dereferencing *all* the frame data, computation, and
++ * conditional branching. There is a "special" handler designated to act as the
++ * instigator of the test by creating and enqueuing the "hot potato" frame, and
++ * to determine when the test has completed by counting HP_LOOPS iterations.
++ *
++ * Init phases:
++ *
++ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
++ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
++ * handlers and link-list them (but do no other handler setup).
++ *
++ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
++ * hp_cpu's 'iterator' to point to its first handler. With each loop,
++ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
++ * and advance the iterator for the next loop. This includes a final fixup,
++ * which connects the last handler to the first (and which is why phase 2
++ * and 3 are separate).
++ *
++ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
++ * hp_cpu's 'iterator' to point to its first handler. With each loop,
++ * initialise FQ objects and advance the iterator for the next loop.
++ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
++ * initialisation targets the correct cpu.
++ */
++
++/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
++ * the fn from irq context, which is too restrictive). */
++struct bstrap {
++ void (*fn)(void);
++ atomic_t started;
++};
++static int bstrap_fn(void *__bstrap)
++{
++ struct bstrap *bstrap = __bstrap;
++ atomic_inc(&bstrap->started);
++ bstrap->fn();
++ while (!kthread_should_stop())
++ msleep(1);
++ return 0;
++}
++static int on_all_cpus(void (*fn)(void))
++{
++ int cpu;
++ for_each_cpu(cpu, cpu_online_mask) {
++ struct bstrap bstrap = {
++ .fn = fn,
++ .started = ATOMIC_INIT(0)
++ };
++ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
++ "hotpotato%d", cpu);
++ int ret;
++ if (IS_ERR(k))
++ return -ENOMEM;
++ kthread_bind(k, cpu);
++ wake_up_process(k);
++ /* If we call kthread_stop() before the "wake up" has had an
++ * effect, then the thread may exit with -EINTR without ever
++ * running the function. So poll until it's started before
++ * requesting it to stop. */
++ while (!atomic_read(&bstrap.started))
++ msleep(10);
++ ret = kthread_stop(k);
++ if (ret)
++ return ret;
++ }
++ return 0;
++}
++
++struct hp_handler {
++
++ /* The following data is stashed when 'rx' is dequeued; */
++ /* -------------- */
++ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
++ struct qman_fq rx;
++ /* The Tx FQ we should forward to */
++ struct qman_fq tx;
++ /* The value we XOR post-dequeue, prior to validating */
++ u32 rx_mixer;
++ /* The value we XOR pre-enqueue, after validating */
++ u32 tx_mixer;
++ /* what the hotpotato address should be on dequeue */
++ dma_addr_t addr;
++ u32 *frame_ptr;
++
++ /* The following data isn't (necessarily) stashed on dequeue; */
++ /* -------------- */
++ u32 fqid_rx, fqid_tx;
++ /* list node for linking us into 'hp_cpu' */
++ struct list_head node;
++ /* Just to check ... */
++ unsigned int processor_id;
++} ____cacheline_aligned;
++
++struct hp_cpu {
++ /* identify the cpu we run on; */
++ unsigned int processor_id;
++ /* root node for the per-cpu list of handlers */
++ struct list_head handlers;
++ /* list node for linking us into 'hp_cpu_list' */
++ struct list_head node;
++	/* when repeatedly scanning 'hp_cpu_list', each time linking the n'th
++ * handlers together, this is used as per-cpu iterator state */
++ struct hp_handler *iterator;
++};
++
++/* Each cpu has one of these */
++static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
++
++/* links together the hp_cpu structs, in first-come first-serve order. */
++static LIST_HEAD(hp_cpu_list);
++static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
++
++static unsigned int hp_cpu_list_length;
++
++/* the "special" handler, that starts and terminates the test. */
++static struct hp_handler *special_handler;
++static int loop_counter;
++
++/* handlers are allocated out of this, so they're properly aligned. */
++static struct kmem_cache *hp_handler_slab;
++
++/* this is the frame data */
++static void *__frame_ptr;
++static u32 *frame_ptr;
++static dma_addr_t frame_dma;
++
++/* the main function waits on this */
++static DECLARE_WAIT_QUEUE_HEAD(queue);
++
++#define HP_PER_CPU 2
++#define HP_LOOPS 8
++/* 80 words (320 bytes) of frame data - larger than a small ethernet frame,
++ * and spanning several cachelines */
++#define HP_NUM_WORDS 80
++/* First word of the LFSR-based frame data */
++#define HP_FIRST_WORD 0xabbaf00d
++
++static inline u32 do_lfsr(u32 prev)
++{
++ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
++}
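++
++/* Illustrative sketch (not part of the test): because the LFSR is
++ * deterministic, a producer and a checker that start from the same seed
++ * always agree on the whole word sequence. This helper regenerates the
++ * first 'n' words from a seed; the helper name is hypothetical. */
++static __maybe_unused void lfsr_expand(u32 seed, u32 *out, int n)
++{
++	int i;
++
++	for (i = 0; i < n; i++) {
++		out[i] = seed;
++		seed = do_lfsr(seed);
++	}
++}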
++
++static void allocate_frame_data(void)
++{
++ u32 lfsr = HP_FIRST_WORD;
++ int loop;
++ struct platform_device *pdev = platform_device_alloc("foobar", -1);
++ if (!pdev)
++ panic("platform_device_alloc() failed");
++ if (platform_device_add(pdev))
++ panic("platform_device_add() failed");
++ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
++ if (!__frame_ptr)
++ panic("kmalloc() failed");
++	/* align up to a 64-byte (cacheline) boundary - dequeue stashing
++	 * operates on whole cachelines */
++	frame_ptr = PTR_ALIGN(__frame_ptr, 64);
++ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
++ frame_ptr[loop] = lfsr;
++ lfsr = do_lfsr(lfsr);
++ }
++	frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
++			DMA_BIDIRECTIONAL);
++	if (dma_mapping_error(&pdev->dev, frame_dma))
++		panic("dma_map_single() failed");
++ platform_device_del(pdev);
++ platform_device_put(pdev);
++}
++
++static void deallocate_frame_data(void)
++{
++ kfree(__frame_ptr);
++}
++
++static inline void process_frame_data(struct hp_handler *handler,
++ const struct qm_fd *fd)
++{
++ u32 *p = handler->frame_ptr;
++ u32 lfsr = HP_FIRST_WORD;
++ int loop;
++ if (qm_fd_addr_get64(fd) != (handler->addr & 0xffffffffff)) {
++ pr_err("Got 0x%llx expected 0x%llx\n",
++ qm_fd_addr_get64(fd), handler->addr);
++ panic("bad frame address");
++ }
++ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
++ *p ^= handler->rx_mixer;
++ if (*p != lfsr)
++ panic("corrupt frame data");
++ *p ^= handler->tx_mixer;
++ lfsr = do_lfsr(lfsr);
++ }
++}
++
++static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dqrr)
++{
++ struct hp_handler *handler = (struct hp_handler *)fq;
++
++ process_frame_data(handler, &dqrr->fd);
++ if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
++ panic("qman_enqueue() failed");
++ return qman_cb_dqrr_consume;
++}
++
++static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dqrr)
++{
++ struct hp_handler *handler = (struct hp_handler *)fq;
++
++ process_frame_data(handler, &dqrr->fd);
++ if (++loop_counter < HP_LOOPS) {
++ if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
++ panic("qman_enqueue() failed");
++ } else {
++ pr_info("Received final (%dth) frame\n", loop_counter);
++ wake_up(&queue);
++ }
++ return qman_cb_dqrr_consume;
++}
++
++static void create_per_cpu_handlers(void)
++{
++ struct hp_handler *handler;
++ int loop;
++ struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
++
++ hp_cpu->processor_id = smp_processor_id();
++ spin_lock(&hp_lock);
++ list_add_tail(&hp_cpu->node, &hp_cpu_list);
++ hp_cpu_list_length++;
++ spin_unlock(&hp_lock);
++ INIT_LIST_HEAD(&hp_cpu->handlers);
++ for (loop = 0; loop < HP_PER_CPU; loop++) {
++ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
++ if (!handler)
++ panic("kmem_cache_alloc() failed");
++ handler->processor_id = hp_cpu->processor_id;
++ handler->addr = frame_dma;
++ handler->frame_ptr = frame_ptr;
++ list_add_tail(&handler->node, &hp_cpu->handlers);
++ }
++ put_cpu_var(hp_cpus);
++}
++
++static void destroy_per_cpu_handlers(void)
++{
++ struct list_head *loop, *tmp;
++ struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
++
++ spin_lock(&hp_lock);
++ list_del(&hp_cpu->node);
++ spin_unlock(&hp_lock);
++ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
++ u32 flags;
++ struct hp_handler *handler = list_entry(loop, struct hp_handler,
++ node);
++ if (qman_retire_fq(&handler->rx, &flags))
++ panic("qman_retire_fq(rx) failed");
++ BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
++ if (qman_oos_fq(&handler->rx))
++ panic("qman_oos_fq(rx) failed");
++ qman_destroy_fq(&handler->rx, 0);
++ qman_destroy_fq(&handler->tx, 0);
++ qman_release_fqid(handler->fqid_rx);
++ list_del(&handler->node);
++ kmem_cache_free(hp_handler_slab, handler);
++ }
++ put_cpu_var(hp_cpus);
++}
++
++static inline u8 num_cachelines(u32 offset)
++{
++ u8 res = (offset + (L1_CACHE_BYTES - 1))
++ / (L1_CACHE_BYTES);
++ if (res > 3)
++ return 3;
++ return res;
++}
++#define STASH_DATA_CL \
++ num_cachelines(HP_NUM_WORDS * 4)
++#define STASH_CTX_CL \
++ num_cachelines(offsetof(struct hp_handler, fqid_rx))
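++
++/* Worked example, assuming 64-byte cachelines: the frame data is
++ * HP_NUM_WORDS * 4 = 320 bytes, so num_cachelines() computes
++ * ceil(320 / 64) = 5 and then saturates at the 3-cacheline stashing
++ * maximum - only the first 192 bytes of frame data get stashed. The
++ * context stash similarly covers the hp_handler fields up to (but not
++ * including) 'fqid_rx', ie. exactly the fields marked above as stashed
++ * on dequeue. */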
++
++static void init_handler(void *__handler)
++{
++ struct qm_mcc_initfq opts;
++ struct hp_handler *handler = __handler;
++ BUG_ON(handler->processor_id != smp_processor_id());
++ /* Set up rx */
++ memset(&handler->rx, 0, sizeof(handler->rx));
++ if (handler == special_handler)
++ handler->rx.cb.dqrr = special_dqrr;
++ else
++ handler->rx.cb.dqrr = normal_dqrr;
++ if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
++ panic("qman_create_fq(rx) failed");
++ memset(&opts, 0, sizeof(opts));
++ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
++ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
++ opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
++ opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
++ if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
++ QMAN_INITFQ_FLAG_LOCAL, &opts))
++ panic("qman_init_fq(rx) failed");
++ /* Set up tx */
++ memset(&handler->tx, 0, sizeof(handler->tx));
++ if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
++ &handler->tx))
++ panic("qman_create_fq(tx) failed");
++}
++
++static void init_phase2(void)
++{
++ int loop;
++ u32 fqid = 0;
++ u32 lfsr = 0xdeadbeef;
++ struct hp_cpu *hp_cpu;
++ struct hp_handler *handler;
++
++ for (loop = 0; loop < HP_PER_CPU; loop++) {
++ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
++ int ret;
++ if (!loop)
++ hp_cpu->iterator = list_first_entry(
++ &hp_cpu->handlers,
++ struct hp_handler, node);
++ else
++ hp_cpu->iterator = list_entry(
++ hp_cpu->iterator->node.next,
++ struct hp_handler, node);
++ /* Rx FQID is the previous handler's Tx FQID */
++ hp_cpu->iterator->fqid_rx = fqid;
++ /* Allocate new FQID for Tx */
++ ret = qman_alloc_fqid(&fqid);
++ if (ret)
++ panic("qman_alloc_fqid() failed");
++ hp_cpu->iterator->fqid_tx = fqid;
++ /* Rx mixer is the previous handler's Tx mixer */
++ hp_cpu->iterator->rx_mixer = lfsr;
++ /* Get new mixer for Tx */
++ lfsr = do_lfsr(lfsr);
++ hp_cpu->iterator->tx_mixer = lfsr;
++ }
++ }
++ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
++ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
++ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
++ BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
++ handler->fqid_rx = fqid;
++ handler->rx_mixer = lfsr;
++ /* and tag it as our "special" handler */
++ special_handler = handler;
++}
++
++static void init_phase3(void)
++{
++ int loop;
++ struct hp_cpu *hp_cpu;
++
++ for (loop = 0; loop < HP_PER_CPU; loop++) {
++ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
++ if (!loop)
++ hp_cpu->iterator = list_first_entry(
++ &hp_cpu->handlers,
++ struct hp_handler, node);
++ else
++ hp_cpu->iterator = list_entry(
++ hp_cpu->iterator->node.next,
++ struct hp_handler, node);
++ preempt_disable();
++ if (hp_cpu->processor_id == smp_processor_id())
++ init_handler(hp_cpu->iterator);
++ else
++ smp_call_function_single(hp_cpu->processor_id,
++ init_handler, hp_cpu->iterator, 1);
++ preempt_enable();
++ }
++ }
++}
++
++static void send_first_frame(void *ignore)
++{
++ u32 *p = special_handler->frame_ptr;
++ u32 lfsr = HP_FIRST_WORD;
++ int loop;
++ struct qm_fd fd;
++
++ BUG_ON(special_handler->processor_id != smp_processor_id());
++ memset(&fd, 0, sizeof(fd));
++ qm_fd_addr_set64(&fd, special_handler->addr);
++ fd.format = qm_fd_contig_big;
++ fd.length29 = HP_NUM_WORDS * 4;
++ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
++ if (*p != lfsr)
++ panic("corrupt frame data");
++ *p ^= special_handler->tx_mixer;
++ lfsr = do_lfsr(lfsr);
++ }
++ pr_info("Sending first frame\n");
++ if (qman_enqueue(&special_handler->tx, &fd, 0))
++ panic("qman_enqueue() failed");
++}
++
++void qman_test_hotpotato(void)
++{
++ if (cpumask_weight(cpu_online_mask) < 2) {
++ pr_info("qman_test_hotpotato, skip - only 1 CPU\n");
++ return;
++ }
++
++ pr_info("qman_test_hotpotato starting\n");
++
++ hp_cpu_list_length = 0;
++ loop_counter = 0;
++ hp_handler_slab = kmem_cache_create("hp_handler_slab",
++ sizeof(struct hp_handler), L1_CACHE_BYTES,
++ SLAB_HWCACHE_ALIGN, NULL);
++ if (!hp_handler_slab)
++ panic("kmem_cache_create() failed");
++
++ allocate_frame_data();
++
++ /* Init phase 1 */
++ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
++ if (on_all_cpus(create_per_cpu_handlers))
++		panic("on_all_cpus() failed");
++ pr_info("Number of cpus: %d, total of %d handlers\n",
++ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
++
++ init_phase2();
++
++ init_phase3();
++
++ preempt_disable();
++ if (special_handler->processor_id == smp_processor_id())
++ send_first_frame(NULL);
++ else
++ smp_call_function_single(special_handler->processor_id,
++ send_first_frame, NULL, 1);
++ preempt_enable();
++
++ wait_event(queue, loop_counter == HP_LOOPS);
++ deallocate_frame_data();
++ if (on_all_cpus(destroy_per_cpu_handlers))
++		panic("on_all_cpus() failed");
++ kmem_cache_destroy(hp_handler_slab);
++ pr_info("qman_test_hotpotato finished\n");
++}
+diff --git a/drivers/staging/fsl_qbman/qman_utility.c b/drivers/staging/fsl_qbman/qman_utility.c
+new file mode 100644
+index 00000000..f1e39023
+--- /dev/null
++++ b/drivers/staging/fsl_qbman/qman_utility.c
+@@ -0,0 +1,129 @@
++/* Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qman_private.h"
++
++/* ----------------- */
++/* --- FQID Pool --- */
++
++struct qman_fqid_pool {
++ /* Base and size of the FQID range */
++ u32 fqid_base;
++ u32 total;
++ /* Number of FQIDs currently "allocated" */
++ u32 used;
++ /* Allocation optimisation. When 'used<total', it is the index of an
++ * available FQID. Otherwise there are no available FQIDs, and this
++ * will be set when the next deallocation occurs. */
++ u32 next;
++ /* A bit-field representation of the FQID range. */
++ unsigned long *bits;
++};
++
++#define QLONG_BYTES sizeof(unsigned long)
++#define QLONG_BITS (QLONG_BYTES * 8)
++/* Number of 'longs' required for the given number of bits */
++#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS)
++/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
++#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES)
++/* And in bits */
++#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS)
++
++struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
++{
++ struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
++ unsigned int i;
++
++ BUG_ON(!num);
++ if (!pool)
++ return NULL;
++ pool->fqid_base = fqid_start;
++ pool->total = num;
++ pool->used = 0;
++ pool->next = 0;
++ pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
++ if (!pool->bits) {
++ kfree(pool);
++ return NULL;
++ }
++ /* If num is not an even multiple of QLONG_BITS (or even 8, for
++ * byte-oriented searching) then we fill the trailing bits with 1, to
++ * make them look allocated (permanently). */
++	for (i = num; i < QNUM_BITS(num); i++)
++ set_bit(i, pool->bits);
++ return pool;
++}
++EXPORT_SYMBOL(qman_fqid_pool_create);
++
++int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
++{
++ int ret = pool->used;
++ kfree(pool->bits);
++ kfree(pool);
++ return ret;
++}
++EXPORT_SYMBOL(qman_fqid_pool_destroy);
++
++int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
++{
++ int ret;
++ if (pool->used == pool->total)
++ return -ENOMEM;
++ *fqid = pool->fqid_base + pool->next;
++ ret = test_and_set_bit(pool->next, pool->bits);
++ BUG_ON(ret);
++ if (++pool->used == pool->total)
++ return 0;
++ pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
++ if (pool->next >= pool->total)
++ pool->next = find_first_zero_bit(pool->bits, pool->total);
++ BUG_ON(pool->next >= pool->total);
++ return 0;
++}
++EXPORT_SYMBOL(qman_fqid_pool_alloc);
++
++void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
++{
++ int ret;
++
++ fqid -= pool->fqid_base;
++ ret = test_and_clear_bit(fqid, pool->bits);
++ BUG_ON(!ret);
++ if (pool->used-- == pool->total)
++ pool->next = fqid;
++}
++EXPORT_SYMBOL(qman_fqid_pool_free);
++
++u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
++{
++ return pool->used;
++}
++EXPORT_SYMBOL(qman_fqid_pool_used);
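++
++/* Minimal usage sketch for the FQID pool API above (illustrative only;
++ * the base/count values are arbitrary assumptions): */
++static __maybe_unused int fqid_pool_example(void)
++{
++	struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 32);
++	u32 fqid;
++	int ret;
++
++	if (!pool)
++		return -ENOMEM;
++	ret = qman_fqid_pool_alloc(pool, &fqid);
++	if (!ret)
++		qman_fqid_pool_free(pool, fqid);
++	/* destroy returns how many FQIDs were still allocated (0 here) */
++	return qman_fqid_pool_destroy(pool);
++}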
+diff --git a/include/linux/fsl_bman.h b/include/linux/fsl_bman.h
+new file mode 100644
+index 00000000..43942221
+--- /dev/null
++++ b/include/linux/fsl_bman.h
+@@ -0,0 +1,532 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef FSL_BMAN_H
++#define FSL_BMAN_H
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Last updated for v00.79 of the BG */
++
++/* Portal processing (interrupt) sources */
++#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
++#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */
++
++/* This wrapper represents a bit-array for the depletion state of the 64 Bman
++ * buffer pools. */
++struct bman_depletion {
++ u32 __state[2];
++};
++#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
++#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
++#define __bmdep_word(x) ((x) >> 5)
++#define __bmdep_shift(x) ((x) & 0x1f)
++#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
++static inline void bman_depletion_init(struct bman_depletion *c)
++{
++ c->__state[0] = c->__state[1] = 0;
++}
++static inline void bman_depletion_fill(struct bman_depletion *c)
++{
++ c->__state[0] = c->__state[1] = ~0;
++}
++static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
++{
++ return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
++}
++static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
++{
++ c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
++}
++static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
++{
++ c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
++}
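++
++/* Usage sketch: build a depletion mask that watches only pool 5 and test
++ * the bit again (the pool id is an arbitrary example): */
++static inline int bman_depletion_example(void)
++{
++	struct bman_depletion mask;
++
++	bman_depletion_init(&mask);
++	bman_depletion_set(&mask, 5);
++	return bman_depletion_get(&mask, 5) ? 0 : 1;
++}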
++
++/* ------------------------------------------------------- */
++/* --- Bman data structures (and associated constants) --- */
++
++/* Represents s/w corenet portal mapped data structures */
++struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
++struct bm_mc_command; /* MC (Management Command) command */
++struct bm_mc_result; /* MC result */
++
++/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
++ * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
++ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
++struct bm_buffer {
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 __reserved1;
++ u8 bpid;
++ u16 hi; /* High 16-bits of 48-bit address */
++ u32 lo; /* Low 32-bits of 48-bit address */
++#else
++ u32 lo;
++ u16 hi;
++ u8 bpid;
++			u8 __reserved1;
++#endif
++ };
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u64 __notaddress:16;
++ u64 addr:48;
++#else
++ u64 addr:48;
++ u64 __notaddress:16;
++#endif
++ };
++ u64 opaque;
++ };
++} __aligned(8);
++static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
++{
++ return buf->addr;
++}
++static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
++{
++ return (dma_addr_t)buf->addr;
++}
++/* Macro, so we compile better if 'v' isn't always 64-bit */
++#define bm_buffer_set64(buf, v) \
++ do { \
++ struct bm_buffer *__buf931 = (buf); \
++ __buf931->hi = upper_32_bits(v); \
++ __buf931->lo = lower_32_bits(v); \
++ } while (0)
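++
++/* Usage sketch: wrap a DMA address in a bm_buffer and read it back; the
++ * pool id is an arbitrary example and only matters for the multi-BPID
++ * release/acquire forms: */
++static inline int bm_buffer_example(struct bm_buffer *buf, dma_addr_t a)
++{
++	bm_buffer_set64(buf, a);
++	buf->bpid = 5;
++	return (bm_buffer_get64(buf) == ((u64)a & 0xffffffffffffULL)) ? 0 : 1;
++}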
++
++/* See 1.5.3.5.4: "Release Command" */
++struct bm_rcr_entry {
++ union {
++ struct {
++ u8 __dont_write_directly__verb;
++ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
++ u8 __reserved1[62];
++ };
++ struct bm_buffer bufs[8];
++ };
++} __packed;
++#define BM_RCR_VERB_VBIT 0x80
++#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
++#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
++#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
++#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
++
++/* See 1.5.3.1: "Acquire Command" */
++/* See 1.5.3.2: "Query Command" */
++struct bm_mcc_acquire {
++ u8 bpid;
++ u8 __reserved1[62];
++} __packed;
++struct bm_mcc_query {
++ u8 __reserved2[63];
++} __packed;
++struct bm_mc_command {
++ u8 __dont_write_directly__verb;
++ union {
++ struct bm_mcc_acquire acquire;
++ struct bm_mcc_query query;
++ };
++} __packed;
++#define BM_MCC_VERB_VBIT 0x80
++#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
++#define BM_MCC_VERB_CMD_ACQUIRE 0x10
++#define BM_MCC_VERB_CMD_QUERY 0x40
++#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
++
++/* See 1.5.3.3: "Acquire Response" */
++/* See 1.5.3.4: "Query Response" */
++struct bm_pool_state {
++ u8 __reserved1[32];
++ /* "availability state" and "depletion state" */
++ struct {
++ u8 __reserved1[8];
++ /* Access using bman_depletion_***() */
++ struct bman_depletion state;
++ } as, ds;
++};
++struct bm_mc_result {
++ union {
++ struct {
++ u8 verb;
++ u8 __reserved1[63];
++ };
++ union {
++ struct {
++ u8 __reserved1;
++ u8 bpid;
++ u8 __reserved2[62];
++ };
++ struct bm_buffer bufs[8];
++ } acquire;
++ struct bm_pool_state query;
++ };
++} __packed;
++#define BM_MCR_VERB_VBIT 0x80
++#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
++#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
++#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
++#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
++#define BM_MCR_VERB_CMD_ERR_ECC 0x70
++#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
++/* Determine the "availability state" of pool 'p' from a query result 'r' */
++#define BM_MCR_QUERY_AVAILABILITY(r, p) \
++ bman_depletion_get(&r->query.as.state, p)
++/* Determine the "depletion state" of pool 'p' from a query result 'r' */
++#define BM_MCR_QUERY_DEPLETION(r, p) \
++ bman_depletion_get(&r->query.ds.state, p)
++
++/*******************************************************************/
++/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
++/*******************************************************************/
++
++ /* Portal and Buffer Pools */
++ /* ----------------------- */
++/* Represents a managed portal */
++struct bman_portal;
++
++/* This object type represents Bman buffer pools. */
++struct bman_pool;
++
++struct bman_portal_config {
++ /* This is used for any "core-affine" portals, ie. default portals
++	 * associated with the corresponding cpu. -1 implies that there is no core
++ * affinity configured. */
++ int cpu;
++ /* portal interrupt line */
++ int irq;
++ /* the unique index of this portal */
++ u32 index;
++ /* Is this portal shared? (If so, it has coarser locking and demuxes
++ * processing on behalf of other CPUs.) */
++ int is_shared;
++ /* These are the buffer pool IDs that may be used via this portal. */
++ struct bman_depletion mask;
++};
++
++/* This callback type is used when handling pool depletion entry/exit. The
++ * 'cb_ctx' value is the opaque value associated with the pool object in
++ * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
++ * depletion-exit. */
++typedef void (*bman_cb_depletion)(struct bman_portal *bm,
++ struct bman_pool *pool, void *cb_ctx, int depleted);
++
++/* This struct specifies parameters for a bman_pool object. */
++struct bman_pool_params {
++ /* index of the buffer pool to encapsulate (0-63), ignored if
++ * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
++ u32 bpid;
++ /* bit-mask of BMAN_POOL_FLAG_*** options */
++ u32 flags;
++ /* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
++ bman_cb_depletion cb;
++ /* opaque user value passed as a parameter to 'cb' */
++ void *cb_ctx;
++ /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
++ * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
++ * when run in the control plane (which controls Bman CCSR). This array
++ * matches the definition of bm_pool_set(). */
++ u32 thresholds[4];
++};
++
++/* Flags to bman_new_pool() */
++#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
++#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
++#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */
++#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
++#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
++#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */
++
++/* Flags to bman_release() */
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */
++#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
++#endif
++#endif
++#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
++
++/* Flags to bman_acquire() */
++#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */
++
++ /* Portal Management */
++ /* ----------------- */
++/**
++ * bman_get_portal_config - get portal configuration settings
++ *
++ * This returns a read-only view of the current cpu's affine portal settings.
++ */
++const struct bman_portal_config *bman_get_portal_config(void);
++
++/**
++ * bman_irqsource_get - return the portal work that is interrupt-driven
++ *
++ * Returns a bitmask of BM_PIRQ_**I processing sources that are currently
++ * enabled for interrupt handling on the current cpu's affine portal. These
++ * sources will trigger the portal interrupt and the interrupt handler (or a
++ * tasklet/bottom-half it defers to) will perform the corresponding processing
++ * work. The bman_poll_***() functions will only process sources that are not in
++ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
++ * this always returns zero.
++ */
++u32 bman_irqsource_get(void);
++
++/**
++ * bman_irqsource_add - add processing sources to be interrupt-driven
++ * @bits: bitmask of BM_PIRQ_**I processing sources
++ *
++ * Adds processing sources that should be interrupt-driven (rather than
++ * processed via bman_poll_***() functions). Returns zero for success, or
++ * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
++int bman_irqsource_add(u32 bits);
++
++/**
++ * bman_irqsource_remove - remove processing sources from being interrupt-driven
++ * @bits: bitmask of BM_PIRQ_**I processing sources
++ *
++ * Removes processing sources from being interrupt-driven, so that they will
++ * instead be processed via bman_poll_***() functions. Returns zero for success,
++ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
++int bman_irqsource_remove(u32 bits);
++
++/**
++ * bman_affine_cpus - return a mask of cpus that have affine portals
++ */
++const cpumask_t *bman_affine_cpus(void);
++
++/**
++ * bman_poll_slow - process anything that isn't interrupt-driven.
++ *
++ * This function does any portal processing that isn't interrupt-driven. If the
++ * current CPU is sharing a portal hosted on another CPU, this function will
++ * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
++ * indicating what interrupt sources were actually processed by the call.
++ *
++ * NB, unlike the legacy wrapper bman_poll(), this function will
++ * deterministically check for the presence of portal processing work and do it,
++ * which implies some latency even if there's nothing to do. The bman_poll()
++ * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
++ * checking for (and doing) portal processing infrequently. Ie. such that
++ * qman_poll() and bman_poll() can be called from core-processing loops. Use
++ * bman_poll_slow() when you yourself are deciding when to incur the overhead of
++ * processing.
++ */
++u32 bman_poll_slow(void);
++
++/**
++ * bman_poll - process anything that isn't interrupt-driven.
++ *
++ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
++ * affine portal. This function does whatever processing is not triggered by
++ * interrupts. This is a legacy wrapper that can be used in core-processing
++ * loops but mitigates the performance overhead of portal processing by
++ * adaptively bypassing true portal processing most of the time. (Processing is
++ * done once every 10 calls if the previous processing revealed that work needed
++ * to be done, or once every 1000 calls if the previous processing revealed no
++ * work needed doing.) If you wish to control this yourself, call
++ * bman_poll_slow() instead, which always checks for portal processing work.
++ */
++void bman_poll(void);
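++
++/* Usage sketch - a core-processing loop that lets the affine portal do its
++ * maintenance opportunistically (do_fast_path_work() is a placeholder, not
++ * a real API):
++ *
++ *	while (!kthread_should_stop()) {
++ *		do_fast_path_work();
++ *		bman_poll();
++ *	}
++ */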
++
++/**
++ * bman_rcr_is_empty - Determine if portal's RCR is empty
++ *
++ * For use in situations where a cpu-affine caller needs to determine when all
++ * releases for the local portal have been processed by Bman but can't use the
++ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
++ * The function forces tracking of RCR consumption (which normally doesn't
++ * happen until release processing needs to find space to put new release
++ * commands), and returns zero if the ring still has unprocessed entries,
++ * non-zero if it is empty.
++ */
++int bman_rcr_is_empty(void);
++
++/**
++ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
++ * @result: is set by the API to the base BPID of the allocated range
++ * @count: the number of BPIDs required
++ * @align: required alignment of the allocated range
++ * @partial: non-zero if the API can return fewer than @count BPIDs
++ *
++ * Returns the number of buffer pools allocated, or a negative error code. If
++ * @partial is non-zero, the allocation request may return a smaller range of
++ * BPs than requested (though alignment will be as requested). If @partial is
++ * zero, the return value will either be 'count' or negative.
++ */
++int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
++static inline int bman_alloc_bpid(u32 *result)
++{
++ int ret = bman_alloc_bpid_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++
++/**
++ * bman_release_bpid_range - Release the specified range of buffer pool IDs
++ * @bpid: the base BPID of the range to deallocate
++ * @count: the number of BPIDs in the range
++ *
++ * This function can also be used to seed the allocator with ranges of BPIDs
++ * that it can subsequently allocate from.
++ */
++void bman_release_bpid_range(u32 bpid, unsigned int count);
++static inline void bman_release_bpid(u32 bpid)
++{
++ bman_release_bpid_range(bpid, 1);
++}
++
++int bman_reserve_bpid_range(u32 bpid, unsigned int count);
++static inline int bman_reserve_bpid(u32 bpid)
++{
++ return bman_reserve_bpid_range(bpid, 1);
++}
++
++void bman_seed_bpid_range(u32 bpid, unsigned int count);
++
++
++int bman_shutdown_pool(u32 bpid);
++
++ /* Pool management */
++ /* --------------- */
++/**
++ * bman_new_pool - Allocates a Buffer Pool object
++ * @params: parameters specifying the buffer pool ID and behaviour
++ *
++ * Creates a pool object for the given @params. A portal and the depletion
++ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
++ * is set. NB, the fields from @params are copied into the new pool object, so
++ * the structure provided by the caller can be released or reused after the
++ * function returns.
++ */
++struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
++
++/**
++ * bman_free_pool - Deallocates a Buffer Pool object
++ * @pool: the pool object to release
++ *
++ */
++void bman_free_pool(struct bman_pool *pool);
++
++/**
++ * bman_get_params - Returns a pool object's parameters.
++ * @pool: the pool object
++ *
++ * The returned pointer refers to state within the pool object so must not be
++ * modified and can no longer be read once the pool object is destroyed.
++ */
++const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
++
++/**
++ * bman_release - Release buffer(s) to the buffer pool
++ * @pool: the buffer pool object to release to
++ * @bufs: an array of buffers to release
++ * @num: the number of buffers in @bufs (1-8)
++ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
++ *
++ * Adds the given buffers to RCR entries. If the affine portal was created with
++ * the "COMPACT" flag, then it will be using a compaction algorithm to improve
++ * utilisation of RCR. As such, these buffers may join an existing ring entry
++ * and/or it may not be issued right away so as to allow future releases to join
++ * the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
++ * behaviour by committing the RCR entry (or entries) right away. If the RCR
++ * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
++ * is selected, in which case it will sleep waiting for space to become
++ * available in RCR. If the function receives a signal before such time (and
++ * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
++ * it returns zero.
++ */
++int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
++ u32 flags);
++
++/**
++ * bman_acquire - Acquire buffer(s) from a buffer pool
++ * @pool: the buffer pool object to acquire from
++ * @bufs: array for storing the acquired buffers
++ * @num: the number of buffers desired (@bufs is at least this big)
++ * @flags: bit-mask of BMAN_ACQUIRE_FLAG_*** options
++ *
++ * Issues an "Acquire" command via the portal's management command interface.
++ * The return value will be the number of buffers obtained from the pool, or a
++ * negative error code if a h/w error or pool starvation was encountered. In
++ * the latter case, the content of @bufs is undefined.
++ */
++int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
++ u32 flags);
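++
++/* Usage sketch: push one buffer into a pool and pull one back out. Error
++ * handling is minimal and 'pool' is assumed to come from bman_new_pool(): */
++static inline int bman_rel_acq_example(struct bman_pool *pool, dma_addr_t a)
++{
++	struct bm_buffer buf = { .opaque = 0 };
++	int ret;
++
++	bm_buffer_set64(&buf, a);
++	ret = bman_release(pool, &buf, 1, BMAN_RELEASE_FLAG_NOW);
++	if (ret)
++		return ret;
++	ret = bman_acquire(pool, &buf, 1, 0);
++	return (ret == 1) ? 0 : ret;
++}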
++
++/**
++ * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
++ * @pool: the buffer pool object the stockpile belongs to
++ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
++ *
++ * Adds stockpile buffers to RCR entries until the stockpile is empty.
++ * The return value will be a negative error code if a h/w error occurred.
++ * If BMAN_RELEASE_FLAG_NOW flag is passed and RCR ring is full,
++ * -EAGAIN will be returned.
++ */
++int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
++
++/**
++ * bman_query_pools - Query all buffer pool states
++ * @state: storage for the queried availability and depletion states
++ */
++int bman_query_pools(struct bm_pool_state *state);
++
++#ifdef CONFIG_FSL_BMAN_CONFIG
++/**
++ * bman_query_free_buffers - Query how many free buffers are in buffer pool
++ * @pool: the buffer pool object to query
++ *
++ * Return the number of the free buffers
++ */
++u32 bman_query_free_buffers(struct bman_pool *pool);
++
++/**
++ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
++ * @pool: the buffer pool object to which the thresholds will be set
++ * @thresholds: the new thresholds
++ */
++int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
++#endif
++
++/**
++ * The bman_p_***() variant below may be called even when the cpu that the
++ * portal is affine to is not yet online.
++ * @p specifies which portal the API will use.
++ */
++int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* FSL_BMAN_H */
+diff --git a/include/linux/fsl_qman.h b/include/linux/fsl_qman.h
+new file mode 100644
+index 00000000..4e4b21d5
+--- /dev/null
++++ b/include/linux/fsl_qman.h
+@@ -0,0 +1,3888 @@
++/* Copyright 2008-2012 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef FSL_QMAN_H
++#define FSL_QMAN_H
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Last updated for v00.800 of the BG */
++
++/* Hardware constants */
++#define QM_CHANNEL_SWPORTAL0 0
++#define QMAN_CHANNEL_POOL1 0x21
++#define QMAN_CHANNEL_CAAM 0x80
++#define QMAN_CHANNEL_PME 0xa0
++#define QMAN_CHANNEL_POOL1_REV3 0x401
++#define QMAN_CHANNEL_CAAM_REV3 0x840
++#define QMAN_CHANNEL_PME_REV3 0x860
++#define QMAN_CHANNEL_DCE 0x8a0
++#define QMAN_CHANNEL_DCE_QMANREV312 0x880
++extern u16 qm_channel_pool1;
++extern u16 qm_channel_caam;
++extern u16 qm_channel_pme;
++extern u16 qm_channel_dce;
++enum qm_dc_portal {
++ qm_dc_portal_fman0 = 0,
++ qm_dc_portal_fman1 = 1,
++ qm_dc_portal_caam = 2,
++ qm_dc_portal_pme = 3,
++ qm_dc_portal_rman = 4,
++ qm_dc_portal_dce = 5
++};
++
++/* Portal processing (interrupt) sources */
++#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
++#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
++#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
++#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
++#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
++#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
++/* This mask contains all the interrupt sources that need handling except DQRI,
++ * ie. that if present should trigger slow-path processing. */
++#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
++ QM_PIRQ_MRI | QM_PIRQ_CCSCI)
++
++/* --- Clock speed --- */
++/* A qman driver instance may or may not know the current qman clock speed.
++ * However, certain CEETM calculations may not be possible if this is not known.
++ * The 'set' function will only succeed (return zero) if the driver did not
++ * already know the clock speed. Likewise, the 'get' function will only succeed
++ * if the driver does know the clock speed (either because it knew when booting,
++ * or was told via 'set'). In cases where software is running on a driver
++ * instance that does not know the clock speed (eg. on a hypervised data-plane),
++ * and the user can obtain the current qman clock speed by other means (eg. from
++ * a message sent from the control-plane), then the 'set' function can be used
++ * to enable rate-calculations in a driver where it would otherwise not be
++ * possible. */
++int qm_get_clock(u64 *clock_hz);
++int qm_set_clock(u64 clock_hz);
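++
++/* Usage sketch: a data-plane instance that was not booted with knowledge of
++ * the clock can be seeded once the value is learned by other means (the
++ * 600MHz figure is an arbitrary example): */
++static inline int qm_clock_example(void)
++{
++	u64 hz;
++
++	if (qm_get_clock(&hz))
++		return qm_set_clock(600000000);
++	return 0;
++}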
++
++/* For qman_static_dequeue_*** APIs */
++#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
++/* for n in [1,15] */
++#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
++/* for conversion from n of qm_channel */
++static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
++{
++ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
++}
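++
++/* Worked example: for the first pool channel (channel == qm_channel_pool1),
++ * the conversion yields n = 1, ie. QM_SDQCR_CHANNELS_POOL(1) == 0x4000. */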
++
++/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
++ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
++ * FQID(n) to fill in the frame queue ID. */
++#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
++#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
++#define QM_VDQCR_EXACT 0x40000000
++#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
++#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
++#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
++#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
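++
++/* Example 'vdqcr' composition for qman_volatile_dequeue(): request exactly
++ * 3 frames, yielding precedence to SDQCR-initiated dequeues:
++ *
++ *	QM_VDQCR_PRECEDENCE_SDQCR | QM_VDQCR_EXACT | QM_VDQCR_NUMFRAMES_SET(3)
++ */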
++
++
++/* ------------------------------------------------------- */
++/* --- Qman data structures (and associated constants) --- */
++
++/* Represents s/w corenet portal mapped data structures */
++struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
++struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
++struct qm_mr_entry; /* MR (Message Ring) entries */
++struct qm_mc_command; /* MC (Management Command) command */
++struct qm_mc_result; /* MC result */
++
++/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */
++#define QM_FD_FORMAT_SG 0x4
++#define QM_FD_FORMAT_LONG 0x2
++#define QM_FD_FORMAT_COMPOUND 0x1
++enum qm_fd_format {
++ /* 'contig' implies a contiguous buffer, whereas 'sg' implies a
++ * scatter-gather table. 'big' implies a 29-bit length with no offset
++ * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
++ * implies a s/g-like table, where each entry itself represents a frame
++ * (contiguous or scatter-gather) and the 29-bit "length" is
++ * interpreted purely for congestion calculations, ie. a "congestion
++ * weight". */
++ qm_fd_contig = 0,
++ qm_fd_contig_big = QM_FD_FORMAT_LONG,
++ qm_fd_sg = QM_FD_FORMAT_SG,
++ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
++ qm_fd_compound = QM_FD_FORMAT_COMPOUND
++};
++
++/* Capitalised versions are un-typed but can be used in static expressions */
++#define QM_FD_CONTIG 0
++#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
++#define QM_FD_SG QM_FD_FORMAT_SG
++#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
++#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
++
++/* See 1.5.1.1: "Frame Descriptor (FD)" */
++struct qm_fd {
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 dd:2; /* dynamic debug */
++ u8 liodn_offset:6;
++ u8 bpid:8; /* Buffer Pool ID */
++ u8 eliodn_offset:4;
++ u8 __reserved:4;
++ u8 addr_hi; /* high 8-bits of 40-bit address */
++ u32 addr_lo; /* low 32-bits of 40-bit address */
++#else
++ u32 addr_lo; /* low 32-bits of 40-bit address */
++ u8 addr_hi; /* high 8-bits of 40-bit address */
++ u8 __reserved:4;
++ u8 eliodn_offset:4;
++ u8 bpid:8; /* Buffer Pool ID */
++ u8 liodn_offset:6;
++ u8 dd:2; /* dynamic debug */
++#endif
++ };
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u64 __notaddress:24;
++ u64 addr:40;
++#else
++ u64 addr:40;
++ u64 __notaddress:24;
++#endif
++ };
++ u64 opaque_addr;
++ };
++ /* The 'format' field indicates the interpretation of the remaining 29
++ * bits of the 32-bit word. For packing reasons, it is duplicated in the
++ * other union elements. Note, union'd structs are difficult to use with
++ * static initialisation under gcc, in which case use the "opaque" form
++ * with one of the macros. */
++ union {
++ /* For easier/faster copying of this part of the fd (eg. from a
++ * DQRR entry to an EQCR entry) copy 'opaque' */
++ u32 opaque;
++ /* If 'format' is _contig or _sg, 20b length and 9b offset */
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ enum qm_fd_format format:3;
++ u16 offset:9;
++ u32 length20:20;
++#else
++ u32 length20:20;
++ u16 offset:9;
++ enum qm_fd_format format:3;
++#endif
++ };
++ /* If 'format' is _contig_big or _sg_big, 29b length */
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ enum qm_fd_format _format1:3;
++ u32 length29:29;
++#else
++ u32 length29:29;
++ enum qm_fd_format _format1:3;
++#endif
++ };
++ /* If 'format' is _compound, 29b "congestion weight" */
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ enum qm_fd_format _format2:3;
++ u32 cong_weight:29;
++#else
++ u32 cong_weight:29;
++ enum qm_fd_format _format2:3;
++#endif
++ };
++ };
++ union {
++ u32 cmd;
++ u32 status;
++ };
++} __aligned(8);
++#define QM_FD_DD_NULL 0x00
++#define QM_FD_PID_MASK 0x3f
++static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
++{
++ return fd->addr;
++}
++
++static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
++{
++ return (dma_addr_t)fd->addr;
++}
++/* Macro, so we compile better if 'v' isn't always 64-bit */
++#define qm_fd_addr_set64(fd, v) \
++ do { \
++ struct qm_fd *__fd931 = (fd); \
++ __fd931->addr = v; \
++ } while (0)
++
++/* For static initialisation of FDs (which is complicated by the use of unions
++ * in "struct qm_fd"), use the following macros. Note that;
++ * - 'dd', 'pid' and 'bpid' are ignored because there's no static initialisation
++ * use-case),
++ * - use capitalised QM_FD_*** formats for static initialisation.
++ */
++#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \
++ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
++ { (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \
++ { cmd } }
++#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \
++ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
++ { (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \
++ { cmd } }
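++
++/* Example static initialisation - a contiguous "big" FD of 64 bytes at the
++ * (hypothetical) 40-bit address 0x08_12345678, with no command word:
++ *
++ *	struct qm_fd fd = QM_FD_FMT_29(0, 0x08, 0x12345678,
++ *				       QM_FD_CONTIG_BIG, 64);
++ */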
++
++/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */
++#define QM_SG_OFFSET_MASK 0x1FFF
++struct qm_sg_entry {
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 __reserved1[3];
++ u8 addr_hi; /* high 8-bits of 40-bit address */
++ u32 addr_lo; /* low 32-bits of 40-bit address */
++#else
++ u32 addr_lo; /* low 32-bits of 40-bit address */
++ u8 addr_hi; /* high 8-bits of 40-bit address */
++ u8 __reserved1[3];
++#endif
++ };
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u64 __notaddress:24;
++ u64 addr:40;
++#else
++ u64 addr:40;
++ u64 __notaddress:24;
++#endif
++ };
++ u64 opaque;
++ };
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 extension:1; /* Extension bit */
++ u32 final:1; /* Final bit */
++ u32 length:30;
++#else
++ u32 length:30;
++ u32 final:1; /* Final bit */
++ u32 extension:1; /* Extension bit */
++#endif
++ };
++ u32 sgt_efl;
++ };
++ u8 __reserved2;
++ u8 bpid;
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u16 __reserved3:3;
++ u16 offset:13;
++#else
++ u16 offset:13;
++ u16 __reserved3:3;
++#endif
++ };
++ u16 opaque_offset;
++ };
++} __packed;
++union qm_sg_efl {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 extension:1; /* Extension bit */
++ u32 final:1; /* Final bit */
++ u32 length:30;
++#else
++ u32 length:30;
++ u32 final:1; /* Final bit */
++ u32 extension:1; /* Extension bit */
++#endif
++ };
++ u32 efl;
++};
++static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
++{
++ return (dma_addr_t)be64_to_cpu(sg->opaque) & 0xffffffffffULL;
++}
++static inline u8 qm_sg_entry_get_ext(const struct qm_sg_entry *sg)
++{
++ union qm_sg_efl u;
++
++ u.efl = be32_to_cpu(sg->sgt_efl);
++ return u.extension;
++}
++static inline u8 qm_sg_entry_get_final(const struct qm_sg_entry *sg)
++{
++ union qm_sg_efl u;
++
++ u.efl = be32_to_cpu(sg->sgt_efl);
++ return u.final;
++}
++static inline u32 qm_sg_entry_get_len(const struct qm_sg_entry *sg)
++{
++ union qm_sg_efl u;
++
++ u.efl = be32_to_cpu(sg->sgt_efl);
++ return u.length;
++}
++static inline u8 qm_sg_entry_get_bpid(const struct qm_sg_entry *sg)
++{
++ return sg->bpid;
++}
++static inline u16 qm_sg_entry_get_offset(const struct qm_sg_entry *sg)
++{
++ u32 opaque_offset = be16_to_cpu(sg->opaque_offset);
++
++ return opaque_offset & 0x1fff;
++}
++
++/* Macro, so we compile better if 'v' isn't always 64-bit */
++#define qm_sg_entry_set64(sg, v) \
++ do { \
++ struct qm_sg_entry *__sg931 = (sg); \
++ __sg931->opaque = cpu_to_be64(v); \
++ } while (0)
++#define qm_sg_entry_set_ext(sg, v) \
++ do { \
++ union qm_sg_efl __u932; \
++ __u932.efl = be32_to_cpu((sg)->sgt_efl); \
++ __u932.extension = v; \
++ (sg)->sgt_efl = cpu_to_be32(__u932.efl); \
++ } while (0)
++#define qm_sg_entry_set_final(sg, v) \
++ do { \
++ union qm_sg_efl __u933; \
++ __u933.efl = be32_to_cpu((sg)->sgt_efl); \
++ __u933.final = v; \
++ (sg)->sgt_efl = cpu_to_be32(__u933.efl); \
++ } while (0)
++#define qm_sg_entry_set_len(sg, v) \
++ do { \
++ union qm_sg_efl __u934; \
++ __u934.efl = be32_to_cpu((sg)->sgt_efl); \
++ __u934.length = v; \
++ (sg)->sgt_efl = cpu_to_be32(__u934.efl); \
++ } while (0)
++#define qm_sg_entry_set_bpid(sg, v) \
++ do { \
++ struct qm_sg_entry *__u935 = (sg); \
++ __u935->bpid = v; \
++ } while (0)
++#define qm_sg_entry_set_offset(sg, v) \
++ do { \
++ struct qm_sg_entry *__u936 = (sg); \
++ __u936->opaque_offset = cpu_to_be16(v); \
++ } while (0)
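++
++/* Usage sketch: fill a two-entry scatter/gather table using the setters
++ * above (addresses/lengths are illustrative; the table itself must live in
++ * DMA-visible memory): */
++static inline void qm_sg_example(struct qm_sg_entry *sgt,
++				 dma_addr_t a0, u32 l0,
++				 dma_addr_t a1, u32 l1)
++{
++	/* zero both entries so the read-modify-write setters below start
++	 * from a known state */
++	memset(sgt, 0, 2 * sizeof(*sgt));
++	qm_sg_entry_set64(&sgt[0], a0);
++	qm_sg_entry_set_len(&sgt[0], l0);
++	qm_sg_entry_set64(&sgt[1], a1);
++	qm_sg_entry_set_len(&sgt[1], l1);
++	/* mark the last entry so h/w stops walking the table */
++	qm_sg_entry_set_final(&sgt[1], 1);
++}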
++
++/* See 1.5.8.1: "Enqueue Command" */
++struct qm_eqcr_entry {
++ u8 __dont_write_directly__verb;
++ u8 dca;
++ u16 seqnum;
++ u32 orp; /* 24-bit */
++ u32 fqid; /* 24-bit */
++ u32 tag;
++ struct qm_fd fd;
++ u8 __reserved3[32];
++} __packed;
++#define QM_EQCR_VERB_VBIT 0x80
++#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
++#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
++#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
++#define QM_EQCR_VERB_COLOUR_GREEN 0x00
++#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
++#define QM_EQCR_VERB_COLOUR_RED 0x10
++#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
++#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
++#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
++#define QM_EQCR_DCA_ENABLE 0x80
++#define QM_EQCR_DCA_PARK 0x40
++#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
++#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
++#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
++#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
++#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
++
++/* See 1.5.8.2: "Frame Dequeue Response" */
++struct qm_dqrr_entry {
++ u8 verb;
++ u8 stat;
++ u16 seqnum; /* 15-bit */
++ u8 tok;
++ u8 __reserved2[3];
++ u32 fqid; /* 24-bit */
++ u32 contextB;
++ struct qm_fd fd;
++ u8 __reserved4[32];
++};
++#define QM_DQRR_VERB_VBIT 0x80
++#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
++#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
++#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
++#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
++#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
++#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
++#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
++#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
++
++/* See 1.5.8.3: "ERN Message Response" */
++/* See 1.5.8.4: "FQ State Change Notification" */
++struct qm_mr_entry {
++ u8 verb;
++ union {
++ struct {
++ u8 dca;
++ u16 seqnum;
++ u8 rc; /* Rejection Code */
++ u32 orp:24;
++ u32 fqid; /* 24-bit */
++ u32 tag;
++ struct qm_fd fd;
++ } __packed ern;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
++ u8 __reserved1:3;
++ enum qm_dc_portal portal:3;
++#else
++ enum qm_dc_portal portal:3;
++ u8 __reserved1:3;
++ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
++#endif
++ u16 __reserved2;
++ u8 rc; /* Rejection Code */
++ u32 __reserved3:24;
++ u32 fqid; /* 24-bit */
++ u32 tag;
++ struct qm_fd fd;
++ } __packed dcern;
++ struct {
++ u8 fqs; /* Frame Queue Status */
++ u8 __reserved1[6];
++ u32 fqid; /* 24-bit */
++ u32 contextB;
++ u8 __reserved2[16];
++ } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
++ };
++ u8 __reserved2[32];
++} __packed;
++#define QM_MR_VERB_VBIT 0x80
++/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs
++ * originating from direct-connect portals ("dcern") use 0x20 as a verb which
++ * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from
++ * the other MR types by noting if the 0x20 bit is unset. */
++#define QM_MR_VERB_TYPE_MASK 0x27
++#define QM_MR_VERB_DC_ERN 0x20
++#define QM_MR_VERB_FQRN 0x21
++#define QM_MR_VERB_FQRNI 0x22
++#define QM_MR_VERB_FQRL 0x23
++#define QM_MR_VERB_FQPN 0x24
++#define QM_MR_RC_MASK 0xf0 /* contains one of; */
++#define QM_MR_RC_CGR_TAILDROP 0x00
++#define QM_MR_RC_WRED 0x10
++#define QM_MR_RC_ERROR 0x20
++#define QM_MR_RC_ORPWINDOW_EARLY 0x30
++#define QM_MR_RC_ORPWINDOW_LATE 0x40
++#define QM_MR_RC_FQ_TAILDROP 0x50
++#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
++#define QM_MR_RC_ORP_ZERO 0x70
++#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
++#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
++#define QM_MR_DCERN_COLOUR_GREEN 0x00
++#define QM_MR_DCERN_COLOUR_YELLOW 0x01
++#define QM_MR_DCERN_COLOUR_RED 0x02
++#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
++
++/* An identical structure of FQD fields is present in the "Init FQ" command and
++ * the "Query FQ" result, so it is factored out into the "struct qm_fqd" type.
++ * Within that, the 'stashing' and 'taildrop' pieces are also factored out; the
++ * latter has two inlines to assist with converting to/from the mant+exp
++ * representation. */
++struct qm_fqd_stashing {
++ /* See QM_STASHING_EXCL_<...> */
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 exclusive;
++ u8 __reserved1:2;
++ /* Numbers of cachelines */
++ u8 annotation_cl:2;
++ u8 data_cl:2;
++ u8 context_cl:2;
++#else
++ u8 context_cl:2;
++ u8 data_cl:2;
++ u8 annotation_cl:2;
++ u8 __reserved1:2;
++ u8 exclusive;
++#endif
++} __packed;
++struct qm_fqd_taildrop {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u16 __reserved1:3;
++ u16 mant:8;
++ u16 exp:5;
++#else
++ u16 exp:5;
++ u16 mant:8;
++ u16 __reserved1:3;
++#endif
++} __packed;
++struct qm_fqd_oac {
++ /* See QM_OAC_<...> */
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 oac:2; /* "Overhead Accounting Control" */
++ u8 __reserved1:6;
++#else
++ u8 __reserved1:6;
++ u8 oac:2; /* "Overhead Accounting Control" */
++#endif
++ /* Two's-complement value (-128 to +127) */
++ signed char oal; /* "Overhead Accounting Length" */
++} __packed;
++struct qm_fqd {
++ union {
++ u8 orpc;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 __reserved1:2;
++ u8 orprws:3;
++ u8 oa:1;
++ u8 olws:2;
++#else
++ u8 olws:2;
++ u8 oa:1;
++ u8 orprws:3;
++ u8 __reserved1:2;
++#endif
++ } __packed;
++ };
++ u8 cgid;
++ u16 fq_ctrl; /* See QM_FQCTRL_<...> */
++ union {
++ u16 dest_wq;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u16 channel:13; /* qm_channel */
++ u16 wq:3;
++#else
++ u16 wq:3;
++ u16 channel:13; /* qm_channel */
++#endif
++ } __packed dest;
++ };
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u16 __reserved2:1;
++ u16 ics_cred:15;
++#else
++ u16 ics_cred:15;
++ u16 __reserved2:1;
++#endif
++ /* For "Initialize Frame Queue" commands, the write-enable mask
++ * determines whether 'td' or 'oac_init' is observed. For query
++ * commands, this field is always 'td', and 'oac_query' (below) reflects
++ * the Overhead ACcounting values. */
++ union {
++ struct qm_fqd_taildrop td;
++ struct qm_fqd_oac oac_init;
++ };
++ u32 context_b;
++ union {
++ /* Treat it as 64-bit opaque */
++ u64 opaque;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 hi;
++ u32 lo;
++#else
++ u32 lo;
++ u32 hi;
++#endif
++ };
++ /* Treat it as s/w portal stashing config */
++ /* See 1.5.6.7.1: "FQD Context_A field used for [...] */
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ struct qm_fqd_stashing stashing;
++ /* 48-bit address of FQ context to
++ * stash, must be cacheline-aligned */
++ u16 context_hi;
++ u32 context_lo;
++#else
++ u32 context_lo;
++ u16 context_hi;
++ struct qm_fqd_stashing stashing;
++#endif
++ } __packed;
++ } context_a;
++ struct qm_fqd_oac oac_query;
++} __packed;
++/* 64-bit converters for context_hi/lo */
++static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
++{
++ return ((u64)fqd->context_a.context_hi << 32) |
++ (u64)fqd->context_a.context_lo;
++}
++static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
++{
++ return (dma_addr_t)qm_fqd_stashing_get64(fqd);
++}
++static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
++{
++ return ((u64)fqd->context_a.hi << 32) |
++ (u64)fqd->context_a.lo;
++}
++/* Macro, so we compile better when 'v' isn't necessarily 64-bit */
++#define qm_fqd_stashing_set64(fqd, v) \
++ do { \
++ struct qm_fqd *__fqd931 = (fqd); \
++ __fqd931->context_a.context_hi = upper_32_bits(v); \
++ __fqd931->context_a.context_lo = lower_32_bits(v); \
++ } while (0)
++#define qm_fqd_context_a_set64(fqd, v) \
++ do { \
++ struct qm_fqd *__fqd931 = (fqd); \
++ __fqd931->context_a.hi = upper_32_bits(v); \
++ __fqd931->context_a.lo = lower_32_bits(v); \
++ } while (0)
++/* convert a threshold value into mant+exp representation */
++static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
++ int roundup)
++{
++ u32 e = 0;
++ int oddbit = 0;
++ if (val > 0xe0000000)
++ return -ERANGE;
++ while (val > 0xff) {
++ oddbit = val & 1;
++ val >>= 1;
++ e++;
++ if (roundup && oddbit)
++ val++;
++ }
++ td->exp = e;
++ td->mant = val;
++ return 0;
++}
++/* and the other direction */
++static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
++{
++ return (u32)td->mant << td->exp;
++}
++
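++/* Illustrative sketch: mant+exp round-trip using the two helpers above. A
++ * threshold that has no exact 8-bit-mantissa representation is rounded up to
++ * the next representable value when 'roundup' is set (values hypothetical);
++ *
++ *	struct qm_fqd_taildrop td;
++ *
++ *	qm_fqd_taildrop_set(&td, 1001, 1); // stores mant=251, exp=2
++ *	qm_fqd_taildrop_get(&td); // returns 251 << 2 = 1004
++ */
++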
++/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */
++/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
++#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
++#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
++#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
++#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
++#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
++#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
++#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
++#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
++#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
++#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
++#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
++
++/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
++/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
++#define QM_STASHING_EXCL_ANNOTATION 0x04
++#define QM_STASHING_EXCL_DATA 0x02
++#define QM_STASHING_EXCL_CTX 0x01
++
++/* See 1.5.5.3: "Intra Class Scheduling" */
++/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
++#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
++#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
++
++/* See 1.5.8.4: "FQ State Change Notification" */
++/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
++ * and associated commands/responses. The WRED parameters are calculated from
++ * these fields as follows;
++ * MaxTH = MA * (2 ^ Mn)
++ * Slope = SA / (2 ^ Sn)
++ * MaxP = 4 * (Pn + 1)
++ */
++struct qm_cgr_wr_parm {
++ union {
++ u32 word;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 MA:8;
++ u32 Mn:5;
++ u32 SA:7; /* must be between 64-127 */
++ u32 Sn:6;
++ u32 Pn:6;
++#else
++ u32 Pn:6;
++ u32 Sn:6;
++ u32 SA:7; /* must be between 64-127 */
++ u32 Mn:5;
++ u32 MA:8;
++#endif
++ } __packed;
++ };
++} __packed;
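++/* Worked example (illustrative) of the WR_PARM decode above: MA = 64, Mn = 8,
++ * SA = 64, Sn = 8, Pn = 3 gives MaxTH = 64 * (2 ^ 8) = 16384,
++ * Slope = 64 / (2 ^ 8) = 0.25 and MaxP = 4 * (3 + 1) = 16. */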
++/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
++ * management commands, this is padded to a 16-bit structure field, so that's
++ * how we represent it here. The congestion state threshold is calculated from
++ * these fields as follows;
++ * CS threshold = TA * (2 ^ Tn)
++ */
++struct qm_cgr_cs_thres {
++ union {
++ u16 hword;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u16 __reserved:3;
++ u16 TA:8;
++ u16 Tn:5;
++#else
++ u16 Tn:5;
++ u16 TA:8;
++ u16 __reserved:3;
++#endif
++ } __packed;
++ };
++} __packed;
++/* This identical structure of CGR fields is present in the "Init/Modify CGR"
++ * commands and the "Query CGR" result. It's suctioned out here into its own
++ * struct. */
++struct __qm_mc_cgr {
++ struct qm_cgr_wr_parm wr_parm_g;
++ struct qm_cgr_wr_parm wr_parm_y;
++ struct qm_cgr_wr_parm wr_parm_r;
++ u8 wr_en_g; /* boolean, use QM_CGR_EN */
++ u8 wr_en_y; /* boolean, use QM_CGR_EN */
++ u8 wr_en_r; /* boolean, use QM_CGR_EN */
++ u8 cscn_en; /* boolean, use QM_CGR_EN */
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
++ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
++#else
++ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
++ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
++#endif
++ };
++ u32 cscn_targ; /* use QM_CGR_TARG_* */
++ };
++ u8 cstd_en; /* boolean, use QM_CGR_EN */
++ u8 cs; /* boolean, only used in query response */
++ union {
++ /* use qm_cgr_cs_thres_set64() */
++ struct qm_cgr_cs_thres cs_thres;
++ u16 __cs_thres;
++ };
++ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
++} __packed;
++#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
++#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit */
++#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
++#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
++#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
++#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
++/* Convert CGR thresholds to/from "cs_thres" format */
++static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
++{
++ return (u64)th->TA << th->Tn;
++}
++static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
++ int roundup)
++{
++ u32 e = 0;
++ int oddbit = 0;
++ while (val > 0xff) {
++ oddbit = val & 1;
++ val >>= 1;
++ e++;
++ if (roundup && oddbit)
++ val++;
++ }
++ th->Tn = e;
++ th->TA = val;
++ return 0;
++}
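++/* Illustrative sketch: encoding a 1MB congestion-state threshold. 1048576
++ * (2 ^ 20) halves down to 128 after 13 shifts with no rounding, so the
++ * helper above stores TA = 128, Tn = 13 and qm_cgr_cs_thres_get64() returns
++ * exactly 1048576. */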
++
++/* See 1.5.8.5.1: "Initialize FQ" */
++/* See 1.5.8.5.2: "Query FQ" */
++/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
++/* See 1.5.8.5.4: "Alter FQ State Commands" */
++/* See 1.5.8.6.1: "Initialize/Modify CGR" */
++/* See 1.5.8.6.2: "CGR Test Write" */
++/* See 1.5.8.6.3: "Query CGR" */
++/* See 1.5.8.6.4: "Query Congestion Group State" */
++struct qm_mcc_initfq {
++ u8 __reserved1;
++ u16 we_mask; /* Write Enable Mask */
++ u32 fqid; /* 24-bit */
++ u16 count; /* Initialises 'count+1' FQDs */
++ struct qm_fqd fqd; /* the FQD fields go here */
++ u8 __reserved3[30];
++} __packed;
++struct qm_mcc_queryfq {
++ u8 __reserved1[3];
++ u32 fqid; /* 24-bit */
++ u8 __reserved2[56];
++} __packed;
++struct qm_mcc_queryfq_np {
++ u8 __reserved1[3];
++ u32 fqid; /* 24-bit */
++ u8 __reserved2[56];
++} __packed;
++struct qm_mcc_alterfq {
++ u8 __reserved1[3];
++ u32 fqid; /* 24-bit */
++ u8 __reserved2;
++ u8 count; /* number of consecutive FQID */
++ u8 __reserved3[10];
++ u32 context_b; /* frame queue context b */
++ u8 __reserved4[40];
++} __packed;
++struct qm_mcc_initcgr {
++ u8 __reserved1;
++ u16 we_mask; /* Write Enable Mask */
++ struct __qm_mc_cgr cgr; /* CGR fields */
++ u8 __reserved2[2];
++ u8 cgid;
++ u8 __reserved4[32];
++} __packed;
++struct qm_mcc_cgrtestwrite {
++ u8 __reserved1[2];
++ u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
++ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
++ u8 __reserved2[23];
++ u8 cgid;
++ u8 __reserved3[32];
++} __packed;
++struct qm_mcc_querycgr {
++ u8 __reserved1[30];
++ u8 cgid;
++ u8 __reserved2[32];
++} __packed;
++struct qm_mcc_querycongestion {
++ u8 __reserved[63];
++} __packed;
++struct qm_mcc_querywq {
++ u8 __reserved;
++ /* select channel if verb != QUERYWQ_DEDICATED */
++ union {
++ u16 channel_wq; /* ignores wq (3 lsbits) */
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u16 id:13; /* qm_channel */
++ u16 __reserved1:3;
++#else
++ u16 __reserved1:3;
++ u16 id:13; /* qm_channel */
++#endif
++ } __packed channel;
++ };
++ u8 __reserved2[60];
++} __packed;
++
++struct qm_mcc_ceetm_lfqmt_config {
++ u8 __reserved1[4];
++ u32 lfqid:24;
++ u8 __reserved2[2];
++ u16 cqid;
++ u8 __reserved3[2];
++ u16 dctidx;
++ u8 __reserved4[48];
++} __packed;
++
++struct qm_mcc_ceetm_lfqmt_query {
++ u8 __reserved1[4];
++ u32 lfqid:24;
++ u8 __reserved2[56];
++} __packed;
++
++struct qm_mcc_ceetm_cq_config {
++ u8 __reserved1;
++ u16 cqid;
++ u8 dcpid;
++ u8 __reserved2;
++ u16 ccgid;
++ u8 __reserved3[56];
++} __packed;
++
++struct qm_mcc_ceetm_cq_query {
++ u8 __reserved1;
++ u16 cqid;
++ u8 dcpid;
++ u8 __reserved2[59];
++} __packed;
++
++struct qm_mcc_ceetm_dct_config {
++ u8 __reserved1;
++ u16 dctidx;
++ u8 dcpid;
++ u8 __reserved2[15];
++ u32 context_b;
++ u64 context_a;
++ u8 __reserved3[32];
++} __packed;
++
++struct qm_mcc_ceetm_dct_query {
++ u8 __reserved1;
++ u16 dctidx;
++ u8 dcpid;
++ u8 __reserved2[59];
++} __packed;
++
++struct qm_mcc_ceetm_class_scheduler_config {
++ u8 __reserved1;
++ u16 cqcid;
++ u8 dcpid;
++ u8 __reserved2[6];
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 gpc_reserved:1;
++ u8 gpc_combine_flag:1;
++ u8 gpc_prio_b:3;
++ u8 gpc_prio_a:3;
++#else
++ u8 gpc_prio_a:3;
++ u8 gpc_prio_b:3;
++ u8 gpc_combine_flag:1;
++ u8 gpc_reserved:1;
++#endif
++ u16 crem;
++ u16 erem;
++ u8 w[8];
++ u8 __reserved3[40];
++} __packed;
++
++struct qm_mcc_ceetm_class_scheduler_query {
++ u8 __reserved1;
++ u16 cqcid;
++ u8 dcpid;
++ u8 __reserved2[59];
++} __packed;
++
++#define CEETM_COMMAND_CHANNEL_MAPPING (0 << 12)
++#define CEETM_COMMAND_SP_MAPPING (1 << 12)
++#define CEETM_COMMAND_CHANNEL_SHAPER (2 << 12)
++#define CEETM_COMMAND_LNI_SHAPER (3 << 12)
++#define CEETM_COMMAND_TCFC (4 << 12)
++
++#define CEETM_CCGRID_MASK 0x01FF
++#define CEETM_CCGR_CM_CONFIGURE (0 << 14)
++#define CEETM_CCGR_DN_CONFIGURE (1 << 14)
++#define CEETM_CCGR_TEST_WRITE (2 << 14)
++#define CEETM_CCGR_CM_QUERY (0 << 14)
++#define CEETM_CCGR_DN_QUERY (1 << 14)
++#define CEETM_CCGR_DN_QUERY_FLUSH (2 << 14)
++#define CEETM_QUERY_CONGESTION_STATE (3 << 14)
++
++struct qm_mcc_ceetm_mapping_shaper_tcfc_config {
++ u8 __reserved1;
++ u16 cid;
++ u8 dcpid;
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 map_shaped:1;
++ u8 map_reserved:4;
++ u8 map_lni_id:3;
++#else
++ u8 map_lni_id:3;
++ u8 map_reserved:4;
++ u8 map_shaped:1;
++#endif
++ u8 __reserved2[58];
++ } __packed channel_mapping;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 map_reserved:5;
++ u8 map_lni_id:3;
++#else
++ u8 map_lni_id:3;
++ u8 map_reserved:5;
++#endif
++ u8 __reserved2[58];
++ } __packed sp_mapping;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 cpl:1;
++ u8 cpl_reserved:2;
++ u8 oal:5;
++#else
++ u8 oal:5;
++ u8 cpl_reserved:2;
++ u8 cpl:1;
++#endif
++ u32 crtcr:24;
++ u32 ertcr:24;
++ u16 crtbl;
++ u16 ertbl;
++ u8 mps; /* This will be hardcoded by the driver to 60 */
++ u8 __reserved2[47];
++ } __packed shaper_config;
++ struct {
++ u8 __reserved2[11];
++ u64 lnitcfcc;
++ u8 __reserved3[40];
++ } __packed tcfc_config;
++ };
++} __packed;
++
++struct qm_mcc_ceetm_mapping_shaper_tcfc_query {
++ u8 __reserved1;
++ u16 cid;
++ u8 dcpid;
++ u8 __reserved2[59];
++} __packed;
++
++struct qm_mcc_ceetm_ccgr_config {
++ u8 __reserved1;
++ u16 ccgrid;
++ u8 dcpid;
++ u8 __reserved2;
++ u16 we_mask;
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 ctl_reserved:1;
++ u8 ctl_wr_en_g:1;
++ u8 ctl_wr_en_y:1;
++ u8 ctl_wr_en_r:1;
++ u8 ctl_td_en:1;
++ u8 ctl_td_mode:1;
++ u8 ctl_cscn_en:1;
++ u8 ctl_mode:1;
++#else
++ u8 ctl_mode:1;
++ u8 ctl_cscn_en:1;
++ u8 ctl_td_mode:1;
++ u8 ctl_td_en:1;
++ u8 ctl_wr_en_r:1;
++ u8 ctl_wr_en_y:1;
++ u8 ctl_wr_en_g:1;
++ u8 ctl_reserved:1;
++#endif
++ u8 cdv;
++ u16 cscn_tupd;
++ u8 oal;
++ u8 __reserved3;
++ struct qm_cgr_cs_thres cs_thres;
++ struct qm_cgr_cs_thres cs_thres_x;
++ struct qm_cgr_cs_thres td_thres;
++ struct qm_cgr_wr_parm wr_parm_g;
++ struct qm_cgr_wr_parm wr_parm_y;
++ struct qm_cgr_wr_parm wr_parm_r;
++ } __packed cm_config;
++ struct {
++ u8 dnc;
++ u8 dn0;
++ u8 dn1;
++ u64 dnba:40;
++ u8 __reserved3[2];
++ u16 dnth_0;
++ u8 __reserved4[2];
++ u16 dnth_1;
++ u8 __reserved5[8];
++ } __packed dn_config;
++ struct {
++ u8 __reserved3[3];
++ u64 i_cnt:40;
++ u8 __reserved4[16];
++ } __packed test_write;
++ };
++ u8 __reserved5[32];
++} __packed;
++
++struct qm_mcc_ceetm_ccgr_query {
++ u8 __reserved1;
++ u16 ccgrid;
++ u8 dcpid;
++ u8 __reserved2[59];
++} __packed;
++
++struct qm_mcc_ceetm_cq_peek_pop_xsfdrread {
++ u8 __reserved1;
++ u16 cqid;
++ u8 dcpid;
++ u8 ct;
++ u16 xsfdr;
++ u8 __reserved2[56];
++} __packed;
++
++#define CEETM_QUERY_DEQUEUE_STATISTICS 0x00
++#define CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS 0x01
++#define CEETM_WRITE_DEQUEUE_STATISTICS 0x02
++#define CEETM_QUERY_REJECT_STATISTICS 0x03
++#define CEETM_QUERY_REJECT_CLEAR_STATISTICS 0x04
++#define CEETM_WRITE_REJECT_STATISTICS 0x05
++struct qm_mcc_ceetm_statistics_query_write {
++ u8 __reserved1;
++ u16 cid;
++ u8 dcpid;
++ u8 ct;
++ u8 __reserved2[13];
++ u64 frm_cnt:40;
++ u8 __reserved3[2];
++ u64 byte_cnt:48;
++ u8 __reserved[32];
++} __packed;
++
++struct qm_mc_command {
++ u8 __dont_write_directly__verb;
++ union {
++ struct qm_mcc_initfq initfq;
++ struct qm_mcc_queryfq queryfq;
++ struct qm_mcc_queryfq_np queryfq_np;
++ struct qm_mcc_alterfq alterfq;
++ struct qm_mcc_initcgr initcgr;
++ struct qm_mcc_cgrtestwrite cgrtestwrite;
++ struct qm_mcc_querycgr querycgr;
++ struct qm_mcc_querycongestion querycongestion;
++ struct qm_mcc_querywq querywq;
++ struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
++ struct qm_mcc_ceetm_lfqmt_query lfqmt_query;
++ struct qm_mcc_ceetm_cq_config cq_config;
++ struct qm_mcc_ceetm_cq_query cq_query;
++ struct qm_mcc_ceetm_dct_config dct_config;
++ struct qm_mcc_ceetm_dct_query dct_query;
++ struct qm_mcc_ceetm_class_scheduler_config csch_config;
++ struct qm_mcc_ceetm_class_scheduler_query csch_query;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_config mst_config;
++ struct qm_mcc_ceetm_mapping_shaper_tcfc_query mst_query;
++ struct qm_mcc_ceetm_ccgr_config ccgr_config;
++ struct qm_mcc_ceetm_ccgr_query ccgr_query;
++ struct qm_mcc_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
++ struct qm_mcc_ceetm_statistics_query_write stats_query_write;
++ };
++} __packed;
++#define QM_MCC_VERB_VBIT 0x80
++#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
++#define QM_MCC_VERB_INITFQ_PARKED 0x40
++#define QM_MCC_VERB_INITFQ_SCHED 0x41
++#define QM_MCC_VERB_QUERYFQ 0x44
++#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
++#define QM_MCC_VERB_QUERYWQ 0x46
++#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
++#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
++#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
++#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
++#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
++#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
++#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
++#define QM_MCC_VERB_INITCGR 0x50
++#define QM_MCC_VERB_MODIFYCGR 0x51
++#define QM_MCC_VERB_CGRTESTWRITE 0x52
++#define QM_MCC_VERB_QUERYCGR 0x58
++#define QM_MCC_VERB_QUERYCONGESTION 0x59
++/* INITFQ-specific flags */
++#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
++#define QM_INITFQ_WE_OAC 0x0100
++#define QM_INITFQ_WE_ORPC 0x0080
++#define QM_INITFQ_WE_CGID 0x0040
++#define QM_INITFQ_WE_FQCTRL 0x0020
++#define QM_INITFQ_WE_DESTWQ 0x0010
++#define QM_INITFQ_WE_ICSCRED 0x0008
++#define QM_INITFQ_WE_TDTHRESH 0x0004
++#define QM_INITFQ_WE_CONTEXTB 0x0002
++#define QM_INITFQ_WE_CONTEXTA 0x0001
++/* INITCGR/MODIFYCGR-specific flags */
++#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
++#define QM_CGR_WE_WR_PARM_G 0x0400
++#define QM_CGR_WE_WR_PARM_Y 0x0200
++#define QM_CGR_WE_WR_PARM_R 0x0100
++#define QM_CGR_WE_WR_EN_G 0x0080
++#define QM_CGR_WE_WR_EN_Y 0x0040
++#define QM_CGR_WE_WR_EN_R 0x0020
++#define QM_CGR_WE_CSCN_EN 0x0010
++#define QM_CGR_WE_CSCN_TARG 0x0008
++#define QM_CGR_WE_CSTD_EN 0x0004
++#define QM_CGR_WE_CS_THRES 0x0002
++#define QM_CGR_WE_MODE 0x0001
++
++/* See 1.5.9.7 CEETM Management Commands */
++#define QM_CEETM_VERB_LFQMT_CONFIG 0x70
++#define QM_CEETM_VERB_LFQMT_QUERY 0x71
++#define QM_CEETM_VERB_CQ_CONFIG 0x72
++#define QM_CEETM_VERB_CQ_QUERY 0x73
++#define QM_CEETM_VERB_DCT_CONFIG 0x74
++#define QM_CEETM_VERB_DCT_QUERY 0x75
++#define QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG 0x76
++#define QM_CEETM_VERB_CLASS_SCHEDULER_QUERY 0x77
++#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG 0x78
++#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY 0x79
++#define QM_CEETM_VERB_CCGR_CONFIG 0x7A
++#define QM_CEETM_VERB_CCGR_QUERY 0x7B
++#define QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD 0x7C
++#define QM_CEETM_VERB_STATISTICS_QUERY_WRITE 0x7D
++
++/* See 1.5.8.5.1: "Initialize FQ" */
++/* See 1.5.8.5.2: "Query FQ" */
++/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
++/* See 1.5.8.5.4: "Alter FQ State Commands" */
++/* See 1.5.8.6.1: "Initialize/Modify CGR" */
++/* See 1.5.8.6.2: "CGR Test Write" */
++/* See 1.5.8.6.3: "Query CGR" */
++/* See 1.5.8.6.4: "Query Congestion Group State" */
++struct qm_mcr_initfq {
++ u8 __reserved1[62];
++} __packed;
++struct qm_mcr_queryfq {
++ u8 __reserved1[8];
++ struct qm_fqd fqd; /* the FQD fields are here */
++ u8 __reserved2[30];
++} __packed;
++struct qm_mcr_queryfq_np {
++ u8 __reserved1;
++ u8 state; /* QM_MCR_NP_STATE_*** */
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 __reserved2;
++ u32 fqd_link:24;
++ u16 __reserved3:2;
++ u16 odp_seq:14;
++ u16 __reserved4:2;
++ u16 orp_nesn:14;
++ u16 __reserved5:1;
++ u16 orp_ea_hseq:15;
++ u16 __reserved6:1;
++ u16 orp_ea_tseq:15;
++ u8 __reserved7;
++ u32 orp_ea_hptr:24;
++ u8 __reserved8;
++ u32 orp_ea_tptr:24;
++ u8 __reserved9;
++ u32 pfdr_hptr:24;
++ u8 __reserved10;
++ u32 pfdr_tptr:24;
++ u8 __reserved11[5];
++ u8 __reserved12:7;
++ u8 is:1;
++ u16 ics_surp;
++ u32 byte_cnt;
++ u8 __reserved13;
++ u32 frm_cnt:24;
++ u32 __reserved14;
++ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
++ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
++ u16 __reserved15;
++ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
++ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
++ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
++#else
++ u8 __reserved2;
++ u32 fqd_link:24;
++
++ u16 odp_seq:14;
++ u16 __reserved3:2;
++
++ u16 orp_nesn:14;
++ u16 __reserved4:2;
++
++ u16 orp_ea_hseq:15;
++ u16 __reserved5:1;
++
++ u16 orp_ea_tseq:15;
++ u16 __reserved6:1;
++
++ u8 __reserved7;
++ u32 orp_ea_hptr:24;
++
++ u8 __reserved8;
++ u32 orp_ea_tptr:24;
++
++ u8 __reserved9;
++ u32 pfdr_hptr:24;
++
++ u8 __reserved10;
++ u32 pfdr_tptr:24;
++
++ u8 __reserved11[5];
++ u8 is:1;
++ u8 __reserved12:7;
++ u16 ics_surp;
++ u32 byte_cnt;
++ u8 __reserved13;
++ u32 frm_cnt:24;
++ u32 __reserved14;
++ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
++ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
++ u16 __reserved15;
++ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
++ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
++ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
++#endif
++} __packed;
++
++struct qm_mcr_alterfq {
++ u8 fqs; /* Frame Queue Status */
++ u8 __reserved1[61];
++} __packed;
++struct qm_mcr_initcgr {
++ u8 __reserved1[62];
++} __packed;
++struct qm_mcr_cgrtestwrite {
++ u16 __reserved1;
++ struct __qm_mc_cgr cgr; /* CGR fields */
++ u8 __reserved2[3];
++ u32 __reserved3:24;
++ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
++ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
++ u32 __reserved4:24;
++ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
++ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
++ u16 lgt; /* Last Group Tick */
++ u16 wr_prob_g;
++ u16 wr_prob_y;
++ u16 wr_prob_r;
++ u8 __reserved5[8];
++} __packed;
++struct qm_mcr_querycgr {
++ u16 __reserved1;
++ struct __qm_mc_cgr cgr; /* CGR fields */
++ u8 __reserved2[3];
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 __reserved3:24;
++ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
++ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
++#else
++ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
++ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
++ u32 __reserved3:24;
++#endif
++ };
++ u64 i_bcnt;
++ };
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u32 __reserved4:24;
++ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
++ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
++#else
++ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
++ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
++ u32 __reserved4:24;
++#endif
++ };
++ u64 a_bcnt;
++ };
++ union {
++ u32 cscn_targ_swp[4];
++ u8 __reserved5[16];
++ };
++} __packed;
++static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
++{
++ return be64_to_cpu(q->i_bcnt);
++}
++static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
++{
++ return be64_to_cpu(q->a_bcnt);
++}
++static inline u64 qm_mcr_cgrtestwrite_i_get64(
++ const struct qm_mcr_cgrtestwrite *q)
++{
++ return be64_to_cpu(((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo);
++}
++static inline u64 qm_mcr_cgrtestwrite_a_get64(
++ const struct qm_mcr_cgrtestwrite *q)
++{
++ return be64_to_cpu(((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo);
++}
++/* Macro, so we compile better if 'v' isn't always 64-bit */
++#define qm_mcr_querycgr_i_set64(q, v) \
++ do { \
++ struct qm_mcr_querycgr *__q931 = (q); \
++ __q931->i_bcnt_hi = upper_32_bits(v); \
++ __q931->i_bcnt_lo = lower_32_bits(v); \
++ } while (0)
++#define qm_mcr_querycgr_a_set64(q, v) \
++ do { \
++ struct qm_mcr_querycgr *__q931 = (q); \
++ __q931->a_bcnt_hi = upper_32_bits(v); \
++ __q931->a_bcnt_lo = lower_32_bits(v); \
++ } while (0)
++struct __qm_mcr_querycongestion {
++ u32 __state[8];
++};
++struct qm_mcr_querycongestion {
++ u8 __reserved[30];
++ /* Access this struct using QM_MCR_QUERYCONGESTION() */
++ struct __qm_mcr_querycongestion state;
++} __packed;
++struct qm_mcr_querywq {
++ union {
++ u16 channel_wq; /* ignores wq (3 lsbits) */
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u16 id:13; /* qm_channel */
++ u16 __reserved:3;
++#else
++ u16 __reserved:3;
++ u16 id:13; /* qm_channel */
++#endif
++ } __packed channel;
++ };
++ u8 __reserved[28];
++ u32 wq_len[8];
++} __packed;
++
++/* QMAN CEETM Management Command Response */
++struct qm_mcr_ceetm_lfqmt_config {
++ u8 __reserved1[62];
++} __packed;
++struct qm_mcr_ceetm_lfqmt_query {
++ u8 __reserved1[8];
++ u16 cqid;
++ u8 __reserved2[2];
++ u16 dctidx;
++ u8 __reserved3[2];
++ u16 ccgid;
++ u8 __reserved4[44];
++} __packed;
++
++struct qm_mcr_ceetm_cq_config {
++ u8 __reserved1[62];
++} __packed;
++
++struct qm_mcr_ceetm_cq_query {
++ u8 __reserved1[4];
++ u16 ccgid;
++ u16 state;
++ u32 pfdr_hptr:24;
++ u32 pfdr_tptr:24;
++ u16 od1_xsfdr;
++ u16 od2_xsfdr;
++ u16 od3_xsfdr;
++ u16 od4_xsfdr;
++ u16 od5_xsfdr;
++ u16 od6_xsfdr;
++ u16 ra1_xsfdr;
++ u16 ra2_xsfdr;
++ u8 __reserved2;
++ u32 frm_cnt:24;
++ u8 __reserved3[28];
++} __packed;
++
++struct qm_mcr_ceetm_dct_config {
++ u8 __reserved1[62];
++} __packed;
++
++struct qm_mcr_ceetm_dct_query {
++ u8 __reserved1[18];
++ u32 context_b;
++ u64 context_a;
++ u8 __reserved2[32];
++} __packed;
++
++struct qm_mcr_ceetm_class_scheduler_config {
++ u8 __reserved1[62];
++} __packed;
++
++struct qm_mcr_ceetm_class_scheduler_query {
++ u8 __reserved1[9];
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 gpc_reserved:1;
++ u8 gpc_combine_flag:1;
++ u8 gpc_prio_b:3;
++ u8 gpc_prio_a:3;
++#else
++ u8 gpc_prio_a:3;
++ u8 gpc_prio_b:3;
++ u8 gpc_combine_flag:1;
++ u8 gpc_reserved:1;
++#endif
++ u16 crem;
++ u16 erem;
++ u8 w[8];
++ u8 __reserved2[5];
++ u32 wbfslist:24;
++ u32 d8;
++ u32 d9;
++ u32 d10;
++ u32 d11;
++ u32 d12;
++ u32 d13;
++ u32 d14;
++ u32 d15;
++} __packed;
++
++struct qm_mcr_ceetm_mapping_shaper_tcfc_config {
++ u16 cid;
++ u8 __reserved2[60];
++} __packed;
++
++struct qm_mcr_ceetm_mapping_shaper_tcfc_query {
++ u16 cid;
++ u8 __reserved1;
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 map_shaped:1;
++ u8 map_reserved:4;
++ u8 map_lni_id:3;
++#else
++ u8 map_lni_id:3;
++ u8 map_reserved:4;
++ u8 map_shaped:1;
++#endif
++ u8 __reserved2[58];
++ } __packed channel_mapping_query;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 map_reserved:5;
++ u8 map_lni_id:3;
++#else
++ u8 map_lni_id:3;
++ u8 map_reserved:5;
++#endif
++ u8 __reserved2[58];
++ } __packed sp_mapping_query;
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 cpl:1;
++ u8 cpl_reserved:2;
++ u8 oal:5;
++#else
++ u8 oal:5;
++ u8 cpl_reserved:2;
++ u8 cpl:1;
++#endif
++ u32 crtcr:24;
++ u32 ertcr:24;
++ u16 crtbl;
++ u16 ertbl;
++ u8 mps;
++ u8 __reserved2[15];
++ u32 crat;
++ u32 erat;
++ u8 __reserved3[24];
++ } __packed shaper_query;
++ struct {
++ u8 __reserved1[11];
++ u64 lnitcfcc;
++ u8 __reserved3[40];
++ } __packed tcfc_query;
++ };
++} __packed;
++
++struct qm_mcr_ceetm_ccgr_config {
++ u8 __reserved1[46];
++ union {
++ u8 __reserved2[8];
++ struct {
++ u16 timestamp;
++ u16 wr_prob_g;
++ u16 wr_prob_y;
++ u16 wr_prob_r;
++ } __packed test_write;
++ };
++ u8 __reserved3[8];
++} __packed;
++
++struct qm_mcr_ceetm_ccgr_query {
++ u8 __reserved1[6];
++ union {
++ struct {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ u8 ctl_reserved:1;
++ u8 ctl_wr_en_g:1;
++ u8 ctl_wr_en_y:1;
++ u8 ctl_wr_en_r:1;
++ u8 ctl_td_en:1;
++ u8 ctl_td_mode:1;
++ u8 ctl_cscn_en:1;
++ u8 ctl_mode:1;
++#else
++ u8 ctl_mode:1;
++ u8 ctl_cscn_en:1;
++ u8 ctl_td_mode:1;
++ u8 ctl_td_en:1;
++ u8 ctl_wr_en_r:1;
++ u8 ctl_wr_en_y:1;
++ u8 ctl_wr_en_g:1;
++ u8 ctl_reserved:1;
++#endif
++ u8 cdv;
++ u8 __reserved2[2];
++ u8 oal;
++ u8 __reserved3;
++ struct qm_cgr_cs_thres cs_thres;
++ struct qm_cgr_cs_thres cs_thres_x;
++ struct qm_cgr_cs_thres td_thres;
++ struct qm_cgr_wr_parm wr_parm_g;
++ struct qm_cgr_wr_parm wr_parm_y;
++ struct qm_cgr_wr_parm wr_parm_r;
++ u16 cscn_targ_dcp;
++ u8 dcp_lsn;
++ u64 i_cnt:40;
++ u8 __reserved4[3];
++ u64 a_cnt:40;
++ u32 cscn_targ_swp[4];
++ } __packed cm_query;
++ struct {
++ u8 dnc;
++ u8 dn0;
++ u8 dn1;
++ u64 dnba:40;
++ u8 __reserved2[2];
++ u16 dnth_0;
++ u8 __reserved3[2];
++ u16 dnth_1;
++ u8 __reserved4[10];
++ u16 dnacc_0;
++ u8 __reserved5[2];
++ u16 dnacc_1;
++ u8 __reserved6[24];
++ } __packed dn_query;
++ struct {
++ u8 __reserved2[24];
++ struct __qm_mcr_querycongestion state;
++ } __packed congestion_state;
++ };
++} __packed;
++
++struct qm_mcr_ceetm_cq_peek_pop_xsfdrread {
++ u8 stat;
++ u8 __reserved1[11];
++ u16 dctidx;
++ struct qm_fd fd;
++ u8 __reserved2[32];
++} __packed;
++
++struct qm_mcr_ceetm_statistics_query {
++ u8 __reserved1[17];
++ u64 frm_cnt:40;
++ u8 __reserved2[2];
++ u64 byte_cnt:48;
++ u8 __reserved3[32];
++} __packed;
++
++struct qm_mc_result {
++ u8 verb;
++ u8 result;
++ union {
++ struct qm_mcr_initfq initfq;
++ struct qm_mcr_queryfq queryfq;
++ struct qm_mcr_queryfq_np queryfq_np;
++ struct qm_mcr_alterfq alterfq;
++ struct qm_mcr_initcgr initcgr;
++ struct qm_mcr_cgrtestwrite cgrtestwrite;
++ struct qm_mcr_querycgr querycgr;
++ struct qm_mcr_querycongestion querycongestion;
++ struct qm_mcr_querywq querywq;
++ struct qm_mcr_ceetm_lfqmt_config lfqmt_config;
++ struct qm_mcr_ceetm_lfqmt_query lfqmt_query;
++ struct qm_mcr_ceetm_cq_config cq_config;
++ struct qm_mcr_ceetm_cq_query cq_query;
++ struct qm_mcr_ceetm_dct_config dct_config;
++ struct qm_mcr_ceetm_dct_query dct_query;
++ struct qm_mcr_ceetm_class_scheduler_config csch_config;
++ struct qm_mcr_ceetm_class_scheduler_query csch_query;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_config mst_config;
++ struct qm_mcr_ceetm_mapping_shaper_tcfc_query mst_query;
++ struct qm_mcr_ceetm_ccgr_config ccgr_config;
++ struct qm_mcr_ceetm_ccgr_query ccgr_query;
++ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
++ struct qm_mcr_ceetm_statistics_query stats_query;
++ };
++} __packed;
++
++#define QM_MCR_VERB_RRID 0x80
++#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
++#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
++#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
++#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
++#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
++#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
++#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
++#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
++#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
++#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
++#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
++#define QM_MCR_RESULT_NULL 0x00
++#define QM_MCR_RESULT_OK 0xf0
++#define QM_MCR_RESULT_ERR_FQID 0xf1
++#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
++#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
++#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
++#define QM_MCR_RESULT_PENDING 0xf8
++#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
++#define QM_MCR_NP_STATE_FE 0x10
++#define QM_MCR_NP_STATE_R 0x08
++#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
++#define QM_MCR_NP_STATE_OOS 0x00
++#define QM_MCR_NP_STATE_RETIRED 0x01
++#define QM_MCR_NP_STATE_TEN_SCHED 0x02
++#define QM_MCR_NP_STATE_TRU_SCHED 0x03
++#define QM_MCR_NP_STATE_PARKED 0x04
++#define QM_MCR_NP_STATE_ACTIVE 0x05
++#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
++#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
++#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
++#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
++#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
++#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
++#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
++/* This extracts the state for congestion group 'n' from a query response.
++ * Eg.
++ * u8 cgr = [...];
++ * struct qm_mc_result *res = [...];
++ * printf("congestion group %d congestion state: %d\n", cgr,
++ * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
++ */
++#define __CGR_WORD(num) ((num) >> 5)
++#define __CGR_SHIFT(num) ((num) & 0x1f)
++#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
++static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
++ u8 cgr)
++{
++ return p->__state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
++}
++
++/*********************/
++/* Utility interface */
++/*********************/
++
++/* Represents an allocator over a range of FQIDs. NB, accesses are not locked,
++ * spinlock them yourself if needed. */
++struct qman_fqid_pool;
++
++/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy()
++ * always succeeds, but returns non-zero if there were "leaked" FQID
++ * allocations. */
++struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num);
++int qman_fqid_pool_destroy(struct qman_fqid_pool *pool);
++/* Alloc/free a FQID from the range. _alloc() returns zero for success. */
++int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid);
++void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid);
++u32 qman_fqid_pool_used(struct qman_fqid_pool *pool);
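++/* Illustrative usage sketch; 'fqid_base' is a hypothetical caller-chosen
++ * starting FQID, and the range size must be a multiple of 32;
++ *
++ *	struct qman_fqid_pool *pool = qman_fqid_pool_create(fqid_base, 64);
++ *	u32 fqid;
++ *
++ *	if (pool && !qman_fqid_pool_alloc(pool, &fqid)) {
++ *		// ... use 'fqid' ...
++ *		qman_fqid_pool_free(pool, fqid);
++ *	}
++ *	qman_fqid_pool_destroy(pool);
++ */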
++
++/*******************************************************************/
++/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
++/*******************************************************************/
++
++ /* Portal and Frame Queues */
++ /* ----------------------- */
++/* Represents a managed portal */
++struct qman_portal;
++
++/* This object type represents Qman frame queue descriptors (FQD), it is
++ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
++ * defined further down. */
++struct qman_fq;
++
++/* This object type represents a Qman congestion group, it is defined further
++ * down. */
++struct qman_cgr;
++
++struct qman_portal_config {
++ /* If the caller enables DQRR stashing (and thus wishes to operate the
++ * portal from only one cpu), this is the logical CPU that the portal
++ * will stash to. Whether stashing is enabled or not, this setting is
++ * also used for any "core-affine" portals, ie. default portals
++ * associated with the corresponding cpu. -1 implies that there is no core
++ * affinity configured. */
++ int cpu;
++ /* portal interrupt line */
++ int irq;
++ /* the unique index of this portal */
++ u32 index;
++ /* Is this portal shared? (If so, it has coarser locking and demuxes
++ * processing on behalf of other CPUs.) */
++ int is_shared;
++ /* The portal's dedicated channel id, use this value for initialising
++ * frame queues to target this portal when scheduled. */
++ u16 channel;
++ /* A mask of which pool channels this portal has dequeue access to
++ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */
++ u32 pools;
++};
++
++/* This enum, and the callback type that returns it, are used when handling
++ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
++ * portal object (for handling dequeues that do not demux because contextB is
++ * NULL), the return value *MUST* be qman_cb_dqrr_consume. */
++enum qman_cb_dqrr_result {
++ /* DQRR entry can be consumed */
++ qman_cb_dqrr_consume,
++ /* Like _consume, but requests parking - FQ must be held-active */
++ qman_cb_dqrr_park,
++ /* Does not consume, for DCA mode only. This allows out-of-order
++ * consumes by explicit calls to qman_dca() and/or the use of implicit
++ * DCA via EQCR entries. */
++ qman_cb_dqrr_defer,
++ /* Stop processing without consuming this ring entry. Exits the current
++ * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an
++ * interrupt handler, the callback would typically call
++ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
++ * otherwise the interrupt will reassert immediately. */
++ qman_cb_dqrr_stop,
++ /* Like qman_cb_dqrr_stop, but consumes the current entry. */
++ qman_cb_dqrr_consume_stop
++};
++typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
++ struct qman_fq *fq,
++ const struct qm_dqrr_entry *dqrr);
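++/* Illustrative sketch of a callback matching the type above; the handler
++ * name and process_frame() are hypothetical, and every frame is consumed;
++ *
++ *	static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *qm,
++ *						   struct qman_fq *fq,
++ *						   const struct qm_dqrr_entry *dq)
++ *	{
++ *		process_frame(&dq->fd);
++ *		return qman_cb_dqrr_consume;
++ *	}
++ */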
++
++/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
++ * are always consumed after the callback returns. */
++typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
++ const struct qm_mr_entry *msg);
++
++/* This callback type is used when handling DCP ERNs */
++typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
++ const struct qm_mr_entry *msg);
++
++/* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
++ * held-active + held-suspended are just "sched". A state like "retired" is not
++ * assumed until the change is complete (ie. QMAN_FQ_STATE_CHANGING is set until
++ * then, to indicate it's completing and to gate attempts to retry the retire
++ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
++ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
++ * index rather than the FQ that ring entry corresponds to), so repeated park
++ * commands are allowed (if you're silly enough to try) but won't change FQ
++ * state, and the resulting park notifications move FQs from "sched" to
++ * "parked". */
++enum qman_fq_state {
++ qman_fq_state_oos,
++ qman_fq_state_parked,
++ qman_fq_state_sched,
++ qman_fq_state_retired
++};
++
++/* Frame queue objects (struct qman_fq) are stored within memory passed to
++ * qman_create_fq(), as this allows stashing of caller-provided demux callback
++ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
++ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
++ * they should;
++ *
++ * (a) extend the qman_fq structure with their state; eg.
++ *
++ * // myfq is allocated and driver_fq callbacks filled in;
++ * struct my_fq {
++ * struct qman_fq base;
++ * int an_extra_field;
++ * [ ... add other fields to be associated with each FQ ...]
++ * } *myfq = some_my_fq_allocator();
++ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
++ *
++ * // in a dequeue callback, access extra fields from 'fq' via a cast;
++ * struct my_fq *myfq = (struct my_fq *)fq;
++ * do_something_with(myfq->an_extra_field);
++ * [...]
++ *
++ * (b) when and if configuring the FQ for context stashing, specify how ever
++ * many cachelines are required to stash 'struct my_fq', to accelerate not
++ * only the Qman driver but the callback as well.
++ */
++
++struct qman_fq_cb {
++ qman_cb_dqrr dqrr; /* for dequeued frames */
++ qman_cb_mr ern; /* for s/w ERNs */
++ qman_cb_mr fqs; /* frame-queue state changes */
++};
++
++struct qman_fq {
++ /* Caller of qman_create_fq() provides these demux callbacks */
++ struct qman_fq_cb cb;
++ /* These are internal to the driver, don't touch. In particular, they
++ * may change, be removed, or extended (so you shouldn't rely on
++ * sizeof(qman_fq) being a constant). */
++ spinlock_t fqlock;
++ u32 fqid;
++ volatile unsigned long flags;
++ enum qman_fq_state state;
++ int cgr_groupid;
++ struct rb_node node;
++#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
++ u32 key;
++#endif
++};
++
++/* This callback type is used when handling congestion group entry/exit.
++ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. */
++typedef void (*qman_cb_cgr)(struct qman_portal *qm,
++ struct qman_cgr *cgr, int congested);
++
++struct qman_cgr {
++ /* Set these prior to qman_create_cgr() */
++ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc. */
++ qman_cb_cgr cb;
++ /* These are private to the driver */
++ u16 chan; /* portal channel this object is created on */
++ struct list_head node;
++};
++
++/* Flags to qman_create_fq() */
++#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
++#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
++#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
++#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
++#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
++#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
++
++/* Flags to qman_destroy_fq() */
++#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
++
++/* Flags from qman_fq_state() */
++#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
++#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
++#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
++#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
++#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
++#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
++
++/* Flags to qman_init_fq() */
++#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
++#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
++
++/* Flags to qman_volatile_dequeue() */
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
++#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
++#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
++#endif
++
++/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
++ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
++ * any change here should be audited in PME.) */
++#ifdef CONFIG_FSL_DPA_CAN_WAIT
++#define QMAN_ENQUEUE_FLAG_WAIT 0x00010000 /* wait if EQCR is full */
++#define QMAN_ENQUEUE_FLAG_WAIT_INT 0x00020000 /* if wait, interruptible? */
++#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
++#define QMAN_ENQUEUE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
++#endif
++#endif
++#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
++#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
++#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
++#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
++ (((u32)(p) << 2) & 0x00000f00)
++#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
++#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
++#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
++#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
++/* For the ORP-specific qman_enqueue_orp() variant;
++ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
++ * of a frame. */
++#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
++/* - this flag performs no enqueue but fills in an ORP sequence number that
++ * would otherwise block it (eg. if a frame has been dropped). */
++#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
++/* - this flag performs no enqueue but advances NESN to the given sequence
++ * number. */
++#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
++
++/* Flags to qman_modify_cgr() */
++#define QMAN_CGR_FLAG_USE_INIT 0x00000001
++#define QMAN_CGR_MODE_FRAME 0x00000001
++
++ /* Portal Management */
++ /* ----------------- */
++/**
++ * qman_get_portal_config - get portal configuration settings
++ *
++ * This returns a read-only view of the current cpu's affine portal settings.
++ */
++const struct qman_portal_config *qman_get_portal_config(void);
++
++/**
++ * qman_irqsource_get - return the portal work that is interrupt-driven
++ *
++ * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
++ * enabled for interrupt handling on the current cpu's affine portal. These
++ * sources will trigger the portal interrupt and the interrupt handler (or a
++ * tasklet/bottom-half it defers to) will perform the corresponding processing
++ * work. The qman_poll_***() functions will only process sources that are not in
++ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
++ * this always returns zero.
++ */
++u32 qman_irqsource_get(void);
++
++/**
++ * qman_irqsource_add - add processing sources to be interrupt-driven
++ * @bits: bitmask of QM_PIRQ_**I processing sources
++ *
++ * Adds processing sources that should be interrupt-driven (rather than
++ * processed via qman_poll_***() functions). Returns zero for success, or
++ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
++ */
++int qman_irqsource_add(u32 bits);
++
++/**
++ * qman_irqsource_remove - remove processing sources from being interrupt-driven
++ * @bits: bitmask of QM_PIRQ_**I processing sources
++ *
++ * Removes processing sources from being interrupt-driven, so that they will
++ * instead be processed via qman_poll_***() functions. Returns zero for success,
++ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
++ */
++int qman_irqsource_remove(u32 bits);
++
++/**
++ * qman_affine_cpus - return a mask of cpus that have affine portals
++ */
++const cpumask_t *qman_affine_cpus(void);
++
++/**
++ * qman_affine_channel - return the channel ID of a portal
++ * @cpu: the cpu whose affine portal is the subject of the query
++ *
++ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
++ * bug to call this function for any value of @cpu (other than -1) that is not a
++ * member of the mask returned from qman_affine_cpus().
++ */
++u16 qman_affine_channel(int cpu);
++
++/**
++ * qman_get_affine_portal - return the portal pointer affine to cpu
++ * @cpu: the cpu whose affine portal is the subject of the query
++ */
++void *qman_get_affine_portal(int cpu);
++
++/**
++ * qman_poll_dqrr - process DQRR (fast-path) entries
++ * @limit: the maximum number of DQRR entries to process
++ *
++ * Use of this function requires that DQRR processing not be interrupt-driven.
++ * Ie. the value returned by qman_irqsource_get() should not include
++ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
++ * this function will return -EINVAL, otherwise the return value is >=0 and
++ * represents the number of DQRR entries processed.
++ */
++int qman_poll_dqrr(unsigned int limit);
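++/* Illustrative polling sketch, assuming DQRR processing has first been
++ * removed from the interrupt-driven sources ('stop' is hypothetical);
++ *
++ *	qman_irqsource_remove(QM_PIRQ_DQRI);
++ *	while (!stop) {
++ *		int n = qman_poll_dqrr(16);
++ *
++ *		if (n <= 0)
++ *			cpu_relax();
++ *	}
++ */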
++
++/**
++ * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
++ *
++ * This function does any portal processing that isn't interrupt-driven. If the
++ * current CPU is sharing a portal hosted on another CPU, this function will
++ * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources
++ * indicating what interrupt sources were actually processed by the call.
++ */
++u32 qman_poll_slow(void);
++
++/**
++ * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow()
++ *
++ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
++ * affine portal. There are two classes of portal processing in question;
++ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
++ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
++ * thresholds, congestion state changes, etc). This function does whatever
++ * processing is not triggered by interrupts.
++ *
++ * Note, if DQRR and some slow-path processing are poll-driven (rather than
++ * interrupt-driven) then this function uses a heuristic to determine how often
++ * to run slow-path processing - as slow-path processing introduces at least a
++ * minimum latency each time it is run, whereas fast-path (DQRR) processing is
++ * close to zero-cost if there is no work to be done. Applications can tune this
++ * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly
++ * rather than going via this wrapper.
++ */
++void qman_poll(void);
++
++/**
++ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
++ *
++ * Disables DQRR processing of the portal. This is reference-counted, so
++ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
++ * truly re-enable dequeuing.
++ */
++void qman_stop_dequeues(void);
++
++/**
++ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
++ *
++ * Enables DQRR processing of the portal. This is reference-counted, so
++ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
++ * truly re-enable dequeuing.
++ */
++void qman_start_dequeues(void);
++
++/**
++ * qman_static_dequeue_add - Add pool channels to the portal SDQCR
++ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
++ *
++ * Adds a set of pool channels to the portal's static dequeue command register
++ * (SDQCR). The requested pools are limited to those the portal has dequeue
++ * access to.
++ */
++void qman_static_dequeue_add(u32 pools);
++
++/**
++ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
++ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
++ *
++ * Removes a set of pool channels from the portal's static dequeue command
++ * register (SDQCR). The requested pools are limited to those the portal has
++ * dequeue access to.
++ */
++void qman_static_dequeue_del(u32 pools);
++
++/**
++ * qman_static_dequeue_get - return the portal's current SDQCR
++ *
++ * Returns the portal's current static dequeue command register (SDQCR). The
++ * entire register is returned, so if only the currently-enabled pool channels
++ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
++ */
++u32 qman_static_dequeue_get(void);
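++/* Illustrative sketch: adding pool channel 3 to the affine portal's SDQCR,
++ * then removing it again (subject to the portal's 'pools' access mask);
++ *
++ *	qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL(3));
++ *	qman_static_dequeue_del(QM_SDQCR_CHANNELS_POOL(3));
++ */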
++
++/**
++ * qman_dca - Perform a Discrete Consumption Acknowledgement
++ * @dq: the DQRR entry to be consumed
++ * @park_request: indicates whether the held-active FQ should be parked
++ *
++ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
++ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
++ * does not take a 'portal' argument but implies the core affine portal from the
++ * cpu that is currently executing the function. For reasons of locking, this
++ * function must be called from the same CPU as that which processed the DQRR
++ * entry in the first place.
++ */
++void qman_dca(struct qm_dqrr_entry *dq, int park_request);
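++/* Illustrative sketch: a dequeue callback that returned qman_cb_dqrr_defer
++ * for entry 'dq' can acknowledge it later, from the same cpu, without
++ * requesting a park;
++ *
++ *	qman_dca(dq, 0);
++ */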
++
++/**
++ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
++ *
++ * For use in situations where a cpu-affine caller needs to determine when all
++ * enqueues for the local portal have been processed by Qman but can't use the
++ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
++ * The function forces tracking of EQCR consumption (which normally doesn't
++ * happen until enqueue processing needs to find space to put new enqueue
++ * commands), and returns zero if the ring still has unprocessed entries,
++ * non-zero if it is empty.
++ */
++int qman_eqcr_is_empty(void);
++
++/**
++ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
++ * @handler: callback for processing DCP ERNs
++ * @affine: whether this handler is specific to the locally affine portal
++ *
++ * If a hardware block's interface to Qman (ie. its direct-connect portal, or
++ * DCP) is configured not to receive enqueue rejections, then any enqueues
++ * through that DCP that are rejected will be sent to a given software portal.
++ * If @affine is non-zero, then this handler will only be used for DCP ERNs
++ * received on the portal affine to the current CPU. If multiple CPUs share a
++ * portal and they all call this function, they will be setting the handler for
++ * the same portal! If @affine is zero, then this handler will be global to all
++ * portals handled by this instance of the driver. Only those portals that do
++ * not have their own affine handler will use the global handler.
++ */
++void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
++
++ /* FQ management */
++ /* ------------- */
++/**
++ * qman_create_fq - Allocates a FQ
++ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
++ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
++ * @fq: memory for storing the 'fq', with callbacks filled in
++ *
++ * Creates a frame queue object for the given @fqid, unless the
++ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
++ * dynamically allocated (or the function fails if none are available). Once
++ * created, the caller should not touch the memory at 'fq' except as extended to
++ * adjacent memory for user-defined fields (see the definition of "struct
++ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
++ * pre-existing frame-queues that aren't to be otherwise interfered with; it
++ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
++ * causes the driver to honour any contextB modifications requested in the
++ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
++ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
++ * software portals, the contextB field is controlled by the driver and can't be
++ * modified by the caller. If the AS_IS flag is specified, management commands
++ * will be used to query state for frame queue @fqid and construct
++ * a frame queue object based on that, rather than assuming/requiring that it be
++ * Out of Service.
++ */
++int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
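++/* Illustrative sketch: creating an FQ with a dynamically allocated FQID and
++ * a dequeue callback registered via 'cb' ('my_dqrr_cb' and 'err' are
++ * hypothetical);
++ *
++ *	struct qman_fq *fq = kzalloc(sizeof(*fq), GFP_KERNEL);
++ *
++ *	if (!fq)
++ *		return -ENOMEM;
++ *	fq->cb.dqrr = my_dqrr_cb;
++ *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
++ */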
++
++/**
++ * qman_destroy_fq - Deallocates a FQ
++ * @fq: the frame queue object to release
++ * @flags: bit-mask of QMAN_FQ_FREE_*** options
++ *
++ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
++ * not deallocated but the caller regains ownership, to do with as desired. The
++ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
++ * is specified, in which case it may also be in the 'parked' state.
++ */
++void qman_destroy_fq(struct qman_fq *fq, u32 flags);
++
++/**
++ * qman_fq_fqid - Queries the frame queue ID of a FQ object
++ * @fq: the frame queue object to query
++ */
++u32 qman_fq_fqid(struct qman_fq *fq);
++
++/**
++ * qman_fq_state - Queries the state of a FQ object
++ * @fq: the frame queue object to query
++ * @state: pointer to state enum to return the FQ scheduling state
++ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
++ *
++ * Queries the state of the FQ object, without performing any h/w commands.
++ * This captures the state, as seen by the driver, at the time the function
++ * executes.
++ */
++void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
++
++/**
++ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
++ * @fq: the frame queue object to modify, must be 'parked' or new.
++ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
++ * @opts: the FQ-modification settings, as defined in the low-level API
++ *
++ * The @opts parameter comes from the low-level portal API. Select
++ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
++ * rather than parked. NB, @opts can be NULL.
++ *
++ * Note that some fields and options within @opts may be ignored or overwritten
++ * by the driver;
++ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
++ * affects one frame queue: @fq).
++ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
++ * 'fqd' structure's 'context_b' field are sometimes overwritten;
++ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
++ * initialised to a value used by the driver for demux.
++ * - if context_b is initialised for demux, so is context_a in case stashing
++ * is requested (see item 4).
++ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
++ * objects.)
++ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
++ * 'dest::channel' field will be overwritten to match the portal used to issue
++ * the command. If the WE_DESTWQ write-enable bit had already been set by the
++ * caller, the channel workqueue will be left as-is, otherwise the write-enable
++ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
++ * isn't set, the destination channel/workqueue fields and the write-enable bit
++ * are left as-is.
++ * 4. if the driver overwrites context_a/b for demux, then if
++ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
++ * context_a.address fields and will leave the stashing fields provided by the
++ * user alone, otherwise it will zero out the context_a.stashing fields.
++ */
++int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
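++
++/*
++ * Illustrative sketch only (not part of this API): a minimal bring-up of a
++ * dynamically-allocated FQ using qman_create_fq() and qman_init_fq(). The
++ * 'my_fq' storage and its dequeue/ern callbacks are assumed to be provided
++ * and filled in by the caller.
++ */
++#if 0
++static int example_fq_bringup(struct qman_fq *my_fq)
++{
++	int err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, my_fq);
++	if (err)
++		return err;
++	/* NULL 'opts' is permitted; SCHED leaves the FQ Tentatively- or
++	 * Truly-Scheduled rather than Parked. */
++	return qman_init_fq(my_fq, QMAN_INITFQ_FLAG_SCHED, NULL);
++}
++#endif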
++
++/**
++ * qman_schedule_fq - Schedules a FQ
++ * @fq: the frame queue object to schedule, must be 'parked'
++ *
++ * Schedules the frame queue, which must be Parked; this takes it to the
++ * Tentatively-Scheduled or Truly-Scheduled state depending on its fill-level.
++ */
++int qman_schedule_fq(struct qman_fq *fq);
++
++/**
++ * qman_retire_fq - Retires a FQ
++ * @fq: the frame queue object to retire
++ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
++ *
++ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
++ * the retirement was started asynchronously, otherwise it returns negative for
++ * failure. When this function returns zero, @flags is set to indicate whether
++ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
++ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
++ * FQRN message shows up on the portal's message ring.
++ *
++ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
++ * Active state), the completion will be via the message ring as a FQRN - but
++ * the corresponding callback may occur before this function returns!! Ie. the
++ * caller should be prepared to accept the callback as the function is called,
++ * not only once it has returned.
++ */
++int qman_retire_fq(struct qman_fq *fq, u32 *flags);
++
++/**
++ * qman_oos_fq - Puts a FQ "out of service"
++ * @fq: the frame queue object to be put out-of-service, must be 'retired'
++ *
++ * The frame queue must be retired and empty, and if any order restoration list
++ * was released as ERNs at the time of retirement, they must all be consumed.
++ */
++int qman_oos_fq(struct qman_fq *fq);
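++
++/*
++ * Illustrative sketch only: tearing down an FQ with the calls above. If
++ * qman_retire_fq() returns +1 the retirement completes asynchronously via an
++ * FQRN message; a real caller would synchronise with that callback before
++ * taking the FQ out of service, which is elided here.
++ */
++#if 0
++static int example_fq_teardown(struct qman_fq *my_fq)
++{
++	u32 flags;
++	int err = qman_retire_fq(my_fq, &flags);
++	if (err < 0)
++		return err;
++	/* ... wait for the FQRN callback here if err == 1 ... */
++	err = qman_oos_fq(my_fq);
++	if (err)
++		return err;
++	qman_destroy_fq(my_fq, 0);
++	return 0;
++}
++#endif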
++
++/**
++ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
++ * @fq: the frame queue object to be set to the XON/XOFF state; must not be in
++ * the 'oos', 'retired' or 'parked' state
++ * @xon: boolean to set fq in XON or XOFF state
++ *
++ * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
++ * state, otherwise the IFSI interrupt will be asserted.
++ */
++int qman_fq_flow_control(struct qman_fq *fq, int xon);
++
++/**
++ * qman_query_fq - Queries FQD fields (via h/w query command)
++ * @fq: the frame queue object to be queried
++ * @fqd: storage for the queried FQD fields
++ */
++int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
++
++/**
++ * qman_query_fq_np - Queries non-programmable FQD fields
++ * @fq: the frame queue object to be queried
++ * @np: storage for the queried FQD fields
++ */
++int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
++
++/**
++ * qman_query_wq - Queries work queue lengths
++ * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
++ * to this software portal. Otherwise, query length of WQs in a
++ * channel specified in wq.
++ * @wq: storage for the queried WQ lengths. Also specifies the channel to
++ * query if query_dedicated is zero.
++ */
++int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
++
++/**
++ * qman_volatile_dequeue - Issue a volatile dequeue command
++ * @fq: the frame queue object to dequeue from
++ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
++ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
++ *
++ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
++ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
++ * the VDQCR is already in use, otherwise returns non-zero for failure. If
++ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
++ * the VDQCR command has finished executing (ie. once the callback for the last
++ * DQRR entry resulting from the VDQCR command has been called). If not using
++ * the FINISH flag, completion can be determined either by detecting the
++ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
++ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
++ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
++ * "flags" retrieved from qman_fq_state().
++ */
++int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
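++
++/*
++ * Illustrative sketch only: a blocking volatile dequeue. 'vdqcr_opts' is an
++ * assumed caller-supplied bit-mask of QM_VDQCR_*** options (eg. a frame-count
++ * setting) as per qm_dqrr_vdqcr_set().
++ */
++#if 0
++static int example_vdq(struct qman_fq *my_fq, u32 vdqcr_opts)
++{
++	/* Sleep until VDQCR is available, and only return once all DQRR
++	 * entries resulting from this command have been processed through
++	 * the FQ's dequeue callback. */
++	return qman_volatile_dequeue(my_fq,
++				     QMAN_VOLATILE_FLAG_WAIT |
++				     QMAN_VOLATILE_FLAG_FINISH,
++				     vdqcr_opts);
++}
++#endif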
++
++/**
++ * qman_enqueue - Enqueue a frame to a frame queue
++ * @fq: the frame queue object to enqueue to
++ * @fd: a descriptor of the frame to be enqueued
++ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
++ *
++ * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
++ * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
++ * field is ignored. The return value is non-zero on error, such as ring full
++ * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
++ * specified), etc. If the ring is full and FLAG_WAIT is specified, this
++ * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
++ * interrupt will assert when Qman consumes the EQCR entry (subject to "status
++ * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
++ * perform an implied "discrete consumption acknowledgement" on the dequeue
++ * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
++ * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
++ * this implicit DCA can delay the release of a "held active" frame queue
++ * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
++ * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
++ * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
++ * acknowledgement should "park request" the "held active" frame queue. Ie.
++ * when the portal eventually releases that frame queue, it will be left in the
++ * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
++ * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
++ * is requested, and the FQ is a member of a congestion group, then this
++ * function returns -EAGAIN if the congestion group is currently congested.
++ * Note, this does not eliminate ERNs, as the async interface means we can be
++ * sending enqueue commands to an un-congested FQ that becomes congested before
++ * the enqueue commands are processed, but it does minimise needless thrashing
++ * of an already busy hardware resource by throttling many of the to-be-dropped
++ * enqueues "at the source".
++ */
++int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
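++
++/*
++ * Illustrative sketch only: a blocking enqueue that also respects congestion
++ * groups. The frame descriptor 'fd' is assumed to have been prepared by the
++ * caller.
++ */
++#if 0
++static int example_enqueue(struct qman_fq *my_fq, const struct qm_fd *fd)
++{
++	/* Returns -EAGAIN if the FQ's congestion group is congested */
++	return qman_enqueue(my_fq, fd, QMAN_ENQUEUE_FLAG_WAIT |
++			    QMAN_ENQUEUE_FLAG_WATCH_CGR);
++}
++#endif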
++
++typedef int (*qman_cb_precommit) (void *arg);
++/**
++ * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
++ * @fq: the frame queue object to enqueue to
++ * @fd: a descriptor of the frame to be enqueued
++ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
++ * @cb: user supplied callback function to invoke before writing commit verb.
++ * @cb_arg: callback function argument
++ *
++ * This is similar to qman_enqueue except that it will invoke a user supplied
++ * callback function just before writing the commit verb. This is useful
++ * when the user wants to do something *just before* enqueuing the request,
++ * at a point where the enqueue can no longer fail.
++ */
++int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
++ u32 flags, qman_cb_precommit cb, void *cb_arg);
++
++/**
++ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
++ * @fq: the frame queue object to enqueue to
++ * @fd: a descriptor of the frame to be enqueued
++ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
++ * @orp: the frame queue object used as an order restoration point.
++ * @orp_seqnum: the sequence number of this frame in the order restoration path
++ *
++ * Similar to qman_enqueue(), but with the addition of an Order Restoration
++ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
++ * enqueue operation to employ order restoration. Each frame queue object acts
++ * as an Order Definition Point (ODP) by providing each frame dequeued from it
++ * with an incrementing sequence number; this value is generally ignored unless
++ * that sequence of dequeued frames will need order restoration later. Each
++ * frame queue object also encapsulates an Order Restoration Point (ORP), which
++ * is a re-assembly context for re-ordering frames relative to their sequence
++ * numbers as they are enqueued. The ORP does not have to be within the frame
++ * queue that receives the enqueued frame, in fact it is usually the frame
++ * queue from which the frames were originally dequeued. For the purposes of
++ * order restoration, multiple frames (or "fragments") can be enqueued for a
++ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
++ * enqueues except the final fragment of a given sequence number. Ordering
++ * between sequence numbers is guaranteed, even if fragments of different
++ * sequence numbers are interlaced with one another. Fragments of the same
++ * sequence number will retain the order in which they are enqueued. If no
++ * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
++ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
++ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
++ * sequence number should become the ORP's "Next Expected Sequence Number".
++ *
++ * Side note: a frame queue object can be used purely as an ORP, without
++ * carrying any frames at all. Care should be taken not to deallocate a frame
++ * queue object that is being actively used as an ORP, as a future allocation
++ * of the frame queue object may start using the internal ORP before the
++ * previous use has finished.
++ */
++int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
++ struct qman_fq *orp, u16 orp_seqnum);
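++
++/*
++ * Illustrative sketch only: enqueuing a two-fragment sequence through an ORP.
++ * 'orp_fq' is the FQ object acting as the order restoration point, and
++ * 'seqnum' is the sequence number the frames were originally dequeued with.
++ */
++#if 0
++static int example_orp_enqueue(struct qman_fq *dest, struct qman_fq *orp_fq,
++			       const struct qm_fd *frag1,
++			       const struct qm_fd *frag2, u16 seqnum)
++{
++	/* Every fragment of a sequence number except the last carries NLIS */
++	int err = qman_enqueue_orp(dest, frag1, QMAN_ENQUEUE_FLAG_NLIS,
++				   orp_fq, seqnum);
++	if (err)
++		return err;
++	return qman_enqueue_orp(dest, frag2, 0, orp_fq, seqnum);
++}
++#endif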
++
++/**
++ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
++ * @result: is set by the API to the base FQID of the allocated range
++ * @count: the number of FQIDs required
++ * @align: required alignment of the allocated range
++ * @partial: non-zero if the API can return fewer than @count FQIDs
++ *
++ * Returns the number of frame queues allocated, or a negative error code. If
++ * @partial is non-zero, the allocation request may return a smaller range of
++ * FQs than requested (though alignment will be as requested). If @partial is
++ * zero, the return value will either be 'count' or negative.
++ */
++int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
++static inline int qman_alloc_fqid(u32 *result)
++{
++ int ret = qman_alloc_fqid_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++
++/**
++ * qman_release_fqid_range - Release the specified range of frame queue IDs
++ * @fqid: the base FQID of the range to deallocate
++ * @count: the number of FQIDs in the range
++ *
++ * This function can also be used to seed the allocator with ranges of FQIDs
++ * that it can subsequently allocate from.
++ */
++void qman_release_fqid_range(u32 fqid, unsigned int count);
++static inline void qman_release_fqid(u32 fqid)
++{
++ qman_release_fqid_range(fqid, 1);
++}
++
++void qman_seed_fqid_range(u32 fqid, unsigned int count);
++
++
++int qman_shutdown_fq(u32 fqid);
++
++/**
++ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
++ * @fqid: the base FQID of the range to reserve
++ * @count: the number of FQIDs in the range
++ */
++int qman_reserve_fqid_range(u32 fqid, unsigned int count);
++static inline int qman_reserve_fqid(u32 fqid)
++{
++ return qman_reserve_fqid_range(fqid, 1);
++}
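++
++/*
++ * Illustrative sketch only: reserving an FQID range and using the dynamic
++ * allocator. The base (0x100) and count (0x100) are arbitrary example values,
++ * not values mandated by the API.
++ */
++#if 0
++static void example_fqid_ranges(void)
++{
++	u32 fqid;
++	/* Keep FQIDs 0x100..0x1ff away from dynamic allocation */
++	if (qman_reserve_fqid_range(0x100, 0x100))
++		return;
++	/* Dynamically allocate a single FQID, then hand it back */
++	if (!qman_alloc_fqid(&fqid))
++		qman_release_fqid(fqid);
++}
++#endif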
++
++ /* Pool-channel management */
++ /* ----------------------- */
++/**
++ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
++ * @result: is set by the API to the base pool-channel ID of the allocated range
++ * @count: the number of pool-channel IDs required
++ * @align: required alignment of the allocated range
++ * @partial: non-zero if the API can return fewer than @count
++ *
++ * Returns the number of pool-channel IDs allocated, or a negative error code.
++ * If @partial is non-zero, the allocation request may return a smaller range
++ * than requested (though alignment will be as requested). If @partial is zero,
++ * the return value will either be 'count' or negative.
++ */
++int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
++static inline int qman_alloc_pool(u32 *result)
++{
++ int ret = qman_alloc_pool_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++
++/**
++ * qman_release_pool_range - Release the specified range of pool-channel IDs
++ * @id: the base pool-channel ID of the range to deallocate
++ * @count: the number of pool-channel IDs in the range
++ */
++void qman_release_pool_range(u32 id, unsigned int count);
++static inline void qman_release_pool(u32 id)
++{
++ qman_release_pool_range(id, 1);
++}
++
++/**
++ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
++ * @id: the base pool-channel ID of the range to reserve
++ * @count: the number of pool-channel IDs in the range
++ */
++int qman_reserve_pool_range(u32 id, unsigned int count);
++static inline int qman_reserve_pool(u32 id)
++{
++ return qman_reserve_pool_range(id, 1);
++}
++
++void qman_seed_pool_range(u32 id, unsigned int count);
++
++ /* CGR management */
++ /* -------------- */
++/**
++ * qman_create_cgr - Register a congestion group object
++ * @cgr: the 'cgr' object, with fields filled in
++ * @flags: QMAN_CGR_FLAG_* values
++ * @opts: optional state of CGR settings
++ *
++ * Registers this object to receive congestion entry/exit callbacks on the
++ * portal affine to the cpu on which this API is executed. If opts is
++ * NULL then only the callback (cgr->cb) function is registered. If @flags
++ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
++ * any unspecified parameters) will be used rather than a modify hw command
++ * (which only modifies the specified parameters).
++ */
++int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
++ struct qm_mcc_initcgr *opts);
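++
++/*
++ * Illustrative sketch only: registering a congestion group into a known
++ * state. 'my_cgr' is assumed to have its 'cgrid' and 'cb' fields filled in
++ * by the caller, and 'opts' to carry the desired congestion thresholds.
++ */
++#if 0
++static int example_cgr_register(struct qman_cgr *my_cgr,
++				struct qm_mcc_initcgr *opts)
++{
++	/* USE_INIT resets any parameters not specified in 'opts' */
++	return qman_create_cgr(my_cgr, QMAN_CGR_FLAG_USE_INIT, opts);
++}
++#endif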
++
++/**
++ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
++ * @cgr: the 'cgr' object, with fields filled in
++ * @flags: QMAN_CGR_FLAG_* values
++ * @dcp_portal: the DCP portal to which the cgr object is registered.
++ * @opts: optional state of CGR settings
++ *
++ */
++int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
++ struct qm_mcc_initcgr *opts);
++
++/**
++ * qman_delete_cgr - Deregisters a congestion group object
++ * @cgr: the 'cgr' object to deregister
++ *
++ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
++ * is executed. This must be excuted on the same affine portal on which it was
++ * created.
++ */
++int qman_delete_cgr(struct qman_cgr *cgr);
++
++/**
++ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
++ * @cgr: the 'cgr' object to deregister
++ *
++ * This will select the proper CPU and run there qman_delete_cgr().
++ */
++void qman_delete_cgr_safe(struct qman_cgr *cgr);
++
++/**
++ * qman_modify_cgr - Modify CGR fields
++ * @cgr: the 'cgr' object to modify
++ * @flags: QMAN_CGR_FLAG_* values
++ * @opts: the CGR-modification settings
++ *
++ * The @opts parameter comes from the low-level portal API, and can be NULL.
++ * Note that some fields and options within @opts may be ignored or overwritten
++ * by the driver, in particular the 'cgrid' field is ignored (this operation
++ * only affects the given CGR object). If @flags contains
++ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
++ * unspecified parameters) will be used rather than a modify hw command (which
++ * only modifies the specified parameters).
++ */
++int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
++ struct qm_mcc_initcgr *opts);
++
++/**
++* qman_query_cgr - Queries CGR fields
++* @cgr: the 'cgr' object to query
++* @result: storage for the queried congestion group record
++*/
++int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
++
++/**
++ * qman_query_congestion - Queries the state of all congestion groups
++ * @congestion: storage for the queried state of all congestion groups
++ */
++int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
++
++/**
++ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
++ * @result: is set by the API to the base CGR ID of the allocated range
++ * @count: the number of CGR IDs required
++ * @align: required alignment of the allocated range
++ * @partial: non-zero if the API can return fewer than @count
++ *
++ * Returns the number of CGR IDs allocated, or a negative error code.
++ * If @partial is non-zero, the allocation request may return a smaller range
++ * than requested (though alignment will be as requested). If @partial is zero,
++ * the return value will either be 'count' or negative.
++ */
++int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
++static inline int qman_alloc_cgrid(u32 *result)
++{
++ int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++
++/**
++ * qman_release_cgrid_range - Release the specified range of CGR IDs
++ * @id: the base CGR ID of the range to deallocate
++ * @count: the number of CGR IDs in the range
++ */
++void qman_release_cgrid_range(u32 id, unsigned int count);
++static inline void qman_release_cgrid(u32 id)
++{
++ qman_release_cgrid_range(id, 1);
++}
++
++/**
++ * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
++ * @id: the base CGR ID of the range to reserve
++ * @count: the number of CGR IDs in the range
++ */
++int qman_reserve_cgrid_range(u32 id, unsigned int count);
++static inline int qman_reserve_cgrid(u32 id)
++{
++ return qman_reserve_cgrid_range(id, 1);
++}
++
++void qman_seed_cgrid_range(u32 id, unsigned int count);
++
++
++ /* Helpers */
++ /* ------- */
++/**
++ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
++ * @fqid: the FQID that will be initialised by other s/w
++ *
++ * In many situations, a FQID is provided for communication between s/w
++ * entities, and whilst the consumer is responsible for initialising and
++ * scheduling the FQ, the producer(s) generally create a wrapper FQ object
++ * and only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie;
++ * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
++ * However, data can not be enqueued to the FQ until it is initialised out of
++ * the OOS state - this function polls for that condition. It is particularly
++ * useful for users of IPC functions - each endpoint's Rx FQ is the other
++ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
++ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
++ * synchronise. The function returns zero for success, +1 if the FQ is still in
++ * the OOS state, or negative if there was an error.
++ */
++static inline int qman_poll_fq_for_init(struct qman_fq *fq)
++{
++ struct qm_mcr_queryfq_np np;
++ int err;
++ err = qman_query_fq_np(fq, &np);
++ if (err)
++ return err;
++ if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
++ return 1;
++ return 0;
++}
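++
++/*
++ * Illustrative sketch only: waiting for a peer to initialise the Tx FQ before
++ * enqueuing to it. A real caller would bound the loop and/or sleep between
++ * polls rather than spin indefinitely.
++ */
++#if 0
++static int example_wait_peer_fq(struct qman_fq *tx_fq)
++{
++	int err;
++	do {
++		err = qman_poll_fq_for_init(tx_fq);
++	} while (err == 1);
++	return err; /* 0 once the peer has taken the FQ out of OOS */
++}
++#endif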
++
++ /* -------------- */
++ /* CEETM :: types */
++ /* -------------- */
++/**
++ * Token Rate Structure
++ * Shaping rates are based on a "credit" system and a pre-configured h/w
++ * internal timer. The following type represents a shaper "rate" parameter as a
++ * fractional number of "tokens". Here's how it works. This (fractional) number
++ * of tokens is added to the shaper's "credit" every time the h/w timer elapses
++ * (up to a limit which is set by another shaper parameter). Every time a frame
++ * is enqueued through a shaper, the shaper deducts as many tokens as there are
++ * bytes of data in the enqueued frame. A shaper will not allow itself to
++ * enqueue any frames if its token count is negative. As such;
++ *
++ * The rate at which data is enqueued is limited by the
++ * rate at which tokens are added.
++ *
++ * Therefore if the user knows the period between these h/w timer updates in
++ * seconds, they can calculate the maximum traffic rate of the shaper (in
++ * bytes-per-second) from the token rate. And vice versa, they can calculate
++ * the token rate to use in order to achieve a given traffic rate.
++ */
++struct qm_ceetm_rate {
++ /* The token rate is; whole + (fraction/8192) */
++ u32 whole:11; /* 0..2047 */
++ u32 fraction:13; /* 0..8191 */
++};
++
++struct qm_ceetm_weight_code {
++ /* The weight code is; 5 msbits + 3 lsbits */
++ u8 y:5;
++ u8 x:3;
++};
++
++struct qm_ceetm {
++ unsigned int idx;
++ struct list_head sub_portals;
++ struct list_head lnis;
++ unsigned int sp_range[2];
++ unsigned int lni_range[2];
++};
++
++struct qm_ceetm_sp {
++ struct list_head node;
++ unsigned int idx;
++ unsigned int dcp_idx;
++ int is_claimed;
++ struct qm_ceetm_lni *lni;
++};
++
++/* Logical Network Interface */
++struct qm_ceetm_lni {
++ struct list_head node;
++ unsigned int idx;
++ unsigned int dcp_idx;
++ int is_claimed;
++ struct qm_ceetm_sp *sp;
++ struct list_head channels;
++ int shaper_enable;
++ int shaper_couple;
++ int oal;
++ struct qm_ceetm_rate cr_token_rate;
++ struct qm_ceetm_rate er_token_rate;
++ u16 cr_token_bucket_limit;
++ u16 er_token_bucket_limit;
++};
++
++/* Class Queue Channel */
++struct qm_ceetm_channel {
++ struct list_head node;
++ unsigned int idx;
++ unsigned int lni_idx;
++ unsigned int dcp_idx;
++ struct list_head class_queues;
++ struct list_head ccgs;
++ u8 shaper_enable;
++ u8 shaper_couple;
++ struct qm_ceetm_rate cr_token_rate;
++ struct qm_ceetm_rate er_token_rate;
++ u16 cr_token_bucket_limit;
++ u16 er_token_bucket_limit;
++};
++
++struct qm_ceetm_ccg;
++
++/* This callback type is used when handling congestion entry/exit. The
++ * 'cb_ctx' value is the opaque value associated with the ccg object.
++ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
++ */
++typedef void (*qman_cb_ccgr)(struct qm_ceetm_ccg *ccg, void *cb_ctx,
++ int congested);
++
++/* Class Congestion Group */
++struct qm_ceetm_ccg {
++ struct qm_ceetm_channel *parent;
++ struct list_head node;
++ struct list_head cb_node;
++ qman_cb_ccgr cb;
++ void *cb_ctx;
++ unsigned int idx;
++};
++
++/* Class Queue */
++struct qm_ceetm_cq {
++ struct qm_ceetm_channel *parent;
++ struct qm_ceetm_ccg *ccg;
++ struct list_head node;
++ unsigned int idx;
++ int is_claimed;
++ struct list_head bound_lfqids;
++ struct list_head binding_node;
++};
++
++/* Logical Frame Queue */
++struct qm_ceetm_lfq {
++ struct qm_ceetm_channel *parent;
++ struct list_head node;
++ unsigned int idx;
++ unsigned int dctidx;
++ u64 context_a;
++ u32 context_b;
++ qman_cb_mr ern;
++};
++
++/**
++ * qman_ceetm_bps2tokenrate - Given a desired rate 'bps' measured in bps
++ * (ie. bits-per-second), compute the 'token_rate' fraction that best
++ * approximates that rate.
++ * @bps: the desired shaper rate in bps.
++ * @token_rate: the output token rate computed from the given bps.
++ * @rounding: dictates how to round if an exact conversion is not possible; if
++ * it is negative then 'token_rate' will round down to the highest value that
++ * does not exceed the desired rate, if it is positive then 'token_rate' will
++ * round up to the lowest value that is greater than or equal to the desired
++ * rate, and if it is zero then it will round to the nearest approximation,
++ * whether that be up or down.
++ *
++ * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
++ */
++int qman_ceetm_bps2tokenrate(u64 bps,
++ struct qm_ceetm_rate *token_rate,
++ int rounding);
++
++/**
++ * qman_ceetm_tokenrate2bps - Given a 'token_rate', compute the
++ * corresponding number of 'bps'.
++ * @token_rate: the input desired token_rate fraction.
++ * @bps: the output shaper rate in bps computed from the given token rate.
++ * @rounding: has the same semantics as the previous function.
++ *
++ * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
++ */
++int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate,
++ u64 *bps,
++ int rounding);
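++
++/*
++ * Illustrative sketch only: converting a 1Gbps target into a token rate,
++ * rounding down so the shaper never exceeds the requested rate, then reading
++ * back the rate that was actually achieved.
++ */
++#if 0
++static int example_rate_conversion(u64 *actual_bps)
++{
++	struct qm_ceetm_rate rate;
++	int err = qman_ceetm_bps2tokenrate(1000000000ULL, &rate, -1);
++	if (err)
++		return err;
++	/* Round-to-nearest when converting back for reporting */
++	return qman_ceetm_tokenrate2bps(&rate, actual_bps, 0);
++}
++#endif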
++
++int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
++ int partial);
++static inline int qman_alloc_ceetm0_channel(u32 *result)
++{
++ int ret = qman_alloc_ceetm0_channel_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++void qman_release_ceetm0_channel_range(u32 channelid, u32 count);
++static inline void qman_release_ceetm0_channelid(u32 channelid)
++{
++ qman_release_ceetm0_channel_range(channelid, 1);
++}
++
++int qman_reserve_ceetm0_channel_range(u32 channelid, u32 count);
++static inline int qman_reserve_ceetm0_channelid(u32 channelid)
++{
++ return qman_reserve_ceetm0_channel_range(channelid, 1);
++}
++
++void qman_seed_ceetm0_channel_range(u32 channelid, u32 count);
++
++
++int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
++ int partial);
++static inline int qman_alloc_ceetm1_channel(u32 *result)
++{
++ int ret = qman_alloc_ceetm1_channel_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++void qman_release_ceetm1_channel_range(u32 channelid, u32 count);
++static inline void qman_release_ceetm1_channelid(u32 channelid)
++{
++ qman_release_ceetm1_channel_range(channelid, 1);
++}
++int qman_reserve_ceetm1_channel_range(u32 channelid, u32 count);
++static inline int qman_reserve_ceetm1_channelid(u32 channelid)
++{
++ return qman_reserve_ceetm1_channel_range(channelid, 1);
++}
++
++void qman_seed_ceetm1_channel_range(u32 channelid, u32 count);
++
++
++int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
++ int partial);
++static inline int qman_alloc_ceetm0_lfqid(u32 *result)
++{
++ int ret = qman_alloc_ceetm0_lfqid_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count);
++static inline void qman_release_ceetm0_lfqid(u32 lfqid)
++{
++ qman_release_ceetm0_lfqid_range(lfqid, 1);
++}
++int qman_reserve_ceetm0_lfqid_range(u32 lfqid, u32 count);
++static inline int qman_reserve_ceetm0_lfqid(u32 lfqid)
++{
++ return qman_reserve_ceetm0_lfqid_range(lfqid, 1);
++}
++
++void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count);
++
++
++int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
++ int partial);
++static inline int qman_alloc_ceetm1_lfqid(u32 *result)
++{
++ int ret = qman_alloc_ceetm1_lfqid_range(result, 1, 0, 0);
++ return (ret > 0) ? 0 : ret;
++}
++void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count);
++static inline void qman_release_ceetm1_lfqid(u32 lfqid)
++{
++ qman_release_ceetm1_lfqid_range(lfqid, 1);
++}
++int qman_reserve_ceetm1_lfqid_range(u32 lfqid, u32 count);
++static inline int qman_reserve_ceetm1_lfqid(u32 lfqid)
++{
++ return qman_reserve_ceetm1_lfqid_range(lfqid, 1);
++}
++
++void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count);
++
++
++ /* ----------------------------- */
++ /* CEETM :: sub-portals */
++ /* ----------------------------- */
++
++/**
++ * qman_ceetm_sp_claim - Claims the given sub-portal, provided it is available
++ * to us and configured for traffic-management.
++ * @sp: the returned sub-portal object, if successful.
++ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
++ * instance).
++ * @sp_idx: the desired sub-portal index, from 0 to 15.
++ *
++ * Returns zero for success, or -ENODEV if the sub-portal is in use, or -EINVAL
++ * if the sp_idx is out of range.
++ *
++ * Note that if there are multiple driver domains (eg. a linux kernel versus
++ * user-space drivers in USDPAA, or multiple guests running under a hypervisor)
++ * then a sub-portal may be accessible by more than one instance of a qman
++ * driver and so it may be claimed multiple times. If this is the case, it is
++ * up to the system architect to prevent conflicting configuration actions
++ * coming from the different driver domains. The qman drivers do not have any
++ * behind-the-scenes coordination to prevent this from happening.
++ */
++int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp,
++ enum qm_dc_portal dcp_idx,
++ unsigned int sp_idx);
++
++/**
++ * qman_ceetm_sp_release - Releases a previously claimed sub-portal.
++ * @sp: the sub-portal to be released.
++ *
++ * Returns 0 for success, or -EBUSY for failure if the dependencies are not
++ * released.
++ */
++int qman_ceetm_sp_release(struct qm_ceetm_sp *sp);
++
++ /* ----------------------------------- */
++ /* CEETM :: logical network interfaces */
++ /* ----------------------------------- */
++
++/**
++ * qman_ceetm_lni_claim - Claims an unclaimed LNI.
++ * @lni: the returned LNI object, if successful.
++ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
++ * instance)
++ * @lni_idx: is the desired LNI index.
++ *
++ * Returns zero for success, or -EINVAL on failure, which will happen if the LNI
++ * is not available or has already been claimed (and not yet successfully
++ * released), or lni_idx is out of range.
++ *
++ * Note that there may be multiple driver domains (or instances) that need to
++ * transmit out the same LNI, so this claim is only guaranteeing exclusivity
++ * within the domain of the driver being called. See qman_ceetm_sp_claim() and
++ * qman_ceetm_sp_get_lni() for more information.
++ */
++int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni,
++ enum qm_dc_portal dcp_id,
++ unsigned int lni_idx);
++
++/**
++ * qman_ceetm_lni_release - Releases a previously claimed LNI.
++ * @lni: the LNI to be released.
++ *
++ * This will only succeed if all dependent objects have been released.
++ * Returns zero for success, or -EBUSY if the dependencies are not released.
++ */
++int qman_ceetm_lni_release(struct qm_ceetm_lni *lni);
++
++/**
++ * qman_ceetm_sp_set_lni
++ * qman_ceetm_sp_get_lni - Set/get the LNI that the sub-portal is currently
++ * mapped to.
++ * @sp: the given sub-portal.
++ * @lni (in "set" function): the LNI object to which the sp will be mapped.
++ * @lni_idx (in "get" function): the LNI index to which the sp is mapped.
++ *
++ * Returns zero for success. The "set" function returns -EINVAL if this sp-lni
++ * mapping has already been set or if the configure mapping command returns
++ * error; the "get" function returns -EINVAL if this sp-lni mapping is not set
++ * or if the query mapping command returns error.
++ *
++ * This may be useful in situations where multiple driver domains have access
++ * to the same sub-portals in order to all be able to transmit out the same
++ * physical interface (perhaps they're on different IP addresses or VPNs, so
++ * Fman is splitting Rx traffic and here we need to converge Tx traffic). In
++ * that case, a control-plane is likely to use qman_ceetm_lni_claim() followed
++ * by qman_ceetm_sp_set_lni() to configure the sub-portal, and other domains
++ * are likely to use qman_ceetm_sp_get_lni() followed by qman_ceetm_lni_claim()
++ * in order to determine the LNI that the control-plane had assigned. This is
++ * why the "get" returns an index, whereas the "set" takes an (already claimed)
++ * LNI object.
++ */
++int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp,
++ struct qm_ceetm_lni *lni);
++int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp,
++ unsigned int *lni_idx);
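++
++/*
++ * Illustrative sketch only: a control-plane claiming sub-portal 0 and LNI 0
++ * on a given Fman DCP and mapping one to the other. Error unwinding
++ * (releasing earlier claims on failure) is elided for brevity.
++ */
++#if 0
++static int example_sp_lni_bringup(enum qm_dc_portal dcp)
++{
++	struct qm_ceetm_sp *sp;
++	struct qm_ceetm_lni *lni;
++	int err = qman_ceetm_sp_claim(&sp, dcp, 0);
++	if (err)
++		return err;
++	err = qman_ceetm_lni_claim(&lni, dcp, 0);
++	if (err)
++		return err;
++	return qman_ceetm_sp_set_lni(sp, lni);
++}
++#endif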
++
++/**
++ * qman_ceetm_lni_enable_shaper
++ * qman_ceetm_lni_disable_shaper - Enables/disables shaping on the LNI.
++ * @lni: the given LNI.
++ * @coupled: indicates whether CR and ER are coupled.
++ * @oal: the overhead accounting length which is added to the actual length of
++ * each frame when performing shaper calculations.
++ *
++ * When the number of (unused) committed-rate tokens reach the committed-rate
++ * token limit, 'coupled' indicates whether surplus tokens should be added to
++ * the excess-rate token count (up to the excess-rate token limit).
++ * When an LNI is claimed, its shaper is disabled by default; the enable
++ * function turns the shaper on for this LNI.
++ * Whenever a claimed LNI is first enabled for shaping, its committed and
++ * excess token rates and limits are zero, so will need to be changed to do
++ * anything useful. The shaper can subsequently be enabled/disabled without
++ * resetting the shaping parameters, but the shaping parameters will be reset
++ * when the LNI is released.
++ *
++ * Returns zero for success. The "enable" function returns:
++ * a) -EINVAL if the shaper is already enabled,
++ * b) -EIO if the configure shaper command returns error.
++ * The "disable" function returns:
++ * a) -EINVAL if the shaper has already been disabled,
++ * b) -EIO if the configure shaper command returns error.
++ */
++int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
++ int oal);
++int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni);
++
++/**
++ * qman_ceetm_lni_is_shaper_enabled - Check LNI shaper status
++ * @lni: the given LNI
++ */
++int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni);
++
++/**
++ * qman_ceetm_lni_set_commit_rate
++ * qman_ceetm_lni_get_commit_rate
++ * qman_ceetm_lni_set_excess_rate
++ * qman_ceetm_lni_get_excess_rate - Set/get the shaper CR/ER token rate and
++ * token limit for the given LNI.
++ * @lni: the given LNI.
++ * @token_rate: the desired token rate for "set" function, or the token rate of
++ * the LNI queried by "get" function.
++ * @token_limit: the desired token bucket limit for "set" function, or the token
++ * limit of the given LNI queried by "get" function.
++ *
++ * Returns zero for success. The "set" function returns -EINVAL if the given
++ * LNI is unshaped or -EIO if the configure shaper command returns error.
++ * The "get" function returns -EINVAL if the token rate or the token limit is
++ * not set or the query command returns error.
++ */
++int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
++ const struct qm_ceetm_rate *token_rate,
++ u16 token_limit);
++int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
++ struct qm_ceetm_rate *token_rate,
++ u16 *token_limit);
++int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
++ const struct qm_ceetm_rate *token_rate,
++ u16 token_limit);
++int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
++ struct qm_ceetm_rate *token_rate,
++ u16 *token_limit);
++/**
++ * qman_ceetm_lni_set_commit_rate_bps
++ * qman_ceetm_lni_get_commit_rate_bps
++ * qman_ceetm_lni_set_excess_rate_bps
++ * qman_ceetm_lni_get_excess_rate_bps - Set/get the shaper CR/ER rate
++ * and token limit for the given LNI.
++ * @lni: the given LNI.
++ * @bps: the desired shaping rate in bps for "set" function, or the shaping rate
++ * of the LNI queried by "get" function.
++ * @token_limit: the desired token bucket limit for "set" function, or the token
++ * limit of the given LNI queried by "get" function.
++ *
++ * Returns zero for success. The "set" function returns -EINVAL if the given
++ * LNI is unshaped or -EIO if the configure shaper command returns error.
++ * The "get" function returns -EINVAL if the token rate or the token limit is
++ * not set or the query command returns error.
++ */
++int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
++ u64 bps,
++ u16 token_limit);
++int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
++ u64 *bps, u16 *token_limit);
++int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
++ u64 bps,
++ u16 token_limit);
++int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
++ u64 *bps, u16 *token_limit);
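++
++/*
++ * Illustrative sketch only: enabling the LNI shaper (uncoupled) and setting a
++ * committed rate directly in bps. The 24-byte overhead accounting length and
++ * the 0x100 token bucket limit are arbitrary example values.
++ */
++#if 0
++static int example_lni_shaper(struct qm_ceetm_lni *lni)
++{
++	int err = qman_ceetm_lni_enable_shaper(lni, 0, 24);
++	if (err)
++		return err;
++	return qman_ceetm_lni_set_commit_rate_bps(lni, 500000000ULL, 0x100);
++}
++#endif
++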
++
++/**
++ * qman_ceetm_lni_set_tcfcc
++ * qman_ceetm_lni_get_tcfcc - Configure/query "Traffic Class Flow Control".
++ * @lni: the given LNI.
++ * @cq_level: is between 0 and 15, representing individual class queue levels
++ * (CQ0 to CQ7 for every channel) and grouped class queue levels (CQ8 to CQ15
++ * for every channel).
++ * @traffic_class: is between 0 and 7 when associating a given class queue level
++ * to a traffic class, or -1 when disabling traffic class flow control for this
++ * class queue level.
++ *
++ * Return zero for success, or -EINVAL if the cq_level or traffic_class is out
++ * of range as indicated above, or -EIO if the configure/query tcfcc command
++ * returns error.
++ *
++ * Refer to the section of QMan CEETM traffic class flow control in the
++ * Reference Manual.
++ */
++int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
++ unsigned int cq_level,
++ int traffic_class);
++int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni,
++ unsigned int cq_level,
++ int *traffic_class);
++
++ /* ----------------------------- */
++ /* CEETM :: class queue channels */
++ /* ----------------------------- */
++
++/**
++ * qman_ceetm_channel_claim - Claims an unclaimed CQ channel that is mapped to
++ * the given LNI.
++ * @channel: the returned class queue channel object, if successful.
++ * @lni: the LNI that the channel belongs to.
++ *
++ * Channels are always initially "unshaped".
++ *
++ * Return zero for success, or -ENODEV if there is no channel available (all
++ * 32 channels are claimed) or -EINVAL if the channel mapping command returns
++ * error.
++ */
++int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
++ struct qm_ceetm_lni *lni);
++
++/**
++ * qman_ceetm_channel_release - Releases a previously claimed CQ channel.
++ * @channel: the channel needs to be released.
++ *
++ * Returns zero for success, or -EBUSY if the dependencies are still in use.
++ *
++ * Note any shaping of the channel will be cleared to leave it in an unshaped
++ * state.
++ */
++int qman_ceetm_channel_release(struct qm_ceetm_channel *channel);
++
++/**
++ * qman_ceetm_channel_enable_shaper
++ * qman_ceetm_channel_disable_shaper - Enables/disables shaping on the channel.
++ * @channel: the given channel.
++ * @coupled: indicates whether surplus CR tokens should be added to the
++ * excess-rate token count (up to the excess-rate token limit) when the number
++ * of (unused) committed-rate tokens reach the committed_rate token limit.
++ *
++ * Whenever a claimed channel is first enabled for shaping, its committed and
++ * excess token rates and limits are zero, so will need to be changed to do
++ * anything useful. The shaper can subsequently be enabled/disabled without
++ * resetting the shaping parameters, but the shaping parameters will be reset
++ * when the channel is released.
++ *
++ * Return 0 for success, or -EINVAL for failure, in the case that the channel
++ * shaper has been enabled/disabled or the management command returns error.
++ */
++int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
++ int coupled);
++int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel);
++
++/**
++ * qman_ceetm_channel_is_shaper_enabled - Check channel shaper status.
++ * @channel: the given channel.
++ */
++int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel);
++
++/**
++ * qman_ceetm_channel_set_commit_rate
++ * qman_ceetm_channel_get_commit_rate
++ * qman_ceetm_channel_set_excess_rate
++ * qman_ceetm_channel_get_excess_rate - Set/get channel CR/ER shaper parameters.
++ * @channel: the given channel.
++ * @token_rate: the desired token rate for "set" function, or the queried token
++ * rate for "get" function.
++ * @token_limit: the desired token limit for "set" function, or the queried
++ * token limit for "get" function.
++ *
++ * Return zero for success. The "set" function returns -EINVAL if the channel
++ * is unshaped, or -EIO if the configure shaper command returns error. The
++ * "get" function returns -EINVAL if token rate or token limit is not set, or
++ * the query shaper command returns error.
++ */
++int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
++ const struct qm_ceetm_rate *token_rate,
++ u16 token_limit);
++int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
++ struct qm_ceetm_rate *token_rate,
++ u16 *token_limit);
++int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
++ const struct qm_ceetm_rate *token_rate,
++ u16 token_limit);
++int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
++ struct qm_ceetm_rate *token_rate,
++ u16 *token_limit);
++/**
++ * qman_ceetm_channel_set_commit_rate_bps
++ * qman_ceetm_channel_get_commit_rate_bps
++ * qman_ceetm_channel_set_excess_rate_bps
++ * qman_ceetm_channel_get_excess_rate_bps - Set/get channel CR/ER shaper
++ * parameters.
++ * @channel: the given channel.
++ * @bps: the desired shaper rate in bps for "set" function, or the queried
++ * shaper rate in bps for "get" function.
++ * @token_limit: the desired token limit for "set" function, or the queried
++ * token limit for "get" function.
++ *
++ * Return zero for success. The "set" function returns -EINVAL if the channel
++ * is unshaped, or -EIO if the configure shaper command returns error. The
++ * "get" function returns -EINVAL if token rate or token limit is not set, or
++ * the query shaper command returns error.
++ */
++int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
++ u64 bps, u16 token_limit);
++int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
++ u64 *bps, u16 *token_limit);
++int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
++ u64 bps, u16 token_limit);
++int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
++ u64 *bps, u16 *token_limit);
++
++/**
++ * qman_ceetm_channel_set_weight
++ * qman_ceetm_channel_get_weight - Set/get the weight for unshaped channel
++ * @channel: the given channel.
++ * @token_limit: the desired token limit as the weight of the unshaped channel
++ * for "set" function, or the queried token limit for "get" function.
++ *
++ * The algorithm of unshaped fair queuing (uFQ) is used for unshaped channel.
++ * It allows the unshaped channels to be included in the CR time eligible list,
++ * and thus use the configured CR token limit value as their fair queuing
++ * weight.
++ *
++ * Return zero for success, or -EINVAL if the channel is a shaped channel or
++ * the management command returns error.
++ */
++int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
++ u16 token_limit);
++int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
++ u16 *token_limit);
++
++/**
++ * qman_ceetm_channel_set_group
++ * qman_ceetm_channel_get_group - Set/get the grouping of the class scheduler.
++ * @channel: the given channel.
++ * @group_b: indicates whether there is group B in this channel.
++ * @prio_a: the priority of group A.
++ * @prio_b: the priority of group B.
++ *
++ * There are 8 individual class queues (CQ0-CQ7), and 8 grouped class queues
++ * (CQ8-CQ15). If 'group_b' is zero, then all the grouped class queues are in
++ * group A, otherwise they are split into group A (CQ8-11) and group B
++ * (CQ12-C15). The individual class queues and the group(s) are in strict
++ * priority order relative to each other. Within the group(s), the scheduling
++ * is not strict priority order, but the result of scheduling within a group
++ * is in strict priority order relative to the other class queues in the
++ * channel. 'prio_a' and 'prio_b' control the priority order of the groups
++ * relative to the individual class queues, and take values from 0-7. Eg. if
++ * 'group_b' is non-zero, 'prio_a' is 2 and 'prio_b' is 6, then the strict
++ * priority order would be;
++ * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7
++ *
++ * Return 0 for success. The "set" function returns -EINVAL if prio_a or
++ * prio_b is out of the range 0 - 7 (the priority of group A or group B can
++ * not be 0, as CQ0 is always the highest class queue in this channel), or
++ * -EIO if the configure scheduler command returns error. The "get" function
++ * returns -EINVAL if the query scheduler command returns error.
++ */
++int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel,
++ int group_b,
++ unsigned int prio_a,
++ unsigned int prio_b);
++int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel,
++ int *group_b,
++ unsigned int *prio_a,
++ unsigned int *prio_b);
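++
++/*
++ * Illustrative sketch only: reproducing the priority layout described above
++ * (group B present, prio_a = 2, prio_b = 6), which yields the strict order
++ * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7.
++ */
++#if 0
++static int example_channel_groups(struct qm_ceetm_channel *channel)
++{
++	return qman_ceetm_channel_set_group(channel, 1, 2, 6);
++}
++#endif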
++
++/**
++ * qman_ceetm_channel_set_group_cr_eligibility
++ * qman_ceetm_channel_set_group_er_eligibility - Set channel group eligibility
++ * @channel: the given channel object
++ * @group_b: indicates whether there is group B in this channel.
++ * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
++ * @ere: the excess rate eligibility, 1 for enable, 0 for disable.
++ *
++ * Return zero for success, or -EINVAL if eligibility setting fails.
++*/
++int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
++ *channel, int group_b, int cre);
++int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
++ *channel, int group_b, int ere);
++
++/**
++ * qman_ceetm_channel_set_cq_cr_eligibility
++ * qman_ceetm_channel_set_cq_er_eligibility - Set channel cq eligibility
++ * @channel: the given channel object
++ * @idx: is from 0 to 7 (representing CQ0 to CQ7).
++ * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
++ * @ere: the excess rate eligibility, 1 for enable, 0 for disable.
++ *
++ * Return zero for success, or -EINVAL if eligibility setting fails.
++*/
++int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
++ unsigned int idx, int cre);
++int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
++ unsigned int idx, int ere);
++
++ /* --------------------- */
++ /* CEETM :: class queues */
++ /* --------------------- */
++
++/**
++ * qman_ceetm_cq_claim - Claims an individual class queue.
++ * @cq: the returned class queue object, if successful.
++ * @channel: the class queue channel.
++ * @idx: is from 0 to 7 (representing CQ0 to CQ7).
++ * @ccg: represents the class congestion group that this class queue should be
++ * subscribed to, or NULL if no congestion group membership is desired.
++ *
++ * Returns zero for success, or -EINVAL if @idx is out of range 0 - 7 or
++ * if this class queue has been claimed, or configure class queue command
++ * returns error, or returns -ENOMEM if allocating CQ memory fails.
++ */
++int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
++ struct qm_ceetm_channel *channel,
++ unsigned int idx,
++ struct qm_ceetm_ccg *ccg);
++
++/**
++ * qman_ceetm_cq_claim_A - Claims a class queue group A.
++ * @cq: the returned class queue object, if successful.
++ * @channel: the class queue channel.
++ * @idx: is from 8 to 15 if only group A exists, otherwise, it is from 8 to 11.
++ * @ccg: represents the class congestion group that this class queue should be
++ * subscribed to, or NULL if no congestion group membership is desired.
++ *
++ * Return zero for success, or -EINVAL if @idx is out of the range or if
++ * this class queue has been claimed or configure class queue command returns
++ * error, or returns -ENOMEM if allocating CQ memory fails.
++ */
++int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
++ struct qm_ceetm_channel *channel,
++ unsigned int idx,
++ struct qm_ceetm_ccg *ccg);
++
++/**
++ * qman_ceetm_cq_claim_B - Claims a class queue group B.
++ * @cq: the returned class queue object, if successful.
++ * @channel: the class queue channel.
++ * @idx: is from 0 to 3 (CQ12 to CQ15).
++ * @ccg: represents the class congestion group that this class queue should be
++ * subscribed to, or NULL if no congestion group membership is desired.
++ *
++ * Return zero for success, or -EINVAL if @idx is out of the range or if
++ * this class queue has been claimed or configure class queue command returns
++ * error, or returns -ENOMEM if allocating CQ memory fails.
++ */
++int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
++ struct qm_ceetm_channel *channel,
++ unsigned int idx,
++ struct qm_ceetm_ccg *ccg);
++
++/**
++ * qman_ceetm_cq_release - Releases a previously claimed class queue.
++ * @cq: The class queue to be released.
++ *
++ * Return zero for success, or -EBUSY if the dependent objects (eg. logical
++ * FQIDs) have not been released.
++ */
++int qman_ceetm_cq_release(struct qm_ceetm_cq *cq);
++
++/**
++ * qman_ceetm_set_queue_weight
++ * qman_ceetm_get_queue_weight - Configure/query the weight of a grouped class
++ * queue.
++ * @cq: the given class queue.
++ * @weight_code: the desired weight code to set for the given class queue for
++ * "set" function or the queired weight code for "get" function.
++ *
++ * Grouped class queues have a default weight code of zero, which corresponds to
++ * a scheduler weighting of 1. This function can be used to modify a grouped
++ * class queue to another weight. (Use the helpers qman_ceetm_wbfs2ratio()
++ * and qman_ceetm_ratio2wbfs() to convert between these 'weight_code' values
++ * and the corresponding sharing weight.)
++ *
++ * Returns zero for success, or -EIO if the configure weight command returns
++ * error for "set" function, or -EINVAL if the query command returns
++ * error for "get" function.
++ * See section "CEETM Weighted Scheduling among Grouped Classes" in Reference
++ * Manual for weight and weight code.
++ */
++int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
++ struct qm_ceetm_weight_code *weight_code);
++int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
++ struct qm_ceetm_weight_code *weight_code);
++
++/**
++ * qman_ceetm_set_queue_weight_in_ratio
++ * qman_ceetm_get_queue_weight_in_ratio - Configure/query the weight of a
++ * grouped class queue.
++ * @cq: the given class queue.
++ * @ratio: the weight in ratio. It should be the real ratio number multiplied
++ * by 100 to get rid of fraction.
++ *
++ * Returns zero for success, or -EIO if the configure weight command returns
++ * error for "set" function, or -EINVAL if the query command returns
++ * error for "get" function.
++ */
++int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio);
++int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio);
++
++/* Weights are encoded using a pseudo-exponential scheme. The weight codes 0,
++ * 32, 64, [...] correspond to weights of 1, 2, 4, [...]. The weights
++ * corresponding to intermediate weight codes are calculated using linear
++ * interpolation on the inverted values. Or put another way, the inverse weights
++ * for each 32nd weight code are 1, 1/2, 1/4, [...], and so the intervals
++ * between these are divided linearly into 32 intermediate values, the inverses
++ * of which form the remaining weight codes.
++ *
++ * The Weighted Bandwidth Fair Scheduling (WBFS) algorithm provides a form of
++ * scheduling within a group of class queues (group A or B). Weights are used to
++ * normalise the class queues to an underlying BFS algorithm where all class
++ * queues are assumed to require "equal bandwidth". So the weights referred to
++ * by the weight codes act as divisors on the size of frames being enqueued. Ie.
++ * if one class queue in a group is assigned a weight of 2 whilst the other class
++ * queues in the group keep the default weight of 1, then the WBFS scheduler
++ * will effectively treat all frames enqueued on the weight-2 class queue as
++ * having half the number of bytes they really have. Ie. if all other things are
++ * equal, that class queue would get twice as much bytes-per-second bandwidth as
++ * the others. So weights should be chosen to provide bandwidth ratios between
++ * members of the same class queue group. These weights have no bearing on
++ * behaviour outside that group's WBFS mechanism though.
++ */
++
++/**
++ * qman_ceetm_wbfs2ratio - Given a weight code ('wbfs'), an accurate fractional
++ * representation of the corresponding weight is given (in order to not lose
++ * any precision).
++ * @weight_code: The given weight code in WBFS.
++ * @numerator: the numerator part of the weight computed by the weight code.
++ * @denominator: the denominator part of the weight computed by the weight code
++ *
++ * Returns zero for success or -EINVAL if the given weight code is illegal.
++ */
++int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
++ u32 *numerator,
++ u32 *denominator);
++/**
++ * qman_ceetm_ratio2wbfs - Given a weight, find the nearest possible weight code.
++ * If the user needs to know how close this is, convert the resulting weight
++ * code back to a weight and compare.
++ * @numerator: numerator part of the given weight.
++ * @denominator: denominator part of the given weight.
++ * @weight_code: the weight code computed from the given weight.
++ *
++ * Returns zero for success, or -ERANGE if "numerator/denominator" is outside
++ * the range of weights.
++ */
++int qman_ceetm_ratio2wbfs(u32 numerator,
++ u32 denominator,
++ struct qm_ceetm_weight_code *weight_code,
++ int rounding);
++
++#define QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER 0x1
++/**
++ * qman_ceetm_cq_get_dequeue_statistics - Get the statistics provided by CEETM
++ * CQ counters.
++ * @cq: the given CQ object.
++ * @flags: indicates whether the statistics counter will be cleared after query.
++ * @frame_count: The number of the frames that have been counted since the
++ * counter was cleared last time.
++ * @byte_count: the number of bytes in all frames that have been counted.
++ *
++ * Return zero for success or -EINVAL if query statistics command returns error.
++ *
++ */
++int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
++ u64 *frame_count, u64 *byte_count);
++
++/**
++ * qman_ceetm_drain_cq - drain the CQ until it is empty.
++ * @cq: the given CQ object.
++ * Return 0 for success, or -EINVAL if the command to empty the CQ fails.
++ */
++int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq);
++
++ /* ---------------------- */
++ /* CEETM :: logical FQIDs */
++ /* ---------------------- */
++/**
++ * qman_ceetm_lfq_claim - Claims an unused logical FQID, associates it with
++ * the given class queue.
++ * @lfq: the returned lfq object, if successful.
++ * @cq: the class queue which needs to claim a LFQID.
++ *
++ * Return zero for success, or -ENODEV if no LFQID is available or -ENOMEM if
++ * allocating memory for lfq fails, or -EINVAL if configuring LFQMT fails.
++ */
++int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
++ struct qm_ceetm_cq *cq);
++
++/**
++ * qman_ceetm_lfq_release - Releases a previously claimed logical FQID.
++ * @lfq: the lfq to be released.
++ *
++ * Return zero for success.
++ */
++int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq);
++
++/**
++ * qman_ceetm_lfq_set_context
++ * qman_ceetm_lfq_get_context - Set/get the context_a/context_b pair in the
++ * "dequeue context table" entry associated with the logical FQID.
++ * @lfq: the given logical FQ object.
++ * @context_a: contextA of the dequeue context.
++ * @context_b: contextB of the dequeue context.
++ *
++ * Returns zero for success, or -EINVAL if setting/getting the context pair
++ * fails.
++ */
++int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq,
++ u64 context_a,
++ u32 context_b);
++int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq,
++ u64 *context_a,
++ u32 *context_b);
++
++/**
++ * qman_ceetm_create_fq - Initialise a FQ object for the LFQ.
++ * @lfq: the given logic fq.
++ * @fq: the fq object created for the given logic fq.
++ *
++ * The FQ object can be used in qman_enqueue() and qman_enqueue_orp() APIs to
++ * target a logical FQID (and the class queue it is associated with).
++ * Note that this FQ object can only be used for enqueues, and
++ * in the case of qman_enqueue_orp() it cannot be used as the 'orp' parameter,
++ * only as 'fq'. This FQ object cannot (and shouldn't) be destroyed; it is only
++ * valid as long as the underlying 'lfq' remains claimed. It is the user's
++ * responsibility to ensure that the underlying 'lfq' is not released until any
++ * enqueues to this FQ object have completed. The only field the user needs to
++ * fill in is fq->cb.ern, as that enqueue rejection handler is the callback that
++ * could conceivably be called on this FQ object. This API can be called
++ * multiple times to create multiple FQ objects referring to the same logical
++ * FQID, and any enqueue rejections will respect the callback of the object that
++ * issued the enqueue (and will identify the object via the parameter passed to
++ * the callback too). There is no 'flags' parameter to this API as there is for
++ * qman_create_fq() - the created FQ object behaves as though qman_create_fq()
++ * had been called with the single flag QMAN_FQ_FLAG_NO_MODIFY.
++ *
++ * Returns 0 for success.
++ */
++int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq);
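++
++/* A minimal usage sketch (assumptions: 'cq' is an already-claimed class
++ * queue and 'my_ern' is a caller-supplied enqueue rejection handler):
++ *
++ *	struct qm_ceetm_lfq *lfq;
++ *	struct qman_fq my_fq;
++ *
++ *	if (qman_ceetm_lfq_claim(&lfq, cq) == 0) {
++ *		my_fq.cb.ern = my_ern;
++ *		qman_ceetm_create_fq(lfq, &my_fq);
++ *		(my_fq may now be passed to qman_enqueue())
++ *	}
++ */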
++
++ /* -------------------------------- */
++ /* CEETM :: class congestion groups */
++ /* -------------------------------- */
++
++/**
++ * qman_ceetm_ccg_claim - Claims an unused CCG.
++ * @ccg: the returned CCG object, if successful.
++ * @channel: the given class queue channel.
++ * @idx: the index of the CCG to be claimed within the channel.
++ * @cscn: the callback function of this CCG.
++ * @cb_ctx: the corresponding context to be used if state change
++ * notifications are later enabled for this CCG.
++ *
++ * The congestion group is local to the given class queue channel, so only
++ * class queues within the channel can be associated with that congestion group.
++ * The association of class queues to congestion groups occurs when the class
++ * queues are claimed, see qman_ceetm_cq_claim() and related functions.
++ * Congestion groups are in a "zero" state when initially claimed, and they are
++ * returned to that state when released.
++ *
++ * Return zero for success, or -EINVAL if no CCG in the channel is available.
++ */
++int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
++ struct qm_ceetm_channel *channel,
++ unsigned int idx,
++ void (*cscn)(struct qm_ceetm_ccg *,
++ void *cb_ctx,
++ int congested),
++ void *cb_ctx);
++
++/**
++ * qman_ceetm_ccg_release - Releases a previously claimed CCG.
++ * @ccg: the given ccg.
++ *
++ * Returns zero for success, or -EBUSY if the given ccg's dependent objects
++ * (class queues that are associated with the CCG) have not been released.
++ */
++int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg);
++
++/* This struct is used to specify attributes for a CCG. The 'we_mask' field
++ * controls which CCG attributes are to be updated, and the remainder specify
++ * the values for those attributes. A CCG counts either frames or the bytes
++ * within those frames, but not both ('mode'). A CCG can optionally cause
++ * enqueues to be rejected, due to tail-drop or WRED, or both (they are
++ * independent options, 'td_en' and 'wr_en_g,wr_en_y,wr_en_r'). Tail-drop can be
++ * level-triggered due to a single threshold ('td_thres') or edge-triggered due
++ * to a "congestion state", but not both ('td_mode'). Congestion state has
++ * distinct entry and exit thresholds ('cs_thres_in' and 'cs_thres_out'), and
++ * notifications can be sent to software when the CCG goes into and out of this
++ * congested state ('cscn_en'). */
++struct qm_ceetm_ccg_params {
++ /* Boolean fields together in a single bitfield struct */
++ struct {
++ /* Whether to count bytes or frames. 1==frames */
++ u8 mode:1;
++ /* En/disable tail-drop. 1==enable */
++ u8 td_en:1;
++ /* Tail-drop on congestion-state or threshold. 1=threshold */
++ u8 td_mode:1;
++ /* Generate congestion state change notifications. 1==enable */
++ u8 cscn_en:1;
++ /* Enable WRED rejections (per colour). 1==enable */
++ u8 wr_en_g:1;
++ u8 wr_en_y:1;
++ u8 wr_en_r:1;
++ } __packed;
++ /* Tail-drop threshold. See qm_cgr_thres_[gs]et64(). */
++ struct qm_cgr_cs_thres td_thres;
++ /* Congestion state thresholds, for entry and exit. */
++ struct qm_cgr_cs_thres cs_thres_in;
++ struct qm_cgr_cs_thres cs_thres_out;
++ /* Overhead accounting length. Per-packet "tax", from -128 to +127 */
++ signed char oal;
++	/* Congestion state change notification for DCP portal, virtual CCGID */
++ /* WRED parameters. */
++ struct qm_cgr_wr_parm wr_parm_g;
++ struct qm_cgr_wr_parm wr_parm_y;
++ struct qm_cgr_wr_parm wr_parm_r;
++};
++/* Bits used in 'we_mask' to qman_ceetm_ccg_set(), controls which attributes of
++ * the CCGR are to be updated. */
++#define QM_CCGR_WE_MODE 0x0001 /* mode (bytes/frames) */
++#define QM_CCGR_WE_CS_THRES_IN 0x0002 /* congestion state entry threshold */
++#define QM_CCGR_WE_TD_EN 0x0004 /* congestion state tail-drop enable */
++#define QM_CCGR_WE_CSCN_TUPD 0x0008 /* CSCN target update */
++#define QM_CCGR_WE_CSCN_EN 0x0010 /* congestion notification enable */
++#define QM_CCGR_WE_WR_EN_R 0x0020 /* WRED enable - red */
++#define QM_CCGR_WE_WR_EN_Y 0x0040 /* WRED enable - yellow */
++#define QM_CCGR_WE_WR_EN_G 0x0080 /* WRED enable - green */
++#define QM_CCGR_WE_WR_PARM_R 0x0100 /* WRED parameters - red */
++#define QM_CCGR_WE_WR_PARM_Y 0x0200 /* WRED parameters - yellow */
++#define QM_CCGR_WE_WR_PARM_G 0x0400 /* WRED parameters - green */
++#define QM_CCGR_WE_OAL 0x0800 /* overhead accounting length */
++#define QM_CCGR_WE_CS_THRES_OUT 0x1000 /* congestion state exit threshold */
++#define QM_CCGR_WE_TD_THRES 0x2000 /* tail-drop threshold */
++#define QM_CCGR_WE_TD_MODE 0x4000 /* tail-drop mode (state/threshold) */
++#define QM_CCGR_WE_CDV 0x8000 /* cdv */
++
++/**
++ * qman_ceetm_ccg_set
++ * qman_ceetm_ccg_get - Configure/query a subset of CCG attributes.
++ * @ccg: the given CCG object.
++ * @we_mask: the write enable mask.
++ * @params: the parameters setting for this ccg
++ *
++ * Return 0 for success; for the "set" function, -EIO if the configure-CCG
++ * command fails; for the "get" function, -EINVAL if the query-CCG command
++ * fails.
++ */
++int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg,
++ u16 we_mask,
++ const struct qm_ceetm_ccg_params *params);
++int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
++ struct qm_ceetm_ccg_params *params);
++
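++/* Sketch of a typical "set" call (illustrative only): enable threshold
++ * tail-drop on a claimed CCG. Populating 'p.td_thres' via the threshold
++ * encoding helpers is assumed to happen where indicated.
++ *
++ *	struct qm_ceetm_ccg_params p = { .td_en = 1, .td_mode = 1 };
++ *
++ *	(populate p.td_thres via qm_cgr_cs_thres helpers here)
++ *	int err = qman_ceetm_ccg_set(ccg, QM_CCGR_WE_TD_EN |
++ *			QM_CCGR_WE_TD_MODE | QM_CCGR_WE_TD_THRES, &p);
++ */
++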
++/** qman_ceetm_cscn_swp_set - Add or remove a software portal from the target
++ * mask.
++ * qman_ceetm_cscn_swp_get - Query whether a given software portal index is
++ * in the cscn target mask.
++ * @ccg: the given CCG object.
++ * @swp_idx: the index of the software portal.
++ * @cscn_enabled: 1: Set the swp to be cscn target. 0: remove the swp from
++ * the target mask.
++ * @we_mask: the write enable mask.
++ * @params: the parameters setting for this ccg
++ *
++ * Return 0 for success, or -EINVAL if command in set/get function fails.
++ */
++int qman_ceetm_cscn_swp_set(struct qm_ceetm_ccg *ccg,
++ u16 swp_idx,
++ unsigned int cscn_enabled,
++ u16 we_mask,
++ const struct qm_ceetm_ccg_params *params);
++int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
++ u16 swp_idx,
++ unsigned int *cscn_enabled);
++
++/** qman_ceetm_cscn_dcp_set - Add or remove a direct connect portal from the
++ * target mask.
++ * qman_ceetm_cscn_dcp_get - Query whether a given direct connect portal index
++ * is in the cscn target mask.
++ * @ccg: the given CCG object.
++ * @dcp_idx: the index of the direct connect portal.
++ * @vcgid: congestion state change notification for dcp portal, virtual CGID.
++ * @cscn_enabled: 1: Set the dcp to be cscn target. 0: remove the dcp from
++ * the target mask.
++ * @we_mask: the write enable mask.
++ * @params: the parameters setting for this ccg
++ *
++ * Return 0 for success, or -EINVAL if command in set/get function fails.
++ */
++int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
++ u16 dcp_idx,
++ u8 vcgid,
++ unsigned int cscn_enabled,
++ u16 we_mask,
++ const struct qm_ceetm_ccg_params *params);
++int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
++ u16 dcp_idx,
++ u8 *vcgid,
++ unsigned int *cscn_enabled);
++
++/**
++ * qman_ceetm_ccg_get_reject_statistics - Get the statistics provided by
++ * CEETM CCG counters.
++ * @ccg: the given CCG object.
++ * @flags: indicates whether the statistics counter will be cleared after query.
++ * @frame_count: the number of frames that have been counted since the
++ * counter was last cleared.
++ * @byte_count: the number of bytes in all frames that have been counted.
++ *
++ * Return zero for success or -EINVAL if the query statistics command fails.
++ *
++ */
++int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
++ u64 *frame_count, u64 *byte_count);
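++
++/* Illustrative call (assumption: 'ccg' was claimed earlier), reading and
++ * clearing the reject counters in a single query:
++ *
++ *	u64 frames, bytes;
++ *
++ *	if (qman_ceetm_ccg_get_reject_statistics(ccg,
++ *			QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER,
++ *			&frames, &bytes) == 0)
++ *		pr_info("rejected %llu frames / %llu bytes\n", frames, bytes);
++ */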
++
++/**
++ * qman_ceetm_query_lfqmt - Query the logical frame queue mapping table
++ * @lfqid: Logical Frame Queue ID
++ * @lfqmt_query: Results of the query command
++ *
++ * Returns zero for success or -EIO if the query command returns error.
++ *
++ */
++int qman_ceetm_query_lfqmt(int lfqid,
++ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query);
++
++/**
++ * qman_ceetm_query_write_statistics - Query (and optionally write) statistics
++ * @cid: Target ID (CQID or CCGRID)
++ * @dcp_idx: CEETM portal ID
++ * @command_type: One of the following:
++ * 0 = Query dequeue statistics. CID carries the CQID to be queried.
++ * 1 = Query and clear dequeue statistics. CID carries the CQID to be queried.
++ * 2 = Write dequeue statistics. CID carries the CQID to be written.
++ * 3 = Query reject statistics. CID carries the CCGRID to be queried.
++ * 4 = Query and clear reject statistics. CID carries the CCGRID to be queried.
++ * 5 = Write reject statistics. CID carries the CCGRID to be written.
++ * @frame_count: Frame count value to be written if this is a write command
++ * @byte_count: Bytes count value to be written if this is a write command
++ *
++ * Returns zero for success or -EIO if the query command returns error.
++ */
++int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
++ u16 command_type, u64 frame_count,
++ u64 byte_count);
++
++/**
++ * qman_set_wpm - Set waterfall power management
++ *
++ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
++ *
++ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
++ * accessible.
++ */
++int qman_set_wpm(int wpm_enable);
++
++/**
++ * qman_get_wpm - Query the waterfall power management setting
++ *
++ * @wpm_enable: output; set to 1 if wpm is enabled, 0 if it is disabled.
++ *
++ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
++ * accessible.
++ */
++int qman_get_wpm(int *wpm_enable);
++
++/* The below qman_p_***() variants might be called in a migration situation
++ * (e.g. cpu hotplug). They are used to continue accessing the portal that
++ * execution was affine to prior to migration.
++ * The 'p' argument (struct qman_portal *) specifies which portal the APIs
++ * will use.
++ */
++const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal
++ *p);
++int qman_p_irqsource_add(struct qman_portal *p, u32 bits);
++int qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
++int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
++u32 qman_p_poll_slow(struct qman_portal *p);
++void qman_p_poll(struct qman_portal *p);
++void qman_p_stop_dequeues(struct qman_portal *p);
++void qman_p_start_dequeues(struct qman_portal *p);
++void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
++void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools);
++u32 qman_p_static_dequeue_get(struct qman_portal *p);
++void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
++ int park_request);
++int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
++ u32 flags __maybe_unused, u32 vdqcr);
++int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
++ const struct qm_fd *fd, u32 flags);
++int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
++ const struct qm_fd *fd, u32 flags,
++ struct qman_fq *orp, u16 orp_seqnum);
++int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
++ const struct qm_fd *fd, u32 flags,
++ qman_cb_precommit cb, void *cb_arg);
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* FSL_QMAN_H */
+diff --git a/include/linux/fsl_usdpaa.h b/include/linux/fsl_usdpaa.h
+new file mode 100644
+index 00000000..381853de
+--- /dev/null
++++ b/include/linux/fsl_usdpaa.h
+@@ -0,0 +1,372 @@
++/* Copyright 2011-2012 Freescale Semiconductor, Inc.
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++#ifndef FSL_USDPAA_H
++#define FSL_USDPAA_H
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include <linux/uaccess.h>
++#include <linux/ioctl.h>
++#include <linux/fsl_qman.h> /* For "enum qm_channel" */
++#include <linux/compat.h>
++
++#ifdef CONFIG_FSL_USDPAA
++
++/******************************/
++/* Allocation of resource IDs */
++/******************************/
++
++/* This enum is used to distinguish between the type of underlying object being
++ * manipulated. */
++enum usdpaa_id_type {
++ usdpaa_id_fqid,
++ usdpaa_id_bpid,
++ usdpaa_id_qpool,
++ usdpaa_id_cgrid,
++ usdpaa_id_ceetm0_lfqid,
++ usdpaa_id_ceetm0_channelid,
++ usdpaa_id_ceetm1_lfqid,
++ usdpaa_id_ceetm1_channelid,
++ usdpaa_id_max /* <-- not a valid type, represents the number of types */
++};
++#define USDPAA_IOCTL_MAGIC 'u'
++struct usdpaa_ioctl_id_alloc {
++ uint32_t base; /* Return value, the start of the allocated range */
++ enum usdpaa_id_type id_type; /* what kind of resource(s) to allocate */
++ uint32_t num; /* how many IDs to allocate (and return value) */
++ uint32_t align; /* must be a power of 2, 0 is treated like 1 */
++ int partial; /* whether to allow less than 'num' */
++};
++struct usdpaa_ioctl_id_release {
++	/* Input */
++ enum usdpaa_id_type id_type;
++ uint32_t base;
++ uint32_t num;
++};
++struct usdpaa_ioctl_id_reserve {
++ enum usdpaa_id_type id_type;
++ uint32_t base;
++ uint32_t num;
++};
++
++
++/* ioctl() commands */
++#define USDPAA_IOCTL_ID_ALLOC \
++ _IOWR(USDPAA_IOCTL_MAGIC, 0x01, struct usdpaa_ioctl_id_alloc)
++#define USDPAA_IOCTL_ID_RELEASE \
++ _IOW(USDPAA_IOCTL_MAGIC, 0x02, struct usdpaa_ioctl_id_release)
++#define USDPAA_IOCTL_ID_RESERVE \
++ _IOW(USDPAA_IOCTL_MAGIC, 0x0A, struct usdpaa_ioctl_id_reserve)
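++
++/* Illustrative user-space sequence (assumption: 'fd' is an open file
++ * descriptor on the USDPAA character device):
++ *
++ *	struct usdpaa_ioctl_id_alloc a = {
++ *		.id_type = usdpaa_id_fqid,
++ *		.num = 8,
++ *		.align = 8,
++ *		.partial = 0,
++ *	};
++ *
++ *	if (ioctl(fd, USDPAA_IOCTL_ID_ALLOC, &a) == 0)
++ *		use_fqids(a.base, a.num);	(use_fqids() is hypothetical;
++ *						 a.base..a.base+a.num-1 now
++ *						 belong to this process)
++ */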
++
++/**********************/
++/* Mapping DMA memory */
++/**********************/
++
++/* Maximum length for a map name, including NULL-terminator */
++#define USDPAA_DMA_NAME_MAX 16
++/* Flags for requesting DMA maps. Maps are private+unnamed or sharable+named.
++ * For a sharable and named map, specify _SHARED (whether creating one or
++ * binding to an existing one). If _SHARED is specified and _CREATE is not, then
++ * the mapping must already exist. If _SHARED and _CREATE are specified and the
++ * mapping doesn't already exist, it will be created. If _SHARED and _CREATE are
++ * specified and the mapping already exists, the mapping will fail unless _LAZY
++ * is specified. When mapping to a pre-existing sharable map, the length must be
++ * an exact match. Lengths must be a power-of-4 multiple of page size.
++ *
++ * Note that this does not actually map the memory to user-space, that is done
++ * by a subsequent mmap() using the page offset returned from this ioctl(). The
++ * ioctl() is what gives the process permission to do this, and a page-offset
++ * with which to do so.
++ */
++#define USDPAA_DMA_FLAG_SHARE 0x01
++#define USDPAA_DMA_FLAG_CREATE 0x02
++#define USDPAA_DMA_FLAG_LAZY 0x04
++#define USDPAA_DMA_FLAG_RDONLY 0x08
++struct usdpaa_ioctl_dma_map {
++ /* Output parameters - virtual and physical addresses */
++ void *ptr;
++ uint64_t phys_addr;
++ /* Input parameter, the length of the region to be created (or if
++ * mapping an existing region, this must match it). Must be a power-of-4
++ * multiple of page size. */
++ uint64_t len;
++ /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
++ uint32_t flags;
++ /* If _FLAG_SHARE is specified, the name of the region to be created (or
++ * of the existing mapping to use). */
++ char name[USDPAA_DMA_NAME_MAX];
++ /* If this ioctl() creates the mapping, this is an input parameter
++ * stating whether the region supports locking. If mapping an existing
++ * region, this is a return value indicating the same thing. */
++ int has_locking;
++ /* In the case of a successful map with _CREATE and _LAZY, this return
++ * value indicates whether we created the mapped region or whether it
++ * already existed. */
++ int did_create;
++};
++
++#ifdef CONFIG_COMPAT
++struct usdpaa_ioctl_dma_map_compat {
++ /* Output parameters - virtual and physical addresses */
++ compat_uptr_t ptr;
++ uint64_t phys_addr;
++ /* Input parameter, the length of the region to be created (or if
++ * mapping an existing region, this must match it). Must be a power-of-4
++ * multiple of page size. */
++ uint64_t len;
++ /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
++ uint32_t flags;
++ /* If _FLAG_SHARE is specified, the name of the region to be created (or
++ * of the existing mapping to use). */
++ char name[USDPAA_DMA_NAME_MAX];
++ /* If this ioctl() creates the mapping, this is an input parameter
++ * stating whether the region supports locking. If mapping an existing
++ * region, this is a return value indicating the same thing. */
++ int has_locking;
++ /* In the case of a successful map with _CREATE and _LAZY, this return
++ * value indicates whether we created the mapped region or whether it
++ * already existed. */
++ int did_create;
++};
++
++#define USDPAA_IOCTL_DMA_MAP_COMPAT \
++ _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map_compat)
++#endif
++
++
++#define USDPAA_IOCTL_DMA_MAP \
++ _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map)
++/* munmap() does not remove the DMA map, just the user-space mapping to it.
++ * This ioctl will do both (though you can munmap() before calling the ioctl
++ * too). */
++#define USDPAA_IOCTL_DMA_UNMAP \
++ _IOW(USDPAA_IOCTL_MAGIC, 0x04, unsigned char)
++/* We implement a cross-process locking scheme per DMA map. Call this ioctl()
++ * with a mmap()'d address, and the process will (interruptible) sleep if the
++ * lock is already held by another process. Process destruction will
++ * automatically clean up any held locks. */
++#define USDPAA_IOCTL_DMA_LOCK \
++ _IOW(USDPAA_IOCTL_MAGIC, 0x05, unsigned char)
++#define USDPAA_IOCTL_DMA_UNLOCK \
++ _IOW(USDPAA_IOCTL_MAGIC, 0x06, unsigned char)
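++
++/* Illustrative user-space flow (a sketch, not authoritative; 'fd' is the
++ * USDPAA device handle and the 64KB length is an arbitrary power-of-4
++ * multiple of the page size):
++ *
++ *	struct usdpaa_ioctl_dma_map m = {
++ *		.len = 0x10000,
++ *		.flags = USDPAA_DMA_FLAG_SHARE | USDPAA_DMA_FLAG_CREATE,
++ *		.name = "my_shm",
++ *	};
++ *
++ *	if (ioctl(fd, USDPAA_IOCTL_DMA_MAP, &m) == 0)
++ *		(mmap() the returned page offset to access the region, then
++ *		 issue USDPAA_IOCTL_DMA_UNMAP when finished)
++ */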
++
++/***************************************/
++/* Mapping and using QMan/BMan portals */
++/***************************************/
++enum usdpaa_portal_type {
++ usdpaa_portal_qman,
++ usdpaa_portal_bman,
++};
++
++#define QBMAN_ANY_PORTAL_IDX 0xffffffff
++
++struct usdpaa_ioctl_portal_map {
++	/* Input parameter, is a qman or bman portal required. */
++	enum usdpaa_portal_type type;
++	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
++	 * for don't care. The portal index will be populated by the
++	 * driver when the ioctl() successfully completes. */
++ uint32_t index;
++
++ /* Return value if the map succeeds, this gives the mapped
++ * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
++ struct usdpaa_portal_map {
++ void *cinh;
++ void *cena;
++ } addr;
++ /* Qman-specific return values */
++ uint16_t channel;
++ uint32_t pools;
++};
++
++#ifdef CONFIG_COMPAT
++struct compat_usdpaa_ioctl_portal_map {
++ /* Input parameter, is a qman or bman portal required. */
++ enum usdpaa_portal_type type;
++	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
++	 * for don't care. The portal index will be populated by the
++	 * driver when the ioctl() successfully completes. */
++ uint32_t index;
++ /* Return value if the map succeeds, this gives the mapped
++ * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
++ struct usdpaa_portal_map_compat {
++ compat_uptr_t cinh;
++ compat_uptr_t cena;
++ } addr;
++ /* Qman-specific return values */
++ uint16_t channel;
++ uint32_t pools;
++};
++#define USDPAA_IOCTL_PORTAL_MAP_COMPAT \
++ _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct compat_usdpaa_ioctl_portal_map)
++#define USDPAA_IOCTL_PORTAL_UNMAP_COMPAT \
++ _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map_compat)
++#endif
++
++#define USDPAA_IOCTL_PORTAL_MAP \
++ _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct usdpaa_ioctl_portal_map)
++#define USDPAA_IOCTL_PORTAL_UNMAP \
++ _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map)
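++
++/* Illustrative sketch: map any available QMan portal ('fd' is assumed to be
++ * an open handle on the USDPAA character device):
++ *
++ *	struct usdpaa_ioctl_portal_map pm = {
++ *		.type = usdpaa_portal_qman,
++ *		.index = QBMAN_ANY_PORTAL_IDX,
++ *	};
++ *
++ *	if (ioctl(fd, USDPAA_IOCTL_PORTAL_MAP, &pm) == 0)
++ *		(pm.addr.cinh/cena, pm.channel and pm.pools are now valid)
++ */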
++
++struct usdpaa_ioctl_irq_map {
++ enum usdpaa_portal_type type; /* Type of portal to map */
++ int fd; /* File descriptor that contains the portal */
++ void *portal_cinh; /* Cache inhibited area to identify the portal */
++};
++
++#define USDPAA_IOCTL_PORTAL_IRQ_MAP \
++ _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct usdpaa_ioctl_irq_map)
++
++#ifdef CONFIG_COMPAT
++
++struct compat_ioctl_irq_map {
++ enum usdpaa_portal_type type; /* Type of portal to map */
++ compat_int_t fd; /* File descriptor that contains the portal */
++	compat_uptr_t portal_cinh;	/* Used to identify the portal */
++};
++
++#define USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT \
++ _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct compat_ioctl_irq_map)
++#endif
++
++/* ioctl to query the amount of DMA memory used in the system */
++struct usdpaa_ioctl_dma_used {
++ uint64_t free_bytes;
++ uint64_t total_bytes;
++};
++#define USDPAA_IOCTL_DMA_USED \
++ _IOR(USDPAA_IOCTL_MAGIC, 0x0B, struct usdpaa_ioctl_dma_used)
++
++/* ioctl to allocate a raw portal */
++struct usdpaa_ioctl_raw_portal {
++ /* inputs */
++ enum usdpaa_portal_type type; /* Type of portal to allocate */
++
++ /* set to non zero to turn on stashing */
++ uint8_t enable_stash;
++ /* Stashing attributes for the portal */
++ uint32_t cpu;
++ uint32_t cache;
++ uint32_t window;
++
++ /* Specifies the stash request queue this portal should use */
++ uint8_t sdest;
++
++	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
++ * for don't care. The portal index will be populated by the
++ * driver when the ioctl() successfully completes */
++ uint32_t index;
++
++ /* outputs */
++ uint64_t cinh;
++ uint64_t cena;
++};
++
++#define USDPAA_IOCTL_ALLOC_RAW_PORTAL \
++ _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct usdpaa_ioctl_raw_portal)
++
++#define USDPAA_IOCTL_FREE_RAW_PORTAL \
++ _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct usdpaa_ioctl_raw_portal)
++
++#ifdef CONFIG_COMPAT
++
++struct compat_ioctl_raw_portal {
++ /* inputs */
++ enum usdpaa_portal_type type; /* Type of portal to allocate */
++
++ /* set to non zero to turn on stashing */
++ uint8_t enable_stash;
++ /* Stashing attributes for the portal */
++ uint32_t cpu;
++ uint32_t cache;
++ uint32_t window;
++ /* Specifies the stash request queue this portal should use */
++ uint8_t sdest;
++
++	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
++ * for don't care. The portal index will be populated by the
++ * driver when the ioctl() successfully completes */
++ uint32_t index;
++
++ /* outputs */
++ uint64_t cinh;
++ uint64_t cena;
++};
++
++#define USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT \
++ _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct compat_ioctl_raw_portal)
++
++#define USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT \
++ _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct compat_ioctl_raw_portal)
++
++#endif
++
++#ifdef __KERNEL__
++
++/* Early-boot hook */
++int __init fsl_usdpaa_init_early(void);
++
++/* Fault-handling in arch/powerpc/mm/mem.c gives USDPAA an opportunity to detect
++ * faults within its ranges via this hook. */
++int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size);
++
++#endif /* __KERNEL__ */
++
++#endif /* CONFIG_FSL_USDPAA */
++
++#ifdef __KERNEL__
++/* This interface is needed in a few places and though it's not specific to
++ * USDPAA as such, creating a new header for it doesn't make any sense. The
++ * qbman kernel driver implements this interface and uses it as the backend for
++ * both the FQID and BPID allocators. The fsl_usdpaa driver also uses this
++ * interface for tracking per-process allocations handed out to user-space. */
++struct dpa_alloc {
++ struct list_head free;
++ spinlock_t lock;
++ struct list_head used;
++};
++#define DECLARE_DPA_ALLOC(name) \
++ struct dpa_alloc name = { \
++ .free = { \
++ .prev = &name.free, \
++ .next = &name.free \
++ }, \
++ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
++ .used = { \
++ .prev = &name.used, \
++ .next = &name.used \
++ } \
++ }
++static inline void dpa_alloc_init(struct dpa_alloc *alloc)
++{
++ INIT_LIST_HEAD(&alloc->free);
++ INIT_LIST_HEAD(&alloc->used);
++ spin_lock_init(&alloc->lock);
++}
++int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
++ int partial);
++void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count);
++void dpa_alloc_seed(struct dpa_alloc *alloc, u32 fqid, u32 count);
++
++/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
++ * desired range is not available, or 0 for success. */
++int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base_id, u32 count);
++/* Pops and returns contiguous ranges from the allocator. Returns -ENOMEM when
++ * 'alloc' is empty. */
++int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count);
++/* Returns 1 if the specified id is allocated, 0 otherwise */
++int dpa_alloc_check(struct dpa_alloc *list, u32 id);
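++
++/* A brief kernel-side sketch of the allocator (illustrative; the seeded
++ * range 0..63 is arbitrary, and dpa_alloc_new() is assumed to return a
++ * negative value on failure):
++ *
++ *	DECLARE_DPA_ALLOC(my_ids);
++ *	u32 base;
++ *
++ *	dpa_alloc_seed(&my_ids, 0, 64);
++ *	if (dpa_alloc_new(&my_ids, &base, 4, 4, 0) >= 0) {
++ *		(IDs base..base+3, aligned to 4, are now in use)
++ *		dpa_alloc_free(&my_ids, base, 4);
++ *	}
++ */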
++#endif /* __KERNEL__ */
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* FSL_USDPAA_H */
+diff --git a/include/uapi/linux/fmd/Kbuild b/include/uapi/linux/fmd/Kbuild
+new file mode 100644
+index 00000000..56a20401
+--- /dev/null
++++ b/include/uapi/linux/fmd/Kbuild
+@@ -0,0 +1,5 @@
++header-y += integrations/
++header-y += Peripherals/
++
++header-y += ioctls.h
++header-y += net_ioctls.h
+diff --git a/include/uapi/linux/fmd/Peripherals/Kbuild b/include/uapi/linux/fmd/Peripherals/Kbuild
+new file mode 100644
+index 00000000..43883efe
+--- /dev/null
++++ b/include/uapi/linux/fmd/Peripherals/Kbuild
+@@ -0,0 +1,4 @@
++header-y += fm_ioctls.h
++header-y += fm_port_ioctls.h
++header-y += fm_pcd_ioctls.h
++header-y += fm_test_ioctls.h
+diff --git a/include/uapi/linux/fmd/Peripherals/fm_ioctls.h b/include/uapi/linux/fmd/Peripherals/fm_ioctls.h
+new file mode 100644
+index 00000000..e0c2dd31
+--- /dev/null
++++ b/include/uapi/linux/fmd/Peripherals/fm_ioctls.h
+@@ -0,0 +1,628 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File fm_ioctls.h
++
++ @Description FM Char device ioctls
++*//***************************************************************************/
++#ifndef __FM_IOCTLS_H
++#define __FM_IOCTLS_H
++
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FM_grp Frame Manager Linux IOCTL API
++
++ @Description FM Linux ioctls definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Collection FM IOCTL device ('/dev') definitions
++*//***************************************************************************/
++#define DEV_FM_NAME "fm" /**< Name of the FM chardev */
++
++#define DEV_FM_MINOR_BASE 0
++#define DEV_FM_PCD_MINOR_BASE       (DEV_FM_MINOR_BASE + 1)                                /* /dev/fmx-pcd */
++#define DEV_FM_OH_PORTS_MINOR_BASE  (DEV_FM_PCD_MINOR_BASE + 1)                            /* /dev/fmx-port-ohy */
++#define DEV_FM_RX_PORTS_MINOR_BASE  (DEV_FM_OH_PORTS_MINOR_BASE + FM_MAX_NUM_OF_OH_PORTS)  /* /dev/fmx-port-rxy */
++#define DEV_FM_TX_PORTS_MINOR_BASE  (DEV_FM_RX_PORTS_MINOR_BASE + FM_MAX_NUM_OF_RX_PORTS)  /* /dev/fmx-port-txy */
++#define DEV_FM_MAX_MINORS (DEV_FM_TX_PORTS_MINOR_BASE + FM_MAX_NUM_OF_TX_PORTS)
++
++#define FM_IOC_NUM(n) (n)
++#define FM_PCD_IOC_NUM(n) (n+20)
++#define FM_PORT_IOC_NUM(n) (n+70)
++/* @} */
++
++#define IOC_FM_MAX_NUM_OF_PORTS 64
++
++
++/**************************************************************************//**
++ @Description Enum for defining port types
++ (must match enum e_FmPortType defined in fm_ext.h)
++*//***************************************************************************/
++typedef enum ioc_fm_port_type {
++ e_IOC_FM_PORT_TYPE_OH_OFFLINE_PARSING = 0, /**< Offline parsing port */
++ e_IOC_FM_PORT_TYPE_RX, /**< 1G Rx port */
++ e_IOC_FM_PORT_TYPE_RX_10G, /**< 10G Rx port */
++ e_IOC_FM_PORT_TYPE_TX, /**< 1G Tx port */
++ e_IOC_FM_PORT_TYPE_TX_10G, /**< 10G Tx port */
++ e_IOC_FM_PORT_TYPE_DUMMY
++} ioc_fm_port_type;
++
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FM_lib_grp FM library
++
++ @Description FM API functions, definitions and enums
++ The FM module is the main driver module and is a mandatory module
++ for FM driver users. Before any further module initialization,
++ this module must be initialized.
++                The FM is a "singleton" module. It is responsible for the common
++                HW modules: FPM, DMA, common QMI, common BMI initializations and
++                run-time control routines. This module must always be initialized
++                when working with any of the FM modules.
++                NOTE - We assume that the FML will be initialized only by core No. 0!
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description FM Exceptions
++*//***************************************************************************/
++typedef enum ioc_fm_exceptions {
++ e_IOC_FM_EX_DMA_BUS_ERROR, /**< DMA bus error. */
++ e_IOC_EX_DMA_READ_ECC, /**< Read Buffer ECC error (Valid for FM rev < 6)*/
++ e_IOC_EX_DMA_SYSTEM_WRITE_ECC, /**< Write Buffer ECC error on system side (Valid for FM rev < 6)*/
++ e_IOC_EX_DMA_FM_WRITE_ECC, /**< Write Buffer ECC error on FM side (Valid for FM rev < 6)*/
++ e_IOC_EX_DMA_SINGLE_PORT_ECC, /**< Single Port ECC error on FM side (Valid for FM rev > 6)*/
++ e_IOC_EX_FPM_STALL_ON_TASKS, /**< Stall of tasks on FPM */
++ e_IOC_EX_FPM_SINGLE_ECC, /**< Single ECC on FPM. */
++ e_IOC_EX_FPM_DOUBLE_ECC, /**< Double ECC error on FPM ram access */
++ e_IOC_EX_QMI_SINGLE_ECC, /**< Single ECC on QMI. */
++ e_IOC_EX_QMI_DOUBLE_ECC, /**< Double bit ECC occurred on QMI */
++ e_IOC_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/**< Dequeue from unknown port id */
++ e_IOC_EX_BMI_LIST_RAM_ECC, /**< Linked List RAM ECC error */
++ e_IOC_EX_BMI_STORAGE_PROFILE_ECC, /**< Storage Profile ECC Error */
++ e_IOC_EX_BMI_STATISTICS_RAM_ECC, /**< Statistics Count RAM ECC Error Enable */
++ e_IOC_EX_BMI_DISPATCH_RAM_ECC, /**< Dispatch RAM ECC Error Enable */
++ e_IOC_EX_IRAM_ECC, /**< Double bit ECC occurred on IRAM*/
++ e_IOC_EX_MURAM_ECC /**< Double bit ECC occurred on MURAM*/
++} ioc_fm_exceptions;
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FM_runtime_control_grp FM Runtime Control Unit
++
++ @Description FM Runtime control unit API functions, definitions and enums.
++ The FM driver provides a set of control routines for each module.
++ These routines may only be called after the module was fully
++ initialized (both configuration and initialization routines were
++ called). They are typically used to get information from hardware
++ (status, counters/statistics, revision etc.), to modify a current
++ state or to force/enable a required action. Run-time control may
++ be called whenever necessary and as many times as needed.
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Collection General FM defines.
++ *//***************************************************************************/
++#define IOC_FM_MAX_NUM_OF_VALID_PORTS (FM_MAX_NUM_OF_OH_PORTS + \
++ FM_MAX_NUM_OF_1G_RX_PORTS + \
++ FM_MAX_NUM_OF_10G_RX_PORTS + \
++ FM_MAX_NUM_OF_1G_TX_PORTS + \
++ FM_MAX_NUM_OF_10G_TX_PORTS)
++/* @} */
++
++/**************************************************************************//**
++ @Description Structure for Port bandwidth requirement. Port is identified
++ by type and relative id.
++ (must be identical to t_FmPortBandwidth defined in fm_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_port_bandwidth_t {
++ ioc_fm_port_type type; /**< FM port type */
++ uint8_t relative_port_id; /**< Type relative port id */
++ uint8_t bandwidth; /**< bandwidth - (in term of percents) */
++} ioc_fm_port_bandwidth_t;
++
++/**************************************************************************//**
++ @Description A Structure containing an array of Port bandwidth requirements.
++ The user should state the ports requiring bandwidth in terms of
++                percentage - i.e. all ports' bandwidths in the array must add
++ up to 100.
++ (must be identical to t_FmPortsBandwidthParams defined in fm_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_port_bandwidth_params {
++ uint8_t num_of_ports;
++ /**< num of ports listed in the array below */
++ ioc_fm_port_bandwidth_t ports_bandwidths[IOC_FM_MAX_NUM_OF_VALID_PORTS];
++                                    /**< for each port, its bandwidth (all ports'
++                                         bandwidths must add up to 100). */
++} ioc_fm_port_bandwidth_params;
++
++/**************************************************************************//**
++ @Description enum for defining FM counters
++*//***************************************************************************/
++typedef enum ioc_fm_counters {
++ e_IOC_FM_COUNTERS_ENQ_TOTAL_FRAME, /**< QMI total enqueued frames counter */
++ e_IOC_FM_COUNTERS_DEQ_TOTAL_FRAME, /**< QMI total dequeued frames counter */
++ e_IOC_FM_COUNTERS_DEQ_0, /**< QMI 0 frames from QMan counter */
++ e_IOC_FM_COUNTERS_DEQ_1, /**< QMI 1 frames from QMan counter */
++ e_IOC_FM_COUNTERS_DEQ_2, /**< QMI 2 frames from QMan counter */
++ e_IOC_FM_COUNTERS_DEQ_3, /**< QMI 3 frames from QMan counter */
++ e_IOC_FM_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI dequeue from default queue counter */
++ e_IOC_FM_COUNTERS_DEQ_FROM_CONTEXT, /**< QMI dequeue from FQ context counter */
++ e_IOC_FM_COUNTERS_DEQ_FROM_FD, /**< QMI dequeue from FD command field counter */
++ e_IOC_FM_COUNTERS_DEQ_CONFIRM, /**< QMI dequeue confirm counter */
++} ioc_fm_counters;
++
++typedef struct ioc_fm_obj_t {
++ void *obj;
++} ioc_fm_obj_t;
++
++/**************************************************************************//**
++ @Description A structure for returning revision information
++ (must match struct t_FmRevisionInfo declared in fm_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_revision_info_t {
++ uint8_t major; /**< Major revision */
++ uint8_t minor; /**< Minor revision */
++} ioc_fm_revision_info_t;
++
++/**************************************************************************//**
++ @Description A structure for FM counters
++*//***************************************************************************/
++typedef struct ioc_fm_counters_params_t {
++ ioc_fm_counters cnt; /**< The requested counter */
++ uint32_t val; /**< The requested value to get/set from/into the counter */
++} ioc_fm_counters_params_t;
++
++typedef union ioc_fm_api_version_t {
++ struct {
++ uint8_t major;
++ uint8_t minor;
++ uint8_t respin;
++ uint8_t reserved;
++ } version;
++ uint32_t ver;
++} ioc_fm_api_version_t;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description A structure of information about each of the external
++ buffer pools used by a port or storage-profile.
++ (must be identical to t_FmExtPoolParams defined in fm_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_ext_pool_params {
++ uint8_t id; /**< External buffer pool id */
++ uint16_t size; /**< External buffer pool buffer size */
++} ioc_fm_ext_pool_params;
++
++/**************************************************************************//**
++ @Description A structure for informing the driver about the external
++ buffer pools allocated in the BM and used by a port or a
++ storage-profile.
++ (must be identical to t_FmExtPools defined in fm_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_ext_pools {
++	uint8_t			num_of_pools_used;	/**< Number of pools used by this port */
++ ioc_fm_ext_pool_params ext_buf_pool[FM_PORT_MAX_NUM_OF_EXT_POOLS];
++							/**< Parameters for each pool */
++} ioc_fm_ext_pools;
++
++typedef struct ioc_fm_vsp_params_t {
++ void *p_fm; /**< A handle to the FM object this VSP related to */
++ ioc_fm_ext_pools ext_buf_pools; /**< Which external buffer pools are used
++ (up to FM_PORT_MAX_NUM_OF_EXT_POOLS), and their sizes.
++ parameter associated with Rx / OP port */
++ uint16_t liodn_offset; /**< VSP's LIODN offset */
++ struct {
++ ioc_fm_port_type port_type; /**< Port type */
++ uint8_t port_id; /**< Port Id - relative to type */
++ } port_params;
++ uint8_t relative_profile_id; /**< VSP Id - relative to VSP's range
++ defined in relevant FM object */
++ void *id; /**< return value */
++} ioc_fm_vsp_params_t;
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description A structure for defining BM pool depletion criteria
++*//***************************************************************************/
++typedef struct ioc_fm_buf_pool_depletion_t {
++ bool pools_grp_mode_enable; /**< select mode in which pause frames will be sent after
++ a number of pools (all together!) are depleted */
++ uint8_t num_of_pools; /**< the number of depleted pools that will invoke
++ pause frames transmission. */
++ bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
++ /**< For each pool, TRUE if it should be considered for
++ depletion (Note - this pool must be used by this port!). */
++ bool single_pool_mode_enable; /**< select mode in which pause frames will be sent after
++ a single-pool is depleted; */
++ bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
++ /**< For each pool, TRUE if it should be considered for
++ depletion (Note - this pool must be used by this port!) */
++#if (DPAA_VERSION >= 11)
++ bool pfc_priorities_en[FM_MAX_NUM_OF_PFC_PRIORITIES];
++ /**< This field is used by the MAC as the Priority Enable Vector in the PFC frame
++ which is transmitted */
++#endif /* (DPAA_VERSION >= 11) */
++} ioc_fm_buf_pool_depletion_t;
++
++#if (DPAA_VERSION >= 11)
++typedef struct ioc_fm_buf_pool_depletion_params_t {
++ void *p_fm_vsp;
++ ioc_fm_buf_pool_depletion_t fm_buf_pool_depletion;
++} ioc_fm_buf_pool_depletion_params_t;
++#endif /* (DPAA_VERSION >= 11) */
++
++typedef struct ioc_fm_buffer_prefix_content_t {
++ uint16_t priv_data_size; /**< Number of bytes to be left at the beginning
++ of the external buffer; Note that the private-area will
++ start from the base of the buffer address. */
++ bool pass_prs_result; /**< TRUE to pass the parse result to/from the FM;
++ User may use FM_PORT_GetBufferPrsResult() in order to
++ get the parser-result from a buffer. */
++ bool pass_time_stamp; /**< TRUE to pass the timeStamp to/from the FM
++ User may use FM_PORT_GetBufferTimeStamp() in order to
++                                         get the time-stamp from a buffer. */
++ bool pass_hash_result; /**< TRUE to pass the KG hash result to/from the FM
++ User may use FM_PORT_GetBufferHashResult() in order to
++                                         get the hash-result from a buffer. */
++ bool pass_all_other_pcd_info; /**< Add all other Internal-Context information:
++ AD, hash-result, key, etc. */
++ uint16_t data_align; /**< 0 to use driver's default alignment [64],
++ other value for selecting a data alignment (must be a power of 2);
++ if write optimization is used, must be >= 16. */
++ uint8_t manip_extra_space; /**< Maximum extra size needed (insertion-size minus removal-size);
++ Note that this field impacts the size of the buffer-prefix
++ (i.e. it pushes the data offset);
++ This field is irrelevant if DPAA_VERSION==10 */
++} ioc_fm_buffer_prefix_content_t;
++
++typedef struct ioc_fm_buffer_prefix_content_params_t {
++ void *p_fm_vsp;
++ ioc_fm_buffer_prefix_content_t fm_buffer_prefix_content;
++} ioc_fm_buffer_prefix_content_params_t;
++
++#if (DPAA_VERSION >= 11)
++typedef struct ioc_fm_vsp_config_no_sg_params_t {
++ void *p_fm_vsp;
++ bool no_sg;
++} ioc_fm_vsp_config_no_sg_params_t;
++
++typedef struct ioc_fm_vsp_prs_result_params_t {
++ void *p_fm_vsp;
++ void *p_data;
++} ioc_fm_vsp_prs_result_params_t;
++#endif
++
++typedef struct fm_ctrl_mon_t {
++ uint8_t percent_cnt[2];
++} fm_ctrl_mon_t;
++
++typedef struct ioc_fm_ctrl_mon_counters_params_t {
++ uint8_t fm_ctrl_index;
++ fm_ctrl_mon_t *p_mon;
++} ioc_fm_ctrl_mon_counters_params_t;
++
++/**************************************************************************//**
++ @Function FM_IOC_SET_PORTS_BANDWIDTH
++
++ @Description Sets relative weights between ports when accessing common resources.
++
++ @Param[in] ioc_fm_port_bandwidth_params Port bandwidth percentages,
++ their sum must equal 100.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++#define FM_IOC_SET_PORTS_BANDWIDTH _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(2), ioc_fm_port_bandwidth_params)
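++
++/* Illustrative fill of the argument (the 60/40 split is arbitrary; the
++ * percentages must add up to 100, and 'fm_fd' is an assumed open handle on
++ * the FM character device):
++ *
++ *	ioc_fm_port_bandwidth_params bw = {
++ *		.num_of_ports = 2,
++ *		.ports_bandwidths = {
++ *			{ e_IOC_FM_PORT_TYPE_RX, 0, 60 },
++ *			{ e_IOC_FM_PORT_TYPE_TX, 0, 40 },
++ *		},
++ *	};
++ *
++ *	ioctl(fm_fd, FM_IOC_SET_PORTS_BANDWIDTH, &bw);
++ */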
++
++/**************************************************************************//**
++ @Function FM_IOC_GET_REVISION
++
++ @Description Returns the FM revision
++
++ @Param[out] ioc_fm_revision_info_t A structure of revision information parameters.
++
++ @Return None.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++#define FM_IOC_GET_REVISION _IOR(FM_IOC_TYPE_BASE, FM_IOC_NUM(3), ioc_fm_revision_info_t)
++
++/**************************************************************************//**
++ @Function FM_IOC_GET_COUNTER
++
++ @Description Reads one of the FM counters.
++
++ @Param[in,out] ioc_fm_counters_params_t The requested counter parameters.
++
++ @Return Counter's current value.
++
++ @Cautions Allowed only following FM_Init().
++                Note that it is the user's responsibility to call this routine only
++ for enabled counters, and there will be no indication if a
++ disabled counter is accessed.
++*//***************************************************************************/
++#define FM_IOC_GET_COUNTER _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(4), ioc_fm_counters_params_t)
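++
++/* Illustrative read of a counter ('fm_fd' is an assumed open handle on the
++ * FM character device):
++ *
++ *	ioc_fm_counters_params_t c = {
++ *		.cnt = e_IOC_FM_COUNTERS_ENQ_TOTAL_FRAME,
++ *	};
++ *
++ *	if (ioctl(fm_fd, FM_IOC_GET_COUNTER, &c) == 0)
++ *		(c.val now holds the counter's current value)
++ */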
++
++/**************************************************************************//**
++ @Function FM_IOC_SET_COUNTER
++
++ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
++
++ @Param[in] ioc_fm_counters_params_t The requested counter parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++#define FM_IOC_SET_COUNTER _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(5), ioc_fm_counters_params_t)
++
++/**************************************************************************//**
++ @Function FM_IOC_FORCE_INTR
++
++ @Description Causes an interrupt event on the requested source.
++
++ @Param[in] ioc_fm_exceptions An exception to be forced.
++
++ @Return E_OK on success; Error code if the exception is not enabled,
++ or is not able to create interrupt.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++#define FM_IOC_FORCE_INTR _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(6), ioc_fm_exceptions)
++
++/**************************************************************************//**
++ @Function FM_IOC_GET_API_VERSION
++
++ @Description Reads the FMD IOCTL API version.
++
++ @Param[in,out] ioc_fm_api_version_t  The structure to be filled with the API version.
++
++ @Return Version's value.
++*//***************************************************************************/
++#define FM_IOC_GET_API_VERSION _IOR(FM_IOC_TYPE_BASE, FM_IOC_NUM(7), ioc_fm_api_version_t)
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Function FM_VSP_Config
++
++ @Description Creates descriptor for the FM VSP module.
++
++ The routine returns a handle (descriptor) to the FM VSP object.
++ This descriptor must be passed as first parameter to all other
++ FM VSP function calls.
++
++ No actual initialization or configuration of FM hardware is
++ done by this routine.
++
++ @Param[in]     p_FmVspParams   Pointer to a data structure of parameters
++
++ @Retval Handle to FM VSP object, or NULL for Failure.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_IOC_VSP_CONFIG_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(8), ioc_compat_fm_vsp_params_t)
++#endif
++#define FM_IOC_VSP_CONFIG _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(8), ioc_fm_vsp_params_t)
++
++/**************************************************************************//**
++ @Function FM_VSP_Init
++
++ @Description Initializes the FM VSP module
++
++ @Param[in] h_FmVsp - FM VSP module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_IOC_VSP_INIT_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(9), ioc_compat_fm_obj_t)
++#endif
++#define FM_IOC_VSP_INIT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(9), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_VSP_Free
++
++ @Description Frees all resources that were assigned to FM VSP module.
++
++ Calling this routine invalidates the descriptor.
++
++ @Param[in] h_FmVsp - FM VSP module descriptor
++
++ @Return E_OK on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_IOC_VSP_FREE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(10), ioc_compat_fm_obj_t)
++#endif
++#define FM_IOC_VSP_FREE _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(10), ioc_fm_obj_t)
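++
++/* Sketch of the expected ioctl ordering for a VSP (illustrative; the
++ * parameter setup is elided, and 'fm_fd' is an assumed handle):
++ *
++ *	ioc_fm_vsp_params_t vp;		(fill in pools, port, profile id)
++ *	ioc_fm_obj_t obj;
++ *
++ *	ioctl(fm_fd, FM_IOC_VSP_CONFIG, &vp);	(vp.id returns the handle)
++ *	(optional FM_IOC_VSP_CONFIG_* calls go here)
++ *	obj.obj = vp.id;
++ *	ioctl(fm_fd, FM_IOC_VSP_INIT, &obj);
++ *	...
++ *	ioctl(fm_fd, FM_IOC_VSP_FREE, &obj);
++ */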
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigPoolDepletion
++
++ @Description Calling this routine enables pause frame generation depending on the
++ depletion status of BM pools. It also defines the conditions to activate
++ this functionality. By default, this functionality is disabled.
++
++ @Param[in] ioc_fm_buf_pool_depletion_params_t A structure holding the required parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_IOC_VSP_CONFIG_POOL_DEPLETION_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(11), ioc_compat_fm_buf_pool_depletion_params_t)
++#endif
++#define FM_IOC_VSP_CONFIG_POOL_DEPLETION _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(11), ioc_fm_buf_pool_depletion_params_t)
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigBufferPrefixContent
++
++ @Description Defines the structure, size and content of the application buffer.
++
++                 In the prefix, the FM will save the first 'privDataSize'
++                 bytes, then, depending on 'passPrsResult' and 'passTimeStamp',
++                 copy the parse result and timeStamp, and the packet itself
++                 (in this order), to the application buffer. In VSPs defined
++                 for Tx ports, if 'passPrsResult' is set, the application
++                 should write the parse result at its offset in the prefix.
++
++ Calling this routine changes the buffer margins definitions
++ in the internal driver data base from its default
++ configuration: Data size: [DEFAULT_FM_SP_bufferPrefixContent_privDataSize]
++ Pass Parser result: [DEFAULT_FM_SP_bufferPrefixContent_passPrsResult].
++ Pass timestamp: [DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp].
++
++ @Param[in] ioc_fm_buffer_prefix_content_params_t A structure holding the required parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_IOC_VSP_CONFIG_BUFFER_PREFIX_CONTENT_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(12), ioc_compat_fm_buffer_prefix_content_params_t)
++#endif
++#define FM_IOC_VSP_CONFIG_BUFFER_PREFIX_CONTENT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(12), ioc_fm_buffer_prefix_content_params_t)
++
++/**************************************************************************//**
++ @Function FM_VSP_ConfigNoScatherGather
++
++ @Description   Calling this routine changes whether scatter/gather (S/G)
++                frames may be received, in the internal driver database,
++                from its default configuration: optimize = [DEFAULT_FM_SP_noScatherGather]
++
++ @Param[in] ioc_fm_vsp_config_no_sg_params_t A structure holding the required parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_IOC_VSP_CONFIG_NO_SG_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(13), ioc_compat_fm_vsp_config_no_sg_params_t)
++#endif
++#define FM_IOC_VSP_CONFIG_NO_SG _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(13), ioc_fm_vsp_config_no_sg_params_t)
++
++/**************************************************************************//**
++ @Function FM_VSP_GetBufferPrsResult
++
++ @Description Returns the pointer to the parse result in the data buffer.
++ In Rx ports this is relevant after reception, if parse
++ result is configured to be part of the data passed to the
++ application. For non Rx ports it may be used to get the pointer
++ of the area in the buffer where parse result should be
++ initialized - if so configured.
++ See FM_VSP_ConfigBufferPrefixContent for data buffer prefix
++ configuration.
++
++ @Param[in] ioc_fm_vsp_prs_result_params_t A structure holding the required parameters.
++
++ @Return Parse result pointer on success, NULL if parse result was not
++ configured for this port.
++
++ @Cautions Allowed only following FM_VSP_Init().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_IOC_VSP_GET_BUFFER_PRS_RESULT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(14), ioc_compat_fm_vsp_prs_result_params_t)
++#endif
++#define FM_IOC_VSP_GET_BUFFER_PRS_RESULT _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(14), ioc_fm_vsp_prs_result_params_t)
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Function FM_CtrlMonStart
++
++ @Description Start monitoring utilization of all available FM controllers.
++
++ In order to obtain FM controllers utilization the following sequence
++ should be used:
++ -# FM_CtrlMonStart()
++ -# FM_CtrlMonStop()
++ -# FM_CtrlMonGetCounters() - issued for each FM controller
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++#define FM_IOC_CTRL_MON_START _IO(FM_IOC_TYPE_BASE, FM_IOC_NUM(15))
++
++
++/**************************************************************************//**
++ @Function FM_CtrlMonStop
++
++ @Description Stop monitoring utilization of all available FM controllers.
++
++ In order to obtain FM controllers utilization the following sequence
++ should be used:
++ -# FM_CtrlMonStart()
++ -# FM_CtrlMonStop()
++ -# FM_CtrlMonGetCounters() - issued for each FM controller
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++#define FM_IOC_CTRL_MON_STOP _IO(FM_IOC_TYPE_BASE, FM_IOC_NUM(16))
++
++/**************************************************************************//**
++ @Function FM_CtrlMonGetCounters
++
++ @Description Obtain FM controller utilization parameters.
++
++ In order to obtain FM controllers utilization the following sequence
++ should be used:
++ -# FM_CtrlMonStart()
++ -# FM_CtrlMonStop()
++ -# FM_CtrlMonGetCounters() - issued for each FM controller
++
++ @Param[in] ioc_fm_ctrl_mon_counters_params_t A structure holding the required parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_IOC_CTRL_MON_GET_COUNTERS_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(17), ioc_compat_fm_ctrl_mon_counters_params_t)
++#endif
++#define FM_IOC_CTRL_MON_GET_COUNTERS _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(17), ioc_fm_ctrl_mon_counters_params_t)
++
++/** @} */ /* end of lnx_ioctl_FM_runtime_control_grp group */
++/** @} */ /* end of lnx_ioctl_FM_lib_grp group */
++/** @} */ /* end of lnx_ioctl_FM_grp */
++
++#define FMD_API_VERSION_MAJOR 21
++#define FMD_API_VERSION_MINOR 1
++#define FMD_API_VERSION_RESPIN 0
++
++#endif /* __FM_IOCTLS_H */
+diff --git a/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h
+new file mode 100644
+index 00000000..d13e878d
+--- /dev/null
++++ b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h
+@@ -0,0 +1,3084 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/******************************************************************************
++ @File fm_pcd_ioctls.h
++
++ @Description FM PCD ...
++*//***************************************************************************/
++#ifndef __FM_PCD_IOCTLS_H
++#define __FM_PCD_IOCTLS_H
++
++#include "net_ioctls.h"
++#include "fm_ioctls.h"
++
++
++/**************************************************************************//**
++
++ @Group lnx_ioctl_FM_grp Frame Manager Linux IOCTL API
++
++ @Description Frame Manager Linux ioctls definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FM_PCD_grp FM PCD
++
++ @Description Frame Manager PCD API functions, definitions and enums
++
++ The FM PCD module is responsible for the initialization of all
++ global classifying FM modules. This includes the parser general and
++ common registers, the key generator global and common registers,
++ and the policer global and common registers.
++ In addition, the FM PCD SW module will initialize all required
++ key generator schemes, coarse classification flows, and policer
++ profiles. When an FM module is configured to work with one of these
++ entities, it will register to it using the FM PORT API. The PCD
++ module will manage the PCD resources - i.e. resource management of
++ KeyGen schemes, etc.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Collection General PCD defines
++*//***************************************************************************/
++#define IOC_FM_PCD_MAX_NUM_OF_PRIVATE_HDRS 2 /**< Number of units/headers saved for user */
++
++#define IOC_FM_PCD_PRS_NUM_OF_HDRS 16 /**< Number of headers supported by HW parser */
++#define IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS (32 - IOC_FM_PCD_MAX_NUM_OF_PRIVATE_HDRS)
++ /**< Number of distinction units is limited by
++ register size (32 bits) minus reserved bits
++ for private headers. */
++#define IOC_FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS 4 /**< Maximum number of interchangeable headers
++ in a distinction unit */
++#define IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS 8 /**< Total number of generic KeyGen registers */
++#define IOC_FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY 35 /**< Max number allowed on any configuration;
++ For HW implementation reasons, in most
++ cases less than this will be allowed; The
++ driver will return an initialization error
++ if resource is unavailable. */
++#define IOC_FM_PCD_KG_NUM_OF_EXTRACT_MASKS 4 /**< Total number of masks allowed on KeyGen extractions. */
++#define IOC_FM_PCD_KG_NUM_OF_DEFAULT_GROUPS 16 /**< Number of default value logical groups */
++
++#define IOC_FM_PCD_PRS_NUM_OF_LABELS 32 /**< Maximum number of SW parser labels */
++#define IOC_FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
++
++#define IOC_FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE 128 /**< Maximum size of insertion template for
++ insert manipulation */
++
++#if DPAA_VERSION >= 11
++#define IOC_FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES 64 /**< Maximum possible entries for frame replicator group */
++#endif /* DPAA_VERSION >= 11 */
++/* @} */
++
++#ifdef FM_CAPWAP_SUPPORT
++#error "FM_CAPWAP_SUPPORT not implemented!"
++#endif
++
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FM_PCD_init_grp FM PCD Initialization Unit
++
++ @Description Frame Manager PCD Initialization Unit API
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description PCD counters
++ (must match enum e_FmPcdCounters defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_counters {
++ e_IOC_FM_PCD_KG_COUNTERS_TOTAL, /**< KeyGen counter */
++ e_IOC_FM_PCD_PLCR_COUNTERS_RED, /**< Policer counter - counts the total number of RED packets that exit the Policer. */
++ e_IOC_FM_PCD_PLCR_COUNTERS_YELLOW, /**< Policer counter - counts the total number of YELLOW packets that exit the Policer. */
++ e_IOC_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED, /**< Policer counter - counts the number of packets that changed color to RED by the Policer;
++ This is a subset of e_IOC_FM_PCD_PLCR_COUNTERS_RED packet count, indicating active color changes. */
++ e_IOC_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW, /**< Policer counter - counts the number of packets that changed color to YELLOW by the Policer;
++ This is a subset of e_IOC_FM_PCD_PLCR_COUNTERS_YELLOW packet count, indicating active color changes. */
++ e_IOC_FM_PCD_PLCR_COUNTERS_TOTAL, /**< Policer counter - counts the total number of packets passed in the Policer. */
++ e_IOC_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH, /**< Policer counter - counts the number of packets with length mismatch. */
++ e_IOC_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH, /**< Parser counter - counts the number of times the parser block is dispatched. */
++ e_IOC_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L2 parse result is returned (including errors). */
++ e_IOC_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L3 parse result is returned (including errors). */
++ e_IOC_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L4 parse result is returned (including errors). */
++ e_IOC_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times SHIM parse result is returned (including errors). */
++ e_IOC_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L2 parse result is returned with errors. */
++ e_IOC_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L3 parse result is returned with errors. */
++ e_IOC_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L4 parse result is returned with errors. */
++ e_IOC_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times SHIM parse result is returned with errors. */
++ e_IOC_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES, /**< Parser counter - counts the number of cycles spent executing soft parser instruction (including stall cycles). */
++ e_IOC_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES, /**< Parser counter - counts the number of cycles stalled waiting for parser internal memory reads while executing soft parser instruction. */
++ e_IOC_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES, /**< Parser counter - counts the number of cycles spent executing hard parser (including stall cycles). */
++ e_IOC_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES, /**< MURAM counter - counts the number of cycles while performing FMan Memory read. */
++ e_IOC_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES, /**< MURAM counter - counts the number of cycles stalled while performing FMan Memory read. */
++ e_IOC_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES, /**< MURAM counter - counts the number of cycles while performing FMan Memory write. */
++ e_IOC_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES, /**< MURAM counter - counts the number of cycles stalled while performing FMan Memory write. */
++ e_IOC_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES /**< FPM counter - counts the number of cycles stalled while performing a FPM Command. */
++} ioc_fm_pcd_counters;
++
++/**************************************************************************//**
++ @Description PCD interrupts
++ (must match enum e_FmPcdExceptions defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_exceptions {
++ e_IOC_FM_PCD_KG_EXCEPTION_DOUBLE_ECC, /**< KeyGen double-bit ECC error is detected on internal memory read access. */
++ e_IOC_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, /**< KeyGen scheme configuration error indicating a key size larger than 56 bytes. */
++ e_IOC_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC, /**< Policer double-bit ECC error has been detected on PRAM read access. */
++ e_IOC_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR, /**< Policer access to a non-initialized profile has been detected. */
++ e_IOC_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE, /**< Policer RAM self-initialization complete */
++ e_IOC_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE, /**< Policer atomic action complete */
++ e_IOC_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC, /**< Parser double-bit ECC error */
++ e_IOC_FM_PCD_PRS_EXCEPTION_SINGLE_ECC /**< Parser single-bit ECC error */
++} ioc_fm_pcd_exceptions;
++
++/** @} */ /* end of lnx_ioctl_FM_PCD_init_grp group */
++
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FM_PCD_Runtime_grp FM PCD Runtime Unit
++
++ @Description Frame Manager PCD Runtime Unit
++
++ The runtime control allows creation of PCD infrastructure modules
++ such as Network Environment Characteristics, Classification Plan
++ Groups and Coarse Classification Trees.
++ It also allows on-the-fly initialization, modification and removal
++ of PCD modules such as KeyGen schemes, coarse classification nodes
++ and Policer profiles.
++
++                In order to explain the programming model of the PCD driver
++                interface, a few terms that will be used below should be defined.
++                - Distinction Header - One of the 16 protocols supported by the FM parser,
++                  or one of the SHIM headers (1 or 2). May be a header with a special
++                  option (see below).
++                - Interchangeable Headers Group - A group of headers, any one of which
++                  is recognized as a match for the group. For example, if in a specific
++                  context the user chooses to treat IPv4 and IPv6 in the same way, they
++                  may create an Interchangeable Headers Group consisting of these 2 headers.
++ - A Distinction Unit - a Distinction Header or an Interchangeable Headers
++ Group.
++ - Header with special option - applies to Ethernet, MPLS, VLAN, IPv4 and
++ IPv6, includes multicast, broadcast and other protocol specific options.
++ In terms of hardware it relates to the options available in the classification
++ plan.
++ - Network Environment Characteristics - a set of Distinction Units that define
++ the total recognizable header selection for a certain environment. This is
++ NOT the list of all headers that will ever appear in a flow, but rather
++ everything that needs distinction in a flow, where distinction is made by KeyGen
++ schemes and coarse classification action descriptors.
++
++ The PCD runtime modules initialization is done in stages. The first stage after
++ initializing the PCD module itself is to establish a Network Flows Environment
++ Definition. The application may choose to establish one or more such environments.
++ Later, when needed, the application will have to state, for some of its modules,
++ to which single environment it belongs.
++
++ @{
++*//***************************************************************************/
++
++
++/**************************************************************************//**
++ @Description   A structure for FM counters
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_counters_params_t {
++ ioc_fm_pcd_counters cnt; /**< The requested counter */
++ uint32_t val; /**< The requested value to get/set from/into the counter */
++} ioc_fm_pcd_counters_params_t;
++
++/**************************************************************************//**
++ @Description   A structure for FM exception definitions
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_exception_params_t {
++ ioc_fm_pcd_exceptions exception; /**< The requested exception */
++ bool enable; /**< TRUE to enable interrupt, FALSE to mask it. */
++} ioc_fm_pcd_exception_params_t;
++
++/**************************************************************************//**
++ @Description A structure for SW parser labels
++ (must be identical to struct t_FmPcdPrsLabelParams defined in fm_pcd_ext.h)
++ *//***************************************************************************/
++typedef struct ioc_fm_pcd_prs_label_params_t {
++ uint32_t instruction_offset; /**< SW parser label instruction offset (2 bytes
++ resolution), relative to Parser RAM. */
++ ioc_net_header_type hdr; /**< The existence of this header will invoke
++ the SW parser code. */
++ uint8_t index_per_hdr; /**< Normally 0, if more than one SW parser
++ attachments for the same header, use this
++ index to distinguish between them. */
++} ioc_fm_pcd_prs_label_params_t;
++
++/**************************************************************************//**
++ @Description A structure for SW parser
++ (Must match struct t_FmPcdPrsSwParams defined in fm_pcd_ext.h)
++ *//***************************************************************************/
++typedef struct ioc_fm_pcd_prs_sw_params_t {
++ bool override; /**< FALSE to invoke a check that nothing else
++ was loaded to this address, including
++ internal patches.
++ TRUE to override any existing code.*/
++ uint32_t size; /**< SW parser code size */
++ uint16_t base; /**< SW parser base (in instruction counts!
++ must be larger than 0x20)*/
++ uint8_t *p_code; /**< SW parser code */
++ uint32_t sw_prs_data_params[IOC_FM_PCD_PRS_NUM_OF_HDRS];
++ /**< SW parser data (parameters) */
++ uint8_t num_of_labels; /**< Number of labels for SW parser. */
++ ioc_fm_pcd_prs_label_params_t labels_table[IOC_FM_PCD_PRS_NUM_OF_LABELS];
++ /**< SW parser labels table,
++ containing num_of_labels entries */
++} ioc_fm_pcd_prs_sw_params_t;
++
++/**************************************************************************//**
++ @Description   A structure for setting a KeyGen default value
++ *//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_dflt_value_params_t {
++ uint8_t valueId; /**< 0,1 - one of 2 global default values */
++ uint32_t value; /**< The requested default value */
++} ioc_fm_pcd_kg_dflt_value_params_t;
++
++
++/**************************************************************************//**
++ @Function FM_PCD_Enable
++
++ @Description This routine should be called after PCD is initialized for enabling all
++ PCD engines according to their existing configuration.
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only when PCD is disabled.
++*//***************************************************************************/
++#define FM_PCD_IOC_ENABLE _IO(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(1))
++
++/**************************************************************************//**
++ @Function FM_PCD_Disable
++
++ @Description This routine may be called when PCD is enabled in order to
++ disable all PCD engines. It may be called
++ only when none of the ports in the system are using the PCD.
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only when PCD is enabled.
++*//***************************************************************************/
++#define FM_PCD_IOC_DISABLE _IO(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(2))
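++
++/* Illustrative usage sketch (not part of the API): the enable/disable pair
++ * carries no argument payload ('pcd_fd' is an already-open FM-PCD device
++ * file descriptor, an assumption of this sketch). */
++#if 0
++static void pcd_enable_disable(int pcd_fd)
++{
++    ioctl(pcd_fd, FM_PCD_IOC_ENABLE);   /* after PCD initialization */
++    /* ... traffic runs with PCD engines enabled ... */
++    ioctl(pcd_fd, FM_PCD_IOC_DISABLE);  /* only when no port uses the PCD */
++}
++#endif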
++
++/**************************************************************************//**
++ @Function      FM_PCD_PrsLoadSw
++
++ @Description   This routine loads software parser code into the FM. It may
++                be called only when all ports in the system are actively
++                using the classification plan scheme; in such cases it is
++                recommended in order to save resources. The driver
++                automatically saves 8 classification plans for ports that do
++                NOT use the classification plan mechanism; to avoid this (in
++                order to save those entries), this routine may be called.
++
++ @Param[in] ioc_fm_pcd_prs_sw_params_t A pointer to the image of the software parser code.
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only when PCD is disabled.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_PRS_LOAD_SW_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(3), ioc_compat_fm_pcd_prs_sw_params_t)
++#endif
++#define FM_PCD_IOC_PRS_LOAD_SW _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(3), ioc_fm_pcd_prs_sw_params_t)
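++
++/* Illustrative usage sketch (not part of the API): loading a software
++ * parser image. HEADER_TYPE_UDP stands in for an actual ioc_net_header_type
++ * value from net_ioctls.h; the image contents are the caller's concern. */
++#if 0
++static int load_sw_parser(int pcd_fd, uint8_t *image, uint32_t image_size)
++{
++    ioc_fm_pcd_prs_sw_params_t prs = { 0 };
++
++    prs.override      = FALSE;      /* fail if code is already loaded here */
++    prs.size          = image_size;
++    prs.base          = 0x40;       /* in instruction counts; must be > 0x20 */
++    prs.p_code        = image;
++    prs.num_of_labels = 1;
++    /* Invoke the code whenever a UDP header is parsed. */
++    prs.labels_table[0].instruction_offset = 0;
++    prs.labels_table[0].hdr                = HEADER_TYPE_UDP; /* placeholder */
++    prs.labels_table[0].index_per_hdr      = 0;
++
++    /* Allowed only while the PCD is disabled. */
++    return ioctl(pcd_fd, FM_PCD_IOC_PRS_LOAD_SW, &prs);
++}
++#endif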
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSetDfltValue
++
++ @Description   Calling this routine sets a global default value to be used
++                by the KeyGen when the parser does not recognize a required
++                field/header.
++                By default, these values are 0.
++
++ @Param[in] ioc_fm_pcd_kg_dflt_value_params_t A pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only when PCD is disabled.
++*//***************************************************************************/
++#define FM_PCD_IOC_KG_SET_DFLT_VALUE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(6), ioc_fm_pcd_kg_dflt_value_params_t)
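++
++/* Illustrative usage sketch (not part of the API): programming global
++ * KeyGen default value 0, using the ioc_fm_pcd_kg_dflt_value_params_t
++ * structure defined above. */
++#if 0
++static int set_kg_default(int pcd_fd)
++{
++    ioc_fm_pcd_kg_dflt_value_params_t dflt;
++
++    dflt.valueId = 0;           /* one of the 2 global default values */
++    dflt.value   = 0xFFFFFFFF;  /* arbitrary example value */
++    /* Allowed only while the PCD is disabled. */
++    return ioctl(pcd_fd, FM_PCD_IOC_KG_SET_DFLT_VALUE, &dflt);
++}
++#endif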
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSetAdditionalDataAfterParsing
++
++ @Description   Calling this routine allows the KeyGen to access data beyond
++                the point where the parser finished parsing.
++
++ @Param[in]     uint8_t payload-offset - the number of bytes beyond the
++                parser location.
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only when PCD is disabled.
++*//***************************************************************************/
++#define FM_PCD_IOC_KG_SET_ADDITIONAL_DATA_AFTER_PARSING _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(7), uint8_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_SetException
++
++ @Description Calling this routine enables/disables PCD interrupts.
++
++ @Param[in] ioc_fm_pcd_exception_params_t Arguments struct with exception to be enabled/disabled.
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#define FM_PCD_IOC_SET_EXCEPTION _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(8), ioc_fm_pcd_exception_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_GetCounter
++
++ @Description Reads one of the FM PCD counters.
++
++ @Param[in,out] ioc_fm_pcd_counters_params_t The requested counter parameters.
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions       Note that it is the user's responsibility to call this routine only
++ for enabled counters, and there will be no indication if a
++ disabled counter is accessed.
++*//***************************************************************************/
++#define FM_PCD_IOC_GET_COUNTER _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(9), ioc_fm_pcd_counters_params_t)
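++
++/* Illustrative usage sketch (not part of the API): unmasking a PCD
++ * exception, then reading a PCD counter. FM_PCD_IOC_GET_COUNTER is an
++ * _IOWR ioctl: the caller selects the counter in 'cnt' and the driver
++ * fills in 'val'. */
++#if 0
++static int read_kg_total(int pcd_fd, uint32_t *total)
++{
++    ioc_fm_pcd_exception_params_t exc;
++    ioc_fm_pcd_counters_params_t cnt;
++
++    exc.exception = e_IOC_FM_PCD_KG_EXCEPTION_DOUBLE_ECC;
++    exc.enable    = TRUE;                       /* unmask the interrupt */
++    if (ioctl(pcd_fd, FM_PCD_IOC_SET_EXCEPTION, &exc) < 0)
++        return -1;
++
++    cnt.cnt = e_IOC_FM_PCD_KG_COUNTERS_TOTAL;   /* counter to query */
++    cnt.val = 0;
++    if (ioctl(pcd_fd, FM_PCD_IOC_GET_COUNTER, &cnt) < 0)
++        return -1;
++    *total = cnt.val;                           /* filled in by the driver */
++    return 0;
++}
++#endif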
++
++/**************************************************************************//**
++
++ @Function FM_PCD_KgSchemeGetCounter
++
++ @Description Reads scheme packet counter.
++
++ @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSchemeSet().
++
++ @Return Counter's current value.
++
++ @Cautions Allowed only following FM_PCD_Init() & FM_PCD_KgSchemeSet().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_KG_SCHEME_GET_CNTR_COMPAT _IOR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(4), ioc_compat_fm_pcd_kg_scheme_spc_t)
++#endif
++#define FM_PCD_IOC_KG_SCHEME_GET_CNTR _IOR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(4), ioc_fm_pcd_kg_scheme_spc_t)
++
++#if 0
++TODO: unused IOCTL
++/**************************************************************************//**
++ @Function FM_PCD_ModifyCounter
++
++ @Description Writes a value to an enabled counter. Use "0" to reset the counter.
++
++ @Param[in] ioc_fm_pcd_counters_params_t - The requested counter parameters.
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#define FM_PCD_IOC_MODIFY_COUNTER _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(10), ioc_fm_pcd_counters_params_t)
++#define FM_PCD_IOC_SET_COUNTER FM_PCD_IOC_MODIFY_COUNTER
++#endif
++
++/**************************************************************************//**
++ @Function FM_PCD_ForceIntr
++
++ @Description Causes an interrupt event on the requested source.
++
++ @Param[in] ioc_fm_pcd_exceptions - An exception to be forced.
++
++ @Return 0 on success; error code if the exception is not enabled,
++ or is not able to create interrupt.
++*//***************************************************************************/
++#define FM_PCD_IOC_FORCE_INTR _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(11), ioc_fm_pcd_exceptions)
++
++/**************************************************************************//**
++ @Collection Definitions of coarse classification parameters as required by KeyGen
++ (when coarse classification is the next engine after this scheme).
++*//***************************************************************************/
++#define IOC_FM_PCD_MAX_NUM_OF_CC_TREES 8
++#define IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS 16
++#define IOC_FM_PCD_MAX_NUM_OF_CC_UNITS 4
++#define IOC_FM_PCD_MAX_NUM_OF_KEYS 256
++#define IOC_FM_PCD_MAX_NUM_OF_FLOWS (4*KILOBYTE)
++#define IOC_FM_PCD_MAX_SIZE_OF_KEY 56
++#define IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP 16
++#define IOC_FM_PCD_LAST_KEY_INDEX 0xffff
++#define IOC_FM_PCD_MANIP_DSCP_VALUES 64
++/* @} */
++
++/**************************************************************************//**
++ @Collection A set of definitions to allow protocol
++ special option description.
++*//***************************************************************************/
++typedef uint32_t ioc_protocol_opt_t; /**< A general type to define a protocol option. */
++
++typedef ioc_protocol_opt_t ioc_eth_protocol_opt_t; /**< Ethernet protocol options. */
++#define IOC_ETH_BROADCAST 0x80000000 /**< Ethernet Broadcast. */
++#define IOC_ETH_MULTICAST 0x40000000 /**< Ethernet Multicast. */
++
++typedef ioc_protocol_opt_t ioc_vlan_protocol_opt_t; /**< Vlan protocol options. */
++#define IOC_VLAN_STACKED 0x20000000 /**< Stacked VLAN. */
++
++typedef ioc_protocol_opt_t ioc_mpls_protocol_opt_t; /**< MPLS protocol options. */
++#define IOC_MPLS_STACKED 0x10000000 /**< Stacked MPLS. */
++
++typedef ioc_protocol_opt_t ioc_ipv4_protocol_opt_t; /**< IPv4 protocol options. */
++#define IOC_IPV4_BROADCAST_1 0x08000000 /**< IPv4 Broadcast. */
++#define IOC_IPV4_MULTICAST_1 0x04000000 /**< IPv4 Multicast. */
++#define IOC_IPV4_UNICAST_2 0x02000000 /**< Tunneled IPv4 - Unicast. */
++#define IOC_IPV4_MULTICAST_BROADCAST_2 0x01000000 /**< Tunneled IPv4 - Broadcast/Multicast. */
++
++#define IOC_IPV4_FRAG_1 0x00000008 /**< IPV4 reassembly option.
++ IPV4 Reassembly manipulation requires network
++ environment with IPV4 header and IPV4_FRAG_1 option */
++
++typedef ioc_protocol_opt_t ioc_ipv6_protocol_opt_t; /**< IPv6 protocol options. */
++#define IOC_IPV6_MULTICAST_1 0x00800000 /**< IPv6 Multicast. */
++#define IOC_IPV6_UNICAST_2 0x00400000 /**< Tunneled IPv6 - Unicast. */
++#define IOC_IPV6_MULTICAST_2 0x00200000 /**< Tunneled IPv6 - Multicast. */
++
++#define IOC_IPV6_FRAG_1 0x00000004 /**< IPV6 reassembly option.
++ IPV6 Reassembly manipulation requires network
++ environment with IPV6 header and IPV6_FRAG_1 option */
++#if (DPAA_VERSION >= 11)
++typedef ioc_protocol_opt_t ioc_capwap_protocol_opt_t; /**< CAPWAP protocol options. */
++#define CAPWAP_FRAG_1 0x00000008 /**< CAPWAP reassembly option.
++ CAPWAP Reassembly manipulation requires network
++ environment with CAPWAP header and CAPWAP_FRAG_1 option;
++ in case where fragment found, the fragment-extension offset
++ may be found at 'shim2' (in parser-result). */
++#endif /* (DPAA_VERSION >= 11) */
++
++/* @} */
++
++#define IOC_FM_PCD_MANIP_MAX_HDR_SIZE 256
++#define IOC_FM_PCD_MANIP_DSCP_TO_VLAN_TRANS 64
++/**************************************************************************//**
++ @Collection A set of definitions to support Header Manipulation selection.
++*//***************************************************************************/
++typedef uint32_t ioc_hdr_manip_flags_t; /**< A general type to define a HMan update command flags. */
++
++typedef ioc_hdr_manip_flags_t ioc_ipv4_hdr_manip_update_flags_t; /**< IPv4 protocol HMan update command flags. */
++
++#define IOC_HDR_MANIP_IPV4_TOS 0x80000000 /**< update TOS with the given value ('tos' field
++ of ioc_fm_pcd_manip_hdr_field_update_ipv4_t) */
++#define IOC_HDR_MANIP_IPV4_ID 0x40000000 /**< update IP ID with the given value ('id' field
++ of ioc_fm_pcd_manip_hdr_field_update_ipv4_t) */
++#define IOC_HDR_MANIP_IPV4_TTL 0x20000000 /**< Decrement TTL by 1 */
++#define IOC_HDR_MANIP_IPV4_SRC 0x10000000 /**< update IP source address with the given value
++ ('src' field of ioc_fm_pcd_manip_hdr_field_update_ipv4_t) */
++#define IOC_HDR_MANIP_IPV4_DST 0x08000000 /**< update IP destination address with the given value
++ ('dst' field of ioc_fm_pcd_manip_hdr_field_update_ipv4_t) */
++
++typedef ioc_hdr_manip_flags_t ioc_ipv6_hdr_manip_update_flags_t; /**< IPv6 protocol HMan update command flags. */
++
++#define IOC_HDR_MANIP_IPV6_TC 0x80000000 /**< update Traffic Class address with the given value
++ ('traffic_class' field of ioc_fm_pcd_manip_hdr_field_update_ipv6_t) */
++#define IOC_HDR_MANIP_IPV6_HL 0x40000000 /**< Decrement Hop Limit by 1 */
++#define IOC_HDR_MANIP_IPV6_SRC 0x20000000 /**< update IP source address with the given value
++ ('src' field of ioc_fm_pcd_manip_hdr_field_update_ipv6_t) */
++#define IOC_HDR_MANIP_IPV6_DST 0x10000000 /**< update IP destination address with the given value
++ ('dst' field of ioc_fm_pcd_manip_hdr_field_update_ipv6_t) */
++
++typedef ioc_hdr_manip_flags_t ioc_tcp_udp_hdr_manip_update_flags_t;/**< TCP/UDP protocol HMan update command flags. */
++
++#define IOC_HDR_MANIP_TCP_UDP_SRC 0x80000000 /**< update TCP/UDP source address with the given value
++ ('src' field of ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t) */
++#define IOC_HDR_MANIP_TCP_UDP_DST 0x40000000 /**< update TCP/UDP destination address with the given value
++ ('dst' field of ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t) */
++#define IOC_HDR_MANIP_TCP_UDP_CHECKSUM 0x20000000 /**< update TCP/UDP checksum */
++
++/* @} */
++
++/**************************************************************************//**
++ @Description   A type used for returning the order of the key extraction.
++                Each value in this array represents the index of the extraction
++                command as defined by the user in the initialization extraction array.
++                The valid size of this array is the user-defined number of extractions
++                required (also marked by the second '0' in this array).
++*//***************************************************************************/
++typedef uint8_t ioc_fm_pcd_kg_key_order_t [IOC_FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY];
++
++/**************************************************************************//**
++ @Description All PCD engines
++ (must match enum e_FmPcdEngine defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_engine {
++ e_IOC_FM_PCD_INVALID = 0, /**< Invalid PCD engine */
++ e_IOC_FM_PCD_DONE, /**< No PCD Engine indicated */
++ e_IOC_FM_PCD_KG, /**< KeyGen */
++ e_IOC_FM_PCD_CC, /**< Coarse Classifier */
++ e_IOC_FM_PCD_PLCR, /**< Policer */
++ e_IOC_FM_PCD_PRS, /**< Parser */
++#if DPAA_VERSION >= 11
++ e_IOC_FM_PCD_FR, /**< Frame Replicator */
++#endif /* DPAA_VERSION >= 11 */
++ e_IOC_FM_PCD_HASH /**< Hash Table */
++} ioc_fm_pcd_engine;
++
++/**************************************************************************//**
++ @Description An enum for selecting extraction by header types
++ (Must match enum e_FmPcdExtractByHdrType defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_extract_by_hdr_type {
++ e_IOC_FM_PCD_EXTRACT_FROM_HDR, /**< Extract bytes from header */
++ e_IOC_FM_PCD_EXTRACT_FROM_FIELD, /**< Extract bytes from header field */
++ e_IOC_FM_PCD_EXTRACT_FULL_FIELD /**< Extract a full field */
++} ioc_fm_pcd_extract_by_hdr_type;
++
++/**************************************************************************//**
++ @Description An enum for selecting extraction source (when it is not the header)
++ (Must match enum e_FmPcdExtractFrom defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_extract_from {
++ e_IOC_FM_PCD_EXTRACT_FROM_FRAME_START, /**< KG & CC: Extract from beginning of frame */
++ e_IOC_FM_PCD_EXTRACT_FROM_DFLT_VALUE, /**< KG only: Extract from a default value */
++ e_IOC_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE, /**< KG only: Extract from the point where parsing had finished */
++ e_IOC_FM_PCD_EXTRACT_FROM_KEY, /**< CC only: Field where saved KEY */
++ e_IOC_FM_PCD_EXTRACT_FROM_HASH, /**< CC only: Field where saved HASH */
++ e_IOC_FM_PCD_EXTRACT_FROM_PARSE_RESULT, /**< KG & CC: Extract from the parser result */
++ e_IOC_FM_PCD_EXTRACT_FROM_ENQ_FQID, /**< KG & CC: Extract from enqueue FQID */
++ e_IOC_FM_PCD_EXTRACT_FROM_FLOW_ID /**< CC only: Field where saved Dequeue FQID */
++} ioc_fm_pcd_extract_from;
++
++/**************************************************************************//**
++ @Description An enum for selecting extraction type
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_extract_type {
++ e_IOC_FM_PCD_EXTRACT_BY_HDR, /**< Extract according to header */
++ e_IOC_FM_PCD_EXTRACT_NON_HDR, /**< Extract from data that is not the header */
++ e_IOC_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO /**< Extract private info as specified by user */
++} ioc_fm_pcd_extract_type;
++
++/**************************************************************************//**
++ @Description An enum for selecting a default
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_kg_extract_dflt_select {
++ e_IOC_FM_PCD_KG_DFLT_GBL_0, /**< Default selection is KG register 0 */
++ e_IOC_FM_PCD_KG_DFLT_GBL_1, /**< Default selection is KG register 1 */
++ e_IOC_FM_PCD_KG_DFLT_PRIVATE_0, /**< Default selection is a per scheme register 0 */
++ e_IOC_FM_PCD_KG_DFLT_PRIVATE_1, /**< Default selection is a per scheme register 1 */
++ e_IOC_FM_PCD_KG_DFLT_ILLEGAL /**< Illegal selection */
++} ioc_fm_pcd_kg_extract_dflt_select;
++
++/**************************************************************************//**
++ @Description Enumeration type defining all default groups - each group shares
++ a default value, one of four user-initialized values.
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_kg_known_fields_dflt_types {
++ e_IOC_FM_PCD_KG_MAC_ADDR, /**< MAC Address */
++ e_IOC_FM_PCD_KG_TCI, /**< TCI field */
++ e_IOC_FM_PCD_KG_ENET_TYPE, /**< ENET Type */
++ e_IOC_FM_PCD_KG_PPP_SESSION_ID, /**< PPP Session id */
++ e_IOC_FM_PCD_KG_PPP_PROTOCOL_ID, /**< PPP Protocol id */
++ e_IOC_FM_PCD_KG_MPLS_LABEL, /**< MPLS label */
++ e_IOC_FM_PCD_KG_IP_ADDR, /**< IP addr */
++ e_IOC_FM_PCD_KG_PROTOCOL_TYPE, /**< Protocol type */
++ e_IOC_FM_PCD_KG_IP_TOS_TC, /**< TOS or TC */
++ e_IOC_FM_PCD_KG_IPV6_FLOW_LABEL, /**< IPV6 flow label */
++ e_IOC_FM_PCD_KG_IPSEC_SPI, /**< IPSEC SPI */
++ e_IOC_FM_PCD_KG_L4_PORT, /**< L4 Port */
++ e_IOC_FM_PCD_KG_TCP_FLAG, /**< TCP Flag */
++ e_IOC_FM_PCD_KG_GENERIC_FROM_DATA, /**< grouping implemented by SW,
++ any data extraction that is not the full
++ field described above */
++ e_IOC_FM_PCD_KG_GENERIC_FROM_DATA_NO_V, /**< grouping implemented by SW,
++ any data extraction without validation */
++ e_IOC_FM_PCD_KG_GENERIC_NOT_FROM_DATA /**< grouping implemented by SW,
++ extraction from parser result or
++ direct use of default value */
++} ioc_fm_pcd_kg_known_fields_dflt_types;
++
++/**************************************************************************//**
++ @Description Enumeration type for defining header index for scenarios with
++ multiple (tunneled) headers
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_hdr_index {
++ e_IOC_FM_PCD_HDR_INDEX_NONE = 0, /**< used when multiple headers not used, also
++ to specify regular IP (not tunneled). */
++ e_IOC_FM_PCD_HDR_INDEX_1, /**< may be used for VLAN, MPLS, tunneled IP */
++ e_IOC_FM_PCD_HDR_INDEX_2, /**< may be used for MPLS, tunneled IP */
++ e_IOC_FM_PCD_HDR_INDEX_3, /**< may be used for MPLS */
++ e_IOC_FM_PCD_HDR_INDEX_LAST = 0xFF /**< may be used for VLAN, MPLS */
++} ioc_fm_pcd_hdr_index;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer profile functional type
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_profile_type_selection {
++ e_IOC_FM_PCD_PLCR_PORT_PRIVATE, /**< Port dedicated profile */
++ e_IOC_FM_PCD_PLCR_SHARED /**< Shared profile (shared within partition) */
++} ioc_fm_pcd_profile_type_selection;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer profile algorithm
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_plcr_algorithm_selection {
++ e_IOC_FM_PCD_PLCR_PASS_THROUGH, /**< Policer pass through */
++ e_IOC_FM_PCD_PLCR_RFC_2698, /**< Policer algorithm RFC 2698 */
++ e_IOC_FM_PCD_PLCR_RFC_4115 /**< Policer algorithm RFC 4115 */
++} ioc_fm_pcd_plcr_algorithm_selection;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting a policer profile color mode
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_plcr_color_mode {
++ e_IOC_FM_PCD_PLCR_COLOR_BLIND, /**< Color blind */
++ e_IOC_FM_PCD_PLCR_COLOR_AWARE /**< Color aware */
++} ioc_fm_pcd_plcr_color_mode;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting a policer profile color
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_plcr_color {
++ e_IOC_FM_PCD_PLCR_GREEN, /**< Green */
++ e_IOC_FM_PCD_PLCR_YELLOW, /**< Yellow */
++ e_IOC_FM_PCD_PLCR_RED, /**< Red */
++ e_IOC_FM_PCD_PLCR_OVERRIDE /**< Color override */
++} ioc_fm_pcd_plcr_color;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer profile packet frame length selector
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_plcr_frame_length_select {
++ e_IOC_FM_PCD_PLCR_L2_FRM_LEN, /**< L2 frame length */
++ e_IOC_FM_PCD_PLCR_L3_FRM_LEN, /**< L3 frame length */
++ e_IOC_FM_PCD_PLCR_L4_FRM_LEN, /**< L4 frame length */
++ e_IOC_FM_PCD_PLCR_FULL_FRM_LEN /**< Full frame length */
++} ioc_fm_pcd_plcr_frame_length_select;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting roll-back frame
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_plcr_roll_back_frame_select {
++ e_IOC_FM_PCD_PLCR_ROLLBACK_L2_FRM_LEN, /**< Rollback L2 frame length */
++ e_IOC_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN /**< Rollback Full frame length */
++} ioc_fm_pcd_plcr_roll_back_frame_select;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer profile packet or byte mode
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_plcr_rate_mode {
++ e_IOC_FM_PCD_PLCR_BYTE_MODE, /**< Byte mode */
++ e_IOC_FM_PCD_PLCR_PACKET_MODE /**< Packet mode */
++} ioc_fm_pcd_plcr_rate_mode;
++
++/**************************************************************************//**
++ @Description Enumeration type for defining action of frame
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_done_action {
++ e_IOC_FM_PCD_ENQ_FRAME = 0, /**< Enqueue frame */
++ e_IOC_FM_PCD_DROP_FRAME /**< Drop frame */
++} ioc_fm_pcd_done_action;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the policer counter
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_plcr_profile_counters {
++ e_IOC_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER, /**< Green packets counter */
++ e_IOC_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER, /**< Yellow packets counter */
++ e_IOC_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER, /**< Red packets counter */
++ e_IOC_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER, /**< Recolored yellow packets counter */
++ e_IOC_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER /**< Recolored red packets counter */
++} ioc_fm_pcd_plcr_profile_counters;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting the PCD action after extraction
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_action {
++ e_IOC_FM_PCD_ACTION_NONE, /**< NONE */
++ e_IOC_FM_PCD_ACTION_EXACT_MATCH, /**< Exact match on the selected extraction*/
++ e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP /**< Indexed lookup on the selected extraction*/
++} ioc_fm_pcd_action;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of insert manipulation
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_insrt_type {
++ e_IOC_FM_PCD_MANIP_INSRT_GENERIC, /**< Insert according to offset & size */
++ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR, /**< Insert according to protocol */
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ e_IOC_FM_PCD_MANIP_INSRT_BY_TEMPLATE /**< Insert template to start of frame */
++#endif /* FM_CAPWAP_SUPPORT */
++} ioc_fm_pcd_manip_hdr_insrt_type;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of remove manipulation
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_rmv_type {
++ e_IOC_FM_PCD_MANIP_RMV_GENERIC, /**< Remove according to offset & size */
++    e_IOC_FM_PCD_MANIP_RMV_BY_HDR                   /**< Remove according to protocol */
++} ioc_fm_pcd_manip_hdr_rmv_type;
++
++/**************************************************************************//**
++ @Description An enum for selecting specific L2 fields removal
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_rmv_specific_l2 {
++ e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET, /**< Ethernet/802.3 MAC */
++ e_IOC_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS, /**< stacked QTags */
++ e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS, /**< MPLS and Ethernet/802.3 MAC header until
++ the header which follows the MPLS header */
++ e_IOC_FM_PCD_MANIP_HDR_RMV_MPLS /**< Remove MPLS header (Unlimited MPLS labels) */
++} ioc_fm_pcd_manip_hdr_rmv_specific_l2;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting specific fields updates
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_field_update_type {
++ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN, /**< VLAN updates */
++ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4, /**< IPV4 updates */
++ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6, /**< IPV6 updates */
++ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP, /**< TCP_UDP updates */
++} ioc_fm_pcd_manip_hdr_field_update_type;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting VLAN updates
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_field_update_vlan {
++ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI, /**< Replace VPri of outer most VLAN tag. */
++ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN /**< DSCP to VLAN priority bits translation */
++} ioc_fm_pcd_manip_hdr_field_update_vlan;
++
++/**************************************************************************//**
++ @Description   Enumeration type for selecting specific L2 fields insertion
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_insrt_specific_l2 {
++ e_IOC_FM_PCD_MANIP_HDR_INSRT_MPLS /**< Insert MPLS header (Unlimited MPLS labels) */
++} ioc_fm_pcd_manip_hdr_insrt_specific_l2;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Enumeration type for selecting QoS mapping mode
++
++                Note: In all cases except 'e_FM_PCD_MANIP_HDR_QOS_MAPPING_NONE',
++                the user should instruct the port to read the parser-result.
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_qos_mapping_mode {
++ e_IOC_FM_PCD_MANIP_HDR_QOS_MAPPING_NONE = 0, /**< No mapping, QoS field will not be changed */
++ e_IOC_FM_PCD_MANIP_HDR_QOS_MAPPING_AS_IS, /**< QoS field will be overwritten by the last byte in the parser-result. */
++} ioc_fm_pcd_manip_hdr_qos_mapping_mode;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting QoS source
++
++                Note: In all cases except 'e_FM_PCD_MANIP_HDR_QOS_SRC_NONE',
++                the user should leave room for the parser-result in the
++                input/output buffer and instruct the port to read/write the
++                parser-result to/from the buffer (RPD should be set).
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_qos_src {
++ e_IOC_FM_PCD_MANIP_HDR_QOS_SRC_NONE = 0, /**< TODO */
++ e_IOC_FM_PCD_MANIP_HDR_QOS_SRC_USER_DEFINED, /**< QoS will be taken from the last byte in the parser-result. */
++} ioc_fm_pcd_manip_hdr_qos_src;
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of header insertion
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_insrt_by_hdr_type {
++ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2, /**< Specific L2 fields insertion */
++#if (DPAA_VERSION >= 11)
++ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_IP, /**< IP insertion */
++ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_UDP, /**< UDP insertion */
++ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE, /**< UDP lite insertion */
++ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP /**< CAPWAP insertion */
++#endif /* (DPAA_VERSION >= 11) */
++} ioc_fm_pcd_manip_hdr_insrt_by_hdr_type;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting specific custom command
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_custom_type {
++ e_IOC_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE, /**< Replace IPv4/IPv6 */
++} ioc_fm_pcd_manip_hdr_custom_type;
++
++/**************************************************************************//**
++ @Description   Enumeration type for selecting the IP-replace mode of the custom command
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_custom_ip_replace {
++ e_IOC_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV4_BY_IPV6, /**< Replace IPv4 by IPv6 */
++ e_IOC_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4 /**< Replace IPv6 by IPv4 */
++} ioc_fm_pcd_manip_hdr_custom_ip_replace;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of header removal
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_hdr_rmv_by_hdr_type {
++ e_IOC_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2 = 0, /**< Specific L2 fields removal */
++#if (DPAA_VERSION >= 11)
++ e_IOC_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP, /**< CAPWAP removal */
++#endif /* (DPAA_VERSION >= 11) */
++#if (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ e_IOC_FM_PCD_MANIP_RMV_BY_HDR_FROM_START, /**< Locate from data that is not the header */
++#endif /* (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
++} ioc_fm_pcd_manip_hdr_rmv_by_hdr_type;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of timeout mode
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_reassem_time_out_mode {
++ e_IOC_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES, /**< Limits the time of the reassembly process
++ from the first fragment to the last */
++    e_IOC_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAG        /**< Limits the time allowed between receiving consecutive fragments */
++} ioc_fm_pcd_manip_reassem_time_out_mode;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of WaysNumber mode
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_reassem_ways_number {
++ e_IOC_FM_PCD_MANIP_ONE_WAY_HASH = 1, /**< One way hash */
++ e_IOC_FM_PCD_MANIP_TWO_WAYS_HASH, /**< Two ways hash */
++ e_IOC_FM_PCD_MANIP_THREE_WAYS_HASH, /**< Three ways hash */
++ e_IOC_FM_PCD_MANIP_FOUR_WAYS_HASH, /**< Four ways hash */
++ e_IOC_FM_PCD_MANIP_FIVE_WAYS_HASH, /**< Five ways hash */
++ e_IOC_FM_PCD_MANIP_SIX_WAYS_HASH, /**< Six ways hash */
++ e_IOC_FM_PCD_MANIP_SEVEN_WAYS_HASH, /**< Seven ways hash */
++ e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH /**< Eight ways hash */
++} ioc_fm_pcd_manip_reassem_ways_number;
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of statistics mode
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_stats {
++ e_IOC_FM_PCD_STATS_PER_FLOWID = 0 /**< Flow ID is used as index for getting statistics */
++} ioc_fm_pcd_stats;
++#endif
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting manipulation type
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_type {
++ e_IOC_FM_PCD_MANIP_HDR = 0, /**< Header manipulation */
++ e_IOC_FM_PCD_MANIP_REASSEM, /**< Reassembly */
++ e_IOC_FM_PCD_MANIP_FRAG, /**< Fragmentation */
++ e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD /**< Special Offloading */
++} ioc_fm_pcd_manip_type;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of statistics mode
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_cc_stats_mode {
++ e_IOC_FM_PCD_CC_STATS_MODE_NONE = 0, /**< No statistics support */
++ e_IOC_FM_PCD_CC_STATS_MODE_FRAME, /**< Frame count statistics */
++ e_IOC_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME, /**< Byte and frame count statistics */
++#if (DPAA_VERSION >= 11)
++ e_IOC_FM_PCD_CC_STATS_MODE_RMON, /**< Byte and frame length range count statistics */
++#endif /* (DPAA_VERSION >= 11) */
++} ioc_fm_pcd_cc_stats_mode;
++
++/**************************************************************************//**
++ @Description Enumeration type for determining the action in case an IP packet
++ is larger than MTU but its DF (Don't Fragment) bit is set.
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_dont_frag_action {
++ e_IOC_FM_PCD_MANIP_DISCARD_PACKET = 0, /**< Discard packet */
++ e_IOC_FM_PCD_MANIP_ENQ_TO_ERR_Q_OR_DISCARD_PACKET = e_IOC_FM_PCD_MANIP_DISCARD_PACKET,
++ /**< Obsolete, cannot enqueue to error queue;
++ In practice, selects to discard packets;
++ Will be removed in the future */
++ e_IOC_FM_PCD_MANIP_FRAGMENT_PACKECT, /**< Fragment packet and continue normal processing */
++ e_IOC_FM_PCD_MANIP_CONTINUE_WITHOUT_FRAG /**< Continue normal processing without fragmenting the packet */
++} ioc_fm_pcd_manip_dont_frag_action;
++
++/**************************************************************************//**
++ @Description Enumeration type for selecting type of special offload manipulation
++*//***************************************************************************/
++typedef enum ioc_fm_pcd_manip_special_offload_type {
++ e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC, /**< IPSec offload manipulation */
++#if (DPAA_VERSION >= 11)
++ e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP /**< CAPWAP offload manipulation */
++#endif /* (DPAA_VERSION >= 11) */
++} ioc_fm_pcd_manip_special_offload_type;
++
++/**************************************************************************//**
++ @Description A union of protocol dependent special options
++ (Must match union u_FmPcdHdrProtocolOpt defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef union ioc_fm_pcd_hdr_protocol_opt_u {
++ ioc_eth_protocol_opt_t eth_opt; /**< Ethernet options */
++ ioc_vlan_protocol_opt_t vlan_opt; /**< Vlan options */
++ ioc_mpls_protocol_opt_t mpls_opt; /**< MPLS options */
++ ioc_ipv4_protocol_opt_t ipv4_opt; /**< IPv4 options */
++ ioc_ipv6_protocol_opt_t ipv6_opt; /**< IPv6 options */
++#if (DPAA_VERSION >= 11)
++ ioc_capwap_protocol_opt_t capwap_opt; /**< CAPWAP options */
++#endif /* (DPAA_VERSION >= 11) */
++} ioc_fm_pcd_hdr_protocol_opt_u;
++
++/**************************************************************************//**
++ @Description A union holding all known protocol fields
++*//***************************************************************************/
++typedef union ioc_fm_pcd_fields_u {
++ ioc_header_field_eth_t eth; /**< Ethernet */
++ ioc_header_field_vlan_t vlan; /**< VLAN */
++ ioc_header_field_llc_snap_t llc_snap; /**< LLC SNAP */
++ ioc_header_field_pppoe_t pppoe; /**< PPPoE */
++ ioc_header_field_mpls_t mpls; /**< MPLS */
++ ioc_header_field_ip_t ip; /**< IP */
++ ioc_header_field_ipv4_t ipv4; /**< IPv4 */
++ ioc_header_field_ipv6_t ipv6; /**< IPv6 */
++ ioc_header_field_udp_t udp; /**< UDP */
++ ioc_header_field_udp_lite_t udp_lite; /**< UDP_Lite */
++ ioc_header_field_tcp_t tcp; /**< TCP */
++ ioc_header_field_sctp_t sctp; /**< SCTP */
++ ioc_header_field_dccp_t dccp; /**< DCCP */
++ ioc_header_field_gre_t gre; /**< GRE */
++ ioc_header_field_minencap_t minencap; /**< Minimal Encapsulation */
++ ioc_header_field_ipsec_ah_t ipsec_ah; /**< IPSec AH */
++ ioc_header_field_ipsec_esp_t ipsec_esp; /**< IPSec ESP */
++ ioc_header_field_udp_encap_esp_t udp_encap_esp; /**< UDP Encapsulation ESP */
++} ioc_fm_pcd_fields_u;
++
++/**************************************************************************//**
++ @Description Parameters for defining header extraction for key generation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_from_hdr_t {
++ uint8_t size; /**< Size in byte */
++ uint8_t offset; /**< Byte offset */
++} ioc_fm_pcd_from_hdr_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining field extraction for key generation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_from_field_t {
++ ioc_fm_pcd_fields_u field; /**< Field selection */
++ uint8_t size; /**< Size in byte */
++ uint8_t offset; /**< Byte offset */
++} ioc_fm_pcd_from_field_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining a single network environment unit
++ A distinction unit should be defined if it will later be used
++ by one or more PCD engines to distinguish between flows.
++ (Must match struct t_FmPcdDistinctionUnit defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_distinction_unit_t {
++ struct {
++ ioc_net_header_type hdr; /**< One of the headers supported by the FM */
++ ioc_fm_pcd_hdr_protocol_opt_u opt; /**< Select only one option! */
++ } hdrs[IOC_FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS];
++} ioc_fm_pcd_distinction_unit_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining all different distinction units supported
++ by a specific PCD Network Environment Characteristics module.
++
++ Each unit represent a protocol or a group of protocols that may
++ be used later by the different PCD engines to distinguish between flows.
++ (Must match struct t_FmPcdNetEnvParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_net_env_params_t {
++ uint8_t num_of_distinction_units;/**< Number of different units to be identified */
++ ioc_fm_pcd_distinction_unit_t units[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
++ /**< An array of num_of_distinction_units of the
++ different units to be identified */
++ void *id; /**< Output parameter; Returns the net-env Id to be used */
++} ioc_fm_pcd_net_env_params_t;
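++
++/* Illustrative usage sketch (not part of the API): a network environment
++ * with two distinction units - plain IPv4, and Ethernet broadcast frames.
++ * HEADER_TYPE_IPv4 and HEADER_TYPE_ETH are placeholders for the actual
++ * ioc_net_header_type values from net_ioctls.h; the caller is assumed to
++ * have zeroed the structure beforehand. */
++#if 0
++static void build_net_env(ioc_fm_pcd_net_env_params_t *env)
++{
++    env->num_of_distinction_units = 2;
++
++    /* Unit 0: IPv4, no special option. */
++    env->units[0].hdrs[0].hdr = HEADER_TYPE_IPv4;        /* placeholder */
++
++    /* Unit 1: Ethernet, broadcast frames only. */
++    env->units[1].hdrs[0].hdr         = HEADER_TYPE_ETH; /* placeholder */
++    env->units[1].hdrs[0].opt.eth_opt = IOC_ETH_BROADCAST;
++
++    /* 'id' is an output parameter, returned by the driver on set. */
++}
++#endif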
++
++/**************************************************************************//**
++ @Description Parameters for defining a single extraction action when
++ creating a key
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_extract_entry_t {
++ ioc_fm_pcd_extract_type type; /**< Extraction type select */
++ union {
++ struct {
++ ioc_net_header_type hdr; /**< Header selection */
++ bool ignore_protocol_validation;
++ /**< Ignore protocol validation */
++ ioc_fm_pcd_hdr_index hdr_index; /**< Relevant only for MPLS, VLAN and tunneled
++ IP. Otherwise should be cleared.*/
++ ioc_fm_pcd_extract_by_hdr_type type; /**< Header extraction type select */
++ union {
++ ioc_fm_pcd_from_hdr_t from_hdr; /**< Extract bytes from header parameters */
++ ioc_fm_pcd_from_field_t from_field; /**< Extract bytes from field parameters */
++ ioc_fm_pcd_fields_u full_field; /**< Extract full field parameters */
++ } extract_by_hdr_type;
++ } extract_by_hdr; /**< Used when type = e_IOC_FM_PCD_KG_EXTRACT_BY_HDR */
++ struct {
++ ioc_fm_pcd_extract_from src; /**< Non-header extraction source */
++ ioc_fm_pcd_action action; /**< Relevant for CC Only */
++ uint16_t ic_indx_mask; /**< Relevant only for CC when
++ action = e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP;
++ Note that the number of bits that are set within
++ this mask must be log2 of the CC-node 'num_of_keys'.
++ Note that the mask cannot be set on the lower bits. */
++ uint8_t offset; /**< Byte offset */
++ uint8_t size; /**< Size in bytes */
++ } extract_non_hdr; /**< Used when type = e_IOC_FM_PCD_KG_EXTRACT_NON_HDR */
++ } extract_params;
++} ioc_fm_pcd_extract_entry_t;
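++
++/*
++ * Usage sketch (illustrative only): a by-header extraction of the 4-byte IPv4
++ * source address. IOC_HEADER_TYPE_IPv4 and the two values marked "assumed"
++ * are assumptions about enums defined elsewhere in this header family; the
++ * field names come from the structure above.
++ *
++ * ioc_fm_pcd_extract_entry_t ext;
++ * memset(&ext, 0, sizeof(ext));
++ * ext.type = e_IOC_FM_PCD_KG_EXTRACT_BY_HDR;
++ * ext.extract_params.extract_by_hdr.hdr = IOC_HEADER_TYPE_IPv4; // assumed
++ * ext.extract_params.extract_by_hdr.type = e_IOC_FM_PCD_EXTRACT_FROM_HDR; // assumed
++ * ext.extract_params.extract_by_hdr.extract_by_hdr_type.from_hdr.offset = 12; // IPv4 SRC
++ * ext.extract_params.extract_by_hdr.extract_by_hdr_type.from_hdr.size = 4;
++ */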
++
++/**************************************************************************//**
++ @Description A structure for defining masks for each extracted
++ field in the key.
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_extract_mask_t {
++ uint8_t extract_array_index; /**< Index in the extraction array, as initialized by user */
++ uint8_t offset; /**< Byte offset */
++ uint8_t mask; /**< A byte mask (selected bits will be ignored) */
++} ioc_fm_pcd_kg_extract_mask_t;
++
++/**************************************************************************//**
++ @Description A structure for defining default selection per groups
++ of fields
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_extract_dflt_t {
++ ioc_fm_pcd_kg_known_fields_dflt_types type; /**< Default type select*/
++ ioc_fm_pcd_kg_extract_dflt_select dflt_select; /**< Default register select */
++} ioc_fm_pcd_kg_extract_dflt_t;
++
++
++/**************************************************************************//**
++ @Description A structure for defining all parameters needed for
++ generating a key and using a hash function
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_key_extract_and_hash_params_t {
++ uint32_t private_dflt0; /**< Scheme default register 0 */
++ uint32_t private_dflt1; /**< Scheme default register 1 */
++ uint8_t num_of_used_extracts; /**< defines the valid size of the following array */
++ ioc_fm_pcd_extract_entry_t extract_array [IOC_FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY];
++ /**< An array of extraction definitions. */
++ uint8_t num_of_used_dflts; /**< defines the valid size of the following array */
++ ioc_fm_pcd_kg_extract_dflt_t dflts[IOC_FM_PCD_KG_NUM_OF_DEFAULT_GROUPS];
++ /**< For each extraction used in this scheme, specify the required
++ default register to be used when the header is not found;
++ types not specified in this array will get an undefined value. */
++ uint8_t num_of_used_masks; /**< Defines the valid size of the following array */
++ ioc_fm_pcd_kg_extract_mask_t masks[IOC_FM_PCD_KG_NUM_OF_EXTRACT_MASKS];
++ uint8_t hash_shift; /**< Hash result right shift.
++ Selects the 24 bits out of the 64 hash result.
++ 0 means using the 24 LSB's, otherwise use the
++ 24 LSB's after shifting right.*/
++ uint32_t hash_distribution_num_of_fqids; /**< must be > 1 and a power of 2. Represents the range
++ of queues for the key and hash functionality */
++ uint8_t hash_distribution_fqids_shift; /**< selects the FQID bits that will be effected by the hash */
++ bool symmetric_hash; /**< TRUE to generate the same hash for frames with swapped source and
++ destination fields on all layers; If TRUE, driver will check that for
++ all layers, if SRC extraction is selected, DST extraction must also be
++ selected, and vice versa. */
++} ioc_fm_pcd_kg_key_extract_and_hash_params_t;
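++
++/*
++ * Usage sketch (illustrative only): distributing flows over 64 FQIDs by
++ * hashing one extracted field. The concrete values are assumptions.
++ *
++ * ioc_fm_pcd_kg_key_extract_and_hash_params_t hp;
++ * memset(&hp, 0, sizeof(hp));
++ * hp.num_of_used_extracts = 1;
++ * hp.extract_array[0] = ext; // e.g. the extraction entry from the sketch above
++ * hp.hash_shift = 0; // use the 24 LSBs of the hash result
++ * hp.hash_distribution_num_of_fqids = 64; // > 1 and a power of 2
++ * hp.hash_distribution_fqids_shift = 0; // hash affects the lowest FQID bits
++ */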
++
++/**************************************************************************//**
++ @Description A structure of parameters for defining a single
++ Qid mask (extracted OR).
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_extracted_or_params_t {
++ ioc_fm_pcd_extract_type type; /**< Extraction type select */
++ union {
++ struct { /**< used when type = e_IOC_FM_PCD_KG_EXTRACT_BY_HDR */
++ ioc_net_header_type hdr;
++ ioc_fm_pcd_hdr_index hdr_index; /**< Relevant only for MPLS, VLAN and tunneled
++ IP. Otherwise should be cleared.*/
++ bool ignore_protocol_validation;
++
++ } extract_by_hdr;
++ ioc_fm_pcd_extract_from src; /**< used when type = e_IOC_FM_PCD_KG_EXTRACT_NON_HDR */
++ } extract_params;
++ uint8_t extraction_offset; /**< Offset for extraction */
++ ioc_fm_pcd_kg_extract_dflt_select dflt_value; /**< Select register from which extraction is taken if
++ field not found */
++ uint8_t mask; /**< Mask LSB byte of extraction (specified bits are ignored) */
++ uint8_t bit_offset_in_fqid; /**< 0-31; selects which bits of the 24 FQID bits to affect using
++ the extracted byte; assume the byte is placed as the 8 MSBs in
++ a 32-bit word where the lower bits are the FQID; i.e. if
++ bitOffsetInFqid=1 then its LSB affects the FQID MSB, if
++ bitOffsetInFqid=24 then the extracted byte affects the 8 LSBs
++ of the FQID, and if bitOffsetInFqid=31 then the byte's MSB
++ affects the FQID's LSB; 0 means no effect on the FQID;
++ Note that one, and only one, of bitOffsetInFqid or
++ bitOffsetInPlcrProfile must be set (i.e. the extracted byte
++ must affect either the FQID or the policer profile).*/
++ uint8_t bit_offset_in_plcr_profile;
++ /**< 0-15; selects which bits of the 8 policer profile id bits to
++ affect using the extracted byte; assume the byte is placed
++ as the 8 MSBs in a 16-bit word where the lower bits are the
++ policer profile id; i.e. if bitOffsetInPlcrProfile=1 then its
++ LSB affects the profile MSB, if bitOffsetInPlcrProfile=8 then
++ the extracted byte affects the whole policer profile id, and
++ if bitOffsetInPlcrProfile=15 then the byte's MSB affects the
++ policer profile id's LSB; 0 means no effect on the policer
++ profile; Note that one, and only one, of bitOffsetInFqid or
++ bitOffsetInPlcrProfile must be set (i.e. the extracted byte
++ must affect either the FQID or the policer profile).*/
++} ioc_fm_pcd_kg_extracted_or_params_t;
++
++/**************************************************************************//**
++ @Description A structure for configuring scheme counter
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_scheme_counter_t {
++ bool update; /**< FALSE to keep the current counter state
++ and continue from that point, TRUE to update/reset
++ the counter when the scheme is written. */
++ uint32_t value; /**< If update=TRUE, this value will be written into the
++ counter; clear this field to reset the counter. */
++} ioc_fm_pcd_kg_scheme_counter_t;
++
++
++/**************************************************************************//**
++ @Description A structure for retrieving FMKG_SE_SPC
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_scheme_spc_t {
++ uint32_t val; /**< return value */
++ void *id; /**< scheme handle */
++} ioc_fm_pcd_kg_scheme_spc_t;
++
++/**************************************************************************//**
++ @Description A structure for defining policer profile parameters as required by keygen
++ (when policer is the next engine after this scheme).
++ (Must match struct t_FmPcdKgPlcrProfile defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_plcr_profile_t {
++ bool shared_profile; /**< TRUE if this profile is shared between ports
++ (i.e. managed by master partition) May not be TRUE
++ if profile is after Coarse Classification*/
++ bool direct; /**< If TRUE, direct_relative_profile_id only selects the profile
++ id, if FALSE fqid_offset_relative_profile_id_base is used
++ together with fqid_offset_shift and num_of_profiles
++ parameters, to define a range of profiles from
++ which the KeyGen result will determine the
++ destination policer profile. */
++ union {
++ uint16_t direct_relative_profile_id; /**< Used if 'direct' is TRUE, to select policer profile.
++ This parameter should indicate the policer profile offset within the port's
++ policer profiles or SHARED window. */
++ struct {
++ uint8_t fqid_offset_shift; /**< Shift of KG results without the qid base */
++ uint8_t fqid_offset_relative_profile_id_base;
++ /**< OR of KG results without the qid base
++ This parameter should indicate the policer profile
++ offset within the port's policer profiles window
++ or SHARED window depends on shared_profile */
++ uint8_t num_of_profiles; /**< Range of profiles starting at base */
++ } indirect_profile; /**< Indirect profile parameters */
++ } profile_select; /**< Direct/indirect profile selection and parameters */
++} ioc_fm_pcd_kg_plcr_profile_t;
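++
++/*
++ * Usage sketch (illustrative only): direct selection of a per-port policer
++ * profile. The profile id value is an assumption.
++ *
++ * ioc_fm_pcd_kg_plcr_profile_t plcr;
++ * memset(&plcr, 0, sizeof(plcr));
++ * plcr.shared_profile = FALSE; // per-port profile
++ * plcr.direct = TRUE; // select the profile id directly
++ * plcr.profile_select.direct_relative_profile_id = 0; // port-relative offset
++ */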
++
++#if DPAA_VERSION >= 11
++/**************************************************************************//**
++ @Description Parameters for configuring a storage profile for a KeyGen scheme.
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_storage_profile_t {
++ bool direct; /**< If TRUE, directRelativeProfileId only selects the
++ profile id;
++ If FALSE, fqidOffsetRelativeProfileIdBase is used
++ together with fqidOffsetShift and numOfProfiles
++ parameters to define a range of profiles from which
++ the KeyGen result will determine the destination
++ storage profile. */
++ union {
++ uint16_t direct_relative_profileId; /**< Used when 'direct' is TRUE, to select a storage profile;
++ should indicate the storage profile offset within the
++ port's storage profiles window. */
++ struct {
++ uint8_t fqid_offset_shift; /**< Shift of KeyGen results without the FQID base */
++ uint8_t fqid_offset_relative_profile_id_base;
++ /**< OR of KeyGen results without the FQID base;
++ should indicate the policer profile offset within the
++ port's storage profiles window. */
++ uint8_t num_of_profiles; /**< Range of profiles starting at base. */
++ } indirect_profile; /**< Indirect profile parameters. */
++ } profile_select; /**< Direct/indirect profile selection and parameters. */
++} ioc_fm_pcd_kg_storage_profile_t;
++#endif /* DPAA_VERSION >= 11 */
++
++/**************************************************************************//**
++ @Description Parameters for defining CC as the next engine after KeyGen
++ (Must match struct t_FmPcdKgCc defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_cc_t {
++ void *tree_id; /**< CC Tree id */
++ uint8_t grp_id; /**< CC group id within the CC tree */
++ bool plcr_next; /**< TRUE if after CC, in case of data frame,
++ policing is required. */
++ bool bypass_plcr_profile_generation;
++ /**< TRUE to bypass KeyGen policer profile generation;
++ selected profile is the one set at port initialization. */
++ ioc_fm_pcd_kg_plcr_profile_t plcr_profile; /**< Valid only if plcr_next = TRUE and
++ bypass_plcr_profile_generation = FALSE */
++} ioc_fm_pcd_kg_cc_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining initializing a KeyGen scheme
++ (Must match struct t_FmPcdKgSchemeParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_scheme_params_t {
++ bool modify; /**< TRUE to change an existing scheme */
++ union {
++ uint8_t relative_scheme_id;
++ /**< if modify=FALSE: partition-relative scheme id */
++ void *scheme_id; /**< if modify=TRUE: the id of an existing scheme */
++ } scm_id;
++ bool always_direct; /**< This scheme is reached only directly, i.e. no need
++ for match vector; KeyGen will ignore it when matching */
++ struct { /**< HL relevant only if always_direct=FALSE */
++ void *net_env_id; /**< The id of the Network Environment as returned
++ by FM_PCD_NetEnvCharacteristicsSet() */
++ uint8_t num_of_distinction_units;
++ /**< Number of NetEnv units listed in unit_ids array */
++ uint8_t unit_ids[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
++ /**< Indexes as passed to SetNetEnvCharacteristics (?) array */
++ } net_env_params;
++ bool use_hash; /**< use the KG Hash functionality */
++ ioc_fm_pcd_kg_key_extract_and_hash_params_t key_extract_and_hash_params;
++ /**< used only if useHash = TRUE */
++ bool bypass_fqid_generation;
++ /**< Normally - FALSE, TRUE to avoid FQID update in the IC;
++ In such a case FQID after KG will be the default FQID
++ defined for the relevant port, or the FQID defined by CC
++ in cases where CC was the previous engine. */
++ uint32_t base_fqid; /**< Base FQID; Relevant only if bypass_fqid_generation = FALSE;
++ If hash is used and an even distribution is expected
++ according to hash_distribution_num_of_fqids, base_fqid must be aligned to
++ hash_distribution_num_of_fqids. */
++ uint8_t num_of_used_extracted_ors;
++ /**< Number of FQID masks listed in extracted_ors array*/
++ ioc_fm_pcd_kg_extracted_or_params_t extracted_ors[IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS];
++ /**< IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS
++ registers are shared between qid_masks
++ functionality and some of the extraction
++ actions; Normally only some will be used
++ for qid_mask. Driver will return error if
++ resource is full at initialization time. */
++#if DPAA_VERSION >= 11
++ bool override_storage_profile;
++ /**< TRUE if KeyGen overrides the previously decided storage profile */
++ ioc_fm_pcd_kg_storage_profile_t storage_profile;/**< Used when override_storage_profile=TRUE */
++#endif /* DPAA_VERSION >= 11 */
++ ioc_fm_pcd_engine next_engine; /**< may be BMI, PLCR or CC */
++ union { /**< depends on nextEngine */
++ ioc_fm_pcd_done_action done_action; /**< Used when next engine is BMI (done) */
++ ioc_fm_pcd_kg_plcr_profile_t plcr_profile; /**< Used when next engine is PLCR */
++ ioc_fm_pcd_kg_cc_t cc; /**< Used when next engine is CC */
++ } kg_next_engine_params;
++ ioc_fm_pcd_kg_scheme_counter_t scheme_counter; /**< A structure of parameters for updating
++ the scheme counter */
++ void *id; /**< Returns the scheme Id to be used */
++} ioc_fm_pcd_kg_scheme_params_t;
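++
++/*
++ * Usage sketch (illustrative only): a new hashing scheme distributing frames
++ * over a contiguous FQID range. 'env_id' stands for the net-env id returned
++ * in ioc_fm_pcd_net_env_params_t.id; base FQID and ids are assumptions.
++ *
++ * ioc_fm_pcd_kg_scheme_params_t sp;
++ * memset(&sp, 0, sizeof(sp));
++ * sp.modify = FALSE;
++ * sp.scm_id.relative_scheme_id = 0;
++ * sp.always_direct = FALSE;
++ * sp.net_env_params.net_env_id = env_id;
++ * sp.net_env_params.num_of_distinction_units = 1;
++ * sp.net_env_params.unit_ids[0] = 0;
++ * sp.use_hash = TRUE;
++ * sp.key_extract_and_hash_params = hp; // e.g. the hash params built above
++ * sp.bypass_fqid_generation = FALSE;
++ * sp.base_fqid = 0x400; // aligned to hash_distribution_num_of_fqids (64)
++ * sp.next_engine = e_IOC_FM_PCD_DONE; // assumed enum value for BMI/done
++ * sp.kg_next_engine_params.done_action = e_IOC_FM_PCD_ENQ_FRAME;
++ */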
++
++/**************************************************************************//**
++ @Collection
++*//***************************************************************************/
++#if DPAA_VERSION >= 11
++#define IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR 10 /* Maximal supported number of frame length ranges */
++#define IOC_FM_PCD_CC_STATS_FLR_SIZE 2 /* Size in bytes of a frame length range limit */
++#endif /* DPAA_VERSION >= 11 */
++#define IOC_FM_PCD_CC_STATS_FLR_COUNT_SIZE 4 /* Size in bytes of a frame length range counter */
++/* @} */
++
++/**************************************************************************//**
++ @Description Parameters for defining CC as the next engine after a CC node.
++ (Must match struct t_FmPcdCcNextCcParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_next_cc_params_t {
++ void *cc_node_id; /**< Id of the next CC node */
++} ioc_fm_pcd_cc_next_cc_params_t;
++
++#if DPAA_VERSION >= 11
++/**************************************************************************//**
++ @Description A structure for defining Frame Replicator as the next engine after a CC node.
++ (Must match struct t_FmPcdCcNextFrParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_next_fr_params_t {
++ void* frm_replic_id; /**< The id of the next frame replicator group */
++} ioc_fm_pcd_cc_next_fr_params_t;
++#endif /* DPAA_VERSION >= 11 */
++
++/**************************************************************************//**
++ @Description A structure for defining PLCR params when PLCR is the
++ next engine after a CC node
++ (Must match struct t_FmPcdCcNextPlcrParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_next_plcr_params_t {
++ bool override_params; /**< TRUE if CC overrides the previously decided parameters */
++ bool shared_profile; /**< Relevant only if overrideParams=TRUE:
++ TRUE if this profile is shared between ports */
++ uint16_t new_relative_profile_id; /**< Relevant only if overrideParams=TRUE:
++ (otherwise profile id is taken from keygen);
++ This parameter should indicate the policer
++ profile offset within the port's
++ policer profiles or from SHARED window.*/
++ uint32_t new_fqid; /**< Relevant only if overrideParams=TRUE:
++ FQID for enquing the frame;
++ In earlier chips if policer next engine is KEYGEN,
++ this parameter can be 0, because the KEYGEN always decides
++ the enqueue FQID.*/
++#if DPAA_VERSION >= 11
++ uint8_t new_relative_storage_profile_id;
++ /**< Indicates the relative storage profile offset within
++ the port's storage profiles window;
++ Relevant only if the port was configured with VSP. */
++#endif /* DPAA_VERSION >= 11 */
++} ioc_fm_pcd_cc_next_plcr_params_t;
++
++/**************************************************************************//**
++ @Description A structure for defining enqueue params when BMI is the
++ next engine after a CC node
++ (Must match struct t_FmPcdCcNextEnqueueParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_next_enqueue_params_t {
++ ioc_fm_pcd_done_action action; /**< Action - when next engine is BMI (done) */
++ bool override_fqid; /**< TRUE if CC overrides the previously decided FQID and VSP id;
++ relevant if action = e_IOC_FM_PCD_ENQ_FRAME */
++ uint32_t new_fqid; /**< Valid if override_fqid=TRUE; FQID for enqueuing the frame
++ (otherwise the FQID is taken from KeyGen);
++ relevant if action = e_IOC_FM_PCD_ENQ_FRAME */
++#if DPAA_VERSION >= 11
++ uint8_t new_relative_storage_profile_id;
++ /**< Valid if override_fqid=TRUE, Indicates the relative virtual
++ storage profile offset within the port's storage profiles
++ window; Relevant only if the port was configured with VSP. */
++#endif /* DPAA_VERSION >= 11 */
++
++} ioc_fm_pcd_cc_next_enqueue_params_t;
++
++/**************************************************************************//**
++ @Description A structure for defining KG params when KG is the next engine after a CC node
++ (Must match struct t_FmPcdCcNextKgParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_next_kg_params_t {
++ bool override_fqid; /**< TRUE if CC overrides the previously decided FQID and VSP id;
++ Note: these parameters are irrelevant for earlier chips */
++ uint32_t new_fqid; /**< Valid if override_fqid=TRUE; FQID for enqueuing the frame
++ (otherwise the FQID is taken from KeyGen);
++ Note: these parameters are irrelevant for earlier chips */
++#if DPAA_VERSION >= 11
++ uint8_t new_relative_storage_profile_id;
++ /**< Valid if override_fqid=TRUE, Indicates the relative virtual
++ storage profile offset within the port's storage profiles
++ window; Relevant only if the port was configured with VSP. */
++#endif /* DPAA_VERSION >= 11 */
++ void *p_direct_scheme; /**< Direct scheme id to go to. */
++} ioc_fm_pcd_cc_next_kg_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining the next engine after a CC node.
++ (Must match struct t_FmPcdCcNextEngineParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_next_engine_params_t {
++ ioc_fm_pcd_engine next_engine; /**< User has to initialize parameters
++ according to nextEngine definition */
++ union {
++ ioc_fm_pcd_cc_next_cc_params_t cc_params; /**< Parameters in case next engine is CC */
++ ioc_fm_pcd_cc_next_plcr_params_t plcr_params; /**< Parameters in case next engine is PLCR */
++ ioc_fm_pcd_cc_next_enqueue_params_t enqueue_params; /**< Parameters in case next engine is BMI */
++ ioc_fm_pcd_cc_next_kg_params_t kg_params; /**< Parameters in case next engine is KG */
++#if DPAA_VERSION >= 11
++ ioc_fm_pcd_cc_next_fr_params_t fr_params; /**< Parameters in case next engine is FR */
++#endif /* DPAA_VERSION >= 11 */
++ } params; /**< Union used for all the next-engine parameters options */
++ void *manip_id; /**< Handle to Manipulation object.
++ Relevant if next engine is of type result
++ (e_IOC_FM_PCD_PLCR, e_IOC_FM_PCD_KG, e_IOC_FM_PCD_DONE) */
++ bool statistics_en; /**< If TRUE, statistics counters are incremented
++ for each frame passing through this
++ Coarse Classification entry. */
++} ioc_fm_pcd_cc_next_engine_params_t;
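++
++/*
++ * Usage sketch (illustrative only): an enqueue ("done") next-engine action
++ * that overrides the FQID. The enum values are those named in the comments
++ * above; the FQID itself is an assumption.
++ *
++ * ioc_fm_pcd_cc_next_engine_params_t ne;
++ * memset(&ne, 0, sizeof(ne));
++ * ne.next_engine = e_IOC_FM_PCD_DONE;
++ * ne.params.enqueue_params.action = e_IOC_FM_PCD_ENQ_FRAME;
++ * ne.params.enqueue_params.override_fqid = TRUE;
++ * ne.params.enqueue_params.new_fqid = 0x500; // assumed FQID
++ * ne.statistics_en = TRUE;
++ */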
++
++/**************************************************************************//**
++ @Description Parameters for defining a single CC key
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_key_params_t {
++ uint8_t *p_key; /**< Pointer to the key, of the size defined in key_size */
++ uint8_t *p_mask; /**< Pointer to the mask per key, of the size defined
++ in key_size. p_key and p_mask (if defined) have to be
++ of the same size, as defined in key_size */
++ ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params;
++ /**< Parameters of the next engine for the key defined in p_key */
++
++} ioc_fm_pcd_cc_key_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining CC keys parameters
++ The driver supports two methods for CC node allocation: dynamic and static.
++ Static mode was created in order to prevent runtime alloc/free
++ of FMan memory (MURAM), which may cause fragmentation; in this mode,
++ the driver automatically allocates the memory according to
++ 'max_num_of_keys' parameter. The driver calculates the maximal memory
++ size that may be used for this CC-Node taking into consideration
++ 'mask_support' and 'statistics_mode' parameters.
++ When 'action' = e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP in the extraction
++ parameters of this node, 'max_num_of_keys' must be equal to 'num_of_keys'.
++ In dynamic mode, 'max_num_of_keys' must be zero. At initialization,
++ all required structures are allocated according to 'num_of_keys'
++ parameter. During runtime modification, these structures are
++ re-allocated according to the updated number of keys.
++
++ Please note that 'action' and 'ic_indx_mask' mentioned in the
++ specific parameter explanations are passed in the extraction
++ parameters of the node (fields of extractccparams.extractnonhdr).
++*//***************************************************************************/
++typedef struct ioc_keys_params_t {
++ uint16_t max_num_of_keys;/**< Maximum number of keys that will (ever) be used in this CC-Node;
++ A value of zero may be used for dynamic memory allocation. */
++ bool mask_support; /**< This parameter is relevant only if a node is initialized with
++ action = e_IOC_FM_PCD_ACTION_EXACT_MATCH and max_num_of_keys > 0;
++ Should be TRUE to reserve table memory for key masks, even if
++ initial keys do not contain masks, or if the node was initialized
++ as 'empty' (without keys); this will allow user to add keys with
++ masks at runtime. */
++ ioc_fm_pcd_cc_stats_mode statistics_mode;/**< Determines the supported statistics mode for all node's keys.
++ To enable statistics gathering, statistics should be enabled per
++ every key, using 'statistics_en' in next engine parameters structure
++ of that key;
++ If 'max_num_of_keys' is set, all required structures will be
++ preallocated for all keys. */
++#if (DPAA_VERSION >= 11)
++ uint16_t frame_length_ranges[IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
++ /**< Relevant only for 'RMON' statistics mode
++ (this feature is supported only on B4860 device);
++ Holds a list of programmable thresholds. For each received frame,
++ its length in bytes is examined against these range thresholds and
++ the appropriate counter is incremented by 1. For example, to belong
++ to range i, the following should hold:
++ range i-1 threshold < frame length <= range i threshold
++ Each range threshold must be larger than its preceding range
++ threshold. Last range threshold must be 0xFFFF. */
++#endif /* (DPAA_VERSION >= 11) */
++ uint16_t num_of_keys; /**< Number of initial keys;
++ Note that in case of 'action' = e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP,
++ this field should be 2 to the power of the number of bits
++ that are set in 'ic_indx_mask'. */
++ uint8_t key_size; /**< Size of key - for extraction of type FULL_FIELD, 'key_size' has
++ to be the standard size of the selected key; For other extraction
++ types, 'key_size' has to be as size of extraction; When 'action' =
++ e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP, 'key_size' must be 2. */
++ ioc_fm_pcd_cc_key_params_t key_params[IOC_FM_PCD_MAX_NUM_OF_KEYS];
++ /**< An array with 'num_of_keys' entries, each entry specifies the
++ corresponding key parameters;
++ When 'action' = e_IOC_FM_PCD_ACTION_EXACT_MATCH, this value must not
++ exceed 255 (IOC_FM_PCD_MAX_NUM_OF_KEYS-1) as the last entry is saved
++ for the 'miss' entry. */
++ ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss;
++ /**< Parameters for defining the next engine when a key is not matched;
++ Not relevant if action = e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP. */
++} ioc_keys_params_t;
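++
++/*
++ * Usage sketch (illustrative only): static allocation of an exact-match node
++ * with room for masks, starting with a single 4-byte key. 'key_bytes',
++ * 'ne' and 'miss_ne' stand for caller-provided data and next-engine
++ * structures (e.g. built as in the sketches above).
++ *
++ * ioc_keys_params_t kp;
++ * memset(&kp, 0, sizeof(kp));
++ * kp.max_num_of_keys = 16; // preallocate MURAM for up to 16 keys
++ * kp.mask_support = TRUE; // reserve room for per-key masks
++ * kp.num_of_keys = 1;
++ * kp.key_size = 4; // e.g. a full IPv4 address
++ * kp.key_params[0].p_key = key_bytes; // caller-provided 4-byte buffer
++ * kp.key_params[0].cc_next_engine_params = ne;
++ * kp.cc_next_engine_params_for_miss = miss_ne; // what to do on miss
++ */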
++
++/**************************************************************************//**
++ @Description Parameters for defining a CC node
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_node_params_t {
++ ioc_fm_pcd_extract_entry_t extract_cc_params; /**< Extraction parameters */
++ ioc_keys_params_t keys_params; /**< Keys definition matching the selected extraction */
++ void *id; /**< Output parameter; returns the CC node Id to be used */
++} ioc_fm_pcd_cc_node_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining a hash table
++ (Must match struct t_FmPcdHashTableParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_hash_table_params_t {
++ uint16_t max_num_of_keys; /**< Maximum Number Of Keys that will (ever) be used in this Hash-table */
++ ioc_fm_pcd_cc_stats_mode statistics_mode; /**< If not e_IOC_FM_PCD_CC_STATS_MODE_NONE, the required structures for the
++ requested statistics mode will be allocated according to max_num_of_keys. */
++ uint8_t kg_hash_shift; /**< KG-Hash-shift as it was configured in the KG-scheme
++ that leads to this hash-table. */
++ uint16_t hash_res_mask; /**< Mask that will be used on the hash-result;
++ The number-of-sets for this hash will be calculated
++ as (2^(number of bits set in 'hash_res_mask'));
++ The 4 lower bits must be cleared. */
++ uint8_t hash_shift; /**< Byte offset from the beginning of the KeyGen hash result to the
++ 2-bytes to be used as hash index. */
++ uint8_t match_key_size; /**< Size of the exact match keys held by the hash buckets */
++
++ ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss;
++ /**< Parameters for defining the next engine when a key is not matched */
++ void *id;
++} ioc_fm_pcd_hash_table_params_t;
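++
++/*
++ * Usage sketch (illustrative only): a hash table with 2^6 = 64 sets. The
++ * mask below has 6 bits set and its 4 lower bits cleared, as required above;
++ * the key size and maximum key count are assumptions.
++ *
++ * ioc_fm_pcd_hash_table_params_t ht;
++ * memset(&ht, 0, sizeof(ht));
++ * ht.max_num_of_keys = 256;
++ * ht.statistics_mode = e_IOC_FM_PCD_CC_STATS_MODE_NONE;
++ * ht.hash_res_mask = 0x03F0; // 6 bits set => 64 sets; bits 0-3 cleared
++ * ht.hash_shift = 0;
++ * ht.match_key_size = 6; // e.g. a MAC address
++ * ht.cc_next_engine_params_for_miss = miss_ne;
++ */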
++
++/**************************************************************************//**
++ @Description A structure with the arguments for the FM_PCD_HashTableAddKey ioctl() call
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_hash_table_add_key_params_t {
++ void *p_hash_tbl;
++ uint8_t key_size;
++ ioc_fm_pcd_cc_key_params_t key_params;
++} ioc_fm_pcd_hash_table_add_key_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining a CC tree group.
++
++ This structure defines a CC group in terms of NetEnv units
++ and the action to be taken in each case. The unit_ids list must
++ be given in order from low to high indices.
++
++ ioc_fm_pcd_cc_next_engine_params_t is a list of 2^num_of_distinction_units
++ structures where each defines the next action to be taken for
++ each combination of units. For example:
++ num_of_distinction_units = 2
++ unit_ids = {1,3}
++ next_engine_per_entries_in_grp[0] = ioc_fm_pcd_cc_next_engine_params_t for the case that
++ unit 1 - not found; unit 3 - not found;
++ next_engine_per_entries_in_grp[1] = ioc_fm_pcd_cc_next_engine_params_t for the case that
++ unit 1 - not found; unit 3 - found;
++ next_engine_per_entries_in_grp[2] = ioc_fm_pcd_cc_next_engine_params_t for the case that
++ unit 1 - found; unit 3 - not found;
++ next_engine_per_entries_in_grp[3] = ioc_fm_pcd_cc_next_engine_params_t for the case that
++ unit 1 - found; unit 3 - found;
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_grp_params_t {
++ uint8_t num_of_distinction_units; /**< Up to 4 */
++ uint8_t unit_ids [IOC_FM_PCD_MAX_NUM_OF_CC_UNITS];
++ /**< Indexes of the units as defined in
++ FM_PCD_NetEnvCharacteristicsSet() */
++ ioc_fm_pcd_cc_next_engine_params_t next_engine_per_entries_in_grp[IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP];
++ /**< Maximum entries per group is 16 */
++} ioc_fm_pcd_cc_grp_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining the CC tree groups
++ (Must match struct t_FmPcdCcTreeParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_tree_params_t {
++ void *net_env_id; /**< Id of the Network Environment as returned
++ by FM_PCD_NetEnvCharacteristicsSet() */
++ uint8_t num_of_groups; /**< Number of CC groups within the CC tree */
++ ioc_fm_pcd_cc_grp_params_t fm_pcd_cc_group_params [IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS];
++ /**< Parameters for each group. */
++ void *id; /**< Output parameter; Returns the tree Id to be used */
++} ioc_fm_pcd_cc_tree_params_t;
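++
++/*
++ * Usage sketch (illustrative only): one tree group using the two-unit example
++ * from the group-parameters description above (unit ids 1 and 3, hence
++ * 2^2 = 4 entries). 'env_id' stands for a previously obtained net-env id.
++ *
++ * ioc_fm_pcd_cc_tree_params_t tree;
++ * memset(&tree, 0, sizeof(tree));
++ * tree.net_env_id = env_id;
++ * tree.num_of_groups = 1;
++ * tree.fm_pcd_cc_group_params[0].num_of_distinction_units = 2;
++ * tree.fm_pcd_cc_group_params[0].unit_ids[0] = 1;
++ * tree.fm_pcd_cc_group_params[0].unit_ids[1] = 3;
++ * // next_engine_per_entries_in_grp[0..3] must each be filled in,
++ * // indexed by the found/not-found combination of units 1 and 3
++ */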
++
++/**************************************************************************//**
++ @Description Parameters for defining policer byte rate
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_plcr_byte_rate_mode_param_t {
++ ioc_fm_pcd_plcr_frame_length_select frame_length_selection; /**< Frame length selection */
++ ioc_fm_pcd_plcr_roll_back_frame_select roll_back_frame_selection; /**< Relevant only for the e_IOC_FM_PCD_PLCR_L2_FRM_LEN and
++ e_IOC_FM_PCD_PLCR_FULL_FRM_LEN options */
++} ioc_fm_pcd_plcr_byte_rate_mode_param_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining the policer profile (based on
++ RFC-2698 or RFC-4115 attributes).
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_plcr_non_passthrough_alg_param_t {
++ ioc_fm_pcd_plcr_rate_mode rate_mode; /**< Byte / Packet */
++ ioc_fm_pcd_plcr_byte_rate_mode_param_t byte_mode_param; /**< Valid for byte mode; NULL for packet mode */
++ uint32_t committed_info_rate; /**< KBits/Sec or Packets/Sec */
++ uint32_t committed_burst_size; /**< KBits or Packets */
++ uint32_t peak_or_excess_info_rate; /**< KBits/Sec or Packets/Sec */
++ uint32_t peak_or_excess_burst_size; /**< KBits or Packets */
++} ioc_fm_pcd_plcr_non_passthrough_alg_param_t;
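++
++/*
++ * Usage sketch (illustrative only): RFC-2698 style byte-mode rates. The
++ * rate/burst numbers are arbitrary examples, and e_IOC_FM_PCD_PLCR_BYTE_MODE
++ * is an assumption about an enum value defined elsewhere in this header.
++ *
++ * ioc_fm_pcd_plcr_non_passthrough_alg_param_t alg;
++ * memset(&alg, 0, sizeof(alg));
++ * alg.rate_mode = e_IOC_FM_PCD_PLCR_BYTE_MODE; // assumed enum value
++ * alg.byte_mode_param.frame_length_selection = e_IOC_FM_PCD_PLCR_FULL_FRM_LEN;
++ * alg.committed_info_rate = 100000; // KBits/Sec (100 Mbit/s)
++ * alg.committed_burst_size = 128; // KBits
++ * alg.peak_or_excess_info_rate = 200000; // KBits/Sec
++ * alg.peak_or_excess_burst_size = 256; // KBits
++ */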
++
++/**************************************************************************//**
++ @Description Parameters for defining the next engine after policer
++*//***************************************************************************/
++typedef union ioc_fm_pcd_plcr_next_engine_params_u {
++ ioc_fm_pcd_done_action action; /**< Action - when next engine is BMI (done) */
++ void *p_profile; /**< Policer profile handle - used when next engine
++ is PLCR, must be a SHARED profile */
++ void *p_direct_scheme; /**< Direct scheme select - when next engine is Keygen */
++} ioc_fm_pcd_plcr_next_engine_params_u;
++
++typedef struct ioc_fm_pcd_port_params_t {
++ ioc_fm_port_type port_type; /**< Type of port for this profile */
++ uint8_t port_id; /**< FM-Port id of port for this profile */
++} ioc_fm_pcd_port_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining the policer profile entry
++ (Must match struct t_FmPcdPlcrProfileParams defined in fm_pcd_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_plcr_profile_params_t {
++ bool modify; /**< TRUE to change an existing profile */
++ union {
++ struct {
++ ioc_fm_pcd_profile_type_selection profile_type; /**< Type of policer profile */
++ ioc_fm_pcd_port_params_t *p_fm_port; /**< Relevant for per-port profiles only */
++ uint16_t relative_profile_id; /**< Profile id - relative to shared group or to port */
++ } new_params; /**< Use it when modify = FALSE */
++ void *p_profile; /**< A handle to a profile - use it when modify=TRUE */
++ } profile_select;
++ ioc_fm_pcd_plcr_algorithm_selection alg_selection; /**< Profile Algorithm PASS_THROUGH, RFC_2698, RFC_4115 */
++ ioc_fm_pcd_plcr_color_mode color_mode; /**< COLOR_BLIND, COLOR_AWARE */
++
++ union {
++ ioc_fm_pcd_plcr_color dflt_color; /**< For Color-Blind Pass-Through mode; the policer will re-color
++ any incoming packet with the default value. */
++ ioc_fm_pcd_plcr_color override; /**< For Color-Aware modes; the profile response to a
++ pre-color value of 2'b11. */
++ } color;
++
++ ioc_fm_pcd_plcr_non_passthrough_alg_param_t non_passthrough_alg_param; /**< RFC2698 or RFC4115 parameters */
++
++ ioc_fm_pcd_engine next_engine_on_green; /**< Next engine for green-colored frames */
++ ioc_fm_pcd_plcr_next_engine_params_u params_on_green; /**< Next engine parameters for green-colored frames */
++
++ ioc_fm_pcd_engine next_engine_on_yellow; /**< Next engine for yellow-colored frames */
++ ioc_fm_pcd_plcr_next_engine_params_u params_on_yellow; /**< Next engine parameters for yellow-colored frames */
++
++ ioc_fm_pcd_engine next_engine_on_red; /**< Next engine for red-colored frames */
++ ioc_fm_pcd_plcr_next_engine_params_u params_on_red; /**< Next engine parameters for red-colored frames */
++
++ bool trap_profile_on_flow_A; /**< Obsolete - do not use */
++ bool trap_profile_on_flow_B; /**< Obsolete - do not use */
++ bool trap_profile_on_flow_C; /**< Obsolete - do not use */
++
++ void *id; /**< output parameter; Returns the profile Id to be used */
++} ioc_fm_pcd_plcr_profile_params_t;
++
++/**************************************************************************//**
++ @Description A structure for modifying CC tree next engine
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_tree_modify_next_engine_params_t {
++ void *id; /**< CC tree Id to be used */
++ uint8_t grp_indx; /**< A Group index in the tree */
++ uint8_t indx; /**< Entry index in the group defined by grp_indx */
++ ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params;
++ /**< Parameters of the next engine for the selected entry */
++} ioc_fm_pcd_cc_tree_modify_next_engine_params_t;
++
++/**************************************************************************//**
++ @Description A structure for modifying CC node next engine
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_node_modify_next_engine_params_t {
++ void *id; /**< CC node Id to be used */
++ uint16_t key_indx; /**< Key index for Next Engine Params modifications;
++ NOTE: This parameter is IGNORED for miss-key! */
++ uint8_t key_size; /**< Key size of added key */
++ ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params;
++ /**< Parameters of the next engine for the key at key_indx */
++} ioc_fm_pcd_cc_node_modify_next_engine_params_t;
++
++/**************************************************************************//**
++ @Description A structure for remove CC node key
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_node_remove_key_params_t {
++ void *id; /**< CC node Id to be used */
++ uint16_t key_indx; /**< Key index for Next Engine Params modifications;
++ NOTE: This parameter is IGNORED for miss-key! */
++} ioc_fm_pcd_cc_node_remove_key_params_t;
++
++/**************************************************************************//**
++ @Description A structure for modifying CC node key and next engine
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t {
++ void *id; /**< CC node Id to be used */
++ uint16_t key_indx; /**< Key index for Next Engine Params modifications;
++ NOTE: This parameter is IGNORED for miss-key! */
++ uint8_t key_size; /**< Key size of added key */
++ ioc_fm_pcd_cc_key_params_t key_params; /**< Parameters (key, mask and next engine)
++ for the entry at key_indx */
++} ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t;
++
++/**************************************************************************//**
++ @Description A structure for modifying CC node key
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_cc_node_modify_key_params_t {
++ void *id; /**< CC node Id to be used */
++ uint16_t key_indx; /**< Key index for Next Engine Params modifications;
++ NOTE: This parameter is IGNORED for miss-key! */
++ uint8_t key_size; /**< Key size of added key */
++ uint8_t *p_key; /**< Pointer to the key of the size defined in key_size */
++ uint8_t *p_mask; /**< Pointer to the mask per key, of the size defined
++ in key_size. p_key and p_mask (if defined) have to be
++ of the same size, as defined in key_size */
++} ioc_fm_pcd_cc_node_modify_key_params_t;
++
++/**************************************************************************//**
++ @Description A structure with the arguments for the FM_PCD_HashTableRemoveKey ioctl() call
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_hash_table_remove_key_params_t {
++ void *p_hash_tbl; /**< The id of the hash table */
++ uint8_t key_size; /**< The size of the key to remove */
++ uint8_t *p_key; /**< Pointer to the key to remove */
++} ioc_fm_pcd_hash_table_remove_key_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for selecting a location for requested manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_manip_hdr_info_t {
++ ioc_net_header_type hdr; /**< Header selection */
++ ioc_fm_pcd_hdr_index hdr_index; /**< Relevant only for MPLS, VLAN and tunneled IP. Otherwise should be cleared. */
++ bool by_field; /**< TRUE if the location of manipulation is according to some field in the specific header*/
++ ioc_fm_pcd_fields_u full_field; /**< Relevant only when by_field = TRUE: Extract field */
++} ioc_fm_manip_hdr_info_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header removal by header type
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_rmv_by_hdr_params_t {
++ ioc_fm_pcd_manip_hdr_rmv_by_hdr_type type; /**< Selection of header removal location */
++ union {
++#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
++ struct {
++ bool include;/**< If FALSE, remove until the specified header (not including the header);
++ If TRUE, remove also the specified header. */
++ ioc_fm_manip_hdr_info_t hdr_info;
++ } from_start_by_hdr; /**< Relevant when type = e_IOC_FM_PCD_MANIP_RMV_BY_HDR_FROM_START */
++#endif /* FM_CAPWAP_SUPPORT */
++#if (DPAA_VERSION >= 11)
++ ioc_fm_manip_hdr_info_t hdr_info; /**< Relevant when type = e_IOC_FM_PCD_MANIP_RMV_BY_HDR_FROM_START */
++#endif /* (DPAA_VERSION >= 11) */
++ ioc_fm_pcd_manip_hdr_rmv_specific_l2 specific_l2;/**< Relevant when type = e_IOC_FM_PCD_MANIP_BY_HDR_SPECIFIC_L2;
++ Defines which L2 headers to remove. */
++ } u;
++} ioc_fm_pcd_manip_hdr_rmv_by_hdr_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for configuring IP fragmentation manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_frag_ip_params_t {
++ uint16_t size_for_fragmentation; /**< If length of the frame is greater than this value,
++ IP fragmentation will be executed.*/
++#if DPAA_VERSION == 10
++ uint8_t scratch_bpid; /**< Absolute buffer pool id according to BM configuration.*/
++#endif /* DPAA_VERSION == 10 */
++ bool sg_bpid_en; /**< Enable a dedicated buffer pool id for the Scatter/Gather buffer allocation;
++ If disabled, the Scatter/Gather buffer will be allocated from the same pool as the
++ received frame's buffer. */
++ uint8_t sg_bpid; /**< Scatter/Gather buffer pool id;
++ This parameter is relevant when 'sg_bpid_en=TRUE';
++ Same LIODN number is used for these buffers as for the received frames buffers, so buffers
++ of this pool need to be allocated in the same memory area as the received buffers.
++ If the received buffers arrive from different sources, the Scatter/Gather BP id should be
++ mutual to all these sources. */
++ ioc_fm_pcd_manip_dont_frag_action dont_frag_action; /**< Don't-Fragment action: if an IP packet is larger
++ than the MTU and its DF bit is set, this field
++ determines the action to be taken.*/
++} ioc_fm_pcd_manip_frag_ip_params_t;
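++
++/*
++ * Usage sketch (illustrative only, DPAA >= 11 layout): fragment frames above
++ * 1500 bytes and let S/G buffers come from the received frame's own pool.
++ * e_IOC_FM_PCD_MANIP_DISCARD_PACKET is an assumption about an enum value
++ * defined elsewhere in this header.
++ *
++ * ioc_fm_pcd_manip_frag_ip_params_t frag;
++ * memset(&frag, 0, sizeof(frag));
++ * frag.size_for_fragmentation = 1500;
++ * frag.sg_bpid_en = FALSE; // S/G buffers from the received frame's pool
++ * frag.dont_frag_action = e_IOC_FM_PCD_MANIP_DISCARD_PACKET; // assumed
++ */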
++
++/**************************************************************************//**
++ @Description Parameters for configuring IP reassembly manipulation.
++
++ This is a common structure for both IPv4 and IPv6 reassembly
++ manipulation. For reassembly of both IPv4 and IPv6, make sure to
++ set the 'hdr' field in ioc_fm_pcd_manip_reassem_params_t to IOC_HEADER_TYPE_IPv6.
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_reassem_ip_params_t {
++ uint8_t relative_scheme_id[2]; /**< Partition relative scheme id:
++ relativeSchemeId[0] - Relative scheme ID for IPV4 Reassembly manipulation;
++ relativeSchemeId[1] - Relative scheme ID for IPV6 Reassembly manipulation;
++ NOTE: The following comment is relevant only for FMAN v2 devices:
++ Relative scheme IDs for IPv4/IPv6 Reassembly manipulation must be smaller than
++ the user scheme IDs to ensure that the reassembly schemes will be the first match;
++ the remaining schemes, if defined, should have a higher relative scheme ID. */
++#if DPAA_VERSION >= 11
++ uint32_t non_consistent_sp_fqid; /**< In case other fragments of the frame correspond to a different
++ storage profile than the opening fragment (Non-Consistent-SP state),
++ one of two possible scenarios occurs:
++ if 'non_consistent_sp_fqid != 0', the reassembled frame will be enqueued to
++ this FQID, otherwise a 'Non Consistent SP' bit will be set in the FD[status].*/
++#else
++ uint8_t sg_bpid; /**< Buffer pool id for the S/G frame created by the reassembly process */
++#endif /* DPAA_VERSION >= 11 */
++ uint8_t data_mem_id; /**< Memory partition ID for the IPR's external tables structure */
++ uint16_t data_liodn_offset; /**< LIODN offset for access the IPR's external tables structure. */
++ uint16_t min_frag_size[2]; /**< Minimum fragment size:
++ minFragSize[0] - for ipv4, minFragSize[1] - for ipv6 */
++ ioc_fm_pcd_manip_reassem_ways_number num_of_frames_per_hash_entry[2];
++ /**< Number of frames per hash entry needed for reassembly process:
++ numOfFramesPerHashEntry[0] - for ipv4 (max value is e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH);
++ numOfFramesPerHashEntry[1] - for ipv6 (max value is e_IOC_FM_PCD_MANIP_SIX_WAYS_HASH). */
++ uint16_t max_num_frames_in_process;/**< Number of frames which can be processed by Reassembly at the same time;
++ Must be power of 2;
++ In the case numOfFramesPerHashEntry == e_IOC_FM_PCD_MANIP_FOUR_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 4 - 512;
++ In the case numOfFramesPerHashEntry == e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 8 - 2048. */
++ ioc_fm_pcd_manip_reassem_time_out_mode time_out_mode; /**< Expiration delay initialized by Reassembly process */
++ uint32_t fqid_for_time_out_frames;/**< FQID in which time out frames will enqueue during Time Out Process */
++ uint32_t timeout_threshold_for_reassm_process;
++ /**< Represents the time interval in microseconds after which an open
++ frame (at least one fragment was processed, but not all fragments) is considered too old */
++} ioc_fm_pcd_manip_reassem_ip_params_t;
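++
++/*
++ * Usage sketch (illustrative only): a combined IPv4+IPv6 reassembly setup.
++ * The ways-number enum values are those named in the comments above; scheme
++ * ids, sizes and the timeout are assumptions.
++ *
++ * ioc_fm_pcd_manip_reassem_ip_params_t ras;
++ * memset(&ras, 0, sizeof(ras));
++ * ras.relative_scheme_id[0] = 0; // IPv4 scheme, below all user schemes
++ * ras.relative_scheme_id[1] = 1; // IPv6 scheme
++ * ras.num_of_frames_per_hash_entry[0] = e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH;
++ * ras.num_of_frames_per_hash_entry[1] = e_IOC_FM_PCD_MANIP_SIX_WAYS_HASH;
++ * ras.max_num_frames_in_process = 64; // power of 2, within 8 - 2048
++ * ras.timeout_threshold_for_reassm_process = 1000000; // 1 second
++ */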
++
++/**************************************************************************//**
++ @Description Parameters for defining IPSEC manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_special_offload_ipsec_params_t {
++ bool decryption; /**< TRUE if being used in decryption direction;
++ FALSE if being used in encryption direction. */
++ bool ecn_copy; /**< TRUE to copy the ECN bits from inner/outer to outer/inner
++ (direction depends on the 'decryption' field). */
++ bool dscp_copy; /**< TRUE to copy the DSCP bits from inner/outer to outer/inner
++ (direction depends on the 'decryption' field). */
++ bool variable_ip_hdr_len; /**< TRUE for supporting variable IP header length in decryption. */
++ bool variable_ip_version; /**< TRUE for supporting both IP version on the same SA in encryption */
++ uint8_t outer_ip_hdr_len; /**< If 'variable_ip_version == TRUE' then this field must be set to a non-zero value;
++ it specifies the length of the outer IP header that was configured in the
++ corresponding SA. */
++ uint16_t arw_size; /**< If non-zero, an ARW check will be performed for this SA;
++ the value must be a multiple of 16 */
++ void *arw_addr; /**< If arw_size is non-zero, this field must be set to a non-zero value;
++ MUST be allocated from the MURAM of the FMan that the post-SEC op-port belongs to;
++ must be 4B aligned. Required MURAM size is '(NEXT_POWER_OF_2(arwSize+32))/8+4' bytes */
++} ioc_fm_pcd_manip_special_offload_ipsec_params_t;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Parameters for configuring CAPWAP fragmentation manipulation
++
++ Restrictions:
++ - Maximum number of fragments per frame is 16.
++ - Transmit confirmation is not supported.
++ - Fragmentation nodes must be set as the last PCD action (i.e. the
++ corresponding CC node key must have next engine set to e_FM_PCD_DONE).
++ - Only BMan buffers shall be used for frames to be fragmented.
++ - NOTE: The following comment is relevant only for FMAN v3 devices: IPF
++ does not support VSP. Therefore, on the same port where we have IPF we
++ cannot support VSP.
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_frag_capwap_params_t {
++ uint16_t size_for_fragmentation; /**< If length of the frame is greater than this value,
++ CAPWAP fragmentation will be executed.*/
++ bool sg_bpid_en; /**< Enable a dedicated buffer pool id for the Scatter/Gather buffer allocation;
++ If disabled, the Scatter/Gather buffer will be allocated from the same pool as the
++ received frame's buffer. */
++ uint8_t sg_bpid; /**< Scatter/Gather buffer pool id;
++ This parameter is relevant when 'sg_bpid_en=TRUE';
++ Same LIODN number is used for these buffers as for the received frames buffers, so buffers
++ of this pool need to be allocated in the same memory area as the received buffers.
++ If the received buffers arrive from different sources, the Scatter/Gather BP id should be
++ mutual to all these sources. */
++ bool compress_mode_en; /**< CAPWAP Header Options Compress Enable mode;
++ When this mode is enabled, only the first fragment includes the CAPWAP header
++ options field (if the user provides it in the input frame) and all other
++ fragments exclude it (the CAPWAP header is updated accordingly).*/
++} ioc_fm_pcd_manip_frag_capwap_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for configuring CAPWAP reassembly manipulation.
++
++ Restrictions:
++ - Application must define one scheme to catch the reassembled frames.
++ - Maximum number of fragments per frame is 16.
++
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_reassem_capwap_params_t {
++ uint8_t relative_scheme_id; /**< Partition relative scheme id;
++ NOTE: this id must be smaller than the user scheme IDs to ensure that the reassembly scheme will be the first match;
++ the remaining schemes, if defined, should have a higher relative scheme ID. */
++ uint8_t data_mem_id; /**< Memory partition ID for the IPR's external tables structure */
++ uint16_t data_liodn_offset; /**< LIODN offset for access the IPR's external tables structure. */
++ uint16_t max_reassembled_frame_length;/**< The maximum CAPWAP reassembled frame length in bytes;
++ If maxReassembledFrameLength == 0, any successfully reassembled frame length is
++ considered a valid length;
++ if maxReassembledFrameLength > 0, a successfully reassembled frame whose length
++ exceeds this value is considered an error frame (FD status[CRE] bit is set). */
++ ioc_fm_pcd_manip_reassem_ways_number num_of_frames_per_hash_entry;
++ /**< Number of frames per hash entry needed for reassembly process */
++ uint16_t max_num_frames_in_process; /**< Number of frames which can be processed by reassembly at the same time;
++ Must be power of 2;
++ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 4 - 512;
++ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH,
++ maxNumFramesInProcess has to be in the range of 8 - 2048. */
++ ioc_fm_pcd_manip_reassem_time_out_mode time_out_mode; /**< Expiration delay initialized by Reassembly process */
++ uint32_t fqid_for_time_out_frames; /**< FQID in which time out frames will enqueue during Time Out Process;
++ Recommended value for this field is 0; in this way timed-out frames will be discarded */
++ uint32_t timeout_threshold_for_reassm_process;
++ /**< Represents the time interval in microseconds after which an open
++ frame (at least one fragment was processed, but not all fragments) is considered too old */
++} ioc_fm_pcd_manip_reassem_capwap_params_t;
++
++/**************************************************************************//**
++ @Description structure for defining CAPWAP manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_special_offload_capwap_params_t {
++ bool dtls; /**< TRUE if continue to SEC DTLS encryption */
++ ioc_fm_pcd_manip_hdr_qos_src qos_src; /**< TODO */
++} ioc_fm_pcd_manip_special_offload_capwap_params_t;
++
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description Parameters for defining special offload manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_special_offload_params_t {
++ ioc_fm_pcd_manip_special_offload_type type; /**< Type of special offload manipulation */
++ union
++ {
++ ioc_fm_pcd_manip_special_offload_ipsec_params_t ipsec; /**< Parameters for IPSec; Relevant when
++ type = e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC */
++
++#if (DPAA_VERSION >= 11)
++ ioc_fm_pcd_manip_special_offload_capwap_params_t capwap; /**< Parameters for CAPWAP; Relevant when
++ type = e_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP */
++#endif /* (DPAA_VERSION >= 11) */
++ } u;
++} ioc_fm_pcd_manip_special_offload_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining generic removal manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_rmv_generic_params_t {
++ uint8_t offset; /**< Offset from beginning of header to the start
++ location of the removal */
++ uint8_t size; /**< Size of removed section */
++} ioc_fm_pcd_manip_hdr_rmv_generic_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining insertion manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_insrt_t {
++ uint8_t size; /**< size of inserted section */
++ uint8_t *p_data; /**< data to be inserted */
++} ioc_fm_pcd_manip_hdr_insrt_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining generic insertion manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_insrt_generic_params_t {
++ uint8_t offset; /**< Offset from beginning of header to the start
++ location of the insertion */
++ uint8_t size; /**< Size of inserted section */
++ bool replace; /**< TRUE to override (replace) existing data at
++ 'offset', FALSE to insert */
++ uint8_t *p_data; /**< Pointer to data to be inserted */
++} ioc_fm_pcd_manip_hdr_insrt_generic_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation VLAN DSCP To Vpri translation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_field_update_vlan_dscp_to_vpri_t {
++ uint8_t dscp_to_vpri_table[IOC_FM_PCD_MANIP_DSCP_TO_VLAN_TRANS];
++ /**< A table of VPri values for each DSCP value;
++ The index is the D_SCP value (0-0x3F) and the
++ value is the corresponding VPRI (0-15). */
++ uint8_t vpri_def_val; /**< 0-7, Relevant only if update_type =
++ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN,
++ this field is the Q Tag default value if the
++ IP header is not found. */
++} ioc_fm_pcd_manip_hdr_field_update_vlan_dscp_to_vpri_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation VLAN fields updates
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_field_update_vlan_t {
++ ioc_fm_pcd_manip_hdr_field_update_vlan update_type; /**< Selects VLAN update type */
++ union {
++ uint8_t vpri; /**< 0-7, Relevant only if update_type =
++ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_PRI, this
++ is the new VLAN pri. */
++ ioc_fm_pcd_manip_hdr_field_update_vlan_dscp_to_vpri_t dscp_to_vpri;
++ /**< Parameters structure, Relevant only if update_type =
++ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN. */
++ } u;
++} ioc_fm_pcd_manip_hdr_field_update_vlan_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation IPV4 fields updates
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_field_update_ipv4_t {
++ ioc_ipv4_hdr_manip_update_flags_t valid_updates; /**< ORed flag, selecting the required updates */
++ uint8_t tos; /**< 8 bit New TOS; Relevant if valid_updates contains
++ IOC_HDR_MANIP_IPV4_TOS */
++ uint16_t id; /**< 16 bit New IP ID; Relevant only if valid_updates
++ contains IOC_HDR_MANIP_IPV4_ID */
++ uint32_t src; /**< 32 bit New IP SRC; Relevant only if valid_updates
++ contains IOC_HDR_MANIP_IPV4_SRC */
++ uint32_t dst; /**< 32 bit New IP DST; Relevant only if valid_updates
++ contains IOC_HDR_MANIP_IPV4_DST */
++} ioc_fm_pcd_manip_hdr_field_update_ipv4_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation IPV6 fields updates
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_field_update_ipv6_t {
++ ioc_ipv6_hdr_manip_update_flags_t valid_updates; /**< ORed flag, selecting the required updates */
++ uint8_t traffic_class; /**< 8 bit New Traffic Class; Relevant if valid_updates contains
++ IOC_HDR_MANIP_IPV6_TC */
++ uint8_t src[IOC_NET_HEADER_FIELD_IPv6_ADDR_SIZE];
++ /**< 16 byte new IP SRC; Relevant only if valid_updates
++ contains IOC_HDR_MANIP_IPV6_SRC */
++ uint8_t dst[IOC_NET_HEADER_FIELD_IPv6_ADDR_SIZE];
++ /**< 16 byte new IP DST; Relevant only if valid_updates
++ contains IOC_HDR_MANIP_IPV6_DST */
++} ioc_fm_pcd_manip_hdr_field_update_ipv6_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation TCP/UDP fields updates
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t {
++ ioc_tcp_udp_hdr_manip_update_flags_t valid_updates; /**< ORed flag, selecting the required updates */
++ uint16_t src; /**< 16 bit New TCP/UDP SRC; Relevant only if valid_updates
++ contains IOC_HDR_MANIP_TCP_UDP_SRC */
++ uint16_t dst; /**< 16 bit New TCP/UDP DST; Relevant only if valid_updates
++ contains IOC_HDR_MANIP_TCP_UDP_DST */
++} ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation fields updates
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_field_update_params_t {
++ ioc_fm_pcd_manip_hdr_field_update_type type; /**< Type of header field update manipulation */
++ union {
++ ioc_fm_pcd_manip_hdr_field_update_vlan_t vlan; /**< Parameters for VLAN update. Relevant when
++ type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN */
++ ioc_fm_pcd_manip_hdr_field_update_ipv4_t ipv4; /**< Parameters for IPv4 update. Relevant when
++ type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4 */
++ ioc_fm_pcd_manip_hdr_field_update_ipv6_t ipv6; /**< Parameters for IPv6 update. Relevant when
++ type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6 */
++ ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t tcp_udp;/**< Parameters for TCP/UDP update. Relevant when
++ type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP */
++ } u;
++} ioc_fm_pcd_manip_hdr_field_update_params_t;
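++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative editorial addition, not part of the
++                original API documentation): selecting a TCP/UDP destination
++                port rewrite. Only the updates flagged in 'valid_updates' are
++                applied; all other union fields are ignored. Assumes
++                <string.h> for memset() and this header for the types.
++
++                    ioc_fm_pcd_manip_hdr_field_update_params_t upd;
++                    memset(&upd, 0, sizeof(upd)); // start from a clean descriptor
++                    upd.type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP;
++                    upd.u.tcp_udp.valid_updates = IOC_HDR_MANIP_TCP_UDP_DST; // DST only
++                    upd.u.tcp_udp.dst = 8080; // new destination port (example value)
++*//***************************************************************************/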
++
++/**************************************************************************//**
++ @Description Parameters for defining custom header manipulation for IP replacement
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_custom_ip_hdr_replace_t {
++ ioc_fm_pcd_manip_hdr_custom_ip_replace replace_type; /**< Selects replace update type */
++ bool dec_ttl_hl; /**< Decrement TTL (IPV4) or Hop limit (IPV6) by 1 */
++ bool update_ipv4_id; /**< Relevant when replace_type =
++ e_IOC_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4 */
++ uint16_t id; /**< 16 bit New IP ID; Relevant only if
++ update_ipv4_id = TRUE */
++ uint8_t hdr_size; /**< The size of the new IP header */
++ uint8_t hdr[IOC_FM_PCD_MANIP_MAX_HDR_SIZE];
++ /**< The new IP header */
++} ioc_fm_pcd_manip_hdr_custom_ip_hdr_replace_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining custom header manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_custom_params_t {
++ ioc_fm_pcd_manip_hdr_custom_type type; /**< Type of header field update manipulation */
++ union {
++ ioc_fm_pcd_manip_hdr_custom_ip_hdr_replace_t ip_hdr_replace;
++ /**< Parameters IP header replacement */
++ } u;
++} ioc_fm_pcd_manip_hdr_custom_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining specific L2 insertion manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_insrt_specific_l2_params_t {
++ ioc_fm_pcd_manip_hdr_insrt_specific_l2 specific_l2; /**< Selects which L2 headers to insert */
++ bool update; /**< TRUE to update MPLS header */
++ uint8_t size; /**< size of inserted section */
++ uint8_t *p_data; /**< data to be inserted */
++} ioc_fm_pcd_manip_hdr_insrt_specific_l2_params_t;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Parameters for defining IP insertion manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_insrt_ip_params_t {
++ bool calc_l4_checksum; /**< Calculate L4 checksum. */
++ ioc_fm_pcd_manip_hdr_qos_mapping_mode mapping_mode; /**< TODO */
++ uint8_t last_pid_offset; /**< the offset of the last Protocol within
++ the inserted header */
++ uint16_t id; /**< 16 bit New IP ID */
++ bool dont_frag_overwrite;
++ /**< IPv4 only. DF is overwritten with the hash-result next-to-last byte.
++ * This byte is configured to be overwritten when RPD is set. */
++ uint8_t last_dst_offset;
++                          /**< IPv6 only. If a routing extension exists, the user should set the offset of the destination address
++ * in order to calculate UDP checksum pseudo header;
++ * Otherwise set it to '0'. */
++ ioc_fm_pcd_manip_hdr_insrt_t insrt; /**< size and data to be inserted. */
++} ioc_fm_pcd_manip_hdr_insrt_ip_params_t;
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description Parameters for defining header insertion manipulation by header type
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_insrt_by_hdr_params_t {
++ ioc_fm_pcd_manip_hdr_insrt_by_hdr_type type; /**< Selects manipulation type */
++ union {
++ ioc_fm_pcd_manip_hdr_insrt_specific_l2_params_t specific_l2_params;
++ /**< Used when type = e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2:
++                                    Selects which L2 headers to insert */
++#if (DPAA_VERSION >= 11)
++ ioc_fm_pcd_manip_hdr_insrt_ip_params_t ip_params; /**< Used when type = e_FM_PCD_MANIP_INSRT_BY_HDR_IP */
++ ioc_fm_pcd_manip_hdr_insrt_t insrt; /**< Used when type is one of e_FM_PCD_MANIP_INSRT_BY_HDR_UDP,
++ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE, or
++ e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP */
++#endif /* (DPAA_VERSION >= 11) */
++ } u;
++} ioc_fm_pcd_manip_hdr_insrt_by_hdr_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header insertion manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_insrt_params_t {
++ ioc_fm_pcd_manip_hdr_insrt_type type; /**< Type of insertion manipulation */
++ union {
++ ioc_fm_pcd_manip_hdr_insrt_by_hdr_params_t by_hdr; /**< Parameters for defining header insertion manipulation by header type,
++ relevant if 'type' = e_IOC_FM_PCD_MANIP_INSRT_BY_HDR */
++ ioc_fm_pcd_manip_hdr_insrt_generic_params_t generic;/**< Parameters for defining generic header insertion manipulation,
++ relevant if type = e_IOC_FM_PCD_MANIP_INSRT_GENERIC */
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ ioc_fm_pcd_manip_hdr_insrt_by_template_params_t by_template;
++ /**< Parameters for defining header insertion manipulation by template,
++ relevant if 'type' = e_IOC_FM_PCD_MANIP_INSRT_BY_TEMPLATE */
++#endif /* FM_CAPWAP_SUPPORT */
++ } u;
++} ioc_fm_pcd_manip_hdr_insrt_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header removal manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_rmv_params_t {
++ ioc_fm_pcd_manip_hdr_rmv_type type; /**< Type of header removal manipulation */
++ union {
++ ioc_fm_pcd_manip_hdr_rmv_by_hdr_params_t by_hdr; /**< Parameters for defining header removal manipulation by header type,
++ relevant if type = e_IOC_FM_PCD_MANIP_RMV_BY_HDR */
++ ioc_fm_pcd_manip_hdr_rmv_generic_params_t generic; /**< Parameters for defining generic header removal manipulation,
++ relevant if type = e_IOC_FM_PCD_MANIP_RMV_GENERIC */
++ } u;
++} ioc_fm_pcd_manip_hdr_rmv_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining header manipulation node
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_hdr_params_t {
++ bool rmv; /**< TRUE, to define removal manipulation */
++ ioc_fm_pcd_manip_hdr_rmv_params_t rmv_params; /**< Parameters for removal manipulation, relevant if 'rmv' = TRUE */
++
++ bool insrt; /**< TRUE, to define insertion manipulation */
++ ioc_fm_pcd_manip_hdr_insrt_params_t insrt_params; /**< Parameters for insertion manipulation, relevant if 'insrt' = TRUE */
++
++ bool field_update; /**< TRUE, to define field update manipulation */
++ ioc_fm_pcd_manip_hdr_field_update_params_t field_update_params; /**< Parameters for field update manipulation, relevant if 'fieldUpdate' = TRUE */
++
++ bool custom; /**< TRUE, to define custom manipulation */
++ ioc_fm_pcd_manip_hdr_custom_params_t custom_params; /**< Parameters for custom manipulation, relevant if 'custom' = TRUE */
++
++ bool dont_parse_after_manip;/**< FALSE to activate the parser a second time after
++ completing the manipulation on the frame */
++} ioc_fm_pcd_manip_hdr_params_t;
++
++
++/**************************************************************************//**
++ @Description structure for defining fragmentation manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_frag_params_t {
++ ioc_net_header_type hdr; /**< Header selection */
++ union {
++#if (DPAA_VERSION >= 11)
++ ioc_fm_pcd_manip_frag_capwap_params_t capwap_frag; /**< Parameters for defining CAPWAP fragmentation,
++ relevant if 'hdr' = HEADER_TYPE_CAPWAP */
++#endif /* (DPAA_VERSION >= 11) */
++ ioc_fm_pcd_manip_frag_ip_params_t ip_frag; /**< Parameters for defining IP fragmentation,
++ relevant if 'hdr' = HEADER_TYPE_Ipv4 or HEADER_TYPE_Ipv6 */
++ } u;
++} ioc_fm_pcd_manip_frag_params_t;
++
++/**************************************************************************//**
++ @Description structure for defining reassemble manipulation
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_reassem_params_t {
++ ioc_net_header_type hdr; /**< Header selection */
++ union {
++#if (DPAA_VERSION >= 11)
++ ioc_fm_pcd_manip_reassem_capwap_params_t capwap_reassem; /**< Parameters for defining CAPWAP reassembly,
++ relevant if 'hdr' = HEADER_TYPE_CAPWAP */
++#endif /* (DPAA_VERSION >= 11) */
++ ioc_fm_pcd_manip_reassem_ip_params_t ip_reassem; /**< Parameters for defining IP reassembly,
++ relevant if 'hdr' = HEADER_TYPE_Ipv4 or HEADER_TYPE_Ipv6 */
++ } u;
++} ioc_fm_pcd_manip_reassem_params_t;
++
++/**************************************************************************//**
++ @Description Parameters for defining a manipulation node
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_params_t {
++ ioc_fm_pcd_manip_type type; /**< Selects type of manipulation node */
++ union {
++ ioc_fm_pcd_manip_hdr_params_t hdr; /**< Parameters for defining header manipulation node */
++ ioc_fm_pcd_manip_reassem_params_t reassem;/**< Parameters for defining reassembly manipulation node */
++ ioc_fm_pcd_manip_frag_params_t frag; /**< Parameters for defining fragmentation manipulation node */
++ ioc_fm_pcd_manip_special_offload_params_t special_offload;/**< Parameters for defining special offload manipulation node */
++ } u;
++ void *p_next_manip;/**< Handle to another (previously defined) manipulation node;
++                                  Allows concatenation of manipulation actions;
++ This parameter is optional and may be NULL. */
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++ bool frag_or_reasm;/**< TRUE, if defined fragmentation/reassembly manipulation */
++ ioc_fm_pcd_manip_frag_or_reasm_params_t frag_or_reasm_params;/**< Parameters for fragmentation/reassembly manipulation,
++ relevant if frag_or_reasm = TRUE */
++#endif /* FM_CAPWAP_SUPPORT */
++ void *id;
++} ioc_fm_pcd_manip_params_t;
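++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative editorial addition): a minimal
++                header manipulation node doing only an IPv4 TOS field update,
++                passed to FM_PCD_IOC_MANIP_NODE_SET (defined below). 'fd' is
++                assumed to be an open FM PCD device file descriptor; the
++                enumerator name e_IOC_FM_PCD_MANIP_HDR is assumed from the
++                ioc_fm_pcd_manip_type naming scheme. Assumes <string.h>,
++                <stdio.h> and <sys/ioctl.h>.
++
++                    ioc_fm_pcd_manip_params_t mp;
++                    memset(&mp, 0, sizeof(mp));
++                    mp.type = e_IOC_FM_PCD_MANIP_HDR; // header manipulation node (assumed name)
++                    mp.u.hdr.field_update = TRUE;
++                    mp.u.hdr.field_update_params.type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4;
++                    mp.u.hdr.field_update_params.u.ipv4.valid_updates = IOC_HDR_MANIP_IPV4_TOS;
++                    mp.u.hdr.field_update_params.u.ipv4.tos = 0x10; // example TOS value
++                    mp.p_next_manip = NULL; // no chained manipulation
++                    if (ioctl(fd, FM_PCD_IOC_MANIP_NODE_SET, &mp) < 0)
++                        perror("FM_PCD_IOC_MANIP_NODE_SET");
++*//***************************************************************************/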
++
++/**************************************************************************//**
++ @Description Structure for retrieving IP reassembly statistics
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_reassem_ip_stats_t {
++ /* common counters for both IPv4 and IPv6 */
++ uint32_t timeout; /**< Counts the number of TimeOut occurrences */
++ uint32_t rfd_pool_busy; /**< Counts the number of failed attempts to allocate
++ a Reassembly Frame Descriptor */
++ uint32_t internal_buffer_busy; /**< Counts the number of times an internal buffer busy occurred */
++ uint32_t external_buffer_busy; /**< Counts the number of times external buffer busy occurred */
++ uint32_t sg_fragments; /**< Counts the number of Scatter/Gather fragments */
++ uint32_t dma_semaphore_depletion; /**< Counts the number of failed attempts to allocate a DMA semaphore */
++#if (DPAA_VERSION >= 11)
++ uint32_t non_consistent_sp; /**< Counts the number of Non Consistent Storage Profile events for
++ successfully reassembled frames */
++#endif /* (DPAA_VERSION >= 11) */
++    struct {
++ uint32_t successfully_reassembled; /**< Counts the number of successfully reassembled frames */
++ uint32_t valid_fragments; /**< Counts the total number of valid fragments that
++ have been processed for all frames */
++ uint32_t processed_fragments; /**< Counts the number of processed fragments
++ (valid and error fragments) for all frames */
++ uint32_t malformed_fragments; /**< Counts the number of malformed fragments processed for all frames */
++ uint32_t discarded_fragments; /**< Counts the number of fragments discarded by the reassembly process */
++ uint32_t auto_learn_busy; /**< Counts the number of times a busy condition occurs when attempting
++ to access an IP-Reassembly Automatic Learning Hash set */
++ uint32_t more_than16fragments; /**< Counts the fragment occurrences in which the number of fragments-per-frame
++ exceeds 16 */
++ } specific_hdr_statistics[2]; /**< slot '0' is for IPv4, slot '1' is for IPv6 */
++} ioc_fm_pcd_manip_reassem_ip_stats_t;
++
++/**************************************************************************//**
++ @Description Structure for retrieving IP fragmentation statistics
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_frag_ip_stats_t {
++ uint32_t total_frames; /**< Number of frames that passed through the manipulation node */
++ uint32_t fragmented_frames; /**< Number of frames that were fragmented */
++ uint32_t generated_fragments; /**< Number of fragments that were generated */
++} ioc_fm_pcd_manip_frag_ip_stats_t;
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Description Structure for retrieving CAPWAP reassembly statistics
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_reassem_capwap_stats_t {
++ uint32_t timeout; /**< Counts the number of timeout occurrences */
++ uint32_t rfd_pool_busy; /**< Counts the number of failed attempts to allocate
++ a Reassembly Frame Descriptor */
++ uint32_t internal_buffer_busy; /**< Counts the number of times an internal buffer busy occurred */
++ uint32_t external_buffer_busy; /**< Counts the number of times external buffer busy occurred */
++ uint32_t sg_fragments; /**< Counts the number of Scatter/Gather fragments */
++ uint32_t dma_semaphore_depletion; /**< Counts the number of failed attempts to allocate a DMA semaphore */
++ uint32_t successfully_reassembled; /**< Counts the number of successfully reassembled frames */
++ uint32_t valid_fragments; /**< Counts the total number of valid fragments that
++ have been processed for all frames */
++ uint32_t processed_fragments; /**< Counts the number of processed fragments
++ (valid and error fragments) for all frames */
++ uint32_t malformed_fragments; /**< Counts the number of malformed fragments processed for all frames */
++ uint32_t autoLearn_busy; /**< Counts the number of times a busy condition occurs when attempting
++                                            to access a Reassembly Automatic Learning Hash set */
++ uint32_t discarded_fragments; /**< Counts the number of fragments discarded by the reassembly process */
++ uint32_t more_than16fragments; /**< Counts the fragment occurrences in which the number of fragments-per-frame
++ exceeds 16 */
++    uint32_t exceed_max_reassembly_frame_len;/**< Counts the number of times that a successfully reassembled
++                                                  frame length exceeds the MaxReassembledFrameLength value */
++} ioc_fm_pcd_manip_reassem_capwap_stats_t;
++
++/**************************************************************************//**
++ @Description Structure for retrieving CAPWAP fragmentation statistics
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_frag_capwap_stats_t {
++ uint32_t total_frames; /**< Number of frames that passed through the manipulation node */
++ uint32_t fragmented_frames; /**< Number of frames that were fragmented */
++ uint32_t generated_fragments; /**< Number of fragments that were generated */
++#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
++ uint8_t sg_allocation_failure; /**< Number of allocation failure of s/g buffers */
++#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */
++} ioc_fm_pcd_manip_frag_capwap_stats_t;
++#endif /* (DPAA_VERSION >= 11) */
++
++/**************************************************************************//**
++ @Description Structure for retrieving reassembly statistics
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_reassem_stats_t {
++ union {
++ ioc_fm_pcd_manip_reassem_ip_stats_t ip_reassem; /**< Structure for IP reassembly statistics */
++#if (DPAA_VERSION >= 11)
++ ioc_fm_pcd_manip_reassem_capwap_stats_t capwap_reassem; /**< Structure for CAPWAP reassembly statistics */
++#endif /* (DPAA_VERSION >= 11) */
++ } u;
++} ioc_fm_pcd_manip_reassem_stats_t;
++
++/**************************************************************************//**
++ @Description structure for retrieving fragmentation statistics
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_frag_stats_t {
++ union {
++ ioc_fm_pcd_manip_frag_ip_stats_t ip_frag; /**< Structure for IP fragmentation statistics */
++#if (DPAA_VERSION >= 11)
++ ioc_fm_pcd_manip_frag_capwap_stats_t capwap_frag; /**< Structure for CAPWAP fragmentation statistics */
++#endif /* (DPAA_VERSION >= 11) */
++ } u;
++} ioc_fm_pcd_manip_frag_stats_t;
++
++/**************************************************************************//**
++ @Description structure for defining manipulation statistics
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_stats_t {
++ union {
++ ioc_fm_pcd_manip_reassem_stats_t reassem; /**< Structure for reassembly statistics */
++ ioc_fm_pcd_manip_frag_stats_t frag; /**< Structure for fragmentation statistics */
++ } u;
++} ioc_fm_pcd_manip_stats_t;
++
++/**************************************************************************//**
++ @Description Parameters for acquiring manipulation statistics
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_manip_get_stats_t {
++ void *id;
++ ioc_fm_pcd_manip_stats_t stats;
++} ioc_fm_pcd_manip_get_stats_t;
++
++#if DPAA_VERSION >= 11
++/**************************************************************************//**
++ @Description Parameters for defining frame replicator group and its members
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_frm_replic_group_params_t {
++ uint8_t max_num_of_entries; /**< Maximal number of members in the group - must be at least two */
++ uint8_t num_of_entries; /**< Number of members in the group - must be at least 1 */
++ ioc_fm_pcd_cc_next_engine_params_t next_engine_params[IOC_FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES];
++ /**< Array of members' parameters */
++ void *id;
++} ioc_fm_pcd_frm_replic_group_params_t;
++
++typedef struct ioc_fm_pcd_frm_replic_member_t {
++ void *h_replic_group;
++ uint16_t member_index;
++} ioc_fm_pcd_frm_replic_member_t;
++
++typedef struct ioc_fm_pcd_frm_replic_member_params_t {
++ ioc_fm_pcd_frm_replic_member_t member;
++ ioc_fm_pcd_cc_next_engine_params_t next_engine_params;
++} ioc_fm_pcd_frm_replic_member_params_t;
++#endif /* DPAA_VERSION >= 11 */
++
++
++typedef struct ioc_fm_pcd_cc_key_statistics_t {
++ uint32_t byte_count; /**< This counter reflects byte count of frames that
++ were matched by this key. */
++ uint32_t frame_count; /**< This counter reflects count of frames that
++ were matched by this key. */
++#if (DPAA_VERSION >= 11)
++ uint32_t frame_length_range_count[IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
++ /**< These counters reflect how many frames matched
++ this key in 'RMON' statistics mode:
++ Each counter holds the number of frames of a
++ specific frames length range, according to the
++ ranges provided at initialization. */
++#endif /* (DPAA_VERSION >= 11) */
++} ioc_fm_pcd_cc_key_statistics_t;
++
++
++typedef struct ioc_fm_pcd_cc_tbl_get_stats_t {
++ void *id;
++ uint16_t key_index;
++ ioc_fm_pcd_cc_key_statistics_t statistics;
++} ioc_fm_pcd_cc_tbl_get_stats_t;
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableGetKeyStatistics
++
++ @Description   This routine may be used to get the statistics counters of a
++                specific key in a CC Node.
++
++ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
++ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
++                these counters reflect how many of the frames that passed
++                were matched by this key; the total frame count will be
++                returned in the counter of the first range (as only one
++                frame length range was defined).
++ If 'e_FM_PCD_CC_STATS_MODE_RMON' was set for this node, the total
++ frame count will be separated to frame length counters, based on
++ provided frame length ranges.
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[in]     keyIndex            Index of the key whose statistics are requested
++ @Param[out] p_KeyStatistics Key statistics counters
++
++ @Return The specific key statistics.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++*//***************************************************************************/
++
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(12), ioc_compat_fm_pcd_cc_tbl_get_stats_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(12), ioc_fm_pcd_cc_tbl_get_stats_t)
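++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative editorial addition): reading the
++                counters of key 0 of a CC node. 'fd' is assumed to be an open
++                FM PCD device file descriptor and 'h_node' the id obtained
++                from a previous FM_PCD_IOC_MATCH_TABLE_SET call. Assumes
++                <string.h>, <stdio.h> and <sys/ioctl.h>.
++
++                    ioc_fm_pcd_cc_tbl_get_stats_t st;
++                    memset(&st, 0, sizeof(st));
++                    st.id = h_node;    // CC node handle
++                    st.key_index = 0;  // first key in the table
++                    if (ioctl(fd, FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT, &st) == 0)
++                        printf("frames: %u bytes: %u\n",
++                               st.statistics.frame_count, st.statistics.byte_count);
++*//***************************************************************************/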
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableGetMissStatistics
++
++ @Description   This routine may be used to get the statistics counters of the
++                miss entry in a CC Node.
++
++ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
++ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
++ these counters reflect how many frames were not matched to any
++ existing key and therefore passed through the miss entry; The
++ total frames count will be returned in the counter of the
++ first range (as only one frame length range was defined).
++
++ @Param[in] h_CcNode A handle to the node
++ @Param[out] p_MissStatistics Statistics counters for 'miss'
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++*//***************************************************************************/
++
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(13), ioc_compat_fm_pcd_cc_tbl_get_stats_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(13), ioc_fm_pcd_cc_tbl_get_stats_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableGetMissStatistics
++
++ @Description   This routine may be used to get the statistics counters of the
++                'miss' entry of a hash table.
++
++ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
++ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
++ these counters reflect how many frames were not matched to any
++ existing key and therefore passed through the miss entry;
++
++ @Param[in] h_HashTbl A handle to a hash table
++ @Param[out] p_MissStatistics Statistics counters for 'miss'
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(14), ioc_compat_fm_pcd_cc_tbl_get_stats_t)
++#endif
++#define FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(14), ioc_fm_pcd_cc_tbl_get_stats_t)
++
++
++/**************************************************************************//**
++ @Function FM_PCD_NetEnvCharacteristicsSet
++
++ @Description Define a set of Network Environment Characteristics.
++
++ When setting an environment it is important to understand its
++                application. It is not meant to describe the flows that will run
++                on the ports using this environment, but what the user intends to do
++                with the PCD mechanisms in order to parse, classify and distribute
++                those frames.
++                By specifying a distinction unit, the user indicates that this option
++                will be used to distinguish between frames at either a KeyGen scheme
++                or a coarse classification action descriptor. Using interchangeable
++                headers to define a unit means that the user is indifferent to which
++                of the interchangeable headers is present in the frame, and wants the
++                distinction to be based on the presence of either one of them.
++
++ Depending on context, there are limitations to the use of environments. A
++ port using the PCD functionality is bound to an environment. Some or even
++                all ports may share an environment, but an environment per port is
++                also possible. When initializing a scheme, a classification plan group (see below),
++ or a coarse classification tree, one of the initialized environments must be
++ stated and related to. When a port is bound to a scheme, a classification
++ plan group, or a coarse classification tree, it MUST be bound to the same
++ environment.
++
++                The different PCD modules may rely (for flow definition) ONLY on
++                distinction units as defined by their environment. When initializing a
++ scheme for example, it may not choose to select IPV4 as a match for
++ recognizing flows unless it was defined in the relating environment. In
++ fact, to guide the user through the configuration of the PCD, each module's
++ characterization in terms of flows is not done using protocol names, but using
++ environment indexes.
++
++                In terms of HW implementation, the list of distinction units sets the LCV
++                vectors and is later used for the match vector, classification plan
++                vectors and coarse classification indexing.
++
++ @Param[in,out] ioc_fm_pcd_net_env_params_t   A structure defining the distinction units for this configuration.
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(20), ioc_compat_fm_pcd_net_env_params_t)
++#endif
++#define FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(20), ioc_fm_pcd_net_env_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_NetEnvCharacteristicsDelete
++
++ @Description   Deletes a set of Network Environment Characteristics.
++
++ @Param[in] ioc_fm_obj_t - The id of a Network Environment object.
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(21), ioc_compat_fm_obj_t)
++#endif
++#define FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(21), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSchemeSet
++
++ @Description   Initializes or modifies (and enables) a scheme for the KeyGen.
++                This routine should be called for adding or modifying a scheme.
++                When a scheme needs modifying, the API requires that it be
++                rewritten. In such a case 'modify' should be TRUE. If the
++                routine is called for a valid scheme and 'modify' is FALSE,
++                it will return an error.
++
++ @Param[in,out] ioc_fm_pcd_kg_scheme_params_t A structure of parameters for defining the scheme
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_KG_SCHEME_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(24), ioc_compat_fm_pcd_kg_scheme_params_t)
++#endif
++#define FM_PCD_IOC_KG_SCHEME_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(24), ioc_fm_pcd_kg_scheme_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_KgSchemeDelete
++
++ @Description Deleting an initialized scheme.
++
++ @Param[in]     ioc_fm_obj_t    scheme id as initialized by application at FM_PCD_IOC_KG_SET_SCHEME
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_KG_SCHEME_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(25), ioc_compat_fm_obj_t)
++#endif
++#define FM_PCD_IOC_KG_SCHEME_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(25), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_CcRootBuild
++
++ @Description This routine must be called to define a complete coarse
++ classification tree. This is the way to define coarse
++ classification to a certain flow - the KeyGen schemes
++ may point only to trees defined in this way.
++
++ @Param[in,out] ioc_fm_pcd_cc_tree_params_t A structure of parameters to define the tree.
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_CC_ROOT_BUILD_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(26), compat_uptr_t)
++#endif
++#define FM_PCD_IOC_CC_ROOT_BUILD _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(26), void *) /* workaround ...*/
++
++/**************************************************************************//**
++ @Function FM_PCD_CcRootDelete
++
++ @Description Deleting a built tree.
++
++ @Param[in] ioc_fm_obj_t - The id of a CC tree.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_CC_ROOT_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(27), ioc_compat_fm_obj_t)
++#endif
++#define FM_PCD_IOC_CC_ROOT_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(27), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableSet
++
++ @Description This routine should be called for each CC (coarse classification)
++ node. The whole CC tree should be built bottom up so that each
++ node points to already defined nodes. p_NodeId returns the node
++ Id to be used by other nodes.
++
++ @Param[in,out] ioc_fm_pcd_cc_node_params_t A structure for defining the CC node params
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(28), compat_uptr_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(28), void *) /* workaround ...*/
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableDelete
++
++ @Description Deleting a built node.
++
++ @Param[in] ioc_fm_obj_t - The id of a CC node.
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(29), ioc_compat_fm_obj_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(29), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_CcRootModifyNextEngine
++
++ @Description Modify the Next Engine Parameters in the entry of the tree.
++
++ @Param[in] ioc_fm_pcd_cc_tree_modify_next_engine_params_t - Pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_CcRootBuild().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(30), ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t)
++#endif
++#define FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(30), ioc_fm_pcd_cc_tree_modify_next_engine_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableModifyNextEngine
++
++ @Description Modify the Next Engine Parameters in the relevant key entry of the node.
++
++ @Param[in] ioc_fm_pcd_cc_node_modify_next_engine_params_t A pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(31), ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(31), ioc_fm_pcd_cc_node_modify_next_engine_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableModifyMissNextEngine
++
++ @Description Modify the Next Engine Parameters of the Miss key case of the node.
++
++ @Param[in] ioc_fm_pcd_cc_node_modify_next_engine_params_t - Pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_MatchTableSet().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(32), ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(32), ioc_fm_pcd_cc_node_modify_next_engine_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableRemoveKey
++
++ @Description Remove the key (including next engine parameters of this key)
++ defined by the index of the relevant node.
++
++ @Param[in] ioc_fm_pcd_cc_node_remove_key_params_t A pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_PCD_MatchTableSet() has been called for this
++ node and for all of the nodes that lead to it.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(33), ioc_compat_fm_pcd_cc_node_remove_key_params_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(33), ioc_fm_pcd_cc_node_remove_key_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableAddKey
++
++ @Description   Add the key (including next engine parameters of this key) in the
++                index defined by the keyIndex. Note that 'FM_PCD_LAST_KEY_INDEX'
++ may be used when the user doesn't care about the position of the
++ key in the table - in that case, the key will be automatically
++ added by the driver in the last available entry.
++
++ @Param[in] ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t A pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_PCD_MatchTableSet() has been called for this
++ node and for all of the nodes that lead to it.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_ADD_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(34), ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_ADD_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(34), ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableModifyKeyAndNextEngine
++
++ @Description Modify the key and Next Engine Parameters of this key in the index defined by key_index.
++
++ @Param[in] ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t A pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions      Allowed only following FM_PCD_MatchTableSet(), not only for the
++                relevant node but also for the node that points to this node.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(35), ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(35), ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_MatchTableModifyKey
++
++ @Description Modify the key at the index defined by key_index.
++
++ @Param[in] ioc_fm_pcd_cc_node_modify_key_params_t - Pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only after FM_PCD_MatchTableSet() has been called for this
++ node and for all of the nodes that lead to it.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(36), ioc_compat_fm_pcd_cc_node_modify_key_params_t)
++#endif
++#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(36), ioc_fm_pcd_cc_node_modify_key_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableSet
++
++ @Description This routine initializes a hash table structure.
++ KeyGen hash result determines the hash bucket.
++ Next, KeyGen key is compared against all keys of this
++ bucket (exact match).
++                Number of sets (number of buckets) of the hash equals the
++ number of 1-s in 'hash_res_mask' in the provided parameters.
++ Number of hash table ways is then calculated by dividing
++ 'max_num_of_keys' equally between the hash sets. This is the maximal
++ number of keys that a hash bucket may hold.
++ The hash table is initialized empty and keys may be
++ added to it following the initialization. Keys masks are not
++ supported in current hash table implementation.
++ The initialized hash table can be integrated as a node in a
++ CC tree.
++
++ @Param[in,out] ioc_fm_pcd_hash_table_params_t - Pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_HASH_TABLE_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(37), ioc_compat_fm_pcd_hash_table_params_t)
++#endif
++#define FM_PCD_IOC_HASH_TABLE_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(37), ioc_fm_pcd_hash_table_params_t)
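++
++/**************************************************************************//**
++ @Description   Sizing note (illustrative editorial addition): per the
++                description above, the number of ways follows from dividing
++                'max_num_of_keys' equally between the hash sets; e.g. with
++                64 buckets and max_num_of_keys = 512, each bucket may hold
++                up to 512 / 64 = 8 keys (exact match, no key masks).
++*//***************************************************************************/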
++
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableDelete
++
++ @Description   This routine deletes the provided hash table and releases all
++                of its allocated resources.
++
++ @Param[in] ioc_fm_obj_t - The ID of a hash table.
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_HASH_TABLE_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(37), ioc_compat_fm_obj_t)
++#endif
++#define FM_PCD_IOC_HASH_TABLE_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(37), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableAddKey
++
++ @Description This routine adds the provided key (including next engine
++ parameters of this key) to the hash table.
++ The key is added as the last key of the bucket that it is
++ mapped to.
++
++ @Param[in] ioc_fm_pcd_hash_table_add_key_params_t - Pointer to a structure with the relevant parameters
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_HASH_TABLE_ADD_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(39), ioc_compat_fm_pcd_hash_table_add_key_params_t)
++#endif
++#define FM_PCD_IOC_HASH_TABLE_ADD_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(39), ioc_fm_pcd_hash_table_add_key_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_HashTableRemoveKey
++
++ @Description This routine removes the requested key (including next engine
++ parameters of this key) from the hash table.
++
++ @Param[in] ioc_fm_pcd_hash_table_remove_key_params_t - Pointer to a structure with the relevant parameters
++
++ @Return 0 on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_HashTableSet().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_HASH_TABLE_REMOVE_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(40), ioc_compat_fm_pcd_hash_table_remove_key_params_t)
++#endif
++#define FM_PCD_IOC_HASH_TABLE_REMOVE_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(40), ioc_fm_pcd_hash_table_remove_key_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_PlcrProfileSet
++
++ @Description Sets a profile entry in the policer profile table.
++ The routine overrides any existing value.
++
++ @Param[in,out] ioc_fm_pcd_plcr_profile_params_t A structure of parameters for defining a
++ policer profile entry.
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_PLCR_PROFILE_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(41), ioc_compat_fm_pcd_plcr_profile_params_t)
++#endif
++#define FM_PCD_IOC_PLCR_PROFILE_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(41), ioc_fm_pcd_plcr_profile_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_PlcrProfileDelete
++
++ @Description Delete a profile entry in the policer profile table.
++                The routine sets the entry to invalid.
++
++ @Param[in] ioc_fm_obj_t The id of a policer profile.
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_PLCR_PROFILE_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(41), ioc_compat_fm_obj_t)
++#endif
++#define FM_PCD_IOC_PLCR_PROFILE_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(41), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_ManipNodeSet
++
++ @Description This routine should be called for defining a manipulation
++ node. A manipulation node must be defined before the CC node
++ that precedes it.
++
++ @Param[in] ioc_fm_pcd_manip_params_t - A structure of parameters defining the manipulation
++
++ @Return        A handle to the initialized object on success; NULL otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MANIP_NODE_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(43), ioc_compat_fm_pcd_manip_params_t)
++#endif
++#define FM_PCD_IOC_MANIP_NODE_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(43), ioc_fm_pcd_manip_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_ManipNodeReplace
++
++ @Description   Change an existing manipulation node to match a new requirement.
++                (Here, it is implemented as a variant of the same IOCTL as
++                FM_PCD_ManipNodeSet(); when called, the 'id' member in its
++                'ioc_fm_pcd_manip_params_t' argument is set to contain
++                the manip node's handle.)
++
++ @Param[in] ioc_fm_pcd_manip_params_t - A structure of parameters defining the manipulation
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_ManipNodeSet().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MANIP_NODE_REPLACE_COMPAT FM_PCD_IOC_MANIP_NODE_SET_COMPAT
++#endif
++#define FM_PCD_IOC_MANIP_NODE_REPLACE FM_PCD_IOC_MANIP_NODE_SET
++
++/**************************************************************************//**
++ @Function FM_PCD_ManipNodeDelete
++
++ @Description Delete an existing manipulation node.
++
++ @Param[in] ioc_fm_obj_t The id of the manipulation node to delete.
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_ManipNodeSet().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MANIP_NODE_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(44), ioc_compat_fm_obj_t)
++#endif
++#define FM_PCD_IOC_MANIP_NODE_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(44), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_ManipGetStatistics
++
++ @Description Retrieve the manipulation statistics.
++
++ @Param[in] h_ManipNode A handle to a manipulation node.
++ @Param[out] p_FmPcdManipStats A structure for retrieving the manipulation statistics
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_ManipNodeSet().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_MANIP_GET_STATS_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(50), ioc_compat_fm_pcd_manip_get_stats_t)
++#endif
++#define FM_PCD_IOC_MANIP_GET_STATS _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(50), ioc_fm_pcd_manip_get_stats_t)
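++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative editorial addition): retrieving
++                the statistics of an IP reassembly manipulation node. 'fd'
++                is assumed to be an open FM PCD device file descriptor and
++                'h_manip' the id from a previous FM_PCD_IOC_MANIP_NODE_SET
++                call. Assumes <string.h>, <stdio.h> and <sys/ioctl.h>.
++
++                    ioc_fm_pcd_manip_get_stats_t ms;
++                    memset(&ms, 0, sizeof(ms));
++                    ms.id = h_manip; // manipulation node handle
++                    if (ioctl(fd, FM_PCD_IOC_MANIP_GET_STATS, &ms) == 0)
++                        printf("reassembly timeouts: %u\n",
++                               ms.stats.u.reassem.u.ip_reassem.timeout);
++*//***************************************************************************/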
++
++/**************************************************************************//**
++@Function FM_PCD_SetAdvancedOffloadSupport
++
++@Description This routine must be called in order to support the following features:
++ IP-fragmentation, IP-reassembly, IPsec, Header-manipulation, frame-replicator.
++
++@Param[in] h_FmPcd FM PCD module descriptor.
++
++@Return 0 on success; error code otherwise.
++
++@Cautions Allowed only when PCD is disabled.
++*//***************************************************************************/
++#define FM_PCD_IOC_SET_ADVANCED_OFFLOAD_SUPPORT _IO(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(45))
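++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative editorial addition): unlike most
++                PCD ioctls, this one carries no argument structure. Assuming
++                'fd' is an open FM PCD device file descriptor and the PCD is
++                still disabled:
++
++                    if (ioctl(fd, FM_PCD_IOC_SET_ADVANCED_OFFLOAD_SUPPORT) < 0)
++                        perror("FM_PCD_IOC_SET_ADVANCED_OFFLOAD_SUPPORT");
++*//***************************************************************************/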
++
++#if (DPAA_VERSION >= 11)
++/**************************************************************************//**
++ @Function FM_PCD_FrmReplicSetGroup
++
++ @Description Initialize a Frame Replicator group.
++
++ @Param[in] h_FmPcd FM PCD module descriptor.
++ @Param[in] p_FrmReplicGroupParam A structure of parameters for the initialization of
++ the frame replicator group.
++
++ @Return        A handle to the initialized object on success; NULL otherwise.
++
++ @Cautions Allowed only following FM_PCD_Init().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_FRM_REPLIC_GROUP_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(46), ioc_compat_fm_pcd_frm_replic_group_params_t)
++#endif
++#define FM_PCD_IOC_FRM_REPLIC_GROUP_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(46), ioc_fm_pcd_frm_replic_group_params_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_FrmReplicDeleteGroup
++
++ @Description Delete a Frame Replicator group.
++
++ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_FRM_REPLIC_GROUP_DELETE_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(47), ioc_compat_fm_obj_t)
++#endif
++#define FM_PCD_IOC_FRM_REPLIC_GROUP_DELETE _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(47), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PCD_FrmReplicAddMember
++
++ @Description   Add a member at the index defined by memberIndex.
++
++ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
++ @Param[in] memberIndex member index for adding.
++ @Param[in] p_MemberParams A pointer to the new member parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup() of this group.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(48), ioc_compat_fm_pcd_frm_replic_member_params_t)
++#endif
++#define FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(48), ioc_fm_pcd_frm_replic_member_params_t)
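++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative editorial addition): adding a
++                member at index 0 of a replicator group. 'fd' is assumed to
++                be an open FM PCD device file descriptor and 'h_group' the
++                id from a previous FM_PCD_IOC_FRM_REPLIC_GROUP_SET call;
++                next_engine_params must be filled according to the desired
++                next engine before the call. Assumes <string.h>, <stdio.h>
++                and <sys/ioctl.h>.
++
++                    ioc_fm_pcd_frm_replic_member_params_t mp;
++                    memset(&mp, 0, sizeof(mp));
++                    mp.member.h_replic_group = h_group; // replicator group handle
++                    mp.member.member_index = 0;         // insert at the head of the group
++                    // fill mp.next_engine_params here before the call
++                    if (ioctl(fd, FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD, &mp) < 0)
++                        perror("FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD");
++*//***************************************************************************/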
++
++/**************************************************************************//**
++ @Function FM_PCD_FrmReplicRemoveMember
++
++ @Description Remove the member defined by the index from the relevant group.
++
++ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
++ @Param[in] memberIndex member index for removing.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup() of this group.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_FRM_REPLIC_MEMBER_REMOVE_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(49), ioc_compat_fm_pcd_frm_replic_member_t)
++#endif
++#define FM_PCD_IOC_FRM_REPLIC_MEMBER_REMOVE _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(49), ioc_fm_pcd_frm_replic_member_t)
++
++#endif /* (DPAA_VERSION >= 11) */
++
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++/**************************************************************************//**
++ @Function FM_PCD_StatisticsSetNode
++
++ @Description This routine should be called for defining a statistics node.
++
++ @Param[in,out] ioc_fm_pcd_stats_params_t A structure of parameters defining the statistics
++
++ @Return 0 on success; Error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_STATISTICS_SET_NODE_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(45), void *)
++#endif
++#define FM_PCD_IOC_STATISTICS_SET_NODE _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(45), void *)
++
++#endif /* FM_CAPWAP_SUPPORT */
++
++#ifdef NCSW_BACKWARD_COMPATIBLE_API
++#if defined(CONFIG_COMPAT)
++#define FM_PCD_IOC_SET_NET_ENV_CHARACTERISTICS_COMPAT \
++ FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET_COMPAT
++#define FM_PCD_IOC_DELETE_NET_ENV_CHARACTERISTICS_COMPAT \
++ FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE_COMPAT
++#define FM_PCD_IOC_KG_SET_SCHEME_COMPAT FM_PCD_IOC_KG_SCHEME_SET_COMPAT
++#define FM_PCD_IOC_KG_DEL_SCHEME_COMPAT FM_PCD_IOC_KG_SCHEME_DELETE_COMPAT
++#define FM_PCD_IOC_CC_BUILD_TREE_COMPAT FM_PCD_IOC_CC_ROOT_BUILD_COMPAT
++#define FM_PCD_IOC_CC_DELETE_TREE_COMPAT FM_PCD_IOC_CC_ROOT_DELETE_COMPAT
++#define FM_PCD_IOC_CC_DELETE_NODE_COMPAT FM_PCD_IOC_MATCH_TABLE_DELETE_COMPAT
++#define FM_PCD_IOC_CC_TREE_MODIFY_NEXT_ENGINE_COMPAT \
++ FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE_COMPAT
++#define FM_PCD_IOC_CC_NODE_MODIFY_NEXT_ENGINE_COMPAT \
++ FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE_COMPAT
++#define FM_PCD_IOC_CC_NODE_MODIFY_MISS_NEXT_ENGINE_COMPAT \
++ FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE_COMPAT
++#define FM_PCD_IOC_CC_NODE_REMOVE_KEY_COMPAT FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY_COMPAT
++#define FM_PCD_IOC_CC_NODE_ADD_KEY_COMPAT FM_PCD_IOC_MATCH_TABLE_ADD_KEY_COMPAT
++#define FM_PCD_IOC_CC_NODE_MODIFY_KEY_AND_NEXT_ENGINE_COMPAT \
++ FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE_COMPAT
++#define FM_PCD_IOC_CC_NODE_MODIFY_KEY_COMPAT FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_COMPAT
++#define FM_PCD_IOC_PLCR_SET_PROFILE_COMPAT FM_PCD_IOC_PLCR_PROFILE_SET_COMPAT
++#define FM_PCD_IOC_PLCR_DEL_PROFILE_COMPAT FM_PCD_IOC_PLCR_PROFILE_DELETE_COMPAT
++#define FM_PCD_IOC_MANIP_SET_NODE_COMPAT FM_PCD_IOC_MANIP_NODE_SET_COMPAT
++#define FM_PCD_IOC_MANIP_DELETE_NODE_COMPAT FM_PCD_IOC_MANIP_NODE_DELETE_COMPAT
++#endif
++#define FM_PCD_IOC_SET_NET_ENV_CHARACTERISTICS FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET
++#define FM_PCD_IOC_DELETE_NET_ENV_CHARACTERISTICS \
++ FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE
++#define FM_PCD_IOC_KG_SET_SCHEME FM_PCD_IOC_KG_SCHEME_SET
++#define FM_PCD_IOC_KG_DEL_SCHEME FM_PCD_IOC_KG_SCHEME_DELETE
++#define FM_PCD_IOC_CC_BUILD_TREE FM_PCD_IOC_CC_ROOT_BUILD
++#define FM_PCD_IOC_CC_DELETE_TREE FM_PCD_IOC_CC_ROOT_DELETE
++#define FM_PCD_IOC_CC_DELETE_NODE FM_PCD_IOC_MATCH_TABLE_DELETE
++#define FM_PCD_IOC_CC_TREE_MODIFY_NEXT_ENGINE FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE
++#define FM_PCD_IOC_CC_NODE_MODIFY_NEXT_ENGINE FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE
++#define FM_PCD_IOC_CC_NODE_MODIFY_MISS_NEXT_ENGINE \
++ FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE
++#define FM_PCD_IOC_CC_NODE_REMOVE_KEY FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY
++#define FM_PCD_IOC_CC_NODE_ADD_KEY FM_PCD_IOC_MATCH_TABLE_ADD_KEY
++#define FM_PCD_IOC_CC_NODE_MODIFY_KEY_AND_NEXT_ENGINE \
++ FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE
++#define FM_PCD_IOC_CC_NODE_MODIFY_KEY FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY
++#define FM_PCD_IOC_PLCR_SET_PROFILE FM_PCD_IOC_PLCR_PROFILE_SET
++#define FM_PCD_IOC_PLCR_DEL_PROFILE FM_PCD_IOC_PLCR_PROFILE_DELETE
++#define FM_PCD_IOC_MANIP_SET_NODE FM_PCD_IOC_MANIP_NODE_SET
++#define FM_PCD_IOC_MANIP_DELETE_NODE FM_PCD_IOC_MANIP_NODE_DELETE
++#endif /* NCSW_BACKWARD_COMPATIBLE_API */
++
++#endif /* __FM_PCD_IOCTLS_H */
++/** @} */ /* end of lnx_ioctl_FM_PCD_Runtime_grp group */
++/** @} */ /* end of lnx_ioctl_FM_PCD_grp group */
++/** @} */ /* end of lnx_ioctl_FM_grp group */
+diff --git a/include/uapi/linux/fmd/Peripherals/fm_port_ioctls.h b/include/uapi/linux/fmd/Peripherals/fm_port_ioctls.h
+new file mode 100644
+index 00000000..a2f61107
+--- /dev/null
++++ b/include/uapi/linux/fmd/Peripherals/fm_port_ioctls.h
+@@ -0,0 +1,948 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/******************************************************************************
++ @File fm_port_ioctls.h
++
++ @Description FM Port routines
++*//***************************************************************************/
++#ifndef __FM_PORT_IOCTLS_H
++#define __FM_PORT_IOCTLS_H
++
++#include "enet_ext.h"
++#include "net_ioctls.h"
++#include "fm_ioctls.h"
++#include "fm_pcd_ioctls.h"
++
++
++/**************************************************************************//**
++
++ @Group lnx_ioctl_FM_grp Frame Manager Linux IOCTL API
++
++ @Description FM Linux ioctls definitions and enums
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FM_PORT_grp FM Port
++
++ @Description FM Port API
++
++ The FM uses a general module called "port" to represent a Tx port
++ (MAC), an Rx port (MAC), offline parsing flow or host command
++ flow. There may be up to 17 (may change) ports in an FM - 5 Tx
++ ports (4 for the 1G MACs, 1 for the 10G MAC), 5 Rx Ports, and 7
++ Host command/Offline parsing ports. The SW driver manages these
++ ports as sub-modules of the FM, i.e. after an FM is initialized,
++ its ports may be initialized and operated upon.
++
++ The port is initialized aware of its type, but other functions on
++ a port may be indifferent to its type. When necessary, the driver
++                 verifies coherency and returns an error if applicable.
++
++                 On initialization, the user specifies the port type and its index
++                 (relative to the port's type). Host command and Offline parsing
++                 ports share the same id range, i.e. the user may not initialize
++                 both host command port 0 and offline parsing port 0.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description An enum for defining port PCD modes.
++ (Must match enum e_FmPortPcdSupport defined in fm_port_ext.h)
++
++ This enum defines the superset of PCD engines support - i.e. not
++ all engines have to be used, but all have to be enabled. The real
++ flow of a specific frame depends on the PCD configuration and the
++ frame headers and payload.
++                 Note: the first engine and the first engine after the parser (if
++                 it exists) should be listed in order, as this order defines the
++                 flow of the port. For the remaining engines, however, the order
++                 is no longer important, as it is defined by the PCD graph itself.
++*//***************************************************************************/
++typedef enum ioc_fm_port_pcd_support {
++ e_IOC_FM_PORT_PCD_SUPPORT_NONE = 0 /**< BMI to BMI, PCD is not used */
++ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_ONLY /**< Use only Parser */
++ , e_IOC_FM_PORT_PCD_SUPPORT_PLCR_ONLY /**< Use only Policer */
++ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR /**< Use Parser and Policer */
++ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG /**< Use Parser and Keygen */
++ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC /**< Use Parser, Keygen and Coarse Classification */
++ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR
++ /**< Use all PCD engines */
++ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR /**< Use Parser, Keygen and Policer */
++ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_CC /**< Use Parser and Coarse Classification */
++ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR /**< Use Parser and Coarse Classification and Policer */
++ , e_IOC_FM_PORT_PCD_SUPPORT_CC_ONLY /**< Use only Coarse Classification */
++#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
++    , e_IOC_FM_PORT_PCD_SUPPORT_CC_AND_KG               /**< Use Coarse Classification and Keygen */
++ , e_IOC_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR /**< Use Coarse Classification, Keygen and Policer */
++#endif /* FM_CAPWAP_SUPPORT */
++} ioc_fm_port_pcd_support;
++
++
++/**************************************************************************//**
++ @Collection FM Frame error
++*//***************************************************************************/
++typedef uint32_t ioc_fm_port_frame_err_select_t; /**< typedef for defining Frame Descriptor errors */
++
++/* @} */
++
++
++/**************************************************************************//**
++ @Description An enum for defining Dual Tx rate limiting scale.
++ (Must match e_FmPortDualRateLimiterScaleDown defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef enum ioc_fm_port_dual_rate_limiter_scale_down {
++ e_IOC_FM_PORT_DUAL_RATE_LIMITER_NONE = 0, /**< Use only single rate limiter */
++ e_IOC_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_2, /**< Divide high rate limiter by 2 */
++ e_IOC_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_4, /**< Divide high rate limiter by 4 */
++ e_IOC_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8 /**< Divide high rate limiter by 8 */
++} ioc_fm_port_dual_rate_limiter_scale_down;
++
++/**************************************************************************//**
++ @Description A structure for defining Tx rate limiting
++ (Must match struct t_FmPortRateLimit defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_port_rate_limit_t {
++ uint16_t max_burst_size; /**< in KBytes for Tx ports, in frames
++ for offline parsing ports. (note that
++ for early chips burst size is
++                                                  rounded up to a multiple of 1000 frames).*/
++ uint32_t rate_limit; /**< in Kb/sec for Tx ports, in frame/sec for
++ offline parsing ports. Rate limit refers to
++ data rate (rather than line rate). */
++ ioc_fm_port_dual_rate_limiter_scale_down rate_limit_divider; /**< For offline parsing ports only. Not-valid
++ for some earlier chip revisions */
++} ioc_fm_port_rate_limit_t;
++
++
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FM_PORT_runtime_control_grp FM Port Runtime Control Unit
++
++ @Description FM Port Runtime control unit API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description An enum for defining FM Port counters.
++ (Must match enum e_FmPortCounters defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef enum ioc_fm_port_counters {
++ e_IOC_FM_PORT_COUNTERS_CYCLE, /**< BMI performance counter */
++ e_IOC_FM_PORT_COUNTERS_TASK_UTIL, /**< BMI performance counter */
++ e_IOC_FM_PORT_COUNTERS_QUEUE_UTIL, /**< BMI performance counter */
++ e_IOC_FM_PORT_COUNTERS_DMA_UTIL, /**< BMI performance counter */
++ e_IOC_FM_PORT_COUNTERS_FIFO_UTIL, /**< BMI performance counter */
++ e_IOC_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION, /**< BMI Rx only performance counter */
++ e_IOC_FM_PORT_COUNTERS_FRAME, /**< BMI statistics counter */
++ e_IOC_FM_PORT_COUNTERS_DISCARD_FRAME, /**< BMI statistics counter */
++ e_IOC_FM_PORT_COUNTERS_DEALLOC_BUF, /**< BMI deallocate buffer statistics counter */
++ e_IOC_FM_PORT_COUNTERS_RX_BAD_FRAME, /**< BMI Rx only statistics counter */
++ e_IOC_FM_PORT_COUNTERS_RX_LARGE_FRAME, /**< BMI Rx only statistics counter */
++ e_IOC_FM_PORT_COUNTERS_RX_FILTER_FRAME, /**< BMI Rx & OP only statistics counter */
++ e_IOC_FM_PORT_COUNTERS_RX_LIST_DMA_ERR, /**< BMI Rx, OP & HC only statistics counter */
++ e_IOC_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD, /**< BMI Rx, OP & HC statistics counter */
++ e_IOC_FM_PORT_COUNTERS_PREPARE_TO_ENQUEUE_COUNTER, /**< BMI Rx, OP & HC only statistics counter */
++ e_IOC_FM_PORT_COUNTERS_WRED_DISCARD, /**< BMI OP & HC only statistics counter */
++ e_IOC_FM_PORT_COUNTERS_LENGTH_ERR, /**< BMI non-Rx statistics counter */
++ e_IOC_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT, /**< BMI non-Rx statistics counter */
++ e_IOC_FM_PORT_COUNTERS_DEQ_TOTAL, /**< QMI total QM dequeues counter */
++ e_IOC_FM_PORT_COUNTERS_ENQ_TOTAL, /**< QMI total QM enqueues counter */
++ e_IOC_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI counter */
++ e_IOC_FM_PORT_COUNTERS_DEQ_CONFIRM /**< QMI counter */
++} ioc_fm_port_counters;
++
++typedef struct ioc_fm_port_bmi_stats_t {
++ uint32_t cnt_cycle;
++ uint32_t cnt_task_util;
++ uint32_t cnt_queue_util;
++ uint32_t cnt_dma_util;
++ uint32_t cnt_fifo_util;
++ uint32_t cnt_rx_pause_activation;
++ uint32_t cnt_frame;
++ uint32_t cnt_discard_frame;
++ uint32_t cnt_dealloc_buf;
++ uint32_t cnt_rx_bad_frame;
++ uint32_t cnt_rx_large_frame;
++ uint32_t cnt_rx_filter_frame;
++ uint32_t cnt_rx_list_dma_err;
++ uint32_t cnt_rx_out_of_buffers_discard;
++ uint32_t cnt_wred_discard;
++ uint32_t cnt_length_err;
++ uint32_t cnt_unsupported_format;
++} ioc_fm_port_bmi_stats_t;
++
++/**************************************************************************//**
++ @Description Structure for Port id parameters.
++ (Description may be inaccurate;
++ must match struct t_FmPortCongestionGrps defined in fm_port_ext.h)
++
++ Fields commented 'IN' are passed by the port module to be used
++ by the FM module.
++ Fields commented 'OUT' will be filled by FM before returning to port.
++*//***************************************************************************/
++typedef struct ioc_fm_port_congestion_groups_t {
++ uint16_t num_of_congestion_grps_to_consider; /**< The number of required congestion groups
++ to define the size of the following array */
++ uint8_t congestion_grps_to_consider [FM_PORT_NUM_OF_CONGESTION_GRPS];
++ /**< An array of CG indexes;
++ Note that the size of the array should be
++ 'num_of_congestion_grps_to_consider'. */
++#if DPAA_VERSION >= 11
++ bool pfc_priorities_enable[FM_PORT_NUM_OF_CONGESTION_GRPS][FM_MAX_NUM_OF_PFC_PRIORITIES];
++ /**< A matrix that represents the map between the CG ids
++ defined in 'congestion_grps_to_consider' to the priorities
++ mapping array. */
++#endif /* DPAA_VERSION >= 11 */
++} ioc_fm_port_congestion_groups_t;
++
++
++
++/**************************************************************************//**
++ @Function FM_PORT_Disable
++
++ @Description   Gracefully disable an FM port. The port will not start new
++                tasks, and the routine returns after all tasks associated with
++                the port are terminated.
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions      This is a blocking routine: it returns after the port is
++                gracefully stopped, i.e. the port will not accept new frames,
++                but it will finish all frames or tasks which had already begun
++*//***************************************************************************/
++#define FM_PORT_IOC_DISABLE _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(1))
++
++/**************************************************************************//**
++ @Function FM_PORT_Enable
++
++ @Description A runtime routine provided to allow disable/enable of port.
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_ENABLE _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(2))
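++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only; not part of the API):
++                a minimal user-space sequence that disables and re-enables a
++                port. The device node name below is hypothetical; actual names
++                depend on how the platform exposes the FM port devices.
++
++                    #include <fcntl.h>
++                    #include <sys/ioctl.h>
++                    #include <linux/fmd/Peripherals/fm_port_ioctls.h>
++
++                    int fd = open("/dev/fm0-port-rx0", O_RDWR); // hypothetical node
++                    if (fd >= 0) {
++                        ioctl(fd, FM_PORT_IOC_DISABLE); // blocks until the port drains
++                        // ... reconfigure the port here ...
++                        ioctl(fd, FM_PORT_IOC_ENABLE);
++                    }
++
++                The sketches further below reuse this 'fd' and these includes.
++*//***************************************************************************/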
++
++/**************************************************************************//**
++ @Function FM_PORT_SetRateLimit
++
++ @Description   Calling this routine enables the rate limit algorithm.
++                By default, this functionality is disabled.
++                Note that the rate-limit mechanism uses the FM time stamp.
++                The rate limit specified here is rounded DOWN to the
++                nearest 16M.
++
++ May be used for Tx and offline parsing ports only
++
++ @Param[in] ioc_fm_port_rate_limit A structure of rate limit parameters
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_SET_RATE_LIMIT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(3), ioc_fm_port_rate_limit_t)
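++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): enabling a Tx rate limit,
++                assuming 'fd' is an open FM Tx port device (see the sketch
++                above). The numeric values are arbitrary examples.
++
++                    ioc_fm_port_rate_limit_t lim = {
++                        .max_burst_size     = 64,     // KBytes on a Tx port
++                        .rate_limit         = 100000, // Kb/sec on a Tx port
++                        .rate_limit_divider = e_IOC_FM_PORT_DUAL_RATE_LIMITER_NONE,
++                    };
++                    if (ioctl(fd, FM_PORT_IOC_SET_RATE_LIMIT, &lim) < 0)
++                        perror("FM_PORT_IOC_SET_RATE_LIMIT");
++*//***************************************************************************/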
++
++/**************************************************************************//**
++ @Function FM_PORT_DeleteRateLimit
++
++ @Description Calling this routine disables the previously enabled rate limit.
++
++ May be used for Tx and offline parsing ports only
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_DELETE_RATE_LIMIT _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(5))
++#define FM_PORT_IOC_REMOVE_RATE_LIMIT FM_PORT_IOC_DELETE_RATE_LIMIT
++
++
++/**************************************************************************//**
++ @Function FM_PORT_AddCongestionGrps
++
++ @Description   This routine affects the corresponding Tx port.
++ It should be called in order to enable pause
++ frame transmission in case of congestion in one or more
++ of the congestion groups relevant to this port.
++ Each call to this routine may add one or more congestion
++ groups to be considered relevant to this port.
++
++ May be used for Rx, or RX+OP ports only (depending on chip)
++
++ @Param[in] ioc_fm_port_congestion_groups_t - A pointer to an array of
++ congestion group ids to consider.
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_ADD_CONGESTION_GRPS _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(34), ioc_fm_port_congestion_groups_t)
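++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): making two congestion groups
++                relevant to a port, assuming 'fd' is an open FM port device.
++                The CG ids 3 and 7 are arbitrary example values.
++
++                    ioc_fm_port_congestion_groups_t grps;
++                    memset(&grps, 0, sizeof(grps)); // needs <string.h>
++                    grps.num_of_congestion_grps_to_consider = 2;
++                    grps.congestion_grps_to_consider[0] = 3;
++                    grps.congestion_grps_to_consider[1] = 7;
++                    if (ioctl(fd, FM_PORT_IOC_ADD_CONGESTION_GRPS, &grps) < 0)
++                        perror("FM_PORT_IOC_ADD_CONGESTION_GRPS");
++*//***************************************************************************/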
++
++/**************************************************************************//**
++ @Function FM_PORT_RemoveCongestionGrps
++
++ @Description   This routine affects the corresponding Tx port. It should be
++                called when congestion groups that were defined for this port
++                are no longer relevant, or when pause frame transmission is no
++                longer required on their behalf.
++                Each call to this routine may remove one or more congestion
++                groups from being considered relevant to this port.
++
++ May be used for Rx, or RX+OP ports only (depending on chip)
++
++ @Param[in] ioc_fm_port_congestion_groups_t - A pointer to an array of
++ congestion group ids to consider.
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_REMOVE_CONGESTION_GRPS _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(35), ioc_fm_port_congestion_groups_t)
++
++/**************************************************************************//**
++ @Function FM_PORT_SetErrorsRoute
++
++ @Description   Errors selected for this routine will cause a frame with that error
++                to be enqueued to the error queue.
++                Errors not selected for this routine will cause a frame with that error
++                to be enqueued to one of the other port queues.
++                By default all errors are defined to be enqueued to the error queue.
++ Errors that were configured to be discarded (at initialization)
++ may not be selected here.
++
++ May be used for Rx and offline parsing ports only
++
++ @Param[in] ioc_fm_port_frame_err_select_t A list of errors to enqueue to error queue
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++                 (szbs001: note that a function which must be called BEFORE
++                           FM_PORT_Init() cannot meaningfully be implemented as
++                           an ioctl, since an ioctl is always invoked AFTER the
++                           FM_PORT_Init() for that port.)
++*//***************************************************************************/
++#define FM_PORT_IOC_SET_ERRORS_ROUTE _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(4), ioc_fm_port_frame_err_select_t)
++
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FM_PORT_pcd_runtime_control_grp FM Port PCD Runtime Control Unit
++
++ @Description FM Port PCD Runtime control unit API functions, definitions and enums.
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description A structure defining the KG scheme after the parser.
++ (Must match struct t_FmPcdKgSchemeSelect defined in fm_port_ext.h)
++
++                 This is relevant only for changing the scheme selection mode
++                 (from direct to indirect and vice versa), or, when the scheme
++                 is selected directly, for selecting the scheme id.
++
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_kg_scheme_select_t {
++ bool direct; /**< TRUE to use 'scheme_id' directly, FALSE to use LCV.*/
++ void *scheme_id; /**< Relevant for 'direct'=TRUE only.
++ 'scheme_id' selects the scheme after parser. */
++} ioc_fm_pcd_kg_scheme_select_t;
++
++/**************************************************************************//**
++ @Description Scheme IDs structure
++ (Must match struct t_FmPcdPortSchemesParams defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_port_schemes_params_t {
++ uint8_t num_of_schemes; /**< Number of schemes for port to be bound to. */
++ void *scheme_ids[FM_PCD_KG_NUM_OF_SCHEMES]; /**< Array of 'num_of_schemes' schemes for the
++ port to be bound to */
++} ioc_fm_pcd_port_schemes_params_t;
++
++/**************************************************************************//**
++ @Description A union for defining port protocol parameters for parser
++ (Must match union u_FmPcdHdrPrsOpts defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef union ioc_fm_pcd_hdr_prs_opts_u {
++ /* MPLS */
++ struct {
++        bool                label_interpretation_enable;/**< When this bit is set, the last MPLS label will be
++                                                             interpreted as described in the HW spec table. When the
++                                                             bit is cleared, the parser will advance to the header
++                                                             selected by 'next_parse' */
++ ioc_net_header_type next_parse; /**< must be equal or higher than IPv4 */
++ } mpls_prs_options;
++
++ /* VLAN */
++ struct {
++ uint16_t tag_protocol_id1; /**< User defined Tag Protocol Identifier, to be recognized
++ on VLAN TAG on top of 0x8100 and 0x88A8 */
++ uint16_t tag_protocol_id2; /**< User defined Tag Protocol Identifier, to be recognized
++ on VLAN TAG on top of 0x8100 and 0x88A8 */
++ } vlan_prs_options;
++
++ /* PPP */
++ struct{
++ bool enable_mtu_check; /**< Check validity of MTU according to RFC2516 */
++ } pppoe_prs_options;
++
++ /* IPV6 */
++ struct {
++ bool routing_hdr_disable; /**< Disable routing header */
++ } ipv6_prs_options;
++
++ /* UDP */
++ struct {
++ bool pad_ignore_checksum; /**< TRUE to ignore pad in checksum */
++ } udp_prs_options;
++
++ /* TCP */
++ struct {
++ bool pad_ignore_checksum; /**< TRUE to ignore pad in checksum */
++ } tcp_prs_options;
++} ioc_fm_pcd_hdr_prs_opts_u;
++
++/**************************************************************************//**
++ @Description A structure for defining each header for the parser
++ (must match struct t_FmPcdPrsAdditionalHdrParams defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_prs_additional_hdr_params_t {
++ ioc_net_header_type hdr; /**< Selected header */
++ bool err_disable; /**< TRUE to disable error indication */
++ bool soft_prs_enable; /**< Enable jump to SW parser when this
++ header is recognized by the HW parser. */
++    uint8_t                                 index_per_hdr;      /**< Normally 0; if more than one SW parser
++                                                                     attachment exists for the same header
++                                                                     (in the main SW parser code), use this
++                                                                     index to distinguish between them. */
++ bool use_prs_opts; /**< TRUE to use parser options. */
++    ioc_fm_pcd_hdr_prs_opts_u               prs_opts;           /**< A union according to header type,
++                                                                     defining the parser options selected. */
++} ioc_fm_pcd_prs_additional_hdr_params_t;
++
++/**************************************************************************//**
++ @Description A structure for defining port PCD parameters
++ (Must match t_FmPortPcdPrsParams defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_port_pcd_prs_params_t {
++ uint8_t prs_res_priv_info; /**< The private info provides a method of inserting
++ port information into the parser result. This information
++ may be extracted by KeyGen and be used for frames
++ distribution when a per-port distinction is required,
++ it may also be used as a port logical id for analyzing
++ incoming frames. */
++    uint8_t                                 parsing_offset;     /**< Number of bytes from beginning of packet to start parsing */
++    ioc_net_header_type                     first_prs_hdr;      /**< The type of the first header expected at 'parsing_offset' */
++ bool include_in_prs_statistics; /**< TRUE to include this port in the parser statistics */
++ uint8_t num_of_hdrs_with_additional_params;
++ /**< Normally 0, some headers may get special parameters */
++ ioc_fm_pcd_prs_additional_hdr_params_t additional_params[IOC_FM_PCD_PRS_NUM_OF_HDRS];
++ /**< 'num_of_hdrs_with_additional_params' structures
++ additional parameters for each header that requires them */
++ bool set_vlan_tpid1; /**< TRUE to configure user selection of Ethertype to
++ indicate a VLAN tag (in addition to the TPID values
++ 0x8100 and 0x88A8). */
++ uint16_t vlan_tpid1; /**< extra tag to use if set_vlan_tpid1=TRUE. */
++ bool set_vlan_tpid2; /**< TRUE to configure user selection of Ethertype to
++ indicate a VLAN tag (in addition to the TPID values
++ 0x8100 and 0x88A8). */
++    uint16_t                                vlan_tpid2;         /**< extra tag to use if set_vlan_tpid2=TRUE. */
++} ioc_fm_port_pcd_prs_params_t;
++
++/**************************************************************************//**
++ @Description   A structure for defining coarse classification parameters
++ (Must match t_FmPortPcdCcParams defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_port_pcd_cc_params_t {
++ void *cc_tree_id; /**< CC tree id */
++} ioc_fm_port_pcd_cc_params_t;
++
++/**************************************************************************//**
++ @Description A structure for defining keygen parameters
++ (Must match t_FmPortPcdKgParams defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_port_pcd_kg_params_t {
++ uint8_t num_of_schemes; /**< Number of schemes for port to be bound to. */
++ void *scheme_ids[FM_PCD_KG_NUM_OF_SCHEMES];
++ /**< Array of 'num_of_schemes' schemes for the
++ port to be bound to */
++ bool direct_scheme; /**< TRUE for going from parser to a specific scheme,
++ regardless of parser result */
++ void *direct_scheme_id; /**< Scheme id, as returned by FM_PCD_KgSetScheme;
++ relevant only if direct=TRUE. */
++} ioc_fm_port_pcd_kg_params_t;
++
++/**************************************************************************//**
++ @Description A structure for defining policer parameters
++ (Must match t_FmPortPcdPlcrParams defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_port_pcd_plcr_params_t {
++ void *plcr_profile_id; /**< Selected profile handle;
++ relevant in one of the following cases:
++ e_IOC_FM_PORT_PCD_SUPPORT_PLCR_ONLY or
++ e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR were selected,
++ or if any flow uses a KG scheme where policer
++ profile is not generated (bypass_plcr_profile_generation selected) */
++} ioc_fm_port_pcd_plcr_params_t;
++
++/**************************************************************************//**
++ @Description A structure for defining port PCD parameters
++ (Must match struct t_FmPortPcdParams defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_port_pcd_params_t {
++ ioc_fm_port_pcd_support pcd_support; /**< Relevant for Rx and offline ports only.
++ Describes the active PCD engines for this port. */
++ void *net_env_id; /**< HL Unused in PLCR only mode */
++ ioc_fm_port_pcd_prs_params_t *p_prs_params; /**< Parser parameters for this port */
++ ioc_fm_port_pcd_cc_params_t *p_cc_params; /**< Coarse classification parameters for this port */
++ ioc_fm_port_pcd_kg_params_t *p_kg_params; /**< Keygen parameters for this port */
++ ioc_fm_port_pcd_plcr_params_t *p_plcr_params; /**< Policer parameters for this port */
++ void *p_ip_reassembly_manip;/**< IP Reassembly manipulation */
++#if (DPAA_VERSION >= 11)
++ void *p_capwap_reassembly_manip;/**< CAPWAP Reassembly manipulation */
++#endif /* (DPAA_VERSION >= 11) */
++} ioc_fm_port_pcd_params_t;
++
++/**************************************************************************//**
++ @Description A structure for defining the Parser starting point
++ (Must match struct t_FmPcdPrsStart defined in fm_port_ext.h)
++*//***************************************************************************/
++typedef struct ioc_fm_pcd_prs_start_t {
++    uint8_t             parsing_offset; /**< Number of bytes from beginning of packet to
++                                             start parsing */
++    ioc_net_header_type first_prs_hdr;  /**< The type of the first header expected at
++                                             'parsing_offset' */
++} ioc_fm_pcd_prs_start_t;
++
++
++/**************************************************************************//**
++ @Description FQID parameters structure
++*//***************************************************************************/
++typedef struct ioc_fm_port_pcd_fqids_params_t {
++ uint32_t num_fqids; /**< Number of fqids to be allocated for the port */
++ uint8_t alignment; /**< Alignment required for this port */
++ uint32_t base_fqid; /**< output parameter - the base fqid */
++} ioc_fm_port_pcd_fqids_params_t;
++
++
++/**************************************************************************//**
++ @Function FM_PORT_IOC_ALLOC_PCD_FQIDS
++
++ @Description   Allocates FQIDs
++
++ May be used for Rx and offline parsing ports only
++
++ @Param[in,out] ioc_fm_port_pcd_fqids_params_t  Parameters for allocating FQIDs
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_ALLOC_PCD_FQIDS _IOWR(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(19), ioc_fm_port_pcd_fqids_params_t)
++
++/**************************************************************************//**
++ @Function FM_PORT_IOC_FREE_PCD_FQIDS
++
++ @Description Frees previously-allocated FQIDs
++
++ May be used for Rx and offline parsing ports only
++
++ @Param[in] uint32_t Base FQID of previously allocated range.
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_FREE_PCD_FQIDS _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(19), uint32_t)
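++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): allocating a range of FQIDs
++                and freeing it again, assuming 'fd' is an open FM port device.
++                The counts are arbitrary example values.
++
++                    ioc_fm_port_pcd_fqids_params_t fqids = {
++                        .num_fqids = 8, .alignment = 8, .base_fqid = 0,
++                    };
++                    if (ioctl(fd, FM_PORT_IOC_ALLOC_PCD_FQIDS, &fqids) == 0) {
++                        // fqids.base_fqid now holds the allocated base FQID
++                        // ... use the range ...
++                        ioctl(fd, FM_PORT_IOC_FREE_PCD_FQIDS, &fqids.base_fqid);
++                    }
++*//***************************************************************************/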
++
++
++/**************************************************************************//**
++ @Function FM_PORT_SetPCD
++
++ @Description Calling this routine defines the port's PCD configuration.
++ It changes it from its default configuration which is PCD
++ disabled (BMI to BMI) and configures it according to the passed
++ parameters.
++
++ May be used for Rx and offline parsing ports only
++
++ @Param[in] ioc_fm_port_pcd_params_t A Structure of parameters defining the port's PCD
++ configuration.
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PORT_IOC_SET_PCD_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(20), ioc_compat_fm_port_pcd_params_t)
++#endif
++#define FM_PORT_IOC_SET_PCD _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(20), ioc_fm_port_pcd_params_t)
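++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): a parser+Keygen PCD setup.
++                'net_env_handle' and 'scheme_handle' stand for handles obtained
++                earlier from the PCD net-env and KG scheme-set ioctls; the
++                header enumerator name is assumed to come from net_ioctls.h.
++
++                    ioc_fm_port_pcd_prs_params_t prs;
++                    ioc_fm_port_pcd_kg_params_t  kg;
++                    ioc_fm_port_pcd_params_t     pcd;
++                    memset(&prs, 0, sizeof(prs));
++                    memset(&kg,  0, sizeof(kg));
++                    memset(&pcd, 0, sizeof(pcd));
++                    prs.parsing_offset = 0;
++                    prs.first_prs_hdr  = e_IOC_NET_HEADER_TYPE_ETH; // assumed name
++                    kg.num_of_schemes  = 1;
++                    kg.scheme_ids[0]   = scheme_handle;
++                    pcd.pcd_support    = e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG;
++                    pcd.net_env_id     = net_env_handle;
++                    pcd.p_prs_params   = &prs;
++                    pcd.p_kg_params    = &kg;
++                    if (ioctl(fd, FM_PORT_IOC_SET_PCD, &pcd) < 0)
++                        perror("FM_PORT_IOC_SET_PCD");
++*//***************************************************************************/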
++
++/**************************************************************************//**
++ @Function FM_PORT_DeletePCD
++
++ @Description Calling this routine releases the port's PCD configuration.
++ The port returns to its default configuration which is PCD
++ disabled (BMI to BMI) and all PCD configuration is removed.
++
++ May be used for Rx and offline parsing ports which are
++ in PCD mode only
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_DELETE_PCD _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(21))
++
++/**************************************************************************//**
++ @Function FM_PORT_AttachPCD
++
++ @Description This routine may be called after FM_PORT_DetachPCD was called,
++ to return to the originally configured PCD support flow.
++                 This pair of routines allows PCD configuration changes that
++                 require PCD not to be in use while the changes take place.
++
++ May be used for Rx and offline parsing ports which are
++ in PCD mode only
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_ATTACH_PCD _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(23))
++
++/**************************************************************************//**
++ @Function FM_PORT_DetachPCD
++
++ @Description Calling this routine detaches the port from its PCD functionality.
++ The port returns to its default flow which is BMI to BMI.
++
++ May be used for Rx and offline parsing ports which are
++ in PCD mode only
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#define FM_PORT_IOC_DETACH_PCD _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(22))
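++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): the detach/attach sequence
++                around a runtime PCD change, assuming 'fd' is an open FM port
++                device already in PCD mode.
++
++                    ioctl(fd, FM_PORT_IOC_DETACH_PCD);
++                    // ... apply changes, e.g. FM_PORT_IOC_PCD_CC_MODIFY_TREE ...
++                    ioctl(fd, FM_PORT_IOC_ATTACH_PCD);
++*//***************************************************************************/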
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdPlcrAllocProfiles
++
++ @Description This routine may be called only for ports that use the Policer in
++ order to allocate private policer profiles.
++
++ @Param[in] uint16_t The number of required policer profiles
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions Allowed before FM_PORT_SetPCD() only.
++*//***************************************************************************/
++#define FM_PORT_IOC_PCD_PLCR_ALLOC_PROFILES _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(24), uint16_t)
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdPlcrFreeProfiles
++
++ @Description This routine should be called for freeing private policer profiles.
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions Allowed before FM_PORT_SetPCD() only.
++*//***************************************************************************/
++#define FM_PORT_IOC_PCD_PLCR_FREE_PROFILES _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(25))
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdKgModifyInitialScheme
++
++ @Description This routine may be called only for ports that use the keygen in
++                 order to change the initial scheme a frame should be routed to.
++ The change may be of a scheme id (in case of direct mode),
++ from direct to indirect, or from indirect to direct - specifying the scheme id.
++
++ @Param[in] ioc_fm_pcd_kg_scheme_select_t A structure of parameters for defining whether
++ a scheme is direct/indirect, and if direct - scheme id.
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(26), ioc_compat_fm_pcd_kg_scheme_select_t)
++#endif
++#define FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(26), ioc_fm_pcd_kg_scheme_select_t)
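++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): switching to direct scheme
++                selection. 'scheme_handle' stands for a handle returned by the
++                KG scheme-set ioctl.
++
++                    ioc_fm_pcd_kg_scheme_select_t sel = {
++                        .direct    = 1, // TRUE
++                        .scheme_id = scheme_handle,
++                    };
++                    ioctl(fd, FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME, &sel);
++*//***************************************************************************/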
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdPlcrModifyInitialProfile
++
++ @Description This routine may be called for ports with flows
++ e_IOC_FM_PCD_SUPPORT_PLCR_ONLY or e_IOC_FM_PCD_SUPPORT_PRS_AND_PLCR only,
++                 to change the initial Policer profile a frame should be routed to.
++ The change may be of a profile and/or absolute/direct mode selection.
++
++ @Param[in] ioc_fm_obj_t Policer profile Id as returned from FM_PCD_PlcrSetProfile.
++
++ @Return 0 on success; error code otherwise.
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(27), ioc_compat_fm_obj_t)
++#endif
++#define FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(27), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdCcModifyTree
++
++ @Description   This routine may be called to change this port's connection to
++                a pre-initialized coarse classification tree.
++
++ @Param[in] ioc_fm_obj_t Id of new coarse classification tree selected for this port.
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_SetPCD() and FM_PORT_DetachPCD()
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PORT_IOC_PCD_CC_MODIFY_TREE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(28), ioc_compat_fm_obj_t)
++#endif
++#define FM_PORT_IOC_PCD_CC_MODIFY_TREE _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(28), ioc_fm_obj_t)
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdKgBindSchemes
++
++ @Description These routines may be called for modifying the binding of ports
++ to schemes. The scheme itself is not added,
++ just this specific port starts using it.
++
++ @Param[in]     ioc_fm_pcd_port_schemes_params_t    Schemes parameters structure
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_SetPCD().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PORT_IOC_PCD_KG_BIND_SCHEMES_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(30), ioc_compat_fm_pcd_port_schemes_params_t)
++#endif
++#define FM_PORT_IOC_PCD_KG_BIND_SCHEMES _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(30), ioc_fm_pcd_port_schemes_params_t)
++
++/**************************************************************************//**
++ @Function FM_PORT_PcdKgUnbindSchemes
++
++ @Description These routines may be called for modifying the binding of ports
++ to schemes. The scheme itself is not removed or invalidated,
++ just this specific port stops using it.
++
++ @Param[in]     ioc_fm_pcd_port_schemes_params_t    Schemes parameters structure
++
++ @Return 0 on success; error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_SetPCD().
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(31), ioc_compat_fm_pcd_port_schemes_params_t)
++#endif
++#define FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(31), ioc_fm_pcd_port_schemes_params_t)
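++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): binding a scheme to a port
++                and unbinding it later. 'scheme_handle' stands for a handle
++                returned by the KG scheme-set ioctl.
++
++                    ioc_fm_pcd_port_schemes_params_t sp;
++                    memset(&sp, 0, sizeof(sp));
++                    sp.num_of_schemes = 1;
++                    sp.scheme_ids[0]  = scheme_handle;
++                    ioctl(fd, FM_PORT_IOC_PCD_KG_BIND_SCHEMES, &sp);
++                    // ... later, stop using the scheme on this port:
++                    ioctl(fd, FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES, &sp);
++*//***************************************************************************/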
++
++typedef struct ioc_fm_port_mac_addr_params_t {
++ uint8_t addr[ENET_NUM_OCTETS_PER_ADDRESS];
++} ioc_fm_port_mac_addr_params_t;
++
++/**************************************************************************//**
++ @Function FM_MAC_AddHashMacAddr
++
++ @Description   Add an address to the hash table. This is for filtering purposes only.
++
++ @Param[in] ioc_fm_port_mac_addr_params_t - Ethernet Mac address
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions      Allowed only following FM_MAC_Init(). It is a filter-only address.
++ @Cautions      Some addresses need to be filtered out by upper FM blocks.
++*//***************************************************************************/
++#define FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(36), ioc_fm_port_mac_addr_params_t)
++
++/**************************************************************************//**
++ @Function FM_MAC_RemoveHashMacAddr
++
++ @Description   Delete an address from the hash table. This is for filtering purposes only.
++
++ @Param[in] ioc_fm_port_mac_addr_params_t - Ethernet Mac address
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++*//***************************************************************************/
++#define FM_PORT_IOC_REMOVE_RX_HASH_MAC_ADDR _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(37), ioc_fm_port_mac_addr_params_t)
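++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): adding and later removing a
++                hash filter address, assuming 'fd' is an open FM port device.
++                The multicast address is an arbitrary example.
++
++                    ioc_fm_port_mac_addr_params_t mac = {
++                        .addr = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 },
++                    };
++                    ioctl(fd, FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR, &mac);
++                    // ...
++                    ioctl(fd, FM_PORT_IOC_REMOVE_RX_HASH_MAC_ADDR, &mac);
++*//***************************************************************************/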
++
++typedef struct ioc_fm_port_tx_pause_frames_params_t {
++ uint8_t priority;
++ uint16_t pause_time;
++ uint16_t thresh_time;
++} ioc_fm_port_tx_pause_frames_params_t;
++
++/**************************************************************************//**
++ @Function FM_MAC_SetTxPauseFrames
++
++ @Description Enable/Disable transmission of Pause-Frames.
++ The routine changes the default configuration:
++ pause-time - [0xf000]
++ threshold-time - [0]
++
++ @Param[in] ioc_fm_port_tx_pause_frames_params_t A structure holding the required parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_MAC_Init().
++ PFC is supported only on new mEMAC; i.e. in MACs that don't have
++ PFC support (10G-MAC and dTSEC), user should use 'FM_MAC_NO_PFC'
++ in the 'priority' field.
++*//***************************************************************************/
++#define FM_PORT_IOC_SET_TX_PAUSE_FRAMES _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(40), ioc_fm_port_tx_pause_frames_params_t)
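++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): enabling pause frames with
++                the documented default timing values, assuming 'fd' is an open
++                FM port device.
++
++                    ioc_fm_port_tx_pause_frames_params_t pf = {
++                        .priority    = 0,      // use FM_MAC_NO_PFC on MACs without PFC
++                        .pause_time  = 0xf000, // default pause-time
++                        .thresh_time = 0,      // default threshold-time
++                    };
++                    ioctl(fd, FM_PORT_IOC_SET_TX_PAUSE_FRAMES, &pf);
++*//***************************************************************************/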
++
++typedef struct ioc_fm_port_mac_statistics_t {
++ /* RMON */
++ uint64_t e_stat_pkts_64; /**< r-10G tr-DT 64 byte frame counter */
++ uint64_t e_stat_pkts_65_to_127; /**< r-10G 65 to 127 byte frame counter */
++ uint64_t e_stat_pkts_128_to_255; /**< r-10G 128 to 255 byte frame counter */
++ uint64_t e_stat_pkts_256_to_511; /**< r-10G 256 to 511 byte frame counter */
++ uint64_t e_stat_pkts_512_to_1023; /**< r-10G 512 to 1023 byte frame counter */
++ uint64_t e_stat_pkts_1024_to_1518; /**< r-10G 1024 to 1518 byte frame counter */
++ uint64_t e_stat_pkts_1519_to_1522; /**< r-10G 1519 to 1522 byte good frame count */
++ /* */
++ uint64_t e_stat_fragments; /**< Total number of packets that were less than 64 octets long with a wrong CRC.*/
++ uint64_t e_stat_jabbers; /**< Total number of packets longer than valid maximum length octets */
++    uint64_t  e_stat_drop_events;       /**< Number of dropped packets due to internal errors of the MAC client (during receive). */
++ uint64_t e_stat_CRC_align_errors; /**< Incremented when frames of correct length but with CRC error are received.*/
++ uint64_t e_stat_undersize_pkts; /**< Incremented for frames under 64 bytes with a valid FCS and otherwise well formed;
++ This count does not include range length errors */
++ uint64_t e_stat_oversize_pkts; /**< Incremented for frames which exceed 1518 (non VLAN) or 1522 (VLAN) and contains
++ a valid FCS and otherwise well formed */
++ /* Pause */
++    uint64_t  te_stat_pause;            /**< Pause MAC Control frames transmitted */
++    uint64_t  re_stat_pause;            /**< Pause MAC Control frames received */
++ /* MIB II */
++    uint64_t  if_in_octets;             /**< Total number of bytes received. */
++    uint64_t  if_in_pkts;               /**< Total number of packets received. */
++    uint64_t  if_in_ucast_pkts;         /**< Total number of unicast frames received;
++                                             NOTE: this counter is not supported on dTSEC MAC */
++    uint64_t  if_in_mcast_pkts;         /**< Total number of multicast frames received */
++    uint64_t  if_in_bcast_pkts;         /**< Total number of broadcast frames received */
++    uint64_t  if_in_discards;           /**< Frames received, but discarded due to problems within the MAC RX. */
++    uint64_t  if_in_errors;             /**< Number of frames received with error:
++                                             - FIFO Overflow Error
++                                             - CRC Error
++                                             - Frame Too Long Error
++                                             - Alignment Error
++                                             - The dedicated Error Code (0xfe, not a code error) was received */
++    uint64_t  if_out_octets;            /**< Total number of bytes sent. */
++    uint64_t  if_out_pkts;              /**< Total number of packets sent. */
++    uint64_t  if_out_ucast_pkts;        /**< Total number of unicast frames sent;
++                                             NOTE: this counter is not supported on dTSEC MAC */
++    uint64_t  if_out_mcast_pkts;        /**< Total number of multicast frames sent */
++    uint64_t  if_out_bcast_pkts;        /**< Total number of broadcast frames sent */
++    uint64_t  if_out_discards;          /**< Frames discarded due to problems within the MAC TX (N/A). */
++ uint64_t if_out_errors; /**< Number of frames transmitted with error:
++ - FIFO Overflow Error
++ - FIFO Underflow Error
++ - Other */
++} ioc_fm_port_mac_statistics_t;
++
++/**************************************************************************//**
++ @Function FM_MAC_GetStatistics
++
++ @Description   Get all MAC statistics counters.
++
++ @Param[out] ioc_fm_port_mac_statistics_t A structure holding the statistics
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_Init().
++*//***************************************************************************/
++#define FM_PORT_IOC_GET_MAC_STATISTICS _IOR(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(41), ioc_fm_port_mac_statistics_t)
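++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): reading the MAC counters,
++                assuming 'fd' is an open FM port device and <stdio.h> is
++                included.
++
++                    ioc_fm_port_mac_statistics_t st;
++                    if (ioctl(fd, FM_PORT_IOC_GET_MAC_STATISTICS, &st) == 0)
++                        printf("rx pkts=%llu tx pkts=%llu\n",
++                               (unsigned long long)st.if_in_pkts,
++                               (unsigned long long)st.if_out_pkts);
++*//***************************************************************************/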
++
++/**************************************************************************//**
++ @Function FM_PORT_ConfigBufferPrefixContent
++
++ @Description   Defines the structure, size and content of the application buffer.
++                The buffer prefix holds, in this order: the first 'privDataSize'
++                bytes of private data, then, depending on 'passPrsResult' and
++                'passTimeStamp', the parse result and the time stamp, followed
++                by the packet itself, each at its defined offset. In Tx ports,
++                if 'passPrsResult' is set, the application should place the
++                parse result at its offset in the prefix.
++                Calling this routine changes the buffer margins definitions
++                in the internal driver data base from its default
++                configuration: Data size:  [DEFAULT_FM_SP_bufferPrefixContent_privDataSize]
++                               Pass Parser result: [DEFAULT_FM_SP_bufferPrefixContent_passPrsResult].
++                               Pass timestamp: [DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp].
++
++ May be used for all ports
++
++ @Param[in] ioc_fm_buffer_prefix_content_t A structure holding the required parameters.
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
++*//***************************************************************************/
++#define FM_PORT_IOC_CONFIG_BUFFER_PREFIX_CONTENT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(39), ioc_fm_buffer_prefix_content_t)
++
++#if (DPAA_VERSION >= 11)
++typedef struct ioc_fm_port_vsp_alloc_params_t {
++ uint8_t num_of_profiles; /**< Number of Virtual Storage Profiles */
++    uint8_t     dflt_relative_id;   /**< The default Virtual-Storage-Profile-id dedicated to the Rx/OP port.
++                                         The same default Virtual-Storage-Profile-id will be used for the
++                                         coupled Tx port if this function is called for an Rx port */
++    void        *p_fm_tx_port;      /**< Handle to coupled Tx Port; not relevant for OP port. */
++} ioc_fm_port_vsp_alloc_params_t;
++
++/**************************************************************************//**
++ @Function FM_PORT_VSPAlloc
++
++ @Description   This routine allocates VSPs per port and forces the port to work
++ in VSP mode. Note that the port is initialized by default with the
++ physical-storage-profile only.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[in] p_Params A structure of parameters for allocation VSP's per port
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init(), and before FM_PORT_SetPCD()
++ and also before FM_PORT_Enable() (i.e. the port should be disabled).
++*//***************************************************************************/
++#if defined(CONFIG_COMPAT)
++#define FM_PORT_IOC_VSP_ALLOC_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(38), ioc_compat_fm_port_vsp_alloc_params_t)
++#endif
++#define FM_PORT_IOC_VSP_ALLOC _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(38), ioc_fm_port_vsp_alloc_params_t)
++#endif /* (DPAA_VERSION >= 11) */
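++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only; DPAA_VERSION >= 11 only):
++                allocating two VSPs on a disabled Rx port. 'tx_port_handle'
++                stands for a handle to the coupled Tx port; the values are
++                arbitrary examples.
++
++                    ioc_fm_port_vsp_alloc_params_t vsp;
++                    memset(&vsp, 0, sizeof(vsp));
++                    vsp.num_of_profiles  = 2;
++                    vsp.dflt_relative_id = 0;
++                    vsp.p_fm_tx_port     = tx_port_handle;
++                    ioctl(fd, FM_PORT_IOC_VSP_ALLOC, &vsp);
++*//***************************************************************************/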
++
++/**************************************************************************//**
++ @Function FM_PORT_GetBmiCounters
++
++ @Description Read port's BMI stat counters and place them into
++ a designated structure of counters.
++
++ @Param[in] h_FmPort A handle to a FM Port module.
++ @Param[out] p_BmiStats counters structure
++
++ @Return E_OK on success; Error code otherwise.
++
++ @Cautions Allowed only following FM_PORT_Init().
++*//***************************************************************************/
++
++#define FM_PORT_IOC_GET_BMI_COUNTERS _IOR(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(42), ioc_fm_port_bmi_stats_t)
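++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): reading the BMI counters,
++                assuming 'fd' is an open FM port device and <stdio.h> is
++                included.
++
++                    ioc_fm_port_bmi_stats_t bmi;
++                    if (ioctl(fd, FM_PORT_IOC_GET_BMI_COUNTERS, &bmi) == 0)
++                        printf("frames=%u discards=%u\n",
++                               bmi.cnt_frame, bmi.cnt_discard_frame);
++*//***************************************************************************/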
++
++
++/** @} */ /* end of lnx_ioctl_FM_PORT_pcd_runtime_control_grp group */
++/** @} */ /* end of lnx_ioctl_FM_PORT_runtime_control_grp group */
++
++/** @} */ /* end of lnx_ioctl_FM_PORT_grp group */
++/** @} */ /* end of lnx_ioctl_FM_grp group */
++#endif /* __FM_PORT_IOCTLS_H */
+diff --git a/include/uapi/linux/fmd/Peripherals/fm_test_ioctls.h b/include/uapi/linux/fmd/Peripherals/fm_test_ioctls.h
+new file mode 100644
+index 00000000..207ed1eb
+--- /dev/null
++++ b/include/uapi/linux/fmd/Peripherals/fm_test_ioctls.h
+@@ -0,0 +1,208 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File fm_test_ioctls.h
++
++ @Description FM Char device ioctls
++*//***************************************************************************/
++#ifndef __FM_TEST_IOCTLS_H
++#define __FM_TEST_IOCTLS_H
++
++#include "ioctls.h"
++
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FMT_grp Frame Manager Test Linux IOCTL API
++
++ @Description FM-Test Linux ioctls definitions and enums
++
++ @{
++*//***************************************************************************/
++
++#define IOC_FMT_MAX_NUM_OF_PORTS 26
++
++/**************************************************************************//**
++ @Collection TEST Parameters
++*//***************************************************************************/
++/**************************************************************************//**
++ @Description   Name of the FM-Test chardev
++*//***************************************************************************/
++#define DEV_FM_TEST_NAME "fm-test-port"
++
++#define DEV_FM_TEST_PORTS_MINOR_BASE 0
++#define DEV_FM_TEST_MAX_MINORS (DEV_FM_TEST_PORTS_MINOR_BASE + IOC_FMT_MAX_NUM_OF_PORTS)
++
++#define FMT_PORT_IOC_NUM(n) n
++/* @} */
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FMT_lib_grp FM-Test library
++
++ @Description TODO
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description TODO
++*//***************************************************************************/
++typedef uint8_t ioc_fmt_xxx_t;
++
++#define FM_PRS_MAX 32
++#define FM_TIME_STAMP_MAX 8
++
++/**************************************************************************//**
++ @Description FM Port buffer content description
++*//***************************************************************************/
++typedef struct ioc_fmt_buff_context_t {
++ void *p_user_priv;
++ uint8_t fm_prs_res[FM_PRS_MAX];
++ uint8_t fm_time_stamp[FM_TIME_STAMP_MAX];
++} ioc_fmt_buff_context_t;
++
++#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
++typedef struct ioc_fmt_compat_buff_context_t {
++ compat_uptr_t p_user_priv;
++ uint8_t fm_prs_res[FM_PRS_MAX];
++ uint8_t fm_time_stamp[FM_TIME_STAMP_MAX];
++} ioc_fmt_compat_buff_context_t;
++#endif
++
++/**************************************************************************//**
++ @Description Buffer descriptor
++*//***************************************************************************/
++typedef struct ioc_fmt_buff_desc_t {
++ uint32_t qid;
++ void *p_data;
++ uint32_t size;
++ uint32_t status;
++ ioc_fmt_buff_context_t buff_context;
++} ioc_fmt_buff_desc_t;
++
++#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
++typedef struct ioc_fmt_compat_buff_desc_t {
++ uint32_t qid;
++ compat_uptr_t p_data;
++ uint32_t size;
++ uint32_t status;
++ ioc_fmt_compat_buff_context_t buff_context;
++} ioc_fmt_compat_buff_desc_t;
++#endif
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FMT_runtime_control_grp FM-Test Runtime Control Unit
++
++ @Description TODO
++ @{
++*//***************************************************************************/
++
++/** @} */ /* end of lnx_ioctl_FMT_runtime_control_grp group */
++
++
++/**************************************************************************//**
++ @Group lnx_ioctl_FMTP_lib_grp FM-Port-Test library
++
++ @Description TODO
++
++ @{
++*//***************************************************************************/
++
++/**************************************************************************//**
++ @Description FM-Test FM port type
++*//***************************************************************************/
++typedef enum ioc_fmt_port_type {
++ e_IOC_FMT_PORT_T_RXTX, /**< Standard port */
++ e_IOC_FMT_PORT_T_OP, /**< Offline-parsing port */
++} ioc_fmt_port_type;
++
++/**************************************************************************//**
++ @Description TODO
++*//***************************************************************************/
++typedef struct ioc_fmt_port_param_t {
++ uint8_t fm_id;
++ ioc_fmt_port_type fm_port_type;
++ uint8_t fm_port_id;
++ uint32_t num_tx_queues;
++} ioc_fmt_port_param_t;
++
++
++/**************************************************************************//**
++ @Function FMT_PORT_IOC_INIT
++
++ @Description TODO
++
++ @Param[in] ioc_fmt_port_param_t TODO
++
++ @Cautions Allowed only after the FM equivalent port is already initialized.
++*//***************************************************************************/
++#define FMT_PORT_IOC_INIT _IOW(FMT_IOC_TYPE_BASE, FMT_PORT_IOC_NUM(0), ioc_fmt_port_param_t)
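++
++/**************************************************************************//**
++ @Description   Usage sketch (illustrative only): initializing an FM-Test port.
++                The device node name is hypothetical (derived from
++                DEV_FM_TEST_NAME); the parameter values are arbitrary examples.
++
++                    #include <fcntl.h>
++                    #include <sys/ioctl.h>
++                    #include <linux/fmd/Peripherals/fm_test_ioctls.h>
++
++                    int tfd = open("/dev/fm-test-port0", O_RDWR); // hypothetical
++                    ioc_fmt_port_param_t prm = {
++                        .fm_id         = 0,
++                        .fm_port_type  = e_IOC_FMT_PORT_T_RXTX,
++                        .fm_port_id    = 0,
++                        .num_tx_queues = 1,
++                    };
++                    ioctl(tfd, FMT_PORT_IOC_INIT, &prm);
++*//***************************************************************************/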
++
++/**************************************************************************//**
++ @Function FMT_PORT_IOC_SET_DIAG_MODE
++
++ @Description TODO
++
++ @Param[in] ioc_diag_mode TODO
++
++ @Cautions Allowed only following FMT_PORT_IOC_INIT().
++*//***************************************************************************/
++#define FMT_PORT_IOC_SET_DIAG_MODE _IOW(FMT_IOC_TYPE_BASE, FMT_PORT_IOC_NUM(1), ioc_diag_mode)
++
++/**************************************************************************//**
++ @Function FMT_PORT_IOC_SET_IP_HEADER_MANIP
++
++ @Description Set IP header manipulations for this port.
++
++ @Param[in] int 1 to enable; 0 to disable
++
++ @Cautions Allowed only following FMT_PORT_IOC_INIT().
++*//***************************************************************************/
++#define FMT_PORT_IOC_SET_IP_HEADER_MANIP _IOW(FMT_IOC_TYPE_BASE, FMT_PORT_IOC_NUM(2), int)
++
++/**************************************************************************//**
++ @Function FMT_PORT_IOC_SET_DPAECHO_MODE
++
++ @Description   Set DPA in echo mode - all frames are sent back.
++
++ @Param[in] int 1 to enable; 0 to disable
++
++ @Cautions Allowed only following FMT_PORT_IOC_INIT().
++*//***************************************************************************/
++#define FMT_PORT_IOC_SET_DPAECHO_MODE _IOW(FMT_IOC_TYPE_BASE, FMT_PORT_IOC_NUM(3), int)
++
++/** @} */ /* end of lnx_ioctl_FMTP_lib_grp group */
++/** @} */ /* end of lnx_ioctl_FMT_lib_grp group */
++/** @} */ /* end of lnx_ioctl_FMT_grp */
++
++
++#endif /* __FM_TEST_IOCTLS_H */
+diff --git a/include/uapi/linux/fmd/integrations/Kbuild b/include/uapi/linux/fmd/integrations/Kbuild
+new file mode 100644
+index 00000000..e548d676
+--- /dev/null
++++ b/include/uapi/linux/fmd/integrations/Kbuild
+@@ -0,0 +1 @@
++header-y += integration_ioctls.h
+diff --git a/include/uapi/linux/fmd/integrations/integration_ioctls.h b/include/uapi/linux/fmd/integrations/integration_ioctls.h
+new file mode 100644
+index 00000000..61d696e2
+--- /dev/null
++++ b/include/uapi/linux/fmd/integrations/integration_ioctls.h
+@@ -0,0 +1,56 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File integration_ioctls.h
++
++ @Description External header file for Integration unit routines.
++*//***************************************************************************/
++
++#ifndef __INTG_IOCTLS_H
++#define __INTG_IOCTLS_H
++
++
++#define FM_IOC_TYPE_BASE (NCSW_IOC_TYPE_BASE+1)
++#define FMT_IOC_TYPE_BASE (NCSW_IOC_TYPE_BASE+3)
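++
++/* With NCSW_IOC_TYPE_BASE defined as 0xe0 (see ioctls.h), these resolve to
++ * ioctl magic numbers 0xe1 (FM) and 0xe3 (FM-Test). */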
++
++/*#define FM_IOCTL_DBG*/
++
++#if defined(FM_IOCTL_DBG)
++ #define _fm_ioctl_dbg(format, arg...) \
++ printk("fm ioctl [%s:%u](cpu:%u) - " format, \
++ __func__, __LINE__, smp_processor_id(), ##arg)
++#else
++# define _fm_ioctl_dbg(arg...)
++#endif
++
++#endif /* __INTG_IOCTLS_H */
+diff --git a/include/uapi/linux/fmd/ioctls.h b/include/uapi/linux/fmd/ioctls.h
+new file mode 100644
+index 00000000..4f36cb05
+--- /dev/null
++++ b/include/uapi/linux/fmd/ioctls.h
+@@ -0,0 +1,96 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**************************************************************************//**
++ @File ioctls.h
++
++ @Description Structures and definitions for Command Relay Ioctls
++*//***************************************************************************/
++
++#ifndef __IOCTLS_H__
++#define __IOCTLS_H__
++
++#include <asm/ioctl.h>
++
++#include "integration_ioctls.h"
++
++
++/**************************************************************************//**
++ @Group lnx_ioctl_ncsw_grp NetCommSw Linux User-Space (IOCTL) API
++ @{
++*//***************************************************************************/
++
++#define NCSW_IOC_TYPE_BASE 0xe0 /**< defines the IOCTL type for all
++ the NCSW Linux module commands */
++
++
++/**************************************************************************//**
++ @Description IOCTL Memory allocation types.
++*//***************************************************************************/
++typedef enum ioc_mem_type {
++ e_IOC_MEM_INVALID = 0x00000000, /**< Invalid memory type (error) */
++ e_IOC_MEM_CACHABLE_SYS = 0x00000001, /**< Primary DDR, cacheable segment */
++ e_IOC_MEM_NOCACHE_SYS = 0x00000004, /**< Primary DDR, non-cacheable segment */
++ e_IOC_MEM_SECONDARY = 0x00000002, /**< Either secondary DDR or SDRAM */
++ e_IOC_MEM_PRAM = 0x00000008 /**< Multi-user RAM identifier */
++} ioc_mem_type;
++
++/**************************************************************************//**
++ @Description Enumeration (bit flags) of communication modes (Transmit,
++ receive or both).
++*//***************************************************************************/
++typedef enum ioc_comm_mode {
++ e_IOC_COMM_MODE_NONE = 0 /**< No transmit/receive communication */
++ , e_IOC_COMM_MODE_RX = 1 /**< Only receive communication */
++ , e_IOC_COMM_MODE_TX = 2 /**< Only transmit communication */
++ , e_IOC_COMM_MODE_RX_AND_TX = 3 /**< Both transmit and receive communication */
++} ioc_comm_mode;
++
++/**************************************************************************//**
++ @Description General Diagnostic Mode
++*//***************************************************************************/
++typedef enum ioc_diag_mode
++{
++ e_IOC_DIAG_MODE_NONE = 0,
++ e_IOC_DIAG_MODE_CTRL_LOOPBACK, /**< loopback in the controller; E.g. MAC, TDM, etc. */
++ e_IOC_DIAG_MODE_CHIP_LOOPBACK, /**< loopback in the chip but not in controller;
++ E.g. IO-pins, SerDes, etc. */
++ e_IOC_DIAG_MODE_PHY_LOOPBACK, /**< loopback in the external PHY */
++ e_IOC_DIAG_MODE_LINE_LOOPBACK, /**< loopback in the external line */
++ e_IOC_DIAG_MODE_CTRL_ECHO, /**< echo performed by the controller */
++ e_IOC_DIAG_MODE_PHY_ECHO /**< echo performed by the external PHY */
++} ioc_diag_mode;
++
++/** @} */ /* end of lnx_ioctl_ncsw_grp */
++
++
++#endif /* __IOCTLS_H__ */
+diff --git a/include/uapi/linux/fmd/net_ioctls.h b/include/uapi/linux/fmd/net_ioctls.h
+new file mode 100644
+index 00000000..c99d64cf
+--- /dev/null
++++ b/include/uapi/linux/fmd/net_ioctls.h
+@@ -0,0 +1,430 @@
++/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/**************************************************************************//**
++ @File net_ioctls.h
++
++ @Description This file contains common and general netcomm header definitions.
++*//***************************************************************************/
++#ifndef __NET_IOCTLS_H
++#define __NET_IOCTLS_H
++
++#include "ioctls.h"
++
++
++typedef uint8_t ioc_header_field_ppp_t;
++
++#define IOC_NET_HEADER_FIELD_PPP_PID (1)
++#define IOC_NET_HEADER_FIELD_PPP_COMPRESSED (IOC_NET_HEADER_FIELD_PPP_PID << 1)
++#define IOC_NET_HEADER_FIELD_PPP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PPP_PID << 2) - 1)
++
++
++typedef uint8_t ioc_header_field_pppoe_t;
++
++#define IOC_NET_HEADER_FIELD_PPPoE_VER (1)
++#define IOC_NET_HEADER_FIELD_PPPoE_TYPE (IOC_NET_HEADER_FIELD_PPPoE_VER << 1)
++#define IOC_NET_HEADER_FIELD_PPPoE_CODE (IOC_NET_HEADER_FIELD_PPPoE_VER << 2)
++#define IOC_NET_HEADER_FIELD_PPPoE_SID (IOC_NET_HEADER_FIELD_PPPoE_VER << 3)
++#define IOC_NET_HEADER_FIELD_PPPoE_LEN (IOC_NET_HEADER_FIELD_PPPoE_VER << 4)
++#define IOC_NET_HEADER_FIELD_PPPoE_SESSION (IOC_NET_HEADER_FIELD_PPPoE_VER << 5)
++#define IOC_NET_HEADER_FIELD_PPPoE_PID (IOC_NET_HEADER_FIELD_PPPoE_VER << 6)
++#define IOC_NET_HEADER_FIELD_PPPoE_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PPPoE_VER << 7) - 1)
++
++#define IOC_NET_HEADER_FIELD_PPPMUX_PID (1)
++#define IOC_NET_HEADER_FIELD_PPPMUX_CKSUM (IOC_NET_HEADER_FIELD_PPPMUX_PID << 1)
++#define IOC_NET_HEADER_FIELD_PPPMUX_COMPRESSED (IOC_NET_HEADER_FIELD_PPPMUX_PID << 2)
++#define IOC_NET_HEADER_FIELD_PPPMUX_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PPPMUX_PID << 3) - 1)
++
++#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF (1)
++#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_LXT (IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 1)
++#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_LEN (IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 2)
++#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PID (IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 3)
++#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_USE_PID (IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 4)
++#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 5) - 1)
++
++
++typedef uint8_t ioc_header_field_eth_t;
++
++#define IOC_NET_HEADER_FIELD_ETH_DA (1)
++#define IOC_NET_HEADER_FIELD_ETH_SA (IOC_NET_HEADER_FIELD_ETH_DA << 1)
++#define IOC_NET_HEADER_FIELD_ETH_LENGTH (IOC_NET_HEADER_FIELD_ETH_DA << 2)
++#define IOC_NET_HEADER_FIELD_ETH_TYPE (IOC_NET_HEADER_FIELD_ETH_DA << 3)
++#define IOC_NET_HEADER_FIELD_ETH_FINAL_CKSUM (IOC_NET_HEADER_FIELD_ETH_DA << 4)
++#define IOC_NET_HEADER_FIELD_ETH_PADDING (IOC_NET_HEADER_FIELD_ETH_DA << 5)
++#define IOC_NET_HEADER_FIELD_ETH_ALL_FIELDS ((IOC_NET_HEADER_FIELD_ETH_DA << 6) - 1)
++
++#define IOC_NET_HEADER_FIELD_ETH_ADDR_SIZE 6
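++
++/* Each field flag is the first flag of its group shifted left by one more
++ * position, so ((first << n) - 1) sets all n flags at once; e.g.
++ * IOC_NET_HEADER_FIELD_ETH_ALL_FIELDS == (1 << 6) - 1 == 0x3f. The same
++ * pattern is used for every header type below. */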
++
++typedef uint16_t ioc_header_field_ip_t;
++
++#define IOC_NET_HEADER_FIELD_IP_VER (1)
++#define IOC_NET_HEADER_FIELD_IP_DSCP (IOC_NET_HEADER_FIELD_IP_VER << 2)
++#define IOC_NET_HEADER_FIELD_IP_ECN (IOC_NET_HEADER_FIELD_IP_VER << 3)
++#define IOC_NET_HEADER_FIELD_IP_PROTO (IOC_NET_HEADER_FIELD_IP_VER << 4)
++
++#define IOC_NET_HEADER_FIELD_IP_PROTO_SIZE 1
++
++typedef uint16_t ioc_header_field_ipv4_t;
++
++#define IOC_NET_HEADER_FIELD_IPv4_VER (1)
++#define IOC_NET_HEADER_FIELD_IPv4_HDR_LEN (IOC_NET_HEADER_FIELD_IPv4_VER << 1)
++#define IOC_NET_HEADER_FIELD_IPv4_TOS (IOC_NET_HEADER_FIELD_IPv4_VER << 2)
++#define IOC_NET_HEADER_FIELD_IPv4_TOTAL_LEN (IOC_NET_HEADER_FIELD_IPv4_VER << 3)
++#define IOC_NET_HEADER_FIELD_IPv4_ID (IOC_NET_HEADER_FIELD_IPv4_VER << 4)
++#define IOC_NET_HEADER_FIELD_IPv4_FLAG_D (IOC_NET_HEADER_FIELD_IPv4_VER << 5)
++#define IOC_NET_HEADER_FIELD_IPv4_FLAG_M (IOC_NET_HEADER_FIELD_IPv4_VER << 6)
++#define IOC_NET_HEADER_FIELD_IPv4_OFFSET (IOC_NET_HEADER_FIELD_IPv4_VER << 7)
++#define IOC_NET_HEADER_FIELD_IPv4_TTL (IOC_NET_HEADER_FIELD_IPv4_VER << 8)
++#define IOC_NET_HEADER_FIELD_IPv4_PROTO (IOC_NET_HEADER_FIELD_IPv4_VER << 9)
++#define IOC_NET_HEADER_FIELD_IPv4_CKSUM (IOC_NET_HEADER_FIELD_IPv4_VER << 10)
++#define IOC_NET_HEADER_FIELD_IPv4_SRC_IP (IOC_NET_HEADER_FIELD_IPv4_VER << 11)
++#define IOC_NET_HEADER_FIELD_IPv4_DST_IP (IOC_NET_HEADER_FIELD_IPv4_VER << 12)
++#define IOC_NET_HEADER_FIELD_IPv4_OPTS (IOC_NET_HEADER_FIELD_IPv4_VER << 13)
++#define IOC_NET_HEADER_FIELD_IPv4_OPTS_COUNT (IOC_NET_HEADER_FIELD_IPv4_VER << 14)
++#define IOC_NET_HEADER_FIELD_IPv4_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPv4_VER << 15) - 1)
++
++#define IOC_NET_HEADER_FIELD_IPv4_ADDR_SIZE 4
++#define IOC_NET_HEADER_FIELD_IPv4_PROTO_SIZE 1
++
++
++typedef uint8_t ioc_header_field_ipv6_t;
++
++#define IOC_NET_HEADER_FIELD_IPv6_VER (1)
++#define IOC_NET_HEADER_FIELD_IPv6_TC (IOC_NET_HEADER_FIELD_IPv6_VER << 1)
++#define IOC_NET_HEADER_FIELD_IPv6_SRC_IP (IOC_NET_HEADER_FIELD_IPv6_VER << 2)
++#define IOC_NET_HEADER_FIELD_IPv6_DST_IP (IOC_NET_HEADER_FIELD_IPv6_VER << 3)
++#define IOC_NET_HEADER_FIELD_IPv6_NEXT_HDR (IOC_NET_HEADER_FIELD_IPv6_VER << 4)
++#define IOC_NET_HEADER_FIELD_IPv6_FL (IOC_NET_HEADER_FIELD_IPv6_VER << 5)
++#define IOC_NET_HEADER_FIELD_IPv6_HOP_LIMIT (IOC_NET_HEADER_FIELD_IPv6_VER << 6)
++#define IOC_NET_HEADER_FIELD_IPv6_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPv6_VER << 7) - 1)
++
++#define IOC_NET_HEADER_FIELD_IPv6_ADDR_SIZE 16
++#define IOC_NET_HEADER_FIELD_IPv6_NEXT_HDR_SIZE 1
++
++#define IOC_NET_HEADER_FIELD_ICMP_TYPE (1)
++#define IOC_NET_HEADER_FIELD_ICMP_CODE (IOC_NET_HEADER_FIELD_ICMP_TYPE << 1)
++#define IOC_NET_HEADER_FIELD_ICMP_CKSUM (IOC_NET_HEADER_FIELD_ICMP_TYPE << 2)
++#define IOC_NET_HEADER_FIELD_ICMP_ID (IOC_NET_HEADER_FIELD_ICMP_TYPE << 3)
++#define IOC_NET_HEADER_FIELD_ICMP_SQ_NUM (IOC_NET_HEADER_FIELD_ICMP_TYPE << 4)
++#define IOC_NET_HEADER_FIELD_ICMP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_ICMP_TYPE << 5) - 1)
++
++#define IOC_NET_HEADER_FIELD_ICMP_CODE_SIZE 1
++#define IOC_NET_HEADER_FIELD_ICMP_TYPE_SIZE 1
++
++#define IOC_NET_HEADER_FIELD_IGMP_VERSION (1)
++#define IOC_NET_HEADER_FIELD_IGMP_TYPE (IOC_NET_HEADER_FIELD_IGMP_VERSION << 1)
++#define IOC_NET_HEADER_FIELD_IGMP_CKSUM (IOC_NET_HEADER_FIELD_IGMP_VERSION << 2)
++#define IOC_NET_HEADER_FIELD_IGMP_DATA (IOC_NET_HEADER_FIELD_IGMP_VERSION << 3)
++#define IOC_NET_HEADER_FIELD_IGMP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IGMP_VERSION << 4) - 1)
++
++
++typedef uint16_t ioc_header_field_tcp_t;
++
++#define IOC_NET_HEADER_FIELD_TCP_PORT_SRC (1)
++#define IOC_NET_HEADER_FIELD_TCP_PORT_DST (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 1)
++#define IOC_NET_HEADER_FIELD_TCP_SEQ (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 2)
++#define IOC_NET_HEADER_FIELD_TCP_ACK (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 3)
++#define IOC_NET_HEADER_FIELD_TCP_OFFSET (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 4)
++#define IOC_NET_HEADER_FIELD_TCP_FLAGS (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 5)
++#define IOC_NET_HEADER_FIELD_TCP_WINDOW (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 6)
++#define IOC_NET_HEADER_FIELD_TCP_CKSUM (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 7)
++#define IOC_NET_HEADER_FIELD_TCP_URGPTR (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 8)
++#define IOC_NET_HEADER_FIELD_TCP_OPTS (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 9)
++#define IOC_NET_HEADER_FIELD_TCP_OPTS_COUNT (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 10)
++#define IOC_NET_HEADER_FIELD_TCP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 11) - 1)
++
++#define IOC_NET_HEADER_FIELD_TCP_PORT_SIZE 2
++
++
++typedef uint8_t ioc_header_field_sctp_t;
++
++#define IOC_NET_HEADER_FIELD_SCTP_PORT_SRC (1)
++#define IOC_NET_HEADER_FIELD_SCTP_PORT_DST (IOC_NET_HEADER_FIELD_SCTP_PORT_SRC << 1)
++#define IOC_NET_HEADER_FIELD_SCTP_VER_TAG (IOC_NET_HEADER_FIELD_SCTP_PORT_SRC << 2)
++#define IOC_NET_HEADER_FIELD_SCTP_CKSUM (IOC_NET_HEADER_FIELD_SCTP_PORT_SRC << 3)
++#define IOC_NET_HEADER_FIELD_SCTP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_SCTP_PORT_SRC << 4) - 1)
++
++#define IOC_NET_HEADER_FIELD_SCTP_PORT_SIZE 2
++
++typedef uint8_t ioc_header_field_dccp_t;
++
++#define IOC_NET_HEADER_FIELD_DCCP_PORT_SRC (1)
++#define IOC_NET_HEADER_FIELD_DCCP_PORT_DST (IOC_NET_HEADER_FIELD_DCCP_PORT_SRC << 1)
++#define IOC_NET_HEADER_FIELD_DCCP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_DCCP_PORT_SRC << 2) - 1)
++
++#define IOC_NET_HEADER_FIELD_DCCP_PORT_SIZE 2
++
++
++typedef uint8_t ioc_header_field_udp_t;
++
++#define IOC_NET_HEADER_FIELD_UDP_PORT_SRC (1)
++#define IOC_NET_HEADER_FIELD_UDP_PORT_DST (IOC_NET_HEADER_FIELD_UDP_PORT_SRC << 1)
++#define IOC_NET_HEADER_FIELD_UDP_LEN (IOC_NET_HEADER_FIELD_UDP_PORT_SRC << 2)
++#define IOC_NET_HEADER_FIELD_UDP_CKSUM (IOC_NET_HEADER_FIELD_UDP_PORT_SRC << 3)
++#define IOC_NET_HEADER_FIELD_UDP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_UDP_PORT_SRC << 4) - 1)
++
++#define IOC_NET_HEADER_FIELD_UDP_PORT_SIZE 2
++
++typedef uint8_t ioc_header_field_udp_lite_t;
++
++#define IOC_NET_HEADER_FIELD_UDP_LITE_PORT_SRC (1)
++#define IOC_NET_HEADER_FIELD_UDP_LITE_PORT_DST (IOC_NET_HEADER_FIELD_UDP_LITE_PORT_SRC << 1)
++#define IOC_NET_HEADER_FIELD_UDP_LITE_ALL_FIELDS ((IOC_NET_HEADER_FIELD_UDP_LITE_PORT_SRC << 2) - 1)
++
++#define IOC_NET_HEADER_FIELD_UDP_LITE_PORT_SIZE 2
++
++typedef uint8_t ioc_header_field_udp_encap_esp_t;
++
++#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC (1)
++#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 1)
++#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 2)
++#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 3)
++#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 4)
++#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 5)
++#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 6) - 1)
++
++#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SIZE 2
++#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI_SIZE 4
++
++#define IOC_NET_HEADER_FIELD_IPHC_CID (1)
++#define IOC_NET_HEADER_FIELD_IPHC_CID_TYPE (IOC_NET_HEADER_FIELD_IPHC_CID << 1)
++#define IOC_NET_HEADER_FIELD_IPHC_HCINDEX (IOC_NET_HEADER_FIELD_IPHC_CID << 2)
++#define IOC_NET_HEADER_FIELD_IPHC_GEN (IOC_NET_HEADER_FIELD_IPHC_CID << 3)
++#define IOC_NET_HEADER_FIELD_IPHC_D_BIT (IOC_NET_HEADER_FIELD_IPHC_CID << 4)
++#define IOC_NET_HEADER_FIELD_IPHC_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPHC_CID << 5) - 1)
++
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE (1)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_FLAGS (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 1)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_LENGTH (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 2)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TSN (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 3)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_ID (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 4)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_SQN (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 5)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_PAYLOAD_PID (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 6)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_UNORDERED (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 7)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_BEGGINING (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 8)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_END (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 9)
++#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_ALL_FIELDS ((IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
++
++#define IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT (1)
++#define IOC_NET_HEADER_FIELD_L2TPv2_LENGTH_BIT (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 1)
++#define IOC_NET_HEADER_FIELD_L2TPv2_SEQUENCE_BIT (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 2)
++#define IOC_NET_HEADER_FIELD_L2TPv2_OFFSET_BIT (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 3)
++#define IOC_NET_HEADER_FIELD_L2TPv2_PRIORITY_BIT (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 4)
++#define IOC_NET_HEADER_FIELD_L2TPv2_VERSION (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 5)
++#define IOC_NET_HEADER_FIELD_L2TPv2_LEN (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 6)
++#define IOC_NET_HEADER_FIELD_L2TPv2_TUNNEL_ID (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 7)
++#define IOC_NET_HEADER_FIELD_L2TPv2_SESSION_ID (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 8)
++#define IOC_NET_HEADER_FIELD_L2TPv2_NS (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 9)
++#define IOC_NET_HEADER_FIELD_L2TPv2_NR (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 10)
++#define IOC_NET_HEADER_FIELD_L2TPv2_OFFSET_SIZE (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 11)
++#define IOC_NET_HEADER_FIELD_L2TPv2_FIRST_BYTE (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 12)
++#define IOC_NET_HEADER_FIELD_L2TPv2_ALL_FIELDS ((IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 13) - 1)
++
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT (1)
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH_BIT (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 1)
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_SEQUENCE_BIT (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 2)
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_VERSION (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 3)
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 4)
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_CONTROL (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 5)
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_SENT (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 6)
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_RECV (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 7)
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_FIRST_BYTE (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 8)
++#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_ALL_FIELDS ((IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 9) - 1)
++
++#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT (1)
++#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_VERSION (IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 1)
++#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_ID (IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 2)
++#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_COOKIE (IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 3)
++#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_ALL_FIELDS ((IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 4) - 1)
++
++
++typedef uint8_t ioc_header_field_vlan_t;
++
++#define IOC_NET_HEADER_FIELD_VLAN_VPRI (1)
++#define IOC_NET_HEADER_FIELD_VLAN_CFI (IOC_NET_HEADER_FIELD_VLAN_VPRI << 1)
++#define IOC_NET_HEADER_FIELD_VLAN_VID (IOC_NET_HEADER_FIELD_VLAN_VPRI << 2)
++#define IOC_NET_HEADER_FIELD_VLAN_LENGTH (IOC_NET_HEADER_FIELD_VLAN_VPRI << 3)
++#define IOC_NET_HEADER_FIELD_VLAN_TYPE (IOC_NET_HEADER_FIELD_VLAN_VPRI << 4)
++#define IOC_NET_HEADER_FIELD_VLAN_ALL_FIELDS ((IOC_NET_HEADER_FIELD_VLAN_VPRI << 5) - 1)
++
++#define IOC_NET_HEADER_FIELD_VLAN_TCI (IOC_NET_HEADER_FIELD_VLAN_VPRI | \
++ IOC_NET_HEADER_FIELD_VLAN_CFI | \
++ IOC_NET_HEADER_FIELD_VLAN_VID)
++
++
++typedef uint8_t ioc_header_field_llc_t;
++
++#define IOC_NET_HEADER_FIELD_LLC_DSAP (1)
++#define IOC_NET_HEADER_FIELD_LLC_SSAP (IOC_NET_HEADER_FIELD_LLC_DSAP << 1)
++#define IOC_NET_HEADER_FIELD_LLC_CTRL (IOC_NET_HEADER_FIELD_LLC_DSAP << 2)
++#define IOC_NET_HEADER_FIELD_LLC_ALL_FIELDS ((IOC_NET_HEADER_FIELD_LLC_DSAP << 3) - 1)
++
++#define IOC_NET_HEADER_FIELD_NLPID_NLPID (1)
++#define IOC_NET_HEADER_FIELD_NLPID_ALL_FIELDS ((IOC_NET_HEADER_FIELD_NLPID_NLPID << 1) - 1)
++
++
++typedef uint8_t ioc_header_field_snap_t;
++
++#define IOC_NET_HEADER_FIELD_SNAP_OUI (1)
++#define IOC_NET_HEADER_FIELD_SNAP_PID (IOC_NET_HEADER_FIELD_SNAP_OUI << 1)
++#define IOC_NET_HEADER_FIELD_SNAP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_SNAP_OUI << 2) - 1)
++
++
++typedef uint8_t ioc_header_field_llc_snap_t;
++
++#define IOC_NET_HEADER_FIELD_LLC_SNAP_TYPE (1)
++#define IOC_NET_HEADER_FIELD_LLC_SNAP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_LLC_SNAP_TYPE << 1) - 1)
++
++#define IOC_NET_HEADER_FIELD_ARP_HTYPE (1)
++#define IOC_NET_HEADER_FIELD_ARP_PTYPE (IOC_NET_HEADER_FIELD_ARP_HTYPE << 1)
++#define IOC_NET_HEADER_FIELD_ARP_HLEN (IOC_NET_HEADER_FIELD_ARP_HTYPE << 2)
++#define IOC_NET_HEADER_FIELD_ARP_PLEN (IOC_NET_HEADER_FIELD_ARP_HTYPE << 3)
++#define IOC_NET_HEADER_FIELD_ARP_OPER (IOC_NET_HEADER_FIELD_ARP_HTYPE << 4)
++#define IOC_NET_HEADER_FIELD_ARP_SHA (IOC_NET_HEADER_FIELD_ARP_HTYPE << 5)
++#define IOC_NET_HEADER_FIELD_ARP_SPA (IOC_NET_HEADER_FIELD_ARP_HTYPE << 6)
++#define IOC_NET_HEADER_FIELD_ARP_THA (IOC_NET_HEADER_FIELD_ARP_HTYPE << 7)
++#define IOC_NET_HEADER_FIELD_ARP_TPA (IOC_NET_HEADER_FIELD_ARP_HTYPE << 8)
++#define IOC_NET_HEADER_FIELD_ARP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_ARP_HTYPE << 9) - 1)
++
++#define IOC_NET_HEADER_FIELD_RFC2684_LLC (1)
++#define IOC_NET_HEADER_FIELD_RFC2684_NLPID (IOC_NET_HEADER_FIELD_RFC2684_LLC << 1)
++#define IOC_NET_HEADER_FIELD_RFC2684_OUI (IOC_NET_HEADER_FIELD_RFC2684_LLC << 2)
++#define IOC_NET_HEADER_FIELD_RFC2684_PID (IOC_NET_HEADER_FIELD_RFC2684_LLC << 3)
++#define IOC_NET_HEADER_FIELD_RFC2684_VPN_OUI (IOC_NET_HEADER_FIELD_RFC2684_LLC << 4)
++#define IOC_NET_HEADER_FIELD_RFC2684_VPN_IDX (IOC_NET_HEADER_FIELD_RFC2684_LLC << 5)
++#define IOC_NET_HEADER_FIELD_RFC2684_ALL_FIELDS ((IOC_NET_HEADER_FIELD_RFC2684_LLC << 6) - 1)
++
++#define IOC_NET_HEADER_FIELD_USER_DEFINED_SRCPORT (1)
++#define IOC_NET_HEADER_FIELD_USER_DEFINED_PCDID (IOC_NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 1)
++#define IOC_NET_HEADER_FIELD_USER_DEFINED_ALL_FIELDS ((IOC_NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 2) - 1)
++
++#define IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER (1)
++#define IOC_NET_HEADER_FIELD_PAYLOAD_SIZE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 1)
++#define IOC_NET_HEADER_FIELD_MAX_FRM_SIZE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 2)
++#define IOC_NET_HEADER_FIELD_MIN_FRM_SIZE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 3)
++#define IOC_NET_HEADER_FIELD_PAYLOAD_TYPE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 4)
++#define IOC_NET_HEADER_FIELD_FRAME_SIZE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 5)
++#define IOC_NET_HEADER_FIELD_PAYLOAD_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 6) - 1)
++
++
++typedef uint8_t ioc_header_field_gre_t;
++
++#define IOC_NET_HEADER_FIELD_GRE_TYPE (1)
++#define IOC_NET_HEADER_FIELD_GRE_ALL_FIELDS ((IOC_NET_HEADER_FIELD_GRE_TYPE << 1) - 1)
++
++
++typedef uint8_t ioc_header_field_minencap_t;
++
++#define IOC_NET_HEADER_FIELD_MINENCAP_SRC_IP (1)
++#define IOC_NET_HEADER_FIELD_MINENCAP_DST_IP (IOC_NET_HEADER_FIELD_MINENCAP_SRC_IP << 1)
++#define IOC_NET_HEADER_FIELD_MINENCAP_TYPE (IOC_NET_HEADER_FIELD_MINENCAP_SRC_IP << 2)
++#define IOC_NET_HEADER_FIELD_MINENCAP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_MINENCAP_SRC_IP << 3) - 1)
++
++
++typedef uint8_t ioc_header_field_ipsec_ah_t;
++
++#define IOC_NET_HEADER_FIELD_IPSEC_AH_SPI (1)
++#define IOC_NET_HEADER_FIELD_IPSEC_AH_NH (IOC_NET_HEADER_FIELD_IPSEC_AH_SPI << 1)
++#define IOC_NET_HEADER_FIELD_IPSEC_AH_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPSEC_AH_SPI << 2) - 1)
++
++
++typedef uint8_t ioc_header_field_ipsec_esp_t;
++
++#define IOC_NET_HEADER_FIELD_IPSEC_ESP_SPI (1)
++#define IOC_NET_HEADER_FIELD_IPSEC_ESP_SEQUENCE_NUM (IOC_NET_HEADER_FIELD_IPSEC_ESP_SPI << 1)
++#define IOC_NET_HEADER_FIELD_IPSEC_ESP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPSEC_ESP_SPI << 2) - 1)
++
++#define IOC_NET_HEADER_FIELD_IPSEC_ESP_SPI_SIZE 4
++
++
++typedef uint8_t ioc_header_field_mpls_t;
++
++#define IOC_NET_HEADER_FIELD_MPLS_LABEL_STACK (1)
++#define IOC_NET_HEADER_FIELD_MPLS_LABEL_STACK_ALL_FIELDS ((IOC_NET_HEADER_FIELD_MPLS_LABEL_STACK << 1) - 1)
++
++
++typedef uint8_t ioc_header_field_macsec_t;
++
++#define IOC_NET_HEADER_FIELD_MACSEC_SECTAG (1)
++#define IOC_NET_HEADER_FIELD_MACSEC_ALL_FIELDS ((IOC_NET_HEADER_FIELD_MACSEC_SECTAG << 1) - 1)
++
++
++typedef enum {
++ e_IOC_NET_HEADER_TYPE_NONE = 0,
++ e_IOC_NET_HEADER_TYPE_PAYLOAD,
++ e_IOC_NET_HEADER_TYPE_ETH,
++ e_IOC_NET_HEADER_TYPE_VLAN,
++ e_IOC_NET_HEADER_TYPE_IPv4,
++ e_IOC_NET_HEADER_TYPE_IPv6,
++ e_IOC_NET_HEADER_TYPE_IP,
++ e_IOC_NET_HEADER_TYPE_TCP,
++ e_IOC_NET_HEADER_TYPE_UDP,
++ e_IOC_NET_HEADER_TYPE_UDP_LITE,
++ e_IOC_NET_HEADER_TYPE_IPHC,
++ e_IOC_NET_HEADER_TYPE_SCTP,
++ e_IOC_NET_HEADER_TYPE_SCTP_CHUNK_DATA,
++ e_IOC_NET_HEADER_TYPE_PPPoE,
++ e_IOC_NET_HEADER_TYPE_PPP,
++ e_IOC_NET_HEADER_TYPE_PPPMUX,
++ e_IOC_NET_HEADER_TYPE_PPPMUX_SUBFRAME,
++ e_IOC_NET_HEADER_TYPE_L2TPv2,
++ e_IOC_NET_HEADER_TYPE_L2TPv3_CTRL,
++ e_IOC_NET_HEADER_TYPE_L2TPv3_SESS,
++ e_IOC_NET_HEADER_TYPE_LLC,
++ e_IOC_NET_HEADER_TYPE_LLC_SNAP,
++ e_IOC_NET_HEADER_TYPE_NLPID,
++ e_IOC_NET_HEADER_TYPE_SNAP,
++ e_IOC_NET_HEADER_TYPE_MPLS,
++ e_IOC_NET_HEADER_TYPE_IPSEC_AH,
++ e_IOC_NET_HEADER_TYPE_IPSEC_ESP,
++ e_IOC_NET_HEADER_TYPE_UDP_ENCAP_ESP, /* RFC 3948 */
++ e_IOC_NET_HEADER_TYPE_MACSEC,
++ e_IOC_NET_HEADER_TYPE_GRE,
++ e_IOC_NET_HEADER_TYPE_MINENCAP,
++ e_IOC_NET_HEADER_TYPE_DCCP,
++ e_IOC_NET_HEADER_TYPE_ICMP,
++ e_IOC_NET_HEADER_TYPE_IGMP,
++ e_IOC_NET_HEADER_TYPE_ARP,
++ e_IOC_NET_HEADER_TYPE_CAPWAP,
++ e_IOC_NET_HEADER_TYPE_CAPWAP_DTLS,
++ e_IOC_NET_HEADER_TYPE_RFC2684,
++ e_IOC_NET_HEADER_TYPE_USER_DEFINED_L2,
++ e_IOC_NET_HEADER_TYPE_USER_DEFINED_L3,
++ e_IOC_NET_HEADER_TYPE_USER_DEFINED_L4,
++ e_IOC_NET_HEADER_TYPE_USER_DEFINED_SHIM1,
++ e_IOC_NET_HEADER_TYPE_USER_DEFINED_SHIM2,
++ e_IOC_NET_MAX_HEADER_TYPE_COUNT
++} ioc_net_header_type;
++
++
++#endif /* __NET_IOCTLS_H */
+--
+2.14.1
+
diff --git a/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch b/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch
new file mode 100644
index 0000000000..c3dcd73b60
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch
@@ -0,0 +1,2062 @@
+From c4813da334b0c31e9c55eea015f1e898e84ff45b Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 11:04:10 +0800
+Subject: [PATCH] pci: support layerscape
+
+This is an integrated patch for layerscape PCIe support.
+
+Signed-off-by: Po Liu <po.liu@nxp.com>
+Signed-off-by: Liu Gang <Gang.Liu@nxp.com>
+Signed-off-by: Minghuan Lian <Minghuan.Lian@freescale.com>
+Signed-off-by: hongbo.wang <hongbo.wang@nxp.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+Signed-off-by: Mingkai Hu <mingkai.hu@nxp.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/irqchip/irq-ls-scfg-msi.c | 256 +++++++--
+ drivers/pci/host/Makefile | 2 +-
+ drivers/pci/host/pci-layerscape-ep-debugfs.c | 758 +++++++++++++++++++++++++++
+ drivers/pci/host/pci-layerscape-ep.c | 309 +++++++++++
+ drivers/pci/host/pci-layerscape-ep.h | 115 ++++
+ drivers/pci/host/pci-layerscape.c | 37 +-
+ drivers/pci/host/pcie-designware.c | 6 +
+ drivers/pci/host/pcie-designware.h | 1 +
+ drivers/pci/pcie/portdrv_core.c | 181 +++----
+ include/linux/pci.h | 1 +
+ 10 files changed, 1518 insertions(+), 148 deletions(-)
+ create mode 100644 drivers/pci/host/pci-layerscape-ep-debugfs.c
+ create mode 100644 drivers/pci/host/pci-layerscape-ep.c
+ create mode 100644 drivers/pci/host/pci-layerscape-ep.h
+
+diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
+index 02cca74c..119f4ef0 100644
+--- a/drivers/irqchip/irq-ls-scfg-msi.c
++++ b/drivers/irqchip/irq-ls-scfg-msi.c
+@@ -17,13 +17,32 @@
+ #include <linux/irq.h>
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/irqdomain.h>
++#include <linux/of_irq.h>
+ #include <linux/of_pci.h>
+ #include <linux/of_platform.h>
+ #include <linux/spinlock.h>
+
+-#define MSI_MAX_IRQS 32
+-#define MSI_IBS_SHIFT 3
+-#define MSIR 4
++#define MSI_IRQS_PER_MSIR 32
++#define MSI_MSIR_OFFSET 4
++
++#define MSI_LS1043V1_1_IRQS_PER_MSIR 8
++#define MSI_LS1043V1_1_MSIR_OFFSET 0x10
++
++struct ls_scfg_msi_cfg {
++ u32 ibs_shift; /* Shift of interrupt bit select */
++ u32 msir_irqs; /* The irq number per MSIR */
++ u32 msir_base; /* The base address of MSIR */
++};
++
++struct ls_scfg_msir {
++ struct ls_scfg_msi *msi_data;
++ unsigned int index;
++ unsigned int gic_irq;
++ unsigned int bit_start;
++ unsigned int bit_end;
++ unsigned int srs; /* Shared interrupt register select */
++ void __iomem *reg;
++};
+
+ struct ls_scfg_msi {
+ spinlock_t lock;
+@@ -32,8 +51,11 @@ struct ls_scfg_msi {
+ struct irq_domain *msi_domain;
+ void __iomem *regs;
+ phys_addr_t msiir_addr;
+- int irq;
+- DECLARE_BITMAP(used, MSI_MAX_IRQS);
++ struct ls_scfg_msi_cfg *cfg;
++ u32 msir_num;
++ struct ls_scfg_msir *msir;
++ u32 irqs_num;
++ unsigned long *used;
+ };
+
+ static struct irq_chip ls_scfg_msi_irq_chip = {
+@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_msi_domain_info = {
+ .chip = &ls_scfg_msi_irq_chip,
+ };
+
++static int msi_affinity_flag = 1;
++
++static int __init early_parse_ls_scfg_msi(char *p)
++{
++ if (p && strncmp(p, "no-affinity", 11) == 0)
++ msi_affinity_flag = 0;
++ else
++ msi_affinity_flag = 1;
++
++ return 0;
++}
++early_param("lsmsi", early_parse_ls_scfg_msi);
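++
++/* Per-CPU MSI affinity is enabled by default; booting with
++ * "lsmsi=no-affinity" on the kernel command line disables it. */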
++
+ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+ {
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ msg->address_hi = upper_32_bits(msi_data->msiir_addr);
+ msg->address_lo = lower_32_bits(msi_data->msiir_addr);
+- msg->data = data->hwirq << MSI_IBS_SHIFT;
++ msg->data = data->hwirq;
++
++ if (msi_affinity_flag)
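++	/* With affinity enabled, the low bits of the message data select the
++	 * per-CPU MSIR, so fold in the first CPU of the affinity mask. */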
++ msg->data |= cpumask_first(data->common->affinity);
+ }
+
+ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+ {
+- return -EINVAL;
++ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
++ u32 cpu;
++
++ if (!msi_affinity_flag)
++ return -EINVAL;
++
++ if (!force)
++ cpu = cpumask_any_and(mask, cpu_online_mask);
++ else
++ cpu = cpumask_first(mask);
++
++ if (cpu >= msi_data->msir_num)
++ return -EINVAL;
++
++ if (msi_data->msir[cpu].gic_irq <= 0) {
++ pr_warn("cannot bind the irq to cpu%d\n", cpu);
++ return -EINVAL;
++ }
++
++ cpumask_copy(irq_data->common->affinity, mask);
++
++ return IRQ_SET_MASK_OK;
+ }
+
+ static struct irq_chip ls_scfg_msi_parent_chip = {
+@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
+ WARN_ON(nr_irqs != 1);
+
+ spin_lock(&msi_data->lock);
+- pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
+- if (pos < MSI_MAX_IRQS)
++ pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
++ if (pos < msi_data->irqs_num)
+ __set_bit(pos, msi_data->used);
+ else
+ err = -ENOSPC;
+@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
+ int pos;
+
+ pos = d->hwirq;
+- if (pos < 0 || pos >= MSI_MAX_IRQS) {
++ if (pos < 0 || pos >= msi_data->irqs_num) {
+ pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
+ return;
+ }
+@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
+
+ static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
+ {
+- struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
++ struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
++ struct ls_scfg_msi *msi_data = msir->msi_data;
+ unsigned long val;
+- int pos, virq;
++ int pos, size, virq, hwirq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+- val = ioread32be(msi_data->regs + MSIR);
+- for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
+- virq = irq_find_mapping(msi_data->parent, (31 - pos));
++ val = ioread32be(msir->reg);
++
++ pos = msir->bit_start;
++ size = msir->bit_end + 1;
++
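++	/*
++	 * Recover the hwirq from each set bit: the interrupt bit select (IBS)
++	 * forms the upper field and the shared register select (SRS) the low
++	 * ibs_shift bits, mirroring the encoding used at allocation time.
++	 */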
++ for_each_set_bit_from(pos, &val, size) {
++ hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
++ msir->srs;
++ virq = irq_find_mapping(msi_data->parent, hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
+ {
+ /* Initialize MSI domain parent */
+ msi_data->parent = irq_domain_add_linear(NULL,
+- MSI_MAX_IRQS,
++ msi_data->irqs_num,
+ &ls_scfg_msi_domain_ops,
+ msi_data);
+ if (!msi_data->parent) {
+@@ -164,16 +230,117 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
+ return 0;
+ }
+
++static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
++{
++ struct ls_scfg_msir *msir;
++ int virq, i, hwirq;
++
++ virq = platform_get_irq(msi_data->pdev, index);
++ if (virq <= 0)
++ return -ENODEV;
++
++ msir = &msi_data->msir[index];
++ msir->index = index;
++ msir->msi_data = msi_data;
++ msir->gic_irq = virq;
++ msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
++
++ if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
++ msir->bit_start = 32 - ((msir->index + 1) *
++ MSI_LS1043V1_1_IRQS_PER_MSIR);
++ msir->bit_end = msir->bit_start +
++ MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
++ } else {
++ msir->bit_start = 0;
++ msir->bit_end = msi_data->cfg->msir_irqs - 1;
++ }
++
++ irq_set_chained_handler_and_data(msir->gic_irq,
++ ls_scfg_msi_irq_handler,
++ msir);
++
++ if (msi_affinity_flag) {
++ /* Associate MSIR interrupt to the cpu */
++ irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
++ msir->srs = 0; /* This value is determined by the CPU */
++ } else
++ msir->srs = index;
++
++ /* Release the hwirqs corresponding to this MSIR */
++ if (!msi_affinity_flag || msir->index == 0) {
++ for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
++ hwirq = i << msi_data->cfg->ibs_shift | msir->index;
++ bitmap_clear(msi_data->used, hwirq, 1);
++ }
++ }
++
++ return 0;
++}
++
++static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
++{
++ struct ls_scfg_msi *msi_data = msir->msi_data;
++ int i, hwirq;
++
++ if (msir->gic_irq > 0)
++ irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
++
++ for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
++ hwirq = i << msi_data->cfg->ibs_shift | msir->index;
++ bitmap_set(msi_data->used, hwirq, 1);
++ }
++
++ return 0;
++}
++
++static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
++ .ibs_shift = 3,
++ .msir_irqs = MSI_IRQS_PER_MSIR,
++ .msir_base = MSI_MSIR_OFFSET,
++};
++
++static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
++ .ibs_shift = 2,
++ .msir_irqs = MSI_IRQS_PER_MSIR,
++ .msir_base = MSI_MSIR_OFFSET,
++};
++
++static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
++ .ibs_shift = 2,
++ .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
++ .msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
++};
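++
++/* irqs_num computed in probe is MSI_IRQS_PER_MSIR * (1 << ibs_shift):
++ * 256 hwirqs for ls1021 (ibs_shift == 3), 128 for ls1046 (ibs_shift == 2). */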
++
++static const struct of_device_id ls_scfg_msi_id[] = {
++ /* The following two misspelled compatibles are obsolete */
++ { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
++ { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
++
++ { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
++ { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
++ { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
++ { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
++ {},
++};
++MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
++
+ static int ls_scfg_msi_probe(struct platform_device *pdev)
+ {
++ const struct of_device_id *match;
+ struct ls_scfg_msi *msi_data;
+ struct resource *res;
+- int ret;
++ int i, ret;
++
++ match = of_match_device(ls_scfg_msi_id, &pdev->dev);
++ if (!match)
++ return -ENODEV;
+
+ msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
++ msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
++
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msi_data->regs)) {
+@@ -182,23 +349,48 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
+ }
+ msi_data->msiir_addr = res->start;
+
+- msi_data->irq = platform_get_irq(pdev, 0);
+- if (msi_data->irq <= 0) {
+- dev_err(&pdev->dev, "failed to get MSI irq\n");
+- return -ENODEV;
+- }
+-
+ msi_data->pdev = pdev;
+ spin_lock_init(&msi_data->lock);
+
++ msi_data->irqs_num = MSI_IRQS_PER_MSIR *
++ (1 << msi_data->cfg->ibs_shift);
++ msi_data->used = devm_kcalloc(&pdev->dev,
++ BITS_TO_LONGS(msi_data->irqs_num),
++ sizeof(*msi_data->used),
++ GFP_KERNEL);
++ if (!msi_data->used)
++ return -ENOMEM;
++ /*
++ * Reserve all the hwirqs
++ * The available hwirqs will be released in ls1_msi_setup_hwirq()
++ */
++ bitmap_set(msi_data->used, 0, msi_data->irqs_num);
++
++ msi_data->msir_num = of_irq_count(pdev->dev.of_node);
++
++ if (msi_affinity_flag) {
++ u32 cpu_num;
++
++ cpu_num = num_possible_cpus();
++ if (msi_data->msir_num >= cpu_num)
++ msi_data->msir_num = cpu_num;
++ else
++ msi_affinity_flag = 0;
++ }
++
++ msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
++ sizeof(*msi_data->msir),
++ GFP_KERNEL);
++ if (!msi_data->msir)
++ return -ENOMEM;
++
++ for (i = 0; i < msi_data->msir_num; i++)
++ ls_scfg_msi_setup_hwirq(msi_data, i);
++
+ ret = ls_scfg_msi_domains_init(msi_data);
+ if (ret)
+ return ret;
+
+- irq_set_chained_handler_and_data(msi_data->irq,
+- ls_scfg_msi_irq_handler,
+- msi_data);
+-
+ platform_set_drvdata(pdev, msi_data);
+
+ return 0;
+@@ -207,8 +399,10 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
+ static int ls_scfg_msi_remove(struct platform_device *pdev)
+ {
+ struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
++ int i;
+
+- irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);
++ for (i = 0; i < msi_data->msir_num; i++)
++ ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
+
+ irq_domain_remove(msi_data->msi_domain);
+ irq_domain_remove(msi_data->parent);
+@@ -218,12 +412,6 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static const struct of_device_id ls_scfg_msi_id[] = {
+- { .compatible = "fsl,1s1021a-msi", },
+- { .compatible = "fsl,1s1043a-msi", },
+- {},
+-};
+-
+ static struct platform_driver ls_scfg_msi_driver = {
+ .driver = {
+ .name = "ls-scfg-msi",
+diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
+index 084cb498..88e87704 100644
+--- a/drivers/pci/host/Makefile
++++ b/drivers/pci/host/Makefile
+@@ -17,7 +17,7 @@ obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
+ obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
+ obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
+ obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
+-obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
++obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o pci-layerscape-ep-debugfs.o
+ obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
+ obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
+ obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
+diff --git a/drivers/pci/host/pci-layerscape-ep-debugfs.c b/drivers/pci/host/pci-layerscape-ep-debugfs.c
+new file mode 100644
+index 00000000..5f4870ba
+--- /dev/null
++++ b/drivers/pci/host/pci-layerscape-ep-debugfs.c
+@@ -0,0 +1,758 @@
++/*
++ * PCIe Endpoint driver for Freescale Layerscape SoCs
++ *
++ * Copyright (C) 2015 Freescale Semiconductor.
++ *
++ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include <linux/time.h>
++#include <linux/uaccess.h>
++#include <linux/kthread.h>
++#include <linux/slab.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/freezer.h>
++
++#include <linux/completion.h>
++
++#include "pci-layerscape-ep.h"
++
++#define PCIE_ATU_INDEX3 (0x3 << 0)
++#define PCIE_ATU_INDEX2 (0x2 << 0)
++#define PCIE_ATU_INDEX1 (0x1 << 0)
++#define PCIE_ATU_INDEX0 (0x0 << 0)
++
++#define PCIE_BAR0_SIZE (4 * 1024) /* 4K */
++#define PCIE_BAR1_SIZE (8 * 1024) /* 8K for MSIX */
++#define PCIE_BAR2_SIZE (4 * 1024) /* 4K */
++#define PCIE_BAR4_SIZE (1 * 1024 * 1024) /* 1M */
++#define PCIE_MSI_OB_SIZE (4 * 1024) /* 4K */
++
++#define PCIE_MSI_MSG_ADDR_OFF 0x54
++#define PCIE_MSI_MSG_DATA_OFF 0x5c
++
++enum test_type {
++ TEST_TYPE_DMA,
++ TEST_TYPE_MEMCPY
++};
++
++enum test_dirt {
++ TEST_DIRT_READ,
++ TEST_DIRT_WRITE
++};
++
++enum test_status {
++ TEST_IDLE,
++ TEST_BUSY
++};
++
++struct ls_ep_test {
++ struct ls_ep_dev *ep;
++ void __iomem *cfg;
++ void __iomem *buf;
++ void __iomem *out;
++ void __iomem *msi;
++ dma_addr_t cfg_addr;
++ dma_addr_t buf_addr;
++ dma_addr_t out_addr;
++ dma_addr_t bus_addr;
++ dma_addr_t msi_addr;
++ u64 msi_msg_addr;
++ u16 msi_msg_data;
++ struct task_struct *thread;
++ spinlock_t lock;
++ struct completion done;
++ u32 len;
++ int loop;
++ char data;
++ enum test_dirt dirt;
++ enum test_type type;
++ enum test_status status;
++ u64 result; /* Mbps */
++ char cmd[256];
++};
++
++static int ls_pcie_ep_trigger_msi(struct ls_ep_test *test)
++{
++ if (!test->msi)
++ return -EINVAL;
++
++ iowrite32(test->msi_msg_data, test->msi);
++
++ return 0;
++}
++
++static int ls_pcie_ep_test_try_run(struct ls_ep_test *test)
++{
++ int ret;
++
++ spin_lock(&test->lock);
++ if (test->status == TEST_IDLE) {
++ test->status = TEST_BUSY;
++ ret = 0;
++ } else
++ ret = -EBUSY;
++ spin_unlock(&test->lock);
++
++ return ret;
++}
++
++static void ls_pcie_ep_test_done(struct ls_ep_test *test)
++{
++ spin_lock(&test->lock);
++ test->status = TEST_IDLE;
++ spin_unlock(&test->lock);
++}
++
++static void ls_pcie_ep_test_dma_cb(void *arg)
++{
++ struct ls_ep_test *test = arg;
++
++ complete(&test->done);
++}
++
++static int ls_pcie_ep_test_dma(struct ls_ep_test *test)
++{
++ dma_cap_mask_t mask;
++ struct dma_chan *chan;
++ struct dma_device *dma_dev;
++ dma_addr_t src, dst;
++ enum dma_data_direction direction;
++ enum dma_ctrl_flags dma_flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
++ struct timespec start, end, period;
++ int i = 0;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++
++ chan = dma_request_channel(mask, NULL, test);
++ if (!chan) {
++ pr_err("failed to request dma channel\n");
++ return -EINVAL;
++ }
++
++ memset(test->buf, test->data, test->len);
++
++ if (test->dirt == TEST_DIRT_WRITE) {
++ src = test->buf_addr;
++ dst = test->out_addr;
++ direction = DMA_TO_DEVICE;
++ } else {
++ src = test->out_addr;
++ dst = test->buf_addr;
++ direction = DMA_FROM_DEVICE;
++ }
++
++ dma_dev = chan->device;
++ dma_flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
++
++ dma_sync_single_for_device(&test->ep->dev, test->buf_addr,
++ test->len, direction);
++
++ set_freezable();
++
++ getrawmonotonic(&start);
++ while (!kthread_should_stop() && (i < test->loop)) {
++ struct dma_async_tx_descriptor *dma_desc;
++ dma_cookie_t dma_cookie = {0};
++ unsigned long tmo;
++ int status;
++
++ init_completion(&test->done);
++
++ dma_desc = dma_dev->device_prep_dma_memcpy(chan,
++ dst, src,
++ test->len,
++ dma_flags);
++ if (!dma_desc) {
++ pr_err("DMA desc constr failed...\n");
++ goto _err;
++ }
++
++ dma_desc->callback = ls_pcie_ep_test_dma_cb;
++ dma_desc->callback_param = test;
++ dma_cookie = dmaengine_submit(dma_desc);
++
++ if (dma_submit_error(dma_cookie)) {
++ pr_err("DMA submit error....\n");
++ goto _err;
++ }
++
++ /* Trigger the transaction */
++ dma_async_issue_pending(chan);
++
++ tmo = wait_for_completion_timeout(&test->done,
++ msecs_to_jiffies(5 * test->len));
++ if (tmo == 0) {
++ pr_err("DMA test copy timed out\n");
++ goto _err;
++ }
++
++ status = dma_async_is_tx_complete(chan, dma_cookie,
++ NULL, NULL);
++ if (status != DMA_COMPLETE) {
++ pr_err("got completion callback, but status is %s\n",
++ status == DMA_ERROR ? "error" : "in progress");
++ goto _err;
++ }
++
++ i++;
++ }
++
++ getrawmonotonic(&end);
++ period = timespec_sub(end, start);
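++	/* Throughput in Mbit/s: (len bytes * 8 * loops * 1000) / elapsed ns */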
++ test->result = test->len * 8ULL * i * 1000;
++ do_div(test->result, period.tv_sec * 1000 * 1000 * 1000 + period.tv_nsec);
++ dma_release_channel(chan);
++
++ return 0;
++
++_err:
++ dma_release_channel(chan);
++ test->result = 0;
++ return -EINVAL;
++}
++
++static int ls_pcie_ep_test_cpy(struct ls_ep_test *test)
++{
++ void *dst, *src;
++ struct timespec start, end, period;
++ int i = 0;
++
++ memset(test->buf, test->data, test->len);
++
++ if (test->dirt == TEST_DIRT_WRITE) {
++ dst = test->out;
++ src = test->buf;
++ } else {
++ dst = test->buf;
++ src = test->out;
++ }
++
++ getrawmonotonic(&start);
++ while (!kthread_should_stop() && i < test->loop) {
++ memcpy(dst, src, test->len);
++ i++;
++ }
++ getrawmonotonic(&end);
++
++ period = timespec_sub(end, start);
++ test->result = test->len * 8ULL * i * 1000;
++ do_div(test->result, period.tv_sec * 1000 * 1000 * 1000 + period.tv_nsec);
++
++ return 0;
++}
++
++int ls_pcie_ep_test_thread(void *arg)
++{
++ int ret;
++
++ struct ls_ep_test *test = arg;
++
++ if (test->type == TEST_TYPE_DMA)
++ ret = ls_pcie_ep_test_dma(test);
++ else
++ ret = ls_pcie_ep_test_cpy(test);
++
++ if (ret) {
++ pr_err("\n%s \ttest failed\n",
++ test->cmd);
++ test->result = 0;
++ } else
++ pr_err("\n%s \tthroughput:%lluMbps\n",
++ test->cmd, test->result);
++
++ ls_pcie_ep_test_done(test);
++
++ ls_pcie_ep_trigger_msi(test);
++
++ do_exit(0);
++}
++
++static int ls_pcie_ep_free_test(struct ls_ep_dev *ep)
++{
++ struct ls_ep_test *test = ep->driver_data;
++
++ if (!test)
++ return 0;
++
++ if (test->status == TEST_BUSY) {
++ kthread_stop(test->thread);
++ dev_info(&ep->dev,
++ "test is still running; please wait and try again\n");
++ return -EBUSY;
++ }
++
++ if (test->buf)
++ free_pages((unsigned long)test->buf,
++ get_order(PCIE_BAR4_SIZE));
++
++ if (test->cfg)
++ free_pages((unsigned long)test->cfg,
++ get_order(PCIE_BAR2_SIZE));
++
++ if (test->out)
++ iounmap(test->out);
++
++ kfree(test);
++ ep->driver_data = NULL;
++
++ return 0;
++}
++
++static int ls_pcie_ep_init_test(struct ls_ep_dev *ep, u64 bus_addr)
++{
++ struct ls_pcie *pcie = ep->pcie;
++ struct ls_ep_test *test = ep->driver_data;
++ int err;
++
++ if (test) {
++ dev_info(&ep->dev,
++ "Please use 'free' to remove the existing test\n");
++ return -EBUSY;
++ }
++
++ test = kzalloc(sizeof(*test), GFP_KERNEL);
++ if (!test)
++ return -ENOMEM;
++ ep->driver_data = test;
++ test->ep = ep;
++ spin_lock_init(&test->lock);
++ test->status = TEST_IDLE;
++
++ test->buf = dma_alloc_coherent(pcie->dev, get_order(PCIE_BAR4_SIZE),
++ &test->buf_addr,
++ GFP_KERNEL);
++ if (!test->buf) {
++ dev_info(&ep->dev, "failed to get mem for bar4\n");
++ err = -ENOMEM;
++ goto _err;
++ }
++
++ test->cfg = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
++ get_order(PCIE_BAR2_SIZE));
++ if (!test->cfg) {
++ dev_info(&ep->dev, "failed to get mem for bar2\n");
++ err = -ENOMEM;
++ goto _err;
++ }
++ test->cfg_addr = virt_to_phys(test->cfg);
++
++ test->out_addr = pcie->out_base;
++ test->out = ioremap(test->out_addr, PCIE_BAR4_SIZE);
++ if (!test->out) {
++ dev_info(&ep->dev, "failed to map out\n");
++ err = -ENOMEM;
++ goto _err;
++ }
++
++ test->bus_addr = bus_addr;
++
++ test->msi_addr = test->out_addr + PCIE_BAR4_SIZE;
++ test->msi = ioremap(test->msi_addr, PCIE_MSI_OB_SIZE);
++ if (!test->msi)
++ dev_info(&ep->dev, "failed to map MSI outbound region\n");
++
++ test->msi_msg_addr = ioread32(pcie->dbi + PCIE_MSI_MSG_ADDR_OFF) |
++ (((u64)ioread32(pcie->dbi + PCIE_MSI_MSG_ADDR_OFF + 4)) << 32);
++ test->msi_msg_data = ioread16(pcie->dbi + PCIE_MSI_MSG_DATA_OFF);
++
++ ls_pcie_ep_dev_cfg_enable(ep);
++
++ /* outbound iATU for memory */
++ ls_pcie_iatu_outbound_set(pcie, 0, PCIE_ATU_TYPE_MEM,
++ test->out_addr, bus_addr, PCIE_BAR4_SIZE);
++ /* outbound iATU for MSI */
++ ls_pcie_iatu_outbound_set(pcie, 1, PCIE_ATU_TYPE_MEM,
++ test->msi_addr, test->msi_msg_addr,
++ PCIE_MSI_OB_SIZE);
++
++ /* ATU 0 : INBOUND : map BAR0 */
++ ls_pcie_iatu_inbound_set(pcie, 0, 0, test->cfg_addr);
++ /* ATU 2 : INBOUND : map BAR2 */
++ ls_pcie_iatu_inbound_set(pcie, 2, 2, test->cfg_addr);
++ /* ATU 3 : INBOUND : map BAR4 */
++ ls_pcie_iatu_inbound_set(pcie, 3, 4, test->buf_addr);
++
++ return 0;
++
++_err:
++ ls_pcie_ep_free_test(ep);
++ return err;
++}
++
++static int ls_pcie_ep_start_test(struct ls_ep_dev *ep, char *cmd)
++{
++ struct ls_ep_test *test = ep->driver_data;
++ enum test_type type;
++ enum test_dirt dirt;
++ u32 cnt, len, loop;
++ unsigned int data;
++ char dirt_str[2];
++ int ret = 0; /* stays 0 when kthread_run() succeeds */
++
++ if (strncmp(cmd, "dma", 3) == 0)
++ type = TEST_TYPE_DMA;
++ else
++ type = TEST_TYPE_MEMCPY;
++
++ cnt = sscanf(&cmd[4], "%1s %u %u %x", dirt_str, &len, &loop, &data);
++ if (cnt != 4) {
++ dev_info(&ep->dev, "format error %s", cmd);
++ dev_info(&ep->dev, "dma/cpy <r/w> <packet_size> <loop> <data>\n");
++ return -EINVAL;
++ }
++
++ if (strncmp(dirt_str, "r", 1) == 0)
++ dirt = TEST_DIRT_READ;
++ else
++ dirt = TEST_DIRT_WRITE;
++
++ if (len > PCIE_BAR4_SIZE) {
++ dev_err(&ep->dev, "max len is %d", PCIE_BAR4_SIZE);
++ return -EINVAL;
++ }
++
++ if (!test) {
++ dev_err(&ep->dev, "Please first run init command\n");
++ return -EINVAL;
++ }
++
++ if (ls_pcie_ep_test_try_run(test)) {
++ dev_err(&ep->dev, "There is already a test running\n");
++ return -EINVAL;
++ }
++
++ test->len = len;
++ test->loop = loop;
++ test->type = type;
++ test->data = (char)data;
++ test->dirt = dirt;
++ strcpy(test->cmd, cmd);
++ test->thread = kthread_run(ls_pcie_ep_test_thread, test,
++ "pcie ep test");
++ if (IS_ERR(test->thread)) {
++ dev_err(&ep->dev, "fork failed for pcie ep test\n");
++ ls_pcie_ep_test_done(test);
++ ret = PTR_ERR(test->thread);
++ }
++
++ return ret;
++}
++
++
++/**
++ * ls_pcie_reg_ops_read - read for regs data
++ * @filp: the opened file
++ * @buffer: where to write the data for the user to read
++ * @count: the size of the user's buffer
++ * @ppos: file position offset
++ **/
++static ssize_t ls_pcie_ep_dbg_regs_read(struct file *filp, char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ struct ls_pcie *pcie = ep->pcie;
++ char *buf;
++ int desc = 0, i, len;
++
++ buf = kmalloc(4 * 1024, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ ls_pcie_ep_dev_cfg_enable(ep);
++
++ desc += sprintf(buf + desc, "%s", "reg info:");
++ for (i = 0; i < 0x200; i += 4) {
++ if (i % 16 == 0)
++ desc += sprintf(buf + desc, "\n%08x:", i);
++ desc += sprintf(buf + desc, " %08x", readl(pcie->dbi + i));
++ }
++
++ desc += sprintf(buf + desc, "\n%s", "outbound iATU info:\n");
++ for (i = 0; i < 6; i++) {
++ writel(PCIE_ATU_REGION_OUTBOUND | i,
++ pcie->dbi + PCIE_ATU_VIEWPORT);
++ desc += sprintf(buf + desc, "iATU%d", i);
++ desc += sprintf(buf + desc, "\tLOWER PHYS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LOWER_BASE));
++ desc += sprintf(buf + desc, "\tUPPER PHYS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_UPPER_BASE));
++ desc += sprintf(buf + desc, "\tLOWER BUS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LOWER_TARGET));
++ desc += sprintf(buf + desc, "\tUPPER BUS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_UPPER_TARGET));
++ desc += sprintf(buf + desc, "\tLIMIT 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LIMIT));
++ desc += sprintf(buf + desc, "\tCR1 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_CR1));
++ desc += sprintf(buf + desc, "\tCR2 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_CR2));
++ }
++
++ desc += sprintf(buf + desc, "\n%s", "inbound iATU info:\n");
++ for (i = 0; i < 6; i++) {
++ writel(PCIE_ATU_REGION_INBOUND | i,
++ pcie->dbi + PCIE_ATU_VIEWPORT);
++ desc += sprintf(buf + desc, "iATU%d", i);
++ desc += sprintf(buf + desc, "\tLOWER BUS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LOWER_BASE));
++ desc += sprintf(buf + desc, "\tUPPER BUS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_UPPER_BASE));
++ desc += sprintf(buf + desc, "\tLOWER PHYS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LOWER_TARGET));
++ desc += sprintf(buf + desc, "\tUPPER PHYS 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_UPPER_TARGET));
++ desc += sprintf(buf + desc, "\tLIMIT 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_LIMIT));
++ desc += sprintf(buf + desc, "\tCR1 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_CR1));
++ desc += sprintf(buf + desc, "\tCR2 0x%08x\n",
++ readl(pcie->dbi + PCIE_ATU_CR2));
++ }
++
++ len = simple_read_from_buffer(buffer, count, ppos, buf, desc);
++ kfree(buf);
++
++ return len;
++}
++
++/**
++ * ls_pcie_ep_dbg_regs_write - write into regs datum
++ * @filp: the opened file
++ * @buffer: where to find the user's data
++ * @count: the length of the user's data
++ * @ppos: file position offset
++ **/
++static ssize_t ls_pcie_ep_dbg_regs_write(struct file *filp,
++ const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ struct ls_pcie *pcie = ep->pcie;
++ char buf[256];
++
++ if (count >= sizeof(buf))
++ return -ENOSPC;
++
++ memset(buf, 0, sizeof(buf));
++
++ if (copy_from_user(buf, buffer, count))
++ return -EFAULT;
++
++ ls_pcie_ep_dev_cfg_enable(ep);
++
++ if (strncmp(buf, "reg", 3) == 0) {
++ u32 reg, value;
++ int cnt;
++
++ cnt = sscanf(&buf[3], "%x %x", &reg, &value);
++ if (cnt == 2) {
++ writel(value, pcie->dbi + reg);
++ value = readl(pcie->dbi + reg);
++ dev_info(&ep->dev, "reg 0x%08x: 0x%08x\n",
++ reg, value);
++ } else {
++ dev_info(&ep->dev, "reg <reg> <value>\n");
++ }
++ } else if (strncmp(buf, "atu", 3) == 0) {
++ /* to do */
++ dev_info(&ep->dev, " Not support atu command\n");
++ } else {
++ dev_info(&ep->dev, "Unknown command %s\n", buf);
++ dev_info(&ep->dev, "Available commands:\n");
++ dev_info(&ep->dev, " reg <reg> <value>\n");
++ }
++
++ return count;
++}
++
++static const struct file_operations ls_pcie_ep_dbg_regs_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = ls_pcie_ep_dbg_regs_read,
++ .write = ls_pcie_ep_dbg_regs_write,
++};
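++
++/*
++ * Illustrative usage of the "regs" entry (a sketch; the debugfs path depends
++ * on where debugfs is mounted and on the controller/device names):
++ *
++ *   cat /sys/kernel/debug/<pcie>/<pf>/regs      # dump DBI regs and iATUs
++ *   echo "reg 0x8bc 0x1" > <...>/regs           # write a DBI register
++ *                                               # (here PCIE_DBI_RO_WR_EN)
++ */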
++
++static ssize_t ls_pcie_ep_dbg_test_read(struct file *filp,
++ char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ struct ls_ep_test *test = ep->driver_data;
++ char buf[512];
++ int desc = 0, len;
++
++ if (!test) {
++ dev_info(&ep->dev, " there is NO test\n");
++ return 0;
++ }
++
++ if (test->status != TEST_IDLE) {
++ dev_info(&ep->dev, "test %s is running\n", test->cmd);
++ return 0;
++ }
++
++ desc = sprintf(buf, "MSI ADDR:0x%llx MSI DATA:0x%x\n",
++ test->msi_msg_addr, test->msi_msg_data);
++
++ desc += sprintf(buf + desc, "%s throughput:%lluMbps\n",
++ test->cmd, test->result);
++
++ len = simple_read_from_buffer(buffer, count, ppos,
++ buf, desc);
++
++ return len;
++}
++
++static ssize_t ls_pcie_ep_dbg_test_write(struct file *filp,
++ const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ char buf[256];
++
++ if (count >= sizeof(buf))
++ return -ENOSPC;
++
++ memset(buf, 0, sizeof(buf));
++
++ if (copy_from_user(buf, buffer, count))
++ return -EFAULT;
++
++ if (strncmp(buf, "init", 4) == 0) {
++ int i = 4;
++ u64 bus_addr;
++
++ while (buf[i] == ' ')
++ i++;
++
++ if (kstrtou64(&buf[i], 0, &bus_addr))
++ dev_info(&ep->dev, "command: init <bus_addr>\n");
++ else {
++ if (ls_pcie_ep_init_test(ep, bus_addr))
++ dev_info(&ep->dev, "failed to init test\n");
++ }
++ } else if (strncmp(buf, "free", 4) == 0)
++ ls_pcie_ep_free_test(ep);
++ else if (strncmp(buf, "dma", 3) == 0 ||
++ strncmp(buf, "cpy", 3) == 0)
++ ls_pcie_ep_start_test(ep, buf);
++ else {
++ dev_info(&ep->dev, "Unknown command: %s\n", buf);
++ dev_info(&ep->dev, "Available commands:\n");
++ dev_info(&ep->dev, "\tinit <bus_addr>\n");
++ dev_info(&ep->dev, "\t<dma/cpy> <r/w> <packet_size> <loop>\n");
++ dev_info(&ep->dev, "\tfree\n");
++ }
++
++ return count;
++}
++
++static const struct file_operations ls_pcie_ep_dbg_test_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = ls_pcie_ep_dbg_test_read,
++ .write = ls_pcie_ep_dbg_test_write,
++};
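++
++/*
++ * Illustrative test session (a sketch; the path and the bus address are
++ * placeholders, the command syntax follows the usage hints printed above):
++ *
++ *   echo "init 0x80000000" > /sys/kernel/debug/<pcie>/<pf>/test
++ *   echo "dma w 1024 100" > <...>/test  # DMA write, 1024-byte packets, 100 loops
++ *   cat <...>/test                      # MSI info and measured throughput
++ *   echo "free" > <...>/test
++ */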
++
++static ssize_t ls_pcie_ep_dbg_dump_read(struct file *filp,
++ char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct ls_ep_dev *ep = filp->private_data;
++ struct ls_ep_test *test = ep->driver_data;
++ char *buf;
++ int desc = 0, i, len;
++
++ if (!test) {
++ dev_info(&ep->dev, "no test has been initialized\n");
++ return 0;
++ }
++
++ buf = kmalloc(4 * 1024, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ desc += sprintf(buf + desc, "%s", "dump info:");
++ for (i = 0; i < 256; i += 4) {
++ if (i % 16 == 0)
++ desc += sprintf(buf + desc, "\n%08x:", i);
++ desc += sprintf(buf + desc, " %08x", readl(test->buf + i));
++ }
++
++ desc += sprintf(buf + desc, "\n");
++ len = simple_read_from_buffer(buffer, count, ppos, buf, desc);
++
++ kfree(buf);
++
++ return len;
++}
++
++static const struct file_operations ls_pcie_ep_dbg_dump_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = ls_pcie_ep_dbg_dump_read,
++};
++
++static int ls_pcie_ep_dev_dbgfs_init(struct ls_ep_dev *ep)
++{
++ struct ls_pcie *pcie = ep->pcie;
++ struct dentry *pfile;
++
++ ls_pcie_ep_dev_cfg_enable(ep);
++
++ ep->dir = debugfs_create_dir(dev_name(&ep->dev), pcie->dir);
++ if (!ep->dir)
++ return -ENOMEM;
++
++ pfile = debugfs_create_file("regs", 0600, ep->dir, ep,
++ &ls_pcie_ep_dbg_regs_fops);
++ if (!pfile)
++ dev_info(&ep->dev, "debugfs regs for failed\n");
++
++ pfile = debugfs_create_file("test", 0600, ep->dir, ep,
++ &ls_pcie_ep_dbg_test_fops);
++ if (!pfile)
++ dev_info(&ep->dev, "debugfs test for failed\n");
++
++ pfile = debugfs_create_file("dump", 0600, ep->dir, ep,
++ &ls_pcie_ep_dbg_dump_fops);
++ if (!pfile)
++ dev_info(&ep->dev, "debugfs dump for failed\n");
++
++ return 0;
++}
++
++int ls_pcie_ep_dbgfs_init(struct ls_pcie *pcie)
++{
++ struct ls_ep_dev *ep;
++
++ pcie->dir = debugfs_create_dir(dev_name(pcie->dev), NULL);
++ if (!pcie->dir)
++ return -ENOMEM;
++
++ list_for_each_entry(ep, &pcie->ep_list, node)
++ ls_pcie_ep_dev_dbgfs_init(ep);
++
++ return 0;
++}
++
++int ls_pcie_ep_dbgfs_remove(struct ls_pcie *pcie)
++{
++ debugfs_remove_recursive(pcie->dir);
++ return 0;
++}
++
++MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
++MODULE_DESCRIPTION("Freescale Layerscape PCIe EP controller driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/pci/host/pci-layerscape-ep.c b/drivers/pci/host/pci-layerscape-ep.c
+new file mode 100644
+index 00000000..8f1cca6e
+--- /dev/null
++++ b/drivers/pci/host/pci-layerscape-ep.c
+@@ -0,0 +1,309 @@
++/*
++ * PCIe Endpoint driver for Freescale Layerscape SoCs
++ *
++ * Copyright (C) 2015 Freescale Semiconductor.
++ *
++ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/of_pci.h>
++#include <linux/of_platform.h>
++#include <linux/of_irq.h>
++#include <linux/of_address.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/resource.h>
++#include <linux/debugfs.h>
++#include <linux/time.h>
++#include <linux/uaccess.h>
++
++#include "pci-layerscape-ep.h"
++
++struct ls_ep_dev *
++ls_pci_ep_find(struct ls_pcie *pcie, int dev_id)
++{
++ struct ls_ep_dev *ep;
++
++ list_for_each_entry(ep, &pcie->ep_list, node) {
++ if (ep->dev_id == dev_id)
++ return ep;
++ }
++
++ return NULL;
++}
++
++static void ls_pcie_try_cfg2(struct ls_pcie *pcie, int pf, int vf)
++{
++ if (pcie->sriov)
++ writel(PCIE_LCTRL0_VAL(pf, vf),
++ pcie->dbi + PCIE_LUT_BASE + PCIE_LUT_LCTRL0);
++}
++
++static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
++{
++ u32 header_type = 0;
++
++ header_type = readl(pcie->dbi + (PCI_HEADER_TYPE & ~0x3));
++ header_type = (header_type >> 16) & 0x7f;
++
++ return header_type == PCI_HEADER_TYPE_BRIDGE;
++}
++
++void ls_pcie_iatu_outbound_set(struct ls_pcie *pcie, int idx, int type,
++ u64 cpu_addr, u64 pci_addr, u32 size)
++{
++ writel(PCIE_ATU_REGION_OUTBOUND | idx,
++ pcie->dbi + PCIE_ATU_VIEWPORT);
++ writel(lower_32_bits(cpu_addr),
++ pcie->dbi + PCIE_ATU_LOWER_BASE);
++ writel(upper_32_bits(cpu_addr),
++ pcie->dbi + PCIE_ATU_UPPER_BASE);
++ writel(lower_32_bits(cpu_addr + size - 1),
++ pcie->dbi + PCIE_ATU_LIMIT);
++ writel(lower_32_bits(pci_addr),
++ pcie->dbi + PCIE_ATU_LOWER_TARGET);
++ writel(upper_32_bits(pci_addr),
++ pcie->dbi + PCIE_ATU_UPPER_TARGET);
++ writel(type, pcie->dbi + PCIE_ATU_CR1);
++ writel(PCIE_ATU_ENABLE, pcie->dbi + PCIE_ATU_CR2);
++}
++
++/* Use bar match mode and MEM type as default */
++void ls_pcie_iatu_inbound_set(struct ls_pcie *pcie, int idx,
++ int bar, u64 phys)
++{
++ writel(PCIE_ATU_REGION_INBOUND | idx, pcie->dbi + PCIE_ATU_VIEWPORT);
++ writel((u32)phys, pcie->dbi + PCIE_ATU_LOWER_TARGET);
++ writel(phys >> 32, pcie->dbi + PCIE_ATU_UPPER_TARGET);
++ writel(PCIE_ATU_TYPE_MEM, pcie->dbi + PCIE_ATU_CR1);
++ writel(PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
++ PCIE_ATU_BAR_NUM(bar), pcie->dbi + PCIE_ATU_CR2);
++}
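++
++/*
++ * Example (a sketch; buf_phys is a placeholder, not a value mandated by the
++ * driver): map inbound window 0 so that host accesses to BAR0 land in a
++ * local buffer at buf_phys:
++ *
++ *   ls_pcie_iatu_inbound_set(pcie, 0, 0, buf_phys);
++ */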
++
++void ls_pcie_ep_dev_cfg_enable(struct ls_ep_dev *ep)
++{
++ ls_pcie_try_cfg2(ep->pcie, ep->pf_idx, ep->vf_idx);
++}
++
++void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
++{
++ if (size < 4 * 1024)
++ return;
++
++ switch (bar) {
++ case 0:
++ writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
++ break;
++ case 1:
++ writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
++ break;
++ case 2:
++ writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
++ writel(0, bar_base + PCI_BASE_ADDRESS_3);
++ break;
++ case 4:
++ writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
++ writel(0, bar_base + PCI_BASE_ADDRESS_5);
++ break;
++ default:
++ break;
++ }
++}
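++
++/*
++ * For example, ls_pcie_ep_setup_bar(base, 0, 8 * 1024) writes 0x1fff (the
++ * usual size - 1 BAR mask for an 8 KiB window) into BAR0; requests smaller
++ * than 4 KiB are ignored by the check above.
++ */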
++
++void ls_pcie_ep_dev_setup_bar(struct ls_ep_dev *ep, int bar, u32 size)
++{
++ struct ls_pcie *pcie = ep->pcie;
++ void *bar_base;
++
++ if (size < 4 * 1024)
++ return;
++
++ if (pcie->sriov)
++ bar_base = pcie->dbi;
++ else
++ bar_base = pcie->dbi + PCIE_NO_SRIOV_BAR_BASE;
++
++ ls_pcie_ep_dev_cfg_enable(ep);
++ ls_pcie_ep_setup_bar(bar_base, bar, size);
++}
++
++static int ls_pcie_ep_dev_init(struct ls_pcie *pcie, int pf_idx, int vf_idx)
++{
++ struct ls_ep_dev *ep;
++
++ ep = devm_kzalloc(pcie->dev, sizeof(*ep), GFP_KERNEL);
++ if (!ep)
++ return -ENOMEM;
++
++ ep->pcie = pcie;
++ ep->pf_idx = pf_idx;
++ ep->vf_idx = vf_idx;
++ if (vf_idx)
++ ep->dev_id = pf_idx + 4 + 4 * (vf_idx - 1);
++ else
++ ep->dev_id = pf_idx;
++
++ if (ep->vf_idx)
++ dev_set_name(&ep->dev, "pf%d-vf%d",
++ ep->pf_idx,
++ ep->vf_idx);
++ else
++ dev_set_name(&ep->dev, "pf%d",
++ ep->pf_idx);
++
++ list_add_tail(&ep->node, &pcie->ep_list);
++
++ return 0;
++}
++
++static int ls_pcie_ep_init(struct ls_pcie *pcie)
++{
++ u32 sriov_header;
++ int pf, vf, i, j;
++
++ sriov_header = readl(pcie->dbi + PCIE_SRIOV_POS);
++
++ if (PCI_EXT_CAP_ID(sriov_header) == PCI_EXT_CAP_ID_SRIOV) {
++ pcie->sriov = PCIE_SRIOV_POS;
++ pf = PCIE_PF_NUM;
++ vf = PCIE_VF_NUM;
++ } else {
++ pcie->sriov = 0;
++ pf = 1;
++ vf = 0;
++ }
++
++ for (i = 0; i < pf; i++) {
++ for (j = 0; j <= vf; j++)
++ ls_pcie_ep_dev_init(pcie, i, j);
++ }
++
++ return 0;
++}
++
++static struct ls_pcie_ep_drvdata ls1043_drvdata = {
++ .lut_offset = 0x10000,
++ .ltssm_shift = 24,
++ .lut_dbg = 0x7fc,
++};
++
++static struct ls_pcie_ep_drvdata ls1046_drvdata = {
++ .lut_offset = 0x80000,
++ .ltssm_shift = 24,
++ .lut_dbg = 0x407fc,
++};
++
++static struct ls_pcie_ep_drvdata ls2080_drvdata = {
++ .lut_offset = 0x80000,
++ .ltssm_shift = 0,
++ .lut_dbg = 0x7fc,
++};
++
++static const struct of_device_id ls_pcie_ep_of_match[] = {
++ { .compatible = "fsl,ls1021a-pcie", },
++ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
++ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
++ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
++ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
++ { },
++};
++MODULE_DEVICE_TABLE(of, ls_pcie_ep_of_match);
++
++static int ls_pcie_ep_probe(struct platform_device *pdev)
++{
++ struct ls_pcie *pcie;
++ struct resource *dbi_base, *cfg_res;
++ const struct of_device_id *match;
++ int ret;
++
++ match = of_match_device(ls_pcie_ep_of_match, &pdev->dev);
++ if (!match)
++ return -ENODEV;
++
++ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
++ if (!pcie)
++ return -ENOMEM;
++
++ pcie->dev = &pdev->dev;
++ INIT_LIST_HEAD(&pcie->ep_list);
++
++ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
++ pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
++ if (IS_ERR(pcie->dbi)) {
++ dev_err(&pdev->dev, "missing *regs* space\n");
++ return PTR_ERR(pcie->dbi);
++ }
++
++ pcie->drvdata = match->data;
++ pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
++
++ if (ls_pcie_is_bridge(pcie))
++ return -ENODEV;
++
++ dev_info(pcie->dev, "in EP mode\n");
++
++ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
++ if (cfg_res) {
++ pcie->out_base = cfg_res->start;
++ } else {
++ dev_err(&pdev->dev, "missing *config* space\n");
++ return -ENODEV;
++ }
++
++ ret = ls_pcie_ep_init(pcie);
++ if (ret)
++ return ret;
++
++ ls_pcie_ep_dbgfs_init(pcie);
++
++ platform_set_drvdata(pdev, pcie);
++
++ return 0;
++}
++
++static int ls_pcie_ep_dev_remove(struct ls_ep_dev *ep)
++{
++ list_del(&ep->node);
++
++ return 0;
++}
++
++static int ls_pcie_ep_remove(struct platform_device *pdev)
++{
++ struct ls_pcie *pcie = platform_get_drvdata(pdev);
++ struct ls_ep_dev *ep, *tmp;
++
++ if (!pcie)
++ return 0;
++
++ ls_pcie_ep_dbgfs_remove(pcie);
++
++ list_for_each_entry_safe(ep, tmp, &pcie->ep_list, node)
++ ls_pcie_ep_dev_remove(ep);
++
++ return 0;
++}
++
++static struct platform_driver ls_pcie_ep_driver = {
++ .driver = {
++ .name = "ls-pcie-ep",
++ .owner = THIS_MODULE,
++ .of_match_table = ls_pcie_ep_of_match,
++ },
++ .probe = ls_pcie_ep_probe,
++ .remove = ls_pcie_ep_remove,
++};
++
++module_platform_driver(ls_pcie_ep_driver);
++
++MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
++MODULE_DESCRIPTION("Freescale Layerscape PCIe EP driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/pci/host/pci-layerscape-ep.h b/drivers/pci/host/pci-layerscape-ep.h
+new file mode 100644
+index 00000000..990c0ff5
+--- /dev/null
++++ b/drivers/pci/host/pci-layerscape-ep.h
+@@ -0,0 +1,115 @@
++/*
++ * PCIe Endpoint driver for Freescale Layerscape SoCs
++ *
++ * Copyright (C) 2015 Freescale Semiconductor.
++ *
++ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _PCIE_LAYERSCAPE_EP_H
++#define _PCIE_LAYERSCAPE_EP_H
++
++#include <linux/device.h>
++
++/* Synopsys specific PCIe configuration registers */
++#define PCIE_ATU_VIEWPORT 0x900
++#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
++#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
++#define PCIE_ATU_REGION_INDEX3 (0x3 << 0)
++#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
++#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
++#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
++#define PCIE_ATU_CR1 0x904
++#define PCIE_ATU_TYPE_MEM (0x0 << 0)
++#define PCIE_ATU_TYPE_IO (0x2 << 0)
++#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
++#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
++#define PCIE_ATU_CR2 0x908
++#define PCIE_ATU_ENABLE (0x1 << 31)
++#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
++#define PCIE_ATU_LOWER_BASE 0x90C
++#define PCIE_ATU_UPPER_BASE 0x910
++#define PCIE_ATU_LIMIT 0x914
++#define PCIE_ATU_LOWER_TARGET 0x918
++#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
++#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
++#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
++#define PCIE_ATU_UPPER_TARGET 0x91C
++
++/* PEX internal configuration registers */
++#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
++
++/* PEX LUT registers */
++#define PCIE_LUT_BASE 0x80000
++#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug register */
++
++#define PCIE_LUT_LCTRL0 0x7F8
++
++#define PCIE_ATU_BAR_NUM(bar) ((bar) << 8)
++#define PCIE_LCTRL0_CFG2_ENABLE (1 << 31)
++#define PCIE_LCTRL0_VF(vf) ((vf) << 22)
++#define PCIE_LCTRL0_PF(pf) ((pf) << 16)
++#define PCIE_LCTRL0_VF_ACTIVE (1 << 21)
++#define PCIE_LCTRL0_VAL(pf, vf) (PCIE_LCTRL0_PF(pf) | \
++ PCIE_LCTRL0_VF(vf) | \
++ ((vf) == 0 ? 0 : PCIE_LCTRL0_VF_ACTIVE) | \
++ PCIE_LCTRL0_CFG2_ENABLE)
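++/* e.g. PCIE_LCTRL0_VAL(1, 2) = 0x80a10000: PF 1, VF 2, VF_ACTIVE and CFG2_ENABLE */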
++
++#define PCIE_NO_SRIOV_BAR_BASE 0x1000
++
++#define PCIE_SRIOV_POS 0x178
++#define PCIE_PF_NUM 2
++#define PCIE_VF_NUM 64
++
++struct ls_pcie_ep_drvdata {
++ u32 lut_offset;
++ u32 ltssm_shift;
++ u32 lut_dbg;
++};
++
++struct ls_pcie {
++ struct list_head ep_list;
++ struct device *dev;
++ struct dentry *dir;
++ const struct ls_pcie_ep_drvdata *drvdata;
++ void __iomem *dbi;
++ void __iomem *lut;
++ phys_addr_t out_base;
++ int sriov;
++ int index;
++};
++
++struct ls_ep_dev {
++ struct list_head node;
++ struct ls_pcie *pcie;
++ struct device dev;
++ struct dentry *dir;
++ int pf_idx;
++ int vf_idx;
++ int dev_id;
++ void *driver_data;
++};
++
++struct ls_ep_dev *ls_pci_ep_find(struct ls_pcie *pcie, int dev_id);
++
++void ls_pcie_iatu_outbound_set(struct ls_pcie *pcie, int idx, int type,
++ u64 cpu_addr, u64 pci_addr, u32 size);
++
++/* Use bar match mode and MEM type as default */
++void ls_pcie_iatu_inbound_set(struct ls_pcie *pcie, int idx,
++ int bar, u64 phys);
++
++void ls_pcie_ep_dev_setup_bar(struct ls_ep_dev *ep, int bar, u32 size);
++
++void ls_pcie_ep_dev_cfg_enable(struct ls_ep_dev *ep);
++
++int ls_pcie_ep_dbgfs_init(struct ls_pcie *pcie);
++int ls_pcie_ep_dbgfs_remove(struct ls_pcie *pcie);
++
++#endif /* _PCIE_LAYERSCAPE_EP_H */
+diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
+index 65370799..4713b872 100644
+--- a/drivers/pci/host/pci-layerscape.c
++++ b/drivers/pci/host/pci-layerscape.c
+@@ -35,12 +35,14 @@
+ #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
+ #define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
+
+-/* PEX LUT registers */
+-#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */
++#define PCIE_IATU_NUM 6
++
++static void ls_pcie_host_init(struct pcie_port *pp);
+
+ struct ls_pcie_drvdata {
+ u32 lut_offset;
+ u32 ltssm_shift;
++ u32 lut_dbg;
+ struct pcie_host_ops *ops;
+ };
+
+@@ -86,6 +88,14 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+ iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
+ }
+
++static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
++{
++ int i;
++
++ for (i = 0; i < PCIE_IATU_NUM; i++)
++ dw_pcie_disable_outbound_atu(&pcie->pp, i);
++}
++
+ static int ls1021_pcie_link_up(struct pcie_port *pp)
+ {
+ u32 state;
+@@ -134,7 +144,7 @@ static int ls_pcie_link_up(struct pcie_port *pp)
+ struct ls_pcie *pcie = to_ls_pcie(pp);
+ u32 state;
+
+- state = (ioread32(pcie->lut + PCIE_LUT_DBG) >>
++ state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
+ pcie->drvdata->ltssm_shift) &
+ LTSSM_STATE_MASK;
+
+@@ -153,6 +163,9 @@ static void ls_pcie_host_init(struct pcie_port *pp)
+ ls_pcie_clear_multifunction(pcie);
+ ls_pcie_drop_msg_tlp(pcie);
+ iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
++
++ ls_pcie_disable_outbound_atus(pcie);
++ dw_pcie_setup_rc(pp);
+ }
+
+ static int ls_pcie_msi_host_init(struct pcie_port *pp,
+@@ -196,20 +209,38 @@ static struct ls_pcie_drvdata ls1021_drvdata = {
+ static struct ls_pcie_drvdata ls1043_drvdata = {
+ .lut_offset = 0x10000,
+ .ltssm_shift = 24,
++ .lut_dbg = 0x7fc,
++ .ops = &ls_pcie_host_ops,
++};
++
++static struct ls_pcie_drvdata ls1046_drvdata = {
++ .lut_offset = 0x80000,
++ .ltssm_shift = 24,
++ .lut_dbg = 0x407fc,
+ .ops = &ls_pcie_host_ops,
+ };
+
+ static struct ls_pcie_drvdata ls2080_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 0,
++ .lut_dbg = 0x7fc,
++ .ops = &ls_pcie_host_ops,
++};
++
++static struct ls_pcie_drvdata ls2088_drvdata = {
++ .lut_offset = 0x80000,
++ .ltssm_shift = 0,
++ .lut_dbg = 0x407fc,
+ .ops = &ls_pcie_host_ops,
+ };
+
+ static const struct of_device_id ls_pcie_of_match[] = {
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
++ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
++ { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
+ { },
+ };
+
+diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
+index af8f6e92..2358e049 100644
+--- a/drivers/pci/host/pcie-designware.c
++++ b/drivers/pci/host/pcie-designware.c
+@@ -478,6 +478,12 @@ int dw_pcie_wait_for_link(struct pcie_port *pp)
+ return -ETIMEDOUT;
+ }
+
++void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index)
++{
++ dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | index);
++ dw_pcie_writel_rc(pp, PCIE_ATU_CR2, 0);
++}
++
+ int dw_pcie_link_up(struct pcie_port *pp)
+ {
+ u32 val;
+diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
+index a567ea28..4e6672b2 100644
+--- a/drivers/pci/host/pcie-designware.h
++++ b/drivers/pci/host/pcie-designware.h
+@@ -82,5 +82,6 @@ int dw_pcie_wait_for_link(struct pcie_port *pp);
+ int dw_pcie_link_up(struct pcie_port *pp);
+ void dw_pcie_setup_rc(struct pcie_port *pp);
+ int dw_pcie_host_init(struct pcie_port *pp);
++void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index);
+
+ #endif /* _PCIE_DESIGNWARE_H */
+diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
+index e9270b40..1bad877a 100644
+--- a/drivers/pci/pcie/portdrv_core.c
++++ b/drivers/pci/pcie/portdrv_core.c
+@@ -44,52 +44,30 @@ static void release_pcie_device(struct device *dev)
+ }
+
+ /**
+- * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
+- * @entries: Array of MSI-X entries
+- * @new_entry: Index of the entry to add to the array
+- * @nr_entries: Number of entries already in the array
++ * pcibios_check_service_irqs - check irqs in the device tree
++ * @dev: PCI Express port to handle
++ * @irqs: Array of irqs to populate
++ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
++ *
++ * Return value: 0 means no service irqs in the device tree
+ *
+- * Return value: Position of the added entry in the array
+ */
+-static int pcie_port_msix_add_entry(
+- struct msix_entry *entries, int new_entry, int nr_entries)
++int __weak pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+ {
+- int j;
+-
+- for (j = 0; j < nr_entries; j++)
+- if (entries[j].entry == new_entry)
+- return j;
+-
+- entries[j].entry = new_entry;
+- return j;
++ return 0;
+ }
+
+ /**
+ * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
+ * @dev: PCI Express port to handle
+- * @vectors: Array of interrupt vectors to populate
++ * @irqs: Array of interrupt vectors to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: 0 on success, error code on failure
+ */
+-static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
++static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
+ {
+- struct msix_entry *msix_entries;
+- int idx[PCIE_PORT_DEVICE_MAXSERVICES];
+- int nr_entries, status, pos, i, nvec;
+- u16 reg16;
+- u32 reg32;
+-
+- nr_entries = pci_msix_vec_count(dev);
+- if (nr_entries < 0)
+- return nr_entries;
+- BUG_ON(!nr_entries);
+- if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
+- nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
+-
+- msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
+- if (!msix_entries)
+- return -ENOMEM;
++ int nr_entries, entry, nvec = 0;
+
+ /*
+ * Allocate as many entries as the port wants, so that we can check
+@@ -97,20 +75,13 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
+ * equal to the number of entries this port actually uses, we'll happily
+ * go through without any tricks.
+ */
+- for (i = 0; i < nr_entries; i++)
+- msix_entries[i].entry = i;
+-
+- status = pci_enable_msix_exact(dev, msix_entries, nr_entries);
+- if (status)
+- goto Exit;
+-
+- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+- idx[i] = -1;
+- status = -EIO;
+- nvec = 0;
++ nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES,
++ PCI_IRQ_MSIX);
++ if (nr_entries < 0)
++ return nr_entries;
+
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+- int entry;
++ u16 reg16;
+
+ /*
+ * The code below follows the PCI Express Base Specification 2.0
+@@ -125,18 +96,16 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
+ pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
+ entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
+ if (entry >= nr_entries)
+- goto Error;
++ goto out_free_irqs;
+
+- i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
+- if (i == nvec)
+- nvec++;
++ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, entry);
++ irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, entry);
+
+- idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
+- idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
++ nvec = max(nvec, entry + 1);
+ }
+
+ if (mask & PCIE_PORT_SERVICE_AER) {
+- int entry;
++ u32 reg32, pos;
+
+ /*
+ * The code below follows Section 7.10.10 of the PCI Express
+@@ -151,13 +120,11 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ entry = reg32 >> 27;
+ if (entry >= nr_entries)
+- goto Error;
++ goto out_free_irqs;
+
+- i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
+- if (i == nvec)
+- nvec++;
++ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, entry);
+
+- idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
++ nvec = max(nvec, entry + 1);
+ }
+
+ /*
+@@ -165,41 +132,54 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
+ * what we have. Otherwise, the port has some extra entries not for the
+ * services we know and we need to work around that.
+ */
+- if (nvec == nr_entries) {
+- status = 0;
+- } else {
++ if (nvec != nr_entries) {
+ /* Drop the temporary MSI-X setup */
+- pci_disable_msix(dev);
++ pci_free_irq_vectors(dev);
+
+ /* Now allocate the MSI-X vectors for real */
+- status = pci_enable_msix_exact(dev, msix_entries, nvec);
+- if (status)
+- goto Exit;
++ nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
++ PCI_IRQ_MSIX);
++ if (nr_entries < 0)
++ return nr_entries;
+ }
+
+- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+- vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
+-
+- Exit:
+- kfree(msix_entries);
+- return status;
++ return 0;
+
+- Error:
+- pci_disable_msix(dev);
+- goto Exit;
++out_free_irqs:
++ pci_free_irq_vectors(dev);
++ return -EIO;
+ }
+
+ /**
+- * init_service_irqs - initialize irqs for PCI Express port services
++ * pcie_init_service_irqs - initialize irqs for PCI Express port services
+ * @dev: PCI Express port to handle
+ * @irqs: Array of irqs to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: Interrupt mode associated with the port
+ */
+-static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
++static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+ {
+- int i, irq = -1;
++ unsigned int flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
++ int ret, i;
++ int irq = -1;
++
++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
++ irqs[i] = -1;
++
++ /* Check whether the platform owns independent irq pins for AER/PME.
++ * Some platforms provide separate AER/PME interrupts and describe
++ * them in the device tree.
++ */
++ ret = pcibios_check_service_irqs(dev, irqs, mask);
++ if (ret) {
++ if (dev->irq)
++ irq = dev->irq;
++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
++ if (irqs[i] == -1 && i != PCIE_PORT_SERVICE_VC_SHIFT)
++ irqs[i] = irq;
++ return 0;
++ }
+
+ /*
+ * If MSI cannot be used for PCIe PME or hotplug, we have to use
+@@ -207,41 +187,25 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+ */
+ if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
+ ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
+- if (dev->irq)
+- irq = dev->irq;
+- goto no_msi;
++ flags &= ~PCI_IRQ_MSI;
++ } else {
++ /* Try to use MSI-X if supported */
++ if (!pcie_port_enable_msix(dev, irqs, mask))
++ return 0;
+ }
+
+- /* Try to use MSI-X if supported */
+- if (!pcie_port_enable_msix(dev, irqs, mask))
+- return 0;
+-
+- /*
+- * We're not going to use MSI-X, so try MSI and fall back to INTx.
+- * If neither MSI/MSI-X nor INTx available, try other interrupt. On
+- * some platforms, root port doesn't support MSI/MSI-X/INTx in RC mode.
+- */
+- if (!pci_enable_msi(dev) || dev->irq)
+- irq = dev->irq;
++ ret = pci_alloc_irq_vectors(dev, 1, 1, flags);
++ if (ret < 0)
++ return -ENODEV;
+
+- no_msi:
+- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+- irqs[i] = irq;
+- irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
++ if (i != PCIE_PORT_SERVICE_VC_SHIFT)
++ irqs[i] = pci_irq_vector(dev, 0);
++ }
+
+- if (irq < 0)
+- return -ENODEV;
+ return 0;
+ }
+
+-static void cleanup_service_irqs(struct pci_dev *dev)
+-{
+- if (dev->msix_enabled)
+- pci_disable_msix(dev);
+- else if (dev->msi_enabled)
+- pci_disable_msi(dev);
+-}
+-
+ /**
+ * get_port_device_capability - discover capabilities of a PCI Express port
+ * @dev: PCI Express port to examine
+@@ -378,7 +342,7 @@ int pcie_port_device_register(struct pci_dev *dev)
+ * that can be used in the absence of irqs. Allow them to determine
+ * if that is to be used.
+ */
+- status = init_service_irqs(dev, irqs, capabilities);
++ status = pcie_init_service_irqs(dev, irqs, capabilities);
+ if (status) {
+ capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
+ if (!capabilities)
+@@ -401,7 +365,7 @@ int pcie_port_device_register(struct pci_dev *dev)
+ return 0;
+
+ error_cleanup_irqs:
+- cleanup_service_irqs(dev);
++ pci_free_irq_vectors(dev);
+ error_disable:
+ pci_disable_device(dev);
+ return status;
+@@ -469,7 +433,7 @@ static int remove_iter(struct device *dev, void *data)
+ void pcie_port_device_remove(struct pci_dev *dev)
+ {
+ device_for_each_child(&dev->dev, NULL, remove_iter);
+- cleanup_service_irqs(dev);
++ pci_free_irq_vectors(dev);
+ pci_disable_device(dev);
+ }
+
+@@ -499,7 +463,6 @@ static int pcie_port_probe_service(struct device *dev)
+ if (status)
+ return status;
+
+- dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
+ get_device(dev);
+ return 0;
+ }
+@@ -524,8 +487,6 @@ static int pcie_port_remove_service(struct device *dev)
+ pciedev = to_pcie_device(dev);
+ driver = to_service_driver(dev->driver);
+ if (driver && driver->remove) {
+- dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
+- driver->name);
+ driver->remove(pciedev);
+ put_device(dev);
+ }
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 1b711796..6738d816 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1823,6 +1823,7 @@ void pcibios_release_device(struct pci_dev *dev);
+ void pcibios_penalize_isa_irq(int irq, int active);
+ int pcibios_alloc_irq(struct pci_dev *dev);
+ void pcibios_free_irq(struct pci_dev *dev);
++int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask);
+
+ #ifdef CONFIG_HIBERNATE_CALLBACKS
+ extern struct dev_pm_ops pcibios_pm_ops;
+--
+2.14.1
+
diff --git a/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch b/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch
new file mode 100644
index 0000000000..0e8e3695f3
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch
@@ -0,0 +1,1776 @@
+From be07319b9897738a4ab1501880b7dd9be26eba66 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 11:54:28 +0800
+Subject: [PATCH] phy: support layerscape
+
+This is an integrated patch for layerscape mdio-phy support.
+
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
+Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
+Signed-off-by: costi <constantin.tudor@freescale.com>
+Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
+Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/net/phy/Kconfig | 11 +
+ drivers/net/phy/Makefile | 2 +
+ drivers/net/phy/aquantia.c | 28 +
+ drivers/net/phy/cortina.c | 118 ++++
+ drivers/net/phy/fsl_backplane.c | 1358 +++++++++++++++++++++++++++++++++++++++
+ drivers/net/phy/phy.c | 23 +-
+ drivers/net/phy/phy_device.c | 6 +-
+ drivers/net/phy/swphy.c | 1 +
+ include/linux/phy.h | 4 +
+ 9 files changed, 1544 insertions(+), 7 deletions(-)
+ create mode 100644 drivers/net/phy/cortina.c
+ create mode 100644 drivers/net/phy/fsl_backplane.c
+
+diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
+index 30a3a2f5..3521c1ac 100644
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -89,6 +89,12 @@ config MDIO_BUS_MUX_MMIOREG
+ config MDIO_CAVIUM
+ tristate
+
++config MDIO_FSL_BACKPLANE
++ tristate "Support for backplane on Freescale XFI interface"
++ depends on OF_MDIO
++ help
++ This module provides a driver for the Freescale XFI backplane.
++
+ config MDIO_GPIO
+ tristate "GPIO lib-based bitbanged MDIO buses"
+ depends on MDIO_BITBANG && GPIOLIB
+@@ -298,6 +304,11 @@ config CICADA_PHY
+ ---help---
+ Currently supports the cis8204
+
++config CORTINA_PHY
++ tristate "Cortina EDC CDR 10G Ethernet PHY"
++ ---help---
++ Currently supports the CS4340 phy.
++
+ config DAVICOM_PHY
+ tristate "Davicom PHYs"
+ ---help---
+diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
+index 93a68fcd..ef3ec265 100644
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -30,6 +30,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
+ obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
+ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
+ obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
++obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
+ obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
+ obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
+ obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
+@@ -48,6 +49,7 @@ obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
+ obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
+ obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+ obj-$(CONFIG_CICADA_PHY) += cicada.o
++obj-$(CONFIG_CORTINA_PHY) += cortina.o
+ obj-$(CONFIG_DAVICOM_PHY) += davicom.o
+ obj-$(CONFIG_DP83640_PHY) += dp83640.o
+ obj-$(CONFIG_DP83848_PHY) += dp83848.o
+diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
+index 09b0b0aa..e8ae50e1 100644
+--- a/drivers/net/phy/aquantia.c
++++ b/drivers/net/phy/aquantia.c
+@@ -21,6 +21,8 @@
+ #define PHY_ID_AQ1202 0x03a1b445
+ #define PHY_ID_AQ2104 0x03a1b460
+ #define PHY_ID_AQR105 0x03a1b4a2
++#define PHY_ID_AQR106 0x03a1b4d0
++#define PHY_ID_AQR107 0x03a1b4e0
+ #define PHY_ID_AQR405 0x03a1b4b0
+
+ #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
+@@ -153,6 +155,30 @@ static struct phy_driver aquantia_driver[] = {
+ .ack_interrupt = aquantia_ack_interrupt,
+ .read_status = aquantia_read_status,
+ },
++{
++ .phy_id = PHY_ID_AQR106,
++ .phy_id_mask = 0xfffffff0,
++ .name = "Aquantia AQR106",
++ .features = PHY_AQUANTIA_FEATURES,
++ .flags = PHY_HAS_INTERRUPT,
++ .aneg_done = aquantia_aneg_done,
++ .config_aneg = aquantia_config_aneg,
++ .config_intr = aquantia_config_intr,
++ .ack_interrupt = aquantia_ack_interrupt,
++ .read_status = aquantia_read_status,
++},
++{
++ .phy_id = PHY_ID_AQR107,
++ .phy_id_mask = 0xfffffff0,
++ .name = "Aquantia AQR107",
++ .features = PHY_AQUANTIA_FEATURES,
++ .flags = PHY_HAS_INTERRUPT,
++ .aneg_done = aquantia_aneg_done,
++ .config_aneg = aquantia_config_aneg,
++ .config_intr = aquantia_config_intr,
++ .ack_interrupt = aquantia_ack_interrupt,
++ .read_status = aquantia_read_status,
++},
+ {
+ .phy_id = PHY_ID_AQR405,
+ .phy_id_mask = 0xfffffff0,
+@@ -173,6 +199,8 @@ static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
+ { PHY_ID_AQ1202, 0xfffffff0 },
+ { PHY_ID_AQ2104, 0xfffffff0 },
+ { PHY_ID_AQR105, 0xfffffff0 },
++ { PHY_ID_AQR106, 0xfffffff0 },
++ { PHY_ID_AQR107, 0xfffffff0 },
+ { PHY_ID_AQR405, 0xfffffff0 },
+ { }
+ };
+diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
+new file mode 100644
+index 00000000..72f4228a
+--- /dev/null
++++ b/drivers/net/phy/cortina.c
+@@ -0,0 +1,118 @@
++/*
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * CORTINA is a registered trademark of Cortina Systems, Inc.
++ *
++ */
++#include <linux/module.h>
++#include <linux/phy.h>
++
++#define PHY_ID_CS4340 0x13e51002
++
++#define VILLA_GLOBAL_CHIP_ID_LSB 0x0
++#define VILLA_GLOBAL_CHIP_ID_MSB 0x1
++
++#define VILLA_GLOBAL_GPIO_1_INTS 0x017
++
++static int cortina_read_reg(struct phy_device *phydev, u16 regnum)
++{
++ return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
++ MII_ADDR_C45 | regnum);
++}
++
++static int cortina_config_aneg(struct phy_device *phydev)
++{
++ phydev->supported = SUPPORTED_10000baseT_Full;
++ phydev->advertising = SUPPORTED_10000baseT_Full;
++
++ return 0;
++}
++
++static int cortina_read_status(struct phy_device *phydev)
++{
++ int gpio_int_status, ret = 0;
++
++ gpio_int_status = cortina_read_reg(phydev, VILLA_GLOBAL_GPIO_1_INTS);
++ if (gpio_int_status < 0) {
++ ret = gpio_int_status;
++ goto err;
++ }
++
++ if (gpio_int_status & 0x8) {
++ /* link is up when the EDC converged status bit is set */
++ phydev->speed = SPEED_10000;
++ phydev->duplex = DUPLEX_FULL;
++ phydev->link = 1;
++ } else {
++ phydev->link = 0;
++ }
++
++err:
++ return ret;
++}
++
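++/*
++ * Deliberate no-op: presumably the generic BMCR-based soft reset does not
++ * apply to this Clause 45 PHY, so the driver overrides soft_reset to do
++ * nothing.
++ */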
++static int cortina_soft_reset(struct phy_device *phydev)
++{
++ return 0;
++}
++
++static int cortina_probe(struct phy_device *phydev)
++{
++ u32 phy_id = 0;
++ int id_lsb = 0, id_msb = 0;
++
++ /* Read device id from phy registers. */
++ id_lsb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_LSB);
++ if (id_lsb < 0)
++ return -ENXIO;
++
++ phy_id = id_lsb << 16;
++
++ id_msb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_MSB);
++ if (id_msb < 0)
++ return -ENXIO;
++
++ phy_id |= id_msb;
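++ /* e.g. the CS4340 reads back 0x13e5 / 0x1002 here, yielding 0x13e51002 */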
++
++ /* Make sure the device tree binding matched the driver with the
++ * right device.
++ */
++ if (phy_id != phydev->drv->phy_id) {
++ phydev_err(phydev, "Error matching phy with %s driver\n",
++ phydev->drv->name);
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++static struct phy_driver cortina_driver[] = {
++{
++ .phy_id = PHY_ID_CS4340,
++ .phy_id_mask = 0xffffffff,
++ .name = "Cortina CS4340",
++ .config_aneg = cortina_config_aneg,
++ .read_status = cortina_read_status,
++ .soft_reset = cortina_soft_reset,
++ .probe = cortina_probe,
++},
++};
++
++module_phy_driver(cortina_driver);
++
++static struct mdio_device_id __maybe_unused cortina_tbl[] = {
++ { PHY_ID_CS4340, 0xffffffff},
++ {},
++};
++
++MODULE_DEVICE_TABLE(mdio, cortina_tbl);
+diff --git a/drivers/net/phy/fsl_backplane.c b/drivers/net/phy/fsl_backplane.c
+new file mode 100644
+index 00000000..76865261
+--- /dev/null
++++ b/drivers/net/phy/fsl_backplane.c
+@@ -0,0 +1,1358 @@
++/* Freescale backplane driver.
++ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
++ *
++ * Copyright 2015 Freescale Semiconductor, Inc.
++ *
++ * Licensed under the GPL-2 or later.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mii.h>
++#include <linux/mdio.h>
++#include <linux/ethtool.h>
++#include <linux/phy.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/of_net.h>
++#include <linux/of_address.h>
++#include <linux/of_platform.h>
++#include <linux/timer.h>
++#include <linux/delay.h>
++#include <linux/workqueue.h>
++
++/* XFI PCS Device Identifier */
++#define FSL_PCS_PHY_ID 0x0083e400
++
++/* Freescale KR PMD registers */
++#define FSL_KR_PMD_CTRL 0x96
++#define FSL_KR_PMD_STATUS 0x97
++#define FSL_KR_LP_CU 0x98
++#define FSL_KR_LP_STATUS 0x99
++#define FSL_KR_LD_CU 0x9a
++#define FSL_KR_LD_STATUS 0x9b
++
++/* Freescale KR PMD defines */
++#define PMD_RESET 0x1
++#define PMD_STATUS_SUP_STAT 0x4
++#define PMD_STATUS_FRAME_LOCK 0x2
++#define TRAIN_EN 0x3
++#define TRAIN_DISABLE 0x1
++#define RX_STAT 0x1
++
++#define FSL_KR_RX_LINK_STAT_MASK 0x1000
++#define FSL_XFI_PCS_10GR_SR1 0x20
++
++/* Freescale KX PCS mode register */
++#define FSL_PCS_IF_MODE 0x8014
++
++/* Freescale KX PCS mode register init value */
++#define IF_MODE_INIT 0x8
++
++/* Freescale KX/KR AN registers */
++#define FSL_AN_AD1 0x11
++#define FSL_AN_BP_STAT 0x30
++
++/* Freescale KX/KR AN registers defines */
++#define AN_CTRL_INIT 0x1200
++#define KX_AN_AD1_INIT 0x25
++#define KR_AN_AD1_INIT 0x85
++#define AN_LNK_UP_MASK 0x4
++#define KR_AN_MASK 0x8
++#define TRAIN_FAIL 0x8
++
++/* C(-1) */
++#define BIN_M1 0
++/* C(1) */
++#define BIN_LONG 1
++#define BIN_M1_SEL 6
++#define BIN_Long_SEL 7
++#define CDR_SEL_MASK 0x00070000
++#define BIN_SNAPSHOT_NUM 5
++#define BIN_M1_THRESHOLD 3
++#define BIN_LONG_THRESHOLD 2
++
++#define PRE_COE_SHIFT 22
++#define POST_COE_SHIFT 16
++#define ZERO_COE_SHIFT 8
++
++#define PRE_COE_MAX 0x0
++#define PRE_COE_MIN 0x8
++#define POST_COE_MAX 0x0
++#define POST_COE_MIN 0x10
++#define ZERO_COE_MAX 0x30
++#define ZERO_COE_MIN 0x0
++
++#define TECR0_INIT 0x24200000
++#define RATIO_PREQ 0x3
++#define RATIO_PST1Q 0xd
++#define RATIO_EQ 0x20
++
++#define GCR0_RESET_MASK 0x600000
++#define GCR1_SNP_START_MASK 0x00000040
++#define GCR1_CTL_SNP_START_MASK 0x00002000
++#define GCR1_REIDL_TH_MASK 0x00700000
++#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
++#define GCR1_REIDL_ET_MAS_MASK 0x00004000
++#define TECR0_AMP_RED_MASK 0x0000003f
++
++#define RECR1_CTL_SNP_DONE_MASK 0x00000002
++#define RECR1_SNP_DONE_MASK 0x00000004
++#define TCSR1_SNP_DATA_MASK 0x0000ffc0
++#define TCSR1_SNP_DATA_SHIFT 6
++#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
++
++#define RECR1_GAINK2_MASK 0x0f000000
++#define RECR1_GAINK2_SHIFT 24
++#define RECR1_GAINK3_MASK 0x000f0000
++#define RECR1_GAINK3_SHIFT 16
++#define RECR1_OFFSET_MASK 0x00003f80
++#define RECR1_OFFSET_SHIFT 7
++#define RECR1_BLW_MASK 0x00000f80
++#define RECR1_BLW_SHIFT 7
++#define EYE_CTRL_SHIFT 12
++#define BASE_WAND_SHIFT 10
++
++#define XGKR_TIMEOUT 1050
++
++#define INCREMENT 1
++#define DECREMENT 2
++#define TIMEOUT_LONG 3
++#define TIMEOUT_M1 3
++
++#define RX_READY_MASK 0x8000
++#define PRESET_MASK 0x2000
++#define INIT_MASK 0x1000
++#define COP1_MASK 0x30
++#define COP1_SHIFT 4
++#define COZ_MASK 0xc
++#define COZ_SHIFT 2
++#define COM1_MASK 0x3
++#define COM1_SHIFT 0
++#define REQUEST_MASK 0x3f
++#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
++ COP1_MASK | COZ_MASK | COM1_MASK)
++
++#define NEW_ALGORITHM_TRAIN_TX
++#ifdef NEW_ALGORITHM_TRAIN_TX
++#define FORCE_INC_COP1_NUMBER 0
++#define FORCE_INC_COM1_NUMBER 1
++#endif
++
++#define VAL_INVALID 0xff
++
++static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
++ 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
++static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
++ 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
++
++enum backplane_mode {
++ PHY_BACKPLANE_1000BASE_KX,
++ PHY_BACKPLANE_10GBASE_KR,
++ PHY_BACKPLANE_INVAL
++};
++
++enum coe_filed {
++ COE_COP1,
++ COE_COZ,
++ COE_COM
++};
++
++enum coe_update {
++ COE_NOTUPDATED,
++ COE_UPDATED,
++ COE_MIN,
++ COE_MAX,
++ COE_INV
++};
++
++enum train_state {
++ DETECTING_LP,
++ TRAINED,
++};
++
++struct per_lane_ctrl_status {
++ __be32 gcr0; /* 0x.000 - General Control Register 0 */
++ __be32 gcr1; /* 0x.004 - General Control Register 1 */
++ __be32 gcr2; /* 0x.008 - General Control Register 2 */
++ __be32 resv1; /* 0x.00C - Reserved */
++ __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
++ __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
++ __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
++ __be32 resv2; /* 0x.01C - Reserved */
++ __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */
++ __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */
++ __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */
++ __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */
++ __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
++ __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
++ __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
++ __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
++};
++
++struct tx_condition {
++ bool bin_m1_late_early;
++ bool bin_long_late_early;
++ bool bin_m1_stop;
++ bool bin_long_stop;
++ bool tx_complete;
++ bool sent_init;
++ int m1_min_max_cnt;
++ int long_min_max_cnt;
++#ifdef NEW_ALGORITHM_TRAIN_TX
++ int pre_inc;
++ int post_inc;
++#endif
++};
++
++struct fsl_xgkr_inst {
++ void *reg_base;
++ struct phy_device *phydev;
++ struct tx_condition tx_c;
++ struct delayed_work xgkr_wk;
++ enum train_state state;
++ u32 ld_update;
++ u32 ld_status;
++ u32 ratio_preq;
++ u32 ratio_pst1q;
++ u32 adpt_eq;
++};
++
++static void tx_condition_init(struct tx_condition *tx_c)
++{
++ tx_c->bin_m1_late_early = true;
++ tx_c->bin_long_late_early = false;
++ tx_c->bin_m1_stop = false;
++ tx_c->bin_long_stop = false;
++ tx_c->tx_complete = false;
++ tx_c->sent_init = false;
++ tx_c->m1_min_max_cnt = 0;
++ tx_c->long_min_max_cnt = 0;
++#ifdef NEW_ALGORITHM_TRAIN_TX
++ tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
++ tx_c->post_inc = FORCE_INC_COP1_NUMBER;
++#endif
++}
++
++void tune_tecr0(struct fsl_xgkr_inst *inst)
++{
++ struct per_lane_ctrl_status *reg_base = inst->reg_base;
++ u32 val;
++
++ val = TECR0_INIT |
++ inst->adpt_eq << ZERO_COE_SHIFT |
++ inst->ratio_preq << PRE_COE_SHIFT |
++ inst->ratio_pst1q << POST_COE_SHIFT;
++
++ /* reset the lane */
++ iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
++ &reg_base->gcr0);
++ udelay(1);
++ iowrite32(val, &reg_base->tecr0);
++ udelay(1);
++ /* unreset the lane */
++ iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
++ &reg_base->gcr0);
++ udelay(1);
++}
++
++static void start_lt(struct phy_device *phydev)
++{
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_EN);
++}
++
++static void stop_lt(struct phy_device *phydev)
++{
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
++}
++
++static void reset_gcr0(struct fsl_xgkr_inst *inst)
++{
++ struct per_lane_ctrl_status *reg_base = inst->reg_base;
++
++ iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
++ &reg_base->gcr0);
++ udelay(1);
++ iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
++ &reg_base->gcr0);
++ udelay(1);
++}
++
++void lane_set_1gkx(void *reg)
++{
++ struct per_lane_ctrl_status *reg_base = reg;
++ u32 val;
++
++ /* reset the lane */
++ iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
++ &reg_base->gcr0);
++ udelay(1);
++
++ /* set gcr1 for 1GKX */
++ val = ioread32(&reg_base->gcr1);
++ val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
++ GCR1_REIDL_ET_MAS_MASK);
++ iowrite32(val, &reg_base->gcr1);
++ udelay(1);
++
++ /* set tecr0 for 1GKX */
++ val = ioread32(&reg_base->tecr0);
++ val &= ~TECR0_AMP_RED_MASK;
++ iowrite32(val, &reg_base->tecr0);
++ udelay(1);
++
++ /* unreset the lane */
++ iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
++ &reg_base->gcr0);
++ udelay(1);
++}
++
++static void reset_lt(struct phy_device *phydev)
++{
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, PMD_RESET);
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_CU, 0);
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_STATUS, 0);
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS, 0);
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU, 0);
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS, 0);
++}
++
++static void start_xgkr_state_machine(struct delayed_work *work)
++{
++ queue_delayed_work(system_power_efficient_wq, work,
++ msecs_to_jiffies(XGKR_TIMEOUT));
++}
++
++static void start_xgkr_an(struct phy_device *phydev)
++{
++ struct fsl_xgkr_inst *inst;
++
++ reset_lt(phydev);
++ phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KR_AN_AD1_INIT);
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
++
++ inst = phydev->priv;
++
++ /* start state machine*/
++ start_xgkr_state_machine(&inst->xgkr_wk);
++}
++
++static void start_1gkx_an(struct phy_device *phydev)
++{
++ phy_write_mmd(phydev, MDIO_MMD_PCS, FSL_PCS_IF_MODE, IF_MODE_INIT);
++ phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KX_AN_AD1_INIT);
++ phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
++}
++
++static void ld_coe_status(struct fsl_xgkr_inst *inst)
++{
++ phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
++ FSL_KR_LD_STATUS, inst->ld_status);
++}
++
++static void ld_coe_update(struct fsl_xgkr_inst *inst)
++{
++ dev_dbg(&inst->phydev->mdio.dev, "sending request: %x\n", inst->ld_update);
++ phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
++ FSL_KR_LD_CU, inst->ld_update);
++}
++
++static void init_inst(struct fsl_xgkr_inst *inst, int reset)
++{
++ if (reset) {
++ inst->ratio_preq = RATIO_PREQ;
++ inst->ratio_pst1q = RATIO_PST1Q;
++ inst->adpt_eq = RATIO_EQ;
++ tune_tecr0(inst);
++ }
++
++ tx_condition_init(&inst->tx_c);
++ inst->state = DETECTING_LP;
++ inst->ld_status &= RX_READY_MASK;
++ ld_coe_status(inst);
++ inst->ld_update = 0;
++ inst->ld_status &= ~RX_READY_MASK;
++ ld_coe_status(inst);
++}
++
++#ifdef NEW_ALGORITHM_TRAIN_TX
++static int get_median_gaink2(u32 *reg)
++{
++ int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
++ u32 rx_eq_snp;
++ struct per_lane_ctrl_status *reg_base;
++ int timeout;
++ int i, j, tmp, pos;
++
++ reg_base = (struct per_lane_ctrl_status *)reg;
++
++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
++ /* wait until RECR1_CTL_SNP_DONE_MASK has cleared */
++ timeout = 100;
++ while (ioread32(&reg_base->recr1) &
++ RECR1_CTL_SNP_DONE_MASK) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* start snap shot */
++ iowrite32((ioread32(&reg_base->gcr1) |
++ GCR1_CTL_SNP_START_MASK),
++ &reg_base->gcr1);
++
++ /* wait for SNP done */
++ timeout = 100;
++ while (!(ioread32(&reg_base->recr1) &
++ RECR1_CTL_SNP_DONE_MASK)) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* read and save the snap shot */
++ rx_eq_snp = ioread32(&reg_base->recr1);
++ gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
++ RECR1_GAINK2_SHIFT;
++
++ /* terminate the snapshot by clearing GCR1[REQ_CTL_SNP] */
++ iowrite32((ioread32(&reg_base->gcr1) &
++ ~GCR1_CTL_SNP_START_MASK),
++ &reg_base->gcr1);
++ }
++
++ /* get the median of the 5 snapshots */
++ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
++ tmp = gaink2_snap_shot[i];
++ pos = i;
++ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
++ if (gaink2_snap_shot[j] < tmp) {
++ tmp = gaink2_snap_shot[j];
++ pos = j;
++ }
++ }
++
++ gaink2_snap_shot[pos] = gaink2_snap_shot[i];
++ gaink2_snap_shot[i] = tmp;
++ }
++
++ return gaink2_snap_shot[2];
++}
++#endif
++
++static bool is_bin_early(int bin_sel, void *reg)
++{
++ bool early = false;
++ int bin_snap_shot[BIN_SNAPSHOT_NUM];
++ int i, negative_count = 0;
++ struct per_lane_ctrl_status *reg_base = reg;
++ int timeout;
++
++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
++ /* wait until RECR1_SNP_DONE_MASK has cleared */
++ timeout = 100;
++ while ((ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* set TCSR1[CDR_SEL] to BinM1/BinLong */
++ if (bin_sel == BIN_M1) {
++ iowrite32((ioread32(&reg_base->tcsr1) &
++ ~CDR_SEL_MASK) | BIN_M1_SEL,
++ &reg_base->tcsr1);
++ } else {
++ iowrite32((ioread32(&reg_base->tcsr1) &
++ ~CDR_SEL_MASK) | BIN_Long_SEL,
++ &reg_base->tcsr1);
++ }
++
++ /* start snap shot */
++ iowrite32(ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK,
++ &reg_base->gcr1);
++
++ /* wait for SNP done */
++ timeout = 100;
++ while (!(ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* read and save the snap shot */
++ bin_snap_shot[i] = (ioread32(&reg_base->tcsr1) &
++ TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
++ if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
++ negative_count++;
++
++ /* terminate the snapshot by clearing GCR1[REQ_CTL_SNP] */
++ iowrite32(ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK,
++ &reg_base->gcr1);
++ }
++
++ if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
++ ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
++ early = true;
++ }
++
++ return early;
++}
++
++static void train_tx(struct fsl_xgkr_inst *inst)
++{
++ struct phy_device *phydev = inst->phydev;
++ struct tx_condition *tx_c = &inst->tx_c;
++ bool bin_m1_early, bin_long_early;
++ u32 lp_status, old_ld_update;
++ u32 status_cop1, status_coz, status_com1;
++ u32 req_cop1, req_coz, req_com1, req_preset, req_init;
++ u32 temp;
++#ifdef NEW_ALGORITHM_TRAIN_TX
++ u32 median_gaink2;
++#endif
++
++recheck:
++ if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
++ tx_c->tx_complete = true;
++ inst->ld_status |= RX_READY_MASK;
++ ld_coe_status(inst);
++ /* tell LP we are ready */
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
++ FSL_KR_PMD_STATUS, RX_STAT);
++ return;
++ }
++
++ /* We start by checking the current LP status. If we got any responses,
++ * we can clear up the appropriate update request so that the
++ * subsequent code may easily issue new update requests if needed.
++ */
++ lp_status = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
++ REQUEST_MASK;
++ status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
++ status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
++ status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
++
++ old_ld_update = inst->ld_update;
++ req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
++ req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
++ req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
++ req_preset = old_ld_update & PRESET_MASK;
++ req_init = old_ld_update & INIT_MASK;
++
++ /* IEEE802.3-2008, 72.6.10.2.3.1
++ * We may clear PRESET when all coefficients show UPDATED or MAX.
++ */
++ if (req_preset) {
++ if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
++ (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
++ (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
++ inst->ld_update &= ~PRESET_MASK;
++ }
++ }
++
++ /* IEEE802.3-2008, 72.6.10.2.3.2
++ * We may clear INITIALIZE when no coefficients show NOT UPDATED.
++ */
++ if (req_init) {
++ if (status_cop1 != COE_NOTUPDATED &&
++ status_coz != COE_NOTUPDATED &&
++ status_com1 != COE_NOTUPDATED) {
++ inst->ld_update &= ~INIT_MASK;
++ }
++ }
++
++ /* IEEE802.3-2008, 72.6.10.2.3.2
++ * we send initialize to the other side to ensure default settings
++ * for the LP. Naturally, we should do this only once.
++ */
++ if (!tx_c->sent_init) {
++ if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
++ inst->ld_update = INIT_MASK;
++ tx_c->sent_init = true;
++ }
++ }
++
++ /* IEEE802.3-2008, 72.6.10.2.3.3
++ * We set coefficient requests to HOLD when we get the information
++ * about any updates. On clearing our prior response, we also update
++ * our internal status.
++ */
++ if (status_cop1 != COE_NOTUPDATED) {
++ if (req_cop1) {
++ inst->ld_update &= ~COP1_MASK;
++#ifdef NEW_ALGORITHM_TRAIN_TX
++ if (tx_c->post_inc) {
++ if (req_cop1 == INCREMENT &&
++ status_cop1 == COE_MAX) {
++ tx_c->post_inc = 0;
++ tx_c->bin_long_stop = true;
++ tx_c->bin_m1_stop = true;
++ } else {
++ tx_c->post_inc -= 1;
++ }
++
++ ld_coe_update(inst);
++ goto recheck;
++ }
++#endif
++ if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
++ (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
++ dev_dbg(&inst->phydev->mdio.dev, "COP1 hit limit %s",
++ (status_cop1 == COE_MIN) ?
++ "DEC MIN" : "INC MAX");
++ tx_c->long_min_max_cnt++;
++ if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
++ tx_c->bin_long_stop = true;
++ ld_coe_update(inst);
++ goto recheck;
++ }
++ }
++ }
++ }
++
++ if (status_coz != COE_NOTUPDATED) {
++ if (req_coz)
++ inst->ld_update &= ~COZ_MASK;
++ }
++
++ if (status_com1 != COE_NOTUPDATED) {
++ if (req_com1) {
++ inst->ld_update &= ~COM1_MASK;
++#ifdef NEW_ALGORITHM_TRAIN_TX
++ if (tx_c->pre_inc) {
++ if (req_com1 == INCREMENT &&
++ status_com1 == COE_MAX)
++ tx_c->pre_inc = 0;
++ else
++ tx_c->pre_inc -= 1;
++
++ ld_coe_update(inst);
++ goto recheck;
++ }
++#endif
++ /* Stop If we have reached the limit for a parameter. */
++ if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
++ (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
++ dev_dbg(&inst->phydev->mdio.dev, "COM1 hit limit %s",
++ (status_com1 == COE_MIN) ?
++ "DEC MIN" : "INC MAX");
++ tx_c->m1_min_max_cnt++;
++ if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
++ tx_c->bin_m1_stop = true;
++ ld_coe_update(inst);
++ goto recheck;
++ }
++ }
++ }
++ }
++
++ if (old_ld_update != inst->ld_update) {
++ ld_coe_update(inst);
++ /* Redo these status checks and updates until we have no more
++ * changes, to speed up the overall process.
++ */
++ goto recheck;
++ }
++
++ /* Do nothing if we have a pending request. */
++ if ((req_coz || req_com1 || req_cop1))
++ return;
++ else if (lp_status)
++ /* No pending request, but the LP status has not reverted
++ * to NOT UPDATED.
++ */
++ return;
++
++#ifdef NEW_ALGORITHM_TRAIN_TX
++ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
++ if (tx_c->pre_inc) {
++ inst->ld_update = INCREMENT << COM1_SHIFT;
++ ld_coe_update(inst);
++ return;
++ }
++
++ if (status_cop1 != COE_MAX) {
++ median_gaink2 = get_median_gaink2(inst->reg_base);
++ if (median_gaink2 == 0xf) {
++ tx_c->post_inc = 1;
++ } else {
++ /* Gaink2 median lower than "F" */
++ tx_c->bin_m1_stop = true;
++ tx_c->bin_long_stop = true;
++ goto recheck;
++ }
++ } else {
++ /* C1 MAX */
++ tx_c->bin_m1_stop = true;
++ tx_c->bin_long_stop = true;
++ goto recheck;
++ }
++
++ if (tx_c->post_inc) {
++ inst->ld_update = INCREMENT << COP1_SHIFT;
++ ld_coe_update(inst);
++ return;
++ }
++ }
++#endif
++
++ /* snapshot and select bin */
++ bin_m1_early = is_bin_early(BIN_M1, inst->reg_base);
++ bin_long_early = is_bin_early(BIN_LONG, inst->reg_base);
++
++ if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
++ tx_c->bin_m1_stop = true;
++ goto recheck;
++ }
++
++ if (!tx_c->bin_long_stop &&
++ tx_c->bin_long_late_early && !bin_long_early) {
++ tx_c->bin_long_stop = true;
++ goto recheck;
++ }
++
++ /* IEEE802.3-2008, 72.6.10.2.3.3
++ * We only request coefficient updates when no PRESET/INITIALIZE is
++ * pending, and only when the corresponding status is NOT UPDATED
++ * and no other request is outstanding.
++ */
++ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
++ if (!tx_c->bin_long_stop) {
++ /* BinM1 correction means changing COM1 */
++ if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
++ /* Avoid BinM1Late by requesting an
++ * immediate decrement.
++ */
++ if (!bin_m1_early) {
++ /* request decrement c(-1) */
++ temp = DECREMENT << COM1_SHIFT;
++ inst->ld_update = temp;
++ ld_coe_update(inst);
++ tx_c->bin_m1_late_early = bin_m1_early;
++ return;
++ }
++ }
++
++ /* BinLong correction means changing COP1 */
++ if (!status_cop1 && !(inst->ld_update & COP1_MASK)) {
++ /* Locate BinLong transition point (if any)
++ * while avoiding BinM1Late.
++ */
++ if (bin_long_early) {
++ /* request increment c(1) */
++ temp = INCREMENT << COP1_SHIFT;
++ inst->ld_update = temp;
++ } else {
++ /* request decrement c(1) */
++ temp = DECREMENT << COP1_SHIFT;
++ inst->ld_update = temp;
++ }
++
++ ld_coe_update(inst);
++ tx_c->bin_long_late_early = bin_long_early;
++ }
++ /* We try to finish BinLong before we do BinM1 */
++ return;
++ }
++
++ if (!tx_c->bin_m1_stop) {
++ /* BinM1 correction means changing COM1 */
++ if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
++ /* Locate BinM1 transition point (if any) */
++ if (bin_m1_early) {
++ /* request increment c(-1) */
++ temp = INCREMENT << COM1_SHIFT;
++ inst->ld_update = temp;
++ } else {
++ /* request decrement c(-1) */
++ temp = DECREMENT << COM1_SHIFT;
++ inst->ld_update = temp;
++ }
++
++ ld_coe_update(inst);
++ tx_c->bin_m1_late_early = bin_m1_early;
++ }
++ }
++ }
++}
++
++static int is_link_up(struct phy_device *phydev)
++{
++ int val;
++
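++ /* The receive link status bit is latched, so read the register
++ * twice; the second read reflects the current link state.
++ */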
++ phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
++ val = phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
++
++ return (val & FSL_KR_RX_LINK_STAT_MASK) ? 1 : 0;
++}
++
++static int is_link_training_fail(struct phy_device *phydev)
++{
++ int val;
++ int timeout = 100;
++
++ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS);
++ if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
++ /* double-check via LNK_STAT before declaring failure */
++ while (timeout--) {
++ if (is_link_up(phydev))
++ return 0;
++
++ usleep_range(100, 500);
++ }
++ }
++
++ return 1;
++}
++
++static int check_rx(struct phy_device *phydev)
++{
++ return phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
++ RX_READY_MASK;
++}
++
++/* Coefficient values have hardware restrictions */
++static int is_ld_valid(struct fsl_xgkr_inst *inst)
++{
++ u32 ratio_pst1q = inst->ratio_pst1q;
++ u32 adpt_eq = inst->adpt_eq;
++ u32 ratio_preq = inst->ratio_preq;
++
++ if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
++ return 0;
++
++ if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
++ ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
++ return 0;
++
++ if (ratio_preq > ratio_pst1q)
++ return 0;
++
++ if (ratio_preq > 8)
++ return 0;
++
++ if (adpt_eq < 26)
++ return 0;
++
++ if (ratio_pst1q > 16)
++ return 0;
++
++ return 1;
++}
++
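++/* Scan a VAL_INVALID-terminated table for val; return 1 if present. */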
++static int is_value_allowed(const u32 *val_table, u32 val)
++{
++ int i;
++
++ for (i = 0;; i++) {
++ if (*(val_table + i) == VAL_INVALID)
++ return 0;
++ if (*(val_table + i) == val)
++ return 1;
++ }
++}
++
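++/* Apply a single INCREMENT/DECREMENT request to one local coefficient.
++ * Returns 0 when the request was applied (or the value is skipped),
++ * 1 when the coefficient is pinned at its minimum and 2 when it is
++ * pinned at its maximum.
++ */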
++static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request)
++{
++ u32 ld_limit[3], ld_coe[3], step[3];
++
++ ld_coe[0] = inst->ratio_pst1q;
++ ld_coe[1] = inst->adpt_eq;
++ ld_coe[2] = inst->ratio_preq;
++
++ /* Information specific to the Freescale SerDes for 10GBase-KR:
++ * Incrementing C(+1) means *decrementing* RATIO_PST1Q
++ * Incrementing C(0) means incrementing ADPT_EQ
++ * Incrementing C(-1) means *decrementing* RATIO_PREQ
++ */
++ step[0] = -1;
++ step[1] = 1;
++ step[2] = -1;
++
++ switch (request) {
++ case INCREMENT:
++ ld_limit[0] = POST_COE_MAX;
++ ld_limit[1] = ZERO_COE_MAX;
++ ld_limit[2] = PRE_COE_MAX;
++ if (ld_coe[field] != ld_limit[field])
++ ld_coe[field] += step[field];
++ else
++ /* MAX */
++ return 2;
++ break;
++ case DECREMENT:
++ ld_limit[0] = POST_COE_MIN;
++ ld_limit[1] = ZERO_COE_MIN;
++ ld_limit[2] = PRE_COE_MIN;
++ if (ld_coe[field] != ld_limit[field])
++ ld_coe[field] -= step[field];
++ else
++ /* MIN */
++ return 1;
++ break;
++ default:
++ break;
++ }
++
++ if (is_ld_valid(inst)) {
++ /* accept new ld */
++ inst->ratio_pst1q = ld_coe[0];
++ inst->adpt_eq = ld_coe[1];
++ inst->ratio_preq = ld_coe[2];
++ /* Only some values for preq and pst1q can be used.
++ * For preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
++ * For pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
++ */
++ if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
++ dev_dbg(&inst->phydev->mdio.dev,
++ "preq skipped value: %d\n", ld_coe[2]);
++ return 0;
++ }
++
++ if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
++ dev_dbg(&inst->phydev->mdio.dev,
++ "pst1q skipped value: %d\n", ld_coe[0]);
++ return 0;
++ }
++
++ tune_tecr0(inst);
++ } else {
++ if (request == DECREMENT)
++ /* MIN */
++ return 1;
++ if (request == INCREMENT)
++ /* MAX */
++ return 2;
++ }
++
++ return 0;
++}
++
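++/* Record an inc_dec() result as COE_UPDATED/COE_MIN/COE_MAX in
++ * ld_status for the given coefficient field.
++ */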
++static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld)
++{
++ u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX};
++ u32 mask, val;
++
++ switch (field) {
++ case COE_COP1:
++ mask = COP1_MASK;
++ val = ld_coe[new_ld] << COP1_SHIFT;
++ break;
++ case COE_COZ:
++ mask = COZ_MASK;
++ val = ld_coe[new_ld] << COZ_SHIFT;
++ break;
++ case COE_COM:
++ mask = COM1_MASK;
++ val = ld_coe[new_ld] << COM1_SHIFT;
++ break;
++ default:
++ return;
++ }
++
++ inst->ld_status &= ~mask;
++ inst->ld_status |= val;
++}
++
++static void check_request(struct fsl_xgkr_inst *inst, int request)
++{
++ int cop1_req, coz_req, com_req;
++ int old_status, new_ld_sta;
++
++ cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
++ coz_req = (request & COZ_MASK) >> COZ_SHIFT;
++ com_req = (request & COM1_MASK) >> COM1_SHIFT;
++
++ /* IEEE802.3-2008, 72.6.10.2.5
++ * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
++ */
++ old_status = inst->ld_status;
++
++ if (cop1_req && !(inst->ld_status & COP1_MASK)) {
++ new_ld_sta = inc_dec(inst, COE_COP1, cop1_req);
++ min_max_updated(inst, COE_COP1, new_ld_sta);
++ }
++
++ if (coz_req && !(inst->ld_status & COZ_MASK)) {
++ new_ld_sta = inc_dec(inst, COE_COZ, coz_req);
++ min_max_updated(inst, COE_COZ, new_ld_sta);
++ }
++
++ if (com_req && !(inst->ld_status & COM1_MASK)) {
++ new_ld_sta = inc_dec(inst, COE_COM, com_req);
++ min_max_updated(inst, COE_COM, new_ld_sta);
++ }
++
++ if (old_status != inst->ld_status)
++ ld_coe_status(inst);
++}
++
++static void preset(struct fsl_xgkr_inst *inst)
++{
++ /* These are all MAX values from the IEEE802.3 perspective. */
++ inst->ratio_pst1q = POST_COE_MAX;
++ inst->adpt_eq = ZERO_COE_MAX;
++ inst->ratio_preq = PRE_COE_MAX;
++
++ tune_tecr0(inst);
++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
++ inst->ld_status |= COE_MAX << COP1_SHIFT |
++ COE_MAX << COZ_SHIFT |
++ COE_MAX << COM1_SHIFT;
++ ld_coe_status(inst);
++}
++
++static void initialize(struct fsl_xgkr_inst *inst)
++{
++ inst->ratio_preq = RATIO_PREQ;
++ inst->ratio_pst1q = RATIO_PST1Q;
++ inst->adpt_eq = RATIO_EQ;
++
++ tune_tecr0(inst);
++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
++ inst->ld_status |= COE_UPDATED << COP1_SHIFT |
++ COE_UPDATED << COZ_SHIFT |
++ COE_UPDATED << COM1_SHIFT;
++ ld_coe_status(inst);
++}
++
++static void train_rx(struct fsl_xgkr_inst *inst)
++{
++ struct phy_device *phydev = inst->phydev;
++ int request, old_ld_status;
++
++ /* get request from LP */
++ request = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU) &
++ (LD_ALL_MASK);
++ old_ld_status = inst->ld_status;
++
++ /* IEEE802.3-2008, 72.6.10.2.5
++ * Ensure we always go to NOT UPDATED for status reporting in
++ * response to HOLD requests.
++ * IEEE802.3-2008, 72.6.10.2.3.1/2
++ * ... but only if PRESET/INITIALIZE are not active to ensure
++ * we keep status until they are released.
++ */
++ if (!(request & (PRESET_MASK | INIT_MASK))) {
++ if (!(request & COP1_MASK))
++ inst->ld_status &= ~COP1_MASK;
++
++ if (!(request & COZ_MASK))
++ inst->ld_status &= ~COZ_MASK;
++
++ if (!(request & COM1_MASK))
++ inst->ld_status &= ~COM1_MASK;
++
++ if (old_ld_status != inst->ld_status)
++ ld_coe_status(inst);
++ }
++
++ /* As soon as the LP shows ready, no further updates are needed. */
++ if (check_rx(phydev)) {
++ /* LP receiver is ready */
++ if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
++ ld_coe_status(inst);
++ }
++ } else {
++ /* IEEE802.3-2008, 72.6.10.2.3.1/2
++ * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
++ */
++ if (request & (PRESET_MASK | INIT_MASK)) {
++ if (!(inst->ld_status &
++ (COP1_MASK | COZ_MASK | COM1_MASK))) {
++ if (request & PRESET_MASK)
++ preset(inst);
++
++ if (request & INIT_MASK)
++ initialize(inst);
++ }
++ }
++
++ /* LP coefficients are not in HOLD */
++ if (request & REQUEST_MASK)
++ check_request(inst, request & REQUEST_MASK);
++ }
++}
++
++static void xgkr_start_train(struct phy_device *phydev)
++{
++ struct fsl_xgkr_inst *inst = phydev->priv;
++ struct tx_condition *tx_c = &inst->tx_c;
++ int val = 0, i;
++ int lt_state;
++ unsigned long dead_line;
++ int rx_ok, tx_ok;
++
++ init_inst(inst, 0);
++ start_lt(phydev);
++
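++ /* Make at most two training attempts; i is advanced manually at
++ * the end of each attempt.
++ */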
++ for (i = 0; i < 2;) {
++ dead_line = jiffies + msecs_to_jiffies(500);
++ while (time_before(jiffies, dead_line)) {
++ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
++ FSL_KR_PMD_STATUS);
++ if (val & TRAIN_FAIL) {
++ /* LT has already failed; reset the lane to
++ * keep it from hanging, then restart LT.
++ */
++ reset_gcr0(inst);
++ start_lt(phydev);
++ } else if ((val & PMD_STATUS_SUP_STAT) &&
++ (val & PMD_STATUS_FRAME_LOCK))
++ break;
++ usleep_range(100, 500);
++ }
++
++ if (!((val & PMD_STATUS_FRAME_LOCK) &&
++ (val & PMD_STATUS_SUP_STAT))) {
++ i++;
++ continue;
++ }
++
++ /* init process */
++ rx_ok = false;
++ tx_ok = false;
++ /* LT should finish within 500ms, whether it fails or succeeds. */
++ dead_line = jiffies + msecs_to_jiffies(500);
++
++ while (time_before(jiffies, dead_line)) {
++ /* check whether LT has already failed */
++ lt_state = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
++ FSL_KR_PMD_STATUS);
++ if (lt_state & TRAIN_FAIL) {
++ reset_gcr0(inst);
++ break;
++ }
++
++ rx_ok = check_rx(phydev);
++ tx_ok = tx_c->tx_complete;
++
++ if (rx_ok && tx_ok)
++ break;
++
++ if (!rx_ok)
++ train_rx(inst);
++
++ if (!tx_ok)
++ train_tx(inst);
++
++ usleep_range(100, 500);
++ }
++
++ i++;
++ /* check LT result */
++ if (is_link_training_fail(phydev)) {
++ init_inst(inst, 0);
++ continue;
++ } else {
++ stop_lt(phydev);
++ inst->state = TRAINED;
++ break;
++ }
++ }
++}
++
++static void xgkr_state_machine(struct work_struct *work)
++{
++ struct delayed_work *dwork = to_delayed_work(work);
++ struct fsl_xgkr_inst *inst = container_of(dwork,
++ struct fsl_xgkr_inst,
++ xgkr_wk);
++ struct phy_device *phydev = inst->phydev;
++ int an_state;
++ bool needs_train = false;
++
++ mutex_lock(&phydev->lock);
++
++ switch (inst->state) {
++ case DETECTING_LP:
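++ /* The AN status bit is latched; read twice so the second
++ * read returns the current state.
++ */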
++ phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
++ an_state = phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
++ if ((an_state & KR_AN_MASK))
++ needs_train = true;
++ break;
++ case TRAINED:
++ if (!is_link_up(phydev)) {
++ dev_info(&phydev->mdio.dev,
++ "Detect hotplug, restart training\n");
++ init_inst(inst, 1);
++ start_xgkr_an(phydev);
++ inst->state = DETECTING_LP;
++ }
++ break;
++ }
++
++ if (needs_train)
++ xgkr_start_train(phydev);
++
++ mutex_unlock(&phydev->lock);
++ queue_delayed_work(system_power_efficient_wq, &inst->xgkr_wk,
++ msecs_to_jiffies(XGKR_TIMEOUT));
++}
++
++static int fsl_backplane_probe(struct phy_device *phydev)
++{
++ struct fsl_xgkr_inst *xgkr_inst;
++ struct device_node *phy_node, *lane_node;
++ struct resource res_lane;
++ const char *bm;
++ int ret;
++ int bp_mode;
++ u32 lane[2];
++
++ phy_node = phydev->mdio.dev.of_node;
++ bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
++ if (bp_mode < 0)
++ return 0;
++
++ if (!strcasecmp(bm, "1000base-kx")) {
++ bp_mode = PHY_BACKPLANE_1000BASE_KX;
++ } else if (!strcasecmp(bm, "10gbase-kr")) {
++ bp_mode = PHY_BACKPLANE_10GBASE_KR;
++ } else {
++ dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
++ return -EINVAL;
++ }
++
++ lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
++ if (!lane_node) {
++ dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
++ return -EINVAL;
++ }
++
++ ret = of_address_to_resource(lane_node, 0, &res_lane);
++ if (ret) {
++ dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
++ return ret;
++ }
++
++ of_node_put(lane_node);
++ ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
++ (u32 *)&lane, 2);
++ if (ret) {
++ dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
++ return -EINVAL;
++ }
++
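++ /* Per the fsl,lane-reg property, lane[0] is the offset of this
++ * lane's registers within the SerDes block and lane[1] the
++ * length of that region.
++ */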
++ phydev->priv = devm_ioremap_nocache(&phydev->mdio.dev,
++ res_lane.start + lane[0],
++ lane[1]);
++ if (!phydev->priv) {
++ dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
++ return -ENOMEM;
++ }
++
++ if (bp_mode == PHY_BACKPLANE_1000BASE_KX) {
++ phydev->speed = SPEED_1000;
++ /* configure the lane for 1000BASE-KX */
++ lane_set_1gkx(phydev->priv);
++ return 0;
++ }
++
++ xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
++ sizeof(*xgkr_inst), GFP_KERNEL);
++ if (!xgkr_inst)
++ return -ENOMEM;
++
++ xgkr_inst->reg_base = phydev->priv;
++ xgkr_inst->phydev = phydev;
++ phydev->priv = xgkr_inst;
++
++ if (bp_mode == PHY_BACKPLANE_10GBASE_KR) {
++ phydev->speed = SPEED_10000;
++ INIT_DELAYED_WORK(&xgkr_inst->xgkr_wk, xgkr_state_machine);
++ }
++
++ return 0;
++}
++
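++/* Autonegotiation completion is driven by the xgkr state machine
++ * rather than by phylib, so always report done here.
++ */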
++static int fsl_backplane_aneg_done(struct phy_device *phydev)
++{
++ return 1;
++}
++
++static int fsl_backplane_config_aneg(struct phy_device *phydev)
++{
++ if (phydev->speed == SPEED_10000) {
++ phydev->supported |= SUPPORTED_10000baseKR_Full;
++ start_xgkr_an(phydev);
++ } else if (phydev->speed == SPEED_1000) {
++ phydev->supported |= SUPPORTED_1000baseKX_Full;
++ start_1gkx_an(phydev);
++ }
++
++ phydev->advertising = phydev->supported;
++ phydev->duplex = 1;
++
++ return 0;
++}
++
++static int fsl_backplane_suspend(struct phy_device *phydev)
++{
++ if (phydev->speed == SPEED_10000) {
++ struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
++
++ cancel_delayed_work_sync(&xgkr_inst->xgkr_wk);
++ }
++ return 0;
++}
++
++static int fsl_backplane_resume(struct phy_device *phydev)
++{
++ if (phydev->speed == SPEED_10000) {
++ struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
++
++ init_inst(xgkr_inst, 1);
++ queue_delayed_work(system_power_efficient_wq,
++ &xgkr_inst->xgkr_wk,
++ msecs_to_jiffies(XGKR_TIMEOUT));
++ }
++ return 0;
++}
++
++static int fsl_backplane_read_status(struct phy_device *phydev)
++{
++ if (is_link_up(phydev))
++ phydev->link = 1;
++ else
++ phydev->link = 0;
++
++ return 0;
++}
++
++static struct phy_driver fsl_backplane_driver[] = {
++ {
++ .phy_id = FSL_PCS_PHY_ID,
++ .name = "Freescale Backplane",
++ .phy_id_mask = 0xffffffff,
++ .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
++ SUPPORTED_MII,
++ .probe = fsl_backplane_probe,
++ .aneg_done = fsl_backplane_aneg_done,
++ .config_aneg = fsl_backplane_config_aneg,
++ .read_status = fsl_backplane_read_status,
++ .suspend = fsl_backplane_suspend,
++ .resume = fsl_backplane_resume,
++ },
++};
++
++module_phy_driver(fsl_backplane_driver);
++
++static struct mdio_device_id __maybe_unused freescale_tbl[] = {
++ { FSL_PCS_PHY_ID, 0xffffffff },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, freescale_tbl);
++
++MODULE_DESCRIPTION("Freescale Backplane driver");
++MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index f3e64a89..42cdd5b7 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -585,7 +585,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
+ return 0;
+
+ case SIOCSHWTSTAMP:
+- if (phydev->drv->hwtstamp)
++ if (phydev->drv && phydev->drv->hwtstamp)
+ return phydev->drv->hwtstamp(phydev, ifr);
+ /* fall through */
+
+@@ -610,6 +610,9 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
+ bool trigger = 0;
+ int err;
+
++ if (!phydev->drv)
++ return -EIO;
++
+ mutex_lock(&phydev->lock);
+
+ if (AUTONEG_DISABLE == phydev->autoneg)
+@@ -1009,7 +1012,7 @@ void phy_state_machine(struct work_struct *work)
+
+ old_state = phydev->state;
+
+- if (phydev->drv->link_change_notify)
++ if (phydev->drv && phydev->drv->link_change_notify)
+ phydev->drv->link_change_notify(phydev);
+
+ switch (phydev->state) {
+@@ -1311,6 +1314,9 @@ EXPORT_SYMBOL(phy_write_mmd_indirect);
+ */
+ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+ {
++ if (!phydev->drv)
++ return -EIO;
++
+ /* According to 802.3az,the EEE is supported only in full duplex-mode.
+ * Also EEE feature is active when core is operating with MII, GMII
+ * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
+@@ -1388,6 +1394,9 @@ EXPORT_SYMBOL(phy_init_eee);
+ */
+ int phy_get_eee_err(struct phy_device *phydev)
+ {
++ if (!phydev->drv)
++ return -EIO;
++
+ return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
+ }
+ EXPORT_SYMBOL(phy_get_eee_err);
+@@ -1404,6 +1413,9 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
+ {
+ int val;
+
++ if (!phydev->drv)
++ return -EIO;
++
+ /* Get Supported EEE */
+ val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
+ if (val < 0)
+@@ -1437,6 +1449,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
+ {
+ int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+
++ if (!phydev->drv)
++ return -EIO;
++
+ /* Mask prohibited EEE modes */
+ val &= ~phydev->eee_broken_modes;
+
+@@ -1448,7 +1463,7 @@ EXPORT_SYMBOL(phy_ethtool_set_eee);
+
+ int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+ {
+- if (phydev->drv->set_wol)
++ if (phydev->drv && phydev->drv->set_wol)
+ return phydev->drv->set_wol(phydev, wol);
+
+ return -EOPNOTSUPP;
+@@ -1457,7 +1472,7 @@ EXPORT_SYMBOL(phy_ethtool_set_wol);
+
+ void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+ {
+- if (phydev->drv->get_wol)
++ if (phydev->drv && phydev->drv->get_wol)
+ phydev->drv->get_wol(phydev, wol);
+ }
+ EXPORT_SYMBOL(phy_ethtool_get_wol);
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 5fdc491e..039f9664 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1046,7 +1046,7 @@ int phy_suspend(struct phy_device *phydev)
+ if (wol.wolopts)
+ return -EBUSY;
+
+- if (phydrv->suspend)
++ if (phydev->drv && phydrv->suspend)
+ ret = phydrv->suspend(phydev);
+
+ if (ret)
+@@ -1063,7 +1063,7 @@ int phy_resume(struct phy_device *phydev)
+ struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+ int ret = 0;
+
+- if (phydrv->resume)
++ if (phydev->drv && phydrv->resume)
+ ret = phydrv->resume(phydev);
+
+ if (ret)
+@@ -1726,7 +1726,7 @@ static int phy_remove(struct device *dev)
+ phydev->state = PHY_DOWN;
+ mutex_unlock(&phydev->lock);
+
+- if (phydev->drv->remove)
++ if (phydev->drv && phydev->drv->remove)
+ phydev->drv->remove(phydev);
+ phydev->drv = NULL;
+
+diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
+index 34f58f23..52ddddbe 100644
+--- a/drivers/net/phy/swphy.c
++++ b/drivers/net/phy/swphy.c
+@@ -77,6 +77,7 @@ static const struct swmii_regs duplex[] = {
+ static int swphy_decode_speed(int speed)
+ {
+ switch (speed) {
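++ /* The emulated MII registers have no 10G speed encoding, so
++ * report 10G links as 1000.
++ */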
++ case 10000:
+ case 1000:
+ return SWMII_SPEED_1000;
+ case 100:
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 850c8b51..5f253f1a 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -81,6 +81,7 @@ typedef enum {
+ PHY_INTERFACE_MODE_MOCA,
+ PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_TRGMII,
++ PHY_INTERFACE_MODE_SGMII_2500,
+ PHY_INTERFACE_MODE_MAX,
+ } phy_interface_t;
+
+@@ -784,6 +785,9 @@ int phy_stop_interrupts(struct phy_device *phydev);
+
+ static inline int phy_read_status(struct phy_device *phydev)
+ {
++ if (!phydev->drv)
++ return -EIO;
++
+ return phydev->drv->read_status(phydev);
+ }
+
+--
+2.14.1
+
diff --git a/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch b/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch
new file mode 100644
index 0000000000..eab1e656d2
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch
@@ -0,0 +1,11518 @@
+From 464b4d9b8282e0f1e5040e4914505f91ce4d3750 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:06:25 +0800
+Subject: [PATCH] fsl-mc: layerscape support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is an integrated patch for layerscape mc-bus support.
+
+Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
+Signed-off-by: Shiva Kerdel <shiva@exdev.nl>
+Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/staging/fsl-mc/bus/Kconfig | 41 +-
+ drivers/staging/fsl-mc/bus/Makefile | 10 +-
+ drivers/staging/fsl-mc/bus/dpbp-cmd.h | 80 ++
+ drivers/staging/fsl-mc/bus/dpbp.c | 450 +--------
+ drivers/staging/fsl-mc/bus/dpcon-cmd.h | 85 ++
+ drivers/staging/fsl-mc/bus/dpcon.c | 317 ++++++
+ drivers/staging/fsl-mc/bus/dpio/Makefile | 11 +
+ .../{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} | 73 +-
+ drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 296 ++++++
+ drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt | 135 +++
+ drivers/staging/fsl-mc/bus/dpio/dpio-service.c | 689 +++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/dpio.c | 224 +++++
+ drivers/staging/fsl-mc/bus/dpio/dpio.h | 109 ++
+ drivers/staging/fsl-mc/bus/dpio/qbman-portal.c | 1049 ++++++++++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman-portal.h | 662 ++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 853 ++++++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 +++
+ drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 171 ++++
+ drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 112 +--
+ drivers/staging/fsl-mc/bus/dpmcp.c | 374 +------
+ drivers/staging/fsl-mc/bus/dpmcp.h | 127 +--
+ drivers/staging/fsl-mc/bus/dpmng-cmd.h | 14 +-
+ drivers/staging/fsl-mc/bus/dpmng.c | 37 +-
+ drivers/staging/fsl-mc/bus/dprc-cmd.h | 82 +-
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 38 +-
+ drivers/staging/fsl-mc/bus/dprc.c | 629 +-----------
+ drivers/staging/fsl-mc/bus/fsl-mc-allocator.c | 78 +-
+ drivers/staging/fsl-mc/bus/fsl-mc-bus.c | 318 +++---
+ drivers/staging/fsl-mc/bus/fsl-mc-iommu.c | 104 ++
+ drivers/staging/fsl-mc/bus/fsl-mc-msi.c | 3 +-
+ drivers/staging/fsl-mc/bus/fsl-mc-private.h | 6 +-
+ .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 11 +-
+ drivers/staging/fsl-mc/bus/mc-io.c | 4 +-
+ drivers/staging/fsl-mc/bus/mc-ioctl.h | 22 +
+ drivers/staging/fsl-mc/bus/mc-restool.c | 405 ++++++++
+ drivers/staging/fsl-mc/bus/mc-sys.c | 14 +-
+ drivers/staging/fsl-mc/include/dpaa2-fd.h | 706 +++++++++++++
+ drivers/staging/fsl-mc/include/dpaa2-global.h | 202 ++++
+ drivers/staging/fsl-mc/include/dpaa2-io.h | 190 ++++
+ drivers/staging/fsl-mc/include/dpbp-cmd.h | 185 ----
+ drivers/staging/fsl-mc/include/dpbp.h | 158 +--
+ drivers/staging/fsl-mc/include/dpcon.h | 115 +++
+ drivers/staging/fsl-mc/include/dpmng.h | 16 +-
+ drivers/staging/fsl-mc/include/dpopr.h | 110 ++
+ drivers/staging/fsl-mc/include/dprc.h | 470 +++------
+ drivers/staging/fsl-mc/include/mc-bus.h | 7 +-
+ drivers/staging/fsl-mc/include/mc-cmd.h | 44 +-
+ drivers/staging/fsl-mc/include/mc-sys.h | 3 +-
+ drivers/staging/fsl-mc/include/mc.h | 17 +-
+ 49 files changed, 7380 insertions(+), 2612 deletions(-)
+ create mode 100644 drivers/staging/fsl-mc/bus/dpbp-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpcon-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile
+ rename drivers/staging/fsl-mc/{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} (64%)
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h
+ create mode 100644 drivers/staging/fsl-mc/bus/fsl-mc-iommu.c
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c
+ create mode 100644 drivers/staging/fsl-mc/include/dpaa2-fd.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpaa2-global.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpaa2-io.h
+ delete mode 100644 drivers/staging/fsl-mc/include/dpbp-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpcon.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpopr.h
+
+diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
+index 1f959339..67847c0e 100644
+--- a/drivers/staging/fsl-mc/bus/Kconfig
++++ b/drivers/staging/fsl-mc/bus/Kconfig
+@@ -1,25 +1,40 @@
+ #
+-# Freescale Management Complex (MC) bus drivers
++# DPAA2 fsl-mc bus
+ #
+-# Copyright (C) 2014 Freescale Semiconductor, Inc.
++# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ #
+ # This file is released under the GPLv2
+ #
+
+ config FSL_MC_BUS
+- bool "Freescale Management Complex (MC) bus driver"
+- depends on OF && ARM64
++ bool "QorIQ DPAA2 fsl-mc bus driver"
++ depends on OF && ARCH_LAYERSCAPE
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
+- Driver to enable the bus infrastructure for the Freescale
+- QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
+- module of the QorIQ LS2 SoCs, that does resource management
+- for hardware building-blocks in the SoC that can be used
+- to dynamically create networking hardware objects such as
+- network interfaces (NICs), crypto accelerator instances,
+- or L2 switches.
++ Driver to enable the bus infrastructure for the QorIQ DPAA2
++ architecture. The fsl-mc bus driver handles discovery of
++ DPAA2 objects (which are represented as Linux devices) and
++ binding objects to drivers.
+
+- Only enable this option when building the kernel for
+- Freescale QorQIQ LS2xxxx SoCs.
++config FSL_MC_DPIO
++ tristate "QorIQ DPAA2 DPIO driver"
++ depends on FSL_MC_BUS
++ help
++ Driver for the DPAA2 DPIO object. A DPIO provides queue and
++ buffer management facilities for software to interact with
++ other DPAA2 objects. This driver does not expose the DPIO
++ objects individually, but groups them under a service layer
++ API.
+
++config FSL_QBMAN_DEBUG
++ tristate "Freescale QBMAN Debug APIs"
++ depends on FSL_MC_DPIO
++ help
++ QBMan debug assistant APIs.
+
++config FSL_MC_RESTOOL
++ tristate "Freescale Management Complex (MC) restool driver"
++ depends on FSL_MC_BUS
++ help
++ Driver that provides kernel support for the Freescale Management
++ Complex resource manager user-space tool.
+diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
+index 38716fd5..e7e2239c 100644
+--- a/drivers/staging/fsl-mc/bus/Makefile
++++ b/drivers/staging/fsl-mc/bus/Makefile
+@@ -17,4 +17,12 @@ mc-bus-driver-objs := fsl-mc-bus.o \
+ fsl-mc-msi.o \
+ irq-gic-v3-its-fsl-mc-msi.o \
+ dpmcp.o \
+- dpbp.o
++ dpbp.o \
++ dpcon.o \
++ fsl-mc-iommu.o
++
++# MC DPIO driver
++obj-$(CONFIG_FSL_MC_DPIO) += dpio/
++
++# MC restool kernel support
++obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o
+diff --git a/drivers/staging/fsl-mc/bus/dpbp-cmd.h b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
+new file mode 100644
+index 00000000..8aa65452
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
+@@ -0,0 +1,80 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPBP_CMD_H
++#define _FSL_DPBP_CMD_H
++
++/* DPBP Version */
++#define DPBP_VER_MAJOR 3
++#define DPBP_VER_MINOR 2
++
++/* Command versioning */
++#define DPBP_CMD_BASE_VERSION 1
++#define DPBP_CMD_ID_OFFSET 4
++
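++/* A command ID is the raw ID shifted left by DPBP_CMD_ID_OFFSET,
++ * OR'ed with the command version in the low bits.
++ */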
++#define DPBP_CMD(id) (((id) << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
++
++/* Command IDs */
++#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
++#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
++#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04)
++
++#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
++#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
++#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
++#define DPBP_CMDID_RESET DPBP_CMD(0x005)
++#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006)
++
++struct dpbp_cmd_open {
++ __le32 dpbp_id;
++};
++
++struct dpbp_cmd_destroy {
++ __le32 object_id;
++};
++
++#define DPBP_ENABLE 0x1
++
++struct dpbp_rsp_is_enabled {
++ u8 enabled;
++};
++
++struct dpbp_rsp_get_attributes {
++ /* response word 0 */
++ __le16 pad;
++ __le16 bpid;
++ __le32 id;
++ /* response word 1 */
++ __le16 version_major;
++ __le16 version_minor;
++};
++
++#endif /* _FSL_DPBP_CMD_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
+index 5d4cd812..d9e450a6 100644
+--- a/drivers/staging/fsl-mc/bus/dpbp.c
++++ b/drivers/staging/fsl-mc/bus/dpbp.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -32,7 +32,8 @@
+ #include "../include/mc-sys.h"
+ #include "../include/mc-cmd.h"
+ #include "../include/dpbp.h"
+-#include "../include/dpbp-cmd.h"
++
++#include "dpbp-cmd.h"
+
+ /**
+ * dpbp_open() - Open a control session for the specified object.
+@@ -104,74 +105,6 @@ int dpbp_close(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(dpbp_close);
+
+-/**
+- * dpbp_create() - Create the DPBP object.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @cfg: Configuration structure
+- * @token: Returned token; use in subsequent API calls
+- *
+- * Create the DPBP object, allocate required resources and
+- * perform required initialization.
+- *
+- * The object can be created either by declaring it in the
+- * DPL file, or by calling this function.
+- * This function returns a unique authentication token,
+- * associated with the specific object ID and the specific MC
+- * portal; this token must be used in all subsequent calls to
+- * this specific object. For objects that are created using the
+- * DPL file, call dpbp_open function to get an authentication
+- * token first.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_create(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- const struct dpbp_cfg *cfg,
+- u16 *token)
+-{
+- struct mc_command cmd = { 0 };
+- int err;
+-
+- (void)(cfg); /* unused */
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
+- cmd_flags, 0);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- *token = mc_cmd_hdr_read_token(&cmd);
+-
+- return 0;
+-}
+-
+-/**
+- * dpbp_destroy() - Destroy the DPBP object and release all its resources.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- *
+- * Return: '0' on Success; error code otherwise.
+- */
+-int dpbp_destroy(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+ /**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -250,6 +183,7 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+
+ return 0;
+ }
++EXPORT_SYMBOL(dpbp_is_enabled);
+
+ /**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+@@ -272,310 +206,7 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+ }
+-
+-/**
+- * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: Identifies the interrupt index to configure
+- * @irq_cfg: IRQ configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dpbp_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_set_irq *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_set_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+- cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
+- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_get_irq() - Get IRQ information from the DPBP.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @type: Interrupt type: 0 represents message interrupt
+- * type (both irq_addr and irq_val are valid)
+- * @irq_cfg: IRQ attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dpbp_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_get_irq *cmd_params;
+- struct dpbp_rsp_get_irq *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq *)cmd.params;
+- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+- irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
+- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+- *type = le32_to_cpu(rsp_params->type);
+-
+- return 0;
+-}
+-
+-/**
+- * dpbp_set_irq_enable() - Set overall interrupt state.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @en: Interrupt state - enable = 1, disable = 0
+- *
+- * Allows GPP software to control when interrupts are generated.
+- * Each interrupt can have up to 32 causes. The enable/disable control's the
+- * overall interrupt state. if the interrupt is disabled no causes will cause
+- * an interrupt.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_set_irq_enable *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_set_irq_enable *)cmd.params;
+- cmd_params->enable = en & DPBP_ENABLE;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_get_irq_enable() - Get overall interrupt state
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @en: Returned interrupt state - enable = 1, disable = 0
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_get_irq_enable *cmd_params;
+- struct dpbp_rsp_get_irq_enable *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq_enable *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq_enable *)cmd.params;
+- *en = rsp_params->enabled & DPBP_ENABLE;
+- return 0;
+-}
+-
+-/**
+- * dpbp_set_irq_mask() - Set interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Event mask to trigger interrupt;
+- * each bit:
+- * 0 = ignore event
+- * 1 = consider event for asserting IRQ
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_set_irq_mask *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_set_irq_mask *)cmd.params;
+- cmd_params->mask = cpu_to_le32(mask);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_get_irq_mask() - Get interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Returned event mask to trigger interrupt
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_get_irq_mask *cmd_params;
+- struct dpbp_rsp_get_irq_mask *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq_mask *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq_mask *)cmd.params;
+- *mask = le32_to_cpu(rsp_params->mask);
+-
+- return 0;
+-}
+-
+-/**
+- * dpbp_get_irq_status() - Get the current status of any pending interrupts.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @status: Returned interrupts status - one bit per cause:
+- * 0 = no interrupt pending
+- * 1 = interrupt pending
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_get_irq_status *cmd_params;
+- struct dpbp_rsp_get_irq_status *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(*status);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq_status *)cmd.params;
+- *status = le32_to_cpu(rsp_params->status);
+-
+- return 0;
+-}
+-
+-/**
+- * dpbp_clear_irq_status() - Clear a pending interrupt's status
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @status: Bits to clear (W1C) - one bit per cause:
+- * 0 = don't change
+- * 1 = clear status bit
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_clear_irq_status *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_clear_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(status);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
++EXPORT_SYMBOL(dpbp_reset);
+
+ /**
+ * dpbp_get_attributes - Retrieve DPBP attributes.
+@@ -609,83 +240,40 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
+- attr->version.major = le16_to_cpu(rsp_params->version_major);
+- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
+ return 0;
+ }
+ EXPORT_SYMBOL(dpbp_get_attributes);
+
+ /**
+- * dpbp_set_notifications() - Set notifications towards software
+- * @mc_io: Pointer to MC portal's I/O object
++ * dpbp_get_api_version - Get Data Path Buffer Pool API version
++ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @cfg: notifications configuration
++ * @major_ver: Major version of Buffer Pool API
++ * @minor_ver: Minor version of Buffer Pool API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+-int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg)
++int dpbp_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
+ {
+ struct mc_command cmd = { 0 };
+- struct dpbp_cmd_set_notifications *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_set_notifications *)cmd.params;
+- cmd_params->depletion_entry = cpu_to_le32(cfg->depletion_entry);
+- cmd_params->depletion_exit = cpu_to_le32(cfg->depletion_exit);
+- cmd_params->surplus_entry = cpu_to_le32(cfg->surplus_entry);
+- cmd_params->surplus_exit = cpu_to_le32(cfg->surplus_exit);
+- cmd_params->options = cpu_to_le16(cfg->options);
+- cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+- cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_get_notifications() - Get the notifications configuration
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @cfg: notifications configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_rsp_get_notifications *rsp_params;
+ int err;
+
+ /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
+- cmd_flags,
+- token);
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
+
+- /* send command to mc*/
++ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_notifications *)cmd.params;
+- cfg->depletion_entry = le32_to_cpu(rsp_params->depletion_entry);
+- cfg->depletion_exit = le32_to_cpu(rsp_params->depletion_exit);
+- cfg->surplus_entry = le32_to_cpu(rsp_params->surplus_entry);
+- cfg->surplus_exit = le32_to_cpu(rsp_params->surplus_exit);
+- cfg->options = le16_to_cpu(rsp_params->options);
+- cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+- cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+ }
++EXPORT_SYMBOL(dpbp_get_api_version);
+diff --git a/drivers/staging/fsl-mc/bus/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
+new file mode 100644
+index 00000000..2bb66988
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
+@@ -0,0 +1,85 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPCON_CMD_H
++#define _FSL_DPCON_CMD_H
++
++/* DPCON Version */
++#define DPCON_VER_MAJOR 3
++#define DPCON_VER_MINOR 2
++
++/* Command versioning */
++#define DPCON_CMD_BASE_VERSION 1
++#define DPCON_CMD_ID_OFFSET 4
++
++#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
++
++/* Command IDs */
++#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
++#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
++#define DPCON_CMDID_GET_API_VERSION DPCON_CMD(0xa08)
++
++#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
++#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
++#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
++#define DPCON_CMDID_RESET DPCON_CMD(0x005)
++#define DPCON_CMDID_IS_ENABLED DPCON_CMD(0x006)
++
++#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
++
++struct dpcon_cmd_open {
++ __le32 dpcon_id;
++};
++
++#define DPCON_ENABLE 1
++
++struct dpcon_rsp_is_enabled {
++ u8 enabled;
++};
++
++struct dpcon_rsp_get_attr {
++ /* response word 0 */
++ __le32 id;
++ __le16 qbman_ch_id;
++ u8 num_priorities;
++ u8 pad;
++};
++
++struct dpcon_cmd_set_notification {
++ /* cmd word 0 */
++ __le32 dpio_id;
++ u8 priority;
++ u8 pad[3];
++ /* cmd word 1 */
++ __le64 user_ctx;
++};
++
++#endif /* _FSL_DPCON_CMD_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpcon.c b/drivers/staging/fsl-mc/bus/dpcon.c
+new file mode 100644
+index 00000000..eb713578
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpcon.c
+@@ -0,0 +1,317 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../include/mc-sys.h"
++#include "../include/mc-cmd.h"
++#include "../include/dpcon.h"
++
++#include "dpcon-cmd.h"
++
++/**
++ * dpcon_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpcon_id: DPCON unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpcon_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpcon_id,
++ u16 *token)
++{
++ struct mc_command cmd = { 0 };
++ struct dpcon_cmd_open *dpcon_cmd;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
++ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_open);
++
++/**
++ * dpcon_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_close);
++
++/**
++ * dpcon_enable() - Enable the DPCON
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_enable);
++
++/**
++ * dpcon_disable() - Disable the DPCON
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_disable);
++
++/**
++ * dpcon_is_enabled() - Check if the DPCON is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpcon_rsp_is_enabled *dpcon_rsp;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
++ *en = dpcon_rsp->enabled & DPCON_ENABLE;
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_is_enabled);
++
++/**
++ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_reset);
++
++/**
++ * dpcon_get_attributes() - Retrieve DPCON attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @attr: Object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpcon_rsp_get_attr *dpcon_rsp;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
++ attr->id = le32_to_cpu(dpcon_rsp->id);
++ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
++ attr->num_priorities = dpcon_rsp->num_priorities;
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_get_attributes);
++
++/**
++ * dpcon_set_notification() - Set DPCON notification destination
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @cfg: Notification parameters
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_set_notification(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpcon_cmd_set_notification *dpcon_cmd;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
++ cmd_flags,
++ token);
++ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
++ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
++ dpcon_cmd->priority = cfg->priority;
++ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_set_notification);
++
++/**
++ * dpcon_get_api_version - Get Data Path Concentrator API version
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of DPCON API
++ * @minor_ver: Minor version of DPCON API
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_get_api_version);
+diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/staging/fsl-mc/bus/dpio/Makefile
+new file mode 100644
+index 00000000..1c28794e
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/Makefile
+@@ -0,0 +1,11 @@
++#
++# QorIQ DPAA2 DPIO driver
++#
++
++subdir-ccflags-y := -Werror
++
++obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
++
++fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
++
++obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o
+diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
+similarity index 64%
+rename from drivers/staging/fsl-mc/include/dpcon-cmd.h
+rename to drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
+index 536b2ef1..b2dc6e76 100644
+--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
+@@ -1,4 +1,6 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +13,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -29,34 +30,46 @@
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+-#ifndef _FSL_DPCON_CMD_H
+-#define _FSL_DPCON_CMD_H
++#ifndef _FSL_DPIO_CMD_H
++#define _FSL_DPIO_CMD_H
++
++/* DPIO Version */
++#define DPIO_VER_MAJOR 4
++#define DPIO_VER_MINOR 2
++
++/* Command Versioning */
++
++#define DPIO_CMD_ID_OFFSET 4
++#define DPIO_CMD_BASE_VERSION 1
+
+-/* DPCON Version */
+-#define DPCON_VER_MAJOR 2
+-#define DPCON_VER_MINOR 1
++#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
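++
++/*
++ * Worked example of the encoding above (illustrative comment only):
++ * DPIO_CMDID_OPEN below is DPIO_CMD(0x803), which expands to
++ * (0x803 << 4) | 1 = 0x8031, i.e. command ID 0x803 at command
++ * interface version 1.
++ */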
+
+ /* Command IDs */
+-#define DPCON_CMDID_CLOSE 0x800
+-#define DPCON_CMDID_OPEN 0x808
+-#define DPCON_CMDID_CREATE 0x908
+-#define DPCON_CMDID_DESTROY 0x900
+-
+-#define DPCON_CMDID_ENABLE 0x002
+-#define DPCON_CMDID_DISABLE 0x003
+-#define DPCON_CMDID_GET_ATTR 0x004
+-#define DPCON_CMDID_RESET 0x005
+-#define DPCON_CMDID_IS_ENABLED 0x006
+-
+-#define DPCON_CMDID_SET_IRQ 0x010
+-#define DPCON_CMDID_GET_IRQ 0x011
+-#define DPCON_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPCON_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPCON_CMDID_SET_IRQ_MASK 0x014
+-#define DPCON_CMDID_GET_IRQ_MASK 0x015
+-#define DPCON_CMDID_GET_IRQ_STATUS 0x016
+-#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017
+-
+-#define DPCON_CMDID_SET_NOTIFICATION 0x100
+-
+-#endif /* _FSL_DPCON_CMD_H */
++#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
++#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
++#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
++#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
++#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
++#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
++
++struct dpio_cmd_open {
++ __le32 dpio_id;
++};
++
++#define DPIO_CHANNEL_MODE_MASK 0x3
++
++struct dpio_rsp_get_attr {
++ /* cmd word 0 */
++ __le32 id;
++ __le16 qbman_portal_id;
++ u8 num_priorities;
++ u8 channel_mode;
++ /* cmd word 1 */
++ __le64 qbman_portal_ce_addr;
++ /* cmd word 2 */
++ __le64 qbman_portal_ci_addr;
++ /* cmd word 3 */
++ __le32 qbman_version;
++};
++
++#endif /* _FSL_DPIO_CMD_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+new file mode 100644
+index 00000000..8c8244a1
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+@@ -0,0 +1,296 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++
++#include "../../include/mc.h"
++#include "../../include/dpaa2-io.h"
++
++#include "qbman-portal.h"
++#include "dpio.h"
++#include "dpio-cmd.h"
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Freescale Semiconductor, Inc");
++MODULE_DESCRIPTION("DPIO Driver");
++
++struct dpio_priv {
++ struct dpaa2_io *io;
++};
++
++static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct dpio_priv *priv = dev_get_drvdata(dev);
++
++ return dpaa2_io_irq(priv->io);
++}
++
++static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
++{
++ struct fsl_mc_device_irq *irq;
++
++ irq = dpio_dev->irqs[0];
++
++ /* clear the affinity hint */
++ irq_set_affinity_hint(irq->msi_desc->irq, NULL);
++}
++
++static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
++{
++ struct dpio_priv *priv;
++ int error;
++ struct fsl_mc_device_irq *irq;
++ cpumask_t mask;
++
++ priv = dev_get_drvdata(&dpio_dev->dev);
++
++ irq = dpio_dev->irqs[0];
++ error = devm_request_irq(&dpio_dev->dev,
++ irq->msi_desc->irq,
++ dpio_irq_handler,
++ 0,
++ dev_name(&dpio_dev->dev),
++ &dpio_dev->dev);
++ if (error < 0) {
++ dev_err(&dpio_dev->dev,
++ "devm_request_irq() failed: %d\n",
++ error);
++ return error;
++ }
++
++ /* set the affinity hint */
++ cpumask_clear(&mask);
++ cpumask_set_cpu(cpu, &mask);
++ if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
++ dev_err(&dpio_dev->dev,
++ "irq_set_affinity failed irq %d cpu %d\n",
++ irq->msi_desc->irq, cpu);
++
++ return 0;
++}
++
++static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
++{
++ struct dpio_attr dpio_attrs;
++ struct dpaa2_io_desc desc;
++ struct dpio_priv *priv;
++ int err = -ENOMEM;
++ struct device *dev = &dpio_dev->dev;
++ static int next_cpu = -1;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ goto err_priv_alloc;
++
++ dev_set_drvdata(dev, priv);
++
++ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
++ if (err) {
++ dev_dbg(dev, "MC portal allocation failed\n");
++ err = -EPROBE_DEFER;
++ goto err_mcportal;
++ }
++
++ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
++ &dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_open() failed\n");
++ goto err_open;
++ }
++
++ err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
++ &dpio_attrs);
++ if (err) {
++ dev_err(dev, "dpio_get_attributes() failed %d\n", err);
++ goto err_get_attr;
++ }
++ desc.qman_version = dpio_attrs.qbman_version;
++
++ err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_enable() failed %d\n", err);
++ goto err_get_attr;
++ }
++
++ /* initialize DPIO descriptor */
++ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
++ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
++ desc.dpio_id = dpio_dev->obj_desc.id;
++
++ /* get the cpu to use for the affinity hint */
++ if (next_cpu == -1)
++ next_cpu = cpumask_first(cpu_online_mask);
++ else
++ next_cpu = cpumask_next(next_cpu, cpu_online_mask);
++
++ if (!cpu_possible(next_cpu)) {
++ dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
++ err = -ERANGE;
++ goto err_allocate_irqs;
++ }
++ desc.cpu = next_cpu;
++
++ /*
++ * Set the CENA regs to be the cache enabled area of the portal to
++ * achieve the best performance.
++ */
++ desc.regs_cena = ioremap_cache_ns(dpio_dev->regions[0].start,
++ resource_size(&dpio_dev->regions[0]));
++ desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
++ resource_size(&dpio_dev->regions[1]));
++
++ err = fsl_mc_allocate_irqs(dpio_dev);
++ if (err) {
++ dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
++ goto err_allocate_irqs;
++ }
++
++ err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
++ if (err)
++ goto err_register_dpio_irq;
++
++ priv->io = dpaa2_io_create(&desc);
++ if (!priv->io) {
++ dev_err(dev, "dpaa2_io_create failed\n");
++ goto err_dpaa2_io_create;
++ }
++
++ dev_info(dev, "probed\n");
++ dev_dbg(dev, " receives_notifications = %d\n",
++ desc.receives_notifications);
++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++ fsl_mc_portal_free(dpio_dev->mc_io);
++
++ return 0;
++
++err_dpaa2_io_create:
++ unregister_dpio_irq_handlers(dpio_dev);
++err_register_dpio_irq:
++ fsl_mc_free_irqs(dpio_dev);
++err_allocate_irqs:
++ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++err_get_attr:
++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++err_open:
++ fsl_mc_portal_free(dpio_dev->mc_io);
++err_mcportal:
++ dev_set_drvdata(dev, NULL);
++err_priv_alloc:
++ return err;
++}
++
++/* Tear down interrupts for a given DPIO object */
++static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
++{
++ unregister_dpio_irq_handlers(dpio_dev);
++ fsl_mc_free_irqs(dpio_dev);
++}
++
++static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
++{
++ struct device *dev;
++ struct dpio_priv *priv;
++ int err;
++
++ dev = &dpio_dev->dev;
++ priv = dev_get_drvdata(dev);
++
++ dpaa2_io_down(priv->io);
++
++ dpio_teardown_irqs(dpio_dev);
++
++ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
++ if (err) {
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_mcportal;
++ }
++
++ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
++ &dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_open() failed\n");
++ goto err_open;
++ }
++
++ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++
++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++
++ fsl_mc_portal_free(dpio_dev->mc_io);
++
++ dev_set_drvdata(dev, NULL);
++
++ return 0;
++
++err_open:
++ fsl_mc_portal_free(dpio_dev->mc_io);
++err_mcportal:
++ return err;
++}
++
++static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpio",
++ },
++ { .vendor = 0x0 }
++};
++
++static struct fsl_mc_driver dpaa2_dpio_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_dpio_probe,
++ .remove = dpaa2_dpio_remove,
++ .match_id_table = dpaa2_dpio_match_id_table
++};
++
++static int dpio_driver_init(void)
++{
++ return fsl_mc_driver_register(&dpaa2_dpio_driver);
++}
++
++static void dpio_driver_exit(void)
++{
++ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
++}
++module_init(dpio_driver_init);
++module_exit(dpio_driver_exit);
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
+new file mode 100644
+index 00000000..0ba67716
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
+@@ -0,0 +1,135 @@
++Copyright 2016 NXP
++
++Introduction
++------------
++
++A DPAA2 DPIO (Data Path I/O) is a hardware object that provides
++interfaces to enqueue and dequeue frames to/from network interfaces
++and other accelerators. A DPIO also provides hardware buffer
++pool management for network interfaces.
++
++This document provides an overview of the Linux DPIO driver, its
++subcomponents, and its APIs.
++
++See Documentation/dpaa2/overview.txt for a general overview of DPAA2
++and the general DPAA2 driver architecture in Linux.
++
++Driver Overview
++---------------
++
++The DPIO driver is bound to DPIO objects discovered on the fsl-mc bus and
++provides services that:
++ A) allow other drivers, such as the Ethernet driver, to enqueue and dequeue
++ frames for their respective objects
++ B) allow drivers to register callbacks for data availability notifications
++ when data becomes available on a queue or channel
++ C) allow drivers to manage hardware buffer pools
++
++The Linux DPIO driver consists of three primary components:
++ DPIO object driver-- fsl-mc driver that manages the DPIO object
++ DPIO service-- provides APIs to other Linux drivers for services
++ QBman portal interface-- sends portal commands, gets responses
++
++ fsl-mc other
++ bus drivers
++ | |
++ +---+----+ +------+-----+
++ |DPIO obj| |DPIO service|
++ | driver |---| (DPIO) |
++ +--------+ +------+-----+
++ |
++ +------+-----+
++ | QBman |
++ | portal i/f |
++ +------------+
++ |
++ hardware
++
++The diagram below shows how the DPIO driver components fit with the other
++DPAA2 Linux driver components:
++ +------------+
++ | OS Network |
++ | Stack |
++ +------------+ +------------+
++ | Allocator |. . . . . . . | Ethernet |
++ |(DPMCP,DPBP)| | (DPNI) |
++ +-.----------+ +---+---+----+
++ . . ^ |
++ . . <data avail, | |<enqueue,
++ . . tx confirm> | | dequeue>
++ +-------------+ . | |
++ | DPRC driver | . +--------+ +------------+
++ | (DPRC) | . . |DPIO obj| |DPIO service|
++ +----------+--+ | driver |-| (DPIO) |
++ | +--------+ +------+-----+
++ |<dev add/remove> +------|-----+
++ | | QBman |
++ +----+--------------+ | portal i/f |
++ | MC-bus driver | +------------+
++ | | |
++ | /soc/fsl-mc | |
++ +-------------------+ |
++ |
++ =========================================|=========|========================
++ +-+--DPIO---|-----------+
++ | | |
++ | QBman Portal |
++ +-----------------------+
++
++ ============================================================================
++
++
++DPIO Object Driver (dpio-driver.c)
++----------------------------------
++
++ The dpio-driver component registers with the fsl-mc bus to handle objects of
++ type "dpio". The implementation of probe() handles basic initialization
++ of the DPIO including mapping of the DPIO regions (the QBman SW portal)
++ and initializing interrupts and registering irq handlers. The dpio-driver
++ registers the probed DPIO with dpio-service.
++
++DPIO service (dpio-service.c, dpaa2-io.h)
++------------------------------------------
++
++ The dpio service component provides queuing, notification, and buffer
++ management services to DPAA2 drivers, such as the Ethernet driver. A system
++ will typically allocate one DPIO object per CPU to allow queuing operations
++ to happen simultaneously across all CPUs. Short usage sketches follow each
++ API group below.
++
++ Notification handling
++ dpaa2_io_service_register()
++ dpaa2_io_service_deregister()
++ dpaa2_io_service_rearm()
++
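++ A minimal sketch of the notification flow ("priv", "my_cb" and "fqid"
++ are illustrative placeholders, not part of this API; error handling
++ trimmed):
++
++     static void my_cb(struct dpaa2_io_notification_ctx *ctx)
++     {
++         /* data is available; process it, then rearm the source */
++     }
++
++     priv->nctx.is_cdan = 0;      /* FQDAN: watch a frame queue */
++     priv->nctx.id = fqid;        /* the frame queue to watch */
++     priv->nctx.desired_cpu = -1; /* no CPU affinity preference */
++     priv->nctx.cb = my_cb;
++     err = dpaa2_io_service_register(NULL, &priv->nctx);
++     /* then issue the MC "attach" command for the object, passing
++      * along nctx.dpio_id and nctx.qman64 (see the kernel-doc of
++      * dpaa2_io_service_register() in dpio-service.c) */
++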
++ Queuing
++ dpaa2_io_service_pull_fq()
++ dpaa2_io_service_pull_channel()
++ dpaa2_io_service_enqueue_fq()
++ dpaa2_io_service_enqueue_qd()
++ dpaa2_io_store_create()
++ dpaa2_io_store_destroy()
++ dpaa2_io_store_next()
++
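++ A sketch of a pull dequeue through a store ("dev" and "fqid" are
++ illustrative; error handling trimmed):
++
++     struct dpaa2_io_store *s;
++     struct dpaa2_dq *dq;
++     int is_last = 0;
++
++     s = dpaa2_io_store_create(16, dev); /* room for 16 results */
++     err = dpaa2_io_service_pull_fq(NULL, fqid, s);
++     while (!is_last) {
++         dq = dpaa2_io_store_next(s, &is_last);
++         if (!dq)
++             continue; /* result not yet written back by QBMan */
++         /* consume the result, e.g. via the dpaa2-global.h parsers */
++     }
++     dpaa2_io_store_destroy(s);
++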
++ Buffer pool management
++ dpaa2_io_service_release()
++ dpaa2_io_service_acquire()
++
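++ Buffer pool usage in brief ("bpid" and "buf_dma_addr" are
++ illustrative):
++
++     u64 addr = buf_dma_addr; /* DMA address of a buffer */
++     int n;
++
++     /* seed one buffer into the pool */
++     err = dpaa2_io_service_release(NULL, bpid, &addr, 1);
++
++     /* take buffers back; returns how many were actually acquired */
++     n = dpaa2_io_service_acquire(NULL, bpid, &addr, 1);
++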
++QBman portal interface (qbman-portal.c)
++---------------------------------------
++
++ The qbman-portal component provides APIs to do the low level hardware
++ bit twiddling for operations such as:
++ -initializing QBman software portals
++ -building and sending portal commands
++ -portal interrupt configuration and processing
++
++ The qbman-portal APIs are not public to other drivers, and are
++ only used by dpio-service.
++
++Other (dpaa2-fd.h, dpaa2-global.h)
++----------------------------------
++
++ Frame descriptor and scatter-gather definitions and the APIs used to
++ manipulate them are defined in dpaa2-fd.h.
++
++ Dequeue result struct and parsing APIs are defined in dpaa2-global.h.
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+new file mode 100644
+index 00000000..46c32a67
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+@@ -0,0 +1,689 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/types.h>
++#include "../../include/mc.h"
++#include "../../include/dpaa2-io.h"
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++
++#include "dpio.h"
++#include "qbman-portal.h"
++#include "qbman_debug.h"
++
++struct dpaa2_io {
++ atomic_t refs;
++ struct dpaa2_io_desc dpio_desc;
++ struct qbman_swp_desc swp_desc;
++ struct qbman_swp *swp;
++ struct list_head node;
++ /* protect against multiple management commands */
++ spinlock_t lock_mgmt_cmd;
++ /* protect notifications list */
++ spinlock_t lock_notifications;
++ struct list_head notifications;
++};
++
++struct dpaa2_io_store {
++ unsigned int max;
++ dma_addr_t paddr;
++ struct dpaa2_dq *vaddr;
++ void *alloced_addr; /* unaligned value from kmalloc() */
++ unsigned int idx; /* position of the next-to-be-returned entry */
++ struct qbman_swp *swp; /* portal used to issue VDQCR */
++ struct device *dev; /* device used for DMA mapping */
++};
++
++/* keep a per cpu array of DPIOs for fast access */
++static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
++static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
++static DEFINE_SPINLOCK(dpio_list_lock);
++
++static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
++ int cpu)
++{
++ if (d)
++ return d;
++
++ if (unlikely(cpu >= num_possible_cpus()))
++ return NULL;
++
++ /*
++	 * If cpu == -1, choose the current cpu, with no guarantee
++	 * against being migrated away.
++ */
++ if (unlikely(cpu < 0))
++ cpu = smp_processor_id();
++
++ /* If a specific cpu was requested, pick it up immediately */
++ return dpio_by_cpu[cpu];
++}
++
++static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
++{
++ if (d)
++ return d;
++
++ spin_lock(&dpio_list_lock);
++ d = list_entry(dpio_list.next, struct dpaa2_io, node);
++ list_del(&d->node);
++ list_add_tail(&d->node, &dpio_list);
++ spin_unlock(&dpio_list_lock);
++
++ return d;
++}
++
++/**
++ * dpaa2_io_create() - create a dpaa2_io object.
++ * @desc: the dpaa2_io descriptor
++ *
++ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
++ * DPIO object.
++ *
++ * Return a valid dpaa2_io object for success, or NULL for failure.
++ */
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
++{
++ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
++
++ if (!obj)
++ return NULL;
++
++ /* check if CPU is out of range (-1 means any cpu) */
++ if (desc->cpu >= num_possible_cpus()) {
++ kfree(obj);
++ return NULL;
++ }
++
++ atomic_set(&obj->refs, 1);
++ obj->dpio_desc = *desc;
++ obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
++ obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
++ obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
++ obj->swp = qbman_swp_init(&obj->swp_desc);
++
++ if (!obj->swp) {
++ kfree(obj);
++ return NULL;
++ }
++
++ INIT_LIST_HEAD(&obj->node);
++ spin_lock_init(&obj->lock_mgmt_cmd);
++ spin_lock_init(&obj->lock_notifications);
++ INIT_LIST_HEAD(&obj->notifications);
++
++ /* For now only enable DQRR interrupts */
++ qbman_swp_interrupt_set_trigger(obj->swp,
++ QBMAN_SWP_INTERRUPT_DQRI);
++ qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
++ if (obj->dpio_desc.receives_notifications)
++ qbman_swp_push_set(obj->swp, 0, 1);
++
++ spin_lock(&dpio_list_lock);
++ list_add_tail(&obj->node, &dpio_list);
++ if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
++ dpio_by_cpu[desc->cpu] = obj;
++ spin_unlock(&dpio_list_lock);
++
++ return obj;
++}
++EXPORT_SYMBOL(dpaa2_io_create);
++
++/**
++ * dpaa2_io_down() - release the dpaa2_io object.
++ * @d: the dpaa2_io object to be released.
++ *
++ * The "struct dpaa2_io" type can represent an individual DPIO object (as
++ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
++ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
++ * each handle obtained should be released using this function.
++ */
++void dpaa2_io_down(struct dpaa2_io *d)
++{
++ if (!atomic_dec_and_test(&d->refs))
++ return;
++ kfree(d);
++}
++EXPORT_SYMBOL(dpaa2_io_down);
++
++#define DPAA_POLL_MAX 32
++
++/**
++ * dpaa2_io_irq() - ISR for DPIO interrupts
++ *
++ * @obj: the given DPIO object.
++ *
++ * Return IRQ_HANDLED for success or IRQ_NONE if there
++ * were no pending interrupts.
++ */
++irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
++{
++ const struct dpaa2_dq *dq;
++ int max = 0;
++ struct qbman_swp *swp;
++ u32 status;
++
++ swp = obj->swp;
++ status = qbman_swp_interrupt_read_status(swp);
++ if (!status)
++ return IRQ_NONE;
++
++ dq = qbman_swp_dqrr_next(swp);
++ while (dq) {
++ if (qbman_result_is_SCN(dq)) {
++ struct dpaa2_io_notification_ctx *ctx;
++ u64 q64;
++
++ q64 = qbman_result_SCN_ctx(dq);
++ ctx = (void *)q64;
++ ctx->cb(ctx);
++ } else {
++ pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
++ }
++ qbman_swp_dqrr_consume(swp, dq);
++ ++max;
++ if (max > DPAA_POLL_MAX)
++ goto done;
++ dq = qbman_swp_dqrr_next(swp);
++ }
++done:
++ qbman_swp_interrupt_clear_status(swp, status);
++ qbman_swp_interrupt_set_inhibit(swp, 0);
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(dpaa2_io_irq);
++
++/**
++ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
++ * notifications on the given DPIO service.
++ * @d: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * The caller should issue the MC command to attach a DPAA2 object to
++ * a DPIO after this function completes successfully. In that way:
++ * (a) The DPIO service is "ready" to handle a notification arrival
++ * (which might happen before the "attach" command to MC has
++ * returned control of execution back to the caller)
++ * (b) The DPIO service can provide back to the caller the 'dpio_id' and
++ * 'qman64' parameters that it should pass along in the MC command
++ * in order for the object to be configured to produce the right
++ * notification fields to the DPIO service.
++ *
++ * Return 0 for success, or -ENODEV for failure.
++ */
++int dpaa2_io_service_register(struct dpaa2_io *d,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ unsigned long irqflags;
++
++ d = service_select_by_cpu(d, ctx->desired_cpu);
++ if (!d)
++ return -ENODEV;
++
++ ctx->dpio_id = d->dpio_desc.dpio_id;
++ ctx->qman64 = (u64)ctx;
++ ctx->dpio_private = d;
++ spin_lock_irqsave(&d->lock_notifications, irqflags);
++ list_add(&ctx->node, &d->notifications);
++ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
++
++ /* Enable the generation of CDAN notifications */
++ if (ctx->is_cdan)
++ qbman_swp_CDAN_set_context_enable(d->swp,
++ (u16)ctx->id,
++ ctx->qman64);
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_service_register);
++
++/**
++ * dpaa2_io_service_deregister - The opposite of 'register'.
++ * @service: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * This function should be called only after sending the MC command to
++ * to detach the notification-producing device from the DPIO.
++ */
++void dpaa2_io_service_deregister(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ struct dpaa2_io *d = ctx->dpio_private;
++ unsigned long irqflags;
++
++ if (ctx->is_cdan)
++ qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
++
++ spin_lock_irqsave(&d->lock_notifications, irqflags);
++ list_del(&ctx->node);
++ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
++}
++EXPORT_SYMBOL(dpaa2_io_service_deregister);
++
++/**
++ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
++ * @d: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
++ * considered "disarmed". Ie. the user can issue pull dequeue operations on that
++ * traffic source for as long as it likes. Eventually it may wish to "rearm"
++ * that source to allow it to produce another FQDAN/CDAN, that's what this
++ * function achieves.
++ *
++ * Return 0 for success.
++ */
++int dpaa2_io_service_rearm(struct dpaa2_io *d,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ unsigned long irqflags;
++ int err;
++
++ d = service_select_by_cpu(d, ctx->desired_cpu);
++	if (unlikely(!d))
++ return -ENODEV;
++
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ if (ctx->is_cdan)
++ err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
++ else
++ err = qbman_swp_fq_schedule(d->swp, ctx->id);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_rearm);
++
++/**
++ * dpaa2_io_service_pull_fq() - issue a pull dequeue command on a frame queue.
++ * @d: the given DPIO service.
++ * @fqid: the given frame queue id.
++ * @s: the dpaa2_io_store object for the result.
++ *
++ * Return 0 for success, or error code for failure.
++ */
++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
++ struct dpaa2_io_store *s)
++{
++ struct qbman_pull_desc pd;
++ int err;
++
++ qbman_pull_desc_clear(&pd);
++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
++ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
++ qbman_pull_desc_set_fq(&pd, fqid);
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++ s->swp = d->swp;
++ err = qbman_swp_pull(d->swp, &pd);
++ if (err)
++ s->swp = NULL;
++
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
++
++/**
++ * dpaa2_io_service_pull_channel() - issue a pull dequeue command on a channel.
++ * @d: the given DPIO service.
++ * @channelid: the given channel id.
++ * @s: the dpaa2_io_store object for the result.
++ *
++ * Return 0 for success, or error code for failure.
++ */
++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
++ struct dpaa2_io_store *s)
++{
++ struct qbman_pull_desc pd;
++ int err;
++
++ qbman_pull_desc_clear(&pd);
++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
++ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
++ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ s->swp = d->swp;
++ err = qbman_swp_pull(d->swp, &pd);
++ if (err)
++ s->swp = NULL;
++
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_pull_channel);
++
++/**
++ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
++ * @d: the given DPIO service.
++ * @fqid: the given frame queue id.
++ * @fd: the frame descriptor which is enqueued.
++ *
++ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
++ u32 fqid,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc ed;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_no_orp(&ed, 0);
++ qbman_eq_desc_set_fq(&ed, fqid);
++
++ return qbman_swp_enqueue(d->swp, &ed, fd);
++}
++EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
++
++/**
++ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
++ * @d: the given DPIO service.
++ * @qdid: the given queuing destination id.
++ * @prio: the given queuing priority.
++ * @qdbin: the given queuing destination bin.
++ * @fd: the frame descriptor which is enqueued.
++ *
++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
++ u32 qdid, u8 prio, u16 qdbin,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc ed;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_no_orp(&ed, 0);
++ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
++
++ return qbman_swp_enqueue(d->swp, &ed, fd);
++}
++EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd);
++
++/**
++ * dpaa2_io_service_release() - Release buffers to a buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the buffer pool id.
++ * @buffers: the buffers to be released.
++ * @num_buffers: the number of the buffers to be released.
++ *
++ * Return 0 for success, and negative error code for failure.
++ */
++int dpaa2_io_service_release(struct dpaa2_io *d,
++ u32 bpid,
++ const u64 *buffers,
++ unsigned int num_buffers)
++{
++ struct qbman_release_desc rd;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ qbman_release_desc_clear(&rd);
++ qbman_release_desc_set_bpid(&rd, bpid);
++
++ return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
++}
++EXPORT_SYMBOL(dpaa2_io_service_release);
++
++/**
++ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the buffer pool id.
++ * @buffers: the buffer addresses for acquired buffers.
++ * @num_buffers: the expected number of the buffers to acquire.
++ *
++ * Return a negative error code if the command failed, otherwise it returns
++ * the number of buffers acquired, which may be less than the number requested.
++ * E.g. if the buffer pool is empty, this will return zero.
++ */
++int dpaa2_io_service_acquire(struct dpaa2_io *d,
++ u32 bpid,
++ u64 *buffers,
++ unsigned int num_buffers)
++{
++ unsigned long irqflags;
++ int err;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_acquire);
++
++/*
++ * 'Stores' are reusable memory blocks for holding dequeue results and for
++ * assisting with parsing those results.
++ */
++
++/**
++ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
++ * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
++ * @dev: the device to allow mapping/unmapping the DMAable region.
++ *
++ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
++ * The 'dpaa2_io_store' returned is a DPIO service managed object.
++ *
++ * Return a pointer to the dpaa2_io_store struct for successfully created
++ * storage memory, or NULL on error.
++ */
++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
++ struct device *dev)
++{
++ struct dpaa2_io_store *ret;
++ size_t size;
++
++ if (!max_frames || (max_frames > 16))
++ return NULL;
++
++ ret = kmalloc(sizeof(*ret), GFP_KERNEL);
++ if (!ret)
++ return NULL;
++
++ ret->max = max_frames;
++ size = max_frames * sizeof(struct dpaa2_dq) + 64;
++ ret->alloced_addr = kzalloc(size, GFP_KERNEL);
++ if (!ret->alloced_addr) {
++ kfree(ret);
++ return NULL;
++ }
++
++ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
++ ret->paddr = dma_map_single(dev, ret->vaddr,
++ sizeof(struct dpaa2_dq) * max_frames,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, ret->paddr)) {
++ kfree(ret->alloced_addr);
++ kfree(ret);
++ return NULL;
++ }
++
++ ret->idx = 0;
++ ret->dev = dev;
++
++ return ret;
++}
++EXPORT_SYMBOL(dpaa2_io_store_create);
++
++/**
++ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
++ * result.
++ * @s: the storage memory to be destroyed.
++ */
++void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
++{
++ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
++ DMA_FROM_DEVICE);
++ kfree(s->alloced_addr);
++ kfree(s);
++}
++EXPORT_SYMBOL(dpaa2_io_store_destroy);
++
++/**
++ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
++ * @s: the dpaa2_io_store object.
++ * @is_last: indicate whether this is the last frame in the pull command.
++ *
++ * When an object driver performs dequeues to a dpaa2_io_store, this function
++ * can be used to determine when the next frame result is available. Once
++ * this function returns non-NULL, a subsequent call to it will try to find
++ * the next dequeue result.
++ *
++ * Note that if a pull-dequeue has a NULL result because the target FQ/channel
++ * was empty, then this function will also return NULL (rather than expecting
++ * the caller to always check for this). As such, "is_last" can be used to
++ * differentiate between "end-of-empty-dequeue" and "still-waiting".
++ *
++ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
++ */
++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
++{
++ int match;
++ struct dpaa2_dq *ret = &s->vaddr[s->idx];
++
++ match = qbman_result_has_new_result(s->swp, ret);
++ if (!match) {
++ *is_last = 0;
++ return NULL;
++ }
++
++ s->idx++;
++
++ if (dpaa2_dq_is_pull_complete(ret)) {
++ *is_last = 1;
++ s->idx = 0;
++ /*
++ * If we get an empty dequeue result to terminate a zero-results
++		 * vdqcr, return NULL to the caller rather than expecting the
++		 * caller to check every result for NULL.
++ */
++ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
++ ret = NULL;
++ } else {
++ *is_last = 0;
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(dpaa2_io_store_next);
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++/**
++ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
++ * @d: the given DPIO object.
++ * @fqid: the id of frame queue to be queried.
++ * @fcnt: the queried frame count.
++ * @bcnt: the queried byte count.
++ *
++ * Knowing the FQ count at run-time can be useful in debugging situations.
++ * The instantaneous frame- and byte-count are hereby returned.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid,
++ u32 *fcnt, u32 *bcnt)
++{
++ struct qbman_attr state;
++ struct qbman_swp *swp;
++ unsigned long irqflags;
++ int ret;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ swp = d->swp;
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ ret = qbman_fq_query_state(swp, fqid, &state);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++ if (ret)
++ return ret;
++ *fcnt = qbman_fq_state_frame_count(&state);
++ *bcnt = qbman_fq_state_byte_count(&state);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_query_fq_count);
++
++/**
++ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
++ * buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the index of buffer pool to be queried.
++ * @num: the queried number of buffers in the buffer pool.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, u32 *num)
++{
++ struct qbman_attr state;
++ struct qbman_swp *swp;
++ unsigned long irqflags;
++ int ret;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ swp = d->swp;
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ ret = qbman_bp_query(swp, bpid, &state);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++ if (ret)
++ return ret;
++ *num = qbman_bp_info_num_free_bufs(&state);
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_query_bp_count);
++#endif
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/staging/fsl-mc/bus/dpio/dpio.c
+new file mode 100644
+index 00000000..d81e0232
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
+@@ -0,0 +1,224 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../include/mc-sys.h"
++#include "../../include/mc-cmd.h"
++
++#include "dpio.h"
++#include "dpio-cmd.h"
++
++/*
++ * Data Path I/O Portal API
++ * Contains initialization APIs and runtime control APIs for DPIO
++ */
++
++/**
++ * dpio_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpio_id: DPIO unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpio_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpio_id,
++ u16 *token)
++{
++ struct mc_command cmd = { 0 };
++ struct dpio_cmd_open *dpio_cmd;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ dpio_cmd = (struct dpio_cmd_open *)cmd.params;
++ dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
++
++/**
++ * dpio_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpio_get_attributes() - Retrieve DPIO attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpio_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpio_rsp_get_attr *dpio_rsp;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
++ attr->id = le32_to_cpu(dpio_rsp->id);
++ attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
++ attr->num_priorities = dpio_rsp->num_priorities;
++ attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
++ attr->qbman_portal_ce_offset =
++ le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
++ attr->qbman_portal_ci_offset =
++ le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
++ attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
++
++ return 0;
++}
++
++/**
++ * dpio_get_api_version - Get Data Path I/O API version
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of DPIO API
++ * @minor_ver: Minor version of DPIO API
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
++
++ return 0;
++}
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.h b/drivers/staging/fsl-mc/bus/dpio/dpio.h
+new file mode 100644
+index 00000000..ced1103d
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.h
+@@ -0,0 +1,109 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPIO_H
++#define __FSL_DPIO_H
++
++struct fsl_mc_io;
++
++int dpio_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpio_id,
++ u16 *token);
++
++int dpio_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * enum dpio_channel_mode - DPIO notification channel mode
++ * @DPIO_NO_CHANNEL: No support for notification channel
++ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
++ * dedicated channel in the DPIO; user should point the queue's
++ * destination in the relevant interface to this DPIO
++ */
++enum dpio_channel_mode {
++ DPIO_NO_CHANNEL = 0,
++ DPIO_LOCAL_CHANNEL = 1,
++};
++
++/**
++ * struct dpio_cfg - Structure representing DPIO configuration
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ */
++struct dpio_cfg {
++ enum dpio_channel_mode channel_mode;
++ u8 num_priorities;
++};
++
++int dpio_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpio_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * struct dpio_attr - Structure representing DPIO attributes
++ * @id: DPIO object ID
++ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
++ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
++ * @qbman_portal_id: Software portal ID
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ * @qbman_version: QBMAN version
++ */
++struct dpio_attr {
++ int id;
++ u64 qbman_portal_ce_offset;
++ u64 qbman_portal_ci_offset;
++ u16 qbman_portal_id;
++ enum dpio_channel_mode channel_mode;
++ u8 num_priorities;
++ u32 qbman_version;
++};
++
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpio_attr *attr);
++
++int dpio_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
++
++#endif /* __FSL_DPIO_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
+new file mode 100644
+index 00000000..e14fb65b
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
+@@ -0,0 +1,1049 @@
++/*
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <asm/cacheflush.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include "../../include/dpaa2-global.h"
++
++#include "qbman-portal.h"
++
++struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
++struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
++
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++#define QMAN_REV_MASK 0xffff0000
++
++/* All QBMan command and result structures use this "valid bit" encoding */
++#define QB_VALID_BIT ((u32)0x80)
++
++/* QBMan portal management command codes */
++#define QBMAN_MC_ACQUIRE 0x30
++#define QBMAN_WQCHAN_CONFIGURE 0x46
++
++/* CINH register offsets */
++#define QBMAN_CINH_SWP_EQAR 0x8c0
++#define QBMAN_CINH_SWP_DQPI 0xa00
++#define QBMAN_CINH_SWP_DCAP 0xac0
++#define QBMAN_CINH_SWP_SDQCR 0xb00
++#define QBMAN_CINH_SWP_RAR 0xcc0
++#define QBMAN_CINH_SWP_ISR 0xe00
++#define QBMAN_CINH_SWP_IER 0xe40
++#define QBMAN_CINH_SWP_ISDR 0xe80
++#define QBMAN_CINH_SWP_IIR 0xec0
++
++/* CENA register offsets */
++#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_CR 0x600
++#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
++#define QBMAN_CENA_SWP_VDQCR 0x780
++
++/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
++#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
++
++/* Define token used to determine if response written to memory is valid */
++#define QMAN_DQ_TOKEN_VALID 1
++
++/* SDQCR attribute codes */
++#define QB_SDQCR_FC_SHIFT 29
++#define QB_SDQCR_FC_MASK 0x1
++#define QB_SDQCR_DCT_SHIFT 24
++#define QB_SDQCR_DCT_MASK 0x3
++#define QB_SDQCR_TOK_SHIFT 16
++#define QB_SDQCR_TOK_MASK 0xff
++#define QB_SDQCR_SRC_SHIFT 0
++#define QB_SDQCR_SRC_MASK 0xffff
++
++/* opaque token for static dequeues */
++#define QMAN_SDQCR_TOKEN 0xbb
++
++enum qbman_sdqcr_dct {
++ qbman_sdqcr_dct_null = 0,
++ qbman_sdqcr_dct_prio_ics,
++ qbman_sdqcr_dct_active_ics,
++ qbman_sdqcr_dct_active
++};
++
++enum qbman_sdqcr_fc {
++ qbman_sdqcr_fc_one = 0,
++ qbman_sdqcr_fc_up_to_3 = 1
++};
++
++#define dccvac(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
++#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
++static inline void qbman_inval_prefetch(struct qbman_swp *p, u32 offset)
++{
++ dcivac(p->addr_cena + offset);
++ prefetch(p->addr_cena + offset);
++}
++
++/* Portal Access */
++
++static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
++{
++ return readl_relaxed(p->addr_cinh + offset);
++}
++
++static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
++ u32 value)
++{
++ writel_relaxed(value, p->addr_cinh + offset);
++}
++
++static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
++{
++ return p->addr_cena + offset;
++}
++
++#define QBMAN_CINH_SWP_CFG 0xd00
++
++#define SWP_CFG_DQRR_MF_SHIFT 20
++#define SWP_CFG_EST_SHIFT 16
++#define SWP_CFG_WN_SHIFT 14
++#define SWP_CFG_RPM_SHIFT 12
++#define SWP_CFG_DCM_SHIFT 10
++#define SWP_CFG_EPM_SHIFT 8
++#define SWP_CFG_SD_SHIFT 5
++#define SWP_CFG_SP_SHIFT 4
++#define SWP_CFG_SE_SHIFT 3
++#define SWP_CFG_DP_SHIFT 2
++#define SWP_CFG_DE_SHIFT 1
++#define SWP_CFG_EP_SHIFT 0
++
++static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
++ u8 epm, int sd, int sp, int se,
++ int dp, int de, int ep)
++{
++	return cpu_to_le32(max_fill << SWP_CFG_DQRR_MF_SHIFT |
++ est << SWP_CFG_EST_SHIFT |
++ wn << SWP_CFG_WN_SHIFT |
++ rpm << SWP_CFG_RPM_SHIFT |
++ dcm << SWP_CFG_DCM_SHIFT |
++ epm << SWP_CFG_EPM_SHIFT |
++ sd << SWP_CFG_SD_SHIFT |
++ sp << SWP_CFG_SP_SHIFT |
++ se << SWP_CFG_SE_SHIFT |
++ dp << SWP_CFG_DP_SHIFT |
++ de << SWP_CFG_DE_SHIFT |
++ ep << SWP_CFG_EP_SHIFT);
++}
++
++/**
++ * qbman_swp_init() - Create a functional object representing the given
++ * QBMan portal descriptor.
++ * @d: the given qbman swp descriptor
++ *
++ * Return a qbman_swp portal object for success, NULL if the object cannot
++ * be created.
++ */
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
++{
++ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
++ u32 reg;
++
++ if (!p)
++ return NULL;
++ p->desc = d;
++ p->mc.valid_bit = QB_VALID_BIT;
++ p->sdq = 0;
++ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
++ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
++ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
++
++ atomic_set(&p->vdq.available, 1);
++ p->vdq.valid_bit = QB_VALID_BIT;
++ p->dqrr.next_idx = 0;
++ p->dqrr.valid_bit = QB_VALID_BIT;
++
++ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
++ p->dqrr.dqrr_size = 4;
++ p->dqrr.reset_bug = 1;
++ } else {
++ p->dqrr.dqrr_size = 8;
++ p->dqrr.reset_bug = 0;
++ }
++
++ p->addr_cena = d->cena_bar;
++ p->addr_cinh = d->cinh_bar;
++
++ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
++ 0, /* Writes cacheable */
++ 0, /* EQCR_CI stashing threshold */
++ 3, /* RPM: Valid bit mode, RCR in array mode */
++ 2, /* DCM: Discrete consumption ack mode */
++ 3, /* EPM: Valid bit mode, EQCR in array mode */
++ 0, /* mem stashing drop enable == FALSE */
++ 1, /* mem stashing priority == TRUE */
++ 0, /* mem stashing enable == FALSE */
++ 1, /* dequeue stashing priority == TRUE */
++ 0, /* dequeue stashing enable == FALSE */
++ 0); /* EQCR_CI stashing priority == FALSE */
++
++ qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
++ reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
++	if (!reg) {
++		pr_err("qbman: the portal is not enabled!\n");
++		kfree(p);	/* don't leak the portal object on failure */
++		return NULL;
++	}
++
++ /*
++ * SDQCR needs to be initialized to 0 when no channels are
++ * being dequeued from or else the QMan HW will indicate an
++ * error. The values that were calculated above will be
++ * applied when dequeues from a specific channel are enabled.
++ */
++ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
++ return p;
++}
++
++/**
++ * qbman_swp_finish() - Destroy the functional object created by
++ * qbman_swp_init() for the given QBMan portal descriptor.
++ * @p: the qbman_swp object to be destroyed
++ */
++void qbman_swp_finish(struct qbman_swp *p)
++{
++ kfree(p);
++}
++
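++/*
++ * Illustrative sketch (assumed caller code, not used by this driver): bring
++ * up a portal from already-mapped cache-enabled/cache-inhibited regions.
++ * The descriptor must outlive the portal, since qbman_swp_init() keeps a
++ * pointer to it; a static descriptor is used here for that reason.
++ */
++static inline struct qbman_swp *qbman_swp_bringup_example(void *cena,
++							  void *cinh,
++							  u32 qman_version)
++{
++	static struct qbman_swp_desc d;
++
++	d.cena_bar = cena;
++	d.cinh_bar = cinh;
++	d.qman_version = qman_version;
++	return qbman_swp_init(&d);
++}
++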
++/**
++ * qbman_swp_interrupt_read_status()
++ * @p: the given software portal
++ *
++ * Return the value in the SWP_ISR register.
++ */
++u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
++{
++ return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
++}
++
++/**
++ * qbman_swp_interrupt_clear_status()
++ * @p: the given software portal
++ * @mask: The mask to clear in SWP_ISR register
++ */
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
++{
++ qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
++}
++
++/**
++ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
++ * @p: the given software portal
++ *
++ * Return the value in the SWP_IER register.
++ */
++u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
++{
++ return qbman_read_register(p, QBMAN_CINH_SWP_IER);
++}
++
++/**
++ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
++ * @p: the given software portal
++ * @mask: The mask of bits to enable in SWP_IER
++ */
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
++{
++ qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
++}
++
++/**
++ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
++ * @p: the given software portal object
++ *
++ * Return the value in the SWP_IIR register.
++ */
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
++{
++ return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
++}
++
++/**
++ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
++ * @p: the given software portal object
++ * @inhibit: whether to inhibit the IRQs
++ */
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
++{
++ qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
++}
++
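++/*
++ * Illustrative sketch (assumed caller code): the usual top-half pattern for
++ * the helpers above - read the interrupt status, acknowledge it, and report
++ * whether the dequeue-ring source (DQRI) was among the asserted bits.
++ */
++static inline int qbman_swp_irq_example(struct qbman_swp *p)
++{
++	u32 status = qbman_swp_interrupt_read_status(p);
++
++	if (!status)
++		return 0;	/* interrupt was not raised by this portal */
++	qbman_swp_interrupt_clear_status(p, status);
++	return !!(status & QBMAN_SWP_INTERRUPT_DQRI);
++}
++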
++/*
++ * Different management commands all use this common base layer of code to issue
++ * commands and poll for results.
++ */
++
++/*
++ * Returns a pointer to where the caller should fill in their management command
++ * (caller should ignore the verb byte)
++ */
++void *qbman_swp_mc_start(struct qbman_swp *p)
++{
++ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
++}
++
++/*
++ * Merges in the caller-supplied command verb (which should not include the
++ * valid-bit) and submits the command to hardware
++ */
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
++{
++ u8 *v = cmd;
++
++ dma_wmb();
++ *v = cmd_verb | p->mc.valid_bit;
++ dccvac(cmd);
++}
++
++/*
++ * Checks for a completed response (returns non-NULL only if the response
++ * is complete).
++ */
++void *qbman_swp_mc_result(struct qbman_swp *p)
++{
++ u32 *ret, verb;
++
++ qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++
++ /* Remove the valid-bit - command completed if the rest is non-zero */
++ verb = ret[0] & ~QB_VALID_BIT;
++ if (!verb)
++ return NULL;
++ p->mc.valid_bit ^= QB_VALID_BIT;
++ return ret;
++}
++
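++/*
++ * Illustrative sketch (assumed caller code): how the three helpers above
++ * combine into a submit-and-poll sequence. The 1000-iteration bound is an
++ * assumption for the example; qbman_swp_mc_complete() in qbman-portal.h is
++ * the wrapper the driver actually uses.
++ */
++static inline void *qbman_swp_mc_example(struct qbman_swp *p, u8 cmd_verb)
++{
++	int loopvar = 1000;
++	void *cmd = qbman_swp_mc_start(p);
++	void *ret;
++
++	if (!cmd)
++		return NULL;	/* management command ring is busy */
++	/* ... fill in the command body here, leaving the verb byte alone ... */
++	qbman_swp_mc_submit(p, cmd, cmd_verb);
++	do {
++		ret = qbman_swp_mc_result(p);
++	} while (!ret && --loopvar);
++	return ret;
++}
++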
++#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
++enum qb_enqueue_commands {
++ enqueue_empty = 0,
++ enqueue_response_always = 1,
++ enqueue_rejects_to_fq = 2
++};
++
++#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
++#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
++#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
++
++/**
++ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_eq_desc_clear(struct qbman_eq_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++/**
++ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ *		      rejections returned on a FQ.
++ */
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
++{
++ d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
++ if (respond_success)
++ d->verb |= enqueue_response_always;
++ else
++ d->verb |= enqueue_rejects_to_fq;
++}
++
++/*
++ * Exactly one of the following descriptor "targets" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - enqueue to a frame queue
++ * - enqueue to a queuing destination
++ */
++
++/**
++ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
++ * @d: the enqueue descriptor
++ * @fqid: the id of the frame queue to be enqueued
++ */
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
++{
++ d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
++ d->tgtid = cpu_to_le32(fqid);
++}
++
++/**
++ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
++ * @d: the enqueue descriptor
++ * @qdid: the id of the queuing destination to be enqueued
++ * @qd_bin: the queuing destination bin
++ * @qd_prio: the queuing destination priority
++ */
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
++ u32 qd_bin, u32 qd_prio)
++{
++ d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
++ d->tgtid = cpu_to_le32(qdid);
++ d->qdbin = cpu_to_le16(qd_bin);
++ d->qpri = qd_prio;
++}
++
++#define EQAR_IDX(eqar) ((eqar) & 0x7)
++#define EQAR_VB(eqar) ((eqar) & 0x80)
++#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
++
++/**
++ * qbman_swp_enqueue() - Issue an enqueue command
++ * @s: the software portal used for enqueue
++ * @d: the enqueue descriptor
++ * @fd: the frame descriptor to be enqueued
++ *
++ * Please note that 'fd' should only be NULL if the "action" of the
++ * descriptor is "orp_hole" or "orp_nesn".
++ *
++ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
++ */
++int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc *p;
++ u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
++
++ if (!EQAR_SUCCESS(eqar))
++ return -EBUSY;
++
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
++ memcpy(&p->dca, &d->dca, 31);
++ memcpy(&p->fd, fd, sizeof(*fd));
++
++ /* Set the verb byte, have to substitute in the valid-bit */
++ dma_wmb();
++ p->verb = d->verb | EQAR_VB(eqar);
++ dccvac(p);
++
++ return 0;
++}
++
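++/*
++ * Illustrative sketch (assumed caller code): a minimal enqueue of one frame
++ * descriptor to frame queue 'fqid', retrying while the EQCR is busy.
++ */
++static inline int qbman_swp_enqueue_example(struct qbman_swp *s, u32 fqid,
++					    const struct dpaa2_fd *fd)
++{
++	struct qbman_eq_desc d;
++	int ret;
++
++	qbman_eq_desc_clear(&d);
++	qbman_eq_desc_set_no_orp(&d, 0);	/* rejections go back to the FQ */
++	qbman_eq_desc_set_fq(&d, fqid);
++	do {
++		ret = qbman_swp_enqueue(s, &d, fd);
++	} while (ret == -EBUSY);
++	return ret;
++}
++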
++/* Static (push) dequeue */
++
++/**
++ * qbman_swp_push_get() - Get the push dequeue setup
++ * @p: the software portal object
++ * @channel_idx: the channel index to query
++ * @enabled: returned boolean to show whether the push dequeue is enabled
++ * for the given channel
++ */
++void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
++{
++ u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
++
++ WARN_ON(channel_idx > 15);
++ *enabled = src | (1 << channel_idx);
++	*enabled = !!(src & (1 << channel_idx));	/* test, not set, the bit */
++
++/**
++ * qbman_swp_push_set() - Enable or disable push dequeue
++ * @s: the software portal object
++ * @channel_idx: the channel index (0 to 15)
++ * @enable: enable or disable push dequeue
++ */
++void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
++{
++ u16 dqsrc;
++
++ WARN_ON(channel_idx > 15);
++ if (enable)
++ s->sdq |= 1 << channel_idx;
++ else
++ s->sdq &= ~(1 << channel_idx);
++
++	/* Rebuild the complete src map. If no channels are enabled
++ * the SDQCR must be 0 or else QMan will assert errors
++ */
++ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
++ if (dqsrc != 0)
++ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
++ else
++ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
++}
++
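++/*
++ * Illustrative sketch (assumed caller code): enable push dequeuing on one
++ * channel and read the setting back.
++ */
++static inline int qbman_swp_push_example(struct qbman_swp *s, u8 channel_idx)
++{
++	int enabled;
++
++	qbman_swp_push_set(s, channel_idx, 1);
++	qbman_swp_push_get(s, channel_idx, &enabled);
++	return enabled;
++}
++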
++#define QB_VDQCR_VERB_DCT_SHIFT 0
++#define QB_VDQCR_VERB_DT_SHIFT 2
++#define QB_VDQCR_VERB_RLS_SHIFT 4
++#define QB_VDQCR_VERB_WAE_SHIFT 5
++
++enum qb_pull_dt_e {
++ qb_pull_dt_channel,
++ qb_pull_dt_workqueue,
++ qb_pull_dt_framequeue
++};
++
++/**
++ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state
++ * @d: the pull dequeue descriptor to be cleared
++ */
++void qbman_pull_desc_clear(struct qbman_pull_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++/**
++ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
++ * @d: the pull dequeue descriptor to be set
++ * @storage: the pointer of the memory to store the dequeue result
++ * @storage_phys: the physical address of the storage memory
++ * @stash: to indicate whether write allocate is enabled
++ *
++ * If not called, or if called with 'storage' as NULL, the resulting pull
++ * dequeues will produce results to DQRR. If 'storage' is non-NULL, results are
++ * produced to the given memory location (using the DMA address which
++ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
++ * those writes to main-memory express a cache-warming attribute.
++ */
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++ struct dpaa2_dq *storage,
++ dma_addr_t storage_phys,
++ int stash)
++{
++ /* save the virtual address */
++ d->rsp_addr_virt = (u64)storage;
++
++ if (!storage) {
++ d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
++ return;
++ }
++ d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
++ if (stash)
++ d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
++ else
++ d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
++
++ d->rsp_addr = cpu_to_le64(storage_phys);
++}
++
++/**
++ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
++ * @d: the pull dequeue descriptor to be set
++ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
++ */
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
++{
++ d->numf = numframes - 1;
++}
++
++void qbman_pull_desc_set_token(struct qbman_pull_desc *d, u8 token)
++{
++ d->tok = token;
++}
++
++/*
++ * Exactly one of the following descriptor "actions" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - pull dequeue from the given frame queue (FQ)
++ * - pull dequeue from any FQ in the given work queue (WQ)
++ * - pull dequeue from any FQ in any WQ in the given channel
++ */
++
++/**
++ * qbman_pull_desc_set_fq() - Set the fqid from which the dequeue command
++ * dequeues
++ * @d: the pull dequeue descriptor to be set
++ * @fqid: the frame queue index of the given FQ
++ */
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
++{
++ d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
++ d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
++ d->dq_src = cpu_to_le32(fqid);
++}
++
++/**
++ * qbman_pull_desc_set_wq() - Set the wqid from which the dequeue command
++ * dequeues
++ * @d: the pull dequeue descriptor to be set
++ * @wqid: composed of channel id and wqid within the channel
++ * @dct: the dequeue command type
++ */
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
++ enum qbman_pull_type_e dct)
++{
++ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
++ d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
++ d->dq_src = cpu_to_le32(wqid);
++}
++
++/**
++ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
++ * dequeues
++ * @d: the pull dequeue descriptor to be set
++ * @chid: the channel id to be dequeued
++ * @dct: the dequeue command type
++ */
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
++ enum qbman_pull_type_e dct)
++{
++ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
++ d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
++ d->dq_src = cpu_to_le32(chid);
++}
++
++/**
++ * qbman_swp_pull() - Issue the pull dequeue command
++ * @s: the software portal object
++ * @d: the software portal descriptor which has been configured with
++ * the set of qbman_pull_desc_set_*() calls
++ *
++ * Return 0 for success, and -EBUSY if the software portal is not ready
++ * to do pull dequeue.
++ */
++int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
++{
++ struct qbman_pull_desc *p;
++
++ if (!atomic_dec_and_test(&s->vdq.available)) {
++ atomic_inc(&s->vdq.available);
++ return -EBUSY;
++ }
++ s->vdq.storage = (void *)d->rsp_addr_virt;
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
++ p->numf = d->numf;
++ p->tok = QMAN_DQ_TOKEN_VALID;
++ p->dq_src = d->dq_src;
++ p->rsp_addr = d->rsp_addr;
++ p->rsp_addr_virt = d->rsp_addr_virt;
++ dma_wmb();
++
++ /* Set the verb byte, have to substitute in the valid-bit */
++ p->verb = d->verb | s->vdq.valid_bit;
++ s->vdq.valid_bit ^= QB_VALID_BIT;
++ dccvac(p);
++
++ return 0;
++}
++
++#define QMAN_DQRR_PI_MASK 0xf
++
++/**
++ * qbman_swp_dqrr_next() - Get a valid DQRR entry
++ * @s: the software portal object
++ *
++ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
++ * only once, so repeated calls can return a sequence of DQRR entries, without
++ * requiring they be consumed immediately or in any particular order.
++ */
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
++{
++ u32 verb;
++ u32 response_verb;
++ u32 flags;
++ struct dpaa2_dq *p;
++
++ /* Before using valid-bit to detect if something is there, we have to
++ * handle the case of the DQRR reset bug...
++ */
++ if (unlikely(s->dqrr.reset_bug)) {
++ /*
++ * We pick up new entries by cache-inhibited producer index,
++ * which means that a non-coherent mapping would require us to
++ * invalidate and read *only* once that PI has indicated that
++ * there's an entry here. The first trip around the DQRR ring
++ * will be much less efficient than all subsequent trips around
++ * it...
++ */
++ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
++ QMAN_DQRR_PI_MASK;
++
++ /* there are new entries if pi != next_idx */
++ if (pi == s->dqrr.next_idx)
++ return NULL;
++
++ /*
++ * if next_idx is/was the last ring index, and 'pi' is
++ * different, we can disable the workaround as all the ring
++ * entries have now been DMA'd to so valid-bit checking is
++ * repaired. Note: this logic needs to be based on next_idx
++ * (which increments one at a time), rather than on pi (which
++ * can burst and wrap-around between our snapshots of it).
++ */
++ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
++ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
++ s->dqrr.next_idx, pi);
++ s->dqrr.reset_bug = 0;
++ }
++ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ }
++
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ verb = p->dq.verb;
++
++ /*
++ * If the valid-bit isn't of the expected polarity, nothing there. Note,
++	 * in the DQRR reset bug workaround, we shouldn't need this check,
++	 * because we've already determined that a new entry is available
++ * and we've invalidated the cacheline before reading it, so the
++ * valid-bit behaviour is repaired and should tell us what we already
++ * knew from reading PI.
++ */
++ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
++ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ return NULL;
++ }
++ /*
++ * There's something there. Move "next_idx" attention to the next ring
++ * entry (and prefetch it) before returning what we found.
++ */
++ s->dqrr.next_idx++;
++ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
++ if (!s->dqrr.next_idx)
++ s->dqrr.valid_bit ^= QB_VALID_BIT;
++
++ /*
++ * If this is the final response to a volatile dequeue command
++ * indicate that the vdq is available
++ */
++ flags = p->dq.stat;
++ response_verb = verb & QBMAN_RESULT_MASK;
++ if ((response_verb == QBMAN_RESULT_DQ) &&
++ (flags & DPAA2_DQ_STAT_VOLATILE) &&
++ (flags & DPAA2_DQ_STAT_EXPIRED))
++ atomic_inc(&s->vdq.available);
++
++ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++
++ return p;
++}
++
++/**
++ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
++ * qbman_swp_dqrr_next().
++ * @s: the software portal object
++ * @dq: the DQRR entry to be consumed
++ */
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
++{
++ qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
++}
++
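++/*
++ * Illustrative sketch (assumed caller code): drain whatever DQRR entries are
++ * currently visible, handing dequeue responses to a hypothetical
++ * handle_frame() callback and consuming every entry afterwards.
++ */
++static inline void qbman_swp_dqrr_drain_example(struct qbman_swp *s,
++			void (*handle_frame)(const struct dpaa2_dq *))
++{
++	const struct dpaa2_dq *dq;
++
++	while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
++		if (qbman_result_is_DQ(dq))
++			handle_frame(dq);	/* skip notifications */
++		qbman_swp_dqrr_consume(s, dq);
++	}
++}
++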
++/**
++ * qbman_result_has_new_result() - Check and get the dequeue response from the
++ * dq storage memory set in the pull dequeue command
++ * @s: the software portal object
++ * @dq: the dequeue result read from the memory
++ *
++ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
++ * dequeue result.
++ *
++ * Only used for user-provided storage of dequeue results, not DQRR. For
++ * efficiency purposes, the driver will perform any required endianness
++ * conversion to ensure that the user's dequeue result storage is in host-endian
++ * format. As such, once the user has called qbman_result_has_new_result() and
++ * been returned a valid dequeue result, they should not call it again on
++ * the same memory location (except of course if another dequeue command has
++ * been executed to produce a new result to that location).
++ */
++int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
++{
++ if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
++ return 0;
++
++ /*
++ * Set token to be 0 so we will detect change back to 1
++ * next time the looping is traversed. Const is cast away here
++ * as we want users to treat the dequeue responses as read only.
++ */
++ ((struct dpaa2_dq *)dq)->dq.tok = 0;
++
++ /*
++ * Determine whether VDQCR is available based on whether the
++ * current result is sitting in the first storage location of
++ * the busy command.
++ */
++ if (s->vdq.storage == dq) {
++ s->vdq.storage = NULL;
++ atomic_inc(&s->vdq.available);
++ }
++
++ return 1;
++}
++
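++/*
++ * Illustrative sketch (assumed caller code): issue a volatile dequeue of up
++ * to 16 frames from 'fqid' into caller-provided storage, then busy-wait for
++ * the first result. 'storage'/'storage_phys' are assumed to be a DMA-mapped
++ * array of 16 struct dpaa2_dq entries.
++ */
++static inline const struct dpaa2_dq *
++qbman_swp_pull_example(struct qbman_swp *s, u32 fqid,
++		       struct dpaa2_dq *storage, dma_addr_t storage_phys)
++{
++	struct qbman_pull_desc d;
++
++	qbman_pull_desc_clear(&d);
++	qbman_pull_desc_set_numframes(&d, 16);
++	qbman_pull_desc_set_fq(&d, fqid);
++	qbman_pull_desc_set_storage(&d, storage, storage_phys, 1);
++	if (qbman_swp_pull(s, &d))
++		return NULL;	/* portal still busy with a previous VDQCR */
++	while (!qbman_result_has_new_result(s, storage))
++		;		/* spin until the token flips to valid */
++	return storage;
++}
++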
++/**
++ * qbman_release_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_release_desc_clear(struct qbman_release_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++ d->verb = 1 << 5; /* Release Command Valid */
++}
++
++/**
++ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
++ */
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
++{
++ d->bpid = cpu_to_le16(bpid);
++}
++
++/**
++ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
++ * interrupt source should be asserted after the release command is completed.
++ */
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
++{
++ if (enable)
++ d->verb |= 1 << 6;
++ else
++ d->verb &= ~(1 << 6);
++}
++
++#define RAR_IDX(rar) ((rar) & 0x7)
++#define RAR_VB(rar) ((rar) & 0x80)
++#define RAR_SUCCESS(rar) ((rar) & 0x100)
++
++/**
++ * qbman_swp_release() - Issue a buffer release command
++ * @s: the software portal object
++ * @d: the release descriptor
++ * @buffers: a pointer pointing to the buffer address to be released
++ * @num_buffers: number of buffers to be released, must be less than 8
++ *
++ * Return 0 for success, -EBUSY if the release command ring is not ready.
++ */
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const u64 *buffers, unsigned int num_buffers)
++{
++ int i;
++ struct qbman_release_desc *p;
++ u32 rar;
++
++ if (!num_buffers || (num_buffers > 7))
++ return -EINVAL;
++
++ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
++ if (!RAR_SUCCESS(rar))
++ return -EBUSY;
++
++ /* Start the release command */
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
++ /* Copy the caller's buffer pointers to the command */
++ for (i = 0; i < num_buffers; i++)
++ p->buf[i] = cpu_to_le64(buffers[i]);
++ p->bpid = d->bpid;
++
++ /*
++ * Set the verb byte, have to substitute in the valid-bit and the number
++ * of buffers.
++ */
++ dma_wmb();
++ p->verb = d->verb | RAR_VB(rar) | num_buffers;
++ dccvac(p);
++
++ return 0;
++}
++
++struct qbman_acquire_desc {
++ u8 verb;
++ u8 reserved;
++ u16 bpid;
++ u8 num;
++ u8 reserved2[59];
++};
++
++struct qbman_acquire_rslt {
++ u8 verb;
++ u8 rslt;
++ u16 reserved;
++ u8 num;
++ u8 reserved2[3];
++ u64 buf[7];
++};
++
++/**
++ * qbman_swp_acquire() - Issue a buffer acquire command
++ * @s: the software portal object
++ * @bpid: the buffer pool index
++ * @buffers: a pointer pointing to the acquired buffer addresses
++ * @num_buffers: number of buffers to be acquired, must be less than 8
++ *
++ * Return 0 for success, or negative error code if the acquire command
++ * fails.
++ */
++int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
++ unsigned int num_buffers)
++{
++ struct qbman_acquire_desc *p;
++ struct qbman_acquire_rslt *r;
++ int i;
++
++ if (!num_buffers || (num_buffers > 7))
++ return -EINVAL;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ p->bpid = cpu_to_le16(bpid);
++ p->num = num_buffers;
++
++ /* Complete the management command */
++ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
++ if (unlikely(!r)) {
++ pr_err("qbman: acquire from BPID %d failed, no response\n",
++ bpid);
++ return -EIO;
++ }
++
++ /* Decode the outcome */
++ WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
++
++ /* Determine success or failure */
++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
++ bpid, r->rslt);
++ return -EIO;
++ }
++
++ WARN_ON(r->num > num_buffers);
++
++ /* Copy the acquired buffers to the caller's array */
++ for (i = 0; i < r->num; i++)
++ buffers[i] = le64_to_cpu(r->buf[i]);
++
++ return (int)r->num;
++}
++
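++/*
++ * Illustrative sketch (assumed caller code): seed a buffer pool with up to
++ * seven buffer addresses and then acquire them back, e.g. as a self-test.
++ */
++static inline int qbman_swp_bp_example(struct qbman_swp *s, u16 bpid,
++				       u64 *bufs, unsigned int num)
++{
++	struct qbman_release_desc rd;
++	int ret;
++
++	qbman_release_desc_clear(&rd);
++	qbman_release_desc_set_bpid(&rd, bpid);
++	do {
++		ret = qbman_swp_release(s, &rd, bufs, num);
++	} while (ret == -EBUSY);
++	if (ret)
++		return ret;
++	/* on success, qbman_swp_acquire() returns the number of buffers */
++	return qbman_swp_acquire(s, bpid, bufs, num);
++}
++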
++struct qbman_alt_fq_state_desc {
++ u8 verb;
++ u8 reserved[3];
++ u32 fqid;
++ u8 reserved2[56];
++};
++
++struct qbman_alt_fq_state_rslt {
++ u8 verb;
++ u8 rslt;
++ u8 reserved[62];
++};
++
++#define ALT_FQ_FQID_MASK 0x00FFFFFF
++
++int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
++ u8 alt_fq_verb)
++{
++ struct qbman_alt_fq_state_desc *p;
++ struct qbman_alt_fq_state_rslt *r;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ p->fqid = cpu_to_le32(fqid) & ALT_FQ_FQID_MASK;
++
++ /* Complete the management command */
++ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
++ if (unlikely(!r)) {
++ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
++ alt_fq_verb);
++ return -EIO;
++ }
++
++ /* Decode the outcome */
++ WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
++
++ /* Determine success or failure */
++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
++ fqid, r->verb, r->rslt);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++struct qbman_cdan_ctrl_desc {
++ u8 verb;
++ u8 reserved;
++ u16 ch;
++ u8 we;
++ u8 ctrl;
++ u16 reserved2;
++ u64 cdan_ctx;
++ u8 reserved3[48];
++};
++
++struct qbman_cdan_ctrl_rslt {
++ u8 verb;
++ u8 rslt;
++ u16 ch;
++ u8 reserved[60];
++};
++
++int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
++ u8 we_mask, u8 cdan_en,
++ u64 ctx)
++{
++ struct qbman_cdan_ctrl_desc *p = NULL;
++ struct qbman_cdan_ctrl_rslt *r = NULL;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ p->ch = cpu_to_le16(channelid);
++ p->we = we_mask;
++ if (cdan_en)
++ p->ctrl = 1;
++ else
++ p->ctrl = 0;
++ p->cdan_ctx = cpu_to_le64(ctx);
++
++ /* Complete the management command */
++ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
++ if (unlikely(!r)) {
++ pr_err("qbman: wqchan config failed, no response\n");
++ return -EIO;
++ }
++
++ WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
++
++ /* Determine success or failure */
++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
++		pr_err("qbman: CDAN channel %d failed: code = 0x%02x\n",
++ channelid, r->rslt);
++ return -EIO;
++ }
++
++ return 0;
++}
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+new file mode 100644
+index 00000000..4254034c
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+@@ -0,0 +1,662 @@
++/*
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_QBMAN_PORTAL_H
++#define __FSL_QBMAN_PORTAL_H
++
++#include "qbman_private.h"
++#include "../../include/dpaa2-fd.h"
++
++struct dpaa2_dq;
++struct qbman_swp;
++
++/* qbman software portal descriptor structure */
++struct qbman_swp_desc {
++ void *cena_bar; /* Cache-enabled portal base address */
++ void *cinh_bar; /* Cache-inhibited portal base address */
++ u32 qman_version;
++};
++
++#define QBMAN_SWP_INTERRUPT_EQRI 0x01
++#define QBMAN_SWP_INTERRUPT_EQDI 0x02
++#define QBMAN_SWP_INTERRUPT_DQRI 0x04
++#define QBMAN_SWP_INTERRUPT_RCRI 0x08
++#define QBMAN_SWP_INTERRUPT_RCDI 0x10
++#define QBMAN_SWP_INTERRUPT_VDCI 0x20
++
++/* the structure for pull dequeue descriptor */
++struct qbman_pull_desc {
++ u8 verb;
++ u8 numf;
++ u8 tok;
++ u8 reserved;
++ u32 dq_src;
++ u64 rsp_addr;
++ u64 rsp_addr_virt;
++ u8 padding[40];
++};
++
++enum qbman_pull_type_e {
++ /* dequeue with priority precedence, respect intra-class scheduling */
++ qbman_pull_type_prio = 1,
++ /* dequeue with active FQ precedence, respect ICS */
++ qbman_pull_type_active,
++ /* dequeue with active FQ precedence, no ICS */
++ qbman_pull_type_active_noics
++};
++
++/* Definitions for parsing dequeue entries */
++#define QBMAN_RESULT_MASK 0x7f
++#define QBMAN_RESULT_DQ 0x60
++#define QBMAN_RESULT_FQRN 0x21
++#define QBMAN_RESULT_FQRNI 0x22
++#define QBMAN_RESULT_FQPN 0x24
++#define QBMAN_RESULT_FQDAN 0x25
++#define QBMAN_RESULT_CDAN 0x26
++#define QBMAN_RESULT_CSCN_MEM 0x27
++#define QBMAN_RESULT_CGCU 0x28
++#define QBMAN_RESULT_BPSCN 0x29
++#define QBMAN_RESULT_CSCN_WQ 0x2a
++
++/* QBMan FQ management command codes */
++#define QBMAN_FQ_SCHEDULE 0x48
++#define QBMAN_FQ_FORCE 0x49
++#define QBMAN_FQ_XON 0x4d
++#define QBMAN_FQ_XOFF 0x4e
++
++/* structure of enqueue descriptor */
++struct qbman_eq_desc {
++ u8 verb;
++ u8 dca;
++ u16 seqnum;
++ u16 orpid;
++ u16 reserved1;
++ u32 tgtid;
++ u32 tag;
++ u16 qdbin;
++ u8 qpri;
++ u8 reserved[3];
++ u8 wae;
++ u8 rspid;
++ u64 rsp_addr;
++ u8 fd[32];
++};
++
++/* buffer release descriptor */
++struct qbman_release_desc {
++ u8 verb;
++ u8 reserved;
++ u16 bpid;
++ u32 reserved2;
++ u64 buf[7];
++};
++
++/* Management command result codes */
++#define QBMAN_MC_RSLT_OK 0xf0
++
++#define CODE_CDAN_WE_EN 0x1
++#define CODE_CDAN_WE_CTX 0x4
++
++/* portal data structure */
++struct qbman_swp {
++ const struct qbman_swp_desc *desc;
++ void __iomem *addr_cena;
++ void __iomem *addr_cinh;
++
++ /* Management commands */
++ struct {
++ u32 valid_bit; /* 0x00 or 0x80 */
++ } mc;
++
++ /* Push dequeues */
++ u32 sdq;
++
++ /* Volatile dequeues */
++ struct {
++ atomic_t available; /* indicates if a command can be sent */
++ u32 valid_bit; /* 0x00 or 0x80 */
++ struct dpaa2_dq *storage; /* NULL if DQRR */
++ } vdq;
++
++ /* DQRR */
++ struct {
++ u32 next_idx;
++ u32 valid_bit;
++ u8 dqrr_size;
++ int reset_bug; /* indicates dqrr reset workaround is needed */
++ } dqrr;
++};
++
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
++void qbman_swp_finish(struct qbman_swp *p);
++u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
++u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
++
++void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
++void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
++
++void qbman_pull_desc_clear(struct qbman_pull_desc *d);
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++ struct dpaa2_dq *storage,
++ dma_addr_t storage_phys,
++ int stash);
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
++ enum qbman_pull_type_e dct);
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
++ enum qbman_pull_type_e dct);
++
++int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
++
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
++
++int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
++
++void qbman_eq_desc_clear(struct qbman_eq_desc *d);
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
++ u32 qd_bin, u32 qd_prio);
++
++int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
++ const struct dpaa2_fd *fd);
++
++void qbman_release_desc_clear(struct qbman_release_desc *d);
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
++
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const u64 *buffers, unsigned int num_buffers);
++int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
++ unsigned int num_buffers);
++int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
++ u8 alt_fq_verb);
++int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
++ u8 we_mask, u8 cdan_en,
++ u64 ctx);
++
++void *qbman_swp_mc_start(struct qbman_swp *p);
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
++void *qbman_swp_mc_result(struct qbman_swp *p);
++
++/**
++ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
++ * @dq: the dequeue result to be checked
++ *
++ * DQRR entries may contain non-dequeue results, i.e. notifications
++ */
++static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
++}
++
++/**
++ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
++ * @dq: the dequeue result to be checked
++ */
++static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
++{
++ return !qbman_result_is_DQ(dq);
++}
++
++/* FQ Data Availability */
++static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
++}
++
++/* Channel Data Availability */
++static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
++}
++
++/* Congestion State Change */
++static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
++}
++
++/* Buffer Pool State Change */
++static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
++}
++
++/* Congestion Group Count Update */
++static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
++}
++
++/* Retirement */
++static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
++}
++
++/* Retirement Immediate */
++static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
++}
++
++/* Park */
++static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
++}
++
++/**
++ * qbman_result_SCN_state() - Get the state field in State-change notification
++ */
++static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
++{
++ return scn->scn.state;
++}
++
++#define SCN_RID_MASK 0x00FFFFFF
++
++/**
++ * qbman_result_SCN_rid() - Get the resource id in State-change notification
++ */
++static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
++{
++ return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
++}
++
++/**
++ * qbman_result_SCN_ctx() - Get the context data in State-change notification
++ */
++static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
++{
++ return le64_to_cpu(scn->scn.ctx);
++}
++
++/**
++ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
++ * @s: the software portal object
++ * @fqid: the index of frame queue to be scheduled
++ *
++ * There are a couple of different ways that a FQ can end up in the parked
++ * state; this schedules it.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
++}
++
++/**
++ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
++ * @s: the software portal object
++ * @fqid: the index of frame queue to be forced
++ *
++ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
++ * and thus be available for selection by any channel-dequeuing behaviour (push
++ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
++ * empty at the time this happens, the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
++}
++
++/**
++ * qbman_swp_fq_xon() - sets FQ flow-control to XON
++ * @s: the software portal object
++ * @fqid: the index of frame queue
++ *
++ * This setting doesn't affect enqueues to the FQ, just dequeues.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
++}
++
++/**
++ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
++ * @s: the software portal object
++ * @fqid: the index of frame queue
++ *
++ * This setting doesn't affect enqueues to the FQ, just dequeues.
++ * XOFF FQs will remain in the tentatively-scheduled state, even when
++ * non-empty, meaning they won't be selected for scheduled dequeuing.
++ * If a FQ is changed to XOFF after it had already become truly-scheduled
++ * to a channel, and a pull dequeue of that channel occurs that selects
++ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
++}
++
++/* If the user has been allocated a channel object that is going to generate
++ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
++ * necessary.
++ *
++ * CDAN-enabled channels only generate a single CDAN notification, after which
++ * they need to be reenabled before they'll generate another. The idea is
++ * that pull dequeuing will occur in reaction to the CDAN, followed by a
++ * reenable step. Each function generates a distinct command to hardware, so a
++ * combination function is provided if the user wishes to modify the "context"
++ * (which shows up in each CDAN message) each time they reenable, as a single
++ * command to hardware. (An illustrative sketch of this flow follows the
++ * helpers below.)
++ */
++
++/**
++ * qbman_swp_CDAN_set_context() - Set CDAN context
++ * @s: the software portal object
++ * @channelid: the channel index
++ * @ctx: the context to be set in CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
++ u64 ctx)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_CTX,
++ 0, ctx);
++}
++
++/**
++ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 1, 0);
++}
++
++/**
++ * qbman_swp_CDAN_disable() - disable CDAN for the channel
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 0, 0);
++}
++
++/**
++ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ * @ctx: the context to be set in CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
++ u16 channelid,
++ u64 ctx)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
++ 1, ctx);
++}
++
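++/*
++ * Illustrative sketch (assumed caller code) of the re-arm flow described
++ * above: after reacting to a CDAN (e.g. by pull-dequeuing the channel), the
++ * notification is re-enabled and the context refreshed in a single command.
++ */
++static inline int qbman_swp_cdan_rearm_example(struct qbman_swp *s,
++					       u16 channelid, u64 ctx)
++{
++	/* ... pull-dequeue the channel in reaction to the CDAN ... */
++	return qbman_swp_CDAN_set_context_enable(s, channelid, ctx);
++}
++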
++/* Wraps up submit + poll-for-result */
++static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
++ u8 cmd_verb)
++{
++ int loopvar = 1000;
++
++ qbman_swp_mc_submit(swp, cmd, cmd_verb);
++
++ do {
++ cmd = qbman_swp_mc_result(swp);
++ } while (!cmd && loopvar--);
++
++ WARN_ON(!loopvar);
++
++ return cmd;
++}
++
++/* ------------ */
++/* qb_attr_code */
++/* ------------ */
++
++/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
++ * is either serving as a configuration command or a query result. The
++ * representation is inherently little-endian, as the indexing of the words is
++ * itself little-endian in nature and layerscape is little endian for anything
++ * that crosses a word boundary too (64-bit fields are the obvious examples).
++ */
++struct qb_attr_code {
++ unsigned int word; /* which u32[] array member encodes the field */
++ unsigned int lsoffset; /* encoding offset from ls-bit */
++ unsigned int width; /* encoding width. (bool must be 1.) */
++};
++
++/* Some pre-defined codes */
++extern struct qb_attr_code code_generic_verb;
++extern struct qb_attr_code code_generic_rslt;
++
++/* Macros to define codes */
++#define QB_CODE(a, b, c) { a, b, c }
++#define QB_CODE_NULL \
++ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)
++
++/* Rotate a code "ms", meaning that it moves from less-significant bytes to
++ * more-significant, from less-significant words to more-significant, etc. The
++ * "ls" version does the inverse, from more-significant towards
++ * less-significant.
++ */
++static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,
++ unsigned int bits)
++{
++ code->lsoffset += bits;
++ while (code->lsoffset > 31) {
++ code->word++;
++ code->lsoffset -= 32;
++ }
++}
++
++static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,
++ unsigned int bits)
++{
++ /* Don't be fooled, this trick should work because the types are
++ * unsigned. So the case that interests the while loop (the rotate has
++ * gone too far and the word count needs to compensate for it), is
++ * manifested when lsoffset is negative. But that equates to a really
++ * large unsigned value, starting with lots of "F"s. As such, we can
++ * continue adding 32 back to it until it wraps back round above zero,
++ * to a value of 31 or less...
++ */
++ code->lsoffset -= bits;
++ while (code->lsoffset > 31) {
++ code->word--;
++ code->lsoffset += 32;
++ }
++}
++
++/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */
++#define qb_attr_code_for_ms(code, bits, expr) \
++ for (; expr; qb_attr_code_rotate_ms(code, bits))
++#define qb_attr_code_for_ls(code, bits, expr) \
++ for (; expr; qb_attr_code_rotate_ls(code, bits))
++
++static inline void word_copy(void *d, const void *s, unsigned int cnt)
++{
++ u32 *dd = d;
++ const u32 *ss = s;
++
++ while (cnt--)
++ *(dd++) = *(ss++);
++}
++
++/*
++ * Currently, the CENA support code expects each 32-bit word to be written in
++ * host order, and these are converted to hardware (little-endian) order on
++ * command submission. However, 64-bit quantities must be written (and read)
++ * as two 32-bit words with the least-significant word first, irrespective of
++ * host endianness.
++ */
++static inline void u64_to_le32_copy(void *d, const u64 *s,
++ unsigned int cnt)
++{
++ u32 *dd = d;
++ const u32 *ss = (const u32 *)s;
++
++ while (cnt--) {
++ /*
++ * TBD: the toolchain was choking on the use of 64-bit types up
++ * until recently so this works entirely with 32-bit variables.
++ * When 64-bit types become usable again, investigate better
++ * ways of doing this.
++ */
++#if defined(__BIG_ENDIAN)
++ *(dd++) = ss[1];
++ *(dd++) = ss[0];
++ ss += 2;
++#else
++ *(dd++) = *(ss++);
++ *(dd++) = *(ss++);
++#endif
++ }
++}
++
++static inline void u64_from_le32_copy(u64 *d, const void *s,
++ unsigned int cnt)
++{
++ const u32 *ss = s;
++ u32 *dd = (u32 *)d;
++
++ while (cnt--) {
++#if defined(__BIG_ENDIAN)
++ dd[1] = *(ss++);
++ dd[0] = *(ss++);
++ dd += 2;
++#else
++ *(dd++) = *(ss++);
++ *(dd++) = *(ss++);
++#endif
++ }
++}
++
++/* decode a field from a cacheline */
++static inline u32 qb_attr_code_decode(const struct qb_attr_code *code,
++ const u32 *cacheline)
++{
++ return d32_u32(code->lsoffset, code->width, cacheline[code->word]);
++}
++
++static inline u64 qb_attr_code_decode_64(const struct qb_attr_code *code,
++ const u64 *cacheline)
++{
++ u64 res;
++
++ u64_from_le32_copy(&res, &cacheline[code->word / 2], 1);
++ return res;
++}
++
++/* encode a field to a cacheline */
++static inline void qb_attr_code_encode(const struct qb_attr_code *code,
++ u32 *cacheline, u32 val)
++{
++ cacheline[code->word] =
++ r32_u32(code->lsoffset, code->width, cacheline[code->word])
++ | e32_u32(code->lsoffset, code->width, val);
++}
++
++static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
++ u64 *cacheline, u64 val)
++{
++ u64_to_le32_copy(&cacheline[code->word / 2], &val, 1);
++}
++
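++/*
++ * Illustrative sketch: a round trip through the encode/decode helpers. The
++ * field described here - 16 bits starting at bit 16 of word 0 - is how the
++ * BPID field of the buffer-pool query command is laid out, for example.
++ */
++static inline u32 qb_attr_code_example(void)
++{
++	struct qb_attr_code code = QB_CODE(0, 16, 16);
++	u32 cacheline[16] = { 0 };
++
++	qb_attr_code_encode(&code, cacheline, 0x1234);
++	return qb_attr_code_decode(&code, cacheline);	/* yields 0x1234 */
++}
++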
++/* Small-width signed values (two's-complement) will decode into medium-width
++ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to
++ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value
++ * 249. Likewise -120 would decode as 136.) This function allows the caller to
++ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit
++ * encoding, will become 0xfffffff9 if you cast the return value to u32).
++ */
++static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,
++ u32 val)
++{
++ WARN_ON(val >= (1 << code->width));
++ /* If the high bit was set, it was encoding a negative */
++ if (val >= (1 << (code->width - 1)))
++ return (int32_t)0 - (int32_t)(((u32)1 << code->width) -
++ val);
++ /* Otherwise, it was encoding a positive */
++ return (int32_t)val;
++}
++
++/* ---------------------- */
++/* Descriptors/cachelines */
++/* ---------------------- */
++
++/* To avoid needless dynamic allocation, the driver API often gives the caller
++ * a "descriptor" type that the caller can instantiate however they like.
++ * Ultimately though, it is just a cacheline of binary storage (or something
++ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
++ * holding pre-formatted pieces of hardware commands. The performance-critical
++ * code can then copy these descriptors directly into hardware command
++ * registers more efficiently than trying to construct/format commands
++ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
++ * order for the compiler to know its size, but the internal details are not
++ * exposed. The following macro is used within the driver for converting *any*
++ * descriptor pointer to a usable array pointer. The use of a macro (instead of
++ * an inline) is necessary to work with different descriptor types and to work
++ * correctly with const and non-const inputs (and similarly-qualified outputs).
++ */
++#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
++
++#endif /* __FSL_QBMAN_PORTAL_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
+new file mode 100644
+index 00000000..1c77fa6a
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
+@@ -0,0 +1,853 @@
++/* Copyright (C) 2015 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/errno.h>
++
++#include "../../include/dpaa2-global.h"
++#include "qbman-portal.h"
++#include "qbman_debug.h"
++
++/* QBMan portal management command code */
++#define QBMAN_BP_QUERY 0x32
++#define QBMAN_FQ_QUERY 0x44
++#define QBMAN_FQ_QUERY_NP 0x45
++#define QBMAN_CGR_QUERY 0x51
++#define QBMAN_WRED_QUERY 0x54
++#define QBMAN_CGR_STAT_QUERY 0x55
++#define QBMAN_CGR_STAT_QUERY_CLR 0x56
++
++enum qbman_attr_usage_e {
++ qbman_attr_usage_fq,
++ qbman_attr_usage_bpool,
++ qbman_attr_usage_cgr,
++};
++
++struct int_qbman_attr {
++ u32 words[32];
++ enum qbman_attr_usage_e usage;
++};
++
++#define attr_type_set(a, e) \
++{ \
++ struct qbman_attr *__attr = a; \
++ enum qbman_attr_usage_e __usage = e; \
++ ((struct int_qbman_attr *)__attr)->usage = __usage; \
++}
++
++#define ATTR32(d) (&(d)->dont_manipulate_directly[0])
++#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16])
++
++static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1);
++static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1);
++static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1);
++static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16);
++static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16);
++static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16);
++static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16);
++static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16);
++static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16);
++static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14);
++static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15);
++static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1);
++static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32);
++static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32);
++static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32);
++static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32);
++static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16);
++static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3);
++static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32);
++static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32);
++static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8);
++static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8);
++static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8);
++
++void qbman_bp_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_bpool);
++}
++
++int qbman_bp_query(struct qbman_swp *s, u32 bpid,
++ struct qbman_attr *a)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 *attr = ATTR32(a);
++
++ qbman_bp_attr_clear(a);
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ qb_attr_code_encode(&code_bp_bpid, p, bpid);
++
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != QBMAN_BP_QUERY);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt);
++ return -EIO;
++ }
++
++ /* For the query, word[0] of the result contains only the
++ * verb/rslt fields, so skip word[0].
++ */
++ word_copy(&attr[1], &p[1], 15);
++ return 0;
++}
++
++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae)
++{
++ u32 *p = ATTR32(a);
++
++ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p);
++ *va = !!qb_attr_code_decode(&code_bp_va, p);
++ *wae = !!qb_attr_code_decode(&code_bp_wae, p);
++}
++
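++/* Buffer pool thresholds are stored as a mantissa/exponent pair: bits 0-7
++ * hold the mantissa, bits 8-11 the exponent, so value = mantissa << exp.
++ */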
++static u32 qbman_bp_thresh_to_value(u32 val)
++{
++ return (val & 0xff) << ((val & 0xf00) >> 8);
++}
++
++void qbman_bp_attr_get_swdet(struct qbman_attr *a, u32 *swdet)
++{
++ u32 *p = ATTR32(a);
++
++ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet,
++ p));
++}
++
++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, u32 *swdxt)
++{
++ u32 *p = ATTR32(a);
++
++ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt,
++ p));
++}
++
++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, u32 *hwdet)
++{
++ u32 *p = ATTR32(a);
++
++ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet,
++ p));
++}
++
++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, u32 *hwdxt)
++{
++ u32 *p = ATTR32(a);
++
++ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt,
++ p));
++}
++
++void qbman_bp_attr_get_swset(struct qbman_attr *a, u32 *swset)
++{
++ u32 *p = ATTR32(a);
++
++ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset,
++ p));
++}
++
++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, u32 *swsxt)
++{
++ u32 *p = ATTR32(a);
++
++ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt,
++ p));
++}
++
++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, u32 *vbpid)
++{
++ u32 *p = ATTR32(a);
++
++ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p);
++}
++
++void qbman_bp_attr_get_icid(struct qbman_attr *a, u32 *icid, int *pl)
++{
++ u32 *p = ATTR32(a);
++
++ *icid = qb_attr_code_decode(&code_bp_icid, p);
++ *pl = !!qb_attr_code_decode(&code_bp_pl, p);
++}
++
++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, u64 *bpscn_addr)
++{
++ u32 *p = ATTR32(a);
++
++ *bpscn_addr = ((u64)qb_attr_code_decode(&code_bp_bpscn_addr_hi,
++ p) << 32) |
++ (u64)qb_attr_code_decode(&code_bp_bpscn_addr_lo,
++ p);
++}
++
++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, u64 *bpscn_ctx)
++{
++ u32 *p = ATTR32(a);
++
++ *bpscn_ctx = ((u64)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p)
++ << 32) |
++ (u64)qb_attr_code_decode(&code_bp_bpscn_ctx_lo,
++ p);
++}
++
++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, u32 *hw_targ)
++{
++ u32 *p = ATTR32(a);
++
++ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p);
++}
++
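++/* The 3-bit BP state field: bit 0 set = no free buffers, bit 1 = depleted,
++ * bit 2 = surplus.
++ */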
++int qbman_bp_info_has_free_bufs(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1);
++}
++
++int qbman_bp_info_is_depleted(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2);
++}
++
++int qbman_bp_info_is_surplus(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4);
++}
++
++u32 qbman_bp_info_num_free_bufs(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_fill, p);
++}
++
++u32 qbman_bp_info_hdptr(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_hdptr, p);
++}
++
++u32 qbman_bp_info_sdcnt(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_sdcnt, p);
++}
++
++u32 qbman_bp_info_hdcnt(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_hdcnt, p);
++}
++
++u32 qbman_bp_info_sscnt(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_sscnt, p);
++}
++
++static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24);
++static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16);
++static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15);
++static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8);
++static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15);
++static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13);
++static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12);
++static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1);
++static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1);
++static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1);
++static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1);
++static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1);
++static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1);
++static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32);
++static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32);
++static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15);
++static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1);
++static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24);
++static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24);
++
++void qbman_fq_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_fq);
++}
++
++/* FQ query function for programmable fields */
++int qbman_fq_query(struct qbman_swp *s, u32 fqid, struct qbman_attr *desc)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 *d = ATTR32(desc);
++
++ qbman_fq_attr_clear(desc);
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ qb_attr_code_encode(&code_fq_fqid, p, fqid);
++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != QBMAN_FQ_QUERY);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query of FQID 0x%x failed, code=0x%02x\n",
++ fqid, rslt);
++ return -EIO;
++ }
++ /*
++ * For the configure, word[0] of the command contains only the WE-mask.
++ * For the query, word[0] of the result contains only the verb/rslt
++ * fields. Skip word[0] in the latter case.
++ */
++ word_copy(&d[1], &p[1], 15);
++ return 0;
++}
++
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, u32 *fqctrl)
++{
++ u32 *p = ATTR32(d);
++
++ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p);
++}
++
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, u32 *cgrid)
++{
++ u32 *p = ATTR32(d);
++
++ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p);
++}
++
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, u32 *destwq)
++{
++ u32 *p = ATTR32(d);
++
++ *destwq = qb_attr_code_decode(&code_fq_destwq, p);
++}
++
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, u32 *icscred)
++{
++ u32 *p = ATTR32(d);
++
++ *icscred = qb_attr_code_decode(&code_fq_icscred, p);
++}
++
++static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5);
++static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8);
++static u32 qbman_thresh_to_value(u32 val)
++{
++ u32 m, e;
++
++ m = qb_attr_code_decode(&code_tdthresh_mant, &val);
++ e = qb_attr_code_decode(&code_tdthresh_exp, &val);
++ return m << e;
++}
++
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, u32 *tdthresh)
++{
++ u32 *p = ATTR32(d);
++
++ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh,
++ p));
++}
++
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++ int *oa_ics, int *oa_cgr, int32_t *oa_len)
++{
++ u32 *p = ATTR32(d);
++
++ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p);
++ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p);
++ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len,
++ qb_attr_code_decode(&code_fq_oa_len, p));
++}
++
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++ int *bdi, int *ff, int *va, int *ps)
++{
++ u32 *p = ATTR32(d);
++
++ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p);
++ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p);
++ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p);
++ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p);
++}
++
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, u32 *hi, u32 *lo)
++{
++ u32 *p = ATTR32(d);
++
++ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p);
++ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p);
++}
++
++void qbman_fq_attr_get_icid(struct qbman_attr *d, u32 *icid, int *pl)
++{
++ u32 *p = ATTR32(d);
++
++ *icid = qb_attr_code_decode(&code_fq_icid, p);
++ *pl = !!qb_attr_code_decode(&code_fq_pl, p);
++}
++
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, u32 *vfqid)
++{
++ u32 *p = ATTR32(d);
++
++ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p);
++}
++
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, u32 *erfqid)
++{
++ u32 *p = ATTR32(d);
++
++ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p);
++}
++
++/* Query FQ non-programmable fields */
++static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3);
++static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1);
++static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1);
++static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1);
++static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1);
++static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24);
++static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32);
++
++int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
++ struct qbman_attr *state)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 *d = ATTR32(state);
++
++ qbman_fq_attr_clear(state);
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ qb_attr_code_encode(&code_fq_fqid, p, fqid);
++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != QBMAN_FQ_QUERY_NP);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
++ fqid, rslt);
++ return -EIO;
++ }
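++ /* Unlike the programmable-field query, the NP state fields share
++ * word[0] with the verb/rslt, so the full 16-word result is copied.
++ */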
++ word_copy(&d[0], &p[0], 16);
++ return 0;
++}
++
++u32 qbman_fq_state_schedstate(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_state, p);
++}
++
++int qbman_fq_state_force_eligible(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_fe, p);
++}
++
++int qbman_fq_state_xoff(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_x, p);
++}
++
++int qbman_fq_state_retirement_pending(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_r, p);
++}
++
++int qbman_fq_state_overflow_error(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_oe, p);
++}
++
++u32 qbman_fq_state_frame_count(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_frm_cnt, p);
++}
++
++u32 qbman_fq_state_byte_count(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_byte_cnt, p);
++}
++
++/* Query CGR */
++static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1);
++static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1);
++static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1);
++static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2);
++static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1);
++static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1);
++static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1);
++static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1);
++static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1);
++static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1);
++static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1);
++static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1);
++static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5);
++static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1);
++static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13);
++static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13);
++static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13);
++static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16);
++static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16);
++static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16);
++static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15);
++static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1);
++static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32);
++static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32);
++static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32);
++static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32);
++
++void qbman_cgr_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_cgr);
++}
++
++int qbman_cgr_query(struct qbman_swp *s, u32 cgid, struct qbman_attr *attr)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 *d[2];
++ int i;
++ u32 query_verb;
++
++ d[0] = ATTR32(attr);
++ d[1] = ATTR32_1(attr);
++
++ qbman_cgr_attr_clear(attr);
++
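++ /* CGR attributes span two 16-word result sets: pass 0 issues the basic
++ * CGR query into the first half, pass 1 the WRED query into the second
++ * half (ATTR32_1).
++ */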
++ for (i = 0; i < 2; i++) {
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY;
++
++ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != query_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query CGID 0x%x failed,", cgid);
++ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt);
++ return -EIO;
++ }
++ /* For the configure, word[0] of the command contains only the
++ * verb/cgid. For the query, word[0] of the result contains
++ * only the verb/rslt fields. Skip word[0] in the latter case.
++ */
++ word_copy(&d[i][1], &p[1], 15);
++ }
++ return 0;
++}
++
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++ int *cscn_wq_en_exit, int *cscn_wq_icd)
++{
++ u32 *p = ATTR32(d);
++ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter,
++ p);
++ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p);
++ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p);
++}
++
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, u32 *mode,
++ int *rej_cnt_mode, int *cscn_bdi)
++{
++ u32 *p = ATTR32(d);
++ *mode = qb_attr_code_decode(&code_cgr_mode, p);
++ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p);
++ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p);
++}
++
++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
++ int *cscn_wr_en_exit, int *cg_wr_ae,
++ int *cscn_dcp_en, int *cg_wr_va)
++{
++ u32 *p = ATTR32(d);
++ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter,
++ p);
++ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p);
++ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p);
++ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p);
++ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p);
++}
++
++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
++ u32 *i_cnt_wr_bnd)
++{
++ u32 *p = ATTR32(d);
++ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p);
++ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p);
++}
++
++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en)
++{
++ u32 *p = ATTR32(d);
++ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p);
++}
++
++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, u32 *cs_thres)
++{
++ u32 *p = ATTR32(d);
++ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_cs_thres, p));
++}
++
++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
++ u32 *cs_thres_x)
++{
++ u32 *p = ATTR32(d);
++ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_cs_thres_x, p));
++}
++
++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, u32 *td_thres)
++{
++ u32 *p = ATTR32(d);
++ *td_thres = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_td_thres, p));
++}
++
++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, u32 *cscn_tdcp)
++{
++ u32 *p = ATTR32(d);
++ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p);
++}
++
++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, u32 *cscn_wqid)
++{
++ u32 *p = ATTR32(d);
++ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p);
++}
++
++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
++ u32 *cscn_vcgid)
++{
++ u32 *p = ATTR32(d);
++ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p);
++}
++
++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, u32 *icid,
++ int *pl)
++{
++ u32 *p = ATTR32(d);
++ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p);
++ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p);
++}
++
++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
++ u64 *cg_wr_addr)
++{
++ u32 *p = ATTR32(d);
++ *cg_wr_addr = ((u64)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi,
++ p) << 32) |
++ (u64)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo,
++ p);
++}
++
++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, u64 *cscn_ctx)
++{
++ u32 *p = ATTR32(d);
++ *cscn_ctx = ((u64)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p)
++ << 32) |
++ (u64)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p);
++}
++
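++/* WRED fields live in the second (WRED) result set: the 8-bit EDP flags
++ * are packed four per word from word 18, and each index's PARM_DP value
++ * occupies its own word from word 20.
++ */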
++#define WRED_EDP_WORD(n) (18 + (n) / 4)
++#define WRED_EDP_OFFSET(n) (8 * ((n) % 4))
++#define WRED_PARM_DP_WORD(n) ((n) + 20)
++#define WRED_WE_EDP(n) (16 + (n) * 2)
++#define WRED_WE_PARM_DP(n) (17 + (n) * 2)
++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, u32 idx,
++ int *edp)
++{
++ u32 *p = ATTR32(d);
++ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx),
++ WRED_EDP_OFFSET(idx), 8);
++ *edp = (int)qb_attr_code_decode(&code_wred_edp, p);
++}
++
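++/* A WRED parameter word packs MA (bits 24-31), Mn (19-23), step_i (11-18),
++ * step_s (6-10) and Pn (0-5); the thresholds below are rebuilt from these
++ * mantissa/exponent-style encodings.
++ */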
++void qbman_cgr_attr_wred_dp_decompose(u32 dp, u64 *minth,
++ u64 *maxth, u8 *maxp)
++{
++ u8 ma, mn, step_i, step_s, pn;
++
++ ma = (u8)(dp >> 24);
++ mn = (u8)(dp >> 19) & 0x1f;
++ step_i = (u8)(dp >> 11);
++ step_s = (u8)(dp >> 6) & 0x1f;
++ pn = (u8)dp & 0x3f;
++
++ *maxp = ((pn << 2) * 100) / 256;
++
++ if (mn == 0)
++ *maxth = ma;
++ else
++ *maxth = ((ma + 256) * (1 << (mn - 1)));
++
++ if (step_s == 0)
++ *minth = *maxth - step_i;
++ else
++ *minth = *maxth - (256 + step_i) * (1 << (step_s - 1));
++}
++
++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, u32 idx,
++ u32 *dp)
++{
++ u32 *p = ATTR32(d);
++ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx),
++ 0, 32);
++ *dp = qb_attr_code_decode(&code_wred_parm_dp, p);
++}
++
++/* Query CGR/CCGR/CQ statistics */
++static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8);
++static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32);
++static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16);
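++/* command_type 0 selects CQ dequeue statistics and 1 selects CCGR reject
++ * statistics; values >= 2 (e.g. the 0xff used for plain CGR reject counts
++ * below) leave the CT field unencoded.
++ */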
++static int qbman_cgr_statistics_query(struct qbman_swp *s, u32 cgid,
++ int clear, u32 command_type,
++ u64 *frame_cnt, u64 *byte_cnt)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 query_verb;
++ u32 hi, lo;
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
++ if (command_type < 2)
++ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type);
++ query_verb = clear ?
++ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY;
++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != query_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query statistics of CGID 0x%x failed,", cgid);
++ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt);
++ return -EIO;
++ }
++
++ if (frame_cnt) {
++ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p);
++ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p);
++ *frame_cnt = ((u64)hi << 32) | (u64)lo;
++ }
++ if (byte_cnt) {
++ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p);
++ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p);
++ *byte_cnt = ((u64)hi << 32) | (u64)lo;
++ }
++
++ return 0;
++}
++
++int qbman_cgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 0xff,
++ frame_cnt, byte_cnt);
++}
++
++int qbman_ccgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 1,
++ frame_cnt, byte_cnt);
++}
++
++int qbman_cq_dequeue_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 0,
++ frame_cnt, byte_cnt);
++}
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+new file mode 100644
+index 00000000..0a247a49
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+@@ -0,0 +1,136 @@
++/* Copyright (C) 2015 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++struct qbman_attr {
++ u32 dont_manipulate_directly[40];
++};
++
++/* Buffer pool query commands */
++int qbman_bp_query(struct qbman_swp *s, u32 bpid,
++ struct qbman_attr *a);
++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae);
++void qbman_bp_attr_get_swdet(struct qbman_attr *a, u32 *swdet);
++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, u32 *swdxt);
++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, u32 *hwdet);
++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, u32 *hwdxt);
++void qbman_bp_attr_get_swset(struct qbman_attr *a, u32 *swset);
++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, u32 *swsxt);
++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, u32 *vbpid);
++void qbman_bp_attr_get_icid(struct qbman_attr *a, u32 *icid, int *pl);
++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, u64 *bpscn_addr);
++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, u64 *bpscn_ctx);
++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, u32 *hw_targ);
++int qbman_bp_info_has_free_bufs(struct qbman_attr *a);
++int qbman_bp_info_is_depleted(struct qbman_attr *a);
++int qbman_bp_info_is_surplus(struct qbman_attr *a);
++u32 qbman_bp_info_num_free_bufs(struct qbman_attr *a);
++u32 qbman_bp_info_hdptr(struct qbman_attr *a);
++u32 qbman_bp_info_sdcnt(struct qbman_attr *a);
++u32 qbman_bp_info_hdcnt(struct qbman_attr *a);
++u32 qbman_bp_info_sscnt(struct qbman_attr *a);
++
++/* FQ query function for programmable fields */
++int qbman_fq_query(struct qbman_swp *s, u32 fqid,
++ struct qbman_attr *desc);
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, u32 *fqctrl);
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, u32 *cgrid);
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, u32 *destwq);
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, u32 *icscred);
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, u32 *tdthresh);
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++ int *oa_ics, int *oa_cgr, int32_t *oa_len);
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++ int *bdi, int *ff, int *va, int *ps);
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, u32 *hi, u32 *lo);
++void qbman_fq_attr_get_icid(struct qbman_attr *d, u32 *icid, int *pl);
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, u32 *vfqid);
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, u32 *erfqid);
++
++/* FQ query command for non-programmable fields */
++enum qbman_fq_schedstate_e {
++ qbman_fq_schedstate_oos = 0,
++ qbman_fq_schedstate_retired,
++ qbman_fq_schedstate_tentatively_scheduled,
++ qbman_fq_schedstate_truly_scheduled,
++ qbman_fq_schedstate_parked,
++ qbman_fq_schedstate_held_active,
++};
++
++int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
++ struct qbman_attr *state);
++u32 qbman_fq_state_schedstate(const struct qbman_attr *state);
++int qbman_fq_state_force_eligible(const struct qbman_attr *state);
++int qbman_fq_state_xoff(const struct qbman_attr *state);
++int qbman_fq_state_retirement_pending(const struct qbman_attr *state);
++int qbman_fq_state_overflow_error(const struct qbman_attr *state);
++u32 qbman_fq_state_frame_count(const struct qbman_attr *state);
++u32 qbman_fq_state_byte_count(const struct qbman_attr *state);
++
++/* CGR query */
++int qbman_cgr_query(struct qbman_swp *s, u32 cgid,
++ struct qbman_attr *attr);
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++ int *cscn_wq_en_exit, int *cscn_wq_icd);
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, u32 *mode,
++ int *rej_cnt_mode, int *cscn_bdi);
++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
++ int *cscn_wr_en_exit, int *cg_wr_ae,
++ int *cscn_dcp_en, int *cg_wr_va);
++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
++ u32 *i_cnt_wr_bnd);
++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en);
++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, u32 *cs_thres);
++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
++ u32 *cs_thres_x);
++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, u32 *td_thres);
++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, u32 *cscn_tdcp);
++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, u32 *cscn_wqid);
++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
++ u32 *cscn_vcgid);
++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, u32 *icid,
++ int *pl);
++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
++ u64 *cg_wr_addr);
++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, u64 *cscn_ctx);
++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, u32 idx,
++ int *edp);
++void qbman_cgr_attr_wred_dp_decompose(u32 dp, u64 *minth,
++ u64 *maxth, u8 *maxp);
++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, u32 idx,
++ u32 *dp);
++
++/* CGR/CCGR/CQ statistics query */
++int qbman_cgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt);
++int qbman_ccgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt);
++int qbman_cq_dequeue_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt);
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_private.h b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h
+new file mode 100644
+index 00000000..98a64be2
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h
+@@ -0,0 +1,171 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/* Perform extra checking */
++#define QBMAN_CHECKING
++
++/* To maximise the amount of logic that is common between the Linux driver and
++ * other targets (such as the embedded MC firmware), we pivot here between the
++ * inclusion of two platform-specific headers.
++ *
++ * The first, qbman_sys_decl.h, includes any and all required system headers as
++ * well as providing any definitions for the purposes of compatibility. The
++ * second, qbman_sys.h, is where platform-specific routines go.
++ *
++ * The point of the split is that the platform-independent code (including this
++ * header) may depend on platform-specific declarations, yet other
++ * platform-specific routines may depend on platform-independent definitions.
++ */
++
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++
++/* When things go wrong, it is a convenient trick to insert a few FOO()
++ * statements in the code to trace progress. TODO: remove this once we are
++ * hacking the code less actively.
++ */
++#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__)
++
++/* Any time there is a register interface which we poll on, this provides a
++ * "break after x iterations" scheme for it. It's handy for debugging, eg.
++ * where you don't want millions of lines of log output from a polling loop
++ * that won't, because such things tend to drown out the earlier log output
++ * that might explain what caused the problem. (NB: put ";" after each macro!)
++ * TODO: we should probably remove this once we're done sanitising the
++ * simulator...
++ */
++#define DBG_POLL_START(loopvar) (loopvar = 1000)
++#define DBG_POLL_CHECK(loopvar) \
++ do {if (!((loopvar)--)) WARN_ON(1); } while (0)
++
++/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
++ * and widths, these macro-generated encode/decode/isolate/remove inlines can
++ * be used.
++ *
++ * Eg. to "d"ecode a 14-bit field out of a register (into a "u16" type),
++ * where the field is located 3 bits "up" from the least-significant bit of the
++ * register (ie. the field location within the 32-bit register corresponds to a
++ * mask of 0x0001fff8), you would do;
++ * u16 field = d32_u16(3, 14, reg_value);
++ *
++ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
++ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
++ * operator) into a register at bit location 0x00080000 (19 bits "in" from the
++ * LS bit), do;
++ * reg_value |= e32_int(19, 1, !!field);
++ *
++ * If you wish to read-modify-write a register, such that you leave the 14-bit
++ * field as-is but have all other fields set to zero, then "i"solate the 14-bit
++ * value using;
++ * reg_value = i32_u16(3, 14, reg_value);
++ *
++ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
++ * zero) but leaving all other fields as-is;
++ * reg_val = r32_int(19, 1, reg_value);
++ *
++ */
++#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \
++ (u32)((1 << width) - 1))
++#define DECLARE_CODEC32(t) \
++static inline u32 e32_##t(u32 lsoffset, u32 width, t val) \
++{ \
++ WARN_ON(width > (sizeof(t) * 8)); \
++ return ((u32)val & MAKE_MASK32(width)) << lsoffset; \
++} \
++static inline t d32_##t(u32 lsoffset, u32 width, u32 val) \
++{ \
++ WARN_ON(width > (sizeof(t) * 8)); \
++ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \
++} \
++static inline u32 i32_##t(u32 lsoffset, u32 width, \
++ u32 val) \
++{ \
++ WARN_ON(width > (sizeof(t) * 8)); \
++ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \
++} \
++static inline u32 r32_##t(u32 lsoffset, u32 width, \
++ u32 val) \
++{ \
++ WARN_ON(width > (sizeof(t) * 8)); \
++ return ~(MAKE_MASK32(width) << lsoffset) & val; \
++}
++DECLARE_CODEC32(u32)
++DECLARE_CODEC32(u16)
++DECLARE_CODEC32(u8)
++DECLARE_CODEC32(int)
++
++/*********************/
++/* Debugging assists */
++/*********************/
++
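++/* Dump memory as 16-byte rows; bytes outside the caller's [p, p + sz)
++ * window are shown as "..".
++ */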
++static inline void __hexdump(unsigned long start, unsigned long end,
++ unsigned long p, size_t sz,
++ const unsigned char *c)
++{
++ while (start < end) {
++ unsigned int pos = 0;
++ char buf[64];
++ int nl = 0;
++
++ pos += sprintf(buf + pos, "%08lx: ", start);
++ do {
++ if ((start < p) || (start >= (p + sz)))
++ pos += sprintf(buf + pos, "..");
++ else
++ pos += sprintf(buf + pos, "%02x", *(c++));
++ if (!(++start & 15)) {
++ buf[pos++] = '\n';
++ nl = 1;
++ } else {
++ nl = 0;
++ if (!(start & 1))
++ buf[pos++] = ' ';
++ if (!(start & 3))
++ buf[pos++] = ' ';
++ }
++ } while (start & 15);
++ if (!nl)
++ buf[pos++] = '\n';
++ buf[pos] = '\0';
++ pr_info("%s", buf);
++ }
++}
++
++static inline void hexdump(const void *ptr, size_t sz)
++{
++ unsigned long p = (unsigned long)ptr;
++ unsigned long start = p & ~15ul;
++ unsigned long end = (p + sz + 15) & ~15ul;
++ const unsigned char *c = ptr;
++
++ __hexdump(start, end, p, sz, c);
++}
+diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+index d098a6d8..384a13d0 100644
+--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -33,108 +33,24 @@
+ #define _FSL_DPMCP_CMD_H
+
+ /* Minimal supported DPMCP Version */
+-#define DPMCP_MIN_VER_MAJOR 3
+-#define DPMCP_MIN_VER_MINOR 0
++#define DPMCP_MIN_VER_MAJOR 3
++#define DPMCP_MIN_VER_MINOR 0
+
+-/* Command IDs */
+-#define DPMCP_CMDID_CLOSE 0x800
+-#define DPMCP_CMDID_OPEN 0x80b
+-#define DPMCP_CMDID_CREATE 0x90b
+-#define DPMCP_CMDID_DESTROY 0x900
++/* Command versioning */
++#define DPMCP_CMD_BASE_VERSION 1
++#define DPMCP_CMD_ID_OFFSET 4
+
+-#define DPMCP_CMDID_GET_ATTR 0x004
+-#define DPMCP_CMDID_RESET 0x005
++#define DPMCP_CMD(id) ((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
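++/* e.g. DPMCP_CMDID_OPEN below expands to (0x80b << 4) | 1 = 0x80b1 */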
++
++/* Command IDs */
++#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
++#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
++#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b)
+
+-#define DPMCP_CMDID_SET_IRQ 0x010
+-#define DPMCP_CMDID_GET_IRQ 0x011
+-#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPMCP_CMDID_SET_IRQ_MASK 0x014
+-#define DPMCP_CMDID_GET_IRQ_MASK 0x015
+-#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
++#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
+ struct dpmcp_cmd_open {
+ __le32 dpmcp_id;
+ };
+
+-struct dpmcp_cmd_create {
+- __le32 portal_id;
+-};
+-
+-struct dpmcp_cmd_set_irq {
+- /* cmd word 0 */
+- u8 irq_index;
+- u8 pad[3];
+- __le32 irq_val;
+- /* cmd word 1 */
+- __le64 irq_addr;
+- /* cmd word 2 */
+- __le32 irq_num;
+-};
+-
+-struct dpmcp_cmd_get_irq {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq {
+- /* cmd word 0 */
+- __le32 irq_val;
+- __le32 pad;
+- /* cmd word 1 */
+- __le64 irq_paddr;
+- /* cmd word 2 */
+- __le32 irq_num;
+- __le32 type;
+-};
+-
+-#define DPMCP_ENABLE 0x1
+-
+-struct dpmcp_cmd_set_irq_enable {
+- u8 enable;
+- u8 pad[3];
+- u8 irq_index;
+-};
+-
+-struct dpmcp_cmd_get_irq_enable {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq_enable {
+- u8 enabled;
+-};
+-
+-struct dpmcp_cmd_set_irq_mask {
+- __le32 mask;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_cmd_get_irq_mask {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq_mask {
+- __le32 mask;
+-};
+-
+-struct dpmcp_cmd_get_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq_status {
+- __le32 status;
+-};
+-
+-struct dpmcp_rsp_get_attributes {
+- /* response word 0 */
+- __le32 pad;
+- __le32 id;
+- /* response word 1 */
+- __le16 version_major;
+- __le16 version_minor;
+-};
+-
+ #endif /* _FSL_DPMCP_CMD_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
+index 55766f78..ad4c8b43 100644
+--- a/drivers/staging/fsl-mc/bus/dpmcp.c
++++ b/drivers/staging/fsl-mc/bus/dpmcp.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -103,76 +103,6 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
+ return mc_send_command(mc_io, &cmd);
+ }
+
+-/**
+- * dpmcp_create() - Create the DPMCP object.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @cfg: Configuration structure
+- * @token: Returned token; use in subsequent API calls
+- *
+- * Create the DPMCP object, allocate required resources and
+- * perform required initialization.
+- *
+- * The object can be created either by declaring it in the
+- * DPL file, or by calling this function.
+- * This function returns a unique authentication token,
+- * associated with the specific object ID and the specific MC
+- * portal; this token must be used in all subsequent calls to
+- * this specific object. For objects that are created using the
+- * DPL file, call dpmcp_open function to get an authentication
+- * token first.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_create(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- const struct dpmcp_cfg *cfg,
+- u16 *token)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_create *cmd_params;
+-
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
+- cmd_flags, 0);
+- cmd_params = (struct dpmcp_cmd_create *)cmd.params;
+- cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- *token = mc_cmd_hdr_read_token(&cmd);
+-
+- return 0;
+-}
+-
+-/**
+- * dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- *
+- * Return: '0' on Success; error code otherwise.
+- */
+-int dpmcp_destroy(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+ /**
+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -196,309 +126,33 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
+ }
+
+ /**
+- * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: Identifies the interrupt index to configure
+- * @irq_cfg: IRQ configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dpmcp_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_set_irq *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_set_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+- cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq() - Get IRQ information from the DPMCP.
+- * @mc_io: Pointer to MC portal's I/O object
++ * dpmcp_get_api_version - Get Data Path Management Command Portal API version
++ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @type: Interrupt type: 0 represents message interrupt
+- * type (both irq_addr and irq_val are valid)
+- * @irq_cfg: IRQ attributes
++ * @major_ver: Major version of Data Path Management Command Portal API
++ * @minor_ver: Minor version of Data Path Management Command Portal API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dpmcp_irq_cfg *irq_cfg)
++int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
+ {
+ struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq *cmd_params;
+- struct dpmcp_rsp_get_irq *rsp_params;
+ int err;
+
+ /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq *)cmd.params;
+- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_paddr);
+- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+- *type = le32_to_cpu(rsp_params->type);
+- return 0;
+-}
+-
+-/**
+- * dpmcp_set_irq_enable() - Set overall interrupt state.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @en: Interrupt state - enable = 1, disable = 0
+- *
+- * Allows GPP software to control when interrupts are generated.
+- * Each interrupt can have up to 32 causes. The enable/disable control's the
+- * overall interrupt state. if the interrupt is disabled no causes will cause
+- * an interrupt.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_set_irq_enable *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_set_irq_enable *)cmd.params;
+- cmd_params->enable = en & DPMCP_ENABLE;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq_enable() - Get overall interrupt state
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @en: Returned interrupt state - enable = 1, disable = 0
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq_enable *cmd_params;
+- struct dpmcp_rsp_get_irq_enable *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq_enable *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq_enable *)cmd.params;
+- *en = rsp_params->enabled & DPMCP_ENABLE;
+- return 0;
+-}
+-
+-/**
+- * dpmcp_set_irq_mask() - Set interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Event mask to trigger interrupt;
+- * each bit:
+- * 0 = ignore event
+- * 1 = consider event for asserting IRQ
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_set_irq_mask *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_set_irq_mask *)cmd.params;
+- cmd_params->mask = cpu_to_le32(mask);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq_mask() - Get interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Returned event mask to trigger interrupt
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq_mask *cmd_params;
+- struct dpmcp_rsp_get_irq_mask *rsp_params;
+-
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq_mask *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq_mask *)cmd.params;
+- *mask = le32_to_cpu(rsp_params->mask);
+-
+- return 0;
+-}
+-
+-/**
+- * dpmcp_get_irq_status() - Get the current status of any pending interrupts.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @status: Returned interrupts status - one bit per cause:
+- * 0 = no interrupt pending
+- * 1 = interrupt pending
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq_status *cmd_params;
+- struct dpmcp_rsp_get_irq_status *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(*status);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq_status *)cmd.params;
+- *status = le32_to_cpu(rsp_params->status);
+-
+- return 0;
+-}
+-
+-/**
+- * dpmcp_get_attributes - Retrieve DPMCP attributes.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @attr: Returned object's attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpmcp_attr *attr)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_rsp_get_attributes *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR,
+- cmd_flags, token);
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
+
+- /* send command to mc*/
++ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
+- attr->id = le32_to_cpu(rsp_params->id);
+- attr->version.major = le16_to_cpu(rsp_params->version_major);
+- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+ }
+diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
+index fe79d4d9..f616031e 100644
+--- a/drivers/staging/fsl-mc/bus/dpmcp.h
++++ b/drivers/staging/fsl-mc/bus/dpmcp.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -32,128 +32,29 @@
+ #ifndef __FSL_DPMCP_H
+ #define __FSL_DPMCP_H
+
+-/* Data Path Management Command Portal API
++/*
++ * Data Path Management Command Portal API
+ * Contains initialization APIs and runtime control APIs for DPMCP
+ */
+
+ struct fsl_mc_io;
+
+ int dpmcp_open(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
++ u32 cmd_flags,
+ int dpmcp_id,
+- uint16_t *token);
+-
+-/* Get portal ID from pool */
+-#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
++ u16 *token);
+
+ int dpmcp_close(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token);
+-
+-/**
+- * struct dpmcp_cfg - Structure representing DPMCP configuration
+- * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID
+- * from pool
+- */
+-struct dpmcp_cfg {
+- int portal_id;
+-};
+-
+-int dpmcp_create(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- const struct dpmcp_cfg *cfg,
+- uint16_t *token);
++ u32 cmd_flags,
++ u16 token);
+
+-int dpmcp_destroy(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token);
++int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
+
+ int dpmcp_reset(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token);
+-
+-/* IRQ */
+-/* IRQ Index */
+-#define DPMCP_IRQ_INDEX 0
+-/* irq event - Indicates that the link state changed */
+-#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001
+-
+-/**
+- * struct dpmcp_irq_cfg - IRQ configuration
+- * @paddr: Address that must be written to signal a message-based interrupt
+- * @val: Value to write into irq_addr address
+- * @irq_num: A user defined number associated with this IRQ
+- */
+-struct dpmcp_irq_cfg {
+- uint64_t paddr;
+- uint32_t val;
+- int irq_num;
+-};
+-
+-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- struct dpmcp_irq_cfg *irq_cfg);
+-
+-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- int *type,
+- struct dpmcp_irq_cfg *irq_cfg);
+-
+-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint8_t en);
+-
+-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint8_t *en);
+-
+-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t mask);
+-
+-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t *mask);
+-
+-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t *status);
+-
+-/**
+- * struct dpmcp_attr - Structure representing DPMCP attributes
+- * @id: DPMCP object ID
+- * @version: DPMCP version
+- */
+-struct dpmcp_attr {
+- int id;
+- /**
+- * struct version - Structure representing DPMCP version
+- * @major: DPMCP major version
+- * @minor: DPMCP minor version
+- */
+- struct {
+- uint16_t major;
+- uint16_t minor;
+- } version;
+-};
+-
+-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- struct dpmcp_attr *attr);
++ u32 cmd_flags,
++ u16 token);
+
+ #endif /* __FSL_DPMCP_H */
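+
The trimmed DPMCP header above leaves only open/close, reset, and the new get_api_version query; create/destroy and all IRQ plumbing move out of the kernel. A minimal sketch of the surviving management sequence, assuming a valid MC portal (mc_io) and an existing DPMCP object ID (the wrapper function name is illustrative, not part of the patch):

#include "../include/mc-sys.h"
#include "dpmcp.h"

/* Open the DPMCP, reset it to a clean state, and close it again. */
static int dpmcp_sanity_reset(struct fsl_mc_io *mc_io, int dpmcp_id)
{
	u16 token, major, minor;
	int err;

	err = dpmcp_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	err = dpmcp_open(mc_io, 0, dpmcp_id, &token);
	if (err)
		return err;

	err = dpmcp_reset(mc_io, 0, token);

	dpmcp_close(mc_io, 0, token);
	return err;
}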
+diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+index a7b77d58..cdddfb80 100644
+--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+@@ -12,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -41,13 +40,14 @@
+ #ifndef __FSL_DPMNG_CMD_H
+ #define __FSL_DPMNG_CMD_H
+
+-/* Command IDs */
+-#define DPMNG_CMDID_GET_CONT_ID 0x830
+-#define DPMNG_CMDID_GET_VERSION 0x831
++/* Command versioning */
++#define DPMNG_CMD_BASE_VERSION 1
++#define DPMNG_CMD_ID_OFFSET 4
+
+-struct dpmng_rsp_get_container_id {
+- __le32 container_id;
+-};
++#define DPMNG_CMD(id) ((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
++
++/* Command IDs */
++#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
+
+ struct dpmng_rsp_get_version {
+ __le32 revision;
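+
The command-versioning scheme introduced here packs a 4-bit command version into the low nibble of every command ID, so the firmware can dispatch on ID and version at once. A worked example of the encoding as a compile-time check (shown only for illustration):

#include <linux/bug.h>

static inline void dpmng_cmd_encoding_check(void)
{
	/* DPMNG_CMD(0x831) == (0x831 << 4) | 1 == 0x8311:
	 * bits [15:4] carry the command ID, bits [3:0] the version.
	 */
	BUILD_BUG_ON(DPMNG_CMD(0x831) != 0x8311);
}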
+diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
+index 96b1d677..ad5d5bbe 100644
+--- a/drivers/staging/fsl-mc/bus/dpmng.c
++++ b/drivers/staging/fsl-mc/bus/dpmng.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -72,36 +72,3 @@ int mc_get_version(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(mc_get_version);
+
+-/**
+- * dpmng_get_container_id() - Get container ID associated with a given portal.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @container_id: Requested container ID
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int *container_id)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmng_rsp_get_container_id *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID,
+- cmd_flags,
+- 0);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
+- *container_id = le32_to_cpu(rsp_params->container_id);
+-
+- return 0;
+-}
+-
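+
With dpmng_get_container_id() gone, mc_get_version() is the only call left in this file; container IDs are now queried through the DPRC API (dprc_get_container_id(), added further down). A sketch of the surviving call, mirroring how the bus probe code later in this patch uses it:

	struct mc_version mc_version;
	int err;

	err = mc_get_version(mc_io, 0, &mc_version);
	if (!err)
		pr_info("MC firmware version: %u.%u.%u\n",
			mc_version.major, mc_version.minor, mc_version.revision);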
+diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
+index 009d6567..b7d8c345 100644
+--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
+@@ -12,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -42,48 +41,39 @@
+ #define _FSL_DPRC_CMD_H
+
+ /* Minimal supported DPRC Version */
+-#define DPRC_MIN_VER_MAJOR 5
++#define DPRC_MIN_VER_MAJOR 6
+ #define DPRC_MIN_VER_MINOR 0
+
++/* Command versioning */
++#define DPRC_CMD_BASE_VERSION 1
++#define DPRC_CMD_ID_OFFSET 4
++
++#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
++
+ /* Command IDs */
+-#define DPRC_CMDID_CLOSE 0x800
+-#define DPRC_CMDID_OPEN 0x805
+-#define DPRC_CMDID_CREATE 0x905
+-
+-#define DPRC_CMDID_GET_ATTR 0x004
+-#define DPRC_CMDID_RESET_CONT 0x005
+-
+-#define DPRC_CMDID_SET_IRQ 0x010
+-#define DPRC_CMDID_GET_IRQ 0x011
+-#define DPRC_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPRC_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPRC_CMDID_SET_IRQ_MASK 0x014
+-#define DPRC_CMDID_GET_IRQ_MASK 0x015
+-#define DPRC_CMDID_GET_IRQ_STATUS 0x016
+-#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017
+-
+-#define DPRC_CMDID_CREATE_CONT 0x151
+-#define DPRC_CMDID_DESTROY_CONT 0x152
+-#define DPRC_CMDID_SET_RES_QUOTA 0x155
+-#define DPRC_CMDID_GET_RES_QUOTA 0x156
+-#define DPRC_CMDID_ASSIGN 0x157
+-#define DPRC_CMDID_UNASSIGN 0x158
+-#define DPRC_CMDID_GET_OBJ_COUNT 0x159
+-#define DPRC_CMDID_GET_OBJ 0x15A
+-#define DPRC_CMDID_GET_RES_COUNT 0x15B
+-#define DPRC_CMDID_GET_RES_IDS 0x15C
+-#define DPRC_CMDID_GET_OBJ_REG 0x15E
+-#define DPRC_CMDID_SET_OBJ_IRQ 0x15F
+-#define DPRC_CMDID_GET_OBJ_IRQ 0x160
+-#define DPRC_CMDID_SET_OBJ_LABEL 0x161
+-#define DPRC_CMDID_GET_OBJ_DESC 0x162
+-
+-#define DPRC_CMDID_CONNECT 0x167
+-#define DPRC_CMDID_DISCONNECT 0x168
+-#define DPRC_CMDID_GET_POOL 0x169
+-#define DPRC_CMDID_GET_POOL_COUNT 0x16A
+-
+-#define DPRC_CMDID_GET_CONNECTION 0x16C
++#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
++#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
++#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
++
++#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
++#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
++
++#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
++#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011)
++#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
++#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013)
++#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
++#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015)
++#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
++#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
++
++#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
++#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
++#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
++#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B)
++#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
++#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
++#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160)
+
+ struct dprc_cmd_open {
+ __le32 container_id;
+@@ -199,9 +189,6 @@ struct dprc_rsp_get_attributes {
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+- /* response word 2 */
+- __le16 version_major;
+- __le16 version_minor;
+ };
+
+ struct dprc_cmd_set_res_quota {
+@@ -367,11 +354,16 @@ struct dprc_cmd_get_obj_region {
+
+ struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+- __le64 pad;
++ __le64 pad0;
+ /* response word 1 */
+- __le64 base_addr;
++ __le32 base_addr;
++ __le32 pad1;
+ /* response word 2 */
+ __le32 size;
++ u8 type;
++ u8 pad2[3];
++ /* response word 3 */
++ __le32 flags;
+ };
+
+ struct dprc_cmd_set_obj_label {
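+
The reshaped dprc_rsp_get_obj_region must stay congruent with the MC's fixed 64-bit response words: base_addr shrinks to 32 bits in word 1, word 2 gains the region type next to the size, and the new flags field lands in word 3. A compile-time sketch of the offsets this implies (illustrative only):

#include <linux/bug.h>
#include <linux/stddef.h>

static inline void dprc_rsp_get_obj_region_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct dprc_rsp_get_obj_region, base_addr) != 8);
	BUILD_BUG_ON(offsetof(struct dprc_rsp_get_obj_region, size) != 16);
	BUILD_BUG_ON(offsetof(struct dprc_rsp_get_obj_region, flags) != 24);
}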
+diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
+index c5ee4639..f6e6211b 100644
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale data path resource container (DPRC) driver
+ *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -160,6 +160,8 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @driver_override: driver override to apply to new objects found in the
++ * DPRC, or NULL if none.
+ * @obj_desc_array: array of device descriptors for child devices currently
+ * present in the physical DPRC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+@@ -169,6 +171,7 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
+ * in the physical DPRC.
+ */
+ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
+ struct dprc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+ {
+@@ -188,11 +191,12 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
++ put_device(&child_dev->dev);
+ continue;
+ }
+
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+- &child_dev);
++ driver_override, &child_dev);
+ if (error < 0)
+ continue;
+ }
+@@ -202,6 +206,8 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ * dprc_scan_objects - Discover objects in a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @driver_override: driver override to apply to new objects found in the
++ * DPRC, or NULL if none.
+ * @total_irq_count: total number of IRQs needed by objects in the DPRC.
+ *
+ * Detects objects added and removed from a DPRC and synchronizes the
+@@ -217,6 +223,7 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ * of the device drivers for the non-allocatable devices.
+ */
+ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
+ unsigned int *total_irq_count)
+ {
+ int num_child_objects;
+@@ -297,7 +304,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+- dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
++ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array,
+ num_child_objects);
+
+ if (child_obj_desc_array)
+@@ -328,7 +335,7 @@ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
+ * Discover objects in the DPRC:
+ */
+ mutex_lock(&mc_bus->scan_mutex);
+- error = dprc_scan_objects(mc_bus_dev, &irq_count);
++ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count);
+ mutex_unlock(&mc_bus->scan_mutex);
+ if (error < 0)
+ goto error;
+@@ -415,7 +422,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
+ DPRC_IRQ_EVENT_OBJ_CREATED)) {
+ unsigned int irq_count;
+
+- error = dprc_scan_objects(mc_dev, &irq_count);
++ error = dprc_scan_objects(mc_dev, NULL, &irq_count);
+ if (error < 0) {
+ /*
+ * If the error is -ENXIO, we ignore it, as it indicates
+@@ -505,7 +512,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
+ dprc_irq0_handler,
+ dprc_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+- "FSL MC DPRC irq0",
++ dev_name(&mc_dev->dev),
+ &mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+@@ -597,6 +604,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ bool mc_io_created = false;
+ bool msi_domain_set = false;
++ u16 major_ver, minor_ver;
+
+ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
+ return -EINVAL;
+@@ -669,13 +677,21 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
+ goto error_cleanup_open;
+ }
+
+- if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
+- (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
+- mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
++ error = dprc_get_api_version(mc_dev->mc_io, 0,
++ &major_ver,
++ &minor_ver);
++ if (error < 0) {
++ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
++ error);
++ goto error_cleanup_open;
++ }
++
++ if (major_ver < DPRC_MIN_VER_MAJOR ||
++ (major_ver == DPRC_MIN_VER_MAJOR &&
++ minor_ver < DPRC_MIN_VER_MINOR)) {
+ dev_err(&mc_dev->dev,
+ "ERROR: DPRC version %d.%d not supported\n",
+- mc_bus->dprc_attr.version.major,
+- mc_bus->dprc_attr.version.minor);
++ major_ver, minor_ver);
+ error = -ENOTSUPP;
+ goto error_cleanup_open;
+ }
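+
dprc_scan_objects() now threads an optional driver_override string down to fsl_mc_device_add(), so a rescan can steer every newly discovered object toward one specific driver; the regular hot-plug paths simply pass NULL. A hedged sketch of such a forced scan, assuming the caller already holds the root DPRC device (the "vfio-fsl-mc" driver name is illustrative, not part of this patch):

	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(root_mc_dev);
	unsigned int irq_count;
	int err;

	mutex_lock(&mc_bus->scan_mutex);
	err = dprc_scan_objects(root_mc_dev, "vfio-fsl-mc", &irq_count);
	mutex_unlock(&mc_bus->scan_mutex);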
+diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
+index 9fea3def..764cd3fb 100644
+--- a/drivers/staging/fsl-mc/bus/dprc.c
++++ b/drivers/staging/fsl-mc/bus/dprc.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -99,93 +99,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(dprc_close);
+
+-/**
+- * dprc_create_container() - Create child container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @cfg: Child container configuration
+- * @child_container_id: Returned child container ID
+- * @child_portal_offset: Returned child portal offset from MC portal base
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_create_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_cfg *cfg,
+- int *child_container_id,
+- u64 *child_portal_offset)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_create_container *cmd_params;
+- struct dprc_rsp_create_container *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd_params = (struct dprc_cmd_create_container *)cmd.params;
+- cmd_params->options = cpu_to_le32(cfg->options);
+- cmd_params->icid = cpu_to_le16(cfg->icid);
+- cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+- strncpy(cmd_params->label, cfg->label, 16);
+- cmd_params->label[15] = '\0';
+-
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_create_container *)cmd.params;
+- *child_container_id = le32_to_cpu(rsp_params->child_container_id);
+- *child_portal_offset = le64_to_cpu(rsp_params->child_portal_addr);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_destroy_container() - Destroy child container.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the container to destroy
+- *
+- * This function terminates the child container, so following this call the
+- * child container ID becomes invalid.
+- *
+- * Notes:
+- * - All resources and objects of the destroyed container are returned to the
+- * parent container, or destroyed if they were created by the destroyed container.
+- * - This function destroys all the child containers of the specified
+- * container prior to destroying the container itself.
+- *
+- * warning: Only the parent container is allowed to destroy a child container;
+- * Container 0 can't be destroyed
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- */
+-int dprc_destroy_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_destroy_container *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_destroy_container *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+ /**
+ * dprc_reset_container - Reset child container.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -565,279 +478,6 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ attr->icid = le16_to_cpu(rsp_params->icid);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+- attr->version.major = le16_to_cpu(rsp_params->version_major);
+- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_set_res_quota() - Set allocation policy for a specific resource/object
+- * type in a child container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the child container
+- * @type: Resource/object type
+- * @quota: Sets the maximum number of resources of the selected type
+- * that the child container is allowed to allocate from its parent;
+- * when quota is set to -1, the policy is the same as container's
+- * general policy.
+- *
+- * Allocation policy determines whether or not a container may allocate
+- * resources from its parent. Each container has a 'global' allocation policy
+- * that is set when the container is created.
+- *
+- * This function sets allocation policy for a specific resource type.
+- * The default policy for all resource types matches the container's 'global'
+- * allocation policy.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- * @warning Only the parent container is allowed to change a child policy.
+- */
+-int dprc_set_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 quota)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_res_quota *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_set_res_quota *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- cmd_params->quota = cpu_to_le16(quota);
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_res_quota() - Gets the allocation policy of a specific
+- * resource/object type in a child container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the child container
+- * @type: resource/object type
+- * @quota: Returns the maximum number of resources of the selected type
+- * that the child container is allowed to allocate from the parent;
+- * when quota is set to -1, the policy is the same as the container's
+- * general policy.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 *quota)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_res_quota *cmd_params;
+- struct dprc_rsp_get_res_quota *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_res_quota *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_res_quota *)cmd.params;
+- *quota = le16_to_cpu(rsp_params->quota);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_assign() - Assigns objects or resource to a child container.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @container_id: ID of the child container
+- * @res_req: Describes the type and amount of resources to
+- * assign to the given container
+- *
+- * Assignment is usually done by a parent (this DPRC) to one of its child
+- * containers.
+- *
+- * According to the DPRC allocation policy, the assigned resources may be taken
+- * (allocated) from the container's ancestors, if not enough resources are
+- * available in the container itself.
+- *
+- * The type of assignment depends on the dprc_res_req options, as follows:
+- * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have
+- * the explicit base ID specified at the id_base_align field of res_req.
+- * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be
+- * aligned to the value given at id_base_align field of res_req.
+- * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment,
+- * and indicates that the object must be set to the plugged state.
+- *
+- * A container may use this function with its own ID in order to change an
+- * object state to plugged or unplugged.
+- *
+- * If IRQ information has been set in the child DPRC, it will signal an
+- * interrupt following every change in its object assignment.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_assign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int container_id,
+- struct dprc_res_req *res_req)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_assign *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_assign *)cmd.params;
+- cmd_params->container_id = cpu_to_le32(container_id);
+- cmd_params->options = cpu_to_le32(res_req->options);
+- cmd_params->num = cpu_to_le32(res_req->num);
+- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+- strncpy(cmd_params->type, res_req->type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_unassign() - Un-assigns objects or resources from a child container
+- * and moves them into this (parent) DPRC.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the child container
+- * @res_req: Describes the type and amount of resources to un-assign from
+- * the child container
+- *
+- * Un-assignment of objects can succeed only if the object is not in the
+- * plugged or opened state.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_unassign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- struct dprc_res_req *res_req)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_unassign *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_unassign *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- cmd_params->options = cpu_to_le32(res_req->options);
+- cmd_params->num = cpu_to_le32(res_req->num);
+- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+- strncpy(cmd_params->type, res_req->type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_pool_count() - Get the number of dprc's pools
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @mc_io: Pointer to MC portal's I/O object
+- * @token: Token of DPRC object
+- * @pool_count: Returned number of resource pools in the dprc
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *pool_count)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_rsp_get_pool_count *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_pool_count *)cmd.params;
+- *pool_count = le32_to_cpu(rsp_params->pool_count);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_get_pool() - Get the type (string) of a certain dprc's pool
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @pool_index: Index of the pool to be queried (< pool_count)
+- * @type: The type of the pool
+- *
+- * The pool types are retrieved one by one by incrementing
+- * pool_index up to (not including) the value of pool_count returned
+- * from dprc_get_pool_count(). dprc_get_pool_count() must
+- * be called prior to dprc_get_pool().
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_pool(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int pool_index,
+- char *type)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_pool *cmd_params;
+- struct dprc_rsp_get_pool *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_pool *)cmd.params;
+- cmd_params->pool_index = cpu_to_le32(pool_index);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_pool *)cmd.params;
+- strncpy(type, rsp_params->type, 16);
+- type[15] = '\0';
+
+ return 0;
+ }
+@@ -933,64 +573,6 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(dprc_get_obj);
+
+-/**
+- * dprc_get_obj_desc() - Get object descriptor.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_type: The type of the object to get its descriptor.
+- * @obj_id: The id of the object to get its descriptor
+- * @obj_desc: The returned descriptor to fill and return to the user
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- */
+-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- struct dprc_obj_desc *obj_desc)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_obj_desc *cmd_params;
+- struct dprc_rsp_get_obj_desc *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_obj_desc *)cmd.params;
+- cmd_params->obj_id = cpu_to_le32(obj_id);
+- strncpy(cmd_params->type, obj_type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_obj_desc *)cmd.params;
+- obj_desc->id = le32_to_cpu(rsp_params->id);
+- obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+- obj_desc->irq_count = rsp_params->irq_count;
+- obj_desc->region_count = rsp_params->region_count;
+- obj_desc->state = le32_to_cpu(rsp_params->state);
+- obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+- obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+- obj_desc->flags = le16_to_cpu(rsp_params->flags);
+- strncpy(obj_desc->type, rsp_params->type, 16);
+- obj_desc->type[15] = '\0';
+- strncpy(obj_desc->label, rsp_params->label, 16);
+- obj_desc->label[15] = '\0';
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_obj_desc);
+-
+ /**
+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -1129,52 +711,6 @@ int dprc_get_res_count(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(dprc_get_res_count);
+
+-/**
+- * dprc_get_res_ids() - Obtains IDs of free resources in the container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @type: pool type
+- * @range_desc: range descriptor
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- struct dprc_res_ids_range_desc *range_desc)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_res_ids *cmd_params;
+- struct dprc_rsp_get_res_ids *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_res_ids *)cmd.params;
+- cmd_params->iter_status = range_desc->iter_status;
+- cmd_params->base_id = cpu_to_le32(range_desc->base_id);
+- cmd_params->last_id = cpu_to_le32(range_desc->last_id);
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_res_ids *)cmd.params;
+- range_desc->iter_status = rsp_params->iter_status;
+- range_desc->base_id = le32_to_cpu(rsp_params->base_id);
+- range_desc->last_id = le32_to_cpu(rsp_params->last_id);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_res_ids);
+-
+ /**
+ * dprc_get_obj_region() - Get region information for a specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -1216,160 +752,66 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+- region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
++ region_desc->base_offset = le32_to_cpu(rsp_params->base_addr);
+ region_desc->size = le32_to_cpu(rsp_params->size);
++ region_desc->type = rsp_params->type;
++ region_desc->flags = le32_to_cpu(rsp_params->flags);
+
+ return 0;
+ }
+ EXPORT_SYMBOL(dprc_get_obj_region);
+
+ /**
+- * dprc_set_obj_label() - Set object label.
+- * @mc_io: Pointer to MC portal's I/O object
++ * dprc_get_api_version - Get Data Path Resource Container API version
++ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_type: Object's type
+- * @obj_id: Object's ID
+- * @label: The required label. The maximum length is 16 chars.
++ * @major_ver: Major version of Data Path Resource Container API
++ * @minor_ver: Minor version of Data Path Resource Container API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- char *label)
++int dprc_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
+ {
+ struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_obj_label *cmd_params;
++ int err;
+
+ /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_set_obj_label *)cmd.params;
+- cmd_params->obj_id = cpu_to_le32(obj_id);
+- strncpy(cmd_params->label, label, 16);
+- cmd_params->label[15] = '\0';
+- strncpy(cmd_params->obj_type, obj_type, 16);
+- cmd_params->obj_type[15] = '\0';
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
+
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-EXPORT_SYMBOL(dprc_set_obj_label);
+-
+-/**
+- * dprc_connect() - Connect two endpoints to create a network link between them
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @endpoint1: Endpoint 1 configuration parameters
+- * @endpoint2: Endpoint 2 configuration parameters
+- * @cfg: Connection configuration. The connection configuration is ignored for
+- * connections made to DPMAC objects, where rate is retrieved from the
+- * MAC configuration.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_connect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- const struct dprc_endpoint *endpoint2,
+- const struct dprc_connection_cfg *cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_connect *cmd_params;
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_connect *)cmd.params;
+- cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+- cmd_params->ep2_id = cpu_to_le32(endpoint2->id);
+- cmd_params->ep2_interface_id = cpu_to_le32(endpoint2->if_id);
+- strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+- cmd_params->ep1_type[15] = '\0';
+- cmd_params->max_rate = cpu_to_le32(cfg->max_rate);
+- cmd_params->committed_rate = cpu_to_le32(cfg->committed_rate);
+- strncpy(cmd_params->ep2_type, endpoint2->type, 16);
+- cmd_params->ep2_type[15] = '\0';
++ /* retrieve response parameters */
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
++ return 0;
+ }
+
+ /**
+- * dprc_disconnect() - Disconnect one endpoint to remove its network connection
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @endpoint: Endpoint configuration parameters
++ * dprc_get_container_id - Get container ID associated with a given portal.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @container_id: Requested container ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+-int dprc_disconnect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_disconnect *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_disconnect *)cmd.params;
+- cmd_params->id = cpu_to_le32(endpoint->id);
+- cmd_params->interface_id = cpu_to_le32(endpoint->if_id);
+- strncpy(cmd_params->type, endpoint->type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_connection() - Get connected endpoint and link status if connection
+- * exists.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @endpoint1: Endpoint 1 configuration parameters
+- * @endpoint2: Returned endpoint 2 configuration parameters
+- * @state: Returned link state:
+- * 1 - link is up;
+- * 0 - link is down;
+- * -1 - no connection (endpoint2 information is irrelevant)
+- *
+- * Return: '0' on Success; -ENAVAIL if connection does not exist.
+- */
+-int dprc_get_connection(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- struct dprc_endpoint *endpoint2,
+- int *state)
++int dprc_get_container_id(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int *container_id)
+ {
+ struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_connection *cmd_params;
+- struct dprc_rsp_get_connection *rsp_params;
+ int err;
+
+ /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+ cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+- cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+- strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+- cmd_params->ep1_type[15] = '\0';
++ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+@@ -1377,12 +819,7 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
+ return err;
+
+ /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+- endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+- endpoint2->if_id = le32_to_cpu(rsp_params->ep2_interface_id);
+- strncpy(endpoint2->type, rsp_params->ep2_type, 16);
+- endpoint2->type[15] = '\0';
+- *state = le32_to_cpu(rsp_params->state);
++ *container_id = (int)mc_cmd_read_object_id(&cmd);
+
+ return 0;
+ }
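+
dprc_get_api_version() and dprc_get_container_id() replace the deleted attribute- and DPMNG-based queries; notably, both run with a zero token, i.e. before any DPRC has been opened. A minimal usage sketch that mirrors the probe sequence later in this patch:

	u16 major, minor;
	int container_id, err;

	err = dprc_get_container_id(mc_io, 0, &container_id);
	if (err)
		return err;

	err = dprc_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	if (major < DPRC_MIN_VER_MAJOR ||
	    (major == DPRC_MIN_VER_MAJOR && minor < DPRC_MIN_VER_MINOR))
		return -ENOTSUPP;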
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+index e93ab53b..ce07096c 100644
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+@@ -1,7 +1,7 @@
+ /*
+- * Freescale MC object device allocator driver
++ * fsl-mc object allocator driver
+ *
+- * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+@@ -12,9 +12,9 @@
+ #include <linux/msi.h>
+ #include "../include/mc-bus.h"
+ #include "../include/mc-sys.h"
+-#include "../include/dpbp-cmd.h"
+-#include "../include/dpcon-cmd.h"
+
++#include "dpbp-cmd.h"
++#include "dpcon-cmd.h"
+ #include "fsl-mc-private.h"
+
+ #define FSL_MC_IS_ALLOCATABLE(_obj_type) \
+@@ -23,15 +23,12 @@
+ strcmp(_obj_type, "dpcon") == 0)
+
+ /**
+- * fsl_mc_resource_pool_add_device - add allocatable device to a resource
+- * pool of a given MC bus
++ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
++ * pool of a given fsl-mc bus
+ *
+- * @mc_bus: pointer to the MC bus
+- * @pool_type: MC bus pool type
+- * @mc_dev: Pointer to allocatable MC object device
+- *
+- * It adds an allocatable MC object device to a container's resource pool of
+- * the given resource type
++ * @mc_bus: pointer to the fsl-mc bus
++ * @pool_type: pool type
++ * @mc_dev: pointer to allocatable fsl-mc device
+ */
+ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
+ *mc_bus,
+@@ -95,10 +92,10 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
+ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
+ * resource pool
+ *
+- * @mc_dev: Pointer to allocatable MC object device
++ * @mc_dev: pointer to allocatable fsl-mc device
+ *
+- * It permanently removes an allocatable MC object device from the resource
+- * pool, the device is currently in, as long as it is in the pool's free list.
++ * It permanently removes an allocatable fsl-mc device from the resource
++ * pool. It's an error if the device is in use.
+ */
+ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
+ *mc_dev)
+@@ -255,17 +252,18 @@ void fsl_mc_resource_free(struct fsl_mc_resource *resource)
+ EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
+
+ /**
+- * fsl_mc_object_allocate - Allocates a MC object device of the given
+- * pool type from a given MC bus
++ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
++ * pool type from a given fsl-mc bus instance
+ *
+- * @mc_dev: MC device for which the MC object device is to be allocated
+- * @pool_type: MC bus resource pool type
+- * @new_mc_dev: Pointer to area where the pointer to the allocated
+- * MC object device is to be returned
++ * @mc_dev: fsl-mc device which is used in conjunction with the
++ * allocated object
++ * @pool_type: pool type
++ * @new_mc_dev: pointer to area where the pointer to the allocated device
++ * is to be returned
+ *
+- * This function allocates a MC object device from the device's parent DPRC,
+- * from the corresponding MC bus' pool of allocatable MC object devices of
+- * the given resource type. mc_dev cannot be a DPRC itself.
++ * Allocatable objects are always used in conjunction with some functional
++ * device. This function allocates an object of the specified type from
++ * the DPRC containing the functional device.
+ *
+ * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
+ * portals are allocated using fsl_mc_portal_allocate(), instead of
+@@ -312,10 +310,9 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+ EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
+
+ /**
+- * fsl_mc_object_free - Returns an allocatable MC object device to the
+- * corresponding resource pool of a given MC bus.
+- *
+- * @mc_adev: Pointer to the MC object device
++ * fsl_mc_object_free - Returns an fsl-mc object to the resource
++ * pool where it came from.
++ * @mc_adev: Pointer to the fsl-mc device
+ */
+ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
+ {
+@@ -332,8 +329,14 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
+ EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+
+ /*
+- * Initialize the interrupt pool associated with a MC bus.
+- * It allocates a block of IRQs from the GIC-ITS
++ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
++ * ID. A block of IRQs is pre-allocated and maintained in a pool
++ * from which devices can allocate them when needed.
++ */
++
++/*
++ * Initialize the interrupt pool associated with an fsl-mc bus.
++ * It allocates a block of IRQs from the GIC-ITS.
+ */
+ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ unsigned int irq_count)
+@@ -395,7 +398,7 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
+
+ /**
+- * Teardown the interrupt pool associated with an MC bus.
++ * Tear down the interrupt pool associated with an fsl-mc bus.
+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
+ */
+ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+@@ -422,11 +425,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+ EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+
+ /**
+- * It allocates the IRQs required by a given MC object device. The
+- * IRQs are allocated from the interrupt pool associated with the
+- * MC bus that contains the device, if the device is not a DPRC device.
+- * Otherwise, the IRQs are allocated from the interrupt pool associated
+- * with the MC bus that represents the DPRC device itself.
++ * Allocate the IRQs required by a given fsl-mc device.
+ */
+ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+ {
+@@ -495,8 +494,7 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+ EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
+
+ /*
+- * It frees the IRQs that were allocated for a MC object device, by
+- * returning them to the corresponding interrupt pool.
++ * Frees the IRQs that were allocated for an fsl-mc device.
+ */
+ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+ {
+@@ -605,7 +603,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
+ return error;
+
+ dev_dbg(&mc_dev->dev,
+- "Allocatable MC object device bound to fsl_mc_allocator driver");
++ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
+ return 0;
+ }
+
+@@ -627,7 +625,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
+ }
+
+ dev_dbg(&mc_dev->dev,
+- "Allocatable MC object device unbound from fsl_mc_allocator driver");
++ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
+ return 0;
+ }
+
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+index 44f64b6f..30a48df3 100644
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus driver
+ *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -9,6 +9,8 @@
+ * warranty of any kind, whether express or implied.
+ */
+
++#define pr_fmt(fmt) "fsl-mc: " fmt
++
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+ #include <linux/of_address.h>
+@@ -25,8 +27,6 @@
+ #include "fsl-mc-private.h"
+ #include "dprc-cmd.h"
+
+-static struct kmem_cache *mc_dev_cache;
+-
+ /**
+ * Default DMA mask for devices on a fsl-mc bus
+ */
+@@ -34,7 +34,7 @@ static struct kmem_cache *mc_dev_cache;
+
+ /**
+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+- * @root_mc_bus_dev: MC object device representing the root DPRC
++ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
+ * @num_translation_ranges: number of entries in addr_translation_ranges
+ * @translation_ranges: array of bus to system address translation ranges
+ */
+@@ -62,8 +62,8 @@ struct fsl_mc_addr_translation_range {
+
+ /**
+ * fsl_mc_bus_match - device to driver matching callback
+- * @dev: the MC object device structure to match against
+- * @drv: the device driver to search for matching MC object device id
++ * @dev: the fsl-mc device to match against
++ * @drv: the device driver to search for matching fsl-mc object type
+ * structures
+ *
+ * Returns 1 on success, 0 otherwise.
+@@ -75,8 +75,11 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ bool found = false;
+
+- if (WARN_ON(!fsl_mc_bus_exists()))
++ /* When driver_override is set, only bind to the matching driver */
++ if (mc_dev->driver_override) {
++ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
+ goto out;
++ }
+
+ if (!mc_drv->match_id_table)
+ goto out;
+@@ -91,7 +94,7 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+
+ /*
+ * Traverse the match_id table of the given driver, trying to find
+- * a matching for the given MC object device.
++ * a match for the given device.
+ */
+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
+ if (id->vendor == mc_dev->obj_desc.vendor &&
+@@ -132,23 +135,141 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(modalias);
+
++static ssize_t rescan_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long val;
++ unsigned int irq_count;
++ struct fsl_mc_device *root_mc_dev;
++ struct fsl_mc_bus *root_mc_bus;
++
++ if (!fsl_mc_is_root_dprc(dev))
++ return -EINVAL;
++
++ root_mc_dev = to_fsl_mc_device(dev);
++ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
++
++ if (kstrtoul(buf, 0, &val) < 0)
++ return -EINVAL;
++
++ if (val) {
++ mutex_lock(&root_mc_bus->scan_mutex);
++ dprc_scan_objects(root_mc_dev, NULL, &irq_count);
++ mutex_unlock(&root_mc_bus->scan_mutex);
++ }
++
++ return count;
++}
++static DEVICE_ATTR_WO(rescan);
++
++static ssize_t driver_override_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ const char *driver_override, *old = mc_dev->driver_override;
++ char *cp;
++
++ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
++ return -EINVAL;
++
++ if (count >= (PAGE_SIZE - 1))
++ return -EINVAL;
++
++ driver_override = kstrndup(buf, count, GFP_KERNEL);
++ if (!driver_override)
++ return -ENOMEM;
++
++ cp = strchr(driver_override, '\n');
++ if (cp)
++ *cp = '\0';
++
++ if (strlen(driver_override)) {
++ mc_dev->driver_override = driver_override;
++ } else {
++ kfree(driver_override);
++ mc_dev->driver_override = NULL;
++ }
++
++ kfree(old);
++
++ return count;
++}
++
++static ssize_t driver_override_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++
++ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
++}
++static DEVICE_ATTR_RW(driver_override);
++
+ static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
++ &dev_attr_rescan.attr,
++ &dev_attr_driver_override.attr,
+ NULL,
+ };
+
+ ATTRIBUTE_GROUPS(fsl_mc_dev);
+
++static int scan_fsl_mc_bus(struct device *dev, void *data)
++{
++ unsigned int irq_count;
++ struct fsl_mc_device *root_mc_dev;
++ struct fsl_mc_bus *root_mc_bus;
++
++ if (fsl_mc_is_root_dprc(dev)) {
++ root_mc_dev = to_fsl_mc_device(dev);
++ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
++ mutex_lock(&root_mc_bus->scan_mutex);
++ dprc_scan_objects(root_mc_dev, NULL, &irq_count);
++ mutex_unlock(&root_mc_bus->scan_mutex);
++ }
++
++ return 0;
++}
++
++static ssize_t bus_rescan_store(struct bus_type *bus,
++ const char *buf, size_t count)
++{
++ unsigned long val;
++
++ if (kstrtoul(buf, 0, &val) < 0)
++ return -EINVAL;
++
++ if (val)
++ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
++
++ return count;
++}
++static BUS_ATTR(rescan, (S_IWUSR | S_IWGRP), NULL, bus_rescan_store);
++
++static struct attribute *fsl_mc_bus_attrs[] = {
++ &bus_attr_rescan.attr,
++ NULL,
++};
++
++static const struct attribute_group fsl_mc_bus_group = {
++ .attrs = fsl_mc_bus_attrs,
++};
++
++static const struct attribute_group *fsl_mc_bus_groups[] = {
++ &fsl_mc_bus_group,
++ NULL,
++};
++
+ struct bus_type fsl_mc_bus_type = {
+ .name = "fsl-mc",
+ .match = fsl_mc_bus_match,
+ .uevent = fsl_mc_bus_uevent,
+ .dev_groups = fsl_mc_dev_groups,
++ .bus_groups = fsl_mc_bus_groups,
+ };
+ EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+
+-static atomic_t root_dprc_count = ATOMIC_INIT(0);
+-
+ static int fsl_mc_driver_probe(struct device *dev)
+ {
+ struct fsl_mc_driver *mc_drv;
+@@ -164,8 +285,7 @@ static int fsl_mc_driver_probe(struct device *dev)
+
+ error = mc_drv->probe(mc_dev);
+ if (error < 0) {
+- dev_err(dev, "MC object device probe callback failed: %d\n",
+- error);
++ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+@@ -183,9 +303,7 @@ static int fsl_mc_driver_remove(struct device *dev)
+
+ error = mc_drv->remove(mc_dev);
+ if (error < 0) {
+- dev_err(dev,
+- "MC object device remove callback failed: %d\n",
+- error);
++ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+@@ -232,8 +350,6 @@ int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
+ return error;
+ }
+
+- pr_info("MC object device driver %s registered\n",
+- mc_driver->driver.name);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
+@@ -248,15 +364,6 @@ void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
+ }
+ EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
+
+-/**
+- * fsl_mc_bus_exists - check if a root dprc exists
+- */
+-bool fsl_mc_bus_exists(void)
+-{
+- return atomic_read(&root_dprc_count) > 0;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_bus_exists);
+-
+ /**
+ * fsl_mc_get_root_dprc - function to traverse to the root dprc
+ */
+@@ -315,21 +422,6 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
+ return error;
+ }
+
+-static int get_dprc_version(struct fsl_mc_io *mc_io,
+- int container_id, u16 *major, u16 *minor)
+-{
+- struct dprc_attributes attr;
+- int error;
+-
+- error = get_dprc_attr(mc_io, container_id, &attr);
+- if (error == 0) {
+- *major = attr.version.major;
+- *minor = attr.version.minor;
+- }
+-
+- return error;
+-}
+-
+ static int translate_mc_addr(struct fsl_mc_device *mc_dev,
+ enum dprc_region_type mc_region_type,
+ u64 mc_offset, phys_addr_t *phys_addr)
+@@ -451,18 +543,37 @@ bool fsl_mc_is_root_dprc(struct device *dev)
+ return dev == root_dprc_dev;
+ }
+
++static void fsl_mc_device_release(struct device *dev)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ struct fsl_mc_bus *mc_bus = NULL;
++
++ kfree(mc_dev->regions);
++
++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
++ mc_bus = to_fsl_mc_bus(mc_dev);
++
++ if (mc_bus)
++ kfree(mc_bus);
++ else
++ kfree(mc_dev);
++}
++
+ /**
+- * Add a newly discovered MC object device to be visible in Linux
++ * Add a newly discovered fsl-mc device to be visible in Linux
+ */
+ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
++ const char *driver_override,
+ struct fsl_mc_device **new_mc_dev)
+ {
+ int error;
+ struct fsl_mc_device *mc_dev = NULL;
+ struct fsl_mc_bus *mc_bus = NULL;
+ struct fsl_mc_device *parent_mc_dev;
++ struct device *fsl_mc_platform_dev;
++ struct device_node *fsl_mc_platform_node;
+
+ if (dev_is_fsl_mc(parent_dev))
+ parent_mc_dev = to_fsl_mc_device(parent_dev);
+@@ -473,7 +584,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ /*
+ * Allocate an MC bus device object:
+ */
+- mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL);
++ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
+ if (!mc_bus)
+ return -ENOMEM;
+
+@@ -482,16 +593,30 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ /*
+ * Allocate a regular fsl_mc_device object:
+ */
+- mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL);
++ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
+ if (!mc_dev)
+ return -ENOMEM;
+ }
+
+ mc_dev->obj_desc = *obj_desc;
+ mc_dev->mc_io = mc_io;
++
++ if (driver_override) {
++ /*
++ * We trust driver_override, so we don't need to use
++ * kstrndup() here
++ */
++ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL);
++ if (!mc_dev->driver_override) {
++ error = -ENOMEM;
++ goto error_cleanup_dev;
++ }
++ }
++
+ device_initialize(&mc_dev->dev);
+ mc_dev->dev.parent = parent_dev;
+ mc_dev->dev.bus = &fsl_mc_bus_type;
++ mc_dev->dev.release = fsl_mc_device_release;
+ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+@@ -524,8 +649,6 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ }
+
+ mc_io2 = mc_io;
+-
+- atomic_inc(&root_dprc_count);
+ }
+
+ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
+@@ -533,8 +656,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ goto error_cleanup_dev;
+ } else {
+ /*
+- * A non-DPRC MC object device has to be a child of another
+- * MC object (specifically a DPRC object)
++ * A non-DPRC object has to be a child of a DPRC; use the
++ * parent's ICID and interrupt domain.
+ */
+ mc_dev->icid = parent_mc_dev->icid;
+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
+@@ -556,9 +679,14 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ goto error_cleanup_dev;
+ }
+
+- /* Objects are coherent, unless 'no shareability' flag set. */
+- if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+- arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
++ fsl_mc_platform_dev = &mc_dev->dev;
++ while (dev_is_fsl_mc(fsl_mc_platform_dev))
++ fsl_mc_platform_dev = fsl_mc_platform_dev->parent;
++ fsl_mc_platform_node = fsl_mc_platform_dev->of_node;
++
++ /* Set up the iommu configuration for the devices. */
++ fsl_mc_dma_configure(mc_dev, fsl_mc_platform_node,
++ !(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY));
+
+ /*
+ * The device-specific probe callback will get invoked by device_add()
+@@ -571,9 +699,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ goto error_cleanup_dev;
+ }
+
+- (void)get_device(&mc_dev->dev);
+- dev_dbg(parent_dev, "Added MC object device %s\n",
+- dev_name(&mc_dev->dev));
++ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
+
+ *new_mc_dev = mc_dev;
+ return 0;
+@@ -581,47 +707,34 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ error_cleanup_dev:
+ kfree(mc_dev->regions);
+ if (mc_bus)
+- devm_kfree(parent_dev, mc_bus);
++ kfree(mc_bus);
+ else
+- kmem_cache_free(mc_dev_cache, mc_dev);
++ kfree(mc_dev);
+
+ return error;
+ }
+ EXPORT_SYMBOL_GPL(fsl_mc_device_add);
+
+ /**
+- * fsl_mc_device_remove - Remove a MC object device from being visible to
++ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
+ * Linux
+ *
+- * @mc_dev: Pointer to a MC object device object
++ * @mc_dev: Pointer to an fsl-mc device
+ */
+ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
+ {
+- struct fsl_mc_bus *mc_bus = NULL;
+-
+- kfree(mc_dev->regions);
++ kfree(mc_dev->driver_override);
++ mc_dev->driver_override = NULL;
+
+ /*
+ * The device-specific remove callback will get invoked by device_del()
+ */
+ device_del(&mc_dev->dev);
+- put_device(&mc_dev->dev);
+
+- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
+- mc_bus = to_fsl_mc_bus(mc_dev);
++ if (strcmp(mc_dev->obj_desc.type, "dprc") != 0)
++ mc_dev->dev.iommu_fwspec = NULL;
+
+- if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
+- if (atomic_read(&root_dprc_count) > 0)
+- atomic_dec(&root_dprc_count);
+- else
+- WARN_ON(1);
+- }
+- }
+-
+- if (mc_bus)
+- devm_kfree(mc_dev->dev.parent, mc_bus);
+- else
+- kmem_cache_free(mc_dev_cache, mc_dev);
++ put_device(&mc_dev->dev);
+ }
+ EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+
+@@ -629,8 +742,7 @@ static int parse_mc_ranges(struct device *dev,
+ int *paddr_cells,
+ int *mc_addr_cells,
+ int *mc_size_cells,
+- const __be32 **ranges_start,
+- u8 *num_ranges)
++ const __be32 **ranges_start)
+ {
+ const __be32 *prop;
+ int range_tuple_cell_count;
+@@ -643,8 +755,6 @@ static int parse_mc_ranges(struct device *dev,
+ dev_warn(dev,
+ "missing or empty ranges property for device tree node '%s'\n",
+ mc_node->name);
+-
+- *num_ranges = 0;
+ return 0;
+ }
+
+@@ -671,8 +781,7 @@ static int parse_mc_ranges(struct device *dev,
+ return -EINVAL;
+ }
+
+- *num_ranges = ranges_len / tuple_len;
+- return 0;
++ return ranges_len / tuple_len;
+ }
+
+ static int get_mc_addr_translation_ranges(struct device *dev,
+@@ -680,7 +789,7 @@ static int get_mc_addr_translation_ranges(struct device *dev,
+ **ranges,
+ u8 *num_ranges)
+ {
+- int error;
++ int ret;
+ int paddr_cells;
+ int mc_addr_cells;
+ int mc_size_cells;
+@@ -688,16 +797,16 @@ static int get_mc_addr_translation_ranges(struct device *dev,
+ const __be32 *ranges_start;
+ const __be32 *cell;
+
+- error = parse_mc_ranges(dev,
++ ret = parse_mc_ranges(dev,
+ &paddr_cells,
+ &mc_addr_cells,
+ &mc_size_cells,
+- &ranges_start,
+- num_ranges);
+- if (error < 0)
+- return error;
++ &ranges_start);
++ if (ret < 0)
++ return ret;
+
+- if (!(*num_ranges)) {
++ *num_ranges = ret;
++ if (!ret) {
+ /*
+ * Missing or empty ranges property ("ranges;") for the
+ * 'fsl,qoriq-mc' node. In this case, identity mapping
+@@ -749,8 +858,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
+ struct mc_version mc_version;
+ struct resource res;
+
+- dev_info(&pdev->dev, "Root MC bus device probed");
+-
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+@@ -783,8 +890,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
+ goto error_cleanup_mc_io;
+ }
+
+- dev_info(&pdev->dev,
+- "Freescale Management Complex Firmware version: %u.%u.%u\n",
++ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
+ mc_version.major, mc_version.minor, mc_version.revision);
+
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+@@ -793,16 +899,17 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+- error = dpmng_get_container_id(mc_io, 0, &container_id);
++ error = dprc_get_container_id(mc_io, 0, &container_id);
+ if (error < 0) {
+ dev_err(&pdev->dev,
+- "dpmng_get_container_id() failed: %d\n", error);
++ "dprc_get_container_id() failed: %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
+- error = get_dprc_version(mc_io, container_id,
+- &obj_desc.ver_major, &obj_desc.ver_minor);
++ error = dprc_get_api_version(mc_io, 0,
++ &obj_desc.ver_major,
++ &obj_desc.ver_minor);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+@@ -812,7 +919,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
+ obj_desc.irq_count = 1;
+ obj_desc.region_count = 0;
+
+- error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
++ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL,
++ &mc_bus_dev);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+@@ -840,7 +948,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
+ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
+ mc->root_mc_bus_dev->mc_io = NULL;
+
+- dev_info(&pdev->dev, "Root MC bus device removed");
+ return 0;
+ }
+
+@@ -865,22 +972,12 @@ static int __init fsl_mc_bus_driver_init(void)
+ {
+ int error;
+
+- mc_dev_cache = kmem_cache_create("fsl_mc_device",
+- sizeof(struct fsl_mc_device), 0, 0,
+- NULL);
+- if (!mc_dev_cache) {
+- pr_err("Could not create fsl_mc_device cache\n");
+- return -ENOMEM;
+- }
+-
+ error = bus_register(&fsl_mc_bus_type);
+ if (error < 0) {
+- pr_err("fsl-mc bus type registration failed: %d\n", error);
++ pr_err("bus type registration failed: %d\n", error);
+ goto error_cleanup_cache;
+ }
+
+- pr_info("fsl-mc bus type registered\n");
+-
+ error = platform_driver_register(&fsl_mc_bus_driver);
+ if (error < 0) {
+ pr_err("platform_driver_register() failed: %d\n", error);
+@@ -914,7 +1011,6 @@ static int __init fsl_mc_bus_driver_init(void)
+ bus_unregister(&fsl_mc_bus_type);
+
+ error_cleanup_cache:
+- kmem_cache_destroy(mc_dev_cache);
+ return error;
+ }
+ postcore_initcall(fsl_mc_bus_driver_init);
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-iommu.c b/drivers/staging/fsl-mc/bus/fsl-mc-iommu.c
+new file mode 100644
+index 00000000..86b2cd84
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-iommu.c
+@@ -0,0 +1,104 @@
++/*
++ * Copyright 2016-17 NXP
++ * Author: Nipun Gupta <nipun.gupta@nxp.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/iommu.h>
++#include <linux/of.h>
++#include <linux/of_iommu.h>
++#include "../include/mc.h"
++
++/* Setup the IOMMU for the DPRC container */
++static const struct iommu_ops
++*fsl_mc_iommu_configure(struct fsl_mc_device *mc_dev,
++ struct device_node *fsl_mc_platform_node)
++{
++ struct of_phandle_args iommu_spec;
++ const struct iommu_ops *ops;
++ u32 iommu_phandle;
++ struct device_node *iommu_node;
++ const __be32 *map = NULL;
++	u32 iommu_cells;
++	int map_len, ret;
++
++ map = of_get_property(fsl_mc_platform_node, "iommu-map", &map_len);
++ if (!map)
++ return NULL;
++
++ ops = mc_dev->dev.bus->iommu_ops;
++ if (!ops || !ops->of_xlate)
++ return NULL;
++
++ iommu_phandle = be32_to_cpup(map + 1);
++	iommu_node = of_find_node_by_phandle(iommu_phandle);
++	if (!iommu_node)
++		return NULL;
++
++ if (of_property_read_u32(iommu_node, "#iommu-cells", &iommu_cells)) {
++ pr_err("%s: missing #iommu-cells property\n", iommu_node->name);
++ return NULL;
++ }
++
++ /* Initialize the fwspec */
++ ret = iommu_fwspec_init(&mc_dev->dev, &iommu_node->fwnode, ops);
++ if (ret)
++ return NULL;
++
++ /*
++ * Fill in the required stream-id before calling the iommu's
++	 * ops->of_xlate callback.
++ */
++ iommu_spec.np = iommu_node;
++ iommu_spec.args[0] = mc_dev->icid;
++ iommu_spec.args_count = 1;
++
++ ret = ops->of_xlate(&mc_dev->dev, &iommu_spec);
++ if (ret)
++ return NULL;
++
++ of_node_put(iommu_spec.np);
++
++ return ops;
++}
++
++/* Set up DMA configuration for fsl-mc devices */
++void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev,
++ struct device_node *fsl_mc_platform_node, int coherent)
++{
++ const struct iommu_ops *ops;
++
++ ops = fsl_mc_iommu_configure(mc_dev, fsl_mc_platform_node);
++
++ mc_dev->dev.coherent_dma_mask = DMA_BIT_MASK(48);
++ mc_dev->dev.dma_mask = &mc_dev->dev.coherent_dma_mask;
++ arch_setup_dma_ops(&mc_dev->dev, 0,
++ mc_dev->dev.coherent_dma_mask + 1, ops, coherent);
++}
++
++/* Macro to get the container device of a MC device */
++#define fsl_mc_cont_dev(_dev) ((to_fsl_mc_device(_dev)->flags & \
++ FSL_MC_IS_DPRC) ? (_dev) : ((_dev)->parent))
++
++/* Macro to check if a device is a container device */
++#define is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & FSL_MC_IS_DPRC)
++
++/* Get the IOMMU group for device on fsl-mc bus */
++struct iommu_group *fsl_mc_device_group(struct device *dev)
++{
++ struct device *cont_dev = fsl_mc_cont_dev(dev);
++ struct iommu_group *group;
++
++ /* Container device is responsible for creating the iommu group */
++ if (is_cont_dev(dev)) {
++ group = iommu_group_alloc();
++ if (IS_ERR(group))
++ return NULL;
++ } else {
++ get_device(cont_dev);
++ group = iommu_group_get(cont_dev);
++ put_device(cont_dev);
++ }
++
++ return group;
++}
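
[Note: fsl_mc_device_group() only takes effect once an IOMMU driver routes fsl-mc devices through it. A minimal sketch of that wiring, assuming a 4.9-style iommu_ops with a .device_group hook; the driver and callback name are hypothetical, illustration only, not part of the patch:]

#include <linux/iommu.h>
#include "../include/mc.h"

/* Hypothetical .device_group callback: objects on the fsl-mc bus share
 * their container's group; everything else falls back to the generic
 * per-device group. */
static struct iommu_group *example_device_group(struct device *dev)
{
	if (dev_is_fsl_mc(dev))
		return fsl_mc_device_group(dev);

	return generic_device_group(dev);
}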
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+index 3d46b1b1..b8b2c86e 100644
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+- * Copyright (C) 2015 Freescale Semiconductor, Inc.
++ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -17,6 +17,7 @@
+ #include <linux/irqdomain.h>
+ #include <linux/msi.h>
+ #include "../include/mc-bus.h"
++#include "fsl-mc-private.h"
+
+ /*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+index d459c267..e08b8843 100644
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+@@ -10,13 +10,15 @@
+ #ifndef _FSL_MC_PRIVATE_H_
+ #define _FSL_MC_PRIVATE_H_
+
++#include "../include/mc.h"
++#include "../include/mc-bus.h"
++
+ int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
++ const char *driver_override,
+ struct fsl_mc_device **new_mc_dev);
+
+-void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+-
+ int __init dprc_driver_init(void);
+
+ void dprc_driver_exit(void);
+diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+index 7a6ac640..49127acb 100644
+--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+- * Copyright (C) 2015 Freescale Semiconductor, Inc.
++ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -17,9 +17,10 @@
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include "../include/mc-bus.h"
++#include "fsl-mc-private.h"
+
+ static struct irq_chip its_msi_irq_chip = {
+- .name = "fsl-mc-bus-msi",
++ .name = "ITS-fMSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+@@ -51,7 +52,7 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
+ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+ }
+
+-static struct msi_domain_ops its_fsl_mc_msi_ops = {
++static struct msi_domain_ops its_fsl_mc_msi_ops __ro_after_init = {
+ .msi_prepare = its_fsl_mc_msi_prepare,
+ };
+
+@@ -94,8 +95,8 @@ int __init its_fsl_mc_msi_init(void)
+ continue;
+ }
+
+- WARN_ON(mc_msi_domain->
+- host_data != &its_fsl_mc_msi_domain_info);
++ WARN_ON(mc_msi_domain->host_data !=
++ &its_fsl_mc_msi_domain_info);
+
+ pr_info("fsl-mc MSI: %s domain created\n", np->full_name);
+ }
+diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c
+index 798c965f..d66b87f0 100644
+--- a/drivers/staging/fsl-mc/bus/mc-io.c
++++ b/drivers/staging/fsl-mc/bus/mc-io.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+diff --git a/drivers/staging/fsl-mc/bus/mc-ioctl.h b/drivers/staging/fsl-mc/bus/mc-ioctl.h
+new file mode 100644
+index 00000000..8ac502a1
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h
+@@ -0,0 +1,22 @@
++/*
++ * Freescale Management Complex (MC) ioctl interface
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Author: Lijun Pan <Lijun.Pan@freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++#ifndef _FSL_MC_IOCTL_H_
++#define _FSL_MC_IOCTL_H_
++
++#include <linux/ioctl.h>
++#include "../include/mc-sys.h"
++
++#define RESTOOL_IOCTL_TYPE 'R'
++
++#define RESTOOL_SEND_MC_COMMAND \
++ _IOWR(RESTOOL_IOCTL_TYPE, 0xE0, struct mc_command)
++
++#endif /* _FSL_MC_IOCTL_H_ */
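
[Note: the ioctl above is driven from userspace by the restool utility. A minimal sketch of such a caller, assuming the struct mc_command layout from mc-cmd.h (one 64-bit header word plus seven 64-bit parameter words) and a root DPRC exposed as /dev/dprc.1; both are assumptions of the example, not guaranteed by this header:]

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Assumed to mirror mc-cmd.h; verify against your tree. */
struct mc_command {
	uint64_t header;
	uint64_t params[7];
};

#define RESTOOL_SEND_MC_COMMAND _IOWR('R', 0xE0, struct mc_command)

int main(void)
{
	struct mc_command cmd = { 0 };
	int fd = open("/dev/dprc.1", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Fill cmd.header/cmd.params per the MC command set before sending. */
	if (ioctl(fd, RESTOOL_SEND_MC_COMMAND, &cmd) < 0)
		perror("RESTOOL_SEND_MC_COMMAND");

	close(fd);
	return 0;
}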
+diff --git a/drivers/staging/fsl-mc/bus/mc-restool.c b/drivers/staging/fsl-mc/bus/mc-restool.c
+new file mode 100644
+index 00000000..d5330b68
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/mc-restool.c
+@@ -0,0 +1,405 @@
++/*
++ * Freescale Management Complex (MC) restool driver
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Author: Lijun Pan <Lijun.Pan@freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include "../include/mc.h"
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/mutex.h>
++#include <linux/platform_device.h>
++#include "mc-ioctl.h"
++#include "../include/mc-sys.h"
++#include "../include/mc-bus.h"
++#include "../include/mc-cmd.h"
++#include "../include/dpmng.h"
++
++/**
++ * Maximum number of DPRCs that can be opened at the same time
++ */
++#define MAX_DPRC_HANDLES 64
++
++/**
++ * restool_misc - information associated with the newly added miscdevice
++ * @misc: newly created miscdevice associated with root dprc
++ * @miscdevt: device id of this miscdevice
++ * @list: a linked list node representing this miscdevice
++ * @static_mc_io: pointer to the static MC I/O object used by the restool
++ * @dynamic_instance_count: number of dynamically created instances
++ * @static_instance_in_use: static instance is in use or not
++ * @mutex: mutex lock to serialize the open/release operations
++ * @dev: root dprc associated with this miscdevice
++ */
++struct restool_misc {
++ struct miscdevice misc;
++ dev_t miscdevt;
++ struct list_head list;
++ struct fsl_mc_io *static_mc_io;
++ u32 dynamic_instance_count;
++ bool static_instance_in_use;
++	struct mutex mutex; /* serialize the open/release operations */
++ struct device *dev;
++};
++
++/**
++ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
++ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
++ * @num_translation_ranges: number of entries in addr_translation_ranges
++ * @translation_ranges: array of bus to system address translation ranges
++ */
++struct fsl_mc {
++ struct fsl_mc_device *root_mc_bus_dev;
++ u8 num_translation_ranges;
++ struct fsl_mc_addr_translation_range *translation_ranges;
++};
++
++/*
++ * Global list linking all the miscdevice nodes (struct restool_misc).
++ */
++static LIST_HEAD(misc_list);
++static DEFINE_MUTEX(misc_list_mutex);
++
++static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep)
++{
++ struct fsl_mc_device *root_mc_dev;
++ int error;
++ struct fsl_mc_io *dynamic_mc_io = NULL;
++ struct restool_misc *restool_misc = NULL;
++ struct restool_misc *restool_misc_cursor;
++
++ mutex_lock(&misc_list_mutex);
++
++ list_for_each_entry(restool_misc_cursor, &misc_list, list) {
++ if (restool_misc_cursor->miscdevt == inode->i_rdev) {
++ restool_misc = restool_misc_cursor;
++ break;
++ }
++ }
++
++ mutex_unlock(&misc_list_mutex);
++
++ if (!restool_misc)
++ return -EINVAL;
++
++ if (WARN_ON(!restool_misc->dev))
++ return -EINVAL;
++
++ mutex_lock(&restool_misc->mutex);
++
++ if (!restool_misc->static_instance_in_use) {
++ restool_misc->static_instance_in_use = true;
++ filep->private_data = restool_misc->static_mc_io;
++ } else {
++ dynamic_mc_io = kzalloc(sizeof(*dynamic_mc_io), GFP_KERNEL);
++ if (!dynamic_mc_io) {
++ error = -ENOMEM;
++ goto err_unlock;
++ }
++
++ root_mc_dev = to_fsl_mc_device(restool_misc->dev);
++ error = fsl_mc_portal_allocate(root_mc_dev, 0, &dynamic_mc_io);
++ if (error < 0) {
++ pr_err("Not able to allocate MC portal\n");
++ goto free_dynamic_mc_io;
++ }
++ ++restool_misc->dynamic_instance_count;
++ filep->private_data = dynamic_mc_io;
++ }
++
++ mutex_unlock(&restool_misc->mutex);
++
++ return 0;
++
++free_dynamic_mc_io:
++ kfree(dynamic_mc_io);
++err_unlock:
++ mutex_unlock(&restool_misc->mutex);
++
++ return error;
++}
++
++static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep)
++{
++ struct fsl_mc_io *local_mc_io = filep->private_data;
++ struct restool_misc *restool_misc = NULL;
++ struct restool_misc *restool_misc_cursor;
++
++ if (WARN_ON(!filep->private_data))
++ return -EINVAL;
++
++ mutex_lock(&misc_list_mutex);
++
++ list_for_each_entry(restool_misc_cursor, &misc_list, list) {
++ if (restool_misc_cursor->miscdevt == inode->i_rdev) {
++ restool_misc = restool_misc_cursor;
++ break;
++ }
++ }
++
++ mutex_unlock(&misc_list_mutex);
++
++ if (!restool_misc)
++ return -EINVAL;
++
++ mutex_lock(&restool_misc->mutex);
++
++ if (WARN_ON(restool_misc->dynamic_instance_count == 0 &&
++ !restool_misc->static_instance_in_use)) {
++ mutex_unlock(&restool_misc->mutex);
++ return -EINVAL;
++ }
++
++ /* Globally clean up opened/untracked handles */
++ fsl_mc_portal_reset(local_mc_io);
++
++ /*
++	 * We must check whether local_mc_io is the dynamic or the static
++	 * instance; otherwise we could free the reserved portal by accident,
++	 * or fail to free a dynamically allocated portal when two or more
++	 * instances run concurrently.
++ */
++ if (local_mc_io == restool_misc->static_mc_io) {
++ restool_misc->static_instance_in_use = false;
++ } else {
++ fsl_mc_portal_free(local_mc_io);
++ kfree(filep->private_data);
++ --restool_misc->dynamic_instance_count;
++ }
++
++ filep->private_data = NULL;
++ mutex_unlock(&restool_misc->mutex);
++
++ return 0;
++}
++
++static int restool_send_mc_command(unsigned long arg,
++ struct fsl_mc_io *local_mc_io)
++{
++ int error;
++ struct mc_command mc_cmd;
++
++ if (copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd)))
++ return -EFAULT;
++
++ /*
++ * Send MC command to the MC:
++ */
++ error = mc_send_command(local_mc_io, &mc_cmd);
++ if (error < 0)
++ return error;
++
++ if (copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static long
++fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ int error;
++
++ switch (cmd) {
++ case RESTOOL_SEND_MC_COMMAND:
++ error = restool_send_mc_command(arg, file->private_data);
++ break;
++ default:
++ pr_err("%s: unexpected ioctl call number\n", __func__);
++ error = -EINVAL;
++ }
++
++ return error;
++}
++
++static const struct file_operations fsl_mc_restool_dev_fops = {
++ .owner = THIS_MODULE,
++ .open = fsl_mc_restool_dev_open,
++ .release = fsl_mc_restool_dev_release,
++ .unlocked_ioctl = fsl_mc_restool_dev_ioctl,
++};
++
++static int restool_add_device_file(struct device *dev)
++{
++ u32 name1 = 0;
++ char name2[20] = {0};
++ int error;
++ struct fsl_mc_device *root_mc_dev;
++ struct restool_misc *restool_misc;
++
++ if (dev->bus == &platform_bus_type && dev->driver_data) {
++		if (sscanf(dev_name(dev), "%x.%19s", &name1, name2) != 2)
++ return -EINVAL;
++
++ if (strcmp(name2, "fsl-mc") == 0)
++ pr_debug("platform's root dprc name is: %s\n",
++ dev_name(&(((struct fsl_mc *)
++ (dev->driver_data))->root_mc_bus_dev->dev)));
++ }
++
++ if (!fsl_mc_is_root_dprc(dev))
++ return 0;
++
++ restool_misc = kzalloc(sizeof(*restool_misc), GFP_KERNEL);
++ if (!restool_misc)
++ return -ENOMEM;
++
++ restool_misc->dev = dev;
++ root_mc_dev = to_fsl_mc_device(dev);
++ error = fsl_mc_portal_allocate(root_mc_dev, 0,
++ &restool_misc->static_mc_io);
++ if (error < 0) {
++ pr_err("Not able to allocate MC portal\n");
++ goto free_restool_misc;
++ }
++
++ restool_misc->misc.minor = MISC_DYNAMIC_MINOR;
++ restool_misc->misc.name = dev_name(dev);
++ restool_misc->misc.fops = &fsl_mc_restool_dev_fops;
++
++ error = misc_register(&restool_misc->misc);
++ if (error < 0) {
++ pr_err("misc_register() failed: %d\n", error);
++ goto free_portal;
++ }
++
++ restool_misc->miscdevt = restool_misc->misc.this_device->devt;
++ mutex_init(&restool_misc->mutex);
++ mutex_lock(&misc_list_mutex);
++ list_add(&restool_misc->list, &misc_list);
++ mutex_unlock(&misc_list_mutex);
++
++ pr_info("/dev/%s driver registered\n", dev_name(dev));
++
++ return 0;
++
++free_portal:
++ fsl_mc_portal_free(restool_misc->static_mc_io);
++free_restool_misc:
++ kfree(restool_misc);
++
++ return error;
++}
++
++static int restool_bus_notifier(struct notifier_block *nb,
++ unsigned long action, void *data)
++{
++ int error;
++ struct device *dev = data;
++
++ switch (action) {
++ case BUS_NOTIFY_ADD_DEVICE:
++ error = restool_add_device_file(dev);
++ if (error)
++ return error;
++ break;
++ case BUS_NOTIFY_DEL_DEVICE:
++ case BUS_NOTIFY_REMOVED_DEVICE:
++ case BUS_NOTIFY_BIND_DRIVER:
++ case BUS_NOTIFY_BOUND_DRIVER:
++ case BUS_NOTIFY_UNBIND_DRIVER:
++ case BUS_NOTIFY_UNBOUND_DRIVER:
++ break;
++ default:
++ pr_err("%s: unrecognized device action from %s\n", __func__,
++ dev_name(dev));
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int add_to_restool(struct device *dev, void *data)
++{
++ return restool_add_device_file(dev);
++}
++
++static int __init fsl_mc_restool_driver_init(void)
++{
++ int error;
++ struct notifier_block *nb;
++
++ nb = kzalloc(sizeof(*nb), GFP_KERNEL);
++ if (!nb)
++ return -ENOMEM;
++
++ nb->notifier_call = restool_bus_notifier;
++ error = bus_register_notifier(&fsl_mc_bus_type, nb);
++ if (error)
++ goto free_nb;
++
++ /*
++	 * This driver runs after the fsl-mc bus driver, so root DPRCs may
++	 * already be attached to the bus. Scan fsl_mc_bus_type to make sure
++	 * we find all of them.
++ */
++ error = bus_for_each_dev(&fsl_mc_bus_type, NULL, NULL, add_to_restool);
++ if (error) {
++ bus_unregister_notifier(&fsl_mc_bus_type, nb);
++ kfree(nb);
++ pr_err("restool driver registration failure\n");
++ return error;
++ }
++
++ return 0;
++
++free_nb:
++ kfree(nb);
++ return error;
++}
++
++module_init(fsl_mc_restool_driver_init);
++
++static void __exit fsl_mc_restool_driver_exit(void)
++{
++ struct restool_misc *restool_misc;
++ struct restool_misc *restool_misc_tmp;
++ char name1[20] = {0};
++ u32 name2 = 0;
++
++ list_for_each_entry_safe(restool_misc, restool_misc_tmp,
++ &misc_list, list) {
++ if (sscanf(restool_misc->misc.name, "%4s.%u", name1, &name2)
++ != 2)
++ continue;
++
++ pr_debug("name1=%s,name2=%u\n", name1, name2);
++ pr_debug("misc-device: %s\n", restool_misc->misc.name);
++ if (strcmp(name1, "dprc") != 0)
++ continue;
++
++ if (WARN_ON(!restool_misc->static_mc_io))
++ return;
++
++ if (WARN_ON(restool_misc->dynamic_instance_count != 0))
++ return;
++
++ if (WARN_ON(restool_misc->static_instance_in_use))
++ return;
++
++ misc_deregister(&restool_misc->misc);
++ pr_info("/dev/%s driver unregistered\n",
++ restool_misc->misc.name);
++ fsl_mc_portal_free(restool_misc->static_mc_io);
++ list_del(&restool_misc->list);
++ kfree(restool_misc);
++ }
++}
++
++module_exit(fsl_mc_restool_driver_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor Inc.");
++MODULE_DESCRIPTION("Freescale's MC restool driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
+index 285917c7..cf63c7b6 100644
+--- a/drivers/staging/fsl-mc/bus/mc-sys.c
++++ b/drivers/staging/fsl-mc/bus/mc-sys.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2014 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * I/O services to send MC commands to the MC hardware
+ *
+@@ -13,7 +14,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -46,7 +46,7 @@
+ /**
+ * Timeout in milliseconds to wait for the completion of an MC command
+ */
+-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
++#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
+
+ /*
+ * usleep_range() min and max values used to throttle down polling
+@@ -67,7 +67,7 @@ static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
+
+- return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
++ return cmd_id;
+ }
+
+ static int mc_status_to_error(enum mc_cmd_status status)
+@@ -200,7 +200,7 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
+
+ if (time_after_eq(jiffies, jiffies_until_timeout)) {
+ dev_dbg(mc_io->dev,
+- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
++ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+@@ -240,7 +240,7 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+ if (timeout_usecs == 0) {
+ dev_dbg(mc_io->dev,
+- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
++ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+@@ -294,7 +294,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
+
+ if (status != MC_CMD_STATUS_OK) {
+ dev_dbg(mc_io->dev,
+- "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
++ "MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
+diff --git a/drivers/staging/fsl-mc/include/dpaa2-fd.h b/drivers/staging/fsl-mc/include/dpaa2-fd.h
+new file mode 100644
+index 00000000..72328415
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpaa2-fd.h
+@@ -0,0 +1,706 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAA2_FD_H
++#define __FSL_DPAA2_FD_H
++
++#include <linux/kernel.h>
++
++/**
++ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
++ *
++ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2.
++ * Frames can be enqueued and dequeued to Frame Queues (FQs) which are consumed
++ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE).
++ *
++ * There are three types of frames: single, scatter gather, and frame lists.
++ *
++ * The set of APIs in this file must be used to create, manipulate and
++ * query Frame Descriptors.
++ */
++
++/**
++ * struct dpaa2_fd - Struct describing FDs
++ * @words: for easier/faster copying the whole FD structure
++ * @addr: address in the FD
++ * @len: length in the FD
++ * @bpid: buffer pool ID
++ * @format_offset: format, offset, and short-length fields
++ * @frc: frame context
++ * @ctrl: control bits...including dd, sc, va, err, etc
++ * @flc: flow context address
++ *
++ * This structure represents the basic Frame Descriptor used in the system.
++ */
++struct dpaa2_fd {
++ union {
++ u32 words[8];
++ struct dpaa2_fd_simple {
++ __le64 addr;
++ __le32 len;
++ __le16 bpid;
++ __le16 format_offset;
++ __le32 frc;
++ __le32 ctrl;
++ __le64 flc;
++ } simple;
++ };
++};
++
++#define FD_SHORT_LEN_FLAG_MASK 0x1
++#define FD_SHORT_LEN_FLAG_SHIFT 14
++#define FD_SHORT_LEN_MASK 0x3FFFF
++#define FD_OFFSET_MASK 0x0FFF
++#define FD_FORMAT_MASK 0x3
++#define FD_FORMAT_SHIFT 12
++#define FD_BPID_MASK 0x3FFF
++#define SG_SHORT_LEN_FLAG_MASK 0x1
++#define SG_SHORT_LEN_FLAG_SHIFT 14
++#define SG_SHORT_LEN_MASK 0x1FFFF
++#define SG_OFFSET_MASK 0x0FFF
++#define SG_FORMAT_MASK 0x3
++#define SG_FORMAT_SHIFT 12
++#define SG_BPID_MASK 0x3FFF
++#define SG_FINAL_FLAG_MASK 0x1
++#define SG_FINAL_FLAG_SHIFT 15
++#define FL_SHORT_LEN_FLAG_MASK 0x1
++#define FL_SHORT_LEN_FLAG_SHIFT 14
++#define FL_SHORT_LEN_MASK 0x3FFFF
++#define FL_OFFSET_MASK 0x0FFF
++#define FL_FORMAT_MASK 0x3
++#define FL_FORMAT_SHIFT 12
++#define FL_BPID_MASK 0x3FFF
++#define FL_FINAL_FLAG_MASK 0x1
++#define FL_FINAL_FLAG_SHIFT 15
++
++/* Error bits in FD CTRL */
++#define FD_CTRL_ERR_MASK 0x000000FF
++#define FD_CTRL_UFD 0x00000004
++#define FD_CTRL_SBE 0x00000008
++#define FD_CTRL_FLC 0x00000010
++#define FD_CTRL_FSE 0x00000020
++#define FD_CTRL_FAERR 0x00000040
++
++/* Annotation bits in FD CTRL */
++#define FD_CTRL_PTA 0x00800000
++#define FD_CTRL_PTV1 0x00400000
++
++enum dpaa2_fd_format {
++ dpaa2_fd_single = 0,
++ dpaa2_fd_list,
++ dpaa2_fd_sg
++};
++
++/**
++ * dpaa2_fd_get_addr() - get the addr field of frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the address in the frame descriptor.
++ */
++static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd)
++{
++ return (dma_addr_t)le64_to_cpu(fd->simple.addr);
++}
++
++/**
++ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor
++ * @fd: the given frame descriptor
++ * @addr: the address needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr)
++{
++ fd->simple.addr = cpu_to_le64(addr);
++}
++
++/**
++ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the frame context field in the frame descriptor.
++ */
++static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd)
++{
++ return le32_to_cpu(fd->simple.frc);
++}
++
++/**
++ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor
++ * @fd: the given frame descriptor
++ * @frc: the frame context needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc)
++{
++ fd->simple.frc = cpu_to_le32(frc);
++}
++
++/**
++ * dpaa2_fd_get_ctrl() - Get the control bits in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the control bits field in the frame descriptor.
++ */
++static inline u32 dpaa2_fd_get_ctrl(const struct dpaa2_fd *fd)
++{
++ return le32_to_cpu(fd->simple.ctrl);
++}
++
++/**
++ * dpaa2_fd_set_ctrl() - Set the control bits in the frame descriptor
++ * @fd: the given frame descriptor
++ * @ctrl: the control bits to be set in the frame descriptor
++ */
++static inline void dpaa2_fd_set_ctrl(struct dpaa2_fd *fd, u32 ctrl)
++{
++ fd->simple.ctrl = cpu_to_le32(ctrl);
++}
++
++/**
++ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the flow context in the frame descriptor.
++ */
++static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd)
++{
++ return (dma_addr_t)le64_to_cpu(fd->simple.flc);
++}
++
++/**
++ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor
++ * @fd: the given frame descriptor
++ * @flc_addr: the flow context needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr)
++{
++ fd->simple.flc = cpu_to_le64(flc_addr);
++}
++
++static inline bool dpaa2_fd_short_len(const struct dpaa2_fd *fd)
++{
++ return !!((le16_to_cpu(fd->simple.format_offset) >>
++ FD_SHORT_LEN_FLAG_SHIFT) & FD_SHORT_LEN_FLAG_MASK);
++}
++
++/**
++ * dpaa2_fd_get_len() - Get the length in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the length field in the frame descriptor.
++ */
++static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd)
++{
++ if (dpaa2_fd_short_len(fd))
++ return le32_to_cpu(fd->simple.len) & FD_SHORT_LEN_MASK;
++
++ return le32_to_cpu(fd->simple.len);
++}
++
++/**
++ * dpaa2_fd_set_len() - Set the length field of frame descriptor
++ * @fd: the given frame descriptor
++ * @len: the length needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len)
++{
++ fd->simple.len = cpu_to_le32(len);
++}
++
++/**
++ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the offset.
++ */
++static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd)
++{
++ return le16_to_cpu(fd->simple.format_offset) & FD_OFFSET_MASK;
++}
++
++/**
++ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor
++ * @fd: the given frame descriptor
++ * @offset: the offset needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset)
++{
++ fd->simple.format_offset &= cpu_to_le16(~FD_OFFSET_MASK);
++ fd->simple.format_offset |= cpu_to_le16(offset);
++}
++
++/**
++ * dpaa2_fd_get_format() - Get the format field in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the format.
++ */
++static inline enum dpaa2_fd_format dpaa2_fd_get_format(
++ const struct dpaa2_fd *fd)
++{
++ return (enum dpaa2_fd_format)((le16_to_cpu(fd->simple.format_offset)
++ >> FD_FORMAT_SHIFT) & FD_FORMAT_MASK);
++}
++
++/**
++ * dpaa2_fd_set_format() - Set the format field of frame descriptor
++ * @fd: the given frame descriptor
++ * @format: the format needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd,
++ enum dpaa2_fd_format format)
++{
++ fd->simple.format_offset &=
++ cpu_to_le16(~(FD_FORMAT_MASK << FD_FORMAT_SHIFT));
++ fd->simple.format_offset |= cpu_to_le16(format << FD_FORMAT_SHIFT);
++}
++
++/**
++ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the buffer pool id.
++ */
++static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd)
++{
++ return le16_to_cpu(fd->simple.bpid) & FD_BPID_MASK;
++}
++
++/**
++ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor
++ * @fd: the given frame descriptor
++ * @bpid: buffer pool id to be set
++ */
++static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid)
++{
++ fd->simple.bpid &= cpu_to_le16(~(FD_BPID_MASK));
++ fd->simple.bpid |= cpu_to_le16(bpid);
++}
++
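
[Note: taken together, the accessors above are enough to describe a single-buffer frame. A minimal sketch, assuming 'iova' and 'len' come from a dma_map_single() done elsewhere; illustration only, not part of the patch:]

/* Describe one DMA-mapped buffer with a simple (non-SG) FD. */
static void example_build_fd(struct dpaa2_fd *fd, dma_addr_t iova, u32 len)
{
	memset(fd, 0, sizeof(*fd));
	dpaa2_fd_set_addr(fd, iova);
	dpaa2_fd_set_offset(fd, 0);
	dpaa2_fd_set_len(fd, len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_bpid(fd, 0);	/* not drawn from a buffer pool */
}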
++/**
++ * struct dpaa2_sg_entry - the scatter-gathering structure
++ * @addr: address of the sg entry
++ * @len: length in this sg entry
++ * @bpid: buffer pool id
++ * @format_offset: format and offset fields
++ */
++struct dpaa2_sg_entry {
++ __le64 addr;
++ __le32 len;
++ __le16 bpid;
++ __le16 format_offset;
++};
++
++enum dpaa2_sg_format {
++ dpaa2_sg_single = 0,
++ dpaa2_sg_frame_data,
++ dpaa2_sg_sgt_ext
++};
++
++/* Accessors for SG entry fields */
++
++/**
++ * dpaa2_sg_get_addr() - Get the address from SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the address.
++ */
++static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg)
++{
++	return (dma_addr_t)le64_to_cpu(sg->addr);
++}
++
++/**
++ * dpaa2_sg_set_addr() - Set the address in SG entry
++ * @sg: the given scatter-gathering object
++ * @addr: the address to be set
++ */
++static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr)
++{
++ sg->addr = cpu_to_le64(addr);
++}
++
++static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg)
++{
++ return !!((le16_to_cpu(sg->format_offset) >> SG_SHORT_LEN_FLAG_SHIFT)
++ & SG_SHORT_LEN_FLAG_MASK);
++}
++
++/**
++ * dpaa2_sg_get_len() - Get the length in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the length.
++ */
++static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg)
++{
++ if (dpaa2_sg_short_len(sg))
++ return le32_to_cpu(sg->len) & SG_SHORT_LEN_MASK;
++
++ return le32_to_cpu(sg->len);
++}
++
++/**
++ * dpaa2_sg_set_len() - Set the length in SG entry
++ * @sg: the given scatter-gathering object
++ * @len: the length to be set
++ */
++static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len)
++{
++ sg->len = cpu_to_le32(len);
++}
++
++/**
++ * dpaa2_sg_get_offset() - Get the offset in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the offset.
++ */
++static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg)
++{
++ return le16_to_cpu(sg->format_offset) & SG_OFFSET_MASK;
++}
++
++/**
++ * dpaa2_sg_set_offset() - Set the offset in SG entry
++ * @sg: the given scatter-gathering object
++ * @offset: the offset to be set
++ */
++static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg,
++ u16 offset)
++{
++ sg->format_offset &= cpu_to_le16(~SG_OFFSET_MASK);
++ sg->format_offset |= cpu_to_le16(offset);
++}
++
++/**
++ * dpaa2_sg_get_format() - Get the SG format in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the format.
++ */
++static inline enum dpaa2_sg_format
++ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg)
++{
++ return (enum dpaa2_sg_format)((le16_to_cpu(sg->format_offset)
++ >> SG_FORMAT_SHIFT) & SG_FORMAT_MASK);
++}
++
++/**
++ * dpaa2_sg_set_format() - Set the SG format in SG entry
++ * @sg: the given scatter-gathering object
++ * @format: the format to be set
++ */
++static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg,
++ enum dpaa2_sg_format format)
++{
++ sg->format_offset &= cpu_to_le16(~(SG_FORMAT_MASK << SG_FORMAT_SHIFT));
++ sg->format_offset |= cpu_to_le16(format << SG_FORMAT_SHIFT);
++}
++
++/**
++ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the bpid.
++ */
++static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg)
++{
++ return le16_to_cpu(sg->bpid) & SG_BPID_MASK;
++}
++
++/**
++ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry
++ * @sg: the given scatter-gathering object
++ * @bpid: the bpid to be set
++ */
++static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid)
++{
++ sg->bpid &= cpu_to_le16(~(SG_BPID_MASK));
++ sg->bpid |= cpu_to_le16(bpid);
++}
++
++/**
++ * dpaa2_sg_is_final() - Check final bit in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return bool.
++ */
++static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg)
++{
++ return !!(le16_to_cpu(sg->format_offset) >> SG_FINAL_FLAG_SHIFT);
++}
++
++/**
++ * dpaa2_sg_set_final() - Set the final bit in SG entry
++ * @sg: the given scatter-gathering object
++ * @final: the final boolean to be set
++ */
++static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
++{
++ sg->format_offset &= cpu_to_le16(~(SG_FINAL_FLAG_MASK
++ << SG_FINAL_FLAG_SHIFT));
++ sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
++}
++
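
[Note: an SG table referenced by an FD of format dpaa2_fd_sg is walked until an entry with the final bit set. A minimal sketch, assuming 'sgt' is the CPU-mapped address of the table; illustration only, not part of the patch:]

static void example_walk_sgt(const struct dpaa2_sg_entry *sgt)
{
	int i;

	for (i = 0; ; i++) {
		dma_addr_t addr = dpaa2_sg_get_addr(&sgt[i]);
		u32 len = dpaa2_sg_get_len(&sgt[i]);

		/* consume addr/len here */
		(void)addr;
		(void)len;

		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}
}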
++/**
++ * struct dpaa2_fl_entry - structure for frame list entry.
++ * @addr: address in the FLE
++ * @len: length in the FLE
++ * @bpid: buffer pool ID
++ * @format_offset: format, offset, and short-length fields
++ * @frc: frame context
++ * @ctrl: control bits...including pta, pvt1, pvt2, err, etc
++ * @flc: flow context address
++ */
++struct dpaa2_fl_entry {
++ __le64 addr;
++ __le32 len;
++ __le16 bpid;
++ __le16 format_offset;
++ __le32 frc;
++ __le32 ctrl;
++ __le64 flc;
++};
++
++enum dpaa2_fl_format {
++ dpaa2_fl_single = 0,
++ dpaa2_fl_res,
++ dpaa2_fl_sg
++};
++
++/**
++ * dpaa2_fl_get_addr() - get the addr field of FLE
++ * @fle: the given frame list entry
++ *
++ * Return the address in the frame list entry.
++ */
++static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle)
++{
++ return (dma_addr_t)le64_to_cpu(fle->addr);
++}
++
++/**
++ * dpaa2_fl_set_addr() - Set the addr field of FLE
++ * @fle: the given frame list entry
++ * @addr: the address needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle,
++ dma_addr_t addr)
++{
++ fle->addr = cpu_to_le64(addr);
++}
++
++/**
++ * dpaa2_fl_get_frc() - Get the frame context in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the frame context field in the frame list entry.
++ */
++static inline u32 dpaa2_fl_get_frc(const struct dpaa2_fl_entry *fle)
++{
++ return le32_to_cpu(fle->frc);
++}
++
++/**
++ * dpaa2_fl_set_frc() - Set the frame context in the FLE
++ * @fle: the given frame list entry
++ * @frc: the frame context needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_frc(struct dpaa2_fl_entry *fle, u32 frc)
++{
++ fle->frc = cpu_to_le32(frc);
++}
++
++/**
++ * dpaa2_fl_get_ctrl() - Get the control bits in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the control bits field in the frame list entry.
++ */
++static inline u32 dpaa2_fl_get_ctrl(const struct dpaa2_fl_entry *fle)
++{
++ return le32_to_cpu(fle->ctrl);
++}
++
++/**
++ * dpaa2_fl_set_ctrl() - Set the control bits in the FLE
++ * @fle: the given frame list entry
++ * @ctrl: the control bits to be set in the frame list entry
++ */
++static inline void dpaa2_fl_set_ctrl(struct dpaa2_fl_entry *fle, u32 ctrl)
++{
++ fle->ctrl = cpu_to_le32(ctrl);
++}
++
++/**
++ * dpaa2_fl_get_flc() - Get the flow context in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the flow context in the frame list entry.
++ */
++static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle)
++{
++ return (dma_addr_t)le64_to_cpu(fle->flc);
++}
++
++/**
++ * dpaa2_fl_set_flc() - Set the flow context field of FLE
++ * @fle: the given frame list entry
++ * @flc_addr: the flow context needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle,
++ dma_addr_t flc_addr)
++{
++ fle->flc = cpu_to_le64(flc_addr);
++}
++
++static inline bool dpaa2_fl_short_len(const struct dpaa2_fl_entry *fle)
++{
++ return !!((le16_to_cpu(fle->format_offset) >>
++ FL_SHORT_LEN_FLAG_SHIFT) & FL_SHORT_LEN_FLAG_MASK);
++}
++
++/**
++ * dpaa2_fl_get_len() - Get the length in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the length field in the frame list entry.
++ */
++static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle)
++{
++ if (dpaa2_fl_short_len(fle))
++ return le32_to_cpu(fle->len) & FL_SHORT_LEN_MASK;
++
++ return le32_to_cpu(fle->len);
++}
++
++/**
++ * dpaa2_fl_set_len() - Set the length field of FLE
++ * @fle: the given frame list entry
++ * @len: the length needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len)
++{
++ fle->len = cpu_to_le32(len);
++}
++
++/**
++ * dpaa2_fl_get_offset() - Get the offset field in the frame list entry
++ * @fle: the given frame list entry
++ *
++ * Return the offset.
++ */
++static inline u16 dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle)
++{
++ return le16_to_cpu(fle->format_offset) & FL_OFFSET_MASK;
++}
++
++/**
++ * dpaa2_fl_set_offset() - Set the offset field of FLE
++ * @fle: the given frame list entry
++ * @offset: the offset needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, u16 offset)
++{
++ fle->format_offset &= cpu_to_le16(~FL_OFFSET_MASK);
++ fle->format_offset |= cpu_to_le16(offset);
++}
++
++/**
++ * dpaa2_fl_get_format() - Get the format field in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the format.
++ */
++static inline enum dpaa2_fl_format dpaa2_fl_get_format(
++ const struct dpaa2_fl_entry *fle)
++{
++ return (enum dpaa2_fl_format)((le16_to_cpu(fle->format_offset) >>
++ FL_FORMAT_SHIFT) & FL_FORMAT_MASK);
++}
++
++/**
++ * dpaa2_fl_set_format() - Set the format field of FLE
++ * @fle: the given frame list entry
++ * @format: the format needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle,
++ enum dpaa2_fl_format format)
++{
++ fle->format_offset &= cpu_to_le16(~(FL_FORMAT_MASK << FL_FORMAT_SHIFT));
++ fle->format_offset |= cpu_to_le16(format << FL_FORMAT_SHIFT);
++}
++
++/**
++ * dpaa2_fl_get_bpid() - Get the bpid field in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the buffer pool id.
++ */
++static inline u16 dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle)
++{
++ return le16_to_cpu(fle->bpid) & FL_BPID_MASK;
++}
++
++/**
++ * dpaa2_fl_set_bpid() - Set the bpid field of FLE
++ * @fle: the given frame list entry
++ * @bpid: buffer pool id to be set
++ */
++static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, u16 bpid)
++{
++ fle->bpid &= cpu_to_le16(~(FL_BPID_MASK));
++ fle->bpid |= cpu_to_le16(bpid);
++}
++
++/**
++ * dpaa2_fl_is_final() - Check final bit in FLE
++ * @fle: the given frame list entry
++ *
++ * Return bool.
++ */
++static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle)
++{
++ return !!(le16_to_cpu(fle->format_offset) >> FL_FINAL_FLAG_SHIFT);
++}
++
++/**
++ * dpaa2_fl_set_final() - Set the final bit in FLE
++ * @fle: the given frame list entry
++ * @final: the final boolean to be set
++ */
++static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final)
++{
++ fle->format_offset &= cpu_to_le16(~(FL_FINAL_FLAG_MASK <<
++ FL_FINAL_FLAG_SHIFT));
++ fle->format_offset |= cpu_to_le16(final << FL_FINAL_FLAG_SHIFT);
++}
++
++#endif /* __FSL_DPAA2_FD_H */
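
[Note: frame lists are typically consumed as FLE pairs, for example one input and one output entry handed to an accelerator. A minimal sketch using the accessors above; the buffer addresses and the in/out ordering convention are assumptions of the example (the ordering is consumer-specific), not part of the patch:]

static void example_build_fl(struct dpaa2_fl_entry fle[2],
			     dma_addr_t in, u32 in_len,
			     dma_addr_t out, u32 out_len)
{
	memset(fle, 0, 2 * sizeof(*fle));

	dpaa2_fl_set_addr(&fle[0], out);
	dpaa2_fl_set_len(&fle[0], out_len);
	dpaa2_fl_set_format(&fle[0], dpaa2_fl_single);

	dpaa2_fl_set_addr(&fle[1], in);
	dpaa2_fl_set_len(&fle[1], in_len);
	dpaa2_fl_set_format(&fle[1], dpaa2_fl_single);
	dpaa2_fl_set_final(&fle[1], true);	/* last entry in the list */
}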
+diff --git a/drivers/staging/fsl-mc/include/dpaa2-global.h b/drivers/staging/fsl-mc/include/dpaa2-global.h
+new file mode 100644
+index 00000000..0326447f
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpaa2-global.h
+@@ -0,0 +1,202 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAA2_GLOBAL_H
++#define __FSL_DPAA2_GLOBAL_H
++
++#include <linux/types.h>
++#include <linux/cpumask.h>
++#include "dpaa2-fd.h"
++
++struct dpaa2_dq {
++ union {
++ struct common {
++ u8 verb;
++ u8 reserved[63];
++ } common;
++ struct dq {
++ u8 verb;
++ u8 stat;
++ __le16 seqnum;
++ __le16 oprid;
++ u8 reserved;
++ u8 tok;
++ __le32 fqid;
++ u32 reserved2;
++ __le32 fq_byte_cnt;
++ __le32 fq_frm_cnt;
++ __le64 fqd_ctx;
++ u8 fd[32];
++ } dq;
++ struct scn {
++ u8 verb;
++ u8 stat;
++ u8 state;
++ u8 reserved;
++ __le32 rid_tok;
++ __le64 ctx;
++ } scn;
++ };
++};
++
++/* Parsing frame dequeue results */
++/* FQ empty */
++#define DPAA2_DQ_STAT_FQEMPTY 0x80
++/* FQ held active */
++#define DPAA2_DQ_STAT_HELDACTIVE 0x40
++/* FQ force eligible */
++#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20
++/* valid frame */
++#define DPAA2_DQ_STAT_VALIDFRAME 0x10
++/* FQ ODP enable */
++#define DPAA2_DQ_STAT_ODPVALID 0x04
++/* volatile dequeue */
++#define DPAA2_DQ_STAT_VOLATILE 0x02
++/* volatile dequeue command is expired */
++#define DPAA2_DQ_STAT_EXPIRED 0x01
++
++#define DQ_FQID_MASK 0x00FFFFFF
++#define DQ_FRAME_COUNT_MASK 0x00FFFFFF
++
++/**
++ * dpaa2_dq_flags() - Get the stat field of dequeue response
++ * @dq: the dequeue result.
++ */
++static inline u32 dpaa2_dq_flags(const struct dpaa2_dq *dq)
++{
++ return dq->dq.stat;
++}
++
++/**
++ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull
++ * command.
++ * @dq: the dequeue result
++ *
++ * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
++ */
++static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq)
++{
++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE);
++}
++
++/**
++ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed.
++ * @dq: the dequeue result
++ *
++ * Return boolean.
++ */
++static inline bool dpaa2_dq_is_pull_complete(const struct dpaa2_dq *dq)
++{
++ return !!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
++}
++
++/**
++ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
++ * @dq: the dequeue result
++ *
++ * seqnum is valid only if VALIDFRAME flag is TRUE
++ *
++ * Return seqnum.
++ */
++static inline u16 dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
++{
++ return le16_to_cpu(dq->dq.seqnum);
++}
++
++/**
++ * dpaa2_dq_odpid() - Get the odpid field in dequeue response
++ * @dq: the dequeue result
++ *
++ * odpid is valid only if ODPVALID flag is TRUE.
++ *
++ * Return odpid.
++ */
++static inline u16 dpaa2_dq_odpid(const struct dpaa2_dq *dq)
++{
++ return le16_to_cpu(dq->dq.oprid);
++}
++
++/**
++ * dpaa2_dq_fqid() - Get the fqid in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return fqid.
++ */
++static inline u32 dpaa2_dq_fqid(const struct dpaa2_dq *dq)
++{
++ return le32_to_cpu(dq->dq.fqid) & DQ_FQID_MASK;
++}
++
++/**
++ * dpaa2_dq_byte_count() - Get the byte count in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the byte count remaining in the FQ.
++ */
++static inline u32 dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
++{
++ return le32_to_cpu(dq->dq.fq_byte_cnt);
++}
++
++/**
++ * dpaa2_dq_frame_count() - Get the frame count in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame count remaining in the FQ.
++ */
++static inline u32 dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
++{
++ return le32_to_cpu(dq->dq.fq_frm_cnt) & DQ_FRAME_COUNT_MASK;
++}
++
++/**
++ * dpaa2_dq_fqd_ctx() - Get the frame queue context in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame queue context.
++ */
++static inline u64 dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
++{
++ return le64_to_cpu(dq->dq.fqd_ctx);
++}
++
++/**
++ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame descriptor.
++ */
++static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
++{
++ return (const struct dpaa2_fd *)&dq->dq.fd[0];
++}
++
++#endif /* __FSL_DPAA2_GLOBAL_H */
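
[Note: a minimal sketch of inspecting one dequeue result with the helpers above, assuming 'dq' comes from a store filled by a pull command (see dpaa2-io.h below); illustration only, not part of the patch:]

static void example_handle_dq(const struct dpaa2_dq *dq)
{
	const struct dpaa2_fd *fd;

	if (!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME))
		return;		/* no frame in this entry */

	fd = dpaa2_dq_fd(dq);
	pr_info("frame of %u bytes from FQ %u\n",
		dpaa2_fd_get_len(fd), dpaa2_dq_fqid(dq));
}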
+diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h
+new file mode 100644
+index 00000000..c7d1d997
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
+@@ -0,0 +1,190 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAA2_IO_H
++#define __FSL_DPAA2_IO_H
++
++#include <linux/types.h>
++#include <linux/cpumask.h>
++
++#include "dpaa2-fd.h"
++#include "dpaa2-global.h"
++
++struct dpaa2_io;
++struct dpaa2_io_store;
++struct device;
++
++/**
++ * DOC: DPIO Service
++ *
++ * The DPIO service provides APIs for users to interact with the datapath
++ * by enqueueing and dequeuing frame descriptors.
++ *
++ * The following set of APIs can be used to enqueue and dequeue frames
++ * as well as producing notification callbacks when data is available
++ * for dequeue.
++ */
++
++/**
++ * struct dpaa2_io_desc - The DPIO descriptor
++ * @receives_notifications: Use notification mode. Non-zero if the DPIO
++ * has a channel.
++ * @has_8prio: Set to non-zero for a channel with 8 priority WQs. Ignored
++ * unless receives_notifications is TRUE.
++ * @cpu: The cpu index that at least interrupt handlers will
++ * execute on.
++ * @regs_cena: The cache enabled regs.
++ * @regs_cinh: The cache inhibited regs
++ * @dpio_id: The dpio index
++ * @qman_version: The qman version
++ *
++ * Describes the attributes and features of the DPIO object.
++ */
++struct dpaa2_io_desc {
++ int receives_notifications;
++ int has_8prio;
++ int cpu;
++ void *regs_cena;
++ void *regs_cinh;
++ int dpio_id;
++ u32 qman_version;
++};
++
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
++
++void dpaa2_io_down(struct dpaa2_io *d);
++
++irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
++
++/**
++ * struct dpaa2_io_notification_ctx - The DPIO notification context structure
++ * @cb: The callback to be invoked when the notification arrives
++ * @is_cdan: Zero for FQDAN, non-zero for CDAN
++ * @id: FQID or channel ID, needed for rearm
++ * @desired_cpu: The cpu on which the notifications will show up. -1 means
++ * any CPU.
++ * @dpio_id: The dpio index
++ * @qman64: The 64-bit context value that shows up in the FQDAN/CDAN.
++ * @node: The list node
++ * @dpio_private: The dpio object internal to dpio_service
++ *
++ * Used when a FQDAN/CDAN registration is made by drivers.
++ */
++struct dpaa2_io_notification_ctx {
++ void (*cb)(struct dpaa2_io_notification_ctx *);
++ int is_cdan;
++ u32 id;
++ int desired_cpu;
++ int dpio_id;
++ u64 qman64;
++ struct list_head node;
++ void *dpio_private;
++};
++
++int dpaa2_io_service_register(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++void dpaa2_io_service_deregister(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++int dpaa2_io_service_rearm(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
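++
++/*
++ * Illustrative FQDAN registration flow (names are placeholders; error
++ * handling omitted). schedule_pull() stands in for whatever deferred
++ * dequeue work the driver performs before rearming:
++ *
++ *	static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
++ *	{
++ *		schedule_pull(ctx);
++ *	}
++ *
++ *	ctx->cb = my_fqdan_cb;
++ *	ctx->is_cdan = 0;
++ *	ctx->id = fqid;
++ *	ctx->desired_cpu = -1;
++ *	err = dpaa2_io_service_register(io, ctx);
++ */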
++
++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
++ struct dpaa2_io_store *s);
++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
++ struct dpaa2_io_store *s);
++
++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
++ const struct dpaa2_fd *fd);
++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
++ u16 qdbin, const struct dpaa2_fd *fd);
++int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
++ const u64 *buffers, unsigned int num_buffers);
++int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
++ u64 *buffers, unsigned int num_buffers);
++
++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
++ struct device *dev);
++void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid,
++ uint32_t *fcnt, uint32_t *bcnt);
++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid,
++ uint32_t *num);
++#endif
++
++
++/***************/
++/* CSCN */
++/***************/
++
++/**
++ * struct dpaa2_cscn - The CSCN message format
++ * @verb: identifies the type of message (should be 0x27).
++ * @stat: status bits related to dequeuing response (not used)
++ * @state: bit 0 = 0/1 if CG is no/is congested
++ * @reserved: reserved byte
++ * @cgid: congestion group ID - the first 16 bits
++ * @ctx: context data
++ *
++ * Congestion management can be implemented in software through
++ * the use of Congestion State Change Notifications (CSCN). These
++ * are messages written by DPAA2 hardware to memory whenever the
++ * instantaneous count (I_CNT field in the CG) exceeds the
++ * Congestion State (CS) entrance threshold, signifying congestion
++ * entrance, or when the instantaneous count returns below the exit
++ * threshold, signifying congestion exit. The format of the message
++ * is given by the dpaa2_cscn structure. Bit 0 of the state field
++ * represents congestion state written by the hardware.
++ */
++struct dpaa2_cscn {
++ u8 verb;
++ u8 stat;
++ u8 state;
++ u8 reserved;
++ __le32 cgid;
++ __le64 ctx;
++};
++
++#define DPAA2_CSCN_SIZE 64
++#define DPAA2_CSCN_ALIGN 16
++
++#define DPAA2_CSCN_STATE_MASK 0x1
++#define DPAA2_CSCN_CONGESTED 1
++
++static inline bool dpaa2_cscn_state_congested(struct dpaa2_cscn *cscn)
++{
++ return ((cscn->state & DPAA2_CSCN_STATE_MASK) == DPAA2_CSCN_CONGESTED);
++}
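++
++/*
++ * Illustrative only: checking a CSCN that hardware wrote to the 16-byte
++ * aligned, DMA-able buffer configured for the congestion group.
++ * 'notif_mem' is a placeholder for that buffer's virtual address and
++ * throttle_enqueues() for the driver's reaction:
++ *
++ *	struct dpaa2_cscn *cscn = notif_mem;
++ *
++ *	if (dpaa2_cscn_state_congested(cscn))
++ *		throttle_enqueues();
++ */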
++
++#endif /* __FSL_DPAA2_IO_H */
+diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
+deleted file mode 100644
+index 2860411d..00000000
+--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
++++ /dev/null
+@@ -1,185 +0,0 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#ifndef _FSL_DPBP_CMD_H
+-#define _FSL_DPBP_CMD_H
+-
+-/* DPBP Version */
+-#define DPBP_VER_MAJOR 2
+-#define DPBP_VER_MINOR 2
+-
+-/* Command IDs */
+-#define DPBP_CMDID_CLOSE 0x800
+-#define DPBP_CMDID_OPEN 0x804
+-#define DPBP_CMDID_CREATE 0x904
+-#define DPBP_CMDID_DESTROY 0x900
+-
+-#define DPBP_CMDID_ENABLE 0x002
+-#define DPBP_CMDID_DISABLE 0x003
+-#define DPBP_CMDID_GET_ATTR 0x004
+-#define DPBP_CMDID_RESET 0x005
+-#define DPBP_CMDID_IS_ENABLED 0x006
+-
+-#define DPBP_CMDID_SET_IRQ 0x010
+-#define DPBP_CMDID_GET_IRQ 0x011
+-#define DPBP_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPBP_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPBP_CMDID_SET_IRQ_MASK 0x014
+-#define DPBP_CMDID_GET_IRQ_MASK 0x015
+-#define DPBP_CMDID_GET_IRQ_STATUS 0x016
+-#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
+-
+-#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
+-#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
+-
+-struct dpbp_cmd_open {
+- __le32 dpbp_id;
+-};
+-
+-#define DPBP_ENABLE 0x1
+-
+-struct dpbp_rsp_is_enabled {
+- u8 enabled;
+-};
+-
+-struct dpbp_cmd_set_irq {
+- /* cmd word 0 */
+- u8 irq_index;
+- u8 pad[3];
+- __le32 irq_val;
+- /* cmd word 1 */
+- __le64 irq_addr;
+- /* cmd word 2 */
+- __le32 irq_num;
+-};
+-
+-struct dpbp_cmd_get_irq {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq {
+- /* response word 0 */
+- __le32 irq_val;
+- __le32 pad;
+- /* response word 1 */
+- __le64 irq_addr;
+- /* response word 2 */
+- __le32 irq_num;
+- __le32 type;
+-};
+-
+-struct dpbp_cmd_set_irq_enable {
+- u8 enable;
+- u8 pad[3];
+- u8 irq_index;
+-};
+-
+-struct dpbp_cmd_get_irq_enable {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq_enable {
+- u8 enabled;
+-};
+-
+-struct dpbp_cmd_set_irq_mask {
+- __le32 mask;
+- u8 irq_index;
+-};
+-
+-struct dpbp_cmd_get_irq_mask {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq_mask {
+- __le32 mask;
+-};
+-
+-struct dpbp_cmd_get_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq_status {
+- __le32 status;
+-};
+-
+-struct dpbp_cmd_clear_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_attributes {
+- /* response word 0 */
+- __le16 pad;
+- __le16 bpid;
+- __le32 id;
+- /* response word 1 */
+- __le16 version_major;
+- __le16 version_minor;
+-};
+-
+-struct dpbp_cmd_set_notifications {
+- /* cmd word 0 */
+- __le32 depletion_entry;
+- __le32 depletion_exit;
+- /* cmd word 1 */
+- __le32 surplus_entry;
+- __le32 surplus_exit;
+- /* cmd word 2 */
+- __le16 options;
+- __le16 pad[3];
+- /* cmd word 3 */
+- __le64 message_ctx;
+- /* cmd word 4 */
+- __le64 message_iova;
+-};
+-
+-struct dpbp_rsp_get_notifications {
+- /* response word 0 */
+- __le32 depletion_entry;
+- __le32 depletion_exit;
+- /* response word 1 */
+- __le32 surplus_entry;
+- __le32 surplus_exit;
+- /* response word 2 */
+- __le16 options;
+- __le16 pad[3];
+- /* response word 3 */
+- __le64 message_ctx;
+- /* response word 4 */
+- __le64 message_iova;
+-};
+-
+-#endif /* _FSL_DPBP_CMD_H */
+diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
+index e14e85a5..e9e04cce 100644
+--- a/drivers/staging/fsl-mc/include/dpbp.h
++++ b/drivers/staging/fsl-mc/include/dpbp.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -32,7 +33,8 @@
+ #ifndef __FSL_DPBP_H
+ #define __FSL_DPBP_H
+
+-/* Data Path Buffer Pool API
++/*
++ * Data Path Buffer Pool API
+ * Contains initialization APIs and runtime control APIs for DPBP
+ */
+
+@@ -44,25 +46,8 @@ int dpbp_open(struct fsl_mc_io *mc_io,
+ u16 *token);
+
+ int dpbp_close(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
+-
+-/**
+- * struct dpbp_cfg - Structure representing DPBP configuration
+- * @options: place holder
+- */
+-struct dpbp_cfg {
+- u32 options;
+-};
+-
+-int dpbp_create(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- const struct dpbp_cfg *cfg,
+- u16 *token);
+-
+-int dpbp_destroy(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
++ u32 cmd_flags,
++ u16 token);
+
+ int dpbp_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+@@ -81,140 +66,25 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+-/**
+- * struct dpbp_irq_cfg - IRQ configuration
+- * @addr: Address that must be written to signal a message-based interrupt
+- * @val: Value to write into irq_addr address
+- * @irq_num: A user defined number associated with this IRQ
+- */
+-struct dpbp_irq_cfg {
+- u64 addr;
+- u32 val;
+- int irq_num;
+-};
+-
+-int dpbp_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dpbp_irq_cfg *irq_cfg);
+-
+-int dpbp_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dpbp_irq_cfg *irq_cfg);
+-
+-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en);
+-
+-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en);
+-
+-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask);
+-
+-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask);
+-
+-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status);
+-
+-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status);
+-
+ /**
+ * struct dpbp_attr - Structure representing DPBP attributes
+ * @id: DPBP object ID
+- * @version: DPBP version
+ * @bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers
+ */
+ struct dpbp_attr {
+ int id;
+- /**
+- * struct version - Structure representing DPBP version
+- * @major: DPBP major version
+- * @minor: DPBP minor version
+- */
+- struct {
+- u16 major;
+- u16 minor;
+- } version;
+ u16 bpid;
+ };
+
+-int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_attr *attr);
+-
+-/**
+- * DPBP notifications options
+- */
+-
+-/**
+- * BPSCN write will attempt to allocate into a cache (coherent write)
+- */
+-#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
+-
+-/**
+- * struct dpbp_notification_cfg - Structure representing DPBP notifications
+- * towards software
+- * @depletion_entry: below this threshold the pool is "depleted";
+- * set it to '0' to disable it
+- * @depletion_exit: greater than or equal to this threshold the pool exit its
+- * "depleted" state
+- * @surplus_entry: above this threshold the pool is in "surplus" state;
+- * set it to '0' to disable it
+- * @surplus_exit: less than or equal to this threshold the pool exit its
+- * "surplus" state
+- * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
+- * is not '0' (enable); I/O virtual address (must be in DMA-able memory),
+- * must be 16B aligned.
+- * @message_ctx: The context that will be part of the BPSCN message and will
+- * be written to 'message_iova'
+- * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
+- */
+-struct dpbp_notification_cfg {
+- u32 depletion_entry;
+- u32 depletion_exit;
+- u32 surplus_entry;
+- u32 surplus_exit;
+- u64 message_iova;
+- u64 message_ctx;
+- u16 options;
+-};
+-
+-int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg);
+-
+-int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg);
++int dpbp_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpbp_attr *attr);
+
+-/** @} */
++int dpbp_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
+
+ #endif /* __FSL_DPBP_H */
+diff --git a/drivers/staging/fsl-mc/include/dpcon.h b/drivers/staging/fsl-mc/include/dpcon.h
+new file mode 100644
+index 00000000..efa23906
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpcon.h
+@@ -0,0 +1,115 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPCON_H
++#define __FSL_DPCON_H
++
++/* Data Path Concentrator API
++ * Contains initialization APIs and runtime control APIs for DPCON
++ */
++
++struct fsl_mc_io;
++
++/** General DPCON macros */
++
++/**
++ * Use it to disable notifications; see dpcon_set_notification()
++ */
++#define DPCON_INVALID_DPIO_ID (int)(-1)
++
++int dpcon_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpcon_id,
++ u16 *token);
++
++int dpcon_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpcon_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpcon_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpcon_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpcon_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * struct dpcon_attr - Structure representing DPCON attributes
++ * @id: DPCON object ID
++ * @qbman_ch_id: Channel ID to be used by dequeue operation
++ * @num_priorities: Number of priorities for the DPCON channel (1-8)
++ */
++struct dpcon_attr {
++ int id;
++ u16 qbman_ch_id;
++ u8 num_priorities;
++};
++
++int dpcon_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_attr *attr);
++
++/**
++ * struct dpcon_notification_cfg - Structure representing notification params
++ * @dpio_id: DPIO object ID; must be configured with a notification channel;
++ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
++ * @priority: Priority selection within the DPIO channel; valid values
++ * are 0-7, depending on the number of priorities in that channel
++ * @user_ctx: User context value provided with each CDAN message
++ */
++struct dpcon_notification_cfg {
++ int dpio_id;
++ u8 priority;
++ u64 user_ctx;
++};
++
++int dpcon_set_notification(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_notification_cfg *cfg);
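++
++/*
++ * Illustrative only: direct CDANs from this DPCON to DPIO object 3 at
++ * priority 0. The DPIO ID and the context cookie 'my_ctx' are
++ * placeholders:
++ *
++ *	struct dpcon_notification_cfg cfg = {
++ *		.dpio_id = 3,
++ *		.priority = 0,
++ *		.user_ctx = (u64)my_ctx,
++ *	};
++ *	err = dpcon_set_notification(mc_io, 0, token, &cfg);
++ */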
++
++int dpcon_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
++
++#endif /* __FSL_DPCON_H */
+diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h
+index e5cfd017..170c07dd 100644
+--- a/drivers/staging/fsl-mc/include/dpmng.h
++++ b/drivers/staging/fsl-mc/include/dpmng.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -32,7 +33,8 @@
+ #ifndef __FSL_DPMNG_H
+ #define __FSL_DPMNG_H
+
+-/* Management Complex General API
++/*
++ * Management Complex General API
+ * Contains general API for the Management Complex firmware
+ */
+
+@@ -58,12 +60,8 @@ struct mc_version {
+ u32 revision;
+ };
+
+-int mc_get_version(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- struct mc_version *mc_ver_info);
+-
+-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int *container_id);
++int mc_get_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ struct mc_version *mc_ver_info);
+
+ #endif /* __FSL_DPMNG_H */
+diff --git a/drivers/staging/fsl-mc/include/dpopr.h b/drivers/staging/fsl-mc/include/dpopr.h
+new file mode 100644
+index 00000000..e1110af2
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpopr.h
+@@ -0,0 +1,110 @@
++/*
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPOPR_H_
++#define __FSL_DPOPR_H_
++
++/* Data Path Order Restoration API
++ * Contains initialization APIs and runtime APIs for Order Restoration
++ */
++
++/** Order Restoration properties */
++
++/**
++ * Create a new Order Point Record option
++ */
++#define OPR_OPT_CREATE 0x1
++/**
++ * Retire an existing Order Point Record option
++ */
++#define OPR_OPT_RETIRE 0x2
++
++/**
++ * struct opr_cfg - Structure representing OPR configuration
++ * @oprrws: Order point record (OPR) restoration window size (0 to 5)
++ * 0 - Window size is 32 frames.
++ * 1 - Window size is 64 frames.
++ * 2 - Window size is 128 frames.
++ * 3 - Window size is 256 frames.
++ * 4 - Window size is 512 frames.
++ * 5 - Window size is 1024 frames.
++ * @oa: OPR auto advance NESN window size (0 disabled, 1 enabled)
++ * @olws: OPR acceptable late arrival window size (0 to 3)
++ * 0 - Disabled. Late arrivals are always rejected.
++ * 1 - Window size is 32 frames.
++ * 2 - Window size is the same as the OPR restoration
++ * window size configured in the OPRRWS field.
++ * 3 - Window size is 8192 frames. Late arrivals are
++ * always accepted.
++ * @oeane: Order restoration list (ORL) resource exhaustion
++ * advance NESN enable (0 disabled, 1 enabled)
++ * @oloe: OPR loose ordering enable (0 disabled, 1 enabled)
++ */
++struct opr_cfg {
++ u8 oprrws;
++ u8 oa;
++ u8 olws;
++ u8 oeane;
++ u8 oloe;
++};
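++
++/*
++ * Illustrative only: a 128-frame restoration window (oprrws = 2) that
++ * accepts late arrivals within the same window (olws = 2), with auto
++ * advance and loose ordering left disabled:
++ *
++ *	struct opr_cfg cfg = {
++ *		.oprrws = 2,
++ *		.oa = 0,
++ *		.olws = 2,
++ *		.oeane = 0,
++ *		.oloe = 0,
++ *	};
++ */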
++
++/**
++ * struct opr_qry - Structure representing OPR configuration
++ * @enable: Enabled state
++ * @rip: Retirement In Progress
++ * @ndsn: Next dispensed sequence number
++ * @nesn: Next expected sequence number
++ * @ea_hseq: Early arrival head sequence number
++ * @hseq_nlis: HSEQ not last in sequence
++ * @ea_tseq: Early arrival tail sequence number
++ * @tseq_nlis: TSEQ not last in sequence
++ * @ea_tptr: Early arrival tail pointer
++ * @ea_hptr: Early arrival head pointer
++ * @opr_id: Order Point Record ID
++ * @opr_vid: Order Point Record Virtual ID
++ */
++struct opr_qry {
++ char enable;
++ char rip;
++ u16 ndsn;
++ u16 nesn;
++ u16 ea_hseq;
++ char hseq_nlis;
++ u16 ea_tseq;
++ char tseq_nlis;
++ u16 ea_tptr;
++ u16 ea_hptr;
++ u16 opr_id;
++ u16 opr_vid;
++};
++
++#endif /* __FSL_DPOPR_H_ */
+diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
+index 593b2bbe..8dc411ec 100644
+--- a/drivers/staging/fsl-mc/include/dprc.h
++++ b/drivers/staging/fsl-mc/include/dprc.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -34,26 +35,13 @@
+
+ #include "mc-cmd.h"
+
+-/* Data Path Resource Container API
++/*
++ * Data Path Resource Container API
+ * Contains DPRC API for managing and querying DPAA resources
+ */
+
+ struct fsl_mc_io;
+
+-/**
+- * Set this value as the icid value in dprc_cfg structure when creating a
+- * container, in case the ICID is not selected by the user and should be
+- * allocated by the DPRC from the pool of ICIDs.
+- */
+-#define DPRC_GET_ICID_FROM_POOL (u16)(~(0))
+-
+-/**
+- * Set this value as the portal_id value in dprc_cfg structure when creating a
+- * container, in case the portal ID is not specifically selected by the
+- * user and should be allocated by the DPRC from the pool of portal ids.
+- */
+-#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0))
+-
+ int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+@@ -63,75 +51,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+-/**
+- * Container general options
+- *
+- * These options may be selected at container creation by the container creator
+- * and can be retrieved using dprc_get_attributes()
+- */
+-
+-/* Spawn Policy Option allowed - Indicates that the new container is allowed
+- * to spawn and have its own child containers.
+- */
+-#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
+-
+-/* General Container allocation policy - Indicates that the new container is
+- * allowed to allocate requested resources from its parent container; if not
+- * set, the container is only allowed to use resources in its own pools; Note
+- * that this is a container's global policy, but the parent container may
+- * override it and set specific quota per resource type.
+- */
+-#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
+-
+-/* Object initialization allowed - software context associated with this
+- * container is allowed to invoke object initialization operations.
+- */
+-#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
+-
+-/* Topology change allowed - software context associated with this
+- * container is allowed to invoke topology operations, such as attach/detach
+- * of network objects.
+- */
+-#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
+-
+-/* AIOP - Indicates that container belongs to AIOP. */
+-#define DPRC_CFG_OPT_AIOP 0x00000020
+-
+-/* IRQ Config - Indicates that the container allowed to configure its IRQs. */
+-#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040
+-
+-/**
+- * struct dprc_cfg - Container configuration options
+- * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free
+- * ICID value is allocated by the DPRC
+- * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free
+- * portal ID is allocated by the DPRC
+- * @options: Combination of 'DPRC_CFG_OPT_<X>' options
+- * @label: Object's label
+- */
+-struct dprc_cfg {
+- u16 icid;
+- int portal_id;
+- u64 options;
+- char label[16];
+-};
+-
+-int dprc_create_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_cfg *cfg,
+- int *child_container_id,
+- u64 *child_portal_offset);
+-
+-int dprc_destroy_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id);
+-
+-int dprc_reset_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id);
+
+ /* IRQ */
+
+@@ -139,7 +58,7 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
+ #define DPRC_IRQ_INDEX 0
+
+ /* Number of dprc's IRQs */
+-#define DPRC_NUM_OF_IRQS 1
++#define DPRC_NUM_OF_IRQS 1
+
+ /* DPRC IRQ events */
+
+@@ -151,12 +70,14 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
+ #define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
+ /* IRQ event - Indicates that resources removed from the container */
+ #define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
+-/* IRQ event - Indicates that one of the descendant containers that opened by
++/*
++ * IRQ event - Indicates that one of the descendant containers that opened by
+ * this container is destroyed
+ */
+ #define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
+
+-/* IRQ event - Indicates that on one of the container's opened object is
++/*
++ * IRQ event - Indicates that on one of the container's opened object is
+ * destroyed
+ */
+ #define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
+@@ -171,59 +92,59 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
+ * @irq_num: A user defined number associated with this IRQ
+ */
+ struct dprc_irq_cfg {
+- phys_addr_t paddr;
+- u32 val;
+- int irq_num;
++ phys_addr_t paddr;
++ u32 val;
++ int irq_num;
+ };
+
+-int dprc_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en);
+-
+-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en);
+-
+-int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask);
+-
+-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask);
+-
+-int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status);
+-
+-int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status);
++int dprc_set_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ struct dprc_irq_cfg *irq_cfg);
++
++int dprc_get_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ int *type,
++ struct dprc_irq_cfg *irq_cfg);
++
++int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en);
++
++int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en);
++
++int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask);
++
++int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask);
++
++int dprc_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status);
++
++int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status);
+
+ /**
+ * struct dprc_attributes - Container attributes
+@@ -231,114 +152,23 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ * @icid: Container's ICID
+ * @portal_id: Container's portal ID
+ * @options: Container's options as set at container's creation
+- * @version: DPRC version
+ */
+ struct dprc_attributes {
+ int container_id;
+ u16 icid;
+ int portal_id;
+ u64 options;
+- /**
+- * struct version - DPRC version
+- * @major: DPRC major version
+- * @minor: DPRC minor version
+- */
+- struct {
+- u16 major;
+- u16 minor;
+- } version;
+-};
+-
+-int dprc_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_attributes *attributes);
+-
+-int dprc_set_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 quota);
+-
+-int dprc_get_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 *quota);
+-
+-/* Resource request options */
+-
+-/* Explicit resource ID request - The requested objects/resources
+- * are explicit and sequential (in case of resources).
+- * The base ID is given at res_req at base_align field
+- */
+-#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
+-
+-/* Aligned resources request - Relevant only for resources
+- * request (and not objects). Indicates that resources base ID should be
+- * sequential and aligned to the value given at dprc_res_req base_align field
+- */
+-#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
+-
+-/* Plugged Flag - Relevant only for object assignment request.
+- * Indicates that after all objects assigned. An interrupt will be invoked at
+- * the relevant GPP. The assigned object will be marked as plugged.
+- * plugged objects can't be assigned from their container
+- */
+-#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
+-
+-/**
+- * struct dprc_res_req - Resource request descriptor, to be used in assignment
+- * or un-assignment of resources and objects.
+- * @type: Resource/object type: Represent as a NULL terminated string.
+- * This string may received by using dprc_get_pool() to get resource
+- * type and dprc_get_obj() to get object type;
+- * Note: it is not possible to assign/un-assign DPRC objects
+- * @num: Number of resources
+- * @options: Request options: combination of DPRC_RES_REQ_OPT_ options
+- * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT
+- * is set at option), this field represents the required base ID
+- * for resource allocation; In case of aligned assignment
+- * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field
+- * indicates the required alignment for the resource ID(s) -
+- * use 0 if there is no alignment or explicit ID requirements
+- */
+-struct dprc_res_req {
+- char type[16];
+- u32 num;
+- u32 options;
+- int id_base_align;
+ };
+
+-int dprc_assign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int container_id,
+- struct dprc_res_req *res_req);
+-
+-int dprc_unassign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- struct dprc_res_req *res_req);
+-
+-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *pool_count);
+-
+-int dprc_get_pool(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int pool_index,
+- char *type);
++int dprc_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dprc_attributes *attributes);
+
+ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *obj_count);
++ u32 cmd_flags,
++ u16 token,
++ int *obj_count);
+
+ /* Objects Attributes Flags */
+
+@@ -353,7 +183,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ * masters;
+ * user is responsible for proper memory handling through IOMMU configuration.
+ */
+-#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
++#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+
+ /**
+ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
+@@ -381,41 +211,41 @@ struct dprc_obj_desc {
+ u16 flags;
+ };
+
+-int dprc_get_obj(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int obj_index,
+- struct dprc_obj_desc *obj_desc);
+-
+-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- struct dprc_obj_desc *obj_desc);
+-
+-int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 irq_index,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 irq_index,
+- int *type,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_get_res_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- int *res_count);
++int dprc_get_obj(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int obj_index,
++ struct dprc_obj_desc *obj_desc);
++
++int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ struct dprc_obj_desc *obj_desc);
++
++int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ u8 irq_index,
++ struct dprc_irq_cfg *irq_cfg);
++
++int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ u8 irq_index,
++ int *type,
++ struct dprc_irq_cfg *irq_cfg);
++
++int dprc_get_res_count(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *type,
++ int *res_count);
+
+ /**
+ * enum dprc_iter_status - Iteration status
+@@ -429,27 +259,6 @@ enum dprc_iter_status {
+ DPRC_ITER_STATUS_LAST = 2
+ };
+
+-/**
+- * struct dprc_res_ids_range_desc - Resource ID range descriptor
+- * @base_id: Base resource ID of this range
+- * @last_id: Last resource ID of this range
+- * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at
+- * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE,
+- * additional iterations are needed, until the returned marker is
+- * DPRC_ITER_STATUS_LAST
+- */
+-struct dprc_res_ids_range_desc {
+- int base_id;
+- int last_id;
+- enum dprc_iter_status iter_status;
+-};
+-
+-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- struct dprc_res_ids_range_desc *range_desc);
+-
+ /* Region flags */
+ /* Cacheable - Indicates that region should be mapped as cacheable */
+ #define DPRC_REGION_CACHEABLE 0x00000001
+@@ -481,64 +290,27 @@ struct dprc_region_desc {
+ enum dprc_region_type type;
+ };
+
+-int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 region_index,
+- struct dprc_region_desc *region_desc);
+-
+-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- char *label);
++int dprc_get_obj_region(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ u8 region_index,
++ struct dprc_region_desc *region_desc);
+
+-/**
+- * struct dprc_endpoint - Endpoint description for link connect/disconnect
+- * operations
+- * @type: Endpoint object type: NULL terminated string
+- * @id: Endpoint object ID
+- * @if_id: Interface ID; should be set for endpoints with multiple
+- * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+- */
+-struct dprc_endpoint {
+- char type[16];
+- int id;
+- int if_id;
+-};
++int dprc_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
+
+-/**
+- * struct dprc_connection_cfg - Connection configuration.
+- * Used for virtual connections only
+- * @committed_rate: Committed rate (Mbits/s)
+- * @max_rate: Maximum rate (Mbits/s)
+- */
+-struct dprc_connection_cfg {
+- u32 committed_rate;
+- u32 max_rate;
+-};
++int dprc_get_container_id(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int *container_id);
+
+-int dprc_connect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- const struct dprc_endpoint *endpoint2,
+- const struct dprc_connection_cfg *cfg);
+-
+-int dprc_disconnect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint);
+-
+-int dprc_get_connection(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- struct dprc_endpoint *endpoint2,
+- int *state);
++int dprc_reset_container(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int child_container_id);
+
+ #endif /* _FSL_DPRC_H */
+
+diff --git a/drivers/staging/fsl-mc/include/mc-bus.h b/drivers/staging/fsl-mc/include/mc-bus.h
+index 170684a5..4d1f2d3e 100644
+--- a/drivers/staging/fsl-mc/include/mc-bus.h
++++ b/drivers/staging/fsl-mc/include/mc-bus.h
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus declarations
+ *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -42,8 +42,8 @@ struct msi_domain_info;
+ */
+ struct fsl_mc_resource_pool {
+ enum fsl_mc_pool_type type;
+- int16_t max_count;
+- int16_t free_count;
++ int max_count;
++ int free_count;
+ struct mutex mutex; /* serializes access to free_list */
+ struct list_head free_list;
+ struct fsl_mc_bus *mc_bus;
+@@ -73,6 +73,7 @@ struct fsl_mc_bus {
+ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
+
+ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
+ unsigned int *total_irq_count);
+
+ int __init dprc_driver_init(void);
+diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
+index 5decb989..2e08aa31 100644
+--- a/drivers/staging/fsl-mc/include/mc-cmd.h
++++ b/drivers/staging/fsl-mc/include/mc-cmd.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -48,6 +49,15 @@ struct mc_command {
+ u64 params[MC_CMD_NUM_OF_PARAMS];
+ };
+
++struct mc_rsp_create {
++ __le32 object_id;
++};
++
++struct mc_rsp_api_ver {
++ __le16 major_ver;
++ __le16 minor_ver;
++};
++
+ enum mc_cmd_status {
+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+@@ -72,11 +82,6 @@ enum mc_cmd_status {
+ /* Command completion flag */
+ #define MC_CMD_FLAG_INTR_DIS 0x01
+
+-#define MC_CMD_HDR_CMDID_MASK 0xFFF0
+-#define MC_CMD_HDR_CMDID_SHIFT 4
+-#define MC_CMD_HDR_TOKEN_MASK 0xFFC0
+-#define MC_CMD_HDR_TOKEN_SHIFT 6
+-
+ static inline u64 mc_encode_cmd_header(u16 cmd_id,
+ u32 cmd_flags,
+ u16 token)
+@@ -84,10 +89,8 @@ static inline u64 mc_encode_cmd_header(u16 cmd_id,
+ u64 header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+- hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
+- MC_CMD_HDR_CMDID_MASK);
+- hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
+- MC_CMD_HDR_TOKEN_MASK);
++ hdr->cmd_id = cpu_to_le16(cmd_id);
++ hdr->token = cpu_to_le16(token);
+ hdr->status = MC_CMD_STATUS_READY;
+ if (cmd_flags & MC_CMD_FLAG_PRI)
+ hdr->flags_hw = MC_CMD_FLAG_PRI;
+@@ -102,7 +105,26 @@ static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 token = le16_to_cpu(hdr->token);
+
+- return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
++ return token;
++}
++
++static inline u32 mc_cmd_read_object_id(struct mc_command *cmd)
++{
++ struct mc_rsp_create *rsp_params;
++
++ rsp_params = (struct mc_rsp_create *)cmd->params;
++ return le32_to_cpu(rsp_params->object_id);
++}
++
++static inline void mc_cmd_read_api_version(struct mc_command *cmd,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct mc_rsp_api_ver *rsp_params;
++
++ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
++ *major_ver = le16_to_cpu(rsp_params->major_ver);
++ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
+ }
+
+ #endif /* __FSL_MC_CMD_H */
+diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h
+index 89ad0cf5..dca7f908 100644
+--- a/drivers/staging/fsl-mc/include/mc-sys.h
++++ b/drivers/staging/fsl-mc/include/mc-sys.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2014 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Interface of the I/O services to send MC commands to the MC hardware
+ *
+diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
+index f6e720e8..c23b78a4 100644
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus public interface
+ *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -81,7 +81,7 @@ enum fsl_mc_pool_type {
+ */
+ struct fsl_mc_resource {
+ enum fsl_mc_pool_type type;
+- int32_t id;
++ s32 id;
+ void *data;
+ struct fsl_mc_resource_pool *parent_pool;
+ struct list_head node;
+@@ -122,6 +122,7 @@ struct fsl_mc_device_irq {
+ * @regions: pointer to array of MMIO region entries
+ * @irqs: pointer to array of pointers to interrupts allocated to this device
+ * @resource: generic resource associated with this MC object device, if any.
++ * @driver_override: Driver name to force a match
+ *
+ * Generic device object for MC object devices that are "attached" to a
+ * MC bus.
+@@ -154,6 +155,7 @@ struct fsl_mc_device {
+ struct resource *regions;
+ struct fsl_mc_device_irq **irqs;
+ struct fsl_mc_resource *resource;
++ const char *driver_override;
+ };
+
+ #define to_fsl_mc_device(_dev) \
+@@ -175,6 +177,8 @@ struct fsl_mc_device {
+ #define fsl_mc_driver_register(drv) \
+ __fsl_mc_driver_register(drv, THIS_MODULE)
+
++void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
++
+ int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
+ struct module *owner);
+
+@@ -198,4 +202,13 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
+
+ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+
++void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev,
++ struct device_node *fsl_mc_platform_node, int coherent);
++
++#ifdef CONFIG_FSL_MC_BUS
++struct iommu_group *fsl_mc_device_group(struct device *dev);
++#else
++#define fsl_mc_device_group(__dev) NULL
++#endif
++
+ #endif /* _FSL_MC_H_ */
+--
+2.14.1
+
diff --git a/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch b/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
new file mode 100644
index 0000000000..d513efb2fb
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
@@ -0,0 +1,23027 @@
+From 3a302437605308079db398b67000a77a4fe92da8 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:07:58 +0800
+Subject: [PATCH] dpaa2: support layerscape
+
+This is an integrated patch for layerscape dpaa2 support.
+
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
+Signed-off-by: costi <constantin.tudor@freescale.com>
+Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/soc/fsl/ls2-console/Kconfig | 4 +
+ drivers/soc/fsl/ls2-console/Makefile | 1 +
+ drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
+ drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 +
+ drivers/staging/fsl-dpaa2/ethernet/README | 186 ++
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 350 +++
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 ++
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3155 ++++++++++++++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 460 +++
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 856 ++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 ++
+ drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 600 ++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1770 +++++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.h | 989 ++++++
+ drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
+ drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 +
+ drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 ++++++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 ++++++++
+ drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 ++++++++++++
+ drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
+ drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++
+ drivers/staging/fsl-dpaa2/evb/evb.c | 1350 +++++++++
+ drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
+ drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 ++
+ drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++
+ drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 +++
+ drivers/staging/fsl-dpaa2/mac/mac.c | 666 +++++
+ drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
+ drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++
+ drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 ++
+ drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++
+ 39 files changed, 22696 insertions(+)
+ create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
+ create mode 100644 drivers/soc/fsl/ls2-console/Makefile
+ create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
+ create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c
+ create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h
+ create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c
+
+diff --git a/drivers/soc/fsl/ls2-console/Kconfig b/drivers/soc/fsl/ls2-console/Kconfig
+new file mode 100644
+index 00000000..47d0dc11
+--- /dev/null
++++ b/drivers/soc/fsl/ls2-console/Kconfig
+@@ -0,0 +1,4 @@
++config FSL_LS2_CONSOLE
++ tristate "Layerscape MC and AIOP console support"
++ depends on ARCH_LAYERSCAPE
++ default y
+diff --git a/drivers/soc/fsl/ls2-console/Makefile b/drivers/soc/fsl/ls2-console/Makefile
+new file mode 100644
+index 00000000..62b96346
+--- /dev/null
++++ b/drivers/soc/fsl/ls2-console/Makefile
+@@ -0,0 +1 @@
++obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o
+diff --git a/drivers/soc/fsl/ls2-console/ls2-console.c b/drivers/soc/fsl/ls2-console/ls2-console.c
+new file mode 100644
+index 00000000..68415ad0
+--- /dev/null
++++ b/drivers/soc/fsl/ls2-console/ls2-console.c
+@@ -0,0 +1,284 @@
++/* Copyright 2015-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/miscdevice.h>
++#include <linux/uaccess.h>
++#include <linux/poll.h>
++#include <linux/compat.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/io.h>
++
++/* SoC address for the MC firmware base low/high registers */
++#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020
++#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2
++/* MC firmware base low/high registers indexes */
++#define MCFBALR_OFFSET 0
++#define MCFBAHR_OFFSET 1
++
++/* Bit mask used to obtain the most significant part of the MC base address */
++#define MC_FW_HIGH_ADDR_MASK 0x1FFFF
++/* Bit mask used to obtain the least significant part of the MC base address */
++#define MC_FW_LOW_ADDR_MASK 0xE0000000
++
++#define MC_BUFFER_OFFSET 0x01000000
++#define MC_BUFFER_SIZE (1024*1024*16)
++#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET)
++
++#define AIOP_BUFFER_OFFSET 0x06000000
++#define AIOP_BUFFER_SIZE (1024*1024*16)
++#define AIOP_OFFSET_DELTA (0)
++
++struct log_header {
++ char magic_word[8]; /* magic word */
++ uint32_t buf_start; /* holds the 32-bit little-endian
++ * offset of the start of the buffer
++ */
++ uint32_t buf_length; /* holds the 32-bit little-endian
++ * length of the buffer
++ */
++ uint32_t last_byte; /* holds the 32-bit little-endian offset
++ * of the byte after the last byte that
++ * was written
++ */
++ char reserved[44];
++};
++
++#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
++#define LOG_VERSION_MAJOR 1
++#define LOG_VERSION_MINOR 0
++
++
++#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); }
++
++struct console_data {
++ char *map_addr;
++ struct log_header *hdr;
++ char *start_addr; /* Start of buffer */
++ char *end_addr; /* End of buffer */
++ char *end_of_data; /* Current end of data */
++ char *cur_ptr; /* Last data sent to console */
++};
++
++#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
++
++static inline void __adjust_end(struct console_data *cd)
++{
++ cd->end_of_data = cd->start_addr
++ + LAST_BYTE(le32_to_cpu(cd->hdr->last_byte));
++}
++
++static inline void adjust_end(struct console_data *cd)
++{
++ invalidate(cd->hdr);
++ __adjust_end(cd);
++}
++
++static inline uint64_t get_mc_fw_base_address(void)
++{
++ u32 *mcfbaregs = (u32 *) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS,
++ SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE);
++ u64 mcfwbase = 0ULL;
++
++ mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK;
++ mcfwbase <<= 32;
++ mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK;
++ iounmap(mcfbaregs);
++ pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase);
++ return mcfwbase;
++}
++
++static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp,
++ u64 offset, u64 size,
++ uint8_t *emagic, uint8_t magic_len,
++ u32 offset_delta)
++{
++ struct console_data *cd;
++ uint8_t *magic;
++ uint32_t wrapped;
++
++ cd = kmalloc(sizeof(*cd), GFP_KERNEL);
++ if (cd == NULL)
++ return -ENOMEM;
++ fp->private_data = cd;
++ cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size);
++
++ cd->hdr = (struct log_header *) cd->map_addr;
++ invalidate(cd->hdr);
++
++ magic = cd->hdr->magic_word;
++ if (memcmp(magic, emagic, magic_len)) {
++ pr_info("magic didn't match!\n");
++ pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n",
++ emagic[0], emagic[1], emagic[2], emagic[3],
++ emagic[4], emagic[5], emagic[6], emagic[7]);
++ pr_info(" seen: %02x %02x %02x %02x %02x %02x %02x %02x\n",
++ magic[0], magic[1], magic[2], magic[3],
++ magic[4], magic[5], magic[6], magic[7]);
++		iounmap(cd->map_addr);
++		kfree(cd);
++ return -EIO;
++ }
++
++ cd->start_addr = cd->map_addr
++ + le32_to_cpu(cd->hdr->buf_start) - offset_delta;
++ cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length);
++
++ wrapped = le32_to_cpu(cd->hdr->last_byte)
++ & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
++
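++	/* If the buffer has wrapped around, the oldest data starts just past
++	 * the last byte written; otherwise reading starts at the beginning
++	 * of the buffer.
++	 */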
++ __adjust_end(cd);
++ if (wrapped && (cd->end_of_data != cd->end_addr))
++ cd->cur_ptr = cd->end_of_data+1;
++ else
++ cd->cur_ptr = cd->start_addr;
++
++ return 0;
++}
++
++static int fsl_ls2_mc_console_open(struct inode *node, struct file *fp)
++{
++ uint8_t magic_word[] = { 0, 1, 'C', 'M' };
++
++ return fsl_ls2_generic_console_open(node, fp,
++ MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
++ magic_word, sizeof(magic_word),
++ MC_OFFSET_DELTA);
++}
++
++static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp)
++{
++ uint8_t magic_word[] = { 'P', 'O', 'I', 'A' };
++
++ return fsl_ls2_generic_console_open(node, fp,
++ AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
++ magic_word, sizeof(magic_word),
++ AIOP_OFFSET_DELTA);
++}
++
++static int fsl_ls2_console_close(struct inode *node, struct file *fp)
++{
++ struct console_data *cd = fp->private_data;
++
++ iounmap(cd->map_addr);
++ kfree(cd);
++ return 0;
++}
++
++static ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf,
++				    size_t count, loff_t *f_pos)
++{
++ struct console_data *cd = fp->private_data;
++ size_t bytes = 0;
++ char data;
++
++ /* Check if we need to adjust the end of data addr */
++ adjust_end(cd);
++
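++	/* Copy out one byte at a time, invalidating the cache line each time
++	 * a 64-byte boundary is crossed, and wrapping back to the start of
++	 * the ring buffer when the end is reached.
++	 */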
++ while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) {
++ if (((u64)cd->cur_ptr) % 64 == 0)
++ invalidate(cd->cur_ptr);
++
++ data = *(cd->cur_ptr);
++ if (copy_to_user(&buf[bytes], &data, 1))
++ return -EFAULT;
++ cd->cur_ptr++;
++ if (cd->cur_ptr >= cd->end_addr)
++ cd->cur_ptr = cd->start_addr;
++ ++bytes;
++ }
++ return bytes;
++}
++
++static const struct file_operations fsl_ls2_mc_console_fops = {
++ .owner = THIS_MODULE,
++ .open = fsl_ls2_mc_console_open,
++ .release = fsl_ls2_console_close,
++ .read = fsl_ls2_console_read,
++};
++
++static struct miscdevice fsl_ls2_mc_console_dev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "fsl_mc_console",
++ .fops = &fsl_ls2_mc_console_fops
++};
++
++static const struct file_operations fsl_ls2_aiop_console_fops = {
++ .owner = THIS_MODULE,
++ .open = fsl_ls2_aiop_console_open,
++ .release = fsl_ls2_console_close,
++ .read = fsl_ls2_console_read,
++};
++
++static struct miscdevice fsl_ls2_aiop_console_dev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "fsl_aiop_console",
++ .fops = &fsl_ls2_aiop_console_fops
++};
++
++static int __init fsl_ls2_console_init(void)
++{
++ int err = 0;
++
++ pr_info("Freescale LS2 console driver\n");
++ err = misc_register(&fsl_ls2_mc_console_dev);
++ if (err) {
++ pr_err("fsl_mc_console: cannot register device\n");
++ return err;
++ }
++ pr_info("fsl-ls2-console: device %s registered\n",
++ fsl_ls2_mc_console_dev.name);
++
++	err = misc_register(&fsl_ls2_aiop_console_dev);
++	if (err) {
++		pr_err("fsl_aiop_console: cannot register device\n");
++		misc_deregister(&fsl_ls2_mc_console_dev);
++		return err;
++	}
++ pr_info("fsl-ls2-console: device %s registered\n",
++ fsl_ls2_aiop_console_dev.name);
++
++ return 0;
++}
++
++static void __exit fsl_ls2_console_exit(void)
++{
++ misc_deregister(&fsl_ls2_mc_console_dev);
++
++ misc_deregister(&fsl_ls2_aiop_console_dev);
++}
++
++module_init(fsl_ls2_console_init);
++module_exit(fsl_ls2_console_exit);
++
++MODULE_AUTHOR("Roy Pledge <roy.pledge@freescale.com>");
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("Freescale LS2 console driver");
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile
+new file mode 100644
+index 00000000..e26911d5
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
+@@ -0,0 +1,11 @@
++#
++# Makefile for the Freescale DPAA2 Ethernet controller
++#
++
++obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
++
++fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
++fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
++
++# Needed by the tracing framework
++CFLAGS_dpaa2-eth.o := -I$(src)
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/README b/drivers/staging/fsl-dpaa2/ethernet/README
+new file mode 100644
+index 00000000..410952ec
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/README
+@@ -0,0 +1,186 @@
++Freescale DPAA2 Ethernet driver
++===============================
++
++This file provides documentation for the Freescale DPAA2 Ethernet driver.
++
++
++Contents
++========
++ Supported Platforms
++ Architecture Overview
++ Creating a Network Interface
++ Features & Offloads
++
++
++Supported Platforms
++===================
++This driver provides networking support for Freescale DPAA2 SoCs, e.g.
++LS2080A, LS2088A, LS1088A.
++
++
++Architecture Overview
++=====================
++Unlike regular NICs, in the DPAA2 architecture there is no single hardware block
++representing network interfaces; instead, several separate hardware resources
++work together to provide the networking functionality:
++ - network interfaces
++ - queues, channels
++ - buffer pools
++ - MAC/PHY
++
++All hardware resources are allocated and configured through the Management
++Complex (MC) portals. MC abstracts most of these resources as DPAA2 objects
++and exposes ABIs through which they can be configured and controlled. A few
++hardware resources, like queues, do not have a corresponding MC object and
++are treated as internal resources of other objects.
++
++For a more detailed description of the DPAA2 architecture and its object
++abstractions see:
++ drivers/staging/fsl-mc/README.txt
++
++Each Linux net device is built on top of a Datapath Network Interface (DPNI)
++object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
++(DPCONs).
++
++Configuration interface:
++
++ -----------------------
++ | DPAA2 Ethernet Driver |
++ -----------------------
++ . . .
++ . . .
++ . . . . . . . . . . . .
++ . . .
++ . . .
++ ---------- ---------- -----------
++ | DPBP API | | DPNI API | | DPCON API |
++ ---------- ---------- -----------
++ . . . software
++=========== . ========== . ============ . ===================
++ . . . hardware
++ ------------------------------------------
++ | MC hardware portals |
++ ------------------------------------------
++ . . .
++ . . .
++ ------ ------ -------
++ | DPBP | | DPNI | | DPCON |
++ ------ ------ -------
++
++The DPNIs are network interfaces without a direct one-to-one mapping to PHYs.
++DPBPs represent hardware buffer pools. Packet I/O is performed in the context
++of DPCON objects, using DPIO portals for managing and communicating with the
++hardware resources.
++
++Datapath (I/O) interface:
++
++ -----------------------------------------------
++ | DPAA2 Ethernet Driver |
++ -----------------------------------------------
++ | ^ ^ | |
++ | | | | |
++ enqueue| dequeue| data | dequeue| seed |
++ (Tx) | (Rx, TxC)| avail.| request| buffers|
++ | | notify| | |
++ | | | | |
++ V | | V V
++ -----------------------------------------------
++ | DPIO Driver |
++ -----------------------------------------------
++ | | | | | software
++ | | | | | ================
++ | | | | | hardware
++ -----------------------------------------------
++ | I/O hardware portals |
++ -----------------------------------------------
++ | ^ ^ | |
++ | | | | |
++ | | | V |
++ V | ================ V
++ ---------------------- | -------------
++ queues ---------------------- | | Buffer pool |
++ ---------------------- | -------------
++ =======================
++ Channel
++
++Datapath I/O (DPIO) portals provide enqueue and dequeue services, data
++availability notifications and buffer pool management. DPIOs are shared between
++all DPAA2 objects (and implicitly all DPAA2 kernel drivers) that work with data
++frames, but must be affine to the CPUs for the purpose of traffic distribution.
++
++Frames are transmitted and received through hardware frame queues, which can be
++grouped in channels for the purpose of hardware scheduling. The Ethernet driver
++enqueues TX frames on egress queues; after transmission is complete, a TX
++confirmation frame is sent back to the CPU.
++
++When frames are available on ingress queues, a data availability notification
++is sent to the CPU; notifications are raised per channel, so even if multiple
++queues in the same channel have available frames, only one notification is sent.
++After a channel fires a notification, it must be explicitly rearmed.
++
++Each network interface can have multiple Rx, Tx and confirmation queues affined
++to CPUs, and one channel (DPCON) for each CPU that services at least one queue.
++DPCONs are used to distribute ingress traffic to different CPUs via the cores'
++affine DPIOs.
++
++Hardware buffer pools store ingress frame data. Each network interface has a
++privately owned buffer pool, which it seeds with kernel-allocated buffers.
++
++
++DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a DPMAC
++object or to another DPNI through an internal link, but the connection is
++managed by MC and completely transparent to the Ethernet driver.
++
++ --------- --------- ---------
++ | eth if1 | | eth if2 | | eth ifn |
++ --------- --------- ---------
++ . . .
++ . . .
++ . . .
++ ---------------------------
++ | DPAA2 Ethernet Driver |
++ ---------------------------
++ . . .
++ . . .
++ . . .
++ ------ ------ ------ -------
++ | DPNI | | DPNI | | DPNI | | DPMAC |----+
++ ------ ------ ------ ------- |
++ | | | | |
++ | | | | -----
++ =========== ================== | PHY |
++ -----
++
++Creating a Network Interface
++============================
++A net device is created for each DPNI object probed on the MC bus. Each DPNI has
++a number of properties which determine the network interface configuration
++options and associated hardware resources.
++
++DPNI objects (and the other DPAA2 objects needed for a network interface) can be
++added to a container on the MC bus in one of two ways: statically, through a
++Datapath Layout Binary file (DPL) that is parsed by MC at boot time; or created
++dynamically at runtime, via the DPAA2 objects APIs.
++
++
++Features & Offloads
++===================
++Hardware checksum offloading is supported for TCP and UDP over IPv4/6 frames.
++The checksum offloads can be independently configured on RX and TX through
++ethtool.
++
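++As an illustration (the interface name here is hypothetical), Rx checksum
++offload can be toggled with:
++  ethtool -K eth0 rx on
++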
++Hardware offload of unicast and multicast MAC filtering is supported on the
++ingress path and permanently enabled.
++
++Scatter-gather frames are supported on both RX and TX paths. On TX, SG support
++is configurable via ethtool; on RX it is always enabled.
++
++The DPAA2 hardware can process jumbo Ethernet frames of up to 10K bytes.
++
++The Ethernet driver defines a static flow hashing scheme that distributes
++traffic based on a 5-tuple key: src IP, dst IP, IP proto, L4 src port,
++L4 dst port. No user configuration is supported for now.
++
++Hardware-specific statistics for the network interface, as well as some
++non-standard driver stats, can be consulted through the ethtool -S option.
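++
++For example, listing the statistics for a hypothetical interface:
++  ethtool -S eth0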
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+new file mode 100644
+index 00000000..445c5d17
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+@@ -0,0 +1,350 @@
++
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include "dpaa2-eth.h"
++#include "dpaa2-eth-debugfs.h"
++
++#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
++
++static struct dentry *dpaa2_dbg_root;
++
++static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
++{
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct rtnl_link_stats64 *stats;
++ struct dpaa2_eth_drv_stats *extras;
++ int i;
++
++ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
++ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
++ "Tx SG", "Enq busy");
++
++ for_each_online_cpu(i) {
++ stats = per_cpu_ptr(priv->percpu_stats, i);
++ extras = per_cpu_ptr(priv->percpu_extras, i);
++ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
++ i,
++ stats->rx_packets,
++ stats->rx_errors,
++ extras->rx_sg_frames,
++ stats->tx_packets,
++ stats->tx_errors,
++ extras->tx_conf_frames,
++ extras->tx_sg_frames,
++ extras->tx_portal_busy);
++ }
++
++ return 0;
++}
++
++static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
++{
++ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++
++ err = single_open(file, dpaa2_dbg_cpu_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
++
++ return err;
++}
++
++static const struct file_operations dpaa2_dbg_cpu_ops = {
++ .open = dpaa2_dbg_cpu_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
++{
++ switch (fq->type) {
++ case DPAA2_RX_FQ:
++ return "Rx";
++ case DPAA2_TX_CONF_FQ:
++ return "Tx conf";
++ case DPAA2_RX_ERR_FQ:
++ return "Rx err";
++ default:
++ return "N/A";
++ }
++}
++
++static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
++{
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct dpaa2_eth_fq *fq;
++ u32 fcnt, bcnt;
++ int i, err;
++
++ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
++ "VFQID", "CPU", "Type", "Frames", "Pending frames",
++ "Congestion");
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
++ if (err)
++ fcnt = 0;
++
++ seq_printf(file, "%5d%16d%16s%16llu%16u%16llu\n",
++ fq->fqid,
++ fq->target_cpu,
++ fq_type_to_str(fq),
++ fq->stats.frames,
++ fcnt,
++ fq->stats.congestion_entry);
++ }
++
++ return 0;
++}
++
++static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
++{
++ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++
++ err = single_open(file, dpaa2_dbg_fqs_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
++
++ return err;
++}
++
++static const struct file_operations dpaa2_dbg_fq_ops = {
++ .open = dpaa2_dbg_fqs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
++{
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct dpaa2_eth_channel *ch;
++ int i;
++
++ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
++ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
++ "Avg frm/CDAN");
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n",
++ ch->ch_id,
++ ch->nctx.desired_cpu,
++ ch->stats.dequeue_portal_busy,
++ ch->stats.frames,
++ ch->stats.cdan,
++			   ch->stats.cdan ?
++				ch->stats.frames / ch->stats.cdan : 0);
++ }
++
++ return 0;
++}
++
++static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
++{
++ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++
++ err = single_open(file, dpaa2_dbg_ch_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
++
++ return err;
++}
++
++static const struct file_operations dpaa2_dbg_ch_ops = {
++ .open = dpaa2_dbg_ch_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
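++/* Writing anything to this file resets all software-maintained statistics;
++ * the input buffer contents are ignored.
++ */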
++static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *offset)
++{
++ struct dpaa2_eth_priv *priv = file->private_data;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ struct dpaa2_eth_fq *fq;
++ struct dpaa2_eth_channel *ch;
++ int i;
++
++ for_each_online_cpu(i) {
++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
++ memset(percpu_stats, 0, sizeof(*percpu_stats));
++
++ percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
++ memset(percpu_extras, 0, sizeof(*percpu_extras));
++ }
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ memset(&fq->stats, 0, sizeof(fq->stats));
++ }
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ memset(&ch->stats, 0, sizeof(ch->stats));
++ }
++
++ return count;
++}
++
++static const struct file_operations dpaa2_dbg_reset_ops = {
++ .open = simple_open,
++ .write = dpaa2_dbg_reset_write,
++};
++
++static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
++ const char __user *buf,
++ size_t count, loff_t *offset)
++{
++ struct dpaa2_eth_priv *priv = file->private_data;
++ int err;
++
++ err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
++ if (err)
++ netdev_err(priv->net_dev,
++ "dpni_reset_statistics() failed %d\n", err);
++
++ return count;
++}
++
++static const struct file_operations dpaa2_dbg_reset_mc_ops = {
++ .open = simple_open,
++ .write = dpaa2_dbg_reset_mc_write,
++};
++
++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
++{
++ if (!dpaa2_dbg_root)
++ return;
++
++ /* Create a directory for the interface */
++ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
++ dpaa2_dbg_root);
++ if (!priv->dbg.dir) {
++ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
++ return;
++ }
++
++ /* per-cpu stats file */
++ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_cpu_ops);
++ if (!priv->dbg.cpu_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_cpu_stats;
++ }
++
++ /* per-fq stats file */
++ priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_fq_ops);
++ if (!priv->dbg.fq_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_fq_stats;
++ }
++
++	/* per-channel stats file */
++ priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_ch_ops);
++	if (!priv->dbg.ch_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_ch_stats;
++ }
++
++ /* reset stats */
++ priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_reset_ops);
++ if (!priv->dbg.reset_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_reset_stats;
++ }
++
++ /* reset MC stats */
++ priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
++ 0222, priv->dbg.dir, priv,
++ &dpaa2_dbg_reset_mc_ops);
++ if (!priv->dbg.reset_mc_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_reset_mc_stats;
++ }
++
++ return;
++
++err_reset_mc_stats:
++ debugfs_remove(priv->dbg.reset_stats);
++err_reset_stats:
++ debugfs_remove(priv->dbg.ch_stats);
++err_ch_stats:
++ debugfs_remove(priv->dbg.fq_stats);
++err_fq_stats:
++ debugfs_remove(priv->dbg.cpu_stats);
++err_cpu_stats:
++ debugfs_remove(priv->dbg.dir);
++}
++
++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
++{
++ debugfs_remove(priv->dbg.reset_mc_stats);
++ debugfs_remove(priv->dbg.reset_stats);
++ debugfs_remove(priv->dbg.fq_stats);
++ debugfs_remove(priv->dbg.ch_stats);
++ debugfs_remove(priv->dbg.cpu_stats);
++ debugfs_remove(priv->dbg.dir);
++}
++
++void dpaa2_eth_dbg_init(void)
++{
++ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
++ if (!dpaa2_dbg_root) {
++ pr_err("DPAA2-ETH: debugfs create failed\n");
++ return;
++ }
++
++ pr_info("DPAA2-ETH: debugfs created\n");
++}
++
++void __exit dpaa2_eth_dbg_exit(void)
++{
++ debugfs_remove(dpaa2_dbg_root);
++}
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
+new file mode 100644
+index 00000000..551e6c4c
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
+@@ -0,0 +1,60 @@
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef DPAA2_ETH_DEBUGFS_H
++#define DPAA2_ETH_DEBUGFS_H
++
++#include <linux/dcache.h>
++
++struct dpaa2_eth_priv;
++
++struct dpaa2_debugfs {
++ struct dentry *dir;
++ struct dentry *fq_stats;
++ struct dentry *ch_stats;
++ struct dentry *cpu_stats;
++ struct dentry *reset_stats;
++ struct dentry *reset_mc_stats;
++};
++
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++void dpaa2_eth_dbg_init(void);
++void dpaa2_eth_dbg_exit(void);
++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
++#else
++static inline void dpaa2_eth_dbg_init(void) {}
++static inline void dpaa2_eth_dbg_exit(void) {}
++static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
++static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
++#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
++
++#endif /* DPAA2_ETH_DEBUGFS_H */
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+new file mode 100644
+index 00000000..e8e6522a
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+@@ -0,0 +1,184 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM dpaa2_eth
++
++#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _DPAA2_ETH_TRACE_H
++
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/tracepoint.h>
++
++#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
++/* trace_printk format for raw buffer event class */
++#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
++
++/* This is used to declare a class of events.
++ * individual events of this type will be defined below.
++ */
++
++/* Store details about a frame descriptor */
++DECLARE_EVENT_CLASS(dpaa2_eth_fd,
++ /* Trace function prototype */
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
++
++ /* Repeat argument list here */
++ TP_ARGS(netdev, fd),
++
++ /* A structure containing the relevant information we want
++ * to record. Declare name and type for each normal element,
++ * name, type and size for arrays. Use __string for variable
++ * length strings.
++ */
++ TP_STRUCT__entry(
++ __field(u64, fd_addr)
++ __field(u32, fd_len)
++ __field(u16, fd_offset)
++ __string(name, netdev->name)
++ ),
++
++ /* The function that assigns values to the above declared
++ * fields
++ */
++ TP_fast_assign(
++ __entry->fd_addr = dpaa2_fd_get_addr(fd);
++ __entry->fd_len = dpaa2_fd_get_len(fd);
++ __entry->fd_offset = dpaa2_fd_get_offset(fd);
++ __assign_str(name, netdev->name);
++ ),
++
++ /* This is what gets printed when the trace event is
++ * triggered.
++ */
++ TP_printk(TR_FMT,
++ __get_str(name),
++ __entry->fd_addr,
++ __entry->fd_len,
++ __entry->fd_offset)
++);
++
++/* Now declare events of the above type. Format is:
++ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
++ */
++
++/* Tx (egress) fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
++
++ TP_ARGS(netdev, fd)
++);
++
++/* Rx fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
++
++ TP_ARGS(netdev, fd)
++);
++
++/* Tx confirmation fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
++
++ TP_ARGS(netdev, fd)
++);
++
++/* Log data about raw buffers. Useful for tracing DPBP content. */
++TRACE_EVENT(dpaa2_eth_buf_seed,
++ /* Trace function prototype */
++ TP_PROTO(struct net_device *netdev,
++ /* virtual address and size */
++ void *vaddr,
++ size_t size,
++ /* dma map address and size */
++ dma_addr_t dma_addr,
++ size_t map_size,
++ /* buffer pool id, if relevant */
++ u16 bpid),
++
++ /* Repeat argument list here */
++ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
++
++ /* A structure containing the relevant information we want
++ * to record. Declare name and type for each normal element,
++ * name, type and size for arrays. Use __string for variable
++ * length strings.
++ */
++ TP_STRUCT__entry(
++ __field(void *, vaddr)
++ __field(size_t, size)
++ __field(dma_addr_t, dma_addr)
++ __field(size_t, map_size)
++ __field(u16, bpid)
++ __string(name, netdev->name)
++ ),
++
++ /* The function that assigns values to the above declared
++ * fields
++ */
++ TP_fast_assign(
++ __entry->vaddr = vaddr;
++ __entry->size = size;
++ __entry->dma_addr = dma_addr;
++ __entry->map_size = map_size;
++ __entry->bpid = bpid;
++ __assign_str(name, netdev->name);
++ ),
++
++ /* This is what gets printed when the trace event is
++ * triggered.
++ */
++ TP_printk(TR_BUF_FMT,
++ __get_str(name),
++ __entry->vaddr,
++ __entry->size,
++ &__entry->dma_addr,
++ __entry->map_size,
++ __entry->bpid)
++);
++
++/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
++ * The syntax is the same as for DECLARE_EVENT_CLASS().
++ */
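++
++/* At runtime, these tracepoints can be enabled through tracefs, e.g.:
++ *   echo 1 > /sys/kernel/debug/tracing/events/dpaa2_eth/enable
++ */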
++
++#endif /* _DPAA2_ETH_TRACE_H */
++
++/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE dpaa2-eth-trace
++#include <trace/define_trace.h>
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+new file mode 100644
+index 00000000..452eca52
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -0,0 +1,3155 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/etherdevice.h>
++#include <linux/of_net.h>
++#include <linux/interrupt.h>
++#include <linux/debugfs.h>
++#include <linux/kthread.h>
++#include <linux/msi.h>
++#include <linux/net_tstamp.h>
++#include <linux/iommu.h>
++
++#include "../../fsl-mc/include/dpbp.h"
++#include "../../fsl-mc/include/dpcon.h"
++#include "../../fsl-mc/include/mc.h"
++#include "../../fsl-mc/include/mc-sys.h"
++#include "dpaa2-eth.h"
++#include "dpkg.h"
++
++/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
++ * using trace events only need to #include <trace/events/sched.h>
++ */
++#define CREATE_TRACE_POINTS
++#include "dpaa2-eth-trace.h"
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Freescale Semiconductor, Inc");
++MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
++
++const char dpaa2_eth_drv_version[] = "0.1";
++
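++/* Translate a DMA address found in a frame descriptor to a virtual address:
++ * behind an IOMMU the address is an IOVA that must first be walked through
++ * the IOMMU domain; otherwise it is already a physical address.
++ */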
++void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr)
++{
++ phys_addr_t phys_addr;
++
++ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
++
++ return phys_to_virt(phys_addr);
++}
++
++static void validate_rx_csum(struct dpaa2_eth_priv *priv,
++ u32 fd_status,
++ struct sk_buff *skb)
++{
++ skb_checksum_none_assert(skb);
++
++ /* HW checksum validation is disabled, nothing to do here */
++ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
++ return;
++
++ /* Read checksum validation bits */
++ if (!((fd_status & DPAA2_FAS_L3CV) &&
++ (fd_status & DPAA2_FAS_L4CV)))
++ return;
++
++ /* Inform the stack there's no need to compute L3/L4 csum anymore */
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++}
++
++/* Free a received FD.
++ * Not to be used for Tx conf FDs or on any other paths.
++ */
++static void free_rx_fd(struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ void *vaddr)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ u8 fd_format = dpaa2_fd_get_format(fd);
++ struct dpaa2_sg_entry *sgt;
++ void *sg_vaddr;
++ int i;
++
++ /* If single buffer frame, just free the data buffer */
++ if (fd_format == dpaa2_fd_single)
++ goto free_buf;
++ else if (fd_format != dpaa2_fd_sg)
++ /* we don't support any other format */
++ return;
++
++ /* For S/G frames, we first need to free all SG entries */
++ sgt = vaddr + dpaa2_fd_get_offset(fd);
++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ addr = dpaa2_sg_get_addr(&sgt[i]);
++ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
++
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_FROM_DEVICE);
++
++ put_page(virt_to_head_page(sg_vaddr));
++
++ if (dpaa2_sg_is_final(&sgt[i]))
++ break;
++ }
++
++free_buf:
++ put_page(virt_to_head_page(vaddr));
++}
++
++/* Build a linear skb based on a single-buffer frame descriptor */
++static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ void *fd_vaddr)
++{
++ struct sk_buff *skb = NULL;
++ u16 fd_offset = dpaa2_fd_get_offset(fd);
++ u32 fd_length = dpaa2_fd_get_len(fd);
++
++ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
++ if (unlikely(!skb))
++ return NULL;
++
++ skb_reserve(skb, fd_offset);
++ skb_put(skb, fd_length);
++
++ ch->buf_count--;
++
++ return skb;
++}
++
++/* Build a non linear (fragmented) skb based on a S/G table */
++static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ struct dpaa2_sg_entry *sgt)
++{
++ struct sk_buff *skb = NULL;
++ struct device *dev = priv->net_dev->dev.parent;
++ void *sg_vaddr;
++ dma_addr_t sg_addr;
++ u16 sg_offset;
++ u32 sg_length;
++ struct page *page, *head_page;
++ int page_offset;
++ int i;
++
++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ struct dpaa2_sg_entry *sge = &sgt[i];
++
++ /* NOTE: We only support SG entries in dpaa2_sg_single format,
++ * but this is the only format we may receive from HW anyway
++ */
++
++ /* Get the address and length from the S/G entry */
++ sg_addr = dpaa2_sg_get_addr(sge);
++ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr);
++ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_FROM_DEVICE);
++
++ sg_length = dpaa2_sg_get_len(sge);
++
++ if (i == 0) {
++ /* We build the skb around the first data buffer */
++ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
++ if (unlikely(!skb))
++ return NULL;
++
++ sg_offset = dpaa2_sg_get_offset(sge);
++ skb_reserve(skb, sg_offset);
++ skb_put(skb, sg_length);
++ } else {
++ /* Rest of the data buffers are stored as skb frags */
++ page = virt_to_page(sg_vaddr);
++ head_page = virt_to_head_page(sg_vaddr);
++
++ /* Offset in page (which may be compound).
++ * Data in subsequent SG entries is stored from the
++ * beginning of the buffer, so we don't need to add the
++ * sg_offset.
++ */
++ page_offset = ((unsigned long)sg_vaddr &
++ (PAGE_SIZE - 1)) +
++ (page_address(page) - page_address(head_page));
++
++ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
++ sg_length, DPAA2_ETH_RX_BUF_SIZE);
++ }
++
++ if (dpaa2_sg_is_final(sge))
++ break;
++ }
++
++ /* Count all data buffers + SG table buffer */
++ ch->buf_count -= i + 2;
++
++ return skb;
++}
++
++/* Main Rx frame processing routine */
++static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi,
++ u16 queue_id)
++{
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ u8 fd_format = dpaa2_fd_get_format(fd);
++ void *vaddr;
++ struct sk_buff *skb;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpaa2_fas *fas;
++ void *buf_data;
++ u32 status = 0;
++
++ /* Tracing point */
++ trace_dpaa2_rx_fd(priv->net_dev, fd);
++
++ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
++
++ /* HWA - FAS, timestamp */
++ fas = dpaa2_eth_get_fas(vaddr);
++ prefetch(fas);
++ /* data / SG table */
++ buf_data = vaddr + dpaa2_fd_get_offset(fd);
++ prefetch(buf_data);
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++
++ switch (fd_format) {
++ case dpaa2_fd_single:
++ skb = build_linear_skb(priv, ch, fd, vaddr);
++ break;
++ case dpaa2_fd_sg:
++ skb = build_frag_skb(priv, ch, buf_data);
++ put_page(virt_to_head_page(vaddr));
++ percpu_extras->rx_sg_frames++;
++ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
++ break;
++ default:
++ /* We don't support any other format */
++ goto err_frame_format;
++ }
++
++ if (unlikely(!skb))
++ goto err_build_skb;
++
++ prefetch(skb->data);
++
++ /* Get the timestamp value */
++ if (priv->ts_rx_en) {
++ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
++ u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr);
++
++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
++ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
++ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
++ }
++
++ /* Check if we need to validate the L4 csum */
++ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
++ status = le32_to_cpu(fas->status);
++ validate_rx_csum(priv, status, skb);
++ }
++
++ skb->protocol = eth_type_trans(skb, priv->net_dev);
++
++ /* Record Rx queue - this will be used when picking a Tx queue to
++ * forward the frames. We're keeping flow affinity through the
++ * network stack.
++ */
++ skb_record_rx_queue(skb, queue_id);
++
++ percpu_stats->rx_packets++;
++ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
++
++ napi_gro_receive(napi, skb);
++
++ return;
++
++err_build_skb:
++ free_rx_fd(priv, fd, vaddr);
++err_frame_format:
++ percpu_stats->rx_dropped++;
++}
++
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++/* Processing of Rx frames received on the error FQ
++ * We check and print the error bits and then free the frame
++ */
++static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi __always_unused,
++ u16 queue_id __always_unused)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ void *vaddr;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_fas *fas;
++ u32 status = 0;
++ bool check_fas_errors = false;
++
++ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
++
++ /* check frame errors in the FD field */
++ if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) {
++ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
++ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
++ if (net_ratelimit())
++			netdev_dbg(priv->net_dev, "Rx frame FD err: 0x%08x\n",
++ fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK);
++ }
++
++ /* check frame errors in the FAS field */
++ if (check_fas_errors) {
++ fas = dpaa2_eth_get_fas(vaddr);
++ status = le32_to_cpu(fas->status);
++ if (net_ratelimit())
++ netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
++ status & DPAA2_FAS_RX_ERR_MASK);
++ }
++ free_rx_fd(priv, fd, vaddr);
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_stats->rx_errors++;
++}
++#endif
++
++/* Consume all frames pull-dequeued into the store. This is the simplest way to
++ * make sure we don't accidentally issue another volatile dequeue which would
++ * overwrite (leak) frames already in the store.
++ *
++ * The number of frames is returned using the last 2 output arguments,
++ * separately for Rx and Tx confirmations.
++ *
++ * Observance of NAPI budget is not our concern, leaving that to the caller.
++ */
++static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
++ int *tx_conf_cleaned)
++{
++ struct dpaa2_eth_priv *priv = ch->priv;
++ struct dpaa2_eth_fq *fq = NULL;
++ struct dpaa2_dq *dq;
++ const struct dpaa2_fd *fd;
++ int cleaned = 0;
++ int is_last;
++
++ do {
++ dq = dpaa2_io_store_next(ch->store, &is_last);
++ if (unlikely(!dq)) {
++ /* If we're here, we *must* have placed a
++			 * volatile dequeue command, so keep reading through
++ * the store until we get some sort of valid response
++ * token (either a valid frame or an "empty dequeue")
++ */
++ continue;
++ }
++
++ fd = dpaa2_dq_fd(dq);
++
++ /* prefetch the frame descriptor */
++ prefetch(fd);
++
++ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
++ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
++ cleaned++;
++ } while (!is_last);
++
++ if (!cleaned)
++ return false;
++
++ /* All frames brought in store by a volatile dequeue
++ * come from the same queue
++ */
++ if (fq->type == DPAA2_TX_CONF_FQ)
++ *tx_conf_cleaned += cleaned;
++ else
++ *rx_cleaned += cleaned;
++
++ fq->stats.frames += cleaned;
++ ch->stats.frames += cleaned;
++
++ return true;
++}
++
++/* Configure the egress frame annotation for timestamp update */
++static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
++{
++ struct dpaa2_faead *faead;
++ u32 ctrl;
++ u32 frc;
++
++ /* Mark the egress frame annotation area as valid */
++ frc = dpaa2_fd_get_frc(fd);
++ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
++
++	/* enable UPD (update prepended data) bit in FAEAD field of
++ * hardware frame annotation area
++ */
++ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
++ faead = dpaa2_eth_get_faead(buf_start);
++ faead->ctrl = cpu_to_le32(ctrl);
++}
++
++/* Create a frame descriptor based on a fragmented skb */
++static int build_sg_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ void *sgt_buf = NULL;
++ dma_addr_t addr;
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ struct dpaa2_sg_entry *sgt;
++ int i, err;
++ int sgt_buf_size;
++ struct scatterlist *scl, *crt_scl;
++ int num_sg;
++ int num_dma_bufs;
++ struct dpaa2_fas *fas;
++ struct dpaa2_eth_swa *swa;
++
++ /* Create and map scatterlist.
++ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
++ * to go beyond nr_frags+1.
++ * Note: We don't support chained scatterlists
++ */
++ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
++ return -EINVAL;
++
++ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
++ if (unlikely(!scl))
++ return -ENOMEM;
++
++ sg_init_table(scl, nr_frags + 1);
++ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
++ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
++ if (unlikely(!num_dma_bufs)) {
++ err = -ENOMEM;
++ goto dma_map_sg_failed;
++ }
++
++ /* Prepare the HW SGT structure */
++ sgt_buf_size = priv->tx_data_offset +
++ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
++ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
++ if (unlikely(!sgt_buf)) {
++ err = -ENOMEM;
++ goto sgt_buf_alloc_failed;
++ }
++ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
++
++ /* PTA from egress side is passed as is to the confirmation side so
++ * we need to clear some fields here in order to find consistent values
++ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
++ * field from the hardware annotation area
++ */
++ fas = dpaa2_eth_get_fas(sgt_buf);
++ memset(fas, 0, DPAA2_FAS_SIZE);
++
++ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
++
++ /* Fill in the HW SGT structure.
++ *
++ * sgt_buf is zeroed out, so the following fields are implicit
++ * in all sgt entries:
++ * - offset is 0
++ * - format is 'dpaa2_sg_single'
++ */
++ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
++ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
++ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
++ }
++ dpaa2_sg_set_final(&sgt[i - 1], true);
++
++ /* Store the skb backpointer in the SGT buffer.
++ * Fit the scatterlist and the number of buffers alongside the
++ * skb backpointer in the software annotation area. We'll need
++ * all of them on Tx Conf.
++ */
++ swa = (struct dpaa2_eth_swa *)sgt_buf;
++ swa->skb = skb;
++ swa->scl = scl;
++ swa->num_sg = num_sg;
++ swa->num_dma_bufs = num_dma_bufs;
++
++ /* Separately map the SGT buffer */
++ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dev, addr))) {
++ err = -ENOMEM;
++ goto dma_map_single_failed;
++ }
++ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
++ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
++ dpaa2_fd_set_addr(fd, addr);
++ dpaa2_fd_set_len(fd, skb->len);
++
++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
++
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++ enable_tx_tstamp(fd, sgt_buf);
++
++ return 0;
++
++dma_map_single_failed:
++ kfree(sgt_buf);
++sgt_buf_alloc_failed:
++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
++dma_map_sg_failed:
++ kfree(scl);
++ return err;
++}
++
++/* Create a frame descriptor based on a linear skb */
++static int build_single_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ u8 *buffer_start;
++ struct sk_buff **skbh;
++ dma_addr_t addr;
++ struct dpaa2_fas *fas;
++
++ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
++ DPAA2_ETH_TX_BUF_ALIGN,
++ DPAA2_ETH_TX_BUF_ALIGN);
++
++ /* PTA from egress side is passed as is to the confirmation side so
++ * we need to clear some fields here in order to find consistent values
++ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
++ * field from the hardware annotation area
++ */
++ fas = dpaa2_eth_get_fas(buffer_start);
++ memset(fas, 0, DPAA2_FAS_SIZE);
++
++ /* Store a backpointer to the skb at the beginning of the buffer
++ * (in the private data area) such that we can release it
++ * on Tx confirm
++ */
++ skbh = (struct sk_buff **)buffer_start;
++ *skbh = skb;
++
++ addr = dma_map_single(dev, buffer_start,
++ skb_tail_pointer(skb) - buffer_start,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dev, addr)))
++ return -ENOMEM;
++
++ dpaa2_fd_set_addr(fd, addr);
++ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
++ dpaa2_fd_set_len(fd, skb->len);
++ dpaa2_fd_set_format(fd, dpaa2_fd_single);
++
++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
++
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++ enable_tx_tstamp(fd, buffer_start);
++
++ return 0;
++}
++
++/* FD freeing routine on the Tx path
++ *
++ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
++ * back-pointed to is also freed.
++ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
++ * dpaa2_eth_tx().
++ * Optionally, return the frame annotation status word (FAS), which needs
++ * to be checked if we're on the confirmation path.
++ */
++static void free_tx_fd(const struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ u32 *status)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ dma_addr_t fd_addr;
++ struct sk_buff **skbh, *skb;
++ unsigned char *buffer_start;
++ int unmap_size;
++ struct scatterlist *scl;
++ int num_sg, num_dma_bufs;
++ struct dpaa2_eth_swa *swa;
++ u8 fd_format = dpaa2_fd_get_format(fd);
++ struct dpaa2_fas *fas;
++
++ fd_addr = dpaa2_fd_get_addr(fd);
++ skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr);
++
++ /* HWA - FAS, timestamp (for Tx confirmation frames) */
++ fas = dpaa2_eth_get_fas(skbh);
++ prefetch(fas);
++
++ switch (fd_format) {
++ case dpaa2_fd_single:
++ skb = *skbh;
++ buffer_start = (unsigned char *)skbh;
++ /* Accessing the skb buffer is safe before dma unmap, because
++ * we didn't map the actual skb shell.
++ */
++ dma_unmap_single(dev, fd_addr,
++ skb_tail_pointer(skb) - buffer_start,
++ DMA_BIDIRECTIONAL);
++ break;
++ case dpaa2_fd_sg:
++ swa = (struct dpaa2_eth_swa *)skbh;
++ skb = swa->skb;
++ scl = swa->scl;
++ num_sg = swa->num_sg;
++ num_dma_bufs = swa->num_dma_bufs;
++
++ /* Unmap the scatterlist */
++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
++ kfree(scl);
++
++ /* Unmap the SGT buffer */
++ unmap_size = priv->tx_data_offset +
++ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
++ dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
++ break;
++ default:
++ /* Unsupported format, mark it as errored and give up */
++ if (status)
++ *status = ~0;
++ return;
++ }
++
++ /* Get the timestamp value */
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
++ struct skb_shared_hwtstamps shhwtstamps;
++ u64 *ns;
++
++ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
++
++ ns = (u64 *)dpaa2_eth_get_ts(skbh);
++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
++ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
++ skb_tstamp_tx(skb, &shhwtstamps);
++ }
++
++ /* Read the status from the Frame Annotation after we unmap the first
++ * buffer but before we free it. The caller function is responsible
++ * for checking the status value.
++ */
++ if (status)
++ *status = le32_to_cpu(fas->status);
++
++ /* Free SGT buffer kmalloc'ed on tx */
++ if (fd_format != dpaa2_fd_single)
++ kfree(skbh);
++
++ /* Move on with skb release */
++ dev_kfree_skb(skb);
++}
++
++static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ struct dpaa2_fd fd;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ struct dpaa2_eth_fq *fq;
++ u16 queue_mapping = skb_get_queue_mapping(skb);
++ int err, i;
++
++ /* If we're congested, stop this tx queue; transmission of the
++ * current skb happens regardless of congestion state
++ */
++ fq = &priv->fq[queue_mapping];
++
++ dma_sync_single_for_cpu(dev, priv->cscn_dma,
++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
++ netif_stop_subqueue(net_dev, queue_mapping);
++ fq->stats.congestion_entry++;
++ }
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++
++ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
++ struct sk_buff *ns;
++
++ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
++ if (unlikely(!ns)) {
++ percpu_stats->tx_dropped++;
++ goto err_alloc_headroom;
++ }
++ dev_kfree_skb(skb);
++ skb = ns;
++ }
++
++ /* We'll be holding a back-reference to the skb until Tx Confirmation;
++ * we don't want that overwritten by a concurrent Tx with a cloned skb.
++ */
++ skb = skb_unshare(skb, GFP_ATOMIC);
++ if (unlikely(!skb)) {
++ /* skb_unshare() has already freed the skb */
++ percpu_stats->tx_dropped++;
++ return NETDEV_TX_OK;
++ }
++
++ /* Setup the FD fields */
++ memset(&fd, 0, sizeof(fd));
++
++ if (skb_is_nonlinear(skb)) {
++ err = build_sg_fd(priv, skb, &fd);
++ percpu_extras->tx_sg_frames++;
++ percpu_extras->tx_sg_bytes += skb->len;
++ } else {
++ err = build_single_fd(priv, skb, &fd);
++ }
++
++ if (unlikely(err)) {
++ percpu_stats->tx_dropped++;
++ goto err_build_fd;
++ }
++
++ /* Tracing point */
++ trace_dpaa2_tx_fd(net_dev, &fd);
++
++ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
++ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
++ fq->tx_qdbin, &fd);
++ /* TODO: This doesn't work. Check on simulator.
++ * err = dpaa2_io_service_enqueue_fq(NULL,
++ * priv->fq[0].fqid_tx, &fd);
++ */
++ if (err != -EBUSY)
++ break;
++ }
++ percpu_extras->tx_portal_busy += i;
++ if (unlikely(err < 0)) {
++ percpu_stats->tx_errors++;
++ /* Clean up everything, including freeing the skb */
++ free_tx_fd(priv, &fd, NULL);
++ } else {
++ percpu_stats->tx_packets++;
++ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
++ }
++
++ return NETDEV_TX_OK;
++
++err_build_fd:
++err_alloc_headroom:
++ dev_kfree_skb(skb);
++
++ return NETDEV_TX_OK;
++}
++
++/* Tx confirmation frame processing routine */
++static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi __always_unused,
++ u16 queue_id)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ u32 status = 0;
++ bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
++ bool check_fas_errors = false;
++
++ /* Tracing point */
++ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
++
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++ percpu_extras->tx_conf_frames++;
++ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
++
++ /* Check congestion state and wake all queues if necessary */
++ if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
++ dma_sync_single_for_cpu(dev, priv->cscn_dma,
++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ if (!dpaa2_cscn_state_congested(priv->cscn_mem))
++ netif_tx_wake_all_queues(priv->net_dev);
++ }
++
++ /* check frame errors in the FD field */
++ if (unlikely(errors)) {
++ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
++ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
++ if (net_ratelimit())
++			netdev_dbg(priv->net_dev, "Tx frame FD err: 0x%08x\n",
++ fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
++ }
++
++ free_tx_fd(priv, fd, check_fas_errors ? &status : NULL);
++
++ /* if there are no errors, we're done */
++ if (likely(!errors))
++ return;
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ /* Tx-conf logically pertains to the egress path. */
++ percpu_stats->tx_errors++;
++
++ if (net_ratelimit())
++		netdev_dbg(priv->net_dev, "Tx frame FAS err: 0x%08x\n",
++ status & DPAA2_FAS_TX_ERR_MASK);
++}
++
++static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
++{
++ int err;
++
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_RX_L3_CSUM, enable);
++ if (err) {
++ netdev_err(priv->net_dev,
++ "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
++ return err;
++ }
++
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_RX_L4_CSUM, enable);
++ if (err) {
++ netdev_err(priv->net_dev,
++ "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
++ return err;
++ }
++
++ return 0;
++}
++
++static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
++{
++ int err;
++
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_TX_L3_CSUM, enable);
++ if (err) {
++ netdev_err(priv->net_dev,
++			   "dpni_set_offload() DPNI_OFF_TX_L3_CSUM failed\n");
++ return err;
++ }
++
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_TX_L4_CSUM, enable);
++ if (err) {
++ netdev_err(priv->net_dev,
++			   "dpni_set_offload() DPNI_OFF_TX_L4_CSUM failed\n");
++ return err;
++ }
++
++ return 0;
++}
++
++/* Perform a single release command to add buffers
++ * to the specified buffer pool
++ */
++static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ void *buf;
++ dma_addr_t addr;
++ int i;
++
++ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
++ /* Allocate buffer visible to WRIOP + skb shared info +
++ * alignment padding.
++ */
++ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv));
++ if (unlikely(!buf))
++ goto err_alloc;
++
++ buf = PTR_ALIGN(buf, priv->rx_buf_align);
++
++ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(dev, addr)))
++ goto err_map;
++
++ buf_array[i] = addr;
++
++ /* tracing point */
++ trace_dpaa2_eth_buf_seed(priv->net_dev,
++ buf, DPAA2_ETH_BUF_RAW_SIZE(priv),
++ addr, DPAA2_ETH_RX_BUF_SIZE,
++ bpid);
++ }
++
++release_bufs:
++ /* In case the portal is busy, retry until successful.
++ * The buffer release function would only fail if the QBMan portal
++ * was busy, which implies portal contention (i.e. more CPUs than
++ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
++ * there is little we can realistically do, short of giving up -
++ * in which case we'd risk depleting the buffer pool and never again
++ * receiving the Rx interrupt which would kick-start the refill logic.
++ * So just keep retrying, at the risk of being moved to ksoftirqd.
++ */
++ while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
++ cpu_relax();
++ return i;
++
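++/* On a mid-batch failure, free the buffer that failed to map (if any) and
++ * hand whatever was gathered so far to the pool rather than leaking it.
++ */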
++err_map:
++ put_page(virt_to_head_page(buf));
++err_alloc:
++ if (i)
++ goto release_bufs;
++
++ return 0;
++}
++
++static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
++{
++ int i, j;
++ int new_count;
++
++ /* This is the lazy seeding of Rx buffer pools.
++	 * add_bufs() is also used on the Rx hotpath and calls
++ * napi_alloc_frag(). The trouble with that is that it in turn ends up
++ * calling this_cpu_ptr(), which mandates execution in atomic context.
++ * Rather than splitting up the code, do a one-off preempt disable.
++ */
++ preempt_disable();
++ for (j = 0; j < priv->num_channels; j++) {
++ priv->channel[j]->buf_count = 0;
++ for (i = 0; i < priv->num_bufs;
++ i += DPAA2_ETH_BUFS_PER_CMD) {
++ new_count = add_bufs(priv, bpid);
++ priv->channel[j]->buf_count += new_count;
++
++ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
++ preempt_enable();
++ return -ENOMEM;
++ }
++ }
++ }
++ preempt_enable();
++
++ return 0;
++}
++
++/* Drain the specified number of buffers from the DPNI's private buffer pool.
++ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
++ */
++static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ void *vaddr;
++ int ret, i;
++
++ do {
++ ret = dpaa2_io_service_acquire(NULL, priv->bpid,
++ buf_array, count);
++ if (ret < 0) {
++ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
++ return;
++ }
++ for (i = 0; i < ret; i++) {
++ /* Same logic as on regular Rx path */
++ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain,
++ buf_array[i]);
++ dma_unmap_single(dev, buf_array[i],
++ DPAA2_ETH_RX_BUF_SIZE,
++ DMA_FROM_DEVICE);
++ put_page(virt_to_head_page(vaddr));
++ }
++ } while (ret);
++}
++
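++/* Drain the pool completely: first in full-command batches, then one buffer
++ * at a time to catch any remainder smaller than a batch.
++ */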
++static void drain_pool(struct dpaa2_eth_priv *priv)
++{
++ preempt_disable();
++ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
++ drain_bufs(priv, 1);
++ preempt_enable();
++}
++
++/* Function is called from softirq context only, so we don't need to guard
++ * the access to percpu count
++ */
++static int refill_pool(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ u16 bpid)
++{
++ int new_count;
++
++ if (likely(ch->buf_count >= priv->refill_thresh))
++ return 0;
++
++ do {
++ new_count = add_bufs(priv, bpid);
++ if (unlikely(!new_count)) {
++ /* Out of memory; abort for now, we'll try later on */
++ break;
++ }
++ ch->buf_count += new_count;
++ } while (ch->buf_count < priv->num_bufs);
++
++ if (unlikely(ch->buf_count < priv->num_bufs))
++ return -ENOMEM;
++
++ return 0;
++}
++
++static int pull_channel(struct dpaa2_eth_channel *ch)
++{
++ int err;
++ int dequeues = -1;
++
++ /* Retry while portal is busy */
++ do {
++ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
++ dequeues++;
++ cpu_relax();
++ } while (err == -EBUSY);
++
++ ch->stats.dequeue_portal_busy += dequeues;
++ if (unlikely(err))
++ ch->stats.pull_err++;
++
++ return err;
++}
++
++/* NAPI poll routine
++ *
++ * Frames are dequeued from the QMan channel associated with this NAPI context.
++ * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
++ * confirmation frames are limited by a threshold per NAPI poll cycle.
++ */
++static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
++{
++ struct dpaa2_eth_channel *ch;
++ int rx_cleaned = 0, tx_conf_cleaned = 0;
++ bool store_cleaned;
++ struct dpaa2_eth_priv *priv;
++ int err;
++
++ ch = container_of(napi, struct dpaa2_eth_channel, napi);
++ priv = ch->priv;
++
++ do {
++ err = pull_channel(ch);
++ if (unlikely(err))
++ break;
++
++ /* Refill pool if appropriate */
++ refill_pool(priv, ch, priv->bpid);
++
++ store_cleaned = consume_frames(ch, &rx_cleaned,
++ &tx_conf_cleaned);
++
++ /* If we've either consumed the budget with Rx frames,
++ * or reached the Tx conf threshold, we're done.
++ */
++ if (rx_cleaned >= budget ||
++ tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
++ return budget;
++ } while (store_cleaned);
++
++ /* We didn't consume the entire budget, finish napi and
++ * re-enable data availability notifications.
++ */
++ napi_complete(napi);
++ do {
++ err = dpaa2_io_service_rearm(NULL, &ch->nctx);
++ cpu_relax();
++ } while (err == -EBUSY);
++
++ return max(rx_cleaned, 1);
++}
++
++static void enable_ch_napi(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_channel *ch;
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ napi_enable(&ch->napi);
++ }
++}
++
++static void disable_ch_napi(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_channel *ch;
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ napi_disable(&ch->napi);
++ }
++}
++
++static int link_state_update(struct dpaa2_eth_priv *priv)
++{
++ struct dpni_link_state state;
++ int err;
++
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (unlikely(err)) {
++ netdev_err(priv->net_dev,
++ "dpni_get_link_state() failed\n");
++ return err;
++ }
++
++	/* Check link state; speed / duplex changes are not treated yet */
++ if (priv->link_state.up == state.up)
++ return 0;
++
++ priv->link_state = state;
++ if (state.up) {
++ netif_carrier_on(priv->net_dev);
++ netif_tx_start_all_queues(priv->net_dev);
++ } else {
++ netif_tx_stop_all_queues(priv->net_dev);
++ netif_carrier_off(priv->net_dev);
++ }
++
++	netdev_info(priv->net_dev, "Link Event: state %s\n",
++ state.up ? "up" : "down");
++
++ return 0;
++}
++
++static int dpaa2_eth_open(struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
++
++ /* We'll only start the txqs when the link is actually ready; make sure
++ * we don't race against the link up notification, which may come
++	 * immediately after dpni_enable().
++ */
++ netif_tx_stop_all_queues(net_dev);
++
++ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
++ * return true and cause 'ip link show' to report the LOWER_UP flag,
++ * even though the link notification wasn't even received.
++ */
++ netif_carrier_off(net_dev);
++
++ err = seed_pool(priv, priv->bpid);
++ if (err) {
++ /* Not much to do; the buffer pool, though not filled up,
++ * may still contain some buffers which would enable us
++ * to limp on.
++ */
++ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
++ priv->dpbp_dev->obj_desc.id, priv->bpid);
++ }
++
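++	/* With Tx pause frames enabled the pool is kept nearly full, since
++	 * flow control is presumably driven by buffer pool depletion; in
++	 * taildrop mode a lower refill threshold suffices.
++	 */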
++ if (priv->tx_pause_frames)
++ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
++ else
++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
++
++ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
++ if (err < 0) {
++ netdev_err(net_dev, "dpni_enable() failed\n");
++ goto enable_err;
++ }
++
++ /* If the DPMAC object has already processed the link up interrupt,
++ * we have to learn the link state ourselves.
++ */
++ err = link_state_update(priv);
++ if (err < 0) {
++ netdev_err(net_dev, "Can't update link state\n");
++ goto link_state_err;
++ }
++
++ return 0;
++
++link_state_err:
++enable_err:
++ priv->refill_thresh = 0;
++ drain_pool(priv);
++ return err;
++}
++
++static int dpaa2_eth_stop(struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int dpni_enabled;
++ int retries = 10, i;
++
++ netif_tx_stop_all_queues(net_dev);
++ netif_carrier_off(net_dev);
++
++ /* Loop while dpni_disable() attempts to drain the egress FQs
++ * and confirm them back to us.
++ */
++ do {
++ dpni_disable(priv->mc_io, 0, priv->mc_token);
++ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
++ if (dpni_enabled)
++ /* Allow the MC some slack */
++ msleep(100);
++ } while (dpni_enabled && --retries);
++ if (!retries) {
++ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
++ /* Must go on and disable NAPI nonetheless, so we don't crash at
++ * the next "ifconfig up"
++ */
++ }
++
++ priv->refill_thresh = 0;
++
++ /* Wait for all running napi poll routines to finish, so that no
++ * new refill operations are started.
++ */
++ for (i = 0; i < priv->num_channels; i++)
++ napi_synchronize(&priv->channel[i]->napi);
++
++ /* Empty the buffer pool */
++ drain_pool(priv);
++
++ return 0;
++}
++
++static int dpaa2_eth_init(struct net_device *net_dev)
++{
++ u64 supported = 0;
++ u64 not_supported = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u32 options = priv->dpni_attrs.options;
++
++ /* Capabilities listing */
++ supported |= IFF_LIVE_ADDR_CHANGE;
++
++ if (options & DPNI_OPT_NO_MAC_FILTER)
++ not_supported |= IFF_UNICAST_FLT;
++ else
++ supported |= IFF_UNICAST_FLT;
++
++ net_dev->priv_flags |= supported;
++ net_dev->priv_flags &= ~not_supported;
++
++ /* Features */
++ net_dev->features = NETIF_F_RXCSUM |
++ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
++ NETIF_F_SG | NETIF_F_HIGHDMA |
++ NETIF_F_LLTX;
++ net_dev->hw_features = net_dev->features;
++
++ return 0;
++}
++
++static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ int err;
++
++ err = eth_mac_addr(net_dev, addr);
++ if (err < 0) {
++ dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
++ return err;
++ }
++
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ net_dev->dev_addr);
++ if (err) {
++ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
++ return err;
++ }
++
++ return 0;
++}
++
++/* Fill in counters maintained by the GPP driver. These may be different from
++ * the hardware counters obtained by ethtool.
++ */
++static void dpaa2_eth_get_stats(struct net_device *net_dev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct rtnl_link_stats64 *percpu_stats;
++ u64 *cpustats;
++ u64 *netstats = (u64 *)stats;
++ int i, j;
++ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
++
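++	/* struct rtnl_link_stats64 is effectively an array of u64 counters,
++	 * so the per-cpu copies can be summed field by field through a flat
++	 * u64 view.
++	 */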
++ for_each_possible_cpu(i) {
++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
++ cpustats = (u64 *)percpu_stats;
++ for (j = 0; j < num; j++)
++ netstats[j] += cpustats[j];
++ }
++}
++
++static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
++
++ /* Set the maximum Rx frame length to match the transmit side;
++ * account for L2 headers when computing the MFL
++ */
++ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
++ (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
++ if (err) {
++ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
++ return err;
++ }
++
++ net_dev->mtu = mtu;
++ return 0;
++}
++
++/* Copy mac unicast addresses from @net_dev to @priv.
++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
++ */
++static void add_uc_hw_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
++{
++ struct netdev_hw_addr *ha;
++ int err;
++
++ netdev_for_each_uc_addr(ha, net_dev) {
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
++ ha->addr);
++ if (err)
++ netdev_warn(priv->net_dev,
++ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
++ }
++}
++
++/* Copy mac multicast addresses from @net_dev to @priv
++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
++ */
++static void add_mc_hw_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
++{
++ struct netdev_hw_addr *ha;
++ int err;
++
++ netdev_for_each_mc_addr(ha, net_dev) {
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
++ ha->addr);
++ if (err)
++ netdev_warn(priv->net_dev,
++ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
++ }
++}
++
++static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int uc_count = netdev_uc_count(net_dev);
++ int mc_count = netdev_mc_count(net_dev);
++ u8 max_mac = priv->dpni_attrs.mac_filter_entries;
++ u32 options = priv->dpni_attrs.options;
++ u16 mc_token = priv->mc_token;
++ struct fsl_mc_io *mc_io = priv->mc_io;
++ int err;
++
++ /* Basic sanity checks; these probably indicate a misconfiguration */
++ if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
++ netdev_info(net_dev,
++ "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
++ max_mac);
++
++ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
++ if (uc_count > max_mac) {
++ netdev_info(net_dev,
++ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
++ uc_count, max_mac);
++ goto force_promisc;
++ }
++ if (mc_count + uc_count > max_mac) {
++ netdev_info(net_dev,
++ "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n",
++ uc_count + mc_count, max_mac);
++ goto force_mc_promisc;
++ }
++
++ /* Adjust promisc settings due to flag combinations */
++ if (net_dev->flags & IFF_PROMISC)
++ goto force_promisc;
++ if (net_dev->flags & IFF_ALLMULTI) {
++ /* First, rebuild unicast filtering table. This should be done
++ * in promisc mode, in order to avoid frame loss while we
++ * progressively add entries to the table.
++ * We don't know whether we had been in promisc already, and
++ * making an MC call to find out is expensive; so set uc promisc
++ * nonetheless.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set uc promisc\n");
++
++ /* Actual uc table reconstruction. */
++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear uc filters\n");
++ add_uc_hw_addr(net_dev, priv);
++
++ /* Finally, clear uc promisc and set mc promisc as requested. */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear uc promisc\n");
++ goto force_mc_promisc;
++ }
++
++	/* Neither unicast nor multicast promisc will be on... eventually.
++ * For now, rebuild mac filtering tables while forcing both of them on.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
++
++ /* Actual mac filtering tables reconstruction */
++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't clear mac filters\n");
++ add_mc_hw_addr(net_dev, priv);
++ add_uc_hw_addr(net_dev, priv);
++
++ /* Now we can clear both ucast and mcast promisc, without risking
++ * to drop legitimate frames anymore.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear ucast promisc\n");
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear mcast promisc\n");
++
++ return;
++
++force_promisc:
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set ucast promisc\n");
++force_mc_promisc:
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set mcast promisc\n");
++}
++
++static int dpaa2_eth_set_features(struct net_device *net_dev,
++ netdev_features_t features)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ netdev_features_t changed = features ^ net_dev->features;
++ bool enable;
++ int err;
++
++ if (changed & NETIF_F_RXCSUM) {
++ enable = !!(features & NETIF_F_RXCSUM);
++ err = set_rx_csum(priv, enable);
++ if (err)
++ return err;
++ }
++
++ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
++ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
++ err = set_tx_csum(priv, enable);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ struct hwtstamp_config config;
++
++ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
++ return -EFAULT;
++
++ switch (config.tx_type) {
++ case HWTSTAMP_TX_OFF:
++ priv->ts_tx_en = false;
++ break;
++ case HWTSTAMP_TX_ON:
++ priv->ts_tx_en = true;
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
++ priv->ts_rx_en = false;
++ } else {
++ priv->ts_rx_en = true;
++ /* TS is set for all frame types, not only those requested */
++ config.rx_filter = HWTSTAMP_FILTER_ALL;
++ }
++
++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
++ -EFAULT : 0;
++}
++
++static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ if (cmd == SIOCSHWTSTAMP)
++ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
++
++ return -EINVAL;
++}
++
++static const struct net_device_ops dpaa2_eth_ops = {
++ .ndo_open = dpaa2_eth_open,
++ .ndo_start_xmit = dpaa2_eth_tx,
++ .ndo_stop = dpaa2_eth_stop,
++ .ndo_init = dpaa2_eth_init,
++ .ndo_set_mac_address = dpaa2_eth_set_addr,
++ .ndo_get_stats64 = dpaa2_eth_get_stats,
++ .ndo_change_mtu = dpaa2_eth_change_mtu,
++ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
++ .ndo_set_features = dpaa2_eth_set_features,
++ .ndo_do_ioctl = dpaa2_eth_ioctl,
++};
++
++static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
++{
++ struct dpaa2_eth_channel *ch;
++
++ ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
++
++ /* Update NAPI statistics */
++ ch->stats.cdan++;
++
++ napi_schedule_irqoff(&ch->napi);
++}
++
++/* Allocate and configure a DPCON object */
++static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
++{
++ struct fsl_mc_device *dpcon;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpcon_attr attrs;
++ int err;
++
++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
++ FSL_MC_POOL_DPCON, &dpcon);
++ if (err) {
++ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
++ return NULL;
++ }
++
++ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_open() failed\n");
++ goto err_open;
++ }
++
++ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_reset() failed\n");
++ goto err_reset;
++ }
++
++ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
++ if (err) {
++ dev_err(dev, "dpcon_get_attributes() failed\n");
++ goto err_get_attr;
++ }
++
++ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_enable() failed\n");
++ goto err_enable;
++ }
++
++ return dpcon;
++
++err_enable:
++err_get_attr:
++err_reset:
++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
++err_open:
++ fsl_mc_object_free(dpcon);
++
++ return NULL;
++}
++
++static void free_dpcon(struct dpaa2_eth_priv *priv,
++ struct fsl_mc_device *dpcon)
++{
++ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
++ fsl_mc_object_free(dpcon);
++}
++
++static struct dpaa2_eth_channel *
++alloc_channel(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_channel *channel;
++ struct dpcon_attr attr;
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
++
++ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
++ if (!channel)
++ return NULL;
++
++ channel->dpcon = setup_dpcon(priv);
++ if (!channel->dpcon)
++ goto err_setup;
++
++ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
++ &attr);
++ if (err) {
++ dev_err(dev, "dpcon_get_attributes() failed\n");
++ goto err_get_attr;
++ }
++
++ channel->dpcon_id = attr.id;
++ channel->ch_id = attr.qbman_ch_id;
++ channel->priv = priv;
++
++ return channel;
++
++err_get_attr:
++ free_dpcon(priv, channel->dpcon);
++err_setup:
++ kfree(channel);
++ return NULL;
++}
++
++static void free_channel(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *channel)
++{
++ free_dpcon(priv, channel->dpcon);
++ kfree(channel);
++}
++
++/* DPIO setup: allocate and configure QBMan channels, setup core affinity
++ * and register data availability notifications
++ */
++static int setup_dpio(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_io_notification_ctx *nctx;
++ struct dpaa2_eth_channel *channel;
++ struct dpcon_notification_cfg dpcon_notif_cfg;
++ struct device *dev = priv->net_dev->dev.parent;
++ int i, err;
++
++ /* We want the ability to spread ingress traffic (RX, TX conf) to as
++ * many cores as possible, so we need one channel for each core
++	 * (unless there are fewer queues than cores, in which case the extra
++ * channels would be wasted).
++ * Allocate one channel per core and register it to the core's
++ * affine DPIO. If not enough channels are available for all cores
++ * or if some cores don't have an affine DPIO, there will be no
++ * ingress frame processing on those cores.
++ */
++ cpumask_clear(&priv->dpio_cpumask);
++ for_each_online_cpu(i) {
++ /* Try to allocate a channel */
++ channel = alloc_channel(priv);
++ if (!channel) {
++ dev_info(dev,
++ "No affine channel for cpu %d and above\n", i);
++ goto err_alloc_ch;
++ }
++
++ priv->channel[priv->num_channels] = channel;
++
++ nctx = &channel->nctx;
++ nctx->is_cdan = 1;
++ nctx->cb = cdan_cb;
++ nctx->id = channel->ch_id;
++ nctx->desired_cpu = i;
++
++ /* Register the new context */
++ err = dpaa2_io_service_register(NULL, nctx);
++ if (err) {
++ dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
++ /* If no affine DPIO for this core, there's probably
++ * none available for next cores either.
++ */
++ goto err_service_reg;
++ }
++
++ /* Register DPCON notification with MC */
++ dpcon_notif_cfg.dpio_id = nctx->dpio_id;
++ dpcon_notif_cfg.priority = 0;
++ dpcon_notif_cfg.user_ctx = nctx->qman64;
++ err = dpcon_set_notification(priv->mc_io, 0,
++ channel->dpcon->mc_handle,
++ &dpcon_notif_cfg);
++ if (err) {
++ dev_err(dev, "dpcon_set_notification failed()\n");
++ goto err_set_cdan;
++ }
++
++ /* If we managed to allocate a channel and also found an affine
++ * DPIO for this core, add it to the final mask
++ */
++ cpumask_set_cpu(i, &priv->dpio_cpumask);
++ priv->num_channels++;
++
++ /* Stop if we already have enough channels to accommodate all
++ * RX and TX conf queues
++ */
++ if (priv->num_channels == dpaa2_eth_queue_count(priv))
++ break;
++ }
++
++ /* Tx confirmation queues can only be serviced by cpus
++ * with an affine DPIO/channel
++ */
++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
++
++ return 0;
++
++err_set_cdan:
++ dpaa2_io_service_deregister(NULL, nctx);
++err_service_reg:
++ free_channel(priv, channel);
++err_alloc_ch:
++ if (cpumask_empty(&priv->dpio_cpumask)) {
++ dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n");
++ return -ENODEV;
++ }
++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
++
++ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
++ cpumask_pr_args(&priv->dpio_cpumask));
++
++ return 0;
++}
++
++static void free_dpio(struct dpaa2_eth_priv *priv)
++{
++ int i;
++ struct dpaa2_eth_channel *ch;
++
++ /* deregister CDAN notifications and free channels */
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ dpaa2_io_service_deregister(NULL, &ch->nctx);
++ free_channel(priv, ch);
++ }
++}
++
++static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
++ int cpu)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++)
++ if (priv->channel[i]->nctx.desired_cpu == cpu)
++ return priv->channel[i];
++
++ /* We should never get here. Issue a warning and return
++ * the first channel, because it's still better than nothing
++ */
++ dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
++
++ return priv->channel[0];
++}
++
++static void set_fq_affinity(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct cpumask xps_mask = CPU_MASK_NONE;
++ struct dpaa2_eth_fq *fq;
++ int rx_cpu, txc_cpu;
++ int i, err;
++
++ /* For each FQ, pick one channel/CPU to deliver frames to.
++ * This may well change at runtime, either through irqbalance or
++ * through direct user intervention.
++ */
++ rx_cpu = cpumask_first(&priv->dpio_cpumask);
++ txc_cpu = cpumask_first(&priv->txconf_cpumask);
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ switch (fq->type) {
++ case DPAA2_RX_FQ:
++ case DPAA2_RX_ERR_FQ:
++ fq->target_cpu = rx_cpu;
++ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
++ if (rx_cpu >= nr_cpu_ids)
++ rx_cpu = cpumask_first(&priv->dpio_cpumask);
++ break;
++ case DPAA2_TX_CONF_FQ:
++ fq->target_cpu = txc_cpu;
++
++ /* register txc_cpu to XPS */
++ cpumask_set_cpu(txc_cpu, &xps_mask);
++ err = netif_set_xps_queue(priv->net_dev, &xps_mask,
++ fq->flowid);
++ if (err)
++ dev_info_once(dev,
++ "Tx: error setting XPS queue\n");
++ cpumask_clear_cpu(txc_cpu, &xps_mask);
++
++ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask);
++ if (txc_cpu >= nr_cpu_ids)
++ txc_cpu = cpumask_first(&priv->txconf_cpumask);
++ break;
++ default:
++ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
++ }
++ fq->channel = get_affine_channel(priv, fq->target_cpu);
++ }
++}
++
++static void setup_fqs(struct dpaa2_eth_priv *priv)
++{
++ int i;
++
++ /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the
++ * beginning of the queue array.
++ * Number of Rx and Tx queues are the same.
++ * We only support one traffic class for now.
++ */
++ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
++ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
++ priv->fq[priv->num_fqs++].flowid = (u16)i;
++ }
++
++ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
++ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
++ priv->fq[priv->num_fqs++].flowid = (u16)i;
++ }
++
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ /* We have exactly one Rx error queue per DPNI */
++ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
++ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
++#endif
++
++ /* For each FQ, decide on which core to process incoming frames */
++ set_fq_affinity(priv);
++}
++
++/* Allocate and configure one buffer pool for each interface */
++static int setup_dpbp(struct dpaa2_eth_priv *priv)
++{
++ int err;
++ struct fsl_mc_device *dpbp_dev;
++ struct dpbp_attr dpbp_attrs;
++ struct device *dev = priv->net_dev->dev.parent;
++
++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
++ &dpbp_dev);
++ if (err) {
++ dev_err(dev, "DPBP device allocation failed\n");
++ return err;
++ }
++
++ priv->dpbp_dev = dpbp_dev;
++
++ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
++ &dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_open() failed\n");
++ goto err_open;
++ }
++
++ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_reset() failed\n");
++ goto err_reset;
++ }
++
++ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_enable() failed\n");
++ goto err_enable;
++ }
++
++ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
++ &dpbp_attrs);
++ if (err) {
++ dev_err(dev, "dpbp_get_attributes() failed\n");
++ goto err_get_attr;
++ }
++
++ priv->bpid = dpbp_attrs.bpid;
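++	/* Split the buffer budget (sized for flow control) evenly among
++	 * channels; each channel seeds and refills its own share of the pool.
++	 */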
++ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
++
++ return 0;
++
++err_get_attr:
++ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
++err_enable:
++err_reset:
++ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
++err_open:
++ fsl_mc_object_free(dpbp_dev);
++
++ return err;
++}
++
++static void free_dpbp(struct dpaa2_eth_priv *priv)
++{
++ drain_pool(priv);
++ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
++ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
++ fsl_mc_object_free(priv->dpbp_dev);
++}
++
++static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
++{
++ struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 };
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
++
++ priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
++ GFP_KERNEL);
++ if (!priv->cscn_unaligned)
++ return -ENOMEM;
++
++ priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
++ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, priv->cscn_dma)) {
++ dev_err(dev, "Error mapping CSCN memory area\n");
++ err = -ENOMEM;
++ goto err_dma_map;
++ }
++
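++	/* Have the hardware write the congestion state to the CSCN area on
++	 * both entering and exiting congestion; the transmit paths poll that
++	 * area to stop and wake the Tx queues.
++	 */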
++ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
++ cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
++ cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
++ cong_notif_cfg.message_ctx = (u64)priv;
++ cong_notif_cfg.message_iova = priv->cscn_dma;
++ cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
++ DPNI_CONG_OPT_COHERENT_WRITE;
++ err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, 0,
++ &cong_notif_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_congestion_notification failed\n");
++ goto err_set_cong;
++ }
++
++ return 0;
++
++err_set_cong:
++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++err_dma_map:
++ kfree(priv->cscn_unaligned);
++
++ return err;
++}
++
++/* Configure the DPNI object this interface is associated with */
++static int setup_dpni(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev = &ls_dev->dev;
++ struct dpaa2_eth_priv *priv;
++ struct net_device *net_dev;
++ struct dpni_buffer_layout buf_layout;
++ struct dpni_link_cfg cfg = {0};
++ int err;
++
++ net_dev = dev_get_drvdata(dev);
++ priv = netdev_priv(net_dev);
++
++ priv->dpni_id = ls_dev->obj_desc.id;
++
++ /* get a handle for the DPNI object */
++ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
++ if (err) {
++ dev_err(dev, "dpni_open() failed\n");
++ goto err_open;
++ }
++
++ ls_dev->mc_io = priv->mc_io;
++ ls_dev->mc_handle = priv->mc_token;
++
++ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
++ if (err) {
++ dev_err(dev, "dpni_reset() failed\n");
++ goto err_reset;
++ }
++
++ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
++ &priv->dpni_attrs);
++
++ if (err) {
++ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
++ goto err_get_attr;
++ }
++
++	/* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf
++	 * align value must be a multiple of 256. That first revision reports
++	 * zero in the low bits of wriop_version, which is what the check
++	 * below keys on.
++	 */
++ priv->rx_buf_align =
++ priv->dpni_attrs.wriop_version & 0x3ff ?
++ DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1;
++
++ /* Update number of logical FQs in netdev */
++ err = netif_set_real_num_tx_queues(net_dev,
++ dpaa2_eth_queue_count(priv));
++ if (err) {
++ dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err);
++ goto err_set_tx_queues;
++ }
++
++ err = netif_set_real_num_rx_queues(net_dev,
++ dpaa2_eth_queue_count(priv));
++ if (err) {
++ dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err);
++ goto err_set_rx_queues;
++ }
++
++ /* Configure buffer layouts */
++ /* rx buffer */
++ buf_layout.pass_parser_result = true;
++ buf_layout.pass_frame_status = true;
++ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
++ buf_layout.data_align = priv->rx_buf_align;
++ buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM;
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
++ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
++ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
++ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, &buf_layout);
++ if (err) {
++ dev_err(dev,
++ "dpni_set_buffer_layout(RX) failed\n");
++ goto err_buf_layout;
++ }
++
++ /* tx buffer */
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
++ buf_layout.pass_timestamp = true;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, &buf_layout);
++ if (err) {
++ dev_err(dev,
++ "dpni_set_buffer_layout(TX) failed\n");
++ goto err_buf_layout;
++ }
++
++ /* tx-confirm buffer */
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
++ goto err_buf_layout;
++ }
++
++ /* Now that we've set our tx buffer layout, retrieve the minimum
++ * required tx data offset.
++ */
++ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
++ &priv->tx_data_offset);
++ if (err) {
++ dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err);
++ goto err_data_offset;
++ }
++
++ if ((priv->tx_data_offset % 64) != 0)
++		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
++ priv->tx_data_offset);
++
++ /* Accommodate software annotation space (SWA) */
++ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
++
++ /* Enable congestion notifications for Tx queues */
++ err = setup_tx_congestion(priv);
++ if (err)
++ goto err_tx_cong;
++
++ /* allocate classification rule space */
++ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
++ dpaa2_eth_fs_count(priv), GFP_KERNEL);
++	if (!priv->cls_rule) {
++		err = -ENOMEM;
++		goto err_cls_rule;
++	}
++
++ /* Enable flow control */
++ cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
++ priv->tx_pause_frames = 1;
++
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err) {
++		netdev_err(net_dev, "ERROR %d setting link cfg\n", err);
++ goto err_set_link_cfg;
++ }
++
++ return 0;
++
++err_set_link_cfg:
++err_cls_rule:
++err_tx_cong:
++err_data_offset:
++err_buf_layout:
++err_set_rx_queues:
++err_set_tx_queues:
++err_get_attr:
++err_reset:
++ dpni_close(priv->mc_io, 0, priv->mc_token);
++err_open:
++ return err;
++}
++
++static void free_dpni(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
++
++ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
++ if (err)
++ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
++ err);
++
++ dpni_close(priv->mc_io, 0, priv->mc_token);
++
++ kfree(priv->cls_rule);
++
++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ kfree(priv->cscn_unaligned);
++}
++
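++/* Enable or disable Rx queue taildrop for all Rx FQs, and resize the buffer
++ * pool budget to match: taildrop mode uses a smaller, per-DPNI buffer count,
++ * while flow-control mode splits a larger budget across channels.
++ */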
++int setup_fqs_taildrop(struct dpaa2_eth_priv *priv,
++ bool enable)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_taildrop td;
++ int err = 0, i;
++
++ td.enable = enable;
++ td.threshold = DPAA2_ETH_TAILDROP_THRESH;
++
++ if (enable) {
++ priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
++ } else {
++ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
++ priv->num_channels;
++ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
++ }
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ if (priv->fq[i].type != DPAA2_RX_FQ)
++ continue;
++
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
++ priv->fq[i].flowid, &td);
++ if (err) {
++ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
++ break;
++ }
++ }
++
++ return err;
++}
++
++static int setup_rx_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue q = { { 0 } };
++ struct dpni_queue_id qid;
++ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
++ int err;
++
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, 0, fq->flowid, &q, &qid);
++ if (err) {
++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ return err;
++ }
++
++ fq->fqid = qid.fqid;
++
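++	/* Deliver frames from this FQ to its affine channel's DPCON; Rx uses
++	 * priority 1 within the channel, while Tx confirmations use 0.
++	 */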
++ q.destination.id = fq->channel->dpcon_id;
++ q.destination.type = DPNI_DEST_DPCON;
++ q.destination.priority = 1;
++ q.user_context = (u64)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, 0, fq->flowid, q_opt, &q);
++ if (err) {
++ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
++ return err;
++ }
++
++ return 0;
++}
++
++static int setup_tx_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue q = { { 0 } };
++ struct dpni_queue_id qid;
++ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
++ int err;
++
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid);
++ if (err) {
++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ return err;
++ }
++
++ fq->tx_qdbin = qid.qdbin;
++
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid);
++ if (err) {
++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ return err;
++ }
++
++ fq->fqid = qid.fqid;
++
++ q.destination.id = fq->channel->dpcon_id;
++ q.destination.type = DPNI_DEST_DPCON;
++ q.destination.priority = 0;
++ q.user_context = (u64)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q);
++ if (err) {
++		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
++ return err;
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue q = { { 0 } };
++ struct dpni_queue_id qid;
++ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
++ int err;
++
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
++ if (err) {
++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ return err;
++ }
++
++ fq->fqid = qid.fqid;
++
++ q.destination.id = fq->channel->dpcon_id;
++ q.destination.type = DPNI_DEST_DPCON;
++ q.destination.priority = 1;
++ q.user_context = (u64)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
++ if (err) {
++ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
++ return err;
++ }
++
++ return 0;
++}
++#endif
++
++/* default hash key fields */
++static struct dpaa2_eth_hash_fields default_hash_fields[] = {
++ {
++ /* L2 header */
++ .rxnfc_field = RXH_L2DA,
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_DA,
++ .size = 6,
++ }, {
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_SA,
++ .size = 6,
++ }, {
++ /* This is the last ethertype field parsed:
++ * depending on frame format, it can be the MAC ethertype
++ * or the VLAN etype.
++ */
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_TYPE,
++ .size = 2,
++ }, {
++ /* VLAN header */
++ .rxnfc_field = RXH_VLAN,
++ .cls_prot = NET_PROT_VLAN,
++ .cls_field = NH_FLD_VLAN_TCI,
++ .size = 2,
++ }, {
++ /* IP header */
++ .rxnfc_field = RXH_IP_SRC,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_SRC,
++ .size = 4,
++ }, {
++ .rxnfc_field = RXH_IP_DST,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_DST,
++ .size = 4,
++ }, {
++ .rxnfc_field = RXH_L3_PROTO,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_PROTO,
++ .size = 1,
++ }, {
++ /* Using UDP ports, this is functionally equivalent to raw
++ * byte pairs from L4 header.
++ */
++ .rxnfc_field = RXH_L4_B_0_1,
++ .cls_prot = NET_PROT_UDP,
++ .cls_field = NH_FLD_UDP_PORT_SRC,
++ .size = 2,
++ }, {
++ .rxnfc_field = RXH_L4_B_2_3,
++ .cls_prot = NET_PROT_UDP,
++ .cls_field = NH_FLD_UDP_PORT_DST,
++ .size = 2,
++ },
++};
++
++/* Set RX hash options */
++static int set_hash(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpkg_profile_cfg cls_cfg;
++ struct dpni_rx_tc_dist_cfg dist_cfg;
++ u8 *dma_mem;
++ int i;
++ int err = 0;
++
++ memset(&cls_cfg, 0, sizeof(cls_cfg));
++
++ for (i = 0; i < priv->num_hash_fields; i++) {
++ struct dpkg_extract *key =
++ &cls_cfg.extracts[cls_cfg.num_extracts];
++
++ key->type = DPKG_EXTRACT_FROM_HDR;
++ key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot;
++ key->extract.from_hdr.type = DPKG_FULL_FIELD;
++ key->extract.from_hdr.field = priv->hash_fields[i].cls_field;
++ cls_cfg.num_extracts++;
++
++ priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field;
++ }
++
++ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
++ if (!dma_mem)
++ return -ENOMEM;
++
++ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
++ if (err) {
++ dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err);
++ goto err_prep_key;
++ }
++
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
++
++ /* Prepare for setting the rx dist */
++ dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
++ DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
++ dev_err(dev, "DMA mapping failed\n");
++ err = -ENOMEM;
++ goto err_dma_map;
++ }
++
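++	/* Distribute Rx traffic across all configured queues; when flow
++	 * steering is enabled, hashing becomes the miss action for frames
++	 * that match no steering rule.
++	 */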
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ if (dpaa2_eth_fs_enabled(priv)) {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
++ } else {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ }
++
++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
++ dma_unmap_single(dev, dist_cfg.key_cfg_iova,
++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
++ if (err)
++ dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err);
++
++err_dma_map:
++err_prep_key:
++ kfree(dma_mem);
++ return err;
++}
++
++/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
++ * frame queues and channels
++ */
++static int bind_dpni(struct dpaa2_eth_priv *priv)
++{
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ struct dpni_pools_cfg pools_params;
++ struct dpni_error_cfg err_cfg;
++ int err = 0;
++ int i;
++
++ pools_params.num_dpbp = 1;
++ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
++ pools_params.pools[0].backup_pool = 0;
++ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
++ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
++ if (err) {
++ dev_err(dev, "dpni_set_pools() failed\n");
++ return err;
++ }
++
++ /* Verify classification options and disable hashing and/or
++ * flow steering support in case of invalid configuration values
++ */
++ priv->hash_fields = default_hash_fields;
++ priv->num_hash_fields = ARRAY_SIZE(default_hash_fields);
++ check_cls_support(priv);
++
++ /* have the interface implicitly distribute traffic based on
++ * a static hash key
++ */
++ if (dpaa2_eth_hash_enabled(priv)) {
++ err = set_hash(priv);
++ if (err) {
++ dev_err(dev, "Hashing configuration failed\n");
++ return err;
++ }
++ }
++
++ /* Configure handling of error frames */
++ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
++ err_cfg.set_frame_annotation = 1;
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
++#else
++ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
++#endif
++ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
++ &err_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err);
++ return err;
++ }
++
++ /* Configure Rx and Tx conf queues to generate CDANs */
++ for (i = 0; i < priv->num_fqs; i++) {
++ switch (priv->fq[i].type) {
++ case DPAA2_RX_FQ:
++ err = setup_rx_flow(priv, &priv->fq[i]);
++ break;
++ case DPAA2_TX_CONF_FQ:
++ err = setup_tx_flow(priv, &priv->fq[i]);
++ break;
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ case DPAA2_RX_ERR_FQ:
++ err = setup_rx_err_flow(priv, &priv->fq[i]);
++ break;
++#endif
++ default:
++ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
++ return -EINVAL;
++ }
++ if (err)
++ return err;
++ }
++
++ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX,
++ &priv->tx_qdid);
++ if (err) {
++ dev_err(dev, "dpni_get_qdid() failed\n");
++ return err;
++ }
++
++ return 0;
++}
++
++/* Allocate rings for storing incoming frame descriptors */
++static int alloc_rings(struct dpaa2_eth_priv *priv)
++{
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ priv->channel[i]->store =
++ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
++ if (!priv->channel[i]->store) {
++ netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
++ goto err_ring;
++ }
++ }
++
++ return 0;
++
++err_ring:
++ for (i = 0; i < priv->num_channels; i++) {
++ if (!priv->channel[i]->store)
++ break;
++ dpaa2_io_store_destroy(priv->channel[i]->store);
++ }
++
++ return -ENOMEM;
++}
++
++static void free_rings(struct dpaa2_eth_priv *priv)
++{
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++)
++ dpaa2_io_store_destroy(priv->channel[i]->store);
++}
++
++static int netdev_init(struct net_device *net_dev)
++{
++ int err;
++ struct device *dev = net_dev->dev.parent;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
++ u8 bcast_addr[ETH_ALEN];
++ u16 rx_headroom, rx_req_headroom;
++
++ net_dev->netdev_ops = &dpaa2_eth_ops;
++
++ /* Get firmware address, if any */
++ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
++ if (err) {
++ dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err);
++ return err;
++ }
++
++	/* Get the DPNI's currently configured MAC address, if any */
++ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ dpni_mac_addr);
++ if (err) {
++ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
++ return err;
++ }
++
++ /* First check if firmware has any address configured by bootloader */
++ if (!is_zero_ether_addr(mac_addr)) {
++ /* If the DPMAC addr != the DPNI addr, update it */
++ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
++ priv->mc_token,
++ mac_addr);
++ if (err) {
++ dev_err(dev,
++ "dpni_set_primary_mac_addr() failed (%d)\n",
++ err);
++ return err;
++ }
++ }
++ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
++ } else if (is_zero_ether_addr(dpni_mac_addr)) {
++ /* Fills in net_dev->dev_addr, as required by
++ * register_netdevice()
++ */
++ eth_hw_addr_random(net_dev);
++ /* Make the user aware, without cluttering the boot log */
++		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
++ priv->mc_token, net_dev->dev_addr);
++ if (err) {
++ dev_err(dev,
++ "dpni_set_primary_mac_addr() failed (%d)\n", err);
++ return err;
++ }
++ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
++ * practical purposes, this will be our "permanent" mac address,
++ * at least until the next reboot. This move will also permit
++ * register_netdevice() to properly fill up net_dev->perm_addr.
++ */
++ net_dev->addr_assign_type = NET_ADDR_PERM;
++ /* If DPMAC address is non-zero, use that one */
++ } else {
++ /* NET_ADDR_PERM is default, all we have to do is
++ * fill in the device addr.
++ */
++ memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
++ }
++
++ /* Explicitly add the broadcast address to the MAC filtering table;
++ * the MC won't do that for us.
++ */
++ eth_broadcast_addr(bcast_addr);
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
++ if (err) {
++ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
++ /* Won't return an error; at least, we'd have egress traffic */
++ }
++
++ /* Reserve enough space to align buffer as per hardware requirement;
++ * NOTE: priv->tx_data_offset MUST be initialized at this point.
++ */
++ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
++
++	/* Set MTU limits; 68 is the minimum MTU IPv4 requires (RFC 791) */
++ net_dev->min_mtu = 68;
++ net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
++
++ /* Required headroom for Rx skbs, to avoid reallocation on
++ * forwarding path.
++ */
++ rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
++ rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE +
++ DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align);
++ if (rx_req_headroom > rx_headroom)
++ dev_info_once(dev,
++ "Required headroom (%d) greater than available (%d).\n"
++ "This will impact performance due to reallocations.\n",
++ rx_req_headroom, rx_headroom);
++
++ /* Our .ndo_init will be called herein */
++ err = register_netdev(net_dev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev() failed (%d)\n", err);
++ return err;
++ }
++
++ return 0;
++}
++
++static int poll_link_state(void *arg)
++{
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
++ int err;
++
++ while (!kthread_should_stop()) {
++ err = link_state_update(priv);
++ if (unlikely(err))
++ return err;
++
++ msleep(DPAA2_ETH_LINK_STATE_REFRESH);
++ }
++
++ return 0;
++}
++
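++/* The hardirq handler does no work itself; it defers to the threaded handler
++ * below, which is allowed to issue (potentially slow) MC commands.
++ */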
++static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
++{
++ u32 status = 0, clear = 0;
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
++ struct net_device *net_dev = dev_get_drvdata(dev);
++ int err;
++
++ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
++ DPNI_IRQ_INDEX, &status);
++ if (unlikely(err)) {
++		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
++ clear = 0xffffffff;
++ goto out;
++ }
++
++ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
++ clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
++ link_state_update(netdev_priv(net_dev));
++ }
++
++out:
++ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
++ DPNI_IRQ_INDEX, clear);
++ return IRQ_HANDLED;
++}
++
++static int setup_irqs(struct fsl_mc_device *ls_dev)
++{
++ int err = 0;
++ struct fsl_mc_device_irq *irq;
++
++ err = fsl_mc_allocate_irqs(ls_dev);
++ if (err) {
++ dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
++ return err;
++ }
++
++ irq = ls_dev->irqs[0];
++ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
++ dpni_irq0_handler,
++ dpni_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(&ls_dev->dev), &ls_dev->dev);
++ if (err < 0) {
++		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
++ goto free_mc_irq;
++ }
++
++ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
++ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
++ if (err < 0) {
++		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
++ goto free_irq;
++ }
++
++ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
++ DPNI_IRQ_INDEX, 1);
++ if (err < 0) {
++		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
++ goto free_irq;
++ }
++
++ return 0;
++
++free_irq:
++ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
++free_mc_irq:
++ fsl_mc_free_irqs(ls_dev);
++
++ return err;
++}
++
++static void add_ch_napi(struct dpaa2_eth_priv *priv)
++{
++ int i;
++ struct dpaa2_eth_channel *ch;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
++ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
++ NAPI_POLL_WEIGHT);
++ }
++}
++
++static void del_ch_napi(struct dpaa2_eth_priv *priv)
++{
++ int i;
++ struct dpaa2_eth_channel *ch;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ netif_napi_del(&ch->napi);
++ }
++}
++
++/* SysFS support */
++static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ /* No MC API for getting the shaping config. We're stateful. */
++ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
++
++ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
++}
++
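++/* Example (path assumes the interface is named ni0):
++ *   echo "1000 8000" > /sys/class/net/ni0/tx_shaping
++ * sets a 1000 Mbps rate limit with an 8000 byte max burst size.
++ */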
++static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ int err, items;
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ struct dpni_tx_shaping_cfg scfg;
++
++ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
++ if (items != 2) {
++ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
++ return -EINVAL;
++ }
++ /* Size restriction as per MC API documentation */
++ if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
++ pr_err("max_burst_size must be <= %d\n",
++ DPAA2_ETH_MAX_BURST_SIZE);
++ return -EINVAL;
++ }
++
++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg);
++ if (err) {
++ dev_err(dev, "dpni_set_tx_shaping() failed\n");
++ return -EPERM;
++ }
++ /* If successful, save the current configuration for future inquiries */
++ priv->shaping_cfg = scfg;
++
++ return count;
++}
++
++static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++
++ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask);
++}
++
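++/* Takes a cpu list (e.g. "0-3" or "0,2"), as parsed by cpulist_parse();
++ * e.g.: echo 0-1 > /sys/class/net/ni0/txconf_cpumask (interface name assumed).
++ */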
++static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ struct dpaa2_eth_fq *fq;
++ bool running = netif_running(priv->net_dev);
++ int i, err;
++
++ err = cpulist_parse(buf, &priv->txconf_cpumask);
++ if (err)
++ return err;
++
++ /* Only accept CPUs that have an affine DPIO */
++ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) {
++ netdev_info(priv->net_dev,
++ "cpumask must be a subset of 0x%lx\n",
++ *cpumask_bits(&priv->dpio_cpumask));
++ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask,
++ &priv->txconf_cpumask);
++ }
++
++	/* Rewiring the TxConf FQs requires interface shutdown */
++ if (running) {
++ err = dpaa2_eth_stop(priv->net_dev);
++ if (err)
++ return -ENODEV;
++ }
++
++ /* Set the new TxConf FQ affinities */
++ set_fq_affinity(priv);
++
++ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
++ * link up notification is received. Give the polling thread enough time
++ * to detect the link state change, or else we'll end up with the
++ * transmission side forever shut down.
++ */
++ if (priv->do_link_poll)
++ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ if (fq->type != DPAA2_TX_CONF_FQ)
++ continue;
++ setup_tx_flow(priv, fq);
++ }
++
++ if (running) {
++ err = dpaa2_eth_open(priv->net_dev);
++ if (err)
++ return -ENODEV;
++ }
++
++ return count;
++}
++
++static struct device_attribute dpaa2_eth_attrs[] = {
++ __ATTR(txconf_cpumask,
++ 0600,
++ dpaa2_eth_show_txconf_cpumask,
++ dpaa2_eth_write_txconf_cpumask),
++
++ __ATTR(tx_shaping,
++ 0600,
++ dpaa2_eth_show_tx_shaping,
++ dpaa2_eth_write_tx_shaping),
++};
++
++static void dpaa2_eth_sysfs_init(struct device *dev)
++{
++ int i, err;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
++ err = device_create_file(dev, &dpaa2_eth_attrs[i]);
++ if (err) {
++ dev_err(dev, "ERROR creating sysfs file\n");
++ goto undo;
++ }
++ }
++ return;
++
++undo:
++ while (i > 0)
++ device_remove_file(dev, &dpaa2_eth_attrs[--i]);
++}
++
++static void dpaa2_eth_sysfs_remove(struct device *dev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
++ device_remove_file(dev, &dpaa2_eth_attrs[i]);
++}
++
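++/* Probe path: allocate the net device, acquire an MC portal, then set up
++ * the DPNI, DPIO and DPBP objects before registering with the stack.
++ */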
++static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
++{
++ struct device *dev;
++ struct net_device *net_dev = NULL;
++ struct dpaa2_eth_priv *priv = NULL;
++ int err = 0;
++
++ dev = &dpni_dev->dev;
++
++ /* Net device */
++ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
++ if (!net_dev) {
++ dev_err(dev, "alloc_etherdev_mq() failed\n");
++ return -ENOMEM;
++ }
++
++ SET_NETDEV_DEV(net_dev, dev);
++ dev_set_drvdata(dev, net_dev);
++
++ priv = netdev_priv(net_dev);
++ priv->net_dev = net_dev;
++
++ priv->iommu_domain = iommu_get_domain_for_dev(dev);
++
++ /* Obtain a MC portal */
++ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &priv->mc_io);
++ if (err) {
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_portal_alloc;
++ }
++
++ /* MC objects initialization and configuration */
++ err = setup_dpni(dpni_dev);
++ if (err)
++ goto err_dpni_setup;
++
++ err = setup_dpio(priv);
++ if (err) {
++ dev_info(dev, "Defer probing as no DPIO available\n");
++ err = -EPROBE_DEFER;
++ goto err_dpio_setup;
++ }
++
++ setup_fqs(priv);
++
++ err = setup_dpbp(priv);
++ if (err)
++ goto err_dpbp_setup;
++
++ err = bind_dpni(priv);
++ if (err)
++ goto err_bind;
++
++ /* Add a NAPI context for each channel */
++ add_ch_napi(priv);
++ enable_ch_napi(priv);
++
++ /* Percpu statistics */
++ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
++ if (!priv->percpu_stats) {
++ dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
++ err = -ENOMEM;
++ goto err_alloc_percpu_stats;
++ }
++ priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
++ if (!priv->percpu_extras) {
++ dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
++ err = -ENOMEM;
++ goto err_alloc_percpu_extras;
++ }
++
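++ /* Name the interface after the DPNI object id, e.g. "ni0" */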
++ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id);
++ if (!dev_valid_name(net_dev->name)) {
++ dev_warn(&net_dev->dev,
++ "netdevice name \"%s\" cannot be used, reverting to default..\n",
++ net_dev->name);
++ dev_alloc_name(net_dev, "eth%d");
++ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
++ }
++
++ err = netdev_init(net_dev);
++ if (err)
++ goto err_netdev_init;
++
++ /* Configure checksum offload based on current interface flags */
++ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
++ if (err)
++ goto err_csum;
++
++ err = set_tx_csum(priv, !!(net_dev->features &
++ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
++ if (err)
++ goto err_csum;
++
++ err = alloc_rings(priv);
++ if (err)
++ goto err_alloc_rings;
++
++ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
++
++ err = setup_irqs(dpni_dev);
++ if (err) {
++ netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
++ priv->poll_thread = kthread_run(poll_link_state, priv,
++ "%s_poll_link", net_dev->name);
++ if (IS_ERR(priv->poll_thread)) {
++ netdev_err(net_dev, "Error starting polling thread\n");
++ goto err_poll_thread;
++ }
++ priv->do_link_poll = true;
++ }
++
++ dpaa2_eth_sysfs_init(&net_dev->dev);
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ dpaa2_dbg_add(priv);
++#endif
++
++ dev_info(dev, "Probed interface %s\n", net_dev->name);
++ return 0;
++
++err_poll_thread:
++ free_rings(priv);
++err_alloc_rings:
++err_csum:
++ unregister_netdev(net_dev);
++err_netdev_init:
++ free_percpu(priv->percpu_extras);
++err_alloc_percpu_extras:
++ free_percpu(priv->percpu_stats);
++err_alloc_percpu_stats:
++ disable_ch_napi(priv);
++ del_ch_napi(priv);
++err_bind:
++ free_dpbp(priv);
++err_dpbp_setup:
++ free_dpio(priv);
++err_dpio_setup:
++ free_dpni(priv);
++err_dpni_setup:
++ fsl_mc_portal_free(priv->mc_io);
++err_portal_alloc:
++ dev_set_drvdata(dev, NULL);
++ free_netdev(net_dev);
++
++ return err;
++}
++
++static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev;
++ struct net_device *net_dev;
++ struct dpaa2_eth_priv *priv;
++
++ dev = &ls_dev->dev;
++ net_dev = dev_get_drvdata(dev);
++ priv = netdev_priv(net_dev);
++
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ dpaa2_dbg_remove(priv);
++#endif
++ dpaa2_eth_sysfs_remove(&net_dev->dev);
++
++ unregister_netdev(net_dev);
++ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
++
++ if (priv->do_link_poll)
++ kthread_stop(priv->poll_thread);
++ else
++ fsl_mc_free_irqs(ls_dev);
++
++ free_rings(priv);
++ free_percpu(priv->percpu_stats);
++ free_percpu(priv->percpu_extras);
++
++ disable_ch_napi(priv);
++ del_ch_napi(priv);
++ free_dpbp(priv);
++ free_dpio(priv);
++ free_dpni(priv);
++
++ fsl_mc_portal_free(priv->mc_io);
++
++ dev_set_drvdata(dev, NULL);
++ free_netdev(net_dev);
++
++ return 0;
++}
++
++static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpni",
++ },
++ { .vendor = 0x0 }
++};
++MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
++
++static struct fsl_mc_driver dpaa2_eth_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_eth_probe,
++ .remove = dpaa2_eth_remove,
++ .match_id_table = dpaa2_eth_match_id_table
++};
++
++static int __init dpaa2_eth_driver_init(void)
++{
++ int err;
++
++ dpaa2_eth_dbg_init();
++ err = fsl_mc_driver_register(&dpaa2_eth_driver);
++ if (err) {
++ dpaa2_eth_dbg_exit();
++ return err;
++ }
++
++ return 0;
++}
++
++static void __exit dpaa2_eth_driver_exit(void)
++{
++ dpaa2_eth_dbg_exit();
++ fsl_mc_driver_unregister(&dpaa2_eth_driver);
++}
++
++module_init(dpaa2_eth_driver_init);
++module_exit(dpaa2_eth_driver_exit);
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+new file mode 100644
+index 00000000..86cb12e9
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+@@ -0,0 +1,460 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPAA2_ETH_H
++#define __DPAA2_ETH_H
++
++#include <linux/atomic.h>
++#include <linux/netdevice.h>
++#include <linux/if_vlan.h>
++#include "../../fsl-mc/include/dpaa2-io.h"
++#include "dpni.h"
++#include "net.h"
++
++#include "dpaa2-eth-debugfs.h"
++
++#define DPAA2_ETH_STORE_SIZE 16
++
++/* We set a max threshold for how many Tx confirmations we should process
++ * on a NAPI poll call, since they take less processing time than Rx frames.
++ */
++#define TX_CONF_PER_NAPI_POLL 256
++
++/* Maximum number of scatter-gather entries in an ingress frame,
++ * considering the maximum receive frame size is 64K
++ */
++#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
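++/* With 2 KiB Rx buffers this evaluates to 32 entries */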
++
++/* Maximum acceptable MTU value. It is in direct relation with the hardware
++ * enforced Max Frame Length (currently 10k).
++ */
++#define DPAA2_ETH_MFL (10 * 1024)
++#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
++/* Convert L3 MTU to L2 MFL */
++#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
++
++/* Maximum burst size value for Tx shaping */
++#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
++
++/* Maximum number of buffers that can be acquired/released through a single
++ * QBMan command
++ */
++#define DPAA2_ETH_BUFS_PER_CMD 7
++
++/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
++ * frames in the Rx queues (length of the current frame is not
++ * taken into account when making the taildrop decision)
++ */
++#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
++
++/* Buffer quota per queue. Must be large enough such that for minimum sized
++ * frames taildrop kicks in before the bpool gets depleted, so we compute
++ * how many 64B frames fit inside the taildrop threshold and add a margin
++ * to accommodate the buffer refill delay.
++ */
++#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
++#define DPAA2_ETH_NUM_BUFS_TD (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
++#define DPAA2_ETH_REFILL_THRESH_TD \
++ (DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD)
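++/* With the 64 KiB taildrop threshold above, this works out to 1024 frames
++ * per queue, 1280 buffers and a refill threshold of 1273.
++ */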
++
++/* Buffer quota per queue to use when flow control is active. */
++#define DPAA2_ETH_NUM_BUFS_FC 256
++
++/* Hardware requires alignment for ingress/egress buffer addresses
++ * and ingress buffer lengths.
++ */
++#define DPAA2_ETH_RX_BUF_SIZE 2048
++#define DPAA2_ETH_TX_BUF_ALIGN 64
++#define DPAA2_ETH_RX_BUF_ALIGN 64
++#define DPAA2_ETH_RX_BUF_ALIGN_V1 256
++#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
++ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
++
++/* Size of the skb built around an Rx buffer: the buffer itself plus
++ * room for the aligned skb_shared_info that follows it
++ */
++#define DPAA2_ETH_SKB_SIZE \
++ (DPAA2_ETH_RX_BUF_SIZE + \
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
++
++/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
++ * buffers large enough to allow building an skb around them and also account
++ * for alignment restrictions.
++ */
++#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \
++ (DPAA2_ETH_SKB_SIZE + \
++ (p_priv)->rx_buf_align)
++
++/* PTP nominal frequency 1 GHz, i.e. a 1 ns period */
++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
++
++/* Leave enough extra space in the headroom to make sure the skb is
++ * not realloc'd in forwarding scenarios.
++ */
++#define DPAA2_ETH_RX_HEAD_ROOM 192
++
++/* We are accommodating an skb backpointer and some S/G info
++ * in the frame's software annotation. The hardware
++ * options are either 0 or 64, so we choose the latter.
++ */
++#define DPAA2_ETH_SWA_SIZE 64
++
++/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
++struct dpaa2_eth_swa {
++ struct sk_buff *skb;
++ struct scatterlist *scl;
++ int num_sg;
++ int num_dma_bufs;
++};
++
++/* Annotation valid bits in FD FRC */
++#define DPAA2_FD_FRC_FASV 0x8000
++#define DPAA2_FD_FRC_FAEADV 0x4000
++#define DPAA2_FD_FRC_FAPRV 0x2000
++#define DPAA2_FD_FRC_FAIADV 0x1000
++#define DPAA2_FD_FRC_FASWOV 0x0800
++#define DPAA2_FD_FRC_FAICFDV 0x0400
++
++#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
++#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
++ FD_CTRL_SBE | \
++ FD_CTRL_FSE | \
++ FD_CTRL_FAERR)
++
++/* Annotation bits in FD CTRL */
++#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
++
++/* Size of hardware annotation area based on the current buffer layout
++ * configuration
++ */
++#define DPAA2_ETH_RX_HWA_SIZE 64
++#define DPAA2_ETH_TX_HWA_SIZE 128
++
++/* Frame annotation status */
++struct dpaa2_fas {
++ u8 reserved;
++ u8 ppid;
++ __le16 ifpid;
++ __le32 status;
++} __packed;
++
++/* Frame annotation status word is located in the first 8 bytes
++ * of the buffer's hardware annotation area
++ */
++#define DPAA2_FAS_OFFSET 0
++#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
++
++/* Timestamp is located in the next 8 bytes of the buffer's
++ * hardware annotation area
++ */
++#define DPAA2_TS_OFFSET 0x8
++
++/* Frame annotation egress action descriptor */
++#define DPAA2_FAEAD_OFFSET 0x58
++
++struct dpaa2_faead {
++ __le32 conf_fqid;
++ __le32 ctrl;
++};
++
++#define DPAA2_FAEAD_A2V 0x20000000
++#define DPAA2_FAEAD_UPDV 0x00001000
++#define DPAA2_FAEAD_UPD 0x00000010
++
++/* accessors for the hardware annotation fields that we use */
++#define dpaa2_eth_get_hwa(buf_addr) \
++ ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
++
++#define dpaa2_eth_get_fas(buf_addr) \
++ (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
++
++#define dpaa2_eth_get_ts(buf_addr) \
++ (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET)
++
++#define dpaa2_eth_get_faead(buf_addr) \
++ (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET)
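++/* The hardware annotation area starts right after the 64-byte software
++ * annotation, hence the DPAA2_ETH_SWA_SIZE offset in the accessors above.
++ */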
++
++/* Error and status bits in the frame annotation status word */
++/* Debug frame, otherwise supposed to be discarded */
++#define DPAA2_FAS_DISC 0x80000000
++/* MACSEC frame */
++#define DPAA2_FAS_MS 0x40000000
++#define DPAA2_FAS_PTP 0x08000000
++/* Ethernet multicast frame */
++#define DPAA2_FAS_MC 0x04000000
++/* Ethernet broadcast frame */
++#define DPAA2_FAS_BC 0x02000000
++#define DPAA2_FAS_KSE 0x00040000
++#define DPAA2_FAS_EOFHE 0x00020000
++#define DPAA2_FAS_MNLE 0x00010000
++#define DPAA2_FAS_TIDE 0x00008000
++#define DPAA2_FAS_PIEE 0x00004000
++/* Frame length error */
++#define DPAA2_FAS_FLE 0x00002000
++/* Frame physical error */
++#define DPAA2_FAS_FPE 0x00001000
++#define DPAA2_FAS_PTE 0x00000080
++#define DPAA2_FAS_ISP 0x00000040
++#define DPAA2_FAS_PHE 0x00000020
++#define DPAA2_FAS_BLE 0x00000010
++/* L3 csum validation performed */
++#define DPAA2_FAS_L3CV 0x00000008
++/* L3 csum error */
++#define DPAA2_FAS_L3CE 0x00000004
++/* L4 csum validation performed */
++#define DPAA2_FAS_L4CV 0x00000002
++/* L4 csum error */
++#define DPAA2_FAS_L4CE 0x00000001
++/* Possible errors on the ingress path */
++#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \
++ (DPAA2_FAS_EOFHE) | \
++ (DPAA2_FAS_MNLE) | \
++ (DPAA2_FAS_TIDE) | \
++ (DPAA2_FAS_PIEE) | \
++ (DPAA2_FAS_FLE) | \
++ (DPAA2_FAS_FPE) | \
++ (DPAA2_FAS_PTE) | \
++ (DPAA2_FAS_ISP) | \
++ (DPAA2_FAS_PHE) | \
++ (DPAA2_FAS_BLE) | \
++ (DPAA2_FAS_L3CE) | \
++ (DPAA2_FAS_L4CE))
++/* Tx errors */
++#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \
++ (DPAA2_FAS_EOFHE) | \
++ (DPAA2_FAS_MNLE) | \
++ (DPAA2_FAS_TIDE))
++
++/* Time in milliseconds between link state updates */
++#define DPAA2_ETH_LINK_STATE_REFRESH 1000
++
++/* Number of times to retry a frame enqueue before giving up.
++ * Value determined empirically, in order to minimize the number
++ * of frames dropped on Tx
++ */
++#define DPAA2_ETH_ENQUEUE_RETRIES 10
++
++/* Tx congestion entry & exit thresholds, in number of bytes.
++ * We allow a maximum of 512KB worth of frames pending processing on the Tx
++ * queues of an interface
++ */
++#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
++#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10)
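++/* i.e. enter congestion at 524288 bytes and exit at 471859 bytes */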
++
++/* Driver statistics, other than those in struct rtnl_link_stats64.
++ * These are usually collected per-CPU and aggregated by ethtool.
++ */
++struct dpaa2_eth_drv_stats {
++ __u64 tx_conf_frames;
++ __u64 tx_conf_bytes;
++ __u64 tx_sg_frames;
++ __u64 tx_sg_bytes;
++ __u64 rx_sg_frames;
++ __u64 rx_sg_bytes;
++ /* Enqueues retried due to portal busy */
++ __u64 tx_portal_busy;
++};
++
++/* Per-FQ statistics */
++struct dpaa2_eth_fq_stats {
++ /* Number of frames received on this queue */
++ __u64 frames;
++ /* Number of times this queue entered congestion */
++ __u64 congestion_entry;
++};
++
++/* Per-channel statistics */
++struct dpaa2_eth_ch_stats {
++ /* Volatile dequeues retried due to portal busy */
++ __u64 dequeue_portal_busy;
++ /* Number of CDANs; useful to estimate avg NAPI len */
++ __u64 cdan;
++ /* Number of frames received on queues from this channel */
++ __u64 frames;
++ /* Pull errors */
++ __u64 pull_err;
++};
++
++/* Maximum number of queues associated with a DPNI */
++#define DPAA2_ETH_MAX_RX_QUEUES 16
++#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
++#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
++#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
++ DPAA2_ETH_MAX_TX_QUEUES + \
++ DPAA2_ETH_MAX_RX_ERR_QUEUES)
++
++#define DPAA2_ETH_MAX_DPCONS NR_CPUS
++
++enum dpaa2_eth_fq_type {
++ DPAA2_RX_FQ = 0,
++ DPAA2_TX_CONF_FQ,
++ DPAA2_RX_ERR_FQ
++};
++
++struct dpaa2_eth_priv;
++
++struct dpaa2_eth_fq {
++ u32 fqid;
++ u32 tx_qdbin;
++ u16 flowid;
++ int target_cpu;
++ struct dpaa2_eth_channel *channel;
++ enum dpaa2_eth_fq_type type;
++
++ void (*consume)(struct dpaa2_eth_priv *,
++ struct dpaa2_eth_channel *,
++ const struct dpaa2_fd *,
++ struct napi_struct *,
++ u16 queue_id);
++ struct dpaa2_eth_fq_stats stats;
++};
++
++struct dpaa2_eth_channel {
++ struct dpaa2_io_notification_ctx nctx;
++ struct fsl_mc_device *dpcon;
++ int dpcon_id;
++ int ch_id;
++ int dpio_id;
++ struct napi_struct napi;
++ struct dpaa2_io_store *store;
++ struct dpaa2_eth_priv *priv;
++ int buf_count;
++ struct dpaa2_eth_ch_stats stats;
++};
++
++struct dpaa2_eth_cls_rule {
++ struct ethtool_rx_flow_spec fs;
++ bool in_use;
++};
++
++struct dpaa2_eth_hash_fields {
++ u64 rxnfc_field;
++ enum net_prot cls_prot;
++ int cls_field;
++ int offset;
++ int size;
++};
++
++/* Driver private data */
++struct dpaa2_eth_priv {
++ struct net_device *net_dev;
++
++ /* Standard statistics */
++ struct rtnl_link_stats64 __percpu *percpu_stats;
++ /* Extra stats, in addition to the ones known by the kernel */
++ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
++ struct iommu_domain *iommu_domain;
++
++ bool ts_tx_en; /* Tx timestamping enabled */
++ bool ts_rx_en; /* Rx timestamping enabled */
++
++ u16 tx_data_offset;
++ u16 rx_buf_align;
++
++ u16 bpid;
++ u16 tx_qdid;
++
++ int tx_pause_frames;
++ int num_bufs;
++ int refill_thresh;
++
++ /* Tx congestion notifications are written here */
++ void *cscn_mem;
++ void *cscn_unaligned;
++ dma_addr_t cscn_dma;
++
++ u8 num_fqs;
++ /* Tx queues are at the beginning of the array */
++ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
++
++ u8 num_channels;
++ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
++
++ int dpni_id;
++ struct dpni_attr dpni_attrs;
++ struct fsl_mc_device *dpbp_dev;
++
++ struct fsl_mc_io *mc_io;
++ /* SysFS-controlled affinity mask for TxConf FQs */
++ struct cpumask txconf_cpumask;
++ /* Cores which have an affine DPIO/DPCON.
++ * This is the cpu set on which Rx frames are processed;
++ * Tx confirmation frames are processed on a subset of this,
++ * depending on user settings.
++ */
++ struct cpumask dpio_cpumask;
++
++ u16 mc_token;
++
++ struct dpni_link_state link_state;
++ bool do_link_poll;
++ struct task_struct *poll_thread;
++
++ struct dpaa2_eth_hash_fields *hash_fields;
++ u8 num_hash_fields;
++ /* enabled ethtool hashing bits */
++ u64 rx_flow_hash;
++
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ struct dpaa2_debugfs dbg;
++#endif
++
++ /* array of classification rules */
++ struct dpaa2_eth_cls_rule *cls_rule;
++
++ struct dpni_tx_shaping_cfg shaping_cfg;
++};
++
++#define dpaa2_eth_hash_enabled(priv) \
++ ((priv)->dpni_attrs.num_queues > 1)
++
++#define dpaa2_eth_fs_enabled(priv) \
++ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
++
++#define dpaa2_eth_fs_mask_enabled(priv) \
++ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
++
++#define dpaa2_eth_fs_count(priv) \
++ ((priv)->dpni_attrs.fs_entries)
++
++/* size of DMA memory used to pass configuration to classifier, in bytes */
++#define DPAA2_CLASSIFIER_DMA_SIZE 256
++
++extern const struct ethtool_ops dpaa2_ethtool_ops;
++extern const char dpaa2_eth_drv_version[];
++
++static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
++{
++ return priv->dpni_attrs.num_queues;
++}
++
++void check_cls_support(struct dpaa2_eth_priv *priv);
++
++int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, bool enable);
++#endif /* __DPAA2_ETH_H */
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+new file mode 100644
+index 00000000..9859814e
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+@@ -0,0 +1,856 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "dpni.h" /* DPNI_LINK_OPT_* */
++#include "dpaa2-eth.h"
++
++/* To be kept in sync with dpni_statistics */
++static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
++ "rx frames",
++ "rx bytes",
++ "rx mcast frames",
++ "rx mcast bytes",
++ "rx bcast frames",
++ "rx bcast bytes",
++ "tx frames",
++ "tx bytes",
++ "tx mcast frames",
++ "tx mcast bytes",
++ "tx bcast frames",
++ "tx bcast bytes",
++ "rx filtered frames",
++ "rx discarded frames",
++ "rx nobuffer discards",
++ "tx discarded frames",
++ "tx confirmed frames",
++};
++
++#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
++
++/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */
++static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
++ /* per-cpu stats */
++
++ "tx conf frames",
++ "tx conf bytes",
++ "tx sg frames",
++ "tx sg bytes",
++ "rx sg frames",
++ "rx sg bytes",
++ /* how many times we had to retry the enqueue command */
++ "enqueue portal busy",
++
++ /* Channel stats */
++ /* How many times we had to retry the volatile dequeue command */
++ "dequeue portal busy",
++ "channel pull errors",
++ /* Number of notifications received */
++ "cdan",
++ "tx congestion state",
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ /* FQ stats */
++ "rx pending frames",
++ "rx pending bytes",
++ "tx conf pending frames",
++ "tx conf pending bytes",
++ "buffer count"
++#endif
++};
++
++#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
++
++static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
++ strlcpy(drvinfo->version, dpaa2_eth_drv_version,
++ sizeof(drvinfo->version));
++ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
++ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
++}
++
++static int dpaa2_eth_get_settings(struct net_device *net_dev,
++ struct ethtool_cmd *cmd)
++{
++ struct dpni_link_state state = {0};
++ int err = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d getting link state", err);
++ goto out;
++ }
++
++ /* At the moment, we have no way of interrogating the DPMAC
++ * from the DPNI side - and for that matter there may exist
++ * no DPMAC at all. So for now we just don't report anything
++ * beyond the DPNI attributes.
++ */
++ if (state.options & DPNI_LINK_OPT_AUTONEG)
++ cmd->autoneg = AUTONEG_ENABLE;
++ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
++ cmd->duplex = DUPLEX_FULL;
++ ethtool_cmd_speed_set(cmd, state.rate);
++
++out:
++ return err;
++}
++
++static int dpaa2_eth_set_settings(struct net_device *net_dev,
++ struct ethtool_cmd *cmd)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_link_state state = {0};
++ struct dpni_link_cfg cfg = {0};
++ int err = 0;
++
++ netdev_dbg(net_dev, "Setting link parameters...");
++
++ /* Need to interrogate on link state to get flow control params */
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d getting link state", err);
++ goto out;
++ }
++
++ cfg.options = state.options;
++ cfg.rate = ethtool_cmd_speed(cmd);
++ if (cmd->autoneg == AUTONEG_ENABLE)
++ cfg.options |= DPNI_LINK_OPT_AUTONEG;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
++ if (cmd->duplex == DUPLEX_HALF)
++ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
++
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err)
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
++
++out:
++ return err;
++}
++
++static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *pause)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_link_state state = {0};
++ int err;
++
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err)
++ netdev_dbg(net_dev, "ERROR %d getting link state", err);
++
++ /* For now, pause frame autonegotiation is not controlled separately */
++ pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
++ pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
++ pause->tx_pause = pause->rx_pause ^
++ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
++}
++
++static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *pause)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_link_state state = {0};
++ struct dpni_link_cfg cfg = {0};
++ u32 current_tx_pause;
++ int err = 0;
++
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_dbg(net_dev, "ERROR %d getting link state", err);
++ goto out;
++ }
++
++ cfg.rate = state.rate;
++ cfg.options = state.options;
++ current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
++ !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
++
++ if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
++ netdev_warn(net_dev,
++ "WARN: Can't change pause frames autoneg separately\n");
++
++ if (pause->rx_pause)
++ cfg.options |= DPNI_LINK_OPT_PAUSE;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
++
++ if (pause->rx_pause ^ pause->tx_pause)
++ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
++
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err) {
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
++ goto out;
++ }
++
++ /* Enable / disable taildrops if Tx pause frames have changed */
++ if (current_tx_pause == pause->tx_pause)
++ goto out;
++
++ err = setup_fqs_taildrop(priv, !pause->tx_pause);
++ if (err)
++ netdev_dbg(net_dev, "ERROR %d configuring taildrop", err);
++
++ priv->tx_pause_frames = pause->tx_pause;
++out:
++ return err;
++}
++
++static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
++ u8 *data)
++{
++ u8 *p = data;
++ int i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
++ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
++ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ break;
++ }
++}
++
++static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
++ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++/* Fill in hardware counters, as returned by the MC */
++static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ int i = 0; /* Current index in the data array */
++ int j = 0, k, err;
++ union dpni_statistics dpni_stats;
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ u32 fcnt, bcnt;
++ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
++ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
++ u32 buf_cnt;
++#endif
++ u64 cdan = 0;
++ u64 portal_busy = 0, pull_err = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_eth_drv_stats *extras;
++ struct dpaa2_eth_ch_stats *ch_stats;
++
++ memset(data, 0,
++ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
++
++ /* Fill in standard counters, from DPNI statistics */
++ for (j = 0; j <= 2; j++) {
++ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
++ j, &dpni_stats);
++ if (err != 0)
++ netdev_warn(net_dev, "Err %d getting DPNI stats page %d",
++ err, j);
++
++ switch (j) {
++ case 0:
++ *(data + i++) = dpni_stats.page_0.ingress_all_frames;
++ *(data + i++) = dpni_stats.page_0.ingress_all_bytes;
++ *(data + i++) = dpni_stats.page_0.ingress_multicast_frames;
++ *(data + i++) = dpni_stats.page_0.ingress_multicast_bytes;
++ *(data + i++) = dpni_stats.page_0.ingress_broadcast_frames;
++ *(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes;
++ break;
++ case 1:
++ *(data + i++) = dpni_stats.page_1.egress_all_frames;
++ *(data + i++) = dpni_stats.page_1.egress_all_bytes;
++ *(data + i++) = dpni_stats.page_1.egress_multicast_frames;
++ *(data + i++) = dpni_stats.page_1.egress_multicast_bytes;
++ *(data + i++) = dpni_stats.page_1.egress_broadcast_frames;
++ *(data + i++) = dpni_stats.page_1.egress_broadcast_bytes;
++ break;
++ case 2:
++ *(data + i++) = dpni_stats.page_2.ingress_filtered_frames;
++ *(data + i++) = dpni_stats.page_2.ingress_discarded_frames;
++ *(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards;
++ *(data + i++) = dpni_stats.page_2.egress_discarded_frames;
++ *(data + i++) = dpni_stats.page_2.egress_confirmed_frames;
++ break;
++ default:
++ break;
++ }
++ }
++
++ /* Fill in per-cpu extra stats */
++ for_each_online_cpu(k) {
++ extras = per_cpu_ptr(priv->percpu_extras, k);
++ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
++ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
++ }
++
++ i += j;
++
++ /* We may be using fewer DPIOs than actual CPUs */
++ for (j = 0; j < priv->num_channels; j++) {
++ ch_stats = &priv->channel[j]->stats;
++ cdan += ch_stats->cdan;
++ portal_busy += ch_stats->dequeue_portal_busy;
++ pull_err += ch_stats->pull_err;
++ }
++
++ *(data + i++) = portal_busy;
++ *(data + i++) = pull_err;
++ *(data + i++) = cdan;
++
++ *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ for (j = 0; j < priv->num_fqs; j++) {
++ /* Query FQ instantaneous counts */
++ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
++ &fcnt, &bcnt);
++ if (err) {
++ netdev_warn(net_dev, "FQ query error %d", err);
++ return;
++ }
++
++ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
++ fcnt_tx_total += fcnt;
++ bcnt_tx_total += bcnt;
++ } else {
++ fcnt_rx_total += fcnt;
++ bcnt_rx_total += bcnt;
++ }
++ }
++
++ *(data + i++) = fcnt_rx_total;
++ *(data + i++) = bcnt_rx_total;
++ *(data + i++) = fcnt_tx_total;
++ *(data + i++) = bcnt_tx_total;
++
++ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
++ if (err) {
++ netdev_warn(net_dev, "Buffer count query error %d\n", err);
++ return;
++ }
++ *(data + i++) = buf_cnt;
++#endif
++}
++
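++/* Byte offset of a protocol field inside the classification key,
++ * or -1 if the field is not part of the key
++ */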
++static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
++{
++ int i, off = 0;
++
++ for (i = 0; i < priv->num_hash_fields; i++) {
++ if (priv->hash_fields[i].cls_prot == prot &&
++ priv->hash_fields[i].cls_field == field)
++ return off;
++ off += priv->hash_fields[i].size;
++ }
++
++ return -1;
++}
++
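++/* Total size, in bytes, of the classification/hash key */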
++static u8 cls_key_size(struct dpaa2_eth_priv *priv)
++{
++ u8 i, size = 0;
++
++ for (i = 0; i < priv->num_hash_fields; i++)
++ size += priv->hash_fields[i].size;
++
++ return size;
++}
++
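++/* Sanity-check hashing and flow steering support against the DPNI
++ * attributes; on mismatch, flow steering is disabled for this interface.
++ */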
++void check_cls_support(struct dpaa2_eth_priv *priv)
++{
++ u8 key_size = cls_key_size(priv);
++ struct device *dev = priv->net_dev->dev.parent;
++
++ if (dpaa2_eth_hash_enabled(priv)) {
++ if (priv->dpni_attrs.fs_key_size < key_size) {
++ dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
++ priv->dpni_attrs.fs_key_size,
++ key_size);
++ goto disable_fs;
++ }
++ if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
++ dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
++ DPKG_MAX_NUM_OF_EXTRACTS);
++ goto disable_fs;
++ }
++ }
++
++ if (dpaa2_eth_fs_enabled(priv)) {
++ if (!dpaa2_eth_hash_enabled(priv)) {
++ dev_info(dev, "Insufficient queues. Steering is disabled\n");
++ goto disable_fs;
++ }
++
++ if (!dpaa2_eth_fs_mask_enabled(priv)) {
++ dev_info(dev, "Key masks not supported. Steering is disabled\n");
++ goto disable_fs;
++ }
++ }
++
++ return;
++
++disable_fs:
++ priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
++ priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
++}
++
++static int prep_l4_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_tcpip4_spec *l4_value,
++ struct ethtool_tcpip4_spec *l4_mask,
++ void *key, void *mask, u8 l4_proto)
++{
++ int offset;
++
++ if (l4_mask->tos) {
++ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
++ return -EOPNOTSUPP;
++ }
++
++ if (l4_mask->ip4src) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
++ *(u32 *)(key + offset) = l4_value->ip4src;
++ *(u32 *)(mask + offset) = l4_mask->ip4src;
++ }
++
++ if (l4_mask->ip4dst) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
++ *(u32 *)(key + offset) = l4_value->ip4dst;
++ *(u32 *)(mask + offset) = l4_mask->ip4dst;
++ }
++
++ if (l4_mask->psrc) {
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
++ *(u32 *)(key + offset) = l4_value->psrc;
++ *(u32 *)(mask + offset) = l4_mask->psrc;
++ }
++
++ if (l4_mask->pdst) {
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
++ *(u32 *)(key + offset) = l4_value->pdst;
++ *(u32 *)(mask + offset) = l4_mask->pdst;
++ }
++
++ /* Only apply the rule for the user-specified L4 protocol
++ * and if ethertype matches IPv4
++ */
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
++ *(u16 *)(key + offset) = htons(ETH_P_IP);
++ *(u16 *)(mask + offset) = 0xFFFF;
++
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
++ *(u8 *)(key + offset) = l4_proto;
++ *(u8 *)(mask + offset) = 0xFF;
++
++ /* TODO: check IP version */
++
++ return 0;
++}
++
++static int prep_eth_rule(struct dpaa2_eth_priv *priv,
++ struct ethhdr *eth_value, struct ethhdr *eth_mask,
++ void *key, void *mask)
++{
++ int offset;
++
++ if (eth_mask->h_proto) {
++ netdev_err(priv->net_dev, "Ethertype is not supported!\n");
++ return -EOPNOTSUPP;
++ }
++
++ if (!is_zero_ether_addr(eth_mask->h_source)) {
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
++ ether_addr_copy(key + offset, eth_value->h_source);
++ ether_addr_copy(mask + offset, eth_mask->h_source);
++ }
++
++ if (!is_zero_ether_addr(eth_mask->h_dest)) {
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
++ ether_addr_copy(key + offset, eth_value->h_dest);
++ ether_addr_copy(mask + offset, eth_mask->h_dest);
++ }
++
++ return 0;
++}
++
++static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_usrip4_spec *uip_value,
++ struct ethtool_usrip4_spec *uip_mask,
++ void *key, void *mask)
++{
++ int offset;
++
++ if (uip_mask->tos)
++ return -EOPNOTSUPP;
++
++ if (uip_mask->ip4src) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
++ *(u32 *)(key + offset) = uip_value->ip4src;
++ *(u32 *)(mask + offset) = uip_mask->ip4src;
++ }
++
++ if (uip_mask->ip4dst) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
++ *(u32 *)(key + offset) = uip_value->ip4dst;
++ *(u32 *)(mask + offset) = uip_mask->ip4dst;
++ }
++
++ if (uip_mask->proto) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
++ *(u32 *)(key + offset) = uip_value->proto;
++ *(u32 *)(mask + offset) = uip_mask->proto;
++ }
++ if (uip_mask->l4_4_bytes) {
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
++ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
++ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
++
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
++ *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
++ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
++ }
++
++ /* Ethertype must be IP */
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
++ *(u16 *)(key + offset) = htons(ETH_P_IP);
++ *(u16 *)(mask + offset) = 0xFFFF;
++
++ return 0;
++}
++
++static int prep_ext_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_flow_ext *ext_value,
++ struct ethtool_flow_ext *ext_mask,
++ void *key, void *mask)
++{
++ int offset;
++
++ if (ext_mask->vlan_etype)
++ return -EOPNOTSUPP;
++
++ if (ext_mask->vlan_tci) {
++ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
++ *(u16 *)(key + offset) = ext_value->vlan_tci;
++ *(u16 *)(mask + offset) = ext_mask->vlan_tci;
++ }
++
++ return 0;
++}
++
++static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_flow_ext *ext_value,
++ struct ethtool_flow_ext *ext_mask,
++ void *key, void *mask)
++{
++ int offset;
++
++ if (!is_zero_ether_addr(ext_mask->h_dest)) {
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
++ ether_addr_copy(key + offset, ext_value->h_dest);
++ ether_addr_copy(mask + offset, ext_mask->h_dest);
++ }
++
++ return 0;
++}
++
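++/* Build the classification key and mask from an ethtool flow spec;
++ * the mask is written immediately after the key, at key + key_size.
++ */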
++static int prep_cls_rule(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ void *key)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ const u8 key_size = cls_key_size(priv);
++ void *msk = key + key_size;
++ int err;
++
++ memset(key, 0, key_size * 2);
++
++ switch (fs->flow_type & 0xff) {
++ case TCP_V4_FLOW:
++ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
++ &fs->m_u.tcp_ip4_spec, key, msk,
++ IPPROTO_TCP);
++ break;
++ case UDP_V4_FLOW:
++ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
++ &fs->m_u.udp_ip4_spec, key, msk,
++ IPPROTO_UDP);
++ break;
++ case SCTP_V4_FLOW:
++ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
++ &fs->m_u.sctp_ip4_spec, key, msk,
++ IPPROTO_SCTP);
++ break;
++ case ETHER_FLOW:
++ err = prep_eth_rule(priv, &fs->h_u.ether_spec,
++ &fs->m_u.ether_spec, key, msk);
++ break;
++ case IP_USER_FLOW:
++ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
++ &fs->m_u.usr_ip4_spec, key, msk);
++ break;
++ default:
++ /* TODO: AH, ESP */
++ return -EOPNOTSUPP;
++ }
++ if (err)
++ return err;
++
++ if (fs->flow_type & FLOW_EXT) {
++ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++ if (err)
++ return err;
++ }
++
++ if (fs->flow_type & FLOW_MAC_EXT) {
++ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int del_cls(struct net_device *net_dev, int location);
++
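++/* Add or remove a flow steering entry. Key and mask are laid out back to
++ * back in one buffer so a single DMA mapping covers both.
++ */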
++static int do_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ bool add)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ const int rule_cnt = dpaa2_eth_fs_count(priv);
++ struct dpni_rule_cfg rule_cfg;
++ struct dpni_fs_action_cfg fs_act = { 0 };
++ void *dma_mem;
++ int err = 0;
++
++ if (!dpaa2_eth_fs_enabled(priv)) {
++ netdev_err(net_dev, "dev does not support steering!\n");
++ /* dev doesn't support steering */
++ return -EOPNOTSUPP;
++ }
++
++ if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
++ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
++ fs->location >= rule_cnt)
++ return -EINVAL;
++
++ /* When adding a new rule, check if the location is available,
++ * and if not, free the existing table entry before inserting
++ * the new one
++ */
++ if (add && (priv->cls_rule[fs->location].in_use == true))
++ del_cls(net_dev, fs->location);
++
++ memset(&rule_cfg, 0, sizeof(rule_cfg));
++ rule_cfg.key_size = cls_key_size(priv);
++
++ /* allocate twice the key size, for the actual key and for mask */
++ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
++ if (!dma_mem)
++ return -ENOMEM;
++
++ err = prep_cls_rule(net_dev, fs, dma_mem);
++ if (err)
++ goto err_free_mem;
++
++ rule_cfg.key_iova = dma_map_single(dev, dma_mem,
++ rule_cfg.key_size * 2,
++ DMA_TO_DEVICE);
++
++ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
++
++ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
++ fs_act.options |= DPNI_FS_OPT_DISCARD;
++ else
++ fs_act.flow_id = fs->ring_cookie;
++
++ if (add)
++ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
++ 0, fs->location, &rule_cfg, &fs_act);
++ else
++ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token,
++ 0, &rule_cfg);
++
++ dma_unmap_single(dev, rule_cfg.key_iova,
++ rule_cfg.key_size * 2, DMA_TO_DEVICE);
++
++ if (err)
++ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
++
++err_free_mem:
++ kfree(dma_mem);
++
++ return err;
++}
++
++static int add_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
++
++ err = do_cls(net_dev, fs, true);
++ if (err)
++ return err;
++
++ priv->cls_rule[fs->location].in_use = true;
++ priv->cls_rule[fs->location].fs = *fs;
++
++ return 0;
++}
++
++static int del_cls(struct net_device *net_dev, int location)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
++
++ err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
++ if (err)
++ return err;
++
++ priv->cls_rule[location].in_use = false;
++
++ return 0;
++}
++
++static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc)
++{
++ int err = 0;
++
++ switch (rxnfc->cmd) {
++ case ETHTOOL_SRXCLSRLINS:
++ err = add_cls(net_dev, &rxnfc->fs);
++ break;
++
++ case ETHTOOL_SRXCLSRLDEL:
++ err = del_cls(net_dev, rxnfc->fs.location);
++ break;
++
++ default:
++ err = -EOPNOTSUPP;
++ }
++
++ return err;
++}
++
++static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ const int rule_cnt = dpaa2_eth_fs_count(priv);
++ int i, j;
++
++ switch (rxnfc->cmd) {
++ case ETHTOOL_GRXFH:
++ /* we purposely ignore cmd->flow_type, because the hashing key
++ * is the same (and fixed) for all protocols
++ */
++ rxnfc->data = priv->rx_flow_hash;
++ break;
++
++ case ETHTOOL_GRXRINGS:
++ rxnfc->data = dpaa2_eth_queue_count(priv);
++ break;
++
++ case ETHTOOL_GRXCLSRLCNT:
++ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
++ if (priv->cls_rule[i].in_use)
++ rxnfc->rule_cnt++;
++ rxnfc->data = rule_cnt;
++ break;
++
++ case ETHTOOL_GRXCLSRULE:
++ if (!priv->cls_rule[rxnfc->fs.location].in_use)
++ return -EINVAL;
++
++ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
++ break;
++
++ case ETHTOOL_GRXCLSRLALL:
++ for (i = 0, j = 0; i < rule_cnt; i++) {
++ if (!priv->cls_rule[i].in_use)
++ continue;
++ if (j == rxnfc->rule_cnt)
++ return -EMSGSIZE;
++ rule_locs[j++] = i;
++ }
++ rxnfc->rule_cnt = j;
++ rxnfc->data = rule_cnt;
++ break;
++
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++const struct ethtool_ops dpaa2_ethtool_ops = {
++ .get_drvinfo = dpaa2_eth_get_drvinfo,
++ .get_link = ethtool_op_get_link,
++ .get_settings = dpaa2_eth_get_settings,
++ .set_settings = dpaa2_eth_set_settings,
++ .get_pauseparam = dpaa2_eth_get_pauseparam,
++ .set_pauseparam = dpaa2_eth_set_pauseparam,
++ .get_sset_count = dpaa2_eth_get_sset_count,
++ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
++ .get_strings = dpaa2_eth_get_strings,
++ .get_rxnfc = dpaa2_eth_get_rxnfc,
++ .set_rxnfc = dpaa2_eth_set_rxnfc,
++};
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+new file mode 100644
+index 00000000..02290a08
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+@@ -0,0 +1,176 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPKG_H_
++#define __FSL_DPKG_H_
++
++#include <linux/types.h>
++#include "net.h"
++
++/* Data Path Key Generator API
++ * Contains initialization APIs and runtime APIs for the Key Generator
++ */
++
++/** Key Generator properties */
++
++/**
++ * Number of masks per key extraction
++ */
++#define DPKG_NUM_OF_MASKS 4
++/**
++ * Number of extractions per key profile
++ */
++#define DPKG_MAX_NUM_OF_EXTRACTS 10
++
++/**
++ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
++ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
++ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
++ * @DPKG_FULL_FIELD: Extract a full field
++ */
++enum dpkg_extract_from_hdr_type {
++ DPKG_FROM_HDR = 0,
++ DPKG_FROM_FIELD = 1,
++ DPKG_FULL_FIELD = 2
++};
++
++/**
++ * enum dpkg_extract_type - Enumeration for selecting extraction type
++ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
++ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
++ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
++ * e.g. can be used to extract header existence;
++ * please refer to 'Parse Result definition' section in the parser BG
++ */
++enum dpkg_extract_type {
++ DPKG_EXTRACT_FROM_HDR = 0,
++ DPKG_EXTRACT_FROM_DATA = 1,
++ DPKG_EXTRACT_FROM_PARSE = 3
++};
++
++/**
++ * struct dpkg_mask - A structure for defining a single extraction mask
++ * @mask: Byte mask for the extracted content
++ * @offset: Offset within the extracted content
++ */
++struct dpkg_mask {
++ u8 mask;
++ u8 offset;
++};
++
++/**
++ * struct dpkg_extract - A structure for defining a single extraction
++ * @type: Determines how the union below is interpreted:
++ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
++ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
++ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
++ * @extract: Selects extraction method
++ * @num_of_byte_masks: Defines the number of valid entries in the array below;
++ * This is also the number of bytes to be used as masks
++ * @masks: Masks parameters
++ */
++struct dpkg_extract {
++ enum dpkg_extract_type type;
++ /**
++ * union extract - Selects extraction method
++ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
++ */
++ union {
++ /**
++ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @prot: Any of the supported headers
++ * @type: Defines the type of header extraction:
++ * DPKG_FROM_HDR: use size & offset below;
++ * DPKG_FROM_FIELD: use field, size and offset below;
++ * DPKG_FULL_FIELD: use field below
++ * @field: One of the supported fields (NH_FLD_)
++ *
++ * @size: Size in bytes
++ * @offset: Byte offset
++ * @hdr_index: Clear for cases not listed below;
++ * Used for protocols that may have more than a single
++ * header, 0 indicates an outer header;
++ * Supported protocols (possible values):
++ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
++ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
++ * NET_PROT_IP(0, HDR_INDEX_LAST);
++ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
++ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
++ */
++
++ struct {
++ enum net_prot prot;
++ enum dpkg_extract_from_hdr_type type;
++ u32 field;
++ u8 size;
++ u8 offset;
++ u8 hdr_index;
++ } from_hdr;
++ /**
++ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ u8 size;
++ u8 offset;
++ } from_data;
++
++ /**
++ * struct from_parse - Used when
++ * 'type = DPKG_EXTRACT_FROM_PARSE'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ u8 size;
++ u8 offset;
++ } from_parse;
++ } extract;
++
++ u8 num_of_byte_masks;
++ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
++};
++
++/**
++ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
++ * profile (rule)
++ * @num_extracts: Defines the number of valid entries in the array below
++ * @extracts: Array of required extractions
++ */
++struct dpkg_profile_cfg {
++ u8 num_extracts;
++ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
++};
++
++#endif /* __FSL_DPKG_H_ */
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+new file mode 100644
+index 00000000..fa353d75
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+@@ -0,0 +1,600 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPNI_CMD_H
++#define _FSL_DPNI_CMD_H
++
++/* DPNI Version */
++#define DPNI_VER_MAJOR 7
++#define DPNI_VER_MINOR 0
++#define DPNI_CMD_BASE_VERSION 1
++#define DPNI_CMD_ID_OFFSET 4
++
++#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
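++/* e.g. DPNI_CMD(0x801) expands to 0x8011: command id 0x801, base version 1 */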
++
++#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
++#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
++#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
++#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
++#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
++
++#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
++#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
++#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
++#define DPNI_CMDID_RESET DPNI_CMD(0x005)
++#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
++
++#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
++#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
++#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
++#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
++#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
++#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
++#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
++#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
++
++#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
++#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
++
++#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
++#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
++#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
++#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
++#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
++#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
++#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
++
++#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
++#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
++#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
++#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
++#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
++#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
++#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
++#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
++#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
++
++#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
++
++#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
++#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
++#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
++
++#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
++#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
++#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
++#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
++#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
++#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
++
++#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
++
++#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
++#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
++
++#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
++#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
++#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
++#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
++#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
++#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
++#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
++
++/* Macros for accessing command fields smaller than 1 byte */
++#define DPNI_MASK(field) \
++ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
++ DPNI_##field##_SHIFT)
++
++#define dpni_set_field(var, field, val) \
++ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
++#define dpni_get_field(var, field) \
++ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
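++
++/*
++ * Illustrative example only (not part of the MC ABI): packing and reading
++ * a sub-byte field with the helpers above, using the single-bit ENABLE
++ * field defined later in this header:
++ *
++ *	u8 flags = 0;
++ *
++ *	dpni_set_field(flags, ENABLE, 1);
++ *
++ * leaves flags == 0x01, and dpni_get_field(flags, ENABLE) then returns 1.
++ */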
++
++struct dpni_cmd_open {
++ __le32 dpni_id;
++};
++
++#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
++struct dpni_cmd_set_pools {
++ /* cmd word 0 */
++ u8 num_dpbp;
++ u8 backup_pool_mask;
++ __le16 pad;
++ /* cmd word 0..4 */
++ __le32 dpbp_id[DPNI_MAX_DPBP];
++ /* cmd word 4..6 */
++ __le16 buffer_size[DPNI_MAX_DPBP];
++};
++
++/* The enable indication is always the least significant bit */
++#define DPNI_ENABLE_SHIFT 0
++#define DPNI_ENABLE_SIZE 1
++
++struct dpni_rsp_is_enabled {
++ u8 enabled;
++};
++
++struct dpni_rsp_get_irq {
++ /* response word 0 */
++ __le32 irq_val;
++ __le32 pad;
++ /* response word 1 */
++ __le64 irq_addr;
++ /* response word 2 */
++ __le32 irq_num;
++ __le32 type;
++};
++
++struct dpni_cmd_set_irq_enable {
++ u8 enable;
++ u8 pad[3];
++ u8 irq_index;
++};
++
++struct dpni_cmd_get_irq_enable {
++ __le32 pad;
++ u8 irq_index;
++};
++
++struct dpni_rsp_get_irq_enable {
++ u8 enabled;
++};
++
++struct dpni_cmd_set_irq_mask {
++ __le32 mask;
++ u8 irq_index;
++};
++
++struct dpni_cmd_get_irq_mask {
++ __le32 pad;
++ u8 irq_index;
++};
++
++struct dpni_rsp_get_irq_mask {
++ __le32 mask;
++};
++
++struct dpni_cmd_get_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
++
++struct dpni_rsp_get_irq_status {
++ __le32 status;
++};
++
++struct dpni_cmd_clear_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
++
++struct dpni_rsp_get_attr {
++ /* response word 0 */
++ __le32 options;
++ u8 num_queues;
++ u8 num_tcs;
++ u8 mac_filter_entries;
++ u8 pad0;
++ /* response word 1 */
++ u8 vlan_filter_entries;
++ u8 pad1;
++ u8 qos_entries;
++ u8 pad2;
++ __le16 fs_entries;
++ __le16 pad3;
++ /* response word 2 */
++ u8 qos_key_size;
++ u8 fs_key_size;
++ __le16 wriop_version;
++};
++
++#define DPNI_ERROR_ACTION_SHIFT 0
++#define DPNI_ERROR_ACTION_SIZE 4
++#define DPNI_FRAME_ANN_SHIFT 4
++#define DPNI_FRAME_ANN_SIZE 1
++
++struct dpni_cmd_set_errors_behavior {
++ __le32 errors;
++ /* from least significant bit: error_action:4, set_frame_annotation:1 */
++ u8 flags;
++};
++
++/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
++ * buffer layouts, but they all share the same parameters.
++ * If one of the functions changes, the structure below needs to be split.
++ */
++
++#define DPNI_PASS_TS_SHIFT 0
++#define DPNI_PASS_TS_SIZE 1
++#define DPNI_PASS_PR_SHIFT 1
++#define DPNI_PASS_PR_SIZE 1
++#define DPNI_PASS_FS_SHIFT 2
++#define DPNI_PASS_FS_SIZE 1
++
++struct dpni_cmd_get_buffer_layout {
++ u8 qtype;
++};
++
++struct dpni_rsp_get_buffer_layout {
++ /* response word 0 */
++ u8 pad0[6];
++ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
++ u8 flags;
++ u8 pad1;
++ /* response word 1 */
++ __le16 private_data_size;
++ __le16 data_align;
++ __le16 head_room;
++ __le16 tail_room;
++};
++
++struct dpni_cmd_set_buffer_layout {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 pad0[3];
++ __le16 options;
++ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
++ u8 flags;
++ u8 pad1;
++ /* cmd word 1 */
++ __le16 private_data_size;
++ __le16 data_align;
++ __le16 head_room;
++ __le16 tail_room;
++};
++
++struct dpni_cmd_set_offload {
++ u8 pad[3];
++ u8 dpni_offload;
++ __le32 config;
++};
++
++struct dpni_cmd_get_offload {
++ u8 pad[3];
++ u8 dpni_offload;
++};
++
++struct dpni_rsp_get_offload {
++ __le32 pad;
++ __le32 config;
++};
++
++struct dpni_cmd_get_qdid {
++ u8 qtype;
++};
++
++struct dpni_rsp_get_qdid {
++ __le16 qdid;
++};
++
++struct dpni_rsp_get_tx_data_offset {
++ __le16 data_offset;
++};
++
++struct dpni_cmd_get_statistics {
++ u8 page_number;
++};
++
++struct dpni_rsp_get_statistics {
++ __le64 counter[DPNI_STATISTICS_CNT];
++};
++
++struct dpni_cmd_set_link_cfg {
++ /* cmd word 0 */
++ __le64 pad0;
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad1;
++ /* cmd word 2 */
++ __le64 options;
++};
++
++#define DPNI_LINK_STATE_SHIFT 0
++#define DPNI_LINK_STATE_SIZE 1
++
++struct dpni_rsp_get_link_state {
++ /* response word 0 */
++ __le32 pad0;
++ /* from LSB: up:1 */
++ u8 flags;
++ u8 pad1[3];
++ /* response word 1 */
++ __le32 rate;
++ __le32 pad2;
++ /* response word 2 */
++ __le64 options;
++};
++
++struct dpni_cmd_set_tx_shaping {
++ /* cmd word 0 */
++ __le16 max_burst_size;
++ __le16 pad0[3];
++ /* cmd word 1 */
++ __le32 rate_limit;
++};
++
++struct dpni_cmd_set_max_frame_length {
++ __le16 max_frame_length;
++};
++
++struct dpni_rsp_get_max_frame_length {
++ __le16 max_frame_length;
++};
++
++struct dpni_cmd_set_multicast_promisc {
++ u8 enable;
++};
++
++struct dpni_rsp_get_multicast_promisc {
++ u8 enabled;
++};
++
++struct dpni_cmd_set_unicast_promisc {
++ u8 enable;
++};
++
++struct dpni_rsp_get_unicast_promisc {
++ u8 enabled;
++};
++
++struct dpni_cmd_set_primary_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
++};
++
++struct dpni_rsp_get_primary_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
++};
++
++struct dpni_rsp_get_port_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
++};
++
++struct dpni_cmd_add_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
++};
++
++struct dpni_cmd_remove_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
++};
++
++#define DPNI_UNICAST_FILTERS_SHIFT 0
++#define DPNI_UNICAST_FILTERS_SIZE 1
++#define DPNI_MULTICAST_FILTERS_SHIFT 1
++#define DPNI_MULTICAST_FILTERS_SIZE 1
++
++struct dpni_cmd_clear_mac_filters {
++ /* from LSB: unicast:1, multicast:1 */
++ u8 flags;
++};
++
++#define DPNI_DIST_MODE_SHIFT 0
++#define DPNI_DIST_MODE_SIZE 4
++#define DPNI_MISS_ACTION_SHIFT 4
++#define DPNI_MISS_ACTION_SIZE 4
++
++struct dpni_cmd_set_rx_tc_dist {
++ /* cmd word 0 */
++ __le16 dist_size;
++ u8 tc_id;
++ /* from LSB: dist_mode:4, miss_action:4 */
++ u8 flags;
++ __le16 pad0;
++ __le16 default_flow_id;
++ /* cmd word 1..5 */
++ __le64 pad1[5];
++ /* cmd word 6 */
++ __le64 key_cfg_iova;
++};
++
++/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
++ * key_cfg_iova)
++ */
++struct dpni_mask_cfg {
++ u8 mask;
++ u8 offset;
++};
++
++#define DPNI_EFH_TYPE_SHIFT 0
++#define DPNI_EFH_TYPE_SIZE 4
++#define DPNI_EXTRACT_TYPE_SHIFT 0
++#define DPNI_EXTRACT_TYPE_SIZE 4
++
++struct dpni_dist_extract {
++ /* word 0 */
++ u8 prot;
++ /* EFH type stored in the 4 least significant bits */
++ u8 efh_type;
++ u8 size;
++ u8 offset;
++ __le32 field;
++ /* word 1 */
++ u8 hdr_index;
++ u8 constant;
++ u8 num_of_repeats;
++ u8 num_of_byte_masks;
++ /* Extraction type is stored in the 4 LSBs */
++ u8 extract_type;
++ u8 pad[3];
++ /* word 2 */
++ struct dpni_mask_cfg masks[4];
++};
++
++struct dpni_ext_set_rx_tc_dist {
++ /* extension word 0 */
++ u8 num_extracts;
++ u8 pad[7];
++ /* words 1..25 */
++ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
++};
++
++struct dpni_cmd_get_queue {
++ u8 qtype;
++ u8 tc;
++ u8 index;
++};
++
++#define DPNI_DEST_TYPE_SHIFT 0
++#define DPNI_DEST_TYPE_SIZE 4
++#define DPNI_STASH_CTRL_SHIFT 6
++#define DPNI_STASH_CTRL_SIZE 1
++#define DPNI_HOLD_ACTIVE_SHIFT 7
++#define DPNI_HOLD_ACTIVE_SIZE 1
++
++struct dpni_rsp_get_queue {
++ /* response word 0 */
++ __le64 pad0;
++ /* response word 1 */
++ __le32 dest_id;
++ __le16 pad1;
++ u8 dest_prio;
++ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
++ u8 flags;
++ /* response word 2 */
++ __le64 flc;
++ /* response word 3 */
++ __le64 user_context;
++ /* response word 4 */
++ __le32 fqid;
++ __le16 qdbin;
++};
++
++struct dpni_cmd_set_queue {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 tc;
++ u8 index;
++ u8 options;
++ __le32 pad0;
++ /* cmd word 1 */
++ __le32 dest_id;
++ __le16 pad1;
++ u8 dest_prio;
++ u8 flags;
++ /* cmd word 2 */
++ __le64 flc;
++ /* cmd word 3 */
++ __le64 user_context;
++};
++
++struct dpni_cmd_add_fs_entry {
++ /* cmd word 0 */
++ __le16 options;
++ u8 tc_id;
++ u8 key_size;
++ __le16 index;
++ __le16 flow_id;
++ /* cmd word 1 */
++ __le64 key_iova;
++ /* cmd word 2 */
++ __le64 mask_iova;
++ /* cmd word 3 */
++ __le64 flc;
++};
++
++struct dpni_cmd_remove_fs_entry {
++ /* cmd word 0 */
++ __le16 pad0;
++ u8 tc_id;
++ u8 key_size;
++ __le32 pad1;
++ /* cmd word 1 */
++ __le64 key_iova;
++ /* cmd word 2 */
++ __le64 mask_iova;
++};
++
++struct dpni_cmd_set_taildrop {
++ /* cmd word 0 */
++ u8 congestion_point;
++ u8 qtype;
++ u8 tc;
++ u8 index;
++ __le32 pad0;
++ /* cmd word 1 */
++ /* Only least significant bit is relevant */
++ u8 enable;
++ u8 pad1;
++ u8 units;
++ u8 pad2;
++ __le32 threshold;
++};
++
++struct dpni_cmd_get_taildrop {
++ u8 congestion_point;
++ u8 qtype;
++ u8 tc;
++ u8 index;
++};
++
++struct dpni_rsp_get_taildrop {
++ /* cmd word 0 */
++ __le64 pad0;
++ /* cmd word 1 */
++ /* only least significant bit is relevant */
++ u8 enable;
++ u8 pad1;
++ u8 units;
++ u8 pad2;
++ __le32 threshold;
++};
++
++#define DPNI_DEST_TYPE_SHIFT 0
++#define DPNI_DEST_TYPE_SIZE 4
++#define DPNI_CONG_UNITS_SHIFT 4
++#define DPNI_CONG_UNITS_SIZE 2
++
++struct dpni_cmd_set_congestion_notification {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 tc;
++ u8 pad[6];
++ /* cmd word 1 */
++ __le32 dest_id;
++ __le16 notification_mode;
++ u8 dest_priority;
++ /* from LSB: dest_type:4, units:2 */
++ u8 type_units;
++ /* cmd word 2 */
++ __le64 message_iova;
++ /* cmd word 3 */
++ __le64 message_ctx;
++ /* cmd word 4 */
++ __le32 threshold_entry;
++ __le32 threshold_exit;
++};
++
++#endif /* _FSL_DPNI_CMD_H */
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+new file mode 100644
+index 00000000..3c23e4dc
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+@@ -0,0 +1,1770 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dpni.h"
++#include "dpni-cmd.h"
++
++/**
++ * dpni_prepare_key_cfg() - Prepare the extract parameters
++ * @cfg: Definition of a full Key Generation profile (rule)
++ * @key_cfg_buf: Zeroed 256-byte buffer to hold the prepared parameters,
++ *		to be mapped for DMA by the caller afterwards
++ *
++ * This function has to be called before the following functions:
++ * - dpni_set_rx_tc_dist()
++ * - dpni_set_qos_table()
++ */
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
++{
++ int i, j;
++ struct dpni_ext_set_rx_tc_dist *dpni_ext;
++ struct dpni_dist_extract *extr;
++
++ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
++ return -EINVAL;
++
++ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
++ dpni_ext->num_extracts = cfg->num_extracts;
++
++ for (i = 0; i < cfg->num_extracts; i++) {
++ extr = &dpni_ext->extracts[i];
++
++ switch (cfg->extracts[i].type) {
++ case DPKG_EXTRACT_FROM_HDR:
++ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
++ dpni_set_field(extr->efh_type, EFH_TYPE,
++ cfg->extracts[i].extract.from_hdr.type);
++ extr->size = cfg->extracts[i].extract.from_hdr.size;
++ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
++ extr->field = cpu_to_le32(
++ cfg->extracts[i].extract.from_hdr.field);
++ extr->hdr_index =
++ cfg->extracts[i].extract.from_hdr.hdr_index;
++ break;
++ case DPKG_EXTRACT_FROM_DATA:
++ extr->size = cfg->extracts[i].extract.from_data.size;
++ extr->offset =
++ cfg->extracts[i].extract.from_data.offset;
++ break;
++ case DPKG_EXTRACT_FROM_PARSE:
++ extr->size = cfg->extracts[i].extract.from_parse.size;
++ extr->offset =
++ cfg->extracts[i].extract.from_parse.offset;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
++ dpni_set_field(extr->extract_type, EXTRACT_TYPE,
++ cfg->extracts[i].type);
++
++ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
++ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
++ extr->masks[j].offset =
++ cfg->extracts[i].masks[j].offset;
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * dpni_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpni_id: DPNI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpni_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpni_id,
++ u16 *token)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_open *cmd_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ cmd_params = (struct dpni_cmd_open *)cmd.params;
++ cmd_params->dpni_id = cpu_to_le32(dpni_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
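++
++/*
++ * Illustrative sketch only: a minimal control session built from the calls
++ * in this file, assuming "mc_io" is a valid MC portal and "id" names an
++ * existing DPNI object:
++ *
++ *	u16 token;
++ *	int err;
++ *
++ *	err = dpni_open(mc_io, 0, id, &token);
++ *	if (err)
++ *		return err;
++ *	... issue further dpni_*() commands using "token" ...
++ *	dpni_close(mc_io, 0, token);
++ */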
++
++/**
++ * dpni_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_pools() - Set buffer pools configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Buffer pools configuration
++ *
++ * This configuration is mandatory for DPNI operation.
++ * Warning: allowed only when the DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_pools_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_pools *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
++ cmd_params->num_dpbp = cfg->num_dpbp;
++ for (i = 0; i < DPNI_MAX_DPBP; i++) {
++ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
++ cmd_params->buffer_size[i] =
++ cpu_to_le16(cfg->pools[i].buffer_size);
++ cmd_params->backup_pool_mask |=
++ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
++ }
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
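++
++/*
++ * Illustrative sketch only: attaching a single buffer pool while the DPNI
++ * is disabled, assuming "dpbp_id" and "buf_size" are provided by the
++ * caller and struct dpni_pools_cfg is as declared in dpni.h:
++ *
++ *	struct dpni_pools_cfg pools_params = { 0 };
++ *
++ *	pools_params.num_dpbp = 1;
++ *	pools_params.pools[0].dpbp_id = dpbp_id;
++ *	pools_params.pools[0].buffer_size = buf_size;
++ *	pools_params.pools[0].backup_pool = 0;
++ *	err = dpni_set_pools(mc_io, 0, token, &pools_params);
++ */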
++
++/**
++ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_is_enabled() - Check if the DPNI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_rsp_is_enabled *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++
++ return 0;
++}
++
++/**
++ * dpni_reset() - Reset the DPNI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state: enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_irq_enable *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
++ dpni_set_field(cmd_params->enable, ENABLE, en);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_get_irq_enable *cmd_params;
++ struct dpni_rsp_get_irq_enable *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++
++ return 0;
++}
++
++/**
++ * dpni_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_irq_mask *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
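++
++/*
++ * Illustrative sketch only: unmasking a single interrupt cause and then
++ * enabling the interrupt index, assuming irq_index 0 and cause bit 0 are
++ * valid for the object:
++ *
++ *	err = dpni_set_irq_mask(mc_io, 0, token, 0, BIT(0));
++ *	if (!err)
++ *		err = dpni_set_irq_enable(mc_io, 0, token, 0, 1);
++ */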
++
++/**
++ * dpni_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_get_irq_mask *cmd_params;
++ struct dpni_rsp_get_irq_mask *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
++ *mask = le32_to_cpu(rsp_params->mask);
++
++ return 0;
++}
++
++/**
++ * dpni_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_get_irq_status *cmd_params;
++ struct dpni_rsp_get_irq_status *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
++ *status = le32_to_cpu(rsp_params->status);
++
++ return 0;
++}
++
++/**
++ * dpni_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_clear_irq_status *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ cmd_params->status = cpu_to_le32(status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_attributes() - Retrieve DPNI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @attr: Object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_rsp_get_attr *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
++ attr->options = le32_to_cpu(rsp_params->options);
++ attr->num_queues = rsp_params->num_queues;
++ attr->num_tcs = rsp_params->num_tcs;
++ attr->mac_filter_entries = rsp_params->mac_filter_entries;
++ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
++ attr->qos_entries = rsp_params->qos_entries;
++ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
++ attr->qos_key_size = rsp_params->qos_key_size;
++ attr->fs_key_size = rsp_params->fs_key_size;
++ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
++
++ return 0;
++}
++
++/**
++ * dpni_set_errors_behavior() - Set errors behavior
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Errors configuration
++ *
++ * This function may be called numerous times with different
++ * error masks.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_error_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_errors_behavior *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
++ cmd_params->errors = cpu_to_le32(cfg->errors);
++ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
++ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue to retrieve configuration for
++ * @layout: Returns buffer layout attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_get_buffer_layout *cmd_params;
++ struct dpni_rsp_get_buffer_layout *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
++ cmd_params->qtype = qtype;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
++ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
++ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
++ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
++ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
++ layout->data_align = le16_to_cpu(rsp_params->data_align);
++ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
++ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
++
++ return 0;
++}
++
++/**
++ * dpni_set_buffer_layout() - Set buffer layout configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue this configuration applies to
++ * @layout: Buffer layout configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ const struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_buffer_layout *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->options = cpu_to_le16(layout->options);
++ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
++ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
++ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
++ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
++ cmd_params->data_align = cpu_to_le16(layout->data_align);
++ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
++ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
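++
++/*
++ * Illustrative sketch only: requesting frame status and 64-byte data
++ * alignment on Rx frames while the DPNI is disabled, assuming the
++ * DPNI_BUF_LAYOUT_OPT_* flags and DPNI_QUEUE_RX are declared in dpni.h:
++ *
++ *	struct dpni_buffer_layout layout = { 0 };
++ *
++ *	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ *			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
++ *	layout.pass_frame_status = 1;
++ *	layout.data_align = 64;
++ *	err = dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
++ */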
++
++/**
++ * dpni_set_offload() - Set DPNI offload configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @type: Type of DPNI offload
++ * @config: Offload configuration.
++ * For checksum offloads, a non-zero value enables the offload
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 config)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_offload *cmd_params;
++
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
++ cmd_params->dpni_offload = type;
++ cmd_params->config = cpu_to_le32(config);
++
++ return mc_send_command(mc_io, &cmd);
++}
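++
++/*
++ * Illustrative sketch only: enabling Rx L3 checksum validation while the
++ * DPNI is disabled, assuming DPNI_OFF_RX_L3_CSUM is one of the
++ * enum dpni_offload values declared in dpni.h:
++ *
++ *	err = dpni_set_offload(mc_io, 0, token, DPNI_OFF_RX_L3_CSUM, 1);
++ */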
++
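++/**
++ * dpni_get_offload() - Get DPNI offload configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @type: Type of DPNI offload
++ * @config: Returned offload configuration.
++ *		For checksum offloads, a non-zero value indicates the
++ *		offload is enabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */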
++int dpni_get_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 *config)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_get_offload *cmd_params;
++ struct dpni_rsp_get_offload *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
++ cmd_params->dpni_offload = type;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
++ *config = le32_to_cpu(rsp_params->config);
++
++ return 0;
++}
++
++/**
++ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
++ * for enqueue operations
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue to receive QDID for
++ * @qdid: Returned virtual QDID value that should be used as an argument
++ * in all enqueue operations
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u16 *qdid)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_get_qdid *cmd_params;
++ struct dpni_rsp_get_qdid *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
++ cmd_params->qtype = qtype;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
++ *qdid = le16_to_cpu(rsp_params->qdid);
++
++ return 0;
++}
++
++/**
++ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @data_offset: Tx data offset (from start of buffer)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *data_offset)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_rsp_get_tx_data_offset *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
++ *data_offset = le16_to_cpu(rsp_params->data_offset);
++
++ return 0;
++}
++
++/**
++ * dpni_set_link_cfg() - Set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_link_cfg *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
++ cmd_params->rate = cpu_to_le32(cfg->rate);
++ cmd_params->options = cpu_to_le64(cfg->options);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_link_state() - Return the link state (either up or down)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @state: Returned link state
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_link_state *state)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_rsp_get_link_state *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
++ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
++ state->rate = le32_to_cpu(rsp_params->rate);
++ state->options = le64_to_cpu(rsp_params->options);
++
++ return 0;
++}
++
++/**
++ * dpni_set_tx_shaping() - Set the transmit shaping
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tx_shaper: Tx shaping configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_shaping_cfg *tx_shaper)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_tx_shaping *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
++ cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size);
++ cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_max_frame_length() - Set the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 max_frame_length)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_max_frame_length *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
++ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_max_frame_length() - Get the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *max_frame_length)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_rsp_get_max_frame_length *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
++ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
++
++ return 0;
++}
++
++/**
++ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_multicast_promisc *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
++ dpni_set_field(cmd_params->enable, ENABLE, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_rsp_get_multicast_promisc *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++
++ return 0;
++}
++
++/**
++ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_unicast_promisc *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
++ dpni_set_field(cmd_params->enable, ENABLE, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_rsp_get_unicast_promisc *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++
++ return 0;
++}
++
++/**
++ * dpni_set_primary_mac_addr() - Set the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to set as primary address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_primary_mac_addr *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
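++ /* the command buffer holds the MAC address bytes in reverse order */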
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = mac_addr[5 - i];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_primary_mac_addr() - Get the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: Returned MAC address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_rsp_get_primary_mac_addr *rsp_params;
++ int i, err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ mac_addr[5 - i] = rsp_params->mac_addr[i];
++
++ return 0;
++}
++
++/**
++ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
++ * port the DPNI is attached to
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address of the physical port, if any, otherwise 0
++ *
++ * The primary MAC address is not affected by this operation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_rsp_get_port_mac_addr *rsp_params;
++ int i, err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ mac_addr[5 - i] = rsp_params->mac_addr[i];
++
++ return 0;
++}
++
++/**
++ * dpni_add_mac_addr() - Add MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to add
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_add_mac_addr *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = mac_addr[5 - i];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_remove_mac_addr() - Remove MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_remove_mac_addr *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = mac_addr[5 - i];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @unicast: Set to '1' to clear unicast addresses
++ * @multicast: Set to '1' to clear multicast addresses
++ *
++ * The primary MAC address is not cleared by this operation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int unicast,
++ int multicast)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_clear_mac_filters *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
++ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
++ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class distribution configuration
++ *
++ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
++ * first to prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_rx_tc_dist *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
++ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
++ cmd_params->tc_id = tc_id;
++ dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
++ dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
++ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
++ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
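++
++/*
++ * Illustrative sketch only: hash distribution over several queues,
++ * combining dpni_prepare_key_cfg() with this call. Here "kg_cfg" is a
++ * caller-built key generation profile, "key_buf" the zeroed 256-byte
++ * buffer it is prepared into, "key_iova" the DMA address of that buffer,
++ * and DPNI_DIST_MODE_HASH is assumed to be declared in dpni.h:
++ *
++ *	struct dpni_rx_tc_dist_cfg dist_cfg = { 0 };
++ *
++ *	err = dpni_prepare_key_cfg(&kg_cfg, key_buf);
++ *	if (err)
++ *		return err;
++ *	dist_cfg.dist_size = num_queues;
++ *	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ *	dist_cfg.key_cfg_iova = key_iova;
++ *	err = dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist_cfg);
++ */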
++
++/**
++ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
++ * (to select a flow ID)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @index: Location in the QoS table at which to insert the entry.
++ * Only relevant if MASKING is enabled for QoS
++ * classification on this DPNI; it is ignored for exact match.
++ * @cfg: Flow steering rule to add
++ * @action: Action to be taken as result of a classification hit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ u16 index,
++ const struct dpni_rule_cfg *cfg,
++ const struct dpni_fs_action_cfg *action)
++{
++ struct dpni_cmd_add_fs_entry *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->index = cpu_to_le16(index);
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++ cmd_params->options = cpu_to_le16(action->options);
++ cmd_params->flow_id = cpu_to_le16(action->flow_id);
++ cmd_params->flc = cpu_to_le64(action->flc);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
++ * traffic class
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Flow steering rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rule_cfg *cfg)
++{
++ struct dpni_cmd_remove_fs_entry *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_congestion_notification() - Set traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ const struct dpni_congestion_notification_cfg *cfg)
++{
++ struct dpni_cmd_set_congestion_notification *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc_id;
++ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
++ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
++ cmd_params->dest_priority = cfg->dest_cfg.priority;
++ dpni_set_field(cmd_params->type_units, DEST_TYPE,
++ cfg->dest_cfg.dest_type);
++ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
++ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
++ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
++ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
++ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_queue() - Set queue parameters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - all queue types are supported, although
++ * the command is ignored for Tx
++ * @tc: Traffic class, in range 0 to NUM_TCS - 1
++ * @index: Selects the specific queue out of the set allocated for the
++ * same TC. Value must be in range 0 to NUM_QUEUES - 1
++ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
++ * configuration options are set on the queue
++ * @queue: Queue structure
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ u8 options,
++ const struct dpni_queue *queue)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_queue *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
++ cmd_params->options = options;
++ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
++ cmd_params->dest_prio = queue->destination.priority;
++ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
++ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
++ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
++ queue->destination.hold_active);
++ cmd_params->flc = cpu_to_le64(queue->flc.value);
++ cmd_params->user_context = cpu_to_le64(queue->user_context);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
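++
++/* Illustrative sketch (editor's example, not part of the API): steering an
++ * Rx queue's frames to a DPIO channel. 'priv->mc_io', 'priv->token',
++ * 'dpio_id' and 'my_fq_ctx' are hypothetical caller state; error handling
++ * is elided.
++ *
++ *	struct dpni_queue queue = { 0 };
++ *
++ *	queue.destination.type = DPNI_DEST_DPIO;
++ *	queue.destination.id = dpio_id;
++ *	queue.destination.priority = 0;
++ *	queue.user_context = (u64)(uintptr_t)my_fq_ctx;
++ *	err = dpni_set_queue(priv->mc_io, 0, priv->token, DPNI_QUEUE_RX,
++ *			     0, 0,
++ *			     DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX,
++ *			     &queue);
++ */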
++
++/**
++ * dpni_get_queue() - Get queue parameters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - all queue types are supported
++ * @tc: Traffic class, in range 0 to NUM_TCS - 1
++ * @index: Selects the specific queue out of the set allocated for the
++ * same TC. Value must be in range 0 to NUM_QUEUES - 1
++ * @queue: Queue configuration structure
++ * @qid: Queue identification
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_queue *queue,
++ struct dpni_queue_id *qid)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_get_queue *cmd_params;
++ struct dpni_rsp_get_queue *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
++ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
++ queue->destination.priority = rsp_params->dest_prio;
++ queue->destination.type = dpni_get_field(rsp_params->flags,
++ DEST_TYPE);
++ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
++ STASH_CTRL);
++ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
++ HOLD_ACTIVE);
++ queue->flc.value = le64_to_cpu(rsp_params->flc);
++ queue->user_context = le64_to_cpu(rsp_params->user_context);
++ qid->fqid = le32_to_cpu(rsp_params->fqid);
++ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
++
++ return 0;
++}
++
++/**
++ * dpni_get_statistics() - Get DPNI statistics
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @page: Selects the statistics page to retrieve, see
++ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
++ * @stat: Structure containing the statistics
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 page,
++ union dpni_statistics *stat)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_get_statistics *cmd_params;
++ struct dpni_rsp_get_statistics *rsp_params;
++ int i, err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
++ cmd_params->page_number = page;
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
++ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
++ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
++
++ return 0;
++}
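++
++/* Illustrative sketch (editor's example, not part of the API): reading the
++ * page 0 ingress counters. Interpretation of the pages follows the
++ * union dpni_statistics layout in dpni.h; error handling is elided.
++ *
++ *	union dpni_statistics stats;
++ *
++ *	err = dpni_get_statistics(priv->mc_io, 0, priv->token, 0, &stats);
++ *	if (!err)
++ *		pr_info("rx frames %llu, rx bytes %llu\n",
++ *			stats.page_0.ingress_all_frames,
++ *			stats.page_0.ingress_all_bytes);
++ */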
++
++/**
++ * dpni_reset_statistics() - Clears DPNI statistics
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_reset_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_taildrop() - Set taildrop per queue or TC
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cg_point: Congestion point
++ * @qtype:	Queue type on which the taildrop is configured.
++ * Only Rx queues are supported for now
++ * @tc: Traffic class to apply this taildrop to
++ * @index:	Index of the queue if the DPNI supports multiple queues for
++ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
++ * @taildrop: Taildrop structure
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_taildrop *taildrop)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_set_taildrop *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
++ cmd_params->congestion_point = cg_point;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
++ dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
++ cmd_params->units = taildrop->units;
++ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
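++
++/* Illustrative sketch (editor's example, not part of the API): enabling a
++ * 64 KiB byte-unit taildrop on the first Rx queue. The threshold value is
++ * arbitrary; error handling is elided.
++ *
++ *	struct dpni_taildrop td = { 0 };
++ *
++ *	td.enable = 1;
++ *	td.units = DPNI_CONGESTION_UNIT_BYTES;
++ *	td.threshold = 64 * 1024;
++ *	err = dpni_set_taildrop(priv->mc_io, 0, priv->token, DPNI_CP_QUEUE,
++ *				DPNI_QUEUE_RX, 0, 0, &td);
++ */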
++
++/**
++ * dpni_get_taildrop() - Get taildrop information
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cg_point: Congestion point
++ * @qtype:	Queue type on which the taildrop is configured.
++ * Only Rx queues are supported for now
++ * @tc: Traffic class to apply this taildrop to
++ * @index:	Index of the queue if the DPNI supports multiple queues for
++ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
++ * @taildrop: Taildrop structure
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_taildrop *taildrop)
++{
++ struct mc_command cmd = { 0 };
++ struct dpni_cmd_get_taildrop *cmd_params;
++ struct dpni_rsp_get_taildrop *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
++ cmd_params->congestion_point = cg_point;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
++ taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
++ taildrop->units = rsp_params->units;
++ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
++
++ return 0;
++}
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+new file mode 100644
+index 00000000..600c3574
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+@@ -0,0 +1,989 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPNI_H
++#define __FSL_DPNI_H
++
++#include "dpkg.h"
++
++struct fsl_mc_io;
++
++/**
++ * Data Path Network Interface API
++ * Contains initialization APIs and runtime control APIs for DPNI
++ */
++
++/** General DPNI macros */
++
++/**
++ * Maximum number of traffic classes
++ */
++#define DPNI_MAX_TC 8
++/**
++ * Maximum number of buffer pools per DPNI
++ */
++#define DPNI_MAX_DPBP 8
++
++/**
++ * All traffic classes considered; see dpni_set_queue()
++ */
++#define DPNI_ALL_TCS (u8)(-1)
++/**
++ * All flows within traffic class considered; see dpni_set_queue()
++ */
++#define DPNI_ALL_TC_FLOWS (u16)(-1)
++/**
++ * Generate new flow ID; see dpni_set_queue()
++ */
++#define DPNI_NEW_FLOW_ID (u16)(-1)
++
++/**
++ * Tx traffic is always released to a buffer pool on transmit; there are no
++ * resources allocated to have the frames confirmed back to the source after
++ * transmission.
++ */
++#define DPNI_OPT_TX_FRM_RELEASE 0x000001
++/**
++ * Disables support for MAC address filtering for addresses other than primary
++ * MAC address. This affects both unicast and multicast. Promiscuous mode can
++ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
++ * is disabled, only traffic matching the primary MAC address will be accepted.
++ */
++#define DPNI_OPT_NO_MAC_FILTER 0x000002
++/**
++ * Allocate policers for this DPNI. They can be used to rate-limit traffic on
++ * a per-traffic-class (TC) basis.
++ */
++#define DPNI_OPT_HAS_POLICING 0x000004
++/**
++ * Congestion can be managed in several ways, allowing the buffer pool to
++ * deplete on ingress, taildrop on each queue or use congestion groups for sets
++ * of queues. If set, it configures a single congestion groups across all TCs.
++ * If reset, a congestion group is allocated for each TC. Only relevant if the
++ * DPNI has multiple traffic classes.
++ */
++#define DPNI_OPT_SHARED_CONGESTION 0x000008
++/**
++ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
++ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
++ * variants. Setting this bit on these SoCs will trigger an error.
++ */
++#define DPNI_OPT_HAS_KEY_MASKING 0x000010
++/**
++ * Disables the flow steering table.
++ */
++#define DPNI_OPT_NO_FS 0x000020
++
++int dpni_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpni_id,
++ u16 *token);
++
++int dpni_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
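++
++/* Illustrative sketch (editor's example, not part of the API): the token
++ * returned by dpni_open() identifies this DPNI in every subsequent command
++ * and must be released with dpni_close(). 'dpni_id' comes from the fsl-mc
++ * bus; error handling is elided.
++ *
++ *	u16 token;
++ *
++ *	err = dpni_open(mc_io, 0, dpni_id, &token);
++ *	...
++ *	err = dpni_close(mc_io, 0, token);
++ */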
++
++/**
++ * struct dpni_pools_cfg - Structure representing buffer pools configuration
++ * @num_dpbp: Number of DPBPs
++ * @pools: Array of buffer pools parameters; The number of valid entries
++ * must match 'num_dpbp' value
++ */
++struct dpni_pools_cfg {
++ u8 num_dpbp;
++ /**
++ * struct pools - Buffer pools parameters
++ * @dpbp_id: DPBP object ID
++ * @buffer_size: Buffer size
++ * @backup_pool: Backup pool
++ */
++ struct {
++ int dpbp_id;
++ u16 buffer_size;
++ int backup_pool;
++ } pools[DPNI_MAX_DPBP];
++};
++
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_pools_cfg *cfg);
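++
++/* Illustrative sketch (editor's example, not part of the API): attaching a
++ * single DPBP as the Rx buffer pool. 'dpbp_id' and the 2 KiB buffer size
++ * are assumptions; error handling is elided.
++ *
++ *	struct dpni_pools_cfg pools = { 0 };
++ *
++ *	pools.num_dpbp = 1;
++ *	pools.pools[0].dpbp_id = dpbp_id;
++ *	pools.pools[0].buffer_size = 2048;
++ *	pools.pools[0].backup_pool = 0;
++ *	err = dpni_set_pools(mc_io, 0, token, &pools);
++ */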
++
++int dpni_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpni_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpni_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * DPNI IRQ Index and Events
++ */
++
++/**
++ * IRQ index
++ */
++#define DPNI_IRQ_INDEX 0
++/**
++ * IRQ event - indicates a change in link state
++ */
++#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
++
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en);
++
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en);
++
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask);
++
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask);
++
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status);
++
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status);
++
++/**
++ * struct dpni_attr - Structure representing DPNI attributes
++ * @options: Any combination of the following options:
++ * DPNI_OPT_TX_FRM_RELEASE
++ * DPNI_OPT_NO_MAC_FILTER
++ * DPNI_OPT_HAS_POLICING
++ * DPNI_OPT_SHARED_CONGESTION
++ * DPNI_OPT_HAS_KEY_MASKING
++ * DPNI_OPT_NO_FS
++ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
++ * @num_tcs: Number of traffic classes (TCs) reserved for the DPNI.
++ * @mac_filter_entries: Number of entries in the MAC address filtering table.
++ * @vlan_filter_entries: Number of entries in the VLAN filtering table.
++ * @qos_entries: Number of entries in the QoS classification table.
++ * @fs_entries: Number of entries in the flow steering table.
++ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
++ * than this when adding QoS entries will result in an error.
++ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
++ * key larger than this when composing the hash + FS key will
++ * result in an error.
++ * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
++ * in 6, 5 and 5 bits, respectively.
++ */
++struct dpni_attr {
++ u32 options;
++ u8 num_queues;
++ u8 num_tcs;
++ u8 mac_filter_entries;
++ u8 vlan_filter_entries;
++ u8 qos_entries;
++ u16 fs_entries;
++ u8 qos_key_size;
++ u8 fs_key_size;
++ u16 wriop_version;
++};
++
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_attr *attr);
++
++/**
++ * DPNI errors
++ */
++
++/**
++ * Extract out of frame header error
++ */
++#define DPNI_ERROR_EOFHE 0x00020000
++/**
++ * Frame length error
++ */
++#define DPNI_ERROR_FLE 0x00002000
++/**
++ * Frame physical error
++ */
++#define DPNI_ERROR_FPE 0x00001000
++/**
++ * Parsing header error
++ */
++#define DPNI_ERROR_PHE 0x00000020
++/**
++ * Parser L3 checksum error
++ */
++#define DPNI_ERROR_L3CE 0x00000004
++/**
++ * Parser L4 checksum error
++ */
++#define DPNI_ERROR_L4CE 0x00000001
++
++/**
++ * enum dpni_error_action - Defines DPNI behavior for errors
++ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
++ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
++ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
++ */
++enum dpni_error_action {
++ DPNI_ERROR_ACTION_DISCARD = 0,
++ DPNI_ERROR_ACTION_CONTINUE = 1,
++ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
++};
++
++/**
++ * struct dpni_error_cfg - Structure representing DPNI errors treatment
++ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
++ * @error_action: The desired action for the errors mask
++ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
++ * status (FAS); relevant only for the non-discard action
++ */
++struct dpni_error_cfg {
++ u32 errors;
++ enum dpni_error_action error_action;
++ int set_frame_annotation;
++};
++
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_error_cfg *cfg);
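++
++/* Illustrative sketch (editor's example, not part of the API): letting
++ * frames with L3/L4 checksum errors through while flagging them in the
++ * frame annotation status, so the driver can drop them in software. Error
++ * handling is elided.
++ *
++ *	struct dpni_error_cfg err_cfg = { 0 };
++ *
++ *	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
++ *	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
++ *	err_cfg.set_frame_annotation = 1;
++ *	err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
++ */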
++
++/**
++ * DPNI buffer layout modification options
++ */
++
++/**
++ * Select to modify the time-stamp setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
++/**
++ * Select to modify the parser-result setting; not applicable for Tx
++ */
++#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
++/**
++ * Select to modify the frame-status setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
++/**
++ * Select to modify the private-data-size setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
++/**
++ * Select to modify the data-alignment setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
++/**
++ * Select to modify the data-head-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
++/**
++ * Select to modify the data-tail-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
++
++/**
++ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
++ * @options: Flags representing the suggested modifications to the buffer
++ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
++ * @pass_timestamp: Pass timestamp value
++ * @pass_parser_result: Pass parser results
++ * @pass_frame_status: Pass frame status
++ * @private_data_size: Size kept for private data (in bytes)
++ * @data_align: Data alignment
++ * @data_head_room: Data head room
++ * @data_tail_room: Data tail room
++ */
++struct dpni_buffer_layout {
++ u32 options;
++ int pass_timestamp;
++ int pass_parser_result;
++ int pass_frame_status;
++ u16 private_data_size;
++ u16 data_align;
++ u16 data_head_room;
++ u16 data_tail_room;
++};
++
++/**
++ * enum dpni_queue_type - Identifies a type of queue targeted by the command
++ * @DPNI_QUEUE_RX: Rx queue
++ * @DPNI_QUEUE_TX: Tx queue
++ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
++ * @DPNI_QUEUE_RX_ERR: Rx error queue
++ */
++enum dpni_queue_type {
++ DPNI_QUEUE_RX,
++ DPNI_QUEUE_TX,
++ DPNI_QUEUE_TX_CONFIRM,
++ DPNI_QUEUE_RX_ERR,
++};
++
++int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ struct dpni_buffer_layout *layout);
++
++int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ const struct dpni_buffer_layout *layout);
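++
++/* Illustrative sketch (editor's example, not part of the API): requesting
++ * frame status and 256 bytes of headroom in the Rx buffer layout. Only the
++ * fields selected in 'options' are applied; the values here are
++ * assumptions. Error handling is elided.
++ *
++ *	struct dpni_buffer_layout layout = { 0 };
++ *
++ *	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ *			 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
++ *	layout.pass_frame_status = 1;
++ *	layout.data_head_room = 256;
++ *	err = dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
++ */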
++
++/**
++ * enum dpni_offload - Identifies a type of offload targeted by the command
++ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
++ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
++ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
++ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
++ */
++enum dpni_offload {
++ DPNI_OFF_RX_L3_CSUM,
++ DPNI_OFF_RX_L4_CSUM,
++ DPNI_OFF_TX_L3_CSUM,
++ DPNI_OFF_TX_L4_CSUM,
++};
++
++int dpni_set_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 config);
++
++int dpni_get_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 *config);
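++
++/* Illustrative sketch (editor's example, not part of the API): enabling Rx
++ * L3 checksum validation. A non-zero 'config' is assumed here to mean
++ * "enabled" for the checksum offloads; error handling is elided.
++ *
++ *	err = dpni_set_offload(mc_io, 0, token, DPNI_OFF_RX_L3_CSUM, 1);
++ */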
++
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u16 *qdid);
++
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *data_offset);
++
++#define DPNI_STATISTICS_CNT 7
++
++union dpni_statistics {
++ /**
++ * struct page_0 - Page_0 statistics structure
++ * @ingress_all_frames: Ingress frame count
++ * @ingress_all_bytes: Ingress byte count
++ * @ingress_multicast_frames: Ingress multicast frame count
++ * @ingress_multicast_bytes: Ingress multicast byte count
++ * @ingress_broadcast_frames: Ingress broadcast frame count
++ * @ingress_broadcast_bytes: Ingress broadcast byte count
++ */
++ struct {
++ u64 ingress_all_frames;
++ u64 ingress_all_bytes;
++ u64 ingress_multicast_frames;
++ u64 ingress_multicast_bytes;
++ u64 ingress_broadcast_frames;
++ u64 ingress_broadcast_bytes;
++ } page_0;
++ /**
++ * struct page_1 - Page_1 statistics structure
++ * @egress_all_frames: Egress frame count
++ * @egress_all_bytes: Egress byte count
++ * @egress_multicast_frames: Egress multicast frame count
++ * @egress_multicast_bytes: Egress multicast byte count
++ * @egress_broadcast_frames: Egress broadcast frame count
++ * @egress_broadcast_bytes: Egress broadcast byte count
++ */
++ struct {
++ u64 egress_all_frames;
++ u64 egress_all_bytes;
++ u64 egress_multicast_frames;
++ u64 egress_multicast_bytes;
++ u64 egress_broadcast_frames;
++ u64 egress_broadcast_bytes;
++ } page_1;
++ /**
++ * struct page_2 - Page_2 statistics structure
++ * @ingress_filtered_frames: Ingress filtered frame count
++ * @ingress_discarded_frames: Ingress discarded frame count
++ * @ingress_nobuffer_discards: Ingress discarded frame count
++ * due to lack of buffers
++ * @egress_discarded_frames: Egress discarded frame count
++ * @egress_confirmed_frames: Egress confirmed frame count
++ */
++ struct {
++ u64 ingress_filtered_frames;
++ u64 ingress_discarded_frames;
++ u64 ingress_nobuffer_discards;
++ u64 egress_discarded_frames;
++ u64 egress_confirmed_frames;
++ } page_2;
++ /**
++ * struct raw - raw statistics structure
++ */
++ struct {
++ u64 counter[DPNI_STATISTICS_CNT];
++ } raw;
++};
++
++int dpni_get_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 page,
++ union dpni_statistics *stat);
++
++int dpni_reset_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpni_link_cfg - Structure representing DPNI link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ */
++struct dpni_link_cfg {
++ u32 rate;
++ u64 options;
++};
++
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_link_cfg *cfg);
++
++/**
++ * struct dpni_link_state - Structure representing DPNI link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ * @up: Link state; '0' for down, '1' for up
++ */
++struct dpni_link_state {
++ u32 rate;
++ u64 options;
++ int up;
++};
++
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_link_state *state);
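++
++/* Illustrative sketch (editor's example, not part of the API): polling the
++ * link and reporting carrier to the stack. 'net_dev' is hypothetical caller
++ * state; error handling is elided.
++ *
++ *	struct dpni_link_state state;
++ *
++ *	err = dpni_get_link_state(mc_io, 0, token, &state);
++ *	if (!err && state.up)
++ *		netif_carrier_on(net_dev);
++ */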
++
++/**
++ * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping configuration
++ * @rate_limit: rate in Mbps
++ * @max_burst_size: burst size in bytes (up to 64KB)
++ */
++struct dpni_tx_shaping_cfg {
++ u32 rate_limit;
++ u16 max_burst_size;
++};
++
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_shaping_cfg *tx_shaper);
++
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 max_frame_length);
++
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *max_frame_length);
++
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en);
++
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en);
++
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6]);
++
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 mac_addr[6]);
++
++int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cm_flags,
++ u16 token,
++ u8 mac_addr[6]);
++
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6]);
++
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6]);
++
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int unicast,
++ int multicast);
++
++/**
++ * enum dpni_dist_mode - DPNI distribution mode
++ * @DPNI_DIST_MODE_NONE: No distribution
++ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
++ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
++ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
++ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
++ */
++enum dpni_dist_mode {
++ DPNI_DIST_MODE_NONE = 0,
++ DPNI_DIST_MODE_HASH = 1,
++ DPNI_DIST_MODE_FS = 2
++};
++
++/**
++ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
++ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
++ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
++ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
++ */
++enum dpni_fs_miss_action {
++ DPNI_FS_MISS_DROP = 0,
++ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
++ DPNI_FS_MISS_HASH = 2
++};
++
++/**
++ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
++ * @miss_action: Miss action selection
++ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
++ */
++struct dpni_fs_tbl_cfg {
++ enum dpni_fs_miss_action miss_action;
++ u16 default_flow_id;
++};
++
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
++ u8 *key_cfg_buf);
++
++/**
++ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
++ * @dist_size: Set the distribution size;
++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
++ * 112,128,192,224,256,384,448,512,768,896,1024
++ * @dist_mode: Distribution mode
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ * the extractions to be used for the distribution key by calling
++ * dpni_prepare_key_cfg(); relevant only when
++ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
++ * @fs_cfg: Flow Steering table configuration; only relevant if
++ * 'dist_mode = DPNI_DIST_MODE_FS'
++ */
++struct dpni_rx_tc_dist_cfg {
++ u16 dist_size;
++ enum dpni_dist_mode dist_mode;
++ u64 key_cfg_iova;
++ struct dpni_fs_tbl_cfg fs_cfg;
++};
++
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg);
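++
++/* Illustrative sketch (editor's example, not part of the API): hashing Rx
++ * traffic of TC 0 across the allocated queues on the IP source/destination
++ * fields. 'key_mem' must be 256 bytes of DMA-able memory and 'key_iova' its
++ * mapped address; 'attr' is assumed to have been filled in by
++ * dpni_get_attributes(), and the dpkg extraction setup (see dpkg.h) is only
++ * sketched. Error handling is elided.
++ *
++ *	struct dpkg_profile_cfg kg_cfg = { 0 };
++ *	struct dpni_rx_tc_dist_cfg dist_cfg = { 0 };
++ *
++ *	kg_cfg.num_extracts = 2;
++ *	... fill kg_cfg.extracts[] with NET_PROT_IP / NH_FLD_IP_SRC, _DST ...
++ *	err = dpni_prepare_key_cfg(&kg_cfg, key_mem);
++ *	dist_cfg.dist_size = attr.num_queues;
++ *	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ *	dist_cfg.key_cfg_iova = key_iova;
++ *	err = dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist_cfg);
++ */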
++
++/**
++ * enum dpni_dest - DPNI destination types
++ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
++ * does not generate FQDAN notifications; user is expected to
++ * dequeue from the queue based on polling or other user-defined
++ * method
++ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to dequeue
++ * from the queue only after notification is received
++ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified DPCON
++ * object; user is expected to dequeue from the DPCON channel
++ */
++enum dpni_dest {
++ DPNI_DEST_NONE = 0,
++ DPNI_DEST_DPIO = 1,
++ DPNI_DEST_DPCON = 2
++};
++
++/**
++ * struct dpni_queue - Queue structure
++ * @user_context: User data, presented to the user along with any frames from
++ * this queue. Not relevant for Tx queues.
++ */
++struct dpni_queue {
++/**
++ * struct destination - Destination structure
++ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
++ * Identifies either a DPIO or a DPCON object. Not relevant for
++ * Tx queues.
++ * @type: May be one of the following:
++ * 0 - No destination, queue can be manually queried, but will not
++ * push traffic or notifications to a DPIO;
++ * 1 - The destination is a DPIO. When traffic becomes available in
++ * the queue a FQDAN (FQ data available notification) will be
++ * generated to selected DPIO;
++ * 2 - The destination is a DPCON. The queue is associated with a
++ * DPCON object for the purpose of scheduling between multiple
++ * queues. The DPCON may be independently configured to
++ * generate notifications. Not relevant for Tx queues.
++ * @hold_active: Hold active, maintains a queue scheduled for longer
++ * in a DPIO during dequeue to reduce spread of traffic.
++ * Only relevant if queues are not affined to a single DPIO.
++ */
++ struct {
++ u16 id;
++ enum dpni_dest type;
++ char hold_active;
++ u8 priority;
++ } destination;
++ u64 user_context;
++ struct {
++ u64 value;
++ char stash_control;
++ } flc;
++};
++
++/**
++ * struct dpni_queue_id - Queue identification, used for enqueue commands
++ * or queue control
++ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
++ * @qdbin: Queueing bin, used to enqueue using QDID, QDBIN, QPRI. Only relevant
++ * for Tx queues.
++ */
++struct dpni_queue_id {
++ u32 fqid;
++ u16 qdbin;
++};
++
++/**
++ * Set User Context
++ */
++#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
++#define DPNI_QUEUE_OPT_DEST 0x00000002
++#define DPNI_QUEUE_OPT_FLC 0x00000004
++#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
++
++int dpni_set_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ u8 options,
++ const struct dpni_queue *queue);
++
++int dpni_get_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_queue *queue,
++ struct dpni_queue_id *qid);
++
++/**
++ * enum dpni_congestion_unit - DPNI congestion units
++ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
++ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
++ */
++enum dpni_congestion_unit {
++ DPNI_CONGESTION_UNIT_BYTES = 0,
++ DPNI_CONGESTION_UNIT_FRAMES
++};
++
++/**
++ * enum dpni_congestion_point - Types of congestion points
++ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
++ * QUEUE_INDEX
++ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
++ * define the DPNI this can be either per TC (default) or per
++ * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
++ * QUEUE_INDEX is ignored if this type is used.
++ */
++enum dpni_congestion_point {
++ DPNI_CP_QUEUE,
++ DPNI_CP_GROUP,
++};
++
++/**
++ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid values
++ * are 0-1 or 0-7, depending on the number of priorities in that
++ * channel; not relevant for 'DPNI_DEST_NONE' option
++ */
++struct dpni_dest_cfg {
++ enum dpni_dest dest_type;
++ int dest_id;
++ u8 priority;
++};
++
++/* DPNI congestion options */
++
++/**
++ * CSCN message is written to message_iova once entering a
++ * congestion state (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
++/**
++ * CSCN message is written to message_iova once exiting a
++ * congestion state (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
++/**
++ * CSCN write will attempt to allocate into a cache (coherent write);
++ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
++ */
++#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once entering a congestion state
++ * (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once exiting a congestion state
++ * (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
++ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
++ */
++#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
++
++/**
++ * struct dpni_congestion_notification_cfg - congestion notification
++ * configuration
++ * @units: units type
++ * @threshold_entry: above this threshold we enter a congestion state.
++ * Set it to '0' to disable it.
++ * @threshold_exit: below this threshold we exit the congestion state.
++ * @message_ctx: The context that will be part of the CSCN message
++ * @message_iova: I/O virtual address (must be in DMA-able memory),
++ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
++ * contained in 'options'
++ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
++ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
++ */
++struct dpni_congestion_notification_cfg {
++ enum dpni_congestion_unit units;
++ u32 threshold_entry;
++ u32 threshold_exit;
++ u64 message_ctx;
++ u64 message_iova;
++ struct dpni_dest_cfg dest_cfg;
++ u16 notification_mode;
++};
++
++int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ const struct dpni_congestion_notification_cfg *cfg);
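++
++/* Illustrative sketch (editor's example, not part of the API): writing a
++ * CSCN message to memory when a Tx TC crosses 1024 frames and when it
++ * drains back under 768. 'cscn_iova' must point to 16-byte-aligned DMA-able
++ * memory; the thresholds are arbitrary. Error handling is elided.
++ *
++ *	struct dpni_congestion_notification_cfg cong = { 0 };
++ *
++ *	cong.units = DPNI_CONGESTION_UNIT_FRAMES;
++ *	cong.threshold_entry = 1024;
++ *	cong.threshold_exit = 768;
++ *	cong.message_iova = cscn_iova;
++ *	cong.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ *				 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;
++ *	err = dpni_set_congestion_notification(mc_io, 0, token,
++ *					       DPNI_QUEUE_TX, 0, &cong);
++ */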
++
++/**
++ * struct dpni_taildrop - Structure representing the taildrop
++ * @enable: Indicates whether the taildrop is active or not.
++ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
++ * byte units; this field is ignored and assumed = 0 if
++ * CONGESTION_POINT is 0.
++ * @threshold: Threshold value, in units identified by UNITS field. Value 0
++ * cannot be used as a valid taildrop threshold, THRESHOLD must
++ * be > 0 if the taildrop is enabled.
++ */
++struct dpni_taildrop {
++ char enable;
++ enum dpni_congestion_unit units;
++ u32 threshold;
++};
++
++int dpni_set_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type q_type,
++ u8 tc,
++ u8 q_index,
++ struct dpni_taildrop *taildrop);
++
++int dpni_get_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type q_type,
++ u8 tc,
++ u8 q_index,
++ struct dpni_taildrop *taildrop);
++
++/**
++ * struct dpni_rule_cfg - Rule configuration for table lookup
++ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
++ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
++ * @key_size: key and mask size (in bytes)
++ */
++struct dpni_rule_cfg {
++ u64 key_iova;
++ u64 mask_iova;
++ u8 key_size;
++};
++
++/**
++ * Discard matching traffic. If set, this takes precedence over any other
++ * configuration and matching traffic is always discarded.
++ */
++#define DPNI_FS_OPT_DISCARD 0x1
++
++/**
++ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
++ * override the FLC value set per queue.
++ * For more details check the Frame Descriptor section in the hardware
++ * documentation.
++ */
++#define DPNI_FS_OPT_SET_FLC 0x2
++
++/*
++ * Indicates whether the 6 least significant bits of FLC are used for stash
++ * control. If set, the 6 least significant bits in value are interpreted as
++ * follows:
++ * - bits 0-1: indicates the number of 64 byte units of context that are
++ * stashed. FLC value is interpreted as a memory address in this case,
++ * excluding the 6 LS bits.
++ * - bits 2-3: indicates the number of 64 byte units of frame annotation
++ * to be stashed. Annotation is placed at FD[ADDR].
++ * - bits 4-5: indicates the number of 64 byte units of frame data to be
++ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
++ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
++ */
++#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
++
++/**
++ * struct dpni_fs_action_cfg - Action configuration for table look-up
++ * @flc: FLC value for traffic matching this rule. Please check the Frame
++ * Descriptor section in the hardware documentation for more information.
++ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
++ * values are in range 0 to num_queues - 1.
++ * @options: Any combination of DPNI_FS_OPT_ values.
++ */
++struct dpni_fs_action_cfg {
++ u64 flc;
++ u16 flow_id;
++ u16 options;
++};
++
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ u16 index,
++ const struct dpni_rule_cfg *cfg,
++ const struct dpni_fs_action_cfg *action);
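++
++/* Illustrative sketch (editor's example, not part of the API): steering
++ * traffic that matches a pre-composed key to Rx queue 2 of TC 0.
++ * 'key_iova'/'mask_iova' must point to DMA-able buffers laid out according
++ * to the configured key composition, with 'key_size' their length; error
++ * handling is elided.
++ *
++ *	struct dpni_rule_cfg rule = { 0 };
++ *	struct dpni_fs_action_cfg action = { 0 };
++ *
++ *	rule.key_iova = key_iova;
++ *	rule.mask_iova = mask_iova;
++ *	rule.key_size = key_size;
++ *	action.flow_id = 2;
++ *	err = dpni_add_fs_entry(mc_io, 0, token, 0, 0, &rule, &action);
++ */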
++
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rule_cfg *cfg);
++
++#endif /* __FSL_DPNI_H */
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/net.h b/drivers/staging/fsl-dpaa2/ethernet/net.h
+new file mode 100644
+index 00000000..5020dee1
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
+@@ -0,0 +1,480 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_NET_H
++#define __FSL_NET_H
++
++#define LAST_HDR_INDEX 0xFFFFFFFF
++
++/*****************************************************************************/
++/* Protocol fields */
++/*****************************************************************************/
++
++/************************* Ethernet fields *********************************/
++#define NH_FLD_ETH_DA (1)
++#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
++#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
++#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
++#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
++#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
++#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
++
++#define NH_FLD_ETH_ADDR_SIZE 6
++
++/*************************** VLAN fields ***********************************/
++#define NH_FLD_VLAN_VPRI (1)
++#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
++#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
++#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
++#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
++#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
++
++#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
++ NH_FLD_VLAN_CFI | \
++ NH_FLD_VLAN_VID)
++
++/************************ IP (generic) fields ******************************/
++#define NH_FLD_IP_VER (1)
++#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
++#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
++#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
++#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
++#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
++#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
++#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
++#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
++
++#define NH_FLD_IP_PROTO_SIZE 1
++
++/***************************** IPV4 fields *********************************/
++#define NH_FLD_IPV4_VER (1)
++#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
++#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
++#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
++#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
++#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
++#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
++#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
++#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
++#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
++#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
++#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
++#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
++#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
++#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
++#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
++
++#define NH_FLD_IPV4_ADDR_SIZE 4
++#define NH_FLD_IPV4_PROTO_SIZE 1
++
++/***************************** IPV6 fields *********************************/
++#define NH_FLD_IPV6_VER (1)
++#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
++#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
++#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
++#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
++#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
++#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
++#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
++#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
++
++#define NH_FLD_IPV6_ADDR_SIZE 16
++#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
++
++/***************************** ICMP fields *********************************/
++#define NH_FLD_ICMP_TYPE (1)
++#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
++#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
++#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
++#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
++#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
++
++#define NH_FLD_ICMP_CODE_SIZE 1
++#define NH_FLD_ICMP_TYPE_SIZE 1
++
++/***************************** IGMP fields *********************************/
++#define NH_FLD_IGMP_VERSION (1)
++#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
++#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
++#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
++#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
++
++/***************************** TCP fields **********************************/
++#define NH_FLD_TCP_PORT_SRC (1)
++#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
++#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
++#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
++#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
++#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
++#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
++#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
++#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
++#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
++#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
++#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
++
++#define NH_FLD_TCP_PORT_SIZE 2
++
++/***************************** UDP fields **********************************/
++#define NH_FLD_UDP_PORT_SRC (1)
++#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
++#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
++#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
++#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
++
++#define NH_FLD_UDP_PORT_SIZE 2
++
++/*************************** UDP-lite fields *******************************/
++#define NH_FLD_UDP_LITE_PORT_SRC (1)
++#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
++#define NH_FLD_UDP_LITE_ALL_FIELDS \
++ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
++
++#define NH_FLD_UDP_LITE_PORT_SIZE 2
++
++/*************************** UDP-encap-ESP fields **************************/
++#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
++#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
++#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
++#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
++#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
++#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
++#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
++ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
++
++#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
++#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
++
++/***************************** SCTP fields *********************************/
++#define NH_FLD_SCTP_PORT_SRC (1)
++#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
++#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
++#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
++#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
++
++#define NH_FLD_SCTP_PORT_SIZE 2
++
++/***************************** DCCP fields *********************************/
++#define NH_FLD_DCCP_PORT_SRC (1)
++#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
++#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
++
++#define NH_FLD_DCCP_PORT_SIZE 2
++
++/***************************** IPHC fields *********************************/
++#define NH_FLD_IPHC_CID (1)
++#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
++#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
++#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
++#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
++#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
++
++/************************ SCTP chunk data fields ***************************/
++#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
++#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
++#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
++#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
++#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
++#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
++#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
++#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
++#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
++ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
++
++/*************************** L2TPV2 fields *********************************/
++#define NH_FLD_L2TPV2_TYPE_BIT (1)
++#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
++#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
++#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
++#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
++#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
++#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
++#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
++#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
++#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
++#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
++#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
++#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
++#define NH_FLD_L2TPV2_ALL_FIELDS \
++ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
++
++/*************************** L2TPV3 fields *********************************/
++#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
++#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
++#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
++#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
++#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
++#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
++
++#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
++
++/**************************** PPP fields ***********************************/
++#define NH_FLD_PPP_PID (1)
++#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
++#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
++
++/************************** PPPoE fields ***********************************/
++#define NH_FLD_PPPOE_VER (1)
++#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
++#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
++#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
++#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
++#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
++#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
++#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
++
++/************************* PPP-Mux fields **********************************/
++#define NH_FLD_PPPMUX_PID (1)
++#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
++#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
++#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
++
++/*********************** PPP-Mux sub-frame fields **************************/
++#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
++#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
++#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
++#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
++#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
++#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
++ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
++
++/*************************** LLC fields ************************************/
++#define NH_FLD_LLC_DSAP (1)
++#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
++#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
++#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
++
++/*************************** NLPID fields **********************************/
++#define NH_FLD_NLPID_NLPID (1)
++#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
++
++/*************************** SNAP fields ***********************************/
++#define NH_FLD_SNAP_OUI (1)
++#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
++#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
++
++/*************************** LLC SNAP fields *******************************/
++#define NH_FLD_LLC_SNAP_TYPE (1)
++#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
++
++/**************************** ARP fields ***********************************/
++#define NH_FLD_ARP_HTYPE (1)
++#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
++#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
++#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
++#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
++#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
++#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
++#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
++#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
++#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
++
++/*************************** RFC2684 fields ********************************/
++#define NH_FLD_RFC2684_LLC (1)
++#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
++#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
++#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
++#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
++#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
++#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
++
++/*************************** User defined fields ***************************/
++#define NH_FLD_USER_DEFINED_SRCPORT (1)
++#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
++#define NH_FLD_USER_DEFINED_ALL_FIELDS \
++ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
++
++/*************************** Payload fields ********************************/
++#define NH_FLD_PAYLOAD_BUFFER (1)
++#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
++#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
++#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
++#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
++#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
++#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
++
++/*************************** GRE fields ************************************/
++#define NH_FLD_GRE_TYPE (1)
++#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
++
++/*************************** MINENCAP fields *******************************/
++#define NH_FLD_MINENCAP_SRC_IP (1)
++#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
++#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
++#define NH_FLD_MINENCAP_ALL_FIELDS \
++ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
++
++/*************************** IPSEC AH fields *******************************/
++#define NH_FLD_IPSEC_AH_SPI (1)
++#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
++#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
++
++/*************************** IPSEC ESP fields ******************************/
++#define NH_FLD_IPSEC_ESP_SPI (1)
++#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
++#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
++
++#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
++
++/*************************** MPLS fields ***********************************/
++#define NH_FLD_MPLS_LABEL_STACK (1)
++#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
++ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
++
++/*************************** MACSEC fields *********************************/
++#define NH_FLD_MACSEC_SECTAG (1)
++#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
++
++/*************************** GTP fields ************************************/
++#define NH_FLD_GTP_TEID (1)
++
++/* Protocol options */
++
++/* Ethernet options */
++#define NH_OPT_ETH_BROADCAST 1
++#define NH_OPT_ETH_MULTICAST 2
++#define NH_OPT_ETH_UNICAST 3
++#define NH_OPT_ETH_BPDU 4
++
++#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
++/* also applicable for broadcast */
++
++/* VLAN options */
++#define NH_OPT_VLAN_CFI 1
++
++/* IPV4 options */
++#define NH_OPT_IPV4_UNICAST 1
++#define NH_OPT_IPV4_MULTICAST 2
++#define NH_OPT_IPV4_BROADCAST 3
++#define NH_OPT_IPV4_OPTION 4
++#define NH_OPT_IPV4_FRAG 5
++#define NH_OPT_IPV4_INITIAL_FRAG 6
++
++/* IPV6 options */
++#define NH_OPT_IPV6_UNICAST 1
++#define NH_OPT_IPV6_MULTICAST 2
++#define NH_OPT_IPV6_OPTION 3
++#define NH_OPT_IPV6_FRAG 4
++#define NH_OPT_IPV6_INITIAL_FRAG 5
++
++/* General IP options (may be used for any version) */
++#define NH_OPT_IP_FRAG 1
++#define NH_OPT_IP_INITIAL_FRAG 2
++#define NH_OPT_IP_OPTION 3
++
++/* MinEncap options */
++#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
++
++/* GRE options */
++#define NH_OPT_GRE_ROUTING_PRESENT 1
++
++/* TCP options */
++#define NH_OPT_TCP_OPTIONS 1
++#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
++#define NH_OPT_TCP_CONTROL_LOW_BITS 3
++
++/* CAPWAP options */
++#define NH_OPT_CAPWAP_DTLS 1
++
++enum net_prot {
++ NET_PROT_NONE = 0,
++ NET_PROT_PAYLOAD,
++ NET_PROT_ETH,
++ NET_PROT_VLAN,
++ NET_PROT_IPV4,
++ NET_PROT_IPV6,
++ NET_PROT_IP,
++ NET_PROT_TCP,
++ NET_PROT_UDP,
++ NET_PROT_UDP_LITE,
++ NET_PROT_IPHC,
++ NET_PROT_SCTP,
++ NET_PROT_SCTP_CHUNK_DATA,
++ NET_PROT_PPPOE,
++ NET_PROT_PPP,
++ NET_PROT_PPPMUX,
++ NET_PROT_PPPMUX_SUBFRM,
++ NET_PROT_L2TPV2,
++ NET_PROT_L2TPV3_CTRL,
++ NET_PROT_L2TPV3_SESS,
++ NET_PROT_LLC,
++ NET_PROT_LLC_SNAP,
++ NET_PROT_NLPID,
++ NET_PROT_SNAP,
++ NET_PROT_MPLS,
++ NET_PROT_IPSEC_AH,
++ NET_PROT_IPSEC_ESP,
++ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
++ NET_PROT_MACSEC,
++ NET_PROT_GRE,
++ NET_PROT_MINENCAP,
++ NET_PROT_DCCP,
++ NET_PROT_ICMP,
++ NET_PROT_IGMP,
++ NET_PROT_ARP,
++ NET_PROT_CAPWAP_DATA,
++ NET_PROT_CAPWAP_CTRL,
++ NET_PROT_RFC2684,
++ NET_PROT_ICMPV6,
++ NET_PROT_FCOE,
++ NET_PROT_FIP,
++ NET_PROT_ISCSI,
++ NET_PROT_GTP,
++ NET_PROT_USER_DEFINED_L2,
++ NET_PROT_USER_DEFINED_L3,
++ NET_PROT_USER_DEFINED_L4,
++ NET_PROT_USER_DEFINED_L5,
++ NET_PROT_USER_DEFINED_SHIM1,
++ NET_PROT_USER_DEFINED_SHIM2,
++
++ NET_PROT_DUMMY_LAST
++};
++
++/*! IEEE8021.Q */
++#define NH_IEEE8021Q_ETYPE 0x8100
++#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
++ ((((u32)((etype) & 0xFFFF)) << 16) | \
++ (((u32)((pcp) & 0x07)) << 13) | \
++ (((u32)((dei) & 0x01)) << 12) | \
++ (((u32)((vlan_id) & 0xFFF))))
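As a quick sanity check on the packing above, one illustrative instantiation (values chosen arbitrarily) works out to:

	/* NH_IEEE8021Q_HDR(0x8100, 5, 0, 100) == 0x8100a064 */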
++
++#endif /* __FSL_NET_H */
+diff --git a/drivers/staging/fsl-dpaa2/ethsw/Kconfig b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
+new file mode 100644
+index 00000000..06c70408
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
+@@ -0,0 +1,6 @@
++config FSL_DPAA2_ETHSW
++ tristate "DPAA2 Ethernet Switch"
++ depends on FSL_MC_BUS && FSL_DPAA2
++ default y
++ ---help---
++ Prototype driver for DPAA2 Ethernet Switch.
+diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile
+new file mode 100644
+index 00000000..20eb3ac4
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
+@@ -0,0 +1,10 @@
++
++obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
++
++dpaa2-ethsw-objs := switch.o dpsw.o
++
++all:
++ $(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++
++clean:
++ $(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
+diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+new file mode 100644
+index 00000000..f7374d1c
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+@@ -0,0 +1,851 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPSW_CMD_H
++#define __FSL_DPSW_CMD_H
++
++/* DPSW Version */
++#define DPSW_VER_MAJOR 8
++#define DPSW_VER_MINOR 0
++
++#define DPSW_CMD_BASE_VERSION 1
++#define DPSW_CMD_ID_OFFSET 4
++
++#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
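For orientation: each command ID sits above a 4-bit version nibble, so the open command defined just below encodes, for example, as:

	/* DPSW_CMDID_OPEN == (0x802 << 4) | 1 == 0x8021 */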
++
++/* Command IDs */
++#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
++#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
++
++#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
++
++#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
++#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
++#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
++#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
++#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006)
++
++#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010)
++#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011)
++#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
++#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013)
++#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
++#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015)
++#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
++#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
++
++#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
++
++#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024)
++
++#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026)
++
++#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
++#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
++#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032)
++#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033)
++#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
++#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035)
++#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036)
++#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
++#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
++#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039)
++#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A)
++#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B)
++
++#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
++#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
++
++#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
++
++#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
++#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x045)
++#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
++#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
++#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
++#define DPSW_CMDID_IF_SET_MULTICAST DPSW_CMD_ID(0x049)
++#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
++
++#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
++
++#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
++#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
++#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
++#define DPSW_CMDID_VLAN_ADD_IF_FLOODING DPSW_CMD_ID(0x063)
++#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
++#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
++#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
++#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
++#define DPSW_CMDID_VLAN_GET_IF DPSW_CMD_ID(0x068)
++#define DPSW_CMDID_VLAN_GET_IF_FLOODING DPSW_CMD_ID(0x069)
++#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED DPSW_CMD_ID(0x06A)
++#define DPSW_CMDID_VLAN_GET_ATTRIBUTES DPSW_CMD_ID(0x06B)
++
++#define DPSW_CMDID_FDB_GET_MULTICAST DPSW_CMD_ID(0x080)
++#define DPSW_CMDID_FDB_GET_UNICAST DPSW_CMD_ID(0x081)
++#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
++#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
++#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
++#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
++#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
++#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
++#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
++#define DPSW_CMDID_FDB_GET_ATTR DPSW_CMD_ID(0x089)
++
++#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
++#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
++#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
++#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
++#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
++#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
++#define DPSW_CMDID_ACL_GET_ATTR DPSW_CMD_ID(0x096)
++
++#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
++#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
++#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
++#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
++
++/* Macros for accessing command fields smaller than 1byte */
++#define DPSW_MASK(field) \
++ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
++ DPSW_##field##_SHIFT)
++#define dpsw_set_field(var, field, val) \
++ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
++#define dpsw_get_field(var, field) \
++ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
++#define dpsw_get_bit(var, bit) \
++ (((var) >> (bit)) & GENMASK(0, 0))
++
++static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val)
++{
++ var |= (u64)val << bit & GENMASK(bit, bit);
++ return var;
++}
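A minimal standalone sketch of how DPSW_MASK / dpsw_set_field / dpsw_get_field compose; GENMASK and the VLAN_ID/DEI/PCP fields (defined further down in this header) are re-derived here only so the snippet compiles outside the kernel:

	#include <stdio.h>
	#include <stdint.h>

	#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

	#define DPSW_VLAN_ID_SHIFT	0
	#define DPSW_VLAN_ID_SIZE	12
	#define DPSW_DEI_SHIFT		12
	#define DPSW_DEI_SIZE		1
	#define DPSW_PCP_SHIFT		13
	#define DPSW_PCP_SIZE		3

	#define DPSW_MASK(field) \
		GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
			DPSW_##field##_SHIFT)
	#define dpsw_set_field(var, field, val) \
		((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
	#define dpsw_get_field(var, field) \
		(((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)

	int main(void)
	{
		uint16_t tci = 0;

		/* pack PCP=5, DEI=1, VLAN=100 into one 16-bit TCI word */
		dpsw_set_field(tci, VLAN_ID, 100);
		dpsw_set_field(tci, DEI, 1);
		dpsw_set_field(tci, PCP, 5);
		printf("tci = 0x%04x, vlan = %u\n", (unsigned)tci,
		       (unsigned)dpsw_get_field(tci, VLAN_ID)); /* 0xb064, 100 */
		return 0;
	}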
++
++struct dpsw_cmd_open {
++ __le32 dpsw_id;
++};
++
++#define DPSW_COMPONENT_TYPE_SHIFT 0
++#define DPSW_COMPONENT_TYPE_SIZE 4
++
++struct dpsw_cmd_create {
++ /* cmd word 0 */
++ __le16 num_ifs;
++ u8 max_fdbs;
++ u8 max_meters_per_if;
++ /* from LSB: only the first 4 bits */
++ u8 component_type;
++ u8 pad[3];
++ /* cmd word 1 */
++ __le16 max_vlans;
++ __le16 max_fdb_entries;
++ __le16 fdb_aging_time;
++ __le16 max_fdb_mc_groups;
++ /* cmd word 2 */
++ __le64 options;
++};
++
++struct dpsw_cmd_destroy {
++ __le32 dpsw_id;
++};
++
++#define DPSW_ENABLE_SHIFT 0
++#define DPSW_ENABLE_SIZE 1
++
++struct dpsw_rsp_is_enabled {
++ /* from LSB: enable:1 */
++ u8 enabled;
++};
++
++struct dpsw_cmd_set_irq {
++ /* cmd word 0 */
++ u8 irq_index;
++ u8 pad[3];
++ __le32 irq_val;
++ /* cmd word 1 */
++ __le64 irq_addr;
++ /* cmd word 2 */
++ __le32 irq_num;
++};
++
++struct dpsw_cmd_get_irq {
++ __le32 pad;
++ u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq {
++ /* cmd word 0 */
++ __le32 irq_val;
++ __le32 pad;
++ /* cmd word 1 */
++ __le64 irq_addr;
++ /* cmd word 2 */
++ __le32 irq_num;
++ __le32 irq_type;
++};
++
++struct dpsw_cmd_set_irq_enable {
++ u8 enable_state;
++ u8 pad[3];
++ u8 irq_index;
++};
++
++struct dpsw_cmd_get_irq_enable {
++ __le32 pad;
++ u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq_enable {
++ u8 enable_state;
++};
++
++struct dpsw_cmd_set_irq_mask {
++ __le32 mask;
++ u8 irq_index;
++};
++
++struct dpsw_cmd_get_irq_mask {
++ __le32 pad;
++ u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq_mask {
++ __le32 mask;
++};
++
++struct dpsw_cmd_get_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq_status {
++ __le32 status;
++};
++
++struct dpsw_cmd_clear_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
++
++#define DPSW_COMPONENT_TYPE_SHIFT 0
++#define DPSW_COMPONENT_TYPE_SIZE 4
++
++struct dpsw_rsp_get_attr {
++ /* cmd word 0 */
++ __le16 num_ifs;
++ u8 max_fdbs;
++ u8 num_fdbs;
++ __le16 max_vlans;
++ __le16 num_vlans;
++ /* cmd word 1 */
++ __le16 max_fdb_entries;
++ __le16 fdb_aging_time;
++ __le32 dpsw_id;
++ /* cmd word 2 */
++ __le16 mem_size;
++ __le16 max_fdb_mc_groups;
++ u8 max_meters_per_if;
++ /* from LSB: only the first 4 bits */
++ u8 component_type;
++ __le16 pad;
++ /* cmd word 3 */
++ __le64 options;
++};
++
++struct dpsw_cmd_set_reflection_if {
++ __le16 if_id;
++};
++
++struct dpsw_cmd_if_set_flooding {
++ __le16 if_id;
++ /* from LSB: enable:1 */
++ u8 enable;
++};
++
++struct dpsw_cmd_if_set_broadcast {
++ __le16 if_id;
++ /* from LSB: enable:1 */
++ u8 enable;
++};
++
++struct dpsw_cmd_if_set_multicast {
++ __le16 if_id;
++ /* from LSB: enable:1 */
++ u8 enable;
++};
++
++#define DPSW_VLAN_ID_SHIFT 0
++#define DPSW_VLAN_ID_SIZE 12
++#define DPSW_DEI_SHIFT 12
++#define DPSW_DEI_SIZE 1
++#define DPSW_PCP_SHIFT 13
++#define DPSW_PCP_SIZE 3
++
++struct dpsw_cmd_if_set_tci {
++ __le16 if_id;
++ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
++ __le16 conf;
++};
++
++struct dpsw_cmd_if_get_tci {
++ __le16 if_id;
++};
++
++struct dpsw_rsp_if_get_tci {
++ __le16 pad;
++ __le16 vlan_id;
++ u8 dei;
++ u8 pcp;
++};
++
++#define DPSW_STATE_SHIFT 0
++#define DPSW_STATE_SIZE 4
++
++struct dpsw_cmd_if_set_stp {
++ __le16 if_id;
++ __le16 vlan_id;
++ /* only the first LSB 4 bits */
++ u8 state;
++};
++
++#define DPSW_FRAME_TYPE_SHIFT 0
++#define DPSW_FRAME_TYPE_SIZE 4
++#define DPSW_UNACCEPTED_ACT_SHIFT 4
++#define DPSW_UNACCEPTED_ACT_SIZE 4
++
++struct dpsw_cmd_if_set_accepted_frames {
++ __le16 if_id;
++ /* from LSB: type:4 unaccepted_act:4 */
++ u8 unaccepted;
++};
++
++#define DPSW_ACCEPT_ALL_SHIFT 0
++#define DPSW_ACCEPT_ALL_SIZE 1
++
++struct dpsw_cmd_if_set_accept_all_vlan {
++ __le16 if_id;
++ /* only the least significant bit */
++ u8 accept_all;
++};
++
++#define DPSW_COUNTER_TYPE_SHIFT 0
++#define DPSW_COUNTER_TYPE_SIZE 5
++
++struct dpsw_cmd_if_get_counter {
++ __le16 if_id;
++ /* from LSB: type:5 */
++ u8 type;
++};
++
++struct dpsw_rsp_if_get_counter {
++ __le64 pad;
++ __le64 counter;
++};
++
++struct dpsw_cmd_if_set_counter {
++ /* cmd word 0 */
++ __le16 if_id;
++ /* from LSB: type:5 */
++ u8 type;
++ /* cmd word 1 */
++ __le64 counter;
++};
++
++#define DPSW_PRIORITY_SELECTOR_SHIFT 0
++#define DPSW_PRIORITY_SELECTOR_SIZE 3
++#define DPSW_SCHED_MODE_SHIFT 0
++#define DPSW_SCHED_MODE_SIZE 4
++
++struct dpsw_cmd_if_set_tx_selection {
++ __le16 if_id;
++ /* from LSB: priority_selector:3 */
++ u8 priority_selector;
++ u8 pad[5];
++ u8 tc_id[8];
++
++ struct dpsw_tc_sched {
++ __le16 delta_bandwidth;
++ u8 mode;
++ u8 pad;
++ } tc_sched[8];
++};
++
++#define DPSW_FILTER_SHIFT 0
++#define DPSW_FILTER_SIZE 2
++
++struct dpsw_cmd_if_reflection {
++ __le16 if_id;
++ __le16 vlan_id;
++ /* only 2 bits from the LSB */
++ u8 filter;
++};
++
++#define DPSW_MODE_SHIFT 0
++#define DPSW_MODE_SIZE 4
++#define DPSW_UNITS_SHIFT 4
++#define DPSW_UNITS_SIZE 4
++
++struct dpsw_cmd_if_set_flooding_metering {
++ /* cmd word 0 */
++ __le16 if_id;
++ u8 pad;
++ /* from LSB: mode:4 units:4 */
++ u8 mode_units;
++ __le32 cir;
++ /* cmd word 1 */
++ __le32 eir;
++ __le32 cbs;
++ /* cmd word 2 */
++ __le32 ebs;
++};
++
++struct dpsw_cmd_if_set_metering {
++ /* cmd word 0 */
++ __le16 if_id;
++ u8 tc_id;
++ /* from LSB: mode:4 units:4 */
++ u8 mode_units;
++ __le32 cir;
++ /* cmd word 1 */
++ __le32 eir;
++ __le32 cbs;
++ /* cmd word 2 */
++ __le32 ebs;
++};
++
++#define DPSW_EARLY_DROP_MODE_SHIFT 0
++#define DPSW_EARLY_DROP_MODE_SIZE 2
++#define DPSW_EARLY_DROP_UNIT_SHIFT 2
++#define DPSW_EARLY_DROP_UNIT_SIZE 2
++
++struct dpsw_prep_early_drop {
++ /* from LSB: mode:2 units:2 */
++ u8 conf;
++ u8 pad0[3];
++ __le32 tail_drop_threshold;
++ u8 green_drop_probability;
++ u8 pad1[7];
++ __le64 green_max_threshold;
++ __le64 green_min_threshold;
++ __le64 pad2;
++ u8 yellow_drop_probability;
++ u8 pad3[7];
++ __le64 yellow_max_threshold;
++ __le64 yellow_min_threshold;
++};
++
++struct dpsw_cmd_if_set_early_drop {
++ /* cmd word 0 */
++ u8 pad0;
++ u8 tc_id;
++ __le16 if_id;
++ __le32 pad1;
++ /* cmd word 1 */
++ __le64 early_drop_iova;
++};
++
++struct dpsw_cmd_custom_tpid {
++ __le16 pad;
++ __le16 tpid;
++};
++
++struct dpsw_cmd_if {
++ __le16 if_id;
++};
++
++#define DPSW_ADMIT_UNTAGGED_SHIFT 0
++#define DPSW_ADMIT_UNTAGGED_SIZE 4
++#define DPSW_ENABLED_SHIFT 5
++#define DPSW_ENABLED_SIZE 1
++#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
++#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
++
++struct dpsw_rsp_if_get_attr {
++ /* cmd word 0 */
++ /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
++ u8 conf;
++ u8 pad1;
++ u8 num_tcs;
++ u8 pad2;
++ __le16 qdid;
++ /* cmd word 1 */
++ __le32 options;
++ __le32 pad3;
++ /* cmd word 2 */
++ __le32 rate;
++};
++
++struct dpsw_cmd_if_set_max_frame_length {
++ __le16 if_id;
++ __le16 frame_length;
++};
++
++struct dpsw_cmd_if_get_max_frame_length {
++ __le16 if_id;
++};
++
++struct dpsw_rsp_if_get_max_frame_length {
++ __le16 pad;
++ __le16 frame_length;
++};
++
++struct dpsw_cmd_if_set_link_cfg {
++ /* cmd word 0 */
++ __le16 if_id;
++ u8 pad[6];
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad1;
++ /* cmd word 2 */
++ __le64 options;
++};
++
++struct dpsw_cmd_if_get_link_state {
++ __le16 if_id;
++};
++
++#define DPSW_UP_SHIFT 0
++#define DPSW_UP_SIZE 1
++
++struct dpsw_rsp_if_get_link_state {
++ /* cmd word 0 */
++ __le32 pad0;
++ u8 up;
++ u8 pad1[3];
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad2;
++ /* cmd word 2 */
++ __le64 options;
++};
++
++struct dpsw_vlan_add {
++ __le16 fdb_id;
++ __le16 vlan_id;
++};
++
++struct dpsw_cmd_vlan_manage_if {
++ /* cmd word 0 */
++ __le16 pad0;
++ __le16 vlan_id;
++ __le32 pad1;
++ /* cmd word 1 */
++ __le64 if_id[4];
++};
++
++struct dpsw_cmd_vlan_remove {
++ __le16 pad;
++ __le16 vlan_id;
++};
++
++struct dpsw_cmd_vlan_get_attr {
++ __le16 vlan_id;
++};
++
++struct dpsw_rsp_vlan_get_attr {
++ /* cmd word 0 */
++ __le64 pad;
++ /* cmd word 1 */
++ __le16 fdb_id;
++ __le16 num_ifs;
++ __le16 num_untagged_ifs;
++ __le16 num_flooding_ifs;
++};
++
++struct dpsw_cmd_vlan_get_if {
++ __le16 vlan_id;
++};
++
++struct dpsw_rsp_vlan_get_if {
++ /* cmd word 0 */
++ __le16 pad0;
++ __le16 num_ifs;
++ u8 pad1[4];
++ /* cmd word 1 */
++ __le64 if_id[4];
++};
++
++struct dpsw_cmd_vlan_get_if_untagged {
++ __le16 vlan_id;
++};
++
++struct dpsw_rsp_vlan_get_if_untagged {
++ /* cmd word 0 */
++ __le16 pad0;
++ __le16 num_ifs;
++ u8 pad1[4];
++ /* cmd word 1 */
++ __le64 if_id[4];
++};
++
++struct dpsw_cmd_vlan_get_if_flooding {
++ __le16 vlan_id;
++};
++
++struct dpsw_rsp_vlan_get_if_flooding {
++ /* cmd word 0 */
++ __le16 pad0;
++ __le16 num_ifs;
++ u8 pad1[4];
++ /* cmd word 1 */
++ __le64 if_id[4];
++};
++
++struct dpsw_cmd_fdb_add {
++ __le32 pad;
++ __le16 fdb_aging_time;
++ __le16 num_fdb_entries;
++};
++
++struct dpsw_rsp_fdb_add {
++ __le16 fdb_id;
++};
++
++struct dpsw_cmd_fdb_remove {
++ __le16 fdb_id;
++};
++
++#define DPSW_ENTRY_TYPE_SHIFT 0
++#define DPSW_ENTRY_TYPE_SIZE 4
++
++struct dpsw_cmd_fdb_add_unicast {
++ /* cmd word 0 */
++ __le16 fdb_id;
++ u8 mac_addr[6];
++ /* cmd word 1 */
++ u8 if_egress;
++ u8 pad;
++ /* only the first 4 bits from LSB */
++ u8 type;
++};
++
++struct dpsw_cmd_fdb_get_unicast {
++ __le16 fdb_id;
++ u8 mac_addr[6];
++};
++
++struct dpsw_rsp_fdb_get_unicast {
++ __le64 pad;
++ __le16 if_egress;
++ /* only first 4 bits from LSB */
++ u8 type;
++};
++
++struct dpsw_cmd_fdb_remove_unicast {
++ /* cmd word 0 */
++ __le16 fdb_id;
++ u8 mac_addr[6];
++ /* cmd word 1 */
++ __le16 if_egress;
++ /* only the first 4 bits from LSB */
++ u8 type;
++};
++
++struct dpsw_cmd_fdb_add_multicast {
++ /* cmd word 0 */
++ __le16 fdb_id;
++ __le16 num_ifs;
++ /* only the first 4 bits from LSB */
++ u8 type;
++ u8 pad[3];
++ /* cmd word 1 */
++ u8 mac_addr[6];
++ __le16 pad2;
++ /* cmd word 2 */
++ __le64 if_id[4];
++};
++
++struct dpsw_cmd_fdb_get_multicast {
++ __le16 fdb_id;
++ u8 mac_addr[6];
++};
++
++struct dpsw_rsp_fdb_get_multicast {
++ /* cmd word 0 */
++ __le64 pad0;
++ /* cmd word 1 */
++ __le16 num_ifs;
++ /* only the first 4 bits from LSB */
++ u8 type;
++ u8 pad1[5];
++ /* cmd word 2 */
++ __le64 if_id[4];
++};
++
++struct dpsw_cmd_fdb_remove_multicast {
++ /* cmd word 0 */
++ __le16 fdb_id;
++ __le16 num_ifs;
++ /* only the first 4 bits from LSB */
++ u8 type;
++ u8 pad[3];
++ /* cmd word 1 */
++ u8 mac_addr[6];
++ __le16 pad2;
++ /* cmd word 2 */
++ __le64 if_id[4];
++};
++
++#define DPSW_LEARNING_MODE_SHIFT 0
++#define DPSW_LEARNING_MODE_SIZE 4
++
++struct dpsw_cmd_fdb_set_learning_mode {
++ __le16 fdb_id;
++ /* only the first 4 bits from LSB */
++ u8 mode;
++};
++
++struct dpsw_cmd_fdb_get_attr {
++ __le16 fdb_id;
++};
++
++struct dpsw_rsp_fdb_get_attr {
++ /* cmd word 0 */
++ __le16 pad;
++ __le16 max_fdb_entries;
++ __le16 fdb_aging_time;
++ __le16 num_fdb_mc_groups;
++ /* cmd word 1 */
++ __le16 max_fdb_mc_groups;
++ /* only the first 4 bits from LSB */
++ u8 learning_mode;
++};
++
++struct dpsw_cmd_acl_add {
++ __le16 pad;
++ __le16 max_entries;
++};
++
++struct dpsw_rsp_acl_add {
++ __le16 acl_id;
++};
++
++struct dpsw_cmd_acl_remove {
++ __le16 acl_id;
++};
++
++struct dpsw_prep_acl_entry {
++ u8 match_l2_dest_mac[6];
++ __le16 match_l2_tpid;
++
++ u8 match_l2_source_mac[6];
++ __le16 match_l2_vlan_id;
++
++ __le32 match_l3_dest_ip;
++ __le32 match_l3_source_ip;
++
++ __le16 match_l4_dest_port;
++ __le16 match_l4_source_port;
++ __le16 match_l2_ether_type;
++ u8 match_l2_pcp_dei;
++ u8 match_l3_dscp;
++
++ u8 mask_l2_dest_mac[6];
++ __le16 mask_l2_tpid;
++
++ u8 mask_l2_source_mac[6];
++ __le16 mask_l2_vlan_id;
++
++ __le32 mask_l3_dest_ip;
++ __le32 mask_l3_source_ip;
++
++ __le16 mask_l4_dest_port;
++ __le16 mask_l4_source_port;
++ __le16 mask_l2_ether_type;
++ u8 mask_l2_pcp_dei;
++ u8 mask_l3_dscp;
++
++ u8 match_l3_protocol;
++ u8 mask_l3_protocol;
++};
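An illustrative way to fill a match/mask pair in this structure, assuming the usual ACL convention that a zero mask bit wildcards the corresponding field (the values here are hypothetical): match any TCP frame to destination port 80 and ignore everything else.

	struct dpsw_prep_acl_entry entry = { 0 };

	entry.match_l3_protocol = 6;			/* IPPROTO_TCP */
	entry.mask_l3_protocol = 0xff;
	entry.match_l4_dest_port = cpu_to_le16(80);
	entry.mask_l4_dest_port = cpu_to_le16(0xffff);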
++
++#define DPSW_RESULT_ACTION_SHIFT 0
++#define DPSW_RESULT_ACTION_SIZE 4
++
++struct dpsw_cmd_acl_entry {
++ __le16 acl_id;
++ __le16 result_if_id;
++ __le32 precedence;
++ /* from LSB only the first 4 bits */
++ u8 result_action;
++ u8 pad[7];
++ __le64 pad2[4];
++ __le64 key_iova;
++};
++
++struct dpsw_cmd_acl_if {
++ /* cmd word 0 */
++ __le16 acl_id;
++ __le16 num_ifs;
++ __le32 pad;
++ /* cmd word 1 */
++ __le64 if_id[4];
++};
++
++struct dpsw_cmd_acl_get_attr {
++ __le16 acl_id;
++};
++
++struct dpsw_rsp_acl_get_attr {
++ /* cmd word 0 */
++ __le64 pad;
++ /* cmd word 1 */
++ __le16 max_entries;
++ __le16 num_entries;
++ __le16 num_ifs;
++};
++
++struct dpsw_rsp_ctrl_if_get_attr {
++ /* cmd word 0 */
++ __le64 pad;
++ /* cmd word 1 */
++ __le32 rx_fqid;
++ __le32 rx_err_fqid;
++ /* cmd word 2 */
++ __le32 tx_err_conf_fqid;
++};
++
++struct dpsw_cmd_ctrl_if_set_pools {
++ u8 num_dpbp;
++ /* from LSB: POOL0_BACKUP_POOL:1 ... POOL7_BACKUP_POOL:1 */
++ u8 backup_pool;
++ __le16 pad;
++ __le32 dpbp_id[8];
++ __le16 buffer_size[8];
++};
++
++struct dpsw_rsp_get_api_version {
++ __le16 version_major;
++ __le16 version_minor;
++};
++
++#endif /* __FSL_DPSW_CMD_H */
+diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+new file mode 100644
+index 00000000..179e98c8
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+@@ -0,0 +1,2762 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dpsw.h"
++#include "dpsw-cmd.h"
++
++static void build_if_id_bitmap(__le64 *bmap,
++ const u16 *id,
++ const u16 num_ifs)
++{
++ int i;
++
++ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
++ bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64],
++ (id[i] % 64),
++ 1);
++}
++
++static void read_if_id_bitmap(u16 *if_id,
++ u16 *num_ifs,
++ __le64 *bmap)
++{
++ int bitmap[DPSW_MAX_IF] = { 0 };
++ int i, j = 0;
++ int count = 0;
++
++ for (i = 0; i < DPSW_MAX_IF; i++) {
++ bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]),
++ i % 64);
++ count += bitmap[i];
++ }
++
++ *num_ifs = (u16)count;
++
++ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
++ if (bitmap[i]) {
++ if_id[j] = (u16)i;
++ j++;
++ }
++ }
++}
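A standalone round-trip sketch of the bitmap scheme these two helpers implement (endianness conversion is elided, and DPSW_MAX_IF is assumed to be 64, matching dpsw.h):

	#include <stdint.h>
	#include <stdio.h>

	#define DPSW_MAX_IF 64	/* assumed value, from dpsw.h */

	int main(void)
	{
		uint64_t bmap[4] = { 0 };	/* 256 bits, as in the commands */
		uint16_t in[] = { 0, 5, 63 };
		uint16_t out[DPSW_MAX_IF], num_out = 0;
		int i;

		/* build: one bit per interface id, 64 ids per word */
		for (i = 0; i < 3; i++)
			bmap[in[i] / 64] |= 1ULL << (in[i] % 64);

		/* read: scan the bits back into a compact id array */
		for (i = 0; i < DPSW_MAX_IF; i++)
			if ((bmap[i / 64] >> (i % 64)) & 1)
				out[num_out++] = (uint16_t)i;

		printf("%u ifs: %u %u %u\n", num_out, out[0], out[1], out[2]);
		return 0;	/* prints: 3 ifs: 0 5 63 */
	}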
++
++/**
++ * dpsw_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpsw_id: DPSW unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpsw_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpsw_id,
++ u16 *token)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_open *cmd_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ cmd_params = (struct dpsw_cmd_open *)cmd.params;
++ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
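A hypothetical caller sketch of the session model described above; mc_io and dpsw_id are assumed to come from the fsl-mc bus probe path:

	u16 token;
	int err;

	err = dpsw_open(mc_io, 0, dpsw_id, &token);
	if (err)
		return err;

	err = dpsw_enable(mc_io, 0, token);	/* token authenticates the call */

	dpsw_close(mc_io, 0, token);		/* token is invalid afterwards */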
++
++/**
++ * dpsw_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_enable() - Enable DPSW functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_disable() - Disable DPSW functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_is_enabled() - Check if the DPSW is enabled
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpsw_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_rsp_is_enabled *cmd_rsp;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params;
++ *en = dpsw_get_field(cmd_rsp->enabled, ENABLE);
++
++ return 0;
++}
++
++/**
++ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_set_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ struct dpsw_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_set_irq *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_set_irq *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
++ cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
++ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_get_irq() - Get IRQ information from the DPSW
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ int *type,
++ struct dpsw_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_get_irq *cmd_params;
++ struct dpsw_rsp_get_irq *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_get_irq *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_get_irq *)cmd.params;
++ irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
++ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
++ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
++ *type = le32_to_cpu(rsp_params->irq_type);
++
++ return 0;
++}
++
++/**
++ * dpsw_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable control's the
++ * overall interrupt state. if the interrupt is disabled no causes will cause
++ * an interrupt
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_set_irq_enable *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
++ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_set_irq_mask *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_get_irq_status() - Get the current status of any pending interrupts
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_get_irq_status *cmd_params;
++ struct dpsw_rsp_get_irq_status *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
++ *status = le32_to_cpu(rsp_params->status);
++
++ return 0;
++}
++
++/**
++ * dpsw_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_clear_irq_status *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(status);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
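Taken together, the status calls support the usual service sequence; a hedged sketch follows (token and irq_index are assumed to be held by the caller; note that dpsw_get_irq_status() also sends *status to the MC, so it should be initialized before the call):

	u32 status = 0;
	int err;

	err = dpsw_get_irq_status(mc_io, 0, token, irq_index, &status);
	if (!err && status) {
		/* ... react to the asserted cause bits ... */
		err = dpsw_clear_irq_status(mc_io, 0, token, irq_index, status);
	}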
++
++/**
++ * dpsw_get_attributes() - Retrieve DPSW attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @attr: Returned DPSW attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpsw_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_rsp_get_attr *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
++ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++ attr->max_fdbs = rsp_params->max_fdbs;
++ attr->num_fdbs = rsp_params->num_fdbs;
++ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
++ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
++ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
++ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
++ attr->id = le32_to_cpu(rsp_params->dpsw_id);
++ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
++ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
++ attr->max_meters_per_if = rsp_params->max_meters_per_if;
++ attr->options = le64_to_cpu(rsp_params->options);
++ attr->component_type = dpsw_get_field(rsp_params->component_type,
++ COMPONENT_TYPE);
++
++ return 0;
++}
++
++/**
++ * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Id
++ *
++ * Only one reflection receive interface is allowed per switch
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_set_reflection_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_set_link_cfg() - Set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface id
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_link_cfg *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->rate = cpu_to_le32(cfg->rate);
++ cmd_params->options = cpu_to_le64(cfg->options);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_get_link_state() - Return the link state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface id
++ * @state: Link state: 1 - link up, 0 - link down or disconnected
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_link_state *state)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_link_state *cmd_params;
++ struct dpsw_rsp_if_get_link_state *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
++ state->rate = le32_to_cpu(rsp_params->rate);
++ state->options = le64_to_cpu(rsp_params->options);
++ state->up = dpsw_get_field(rsp_params->up, UP);
++
++ return 0;
++}
++
++/**
++ * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_flooding *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->enable, ENABLE, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_broadcast *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->enable, ENABLE, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_set_multicast() - Enable/disable multicast for particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_multicast *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->enable, ENABLE, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_tci_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_tci *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id);
++ dpsw_set_field(cmd_params->conf, DEI, cfg->dei);
++ dpsw_set_field(cmd_params->conf, PCP, cfg->pcp);
++ cmd_params->conf = cpu_to_le16(cmd_params->conf);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_tci_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_tci *cmd_params;
++ struct dpsw_rsp_if_get_tci *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
++ cfg->pcp = rsp_params->pcp;
++ cfg->dei = rsp_params->dei;
++ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
++
++ return 0;
++}
++
++/**
++ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: STP State configuration parameters
++ *
++ * The following STP states are supported -
++ * blocking, listening, learning, forwarding and disabled.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_stp_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_stp *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
++ dpsw_set_field(cmd_params->state, STATE, cfg->state);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_set_accepted_frames() - Set the accepted frame types
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Frame types configuration
++ *
++ * When admit_only_vlan_tagged is set, the device discards untagged
++ * frames and priority-tagged frames received on this interface.
++ * When admit_only_untagged is set, untagged and priority-tagged
++ * frames received on this interface are accepted and assigned
++ * to a VID based on the PVID and VID set for this interface.
++ * When admit_all is set, the device accepts VLAN-tagged, untagged
++ * and priority-tagged frames.
++ * The default is admit_all.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_accepted_frames_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_accepted_frames *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type);
++ dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT,
++ cfg->unaccept_act);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_set_accept_all_vlan() - Accept or drop frames from unregistered VLANs
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @accept_all: Accept or drop frames having different VLAN
++ *
++ * When accept_all is FALSE, the device discards incoming
++ * frames for VLANs whose member set does not include this
++ * interface. When accept_all is TRUE, the interface accepts all incoming frames.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ int accept_all)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_accept_all_vlan *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_get_counter() - Get specific counter of particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @type: Counter type
++ * @counter: return value
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpsw_counter type,
++ u64 *counter)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_counter *cmd_params;
++ struct dpsw_rsp_if_get_counter *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
++ *counter = le64_to_cpu(rsp_params->counter);
++
++ return 0;
++}
++
++/**
++ * dpsw_if_set_counter() - Set specific counter of particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @type: Counter type
++ * @counter: New counter value
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpsw_counter type,
++ u64 counter)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_counter *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->counter = cpu_to_le64(counter);
++ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_set_tx_selection() - Map frame fields to traffic classes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Traffic class mapping configuration
++ *
++ * Maps frame fields (DSCP, PCP) to a traffic class. A traffic
++ * class is a number in the range 0 to 7.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_tx_selection_cfg *cfg)
++{
++ struct dpsw_cmd_if_set_tx_selection *cmd_params;
++ struct mc_command cmd = { 0 };
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR,
++ cfg->priority_selector);
++
++ for (i = 0; i < 8; i++) {
++ cmd_params->tc_sched[i].delta_bandwidth =
++ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
++ dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE,
++ cfg->tc_sched[i].mode);
++ cmd_params->tc_id[i] = cfg->tc_id[i];
++ }
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Reflection configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_reflection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_reflection *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
++ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Reflection configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_reflection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_reflection *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
++ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_set_flooding_metering() - Set flooding metering
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Metering parameters
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_metering_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_flooding_metering *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
++ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
++ cmd_params->cir = cpu_to_le32(cfg->cir);
++ cmd_params->eir = cpu_to_le32(cfg->eir);
++ cmd_params->cbs = cpu_to_le32(cfg->cbs);
++ cmd_params->ebs = cpu_to_le32(cfg->ebs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_set_metering() - Set interface metering for flooding
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @tc_id: Traffic class ID
++ * @cfg: Metering parameters
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u8 tc_id,
++ const struct dpsw_metering_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_metering *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->tc_id = tc_id;
++ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
++ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
++ cmd_params->cir = cpu_to_le32(cfg->cir);
++ cmd_params->eir = cpu_to_le32(cfg->eir);
++ cmd_params->cbs = cpu_to_le32(cfg->cbs);
++ cmd_params->ebs = cpu_to_le32(cfg->ebs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
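++
++/*
++ * Illustrative usage sketch (not part of this patch): rate-limit traffic
++ * class 0 on one interface. The mode/units enum values are declared in
++ * dpsw.h, so only the numeric token-bucket parameters are shown here; the
++ * values are examples and error handling is elided.
++ *
++ *	struct dpsw_metering_cfg met = { 0 };
++ *
++ *	met.cir = 100000;	(committed information rate)
++ *	met.cbs = 64000;	(committed burst size)
++ *	met.eir = 50000;	(excess information rate)
++ *	met.ebs = 64000;	(excess burst size)
++ *	dpsw_if_set_metering(mc_io, 0, token, if_id, 0, &met);
++ */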
++
++/**
++ * dpsw_prepare_early_drop() - Prepare an early-drop configuration for an interface
++ * @cfg: Early-drop configuration
++ * @early_drop_buf: Zeroed 256 bytes of memory to be filled before mapping to DMA
++ *
++ * This function has to be called before dpsw_if_set_early_drop()
++ *
++ */
++void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
++ u8 *early_drop_buf)
++{
++ struct dpsw_prep_early_drop *ext_params;
++
++ ext_params = (struct dpsw_prep_early_drop *)early_drop_buf;
++ dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode);
++ dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units);
++ ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold);
++ ext_params->green_drop_probability = cfg->green.drop_probability;
++ ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
++ ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
++ ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
++ ext_params->yellow_max_threshold =
++ cpu_to_le64(cfg->yellow.max_threshold);
++ ext_params->yellow_min_threshold =
++ cpu_to_le64(cfg->yellow.min_threshold);
++}
++
++/**
++ * dpsw_if_set_early_drop() - Set interface traffic class early-drop
++ * configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 64 bytes;
++ * Must be cacheline-aligned and DMA-able memory
++ *
++ * warning: Before calling this function, call dpsw_prepare_early_drop() to
++ * prepare the early_drop_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u8 tc_id,
++ u64 early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_early_drop *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
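++
++/*
++ * Illustrative usage sketch (not part of this patch): configure early drop
++ * on one traffic class by preparing a zeroed 256-byte buffer, DMA-mapping
++ * it and passing the resulting IOVA. The threshold values, the 'dev'
++ * pointer used for the mapping and the omitted error handling are all
++ * assumptions of the caller's context.
++ *
++ *	struct dpsw_early_drop_cfg ed_cfg = { 0 };
++ *	u8 *buf = kzalloc(256, GFP_KERNEL | GFP_DMA);
++ *	dma_addr_t iova;
++ *
++ *	ed_cfg.tail_drop_threshold = 1024;
++ *	ed_cfg.green.min_threshold = 64;
++ *	ed_cfg.green.max_threshold = 128;
++ *	ed_cfg.green.drop_probability = 50;
++ *	dpsw_prepare_early_drop(&ed_cfg, buf);
++ *	iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
++ *	dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id, iova);
++ */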
++
++/**
++ * dpsw_add_custom_tpid() - Configure a distinct Ethernet type (TPID) value
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @cfg: Tag Protocol identifier
++ *
++ * Configures a distinct Ethernet type (TPID) value to indicate a VLAN tag,
++ * in addition to the common TPID values 0x8100 and 0x88A8.
++ * Two additional TPIDs are supported.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpsw_custom_tpid_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_custom_tpid *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
++ cmd_params->tpid = cpu_to_le16(cfg->tpid);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_remove_custom_tpid() - Remove a distinct Ethernet type value
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @cfg: Tag Protocol identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpsw_custom_tpid_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_custom_tpid *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
++ cmd_params->tpid = cpu_to_le16(cfg->tpid);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
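++
++/*
++ * Illustrative usage sketch (not part of this patch): accept frames tagged
++ * with a vendor TPID of 0x9100 as VLAN-tagged, then withdraw it again. The
++ * TPID value is an example only; error handling is elided.
++ *
++ *	struct dpsw_custom_tpid_cfg tpid_cfg = { .tpid = 0x9100 };
++ *	int err;
++ *
++ *	err = dpsw_add_custom_tpid(mc_io, 0, token, &tpid_cfg);
++ *	...
++ *	err = dpsw_remove_custom_tpid(mc_io, 0, token, &tpid_cfg);
++ */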
++
++/**
++ * dpsw_if_enable() - Enable Interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_disable() - Disable Interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_get_attributes() - Obtain attributes of an interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @attr: Returned interface attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_if_attr *attr)
++{
++ struct dpsw_rsp_if_get_attr *rsp_params;
++ struct dpsw_cmd_if *cmd_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
++ attr->num_tcs = rsp_params->num_tcs;
++ attr->rate = le32_to_cpu(rsp_params->rate);
++ attr->options = le32_to_cpu(rsp_params->options);
++ attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
++ attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
++ ACCEPT_ALL_VLAN);
++ attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED);
++ attr->qdid = le16_to_cpu(rsp_params->qdid);
++
++ return 0;
++}
++
++/**
++ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @frame_length: Maximum Frame Length
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u16 frame_length)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->frame_length = cpu_to_le16(frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @frame_length: Returned maximum Frame Length
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u16 *frame_length)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_max_frame_length *cmd_params;
++ struct dpsw_rsp_if_get_max_frame_length *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params;
++ *frame_length = le16_to_cpu(rsp_params->frame_length);
++
++ return 0;
++}
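++
++/*
++ * Illustrative usage sketch (not part of this patch): raise the maximum
++ * receive frame length on one interface and read it back. 1536 is an
++ * example value; error handling is elided.
++ *
++ *	u16 mfl;
++ *
++ *	dpsw_if_set_max_frame_length(mc_io, 0, token, if_id, 1536);
++ *	dpsw_if_get_max_frame_length(mc_io, 0, token, if_id, &mfl);
++ */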
++
++/**
++ * dpsw_vlan_add() - Add a new VLAN to the DPSW.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: VLAN configuration
++ *
++ * Only the VLAN ID and FDB ID are required parameters here.
++ * The 12-bit VLAN ID is defined in IEEE 802.1Q.
++ * Adding a duplicate VLAN ID is not allowed.
++ * An FDB ID can be shared across multiple VLANs. Shared learning
++ * is obtained by calling dpsw_vlan_add() for multiple VLAN IDs with
++ * the same fdb_id (see the usage sketch after dpsw_vlan_add_if())
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_vlan_add *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_vlan_add *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces to add
++ *
++ * It adds only interfaces that do not yet belong to this VLAN;
++ * otherwise an error is generated and the entire command is
++ * ignored. This function can be called multiple times, each time
++ * providing only the delta of interfaces to add.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
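++
++/*
++ * Illustrative usage sketch (not part of this patch): create two VLANs that
++ * share one FDB (shared learning, as described under dpsw_vlan_add() above)
++ * and attach interfaces 1 and 2 to the first of them. Assumes the
++ * dpsw_vlan_cfg/dpsw_vlan_if_cfg layouts declared in dpsw.h; the VLAN and
++ * interface IDs are examples and error handling is elided.
++ *
++ *	struct dpsw_vlan_cfg vcfg = { .fdb_id = fdb_id };
++ *	struct dpsw_vlan_if_cfg ifs = { .num_ifs = 2, .if_id = { 1, 2 } };
++ *
++ *	dpsw_vlan_add(mc_io, 0, token, 100, &vcfg);
++ *	dpsw_vlan_add(mc_io, 0, token, 200, &vcfg); (same fdb_id)
++ *	dpsw_vlan_add_if(mc_io, 0, token, 100, &ifs);
++ */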
++
++/**
++ * dpsw_vlan_add_if_untagged() - Define a set of interfaces that should be
++ * transmitted as untagged.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be transmitted as untagged
++ *
++ * These interfaces should already belong to this VLAN.
++ * By default, all interfaces are transmitted as tagged.
++ * Providing a nonexistent interface, or an interface that is
++ * already configured as untagged, generates an error and the
++ * entire command is ignored.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
++ * included in flooding when a frame with an unknown unicast
++ * destination MAC arrives.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be used for flooding
++ *
++ * These interfaces should belong to this VLAN. By default, all
++ * interfaces are included in the flooding list. Providing a
++ * nonexistent interface, or an interface that is already in the
++ * flooding list, generates an error and the entire command is
++ * ignored.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be removed
++ *
++ * Interfaces must belong to this VLAN; otherwise an error
++ * is returned and the command is ignored
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
++ * converted from transmitting untagged to transmitting tagged.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be removed
++ *
++ * Interfaces provided to this API have to belong to this VLAN and
++ * be configured as untagged; otherwise an error is returned and
++ * the command is ignored
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
++ * removed from the flooding list.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces used for flooding
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_remove() - Remove an entire VLAN
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_remove *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_vlan_get_attributes() - Get VLAN attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @attr: Returned DPSW attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ struct dpsw_vlan_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_get_attr *cmd_params;
++ struct dpsw_rsp_vlan_get_attr *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
++ attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
++ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++ attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
++ attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
++
++ return 0;
++}
++
++/**
++ * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Returned set of interfaces belonging to this VLAN
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_get_if *cmd_params;
++ struct dpsw_rsp_vlan_get_if *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
++ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++
++ return 0;
++}
++
++/**
++ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Returned set of flooding interfaces
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
++ struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
++ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++
++ return 0;
++}
++
++/**
++ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
++ * untagged
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Returned set of untagged interfaces
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
++ struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
++ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++
++ return 0;
++}
++
++/**
++ * dpsw_fdb_add() - Add an FDB to the switch and return a handle to the
++ * FDB table for future reference
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Returned Forwarding Database Identifier
++ * @cfg: FDB Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *fdb_id,
++ const struct dpsw_fdb_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_add *cmd_params;
++ struct dpsw_rsp_fdb_add *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
++ cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
++ cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
++ *fdb_id = le16_to_cpu(rsp_params->fdb_id);
++
++ return 0;
++}
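++
++/*
++ * Illustrative usage sketch (not part of this patch): create a private FDB
++ * and keep the returned id; passing it later as dpsw_vlan_cfg.fdb_id gives
++ * a VLAN its own learning domain. The sizes are examples; error handling is
++ * elided.
++ *
++ *	struct dpsw_fdb_cfg fdb_cfg = {
++ *		.num_fdb_entries = 1024,
++ *		.fdb_aging_time = 300,
++ *	};
++ *	u16 fdb_id;
++ *
++ *	dpsw_fdb_add(mc_io, 0, token, &fdb_id, &fdb_cfg);
++ */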
++
++/**
++ * dpsw_fdb_remove() - Remove FDB from switch
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_remove *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_add_unicast *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
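++
++/*
++ * Illustrative usage sketch (not part of this patch): pin a station MAC to
++ * egress interface 3. mac_addr is given in the usual printed order; the
++ * loop above copies it byte-reversed into the command layout expected by
++ * the MC. The entry type is left at 0 here because the dpsw_fdb_entry_type
++ * values are declared elsewhere in dpsw.h; the MAC and interface values are
++ * examples and error handling is elided.
++ *
++ *	struct dpsw_fdb_unicast_cfg uc = {
++ *		.mac_addr = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 },
++ *		.if_egress = 3,
++ *	};
++ *
++ *	dpsw_fdb_add_unicast(mc_io, 0, token, fdb_id, &uc);
++ */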
++
++/**
++ * dpsw_fdb_get_unicast() - Get a unicast entry from the MAC lookup table by
++ * its unicast Ethernet address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration; mac_addr must be set by the
++ * caller, if_egress and type are returned
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ struct dpsw_fdb_unicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_get_unicast *cmd_params;
++ struct dpsw_rsp_fdb_get_unicast *rsp_params;
++ int err, i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
++ cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
++ cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
++
++ return 0;
++}
++
++/**
++ * dpsw_fdb_remove_unicast() - Remove an entry from the MAC lookup table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_remove_unicast *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast group
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
++ *
++ * If the group doesn't exist, it will be created.
++ * It adds only interfaces that do not yet belong to this multicast
++ * group; otherwise an error is generated and the command is
++ * ignored.
++ * This function may be called multiple times, each time providing
++ * only the delta of interfaces to add.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_add_multicast *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_fdb_get_multicast() - Read a multicast group by its multicast
++ * Ethernet address.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration; mac_addr must be set by the
++ * caller, the remaining fields are returned
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ struct dpsw_fdb_multicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_get_multicast *cmd_params;
++ struct dpsw_rsp_fdb_get_multicast *rsp_params;
++ int err, i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
++ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++ cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++
++ return 0;
++}
++
++/**
++ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
++ * group.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
++ *
++ * Interfaces provided to this API have to exist in the group;
++ * otherwise an error is returned and the entire command is
++ * ignored. If no interface is left in the group,
++ * the entire group is deleted
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_remove_multicast *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @mode: Learning mode
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ enum dpsw_fdb_learning_mode mode)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_fdb_get_attributes() - Get FDB attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @attr: Returned FDB attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ struct dpsw_fdb_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_get_attr *cmd_params;
++ struct dpsw_rsp_fdb_get_attr *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params;
++ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
++ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
++ attr->learning_mode = dpsw_get_field(rsp_params->learning_mode,
++ LEARNING_MODE);
++ attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups);
++ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
++
++ return 0;
++}
++
++/**
++ * dpsw_acl_add() - Add an ACL to the L2 switch.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: Returned ACL ID, for future reference
++ * @cfg: ACL configuration
++ *
++ * Create an Access Control List. Multiple ACLs can be created and
++ * co-exist in the L2 switch
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *acl_id,
++ const struct dpsw_acl_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_acl_add *cmd_params;
++ struct dpsw_rsp_acl_add *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
++ cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
++ *acl_id = le16_to_cpu(rsp_params->acl_id);
++
++ return 0;
++}
++
++/**
++ * dpsw_acl_remove() - Remove an ACL from the L2 switch.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_acl_remove *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
++ cmd_params->acl_id = cpu_to_le16(acl_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry configuration.
++ * @key: Key
++ * @entry_cfg_buf: Zeroed 256 bytes of memory to be filled before mapping to DMA
++ *
++ * This function has to be called before dpsw_acl_add_entry() or
++ * dpsw_acl_remove_entry()
++ *
++ */
++void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
++ u8 *entry_cfg_buf)
++{
++ struct dpsw_prep_acl_entry *ext_params;
++ int i;
++
++ ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
++
++ for (i = 0; i < 6; i++) {
++ ext_params->match_l2_dest_mac[i] =
++ key->match.l2_dest_mac[5 - i];
++ ext_params->match_l2_source_mac[i] =
++ key->match.l2_source_mac[5 - i];
++ ext_params->mask_l2_dest_mac[i] =
++ key->mask.l2_dest_mac[5 - i];
++ ext_params->mask_l2_source_mac[i] =
++ key->mask.l2_source_mac[5 - i];
++ }
++
++ ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
++ ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
++ ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
++ ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
++ ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
++ ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
++ ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
++ ext_params->match_l3_dscp = key->match.l3_dscp;
++ ext_params->match_l4_source_port =
++ cpu_to_le16(key->match.l4_source_port);
++
++ ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
++ ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
++ ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
++ ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
++ ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
++ ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
++ ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
++ ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
++ ext_params->mask_l3_dscp = key->mask.l3_dscp;
++ ext_params->match_l3_protocol = key->match.l3_protocol;
++ ext_params->mask_l3_protocol = key->mask.l3_protocol;
++}
++
++/**
++ * dpsw_acl_add_entry() - Adds an entry to ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: Entry configuration
++ *
++ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ const struct dpsw_acl_entry_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_acl_entry *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
++ cmd_params->acl_id = cpu_to_le16(acl_id);
++ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
++ cmd_params->precedence = cpu_to_le32(cfg->precedence);
++ dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
++ cfg->result.action);
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
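++
++/*
++ * Illustrative usage sketch (not part of this patch): add an ACL rule that
++ * matches one source MAC. The buffer handling mirrors the early-drop flow:
++ * prepare the key into zeroed memory, DMA-map it, then pass the IOVA. The
++ * 'dev' pointer is an assumption of the caller's context, the result field
++ * is left at its zero default (real callers set result.action/result.if_id
++ * from the enums in dpsw.h), and error handling is elided.
++ *
++ *	struct dpsw_acl_key key = { 0 };
++ *	struct dpsw_acl_entry_cfg ecfg = { 0 };
++ *	u8 *buf = kzalloc(256, GFP_KERNEL | GFP_DMA);
++ *
++ *	memcpy(key.match.l2_source_mac, mac, 6);
++ *	memset(key.mask.l2_source_mac, 0xff, 6);
++ *	dpsw_acl_prepare_entry_cfg(&key, buf);
++ *	ecfg.key_iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
++ *	ecfg.precedence = 0;
++ *	dpsw_acl_add_entry(mc_io, 0, token, acl_id, &ecfg);
++ */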
++
++/**
++ * dpsw_acl_remove_entry() - Removes an entry from ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: Entry configuration
++ *
++ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ const struct dpsw_acl_entry_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_acl_entry *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
++ cmd_params->acl_id = cpu_to_le16(acl_id);
++ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
++ cmd_params->precedence = cpu_to_le32(cfg->precedence);
++ dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
++ cfg->result.action);
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_acl_add_if() - Associate one or more interfaces with an ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: Interfaces list
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ const struct dpsw_acl_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_acl_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
++ cmd_params->acl_id = cpu_to_le16(acl_id);
++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_acl_remove_if() - Dissociate one or more interfaces from an ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: Interfaces list
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ const struct dpsw_acl_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_acl_if *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
++ cmd_params->acl_id = cpu_to_le16(acl_id);
++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_acl_get_attributes() - Get ACL attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL Identifier
++ * @attr: Returned ACL attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ struct dpsw_acl_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_acl_get_attr *cmd_params;
++ struct dpsw_rsp_acl_get_attr *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
++ cmd_params->acl_id = cpu_to_le16(acl_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
++ attr->max_entries = le16_to_cpu(rsp_params->max_entries);
++ attr->num_entries = le16_to_cpu(rsp_params->num_entries);
++ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++
++ return 0;
++}
++
++/**
++ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @attr: Returned control interface attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpsw_ctrl_if_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
++ attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
++ attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
++ attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
++
++ return 0;
++}
++
++/**
++ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @pools: Buffer pools configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpsw_ctrl_if_pools_cfg *pools)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
++ cmd_params->num_dpbp = pools->num_dpbp;
++ for (i = 0; i < 8; i++) {
++ cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
++ i,
++ pools->pools[i].backup_pool);
++ cmd_params->buffer_size[i] =
++ cpu_to_le16(pools->pools[i].buffer_size);
++ cmd_params->dpbp_id[i] =
++ cpu_to_le32(pools->pools[i].dpbp_id);
++ }
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
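++
++/*
++ * Illustrative usage sketch (not part of this patch): back the control
++ * interface with a single DPBP of 2 KiB buffers, then bring it up. The
++ * DPBP object id is an example; error handling is elided.
++ *
++ *	struct dpsw_ctrl_if_pools_cfg pools_cfg = {
++ *		.num_dpbp = 1,
++ *		.pools = { { .dpbp_id = 4, .buffer_size = 2048 } },
++ *	};
++ *
++ *	dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools_cfg);
++ *	dpsw_ctrl_if_enable(mc_io, 0, token);
++ */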
++
++/**
++ * dpsw_ctrl_if_enable() - Enable control interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_ctrl_if_disable() - Disable the control interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_get_api_version() - Get Data Path Switch API version
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of data path switch API
++ * @minor_ver: Minor version of data path switch API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct mc_command cmd = { 0 };
++ struct dpsw_rsp_get_api_version *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
++ cmd_flags,
++ 0);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
++ *major_ver = le16_to_cpu(rsp_params->version_major);
++ *minor_ver = le16_to_cpu(rsp_params->version_minor);
++
++ return 0;
++}
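++
++/*
++ * Illustrative usage sketch (not part of this patch): probe-time check that
++ * the MC firmware is not older than the API this driver was built against.
++ * The 8.0 cut-off is an example only.
++ *
++ *	u16 major, minor;
++ *	int err;
++ *
++ *	err = dpsw_get_api_version(mc_io, 0, &major, &minor);
++ *	if (!err && major < 8)
++ *		return -ENOTSUPP;
++ */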
+diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+new file mode 100644
+index 00000000..c91abeb4
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+@@ -0,0 +1,1269 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPSW_H
++#define __FSL_DPSW_H
++
++/* Data Path L2-Switch API
++ * Contains API for handling DPSW topology and functionality
++ */
++
++struct fsl_mc_io;
++
++/**
++ * DPSW general definitions
++ */
++
++/**
++ * Maximum number of traffic class priorities
++ */
++#define DPSW_MAX_PRIORITIES 8
++/**
++ * Maximum number of interfaces
++ */
++#define DPSW_MAX_IF 64
++
++int dpsw_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpsw_id,
++ u16 *token);
++
++int dpsw_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * DPSW options
++ */
++
++/**
++ * Disable flooding
++ */
++#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
++/**
++ * Disable Multicast
++ */
++#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
++/**
++ * Disable the control interface
++ */
++#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
++/**
++ * Disable flooding metering
++ */
++#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
++/**
++ * Enable metering
++ */
++#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
++
++/**
++ * enum dpsw_component_type - component type of a bridge
++ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
++ * enterprise VLAN bridge or of a Provider Bridge used
++ * to process C-tagged frames
++ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
++ * Provider Bridge
++ *
++ */
++enum dpsw_component_type {
++ DPSW_COMPONENT_TYPE_C_VLAN = 0,
++ DPSW_COMPONENT_TYPE_S_VLAN
++};
++
++/**
++ * struct dpsw_cfg - DPSW configuration
++ * @num_ifs: Number of external and internal interfaces
++ * @adv: Advanced parameters; default is all zeros;
++ * use this structure to change default settings
++ */
++struct dpsw_cfg {
++ u16 num_ifs;
++ /**
++ * struct adv - Advanced parameters
++ * @options: Enable/Disable DPSW features (bitmap)
++	 * @max_vlans: Maximum number of VLANs; 0 - indicates default 16
++	 * @max_meters_per_if: Number of meters per interface
++	 * @max_fdbs: Maximum number of FDBs; 0 - indicates default 16
++ * @max_fdb_entries: Number of FDB entries for default FDB table;
++ * 0 - indicates default 1024 entries.
++ * @fdb_aging_time: Default FDB aging time for default FDB table;
++ * 0 - indicates default 300 seconds
++ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
++ * 0 - indicates default 32
++ * @component_type: Indicates the component type of this bridge
++ */
++ struct {
++ u64 options;
++ u16 max_vlans;
++ u8 max_meters_per_if;
++ u8 max_fdbs;
++ u16 max_fdb_entries;
++ u16 fdb_aging_time;
++ u16 max_fdb_mc_groups;
++ enum dpsw_component_type component_type;
++ } adv;
++};
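++
++/*
++ * Illustrative sketch (not part of this patch): a minimal configuration for
++ * an 8-port switch, leaving every advanced parameter at its documented
++ * all-zeros default.
++ *
++ *	struct dpsw_cfg cfg = { .num_ifs = 8 };
++ *	u32 obj_id;
++ *
++ *	dpsw_create(mc_io, dprc_token, 0, &cfg, &obj_id);
++ */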
++
++int dpsw_create(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ const struct dpsw_cfg *cfg,
++ u32 *obj_id);
++
++int dpsw_destroy(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ u32 object_id);
++
++int dpsw_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpsw_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpsw_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpsw_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
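++/*
++ * Illustrative sketch (editorial addition): the usual lifecycle against an
++ * existing DPSW object - open a control session, enable the switch, query
++ * its state, then disable and close again.
++ */
++#if 0	/* example only, not compiled */
++static int example_dpsw_lifecycle(struct fsl_mc_io *mc_io, int dpsw_id)
++{
++	u16 token;
++	int en, err;
++
++	err = dpsw_open(mc_io, 0, dpsw_id, &token);
++	if (err)
++		return err;
++
++	err = dpsw_enable(mc_io, 0, token);
++	if (!err)
++		err = dpsw_is_enabled(mc_io, 0, token, &en);
++	if (!err && en)
++		err = dpsw_disable(mc_io, 0, token);
++
++	dpsw_close(mc_io, 0, token);
++	return err;
++}
++#endif
++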
++/**
++ * DPSW IRQ Index and Events
++ */
++
++#define DPSW_IRQ_INDEX_IF 0x0000
++#define DPSW_IRQ_INDEX_L2SW 0x0001
++
++/**
++ * IRQ event - Indicates that the link state changed
++ */
++#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
++
++/**
++ * struct dpsw_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user-defined number associated with this IRQ
++ */
++struct dpsw_irq_cfg {
++ u64 addr;
++ u32 val;
++ int irq_num;
++};
++
++int dpsw_set_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ struct dpsw_irq_cfg *irq_cfg);
++
++int dpsw_get_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ int *type,
++ struct dpsw_irq_cfg *irq_cfg);
++
++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en);
++
++int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en);
++
++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask);
++
++int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask);
++
++int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status);
++
++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status);
++
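++/*
++ * Illustrative sketch (editorial addition): a typical interrupt handler
++ * body - read the pending events, act on them, then write the handled bits
++ * back to acknowledge them.
++ */
++#if 0	/* example only, not compiled */
++static void example_dpsw_irq_handler(struct fsl_mc_io *mc_io, u16 token)
++{
++	u32 status = 0;
++
++	if (dpsw_get_irq_status(mc_io, 0, token, DPSW_IRQ_INDEX_IF, &status))
++		return;
++
++	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
++		;	/* re-read the link state of the affected interfaces */
++
++	dpsw_clear_irq_status(mc_io, 0, token, DPSW_IRQ_INDEX_IF, status);
++}
++#endif
++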
++/**
++ * struct dpsw_attr - Structure representing DPSW attributes
++ * @id: DPSW object ID
++ * @options: Enable/Disable DPSW features
++ * @max_vlans: Maximum Number of VLANs
++ * @max_meters_per_if: Number of meters per interface
++ * @max_fdbs: Maximum Number of FDBs
++ * @max_fdb_entries: Number of FDB entries for default FDB table;
++ * 0 - indicates default 1024 entries.
++ * @fdb_aging_time: Default FDB aging time for default FDB table;
++ * 0 - indicates default 300 seconds
++ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
++ * 0 - indicates default 32
++ * @mem_size: DPSW frame storage memory size
++ * @num_ifs: Number of interfaces
++ * @num_vlans: Current number of VLANs
++ * @num_fdbs: Current number of FDBs
++ * @component_type: Component type of this bridge
++ */
++struct dpsw_attr {
++ int id;
++ u64 options;
++ u16 max_vlans;
++ u8 max_meters_per_if;
++ u8 max_fdbs;
++ u16 max_fdb_entries;
++ u16 fdb_aging_time;
++ u16 max_fdb_mc_groups;
++ u16 num_ifs;
++ u16 mem_size;
++ u16 num_vlans;
++ u8 num_fdbs;
++ enum dpsw_component_type component_type;
++};
++
++int dpsw_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpsw_attr *attr);
++
++int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id);
++
++/**
++ * enum dpsw_action - Action selection for special/control frames
++ * @DPSW_ACTION_DROP: Drop frame
++ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
++ */
++enum dpsw_action {
++ DPSW_ACTION_DROP = 0,
++ DPSW_ACTION_REDIRECT = 1
++};
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpsw_link_cfg - Structure representing DPSW link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
++ */
++struct dpsw_link_cfg {
++ u32 rate;
++ u64 options;
++};
++
++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_link_cfg *cfg);
++/**
++ * struct dpsw_link_state - Structure representing DPSW link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
++ * @up: 0 - covers two cases: down and disconnected, 1 - up
++ */
++struct dpsw_link_state {
++ u32 rate;
++ u64 options;
++ int up;
++};
++
++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_link_state *state);
++
++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ int en);
++
++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ int en);
++
++int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ int en);
++
++/**
++ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
++ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
++ * to the IEEE 802.1p priority
++ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
++ * separately or in conjunction with PCP to indicate frames
++ * eligible to be dropped in the presence of congestion
++ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
++ * to which the frame belongs. The hexadecimal values
++ * of 0x000 and 0xFFF are reserved;
++ * all other values may be used as VLAN identifiers,
++ * allowing up to 4,094 VLANs
++ */
++struct dpsw_tci_cfg {
++ u8 pcp;
++ u8 dei;
++ u16 vlan_id;
++};
++
++int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_tci_cfg *cfg);
++
++int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_tci_cfg *cfg);
++
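++/*
++ * Illustrative sketch (editorial addition): setting an interface's default
++ * VLAN (PVID) through its TCI, as the switch driver below does when
++ * BRIDGE_VLAN_INFO_PVID is requested.
++ */
++#if 0	/* example only, not compiled */
++static int example_set_pvid(struct fsl_mc_io *mc_io, u16 token,
++			    u16 if_id, u16 pvid)
++{
++	struct dpsw_tci_cfg tci_cfg = {
++		.pcp = 0,
++		.dei = 0,
++		.vlan_id = pvid,
++	};
++
++	return dpsw_if_set_tci(mc_io, 0, token, if_id, &tci_cfg);
++}
++#endif
++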
++/**
++ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
++ * @DPSW_STP_STATE_BLOCKING: Blocking state
++ * @DPSW_STP_STATE_LISTENING: Listening state
++ * @DPSW_STP_STATE_LEARNING: Learning state
++ * @DPSW_STP_STATE_FORWARDING: Forwarding state
++ *
++ */
++enum dpsw_stp_state {
++ DPSW_STP_STATE_BLOCKING = 0,
++ DPSW_STP_STATE_LISTENING = 1,
++ DPSW_STP_STATE_LEARNING = 2,
++ DPSW_STP_STATE_FORWARDING = 3
++};
++
++/**
++ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
++ * @vlan_id: VLAN ID STP state
++ * @state: STP state
++ */
++struct dpsw_stp_cfg {
++ u16 vlan_id;
++ enum dpsw_stp_state state;
++};
++
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_stp_cfg *cfg);
++
++/**
++ * enum dpsw_accepted_frames - Types of frames to accept
++ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
++ * priority tagged frames
++ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
++ * Priority-Tagged frames received on this interface.
++ *
++ */
++enum dpsw_accepted_frames {
++ DPSW_ADMIT_ALL = 1,
++ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
++};
++
++/**
++ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
++ * @type: Defines ingress accepted frames
++ * @unaccept_act: When a frame is not accepted, it may be discarded or
++ * redirected to control interface depending on this mode
++ */
++struct dpsw_accepted_frames_cfg {
++ enum dpsw_accepted_frames type;
++ enum dpsw_action unaccept_act;
++};
++
++int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_accepted_frames_cfg *cfg);
++
++int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ int accept_all);
++
++/**
++ * enum dpsw_counter - Counters types
++ * @DPSW_CNT_ING_FRAME: Counts ingress frames
++ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
++ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
++ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
++ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPSW_CNT_EGR_FRAME: Counts egress frames
++ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
++ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
++ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
++ */
++enum dpsw_counter {
++ DPSW_CNT_ING_FRAME = 0x0,
++ DPSW_CNT_ING_BYTE = 0x1,
++ DPSW_CNT_ING_FLTR_FRAME = 0x2,
++ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
++ DPSW_CNT_ING_MCAST_FRAME = 0x4,
++ DPSW_CNT_ING_MCAST_BYTE = 0x5,
++ DPSW_CNT_ING_BCAST_FRAME = 0x6,
++ DPSW_CNT_ING_BCAST_BYTES = 0x7,
++ DPSW_CNT_EGR_FRAME = 0x8,
++ DPSW_CNT_EGR_BYTE = 0x9,
++ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
++ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
++};
++
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpsw_counter type,
++ u64 *counter);
++
++int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpsw_counter type,
++ u64 counter);
++
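++/*
++ * Illustrative sketch (editorial addition): reading the ingress and egress
++ * frame counters of one interface; ethsw_port_get_stats() in the switch
++ * driver below uses the same call to fill struct rtnl_link_stats64.
++ */
++#if 0	/* example only, not compiled */
++static int example_read_counters(struct fsl_mc_io *mc_io, u16 token,
++				 u16 if_id, u64 *rx_frames, u64 *tx_frames)
++{
++	int err;
++
++	err = dpsw_if_get_counter(mc_io, 0, token, if_id,
++				  DPSW_CNT_ING_FRAME, rx_frames);
++	if (err)
++		return err;
++
++	return dpsw_if_get_counter(mc_io, 0, token, if_id,
++				   DPSW_CNT_EGR_FRAME, tx_frames);
++}
++#endif
++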
++/**
++ * Maximum number of TC
++ */
++#define DPSW_MAX_TC 8
++
++/**
++ * enum dpsw_priority_selector - User priority
++ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
++ * refers to the IEEE 802.1p priority.
++ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
++ * field from IP header
++ *
++ */
++enum dpsw_priority_selector {
++ DPSW_UP_PCP = 0,
++ DPSW_UP_DSCP = 1
++};
++
++/**
++ * enum dpsw_schedule_mode - Traffic classes scheduling
++ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
++ * @DPSW_SCHED_WEIGHTED: schedule based on a token-bucket algorithm
++ */
++enum dpsw_schedule_mode {
++ DPSW_SCHED_STRICT_PRIORITY,
++ DPSW_SCHED_WEIGHTED
++};
++
++/**
++ * struct dpsw_tx_schedule_cfg - traffic class configuration
++ * @mode: Strict or weight-based scheduling
++ * @delta_bandwidth: Weighted bandwidth in the range from 100 to 10000
++ */
++struct dpsw_tx_schedule_cfg {
++ enum dpsw_schedule_mode mode;
++ u16 delta_bandwidth;
++};
++
++/**
++ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
++ * class configuration
++ * @priority_selector: Source for user priority regeneration
++ * @tc_id: The regenerated user priority that the incoming
++ * user priority is mapped to for this interface
++ * @tc_sched: Traffic classes configuration
++ */
++struct dpsw_tx_selection_cfg {
++ enum dpsw_priority_selector priority_selector;
++ u8 tc_id[DPSW_MAX_PRIORITIES];
++ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
++};
++
++int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_tx_selection_cfg *cfg);
++
++/**
++ * enum dpsw_reflection_filter - Filter type for frames to reflect
++ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
++ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belonging to a
++ * particular VLAN, defined by the vid parameter
++ *
++ */
++enum dpsw_reflection_filter {
++ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
++ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
++};
++
++/**
++ * struct dpsw_reflection_cfg - Structure representing reflection information
++ * @filter: Filter type for frames to reflect
++ * @vlan_id: VLAN ID to reflect; valid only when filter type is
++ * DPSW_REFLECTION_FILTER_INGRESS_VLAN
++ */
++struct dpsw_reflection_cfg {
++ enum dpsw_reflection_filter filter;
++ u16 vlan_id;
++};
++
++int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_reflection_cfg *cfg);
++
++int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_reflection_cfg *cfg);
++
++/**
++ * enum dpsw_metering_mode - Metering modes
++ * @DPSW_METERING_MODE_NONE: metering disabled
++ * @DPSW_METERING_MODE_RFC2698: RFC 2698
++ * @DPSW_METERING_MODE_RFC4115: RFC 4115
++ */
++enum dpsw_metering_mode {
++ DPSW_METERING_MODE_NONE = 0,
++ DPSW_METERING_MODE_RFC2698,
++ DPSW_METERING_MODE_RFC4115
++};
++
++/**
++ * enum dpsw_metering_unit - Metering count
++ * @DPSW_METERING_UNIT_BYTES: count bytes
++ * @DPSW_METERING_UNIT_FRAMES: count frames
++ */
++enum dpsw_metering_unit {
++ DPSW_METERING_UNIT_BYTES = 0,
++ DPSW_METERING_UNIT_FRAMES
++};
++
++/**
++ * struct dpsw_metering_cfg - Metering configuration
++ * @mode: metering modes
++ * @units: Bytes or frame units
++ * @cir: Committed information rate (CIR) in Kbits/s
++ * @eir: Peak information rate (PIR) Kbit/s rfc2698
++ * Excess information rate (EIR) Kbit/s rfc4115
++ * @cbs: Committed burst size (CBS) in bytes
++ * @ebs: Peak burst size (PBS) in bytes for rfc2698
++ * Excess burst size (EBS) in bytes for rfc4115
++ *
++ */
++struct dpsw_metering_cfg {
++ enum dpsw_metering_mode mode;
++ enum dpsw_metering_unit units;
++ u32 cir;
++ u32 eir;
++ u32 cbs;
++ u32 ebs;
++};
++
++int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_metering_cfg *cfg);
++
++int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u8 tc_id,
++ const struct dpsw_metering_cfg *cfg);
++
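++/*
++ * Illustrative sketch (editorial addition): an RFC 2698 two-rate meter on
++ * one traffic class of an interface. The rates and burst sizes below are
++ * made-up example values.
++ */
++#if 0	/* example only, not compiled */
++static int example_set_metering(struct fsl_mc_io *mc_io, u16 token,
++				u16 if_id, u8 tc_id)
++{
++	struct dpsw_metering_cfg cfg = {
++		.mode = DPSW_METERING_MODE_RFC2698,
++		.units = DPSW_METERING_UNIT_BYTES,
++		.cir = 100000,	/* committed rate, Kbit/s */
++		.eir = 200000,	/* peak rate, Kbit/s */
++		.cbs = 8192,	/* committed burst, bytes */
++		.ebs = 16384,	/* peak burst, bytes */
++	};
++
++	return dpsw_if_set_metering(mc_io, 0, token, if_id, tc_id, &cfg);
++}
++#endif
++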
++/**
++ * enum dpsw_early_drop_unit - DPSW early drop unit
++ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
++ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
++ */
++enum dpsw_early_drop_unit {
++ DPSW_EARLY_DROP_UNIT_BYTE = 0,
++ DPSW_EARLY_DROP_UNIT_FRAMES
++};
++
++/**
++ * enum dpsw_early_drop_mode - DPSW early drop mode
++ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
++ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
++ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
++ */
++enum dpsw_early_drop_mode {
++ DPSW_EARLY_DROP_MODE_NONE = 0,
++ DPSW_EARLY_DROP_MODE_TAIL,
++ DPSW_EARLY_DROP_MODE_WRED
++};
++
++/**
++ * struct dpsw_wred_cfg - WRED configuration
++ * @max_threshold: maximum threshold at which packets may be discarded. Above this
++ * threshold all packets are discarded; must be less than 2^39;
++ * approximated to be expressed as (x+256)*2^(y-1) due to HW
++ * implementation.
++ * @min_threshold: minimum threshold at which packets may be discarded
++ * @drop_probability: probability that a packet will be discarded (1-100,
++ * associated with the maximum threshold)
++ */
++struct dpsw_wred_cfg {
++ u64 min_threshold;
++ u64 max_threshold;
++ u8 drop_probability;
++};
++
++/**
++ * struct dpsw_early_drop_cfg - early-drop configuration
++ * @drop_mode: drop mode
++ * @units: count units
++ * @yellow: WRED - 'yellow' configuration
++ * @green: WRED - 'green' configuration
++ * @tail_drop_threshold: tail drop threshold
++ */
++struct dpsw_early_drop_cfg {
++ enum dpsw_early_drop_mode drop_mode;
++ enum dpsw_early_drop_unit units;
++ struct dpsw_wred_cfg yellow;
++ struct dpsw_wred_cfg green;
++ u32 tail_drop_threshold;
++};
++
++void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
++ u8 *early_drop_buf);
++
++int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u8 tc_id,
++ u64 early_drop_iova);
++
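++/*
++ * Illustrative sketch (editorial addition): WRED early drop on one traffic
++ * class. The configuration is first serialized into a DMA-able buffer with
++ * dpsw_prepare_early_drop(); allocating and mapping that buffer to obtain
++ * 'early_drop_iova' is left to the caller, and the thresholds below are
++ * made-up example values.
++ */
++#if 0	/* example only, not compiled */
++static int example_set_wred(struct fsl_mc_io *mc_io, u16 token,
++			    u16 if_id, u8 tc_id,
++			    u8 *early_drop_buf, u64 early_drop_iova)
++{
++	struct dpsw_early_drop_cfg cfg = {
++		.drop_mode = DPSW_EARLY_DROP_MODE_WRED,
++		.units = DPSW_EARLY_DROP_UNIT_BYTE,
++		.green = { .min_threshold = 32768, .max_threshold = 65536,
++			   .drop_probability = 50 },
++		.yellow = { .min_threshold = 16384, .max_threshold = 32768,
++			    .drop_probability = 50 },
++	};
++
++	dpsw_prepare_early_drop(&cfg, early_drop_buf);
++	return dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id,
++				      early_drop_iova);
++}
++#endif
++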
++/**
++ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
++ * @tpid: An additional tag protocol identifier
++ */
++struct dpsw_custom_tpid_cfg {
++ u16 tpid;
++};
++
++int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpsw_custom_tpid_cfg *cfg);
++
++int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpsw_custom_tpid_cfg *cfg);
++
++int dpsw_if_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id);
++
++int dpsw_if_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id);
++
++/**
++ * struct dpsw_if_attr - Structure representing DPSW interface attributes
++ * @num_tcs: Number of traffic classes
++ * @rate: Transmit rate in bits per second
++ * @options: Interface configuration options (bitmap)
++ * @enabled: Indicates if interface is enabled
++ * @accept_all_vlan: The device discards/accepts incoming frames
++ * for VLANs that do not include this interface
++ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
++ * discards untagged frames or priority-tagged frames received on
++ * this interface;
++ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
++ * tagged frames received on this interface are accepted
++ * @qdid: control frames transmit qdid
++ */
++struct dpsw_if_attr {
++ u8 num_tcs;
++ u32 rate;
++ u32 options;
++ int enabled;
++ int accept_all_vlan;
++ enum dpsw_accepted_frames admit_untagged;
++ u16 qdid;
++};
++
++int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_if_attr *attr);
++
++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u16 frame_length);
++
++int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u16 *frame_length);
++
++/**
++ * struct dpsw_vlan_cfg - VLAN Configuration
++ * @fdb_id: Forwarding Database (FDB) ID
++ */
++struct dpsw_vlan_cfg {
++ u16 fdb_id;
++};
++
++int dpsw_vlan_add(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_cfg *cfg);
++
++/**
++ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
++ * @num_ifs: The number of interfaces that are assigned to the egress
++ * list for this VLAN
++ * @if_id: The set of interfaces that are
++ * assigned to the egress list for this VLAN
++ */
++struct dpsw_vlan_if_cfg {
++ u16 num_ifs;
++ u16 if_id[DPSW_MAX_IF];
++};
++
++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id);
++
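++/*
++ * Illustrative sketch (editorial addition): creating a VLAN on the default
++ * FDB and adding one interface as an untagged member, mirroring what
++ * ethsw_add_vlan()/ethsw_port_add_vlan() in the switch driver below do.
++ */
++#if 0	/* example only, not compiled */
++static int example_vlan_add_untagged(struct fsl_mc_io *mc_io, u16 token,
++				     u16 vid, u16 if_id)
++{
++	struct dpsw_vlan_cfg vcfg = { .fdb_id = 0 };
++	struct dpsw_vlan_if_cfg ifcfg = {
++		.num_ifs = 1,
++		.if_id[0] = if_id,
++	};
++	int err;
++
++	err = dpsw_vlan_add(mc_io, 0, token, vid, &vcfg);
++	if (err)
++		return err;
++
++	err = dpsw_vlan_add_if(mc_io, 0, token, vid, &ifcfg);
++	if (err)
++		return err;
++
++	return dpsw_vlan_add_if_untagged(mc_io, 0, token, vid, &ifcfg);
++}
++#endif
++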
++/**
++ * struct dpsw_vlan_attr - VLAN attributes
++ * @fdb_id: Associated FDB ID
++ * @num_ifs: Number of interfaces
++ * @num_untagged_ifs: Number of untagged interfaces
++ * @num_flooding_ifs: Number of flooding interfaces
++ */
++struct dpsw_vlan_attr {
++ u16 fdb_id;
++ u16 num_ifs;
++ u16 num_untagged_ifs;
++ u16 num_flooding_ifs;
++};
++
++int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ struct dpsw_vlan_attr *attr);
++
++int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ struct dpsw_vlan_if_cfg *cfg);
++
++int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ struct dpsw_vlan_if_cfg *cfg);
++
++int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * struct dpsw_fdb_cfg - FDB Configuration
++ * @num_fdb_entries: Number of FDB entries
++ * @fdb_aging_time: Aging time in seconds
++ */
++struct dpsw_fdb_cfg {
++ u16 num_fdb_entries;
++ u16 fdb_aging_time;
++};
++
++int dpsw_fdb_add(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *fdb_id,
++ const struct dpsw_fdb_cfg *cfg);
++
++int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id);
++
++/**
++ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
++ * @DPSW_FDB_ENTRY_STATIC: Static entry
++ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
++ */
++enum dpsw_fdb_entry_type {
++ DPSW_FDB_ENTRY_STATIC = 0,
++ DPSW_FDB_ENTRY_DINAMIC = 1
++};
++
++/**
++ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
++ * @type: Select static or dynamic entry
++ * @mac_addr: MAC address
++ * @if_egress: Egress interface ID
++ */
++struct dpsw_fdb_unicast_cfg {
++ enum dpsw_fdb_entry_type type;
++ u8 mac_addr[6];
++ u16 if_egress;
++};
++
++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg);
++
++int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ struct dpsw_fdb_unicast_cfg *cfg);
++
++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg);
++
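++/*
++ * Illustrative sketch (editorial addition): installing a static unicast
++ * entry in the default FDB (id 0), as ethsw_port_fdb_add_uc() in the switch
++ * driver below does. The address is copied byte by byte to keep the example
++ * free of extra includes.
++ */
++#if 0	/* example only, not compiled */
++static int example_fdb_add_static_uc(struct fsl_mc_io *mc_io, u16 token,
++				     const u8 *addr, u16 if_egress)
++{
++	struct dpsw_fdb_unicast_cfg entry = {
++		.type = DPSW_FDB_ENTRY_STATIC,
++		.if_egress = if_egress,
++	};
++	int i;
++
++	for (i = 0; i < 6; i++)
++		entry.mac_addr[i] = addr[i];
++
++	return dpsw_fdb_add_unicast(mc_io, 0, token, 0, &entry);
++}
++#endif
++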
++/**
++ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
++ * @type: Select static or dynamic entry
++ * @mac_addr: MAC address
++ * @num_ifs: Number of external and internal interfaces
++ * @if_id: Egress interface IDs
++ */
++struct dpsw_fdb_multicast_cfg {
++ enum dpsw_fdb_entry_type type;
++ u8 mac_addr[6];
++ u16 num_ifs;
++ u16 if_id[DPSW_MAX_IF];
++};
++
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg);
++
++int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ struct dpsw_fdb_multicast_cfg *cfg);
++
++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg);
++
++/**
++ * enum dpsw_fdb_learning_mode - Auto-learning modes
++ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
++ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
++ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
++ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
++ *
++ * NON-SECURE LEARNING
++ * SMAC found DMAC found CTLU Action
++ * v v Forward frame to
++ * 1. DMAC destination
++ * - v Forward frame to
++ * 1. DMAC destination
++ * 2. Control interface
++ * v - Forward frame to
++ * 1. Flooding list of interfaces
++ * - - Forward frame to
++ * 1. Flooding list of interfaces
++ * 2. Control interface
++ * SECURE LEARNING
++ * SMAC found DMAC found CTLU Action
++ * v v Forward frame to
++ * 1. DMAC destination
++ * - v Forward frame to
++ * 1. Control interface
++ * v - Forward frame to
++ * 1. Flooding list of interfaces
++ * - - Forward frame to
++ * 1. Control interface
++ */
++enum dpsw_fdb_learning_mode {
++ DPSW_FDB_LEARNING_MODE_DIS = 0,
++ DPSW_FDB_LEARNING_MODE_HW = 1,
++ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
++ DPSW_FDB_LEARNING_MODE_SECURE = 3
++};
++
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ enum dpsw_fdb_learning_mode mode);
++
++/**
++ * struct dpsw_fdb_attr - FDB Attributes
++ * @max_fdb_entries: Number of FDB entries
++ * @fdb_aging_time: Aging time in seconds
++ * @learning_mode: Learning mode
++ * @num_fdb_mc_groups: Current number of multicast groups
++ * @max_fdb_mc_groups: Maximum number of multicast groups
++ */
++struct dpsw_fdb_attr {
++ u16 max_fdb_entries;
++ u16 fdb_aging_time;
++ enum dpsw_fdb_learning_mode learning_mode;
++ u16 num_fdb_mc_groups;
++ u16 max_fdb_mc_groups;
++};
++
++int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ struct dpsw_fdb_attr *attr);
++
++/**
++ * struct dpsw_acl_cfg - ACL Configuration
++ * @max_entries: Maximum number of ACL entries
++ */
++struct dpsw_acl_cfg {
++ u16 max_entries;
++};
++
++/**
++ * struct dpsw_acl_fields - ACL fields.
++ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
++ * slow protocols, MVRP, STP
++ * @l2_source_mac: Source MAC address
++ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
++ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
++ * Q-in-Q, IPv4, IPv6, PPPoE
++ * @l2_pcp_dei: Layer 2 Priority Code Point (PCP) and Drop Eligible
++ * Indicator (DEI) from the VLAN tag
++ * @l2_vlan_id: layer 2 VLAN ID
++ * @l2_ether_type: layer 2 Ethernet type
++ * @l3_dscp: Layer 3 differentiated services code point
++ * @l3_protocol: Tells the Network layer at the destination host, to which
++ * Protocol this packet belongs to. The following protocol are
++ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
++ * (encapsulation), GRE, PTP
++ * @l3_source_ip: Source IPv4 IP
++ * @l3_dest_ip: Destination IPv4 IP
++ * @l4_source_port: Source TCP/UDP Port
++ * @l4_dest_port: Destination TCP/UDP Port
++ */
++struct dpsw_acl_fields {
++ u8 l2_dest_mac[6];
++ u8 l2_source_mac[6];
++ u16 l2_tpid;
++ u8 l2_pcp_dei;
++ u16 l2_vlan_id;
++ u16 l2_ether_type;
++ u8 l3_dscp;
++ u8 l3_protocol;
++ u32 l3_source_ip;
++ u32 l3_dest_ip;
++ u16 l4_source_port;
++ u16 l4_dest_port;
++};
++
++/**
++ * struct dpsw_acl_key - ACL key
++ * @match: Match fields
++ * @mask: Mask: b'1 - valid, b'0 - don't care
++ */
++struct dpsw_acl_key {
++ struct dpsw_acl_fields match;
++ struct dpsw_acl_fields mask;
++};
++
++/**
++ * enum dpsw_acl_action
++ * @DPSW_ACL_ACTION_DROP: Drop frame
++ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
++ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
++ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
++ */
++enum dpsw_acl_action {
++ DPSW_ACL_ACTION_DROP,
++ DPSW_ACL_ACTION_REDIRECT,
++ DPSW_ACL_ACTION_ACCEPT,
++ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
++};
++
++/**
++ * struct dpsw_acl_result - ACL action
++ * @action: Action to be taken when an ACL entry is hit
++ * @if_id: Interface ID to redirect the frame to; valid only when a redirect
++ * action is selected
++ */
++struct dpsw_acl_result {
++ enum dpsw_acl_action action;
++ u16 if_id;
++};
++
++/**
++ * struct dpsw_acl_entry_cfg - ACL entry
++ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
++ * to dpsw_acl_prepare_entry_cfg()
++ * @result: Required action when entry hit occurs
++ * @precedence: Precedence inside the ACL; 0 is lowest. This priority cannot
++ * change during the lifetime of a policy. It is the user's
++ * responsibility to space the priorities to allow for subsequent
++ * rule additions.
++ */
++struct dpsw_acl_entry_cfg {
++ u64 key_iova;
++ struct dpsw_acl_result result;
++ int precedence;
++};
++
++int dpsw_acl_add(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *acl_id,
++ const struct dpsw_acl_cfg *cfg);
++
++int dpsw_acl_remove(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id);
++
++void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
++ uint8_t *entry_cfg_buf);
++
++int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ const struct dpsw_acl_entry_cfg *cfg);
++
++int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ const struct dpsw_acl_entry_cfg *cfg);
++
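++/*
++ * Illustrative sketch (editorial addition): adding a drop rule to an ACL
++ * table. The match key is serialized into a DMA-able buffer with
++ * dpsw_acl_prepare_entry_cfg(); allocating and mapping that buffer to
++ * obtain 'key_iova' is left to the caller.
++ */
++#if 0	/* example only, not compiled */
++static int example_acl_add_drop_rule(struct fsl_mc_io *mc_io, u16 token,
++				     u16 acl_id,
++				     const struct dpsw_acl_key *key,
++				     u8 *key_buf, u64 key_iova)
++{
++	struct dpsw_acl_entry_cfg entry = {
++		.key_iova = key_iova,
++		.result = { .action = DPSW_ACL_ACTION_DROP },
++		.precedence = 0,
++	};
++
++	dpsw_acl_prepare_entry_cfg(key, key_buf);
++	return dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry);
++}
++#endif
++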
++/**
++ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
++ * @num_ifs: Number of interfaces
++ * @if_id: List of interfaces
++ */
++struct dpsw_acl_if_cfg {
++ u16 num_ifs;
++ u16 if_id[DPSW_MAX_IF];
++};
++
++int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ const struct dpsw_acl_if_cfg *cfg);
++
++int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ const struct dpsw_acl_if_cfg *cfg);
++
++/**
++ * struct dpsw_acl_attr - ACL Attributes
++ * @max_entries: Max number of ACL entries
++ * @num_entries: Number of used ACL entries
++ * @num_ifs: Number of interfaces associated with ACL
++ */
++struct dpsw_acl_attr {
++ u16 max_entries;
++ u16 num_entries;
++ u16 num_ifs;
++};
++
++int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 acl_id,
++ struct dpsw_acl_attr *attr);
++/**
++ * struct dpsw_ctrl_if_attr - Control interface attributes
++ * @rx_fqid: Receive FQID
++ * @rx_err_fqid: Receive error FQID
++ * @tx_err_conf_fqid: Transmit error and confirmation FQID
++ */
++struct dpsw_ctrl_if_attr {
++ u32 rx_fqid;
++ u32 rx_err_fqid;
++ u32 tx_err_conf_fqid;
++};
++
++int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpsw_ctrl_if_attr *attr);
++
++/**
++ * Maximum number of DPBP
++ */
++#define DPSW_MAX_DPBP 8
++
++/**
++ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
++ * @num_dpbp: Number of DPBPs
++ * @pools: Array of buffer pool parameters; the number of valid entries
++ * must match 'num_dpbp' value
++ */
++struct dpsw_ctrl_if_pools_cfg {
++ u8 num_dpbp;
++ /**
++ * struct pools - Buffer pools parameters
++ * @dpbp_id: DPBP object ID
++ * @buffer_size: Buffer size
++ * @backup_pool: Backup pool
++ */
++ struct {
++ int dpbp_id;
++ u16 buffer_size;
++ int backup_pool;
++ } pools[DPSW_MAX_DPBP];
++};
++
++int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpsw_ctrl_if_pools_cfg *cfg);
++
++int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpsw_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
++
++#endif /* __FSL_DPSW_H */
+diff --git a/drivers/staging/fsl-dpaa2/ethsw/switch.c b/drivers/staging/fsl-dpaa2/ethsw/switch.c
+new file mode 100644
+index 00000000..3f2c9648
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
+@@ -0,0 +1,1857 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/msi.h>
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++
++#include <uapi/linux/if_bridge.h>
++#include <net/netlink.h>
++
++#include "../../fsl-mc/include/mc.h"
++#include "dpsw.h"
++#include "dpsw-cmd.h"
++
++static const char ethsw_drv_version[] = "0.1";
++
++/* Minimum supported DPSW version */
++#define DPSW_MIN_VER_MAJOR 8
++#define DPSW_MIN_VER_MINOR 0
++
++/* IRQ index */
++#define DPSW_MAX_IRQ_NUM 2
++
++#define ETHSW_VLAN_MEMBER 1
++#define ETHSW_VLAN_UNTAGGED 2
++#define ETHSW_VLAN_PVID 4
++#define ETHSW_VLAN_GLOBAL 8
++
++/* Maximum Frame Length supported by HW (currently 10k) */
++#define DPAA2_MFL (10 * 1024)
++#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
++#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
++
++struct ethsw_port_priv {
++ struct net_device *netdev;
++ struct list_head list;
++ u16 port_index;
++ struct ethsw_dev_priv *ethsw_priv;
++ u8 stp_state;
++
++ char vlans[VLAN_VID_MASK + 1];
++};
++
++struct ethsw_dev_priv {
++ struct net_device *netdev;
++ struct fsl_mc_io *mc_io;
++ u16 dpsw_handle;
++ struct dpsw_attr sw_attr;
++ int dev_id;
++	/* TODO: redundant, we can use the slave dev list */
++ struct list_head port_list;
++
++ bool flood;
++ bool learning;
++
++ char vlans[VLAN_VID_MASK + 1];
++};
++
++static int ethsw_port_stop(struct net_device *netdev);
++static int ethsw_port_open(struct net_device *netdev);
++
++static inline void __get_priv(struct net_device *netdev,
++ struct ethsw_dev_priv **priv,
++ struct ethsw_port_priv **port_priv)
++{
++ struct ethsw_dev_priv *_priv = NULL;
++ struct ethsw_port_priv *_port_priv = NULL;
++
++ if (netdev->flags & IFF_MASTER) {
++ _priv = netdev_priv(netdev);
++ } else {
++ _port_priv = netdev_priv(netdev);
++ _priv = _port_priv->ethsw_priv;
++ }
++
++ if (priv)
++ *priv = _priv;
++ if (port_priv)
++ *port_priv = _port_priv;
++}
++
++/* -------------------------------------------------------------------------- */
++/* ethsw netdevice ops */
++
++static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
++{
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++}
++
++static int ethsw_open(struct net_device *netdev)
++{
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ struct list_head *pos;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err;
++
++ err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
++ if (err) {
++ netdev_err(netdev, "dpsw_enable err %d\n", err);
++ return err;
++ }
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv, list);
++ err = dev_open(port_priv->netdev);
++ if (err)
++ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
++ }
++
++ return 0;
++}
++
++static int ethsw_stop(struct net_device *netdev)
++{
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ struct list_head *pos;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err;
++
++ err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
++ if (err) {
++ netdev_err(netdev, "dpsw_disable err %d\n", err);
++ return err;
++ }
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv, list);
++ err = dev_close(port_priv->netdev);
++ if (err)
++ netdev_err(port_priv->netdev,
++ "dev_close err %d\n", err);
++ }
++
++ return 0;
++}
++
++static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
++{
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ int err;
++
++ struct dpsw_vlan_cfg vcfg = {
++ /* TODO: add support for VLAN private FDBs */
++ .fdb_id = 0,
++ };
++ if (priv->vlans[vid]) {
++ netdev_err(netdev, "VLAN already configured\n");
++ return -EEXIST;
++ }
++
++ err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_add err %d\n", err);
++ return err;
++ }
++ priv->vlans[vid] = ETHSW_VLAN_MEMBER;
++
++ return 0;
++}
++
++static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ int err;
++
++ struct dpsw_vlan_if_cfg vcfg = {
++ .num_ifs = 1,
++ .if_id[0] = port_priv->port_index,
++ };
++
++ if (port_priv->vlans[vid]) {
++ netdev_err(netdev, "VLAN already configured\n");
++ return -EEXIST;
++ }
++
++ if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) {
++ netdev_err(netdev, "interface must be down to change PVID!\n");
++ return -EBUSY;
++ }
++
++ err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
++
++ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
++ err = dpsw_vlan_add_if_untagged(priv->mc_io, 0,
++ priv->dpsw_handle, vid, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n",
++ err);
++ return err;
++ }
++ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
++ }
++
++ if (flags & BRIDGE_VLAN_INFO_PVID) {
++ struct dpsw_tci_cfg tci_cfg = {
++ /* TODO: at least add better defaults if these cannot
++ * be configured
++ */
++ .pcp = 0,
++ .dei = 0,
++ .vlan_id = vid,
++ };
++
++ err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle,
++ port_priv->port_index, &tci_cfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
++ }
++
++ return 0;
++}
++
++static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
++ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
++ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
++ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
++ .len = sizeof(struct bridge_vlan_info), },
++};
++
++static int ethsw_setlink_af_spec(struct net_device *netdev,
++ struct nlattr **tb)
++{
++ struct bridge_vlan_info *vinfo;
++ struct ethsw_dev_priv *priv = NULL;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err = 0;
++
++ if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
++ netdev_err(netdev, "no VLAN INFO in nlmsg\n");
++ return -EOPNOTSUPP;
++ }
++
++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++
++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
++ return -EINVAL;
++
++ __get_priv(netdev, &priv, &port_priv);
++
++ if (!port_priv || !priv->vlans[vinfo->vid]) {
++ /* command targets switch device or this is a new VLAN */
++ err = ethsw_add_vlan(priv->netdev, vinfo->vid);
++ if (err)
++ return err;
++
++		/* command targets switch device; mark it */
++ if (!port_priv)
++ priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL;
++ }
++
++ if (port_priv) {
++ /* command targets switch port */
++ err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
++ [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
++ [IFLA_BRPORT_COST] = { .type = NLA_U32 },
++ [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
++ [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
++ [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
++ [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
++ [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
++ [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
++};
++
++static int ethsw_set_learning(struct net_device *netdev, u8 flag)
++{
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ enum dpsw_fdb_learning_mode learn_mode;
++ int err;
++
++ if (flag)
++ learn_mode = DPSW_FDB_LEARNING_MODE_HW;
++ else
++ learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
++
++ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
++ 0, learn_mode);
++ if (err) {
++ netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
++ return err;
++ }
++ priv->learning = !!flag;
++
++ return 0;
++}
++
++static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ int err;
++
++ err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
++ port_priv->port_index, (int)flag);
++ if (err) {
++		netdev_err(netdev, "dpsw_if_set_flooding err %d\n", err);
++ return err;
++ }
++ priv->flood = !!flag;
++
++ return 0;
++}
++
++static int ethsw_port_set_state(struct net_device *netdev, u8 state)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ u8 old_state = port_priv->stp_state;
++ int err;
++
++ struct dpsw_stp_cfg stp_cfg = {
++ .vlan_id = 1,
++ .state = state,
++ };
++ /* TODO: check port state, interface may be down */
++
++ if (state > BR_STATE_BLOCKING)
++ return -EINVAL;
++
++ if (state == port_priv->stp_state)
++ return 0;
++
++ if (state == BR_STATE_DISABLED) {
++ port_priv->stp_state = state;
++
++ err = ethsw_port_stop(netdev);
++ if (err)
++ goto error;
++ } else {
++ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
++ port_priv->port_index, &stp_cfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
++ return err;
++ }
++
++ port_priv->stp_state = state;
++
++ if (old_state == BR_STATE_DISABLED) {
++ err = ethsw_port_open(netdev);
++ if (err)
++ goto error;
++ }
++ }
++
++ return 0;
++error:
++ port_priv->stp_state = old_state;
++ return err;
++}
++
++static int ethsw_setlink_protinfo(struct net_device *netdev,
++ struct nlattr **tb)
++{
++ struct ethsw_dev_priv *priv;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err = 0;
++
++ __get_priv(netdev, &priv, &port_priv);
++
++ if (tb[IFLA_BRPORT_LEARNING]) {
++ u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
++
++ if (port_priv)
++ netdev_warn(netdev,
++ "learning set on whole switch dev\n");
++
++ err = ethsw_set_learning(priv->netdev, flag);
++ if (err)
++ return err;
++
++ } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
++ u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
++
++ err = ethsw_port_set_flood(port_priv->netdev, flag);
++ if (err)
++ return err;
++
++ } else if (tb[IFLA_BRPORT_STATE] && port_priv) {
++ u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
++
++ err = ethsw_port_set_state(port_priv->netdev, state);
++ if (err)
++ return err;
++
++ } else {
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int ethsw_setlink(struct net_device *netdev,
++ struct nlmsghdr *nlh,
++ u16 flags)
++{
++ struct nlattr *attr;
++	struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
++			   IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
++ int err = 0;
++
++ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (attr) {
++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
++ ifla_br_policy);
++ if (err) {
++ netdev_err(netdev,
++ "nla_parse_nested for br_policy err %d\n",
++ err);
++ return err;
++ }
++
++ err = ethsw_setlink_af_spec(netdev, tb);
++ return err;
++ }
++
++ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
++ if (attr) {
++ err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr,
++ ifla_brport_policy);
++ if (err) {
++ netdev_err(netdev,
++ "nla_parse_nested for brport_policy err %d\n",
++ err);
++ return err;
++ }
++
++ err = ethsw_setlink_protinfo(netdev, tb);
++ return err;
++ }
++
++ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n");
++ return -EOPNOTSUPP;
++}
++
++static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev,
++ struct ethsw_dev_priv *priv)
++{
++ u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN;
++ int iflink;
++ int err;
++
++ err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
++ if (err)
++ goto nla_put_err;
++ if (netdev->addr_len) {
++ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
++ netdev->dev_addr);
++ if (err)
++ goto nla_put_err;
++ }
++
++ iflink = dev_get_iflink(netdev);
++ if (netdev->ifindex != iflink) {
++ err = nla_put_u32(skb, IFLA_LINK, iflink);
++ if (err)
++ goto nla_put_err;
++ }
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ return err;
++}
++
++static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev,
++ struct ethsw_port_priv *port_priv)
++{
++ struct nlattr *nest;
++ int err;
++
++ u8 stp_state = port_priv->stp_state;
++
++ if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING)
++ stp_state = BR_STATE_BLOCKING;
++
++ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
++ if (!nest) {
++ netdev_err(netdev, "nla_nest_start failed\n");
++ return -ENOMEM;
++ }
++
++ err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING,
++ port_priv->ethsw_priv->learning);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
++ port_priv->ethsw_priv->flood);
++ if (err)
++ goto nla_put_err;
++ nla_nest_end(skb, nest);
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ nla_nest_cancel(skb, nest);
++ return err;
++}
++
++static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev,
++ struct ethsw_dev_priv *priv,
++ struct ethsw_port_priv *port_priv)
++{
++ struct nlattr *nest;
++ struct bridge_vlan_info vinfo;
++ const char *vlans;
++ u16 i;
++ int err;
++
++ nest = nla_nest_start(skb, IFLA_AF_SPEC);
++ if (!nest) {
++		netdev_err(netdev, "nla_nest_start failed\n");
++ return -ENOMEM;
++ }
++
++ if (port_priv)
++ vlans = port_priv->vlans;
++ else
++ vlans = priv->vlans;
++
++ for (i = 0; i < VLAN_VID_MASK + 1; i++) {
++ vinfo.flags = 0;
++ vinfo.vid = i;
++
++ if (vlans[i] & ETHSW_VLAN_UNTAGGED)
++ vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
++
++ if (vlans[i] & ETHSW_VLAN_PVID)
++ vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
++
++ if (vlans[i] & ETHSW_VLAN_MEMBER) {
++ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
++ sizeof(vinfo), &vinfo);
++ if (err)
++ goto nla_put_err;
++ }
++ }
++
++ nla_nest_end(skb, nest);
++
++ return 0;
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ nla_nest_cancel(skb, nest);
++ return err;
++}
++
++static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq,
++ struct net_device *netdev, u32 filter_mask,
++ int nlflags)
++{
++ struct ethsw_dev_priv *priv;
++ struct ethsw_port_priv *port_priv = NULL;
++ struct ifinfomsg *hdr;
++ struct nlmsghdr *nlh;
++ int err;
++
++ __get_priv(netdev, &priv, &port_priv);
++
++ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
++ if (!nlh)
++ return -EMSGSIZE;
++
++ hdr = nlmsg_data(nlh);
++ memset(hdr, 0, sizeof(*hdr));
++ hdr->ifi_family = AF_BRIDGE;
++ hdr->ifi_type = netdev->type;
++ hdr->ifi_index = netdev->ifindex;
++ hdr->ifi_flags = dev_get_flags(netdev);
++
++ err = __nla_put_netdev(skb, netdev, priv);
++ if (err)
++ goto nla_put_err;
++
++ if (port_priv) {
++ err = __nla_put_port(skb, netdev, port_priv);
++ if (err)
++ goto nla_put_err;
++ }
++
++ /* Check if the VID information is requested */
++ if (filter_mask & RTEXT_FILTER_BRVLAN) {
++ err = __nla_put_vlan(skb, netdev, priv, port_priv);
++ if (err)
++ goto nla_put_err;
++ }
++
++ nlmsg_end(skb, nlh);
++ return skb->len;
++
++nla_put_err:
++ nlmsg_cancel(skb, nlh);
++ return -EMSGSIZE;
++}
++
++static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid)
++{
++ struct list_head *pos;
++ struct ethsw_port_priv *ppriv_local = NULL;
++ int err = 0;
++
++ if (!priv->vlans[vid])
++ return -ENOENT;
++
++ err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid);
++ if (err) {
++ netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err);
++ return err;
++ }
++ priv->vlans[vid] = 0;
++
++ list_for_each(pos, &priv->port_list) {
++ ppriv_local = list_entry(pos, struct ethsw_port_priv,
++ list);
++ ppriv_local->vlans[vid] = 0;
++ }
++
++ return 0;
++}
++
++static int ethsw_dellink_port(struct ethsw_dev_priv *priv,
++ struct ethsw_port_priv *port_priv,
++ u16 vid)
++{
++ struct list_head *pos;
++ struct ethsw_port_priv *ppriv_local = NULL;
++ struct dpsw_vlan_if_cfg vcfg = {
++ .num_ifs = 1,
++ .if_id[0] = port_priv->port_index,
++ };
++ unsigned int count = 0;
++ int err = 0;
++
++ if (!port_priv->vlans[vid])
++ return -ENOENT;
++
++ /* VLAN will be deleted from switch if global flag is not set
++ * and is configured on only one port
++ */
++ if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) {
++ list_for_each(pos, &priv->port_list) {
++ ppriv_local = list_entry(pos, struct ethsw_port_priv,
++ list);
++ if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER)
++ count++;
++ }
++
++ if (count == 1)
++ return ethsw_dellink_switch(priv, vid);
++ }
++
++ err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle,
++ vid, &vcfg);
++ if (err) {
++ netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vid] = 0;
++ return 0;
++}
++
++static int ethsw_dellink(struct net_device *netdev,
++ struct nlmsghdr *nlh,
++ u16 flags)
++{
++ struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
++ struct nlattr *spec;
++ struct bridge_vlan_info *vinfo;
++ struct ethsw_dev_priv *priv;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err = 0;
++
++ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (!spec)
++ return 0;
++
++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
++ if (err)
++ return err;
++
++ if (!tb[IFLA_BRIDGE_VLAN_INFO])
++ return -EOPNOTSUPP;
++
++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++
++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
++ return -EINVAL;
++
++ __get_priv(netdev, &priv, &port_priv);
++
++ /* decide if command targets switch device or port */
++ if (!port_priv)
++ err = ethsw_dellink_switch(priv, vinfo->vid);
++ else
++ err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
++
++ return err;
++}
++
++static const struct net_device_ops ethsw_ops = {
++ .ndo_open = &ethsw_open,
++ .ndo_stop = &ethsw_stop,
++
++ .ndo_bridge_setlink = &ethsw_setlink,
++ .ndo_bridge_getlink = &ethsw_getlink,
++ .ndo_bridge_dellink = &ethsw_dellink,
++
++ .ndo_start_xmit = &ethsw_dropframe,
++};
++
++/* -------------------------------------------------------------------------- */
++/* switch port netdevice ops */
++
++static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state;
++ int err;
++
++ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index, &state);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
++ return err;
++ }
++
++ WARN_ONCE(state.up > 1, "Garbage read into link_state");
++
++ if (state.up)
++ netif_carrier_on(port_priv->netdev);
++ else
++ netif_carrier_off(port_priv->netdev);
++
++ return 0;
++}
++
++static int ethsw_port_open(struct net_device *netdev)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
++
++ err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
++ return err;
++ }
++
++ /* sync carrier state */
++ err = _ethsw_port_carrier_state_sync(netdev);
++ if (err) {
++ netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n",
++ err);
++ goto err_carrier_sync;
++ }
++
++ return 0;
++
++err_carrier_sync:
++ dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index);
++ return err;
++}
++
++static int ethsw_port_stop(struct net_device *netdev)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
++
++ err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
++ return err;
++ }
++
++ return 0;
++}
++
++static int ethsw_port_fdb_add_uc(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
++
++ entry.if_egress = port_priv->port_index;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
++
++ err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
++ return err;
++}
++
++static int ethsw_port_fdb_del_uc(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
++
++ entry.if_egress = port_priv->port_index;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
++
++ err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err);
++ return err;
++}
++
++static int ethsw_port_fdb_add_mc(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_fdb_multicast_cfg entry = {0};
++ int err;
++
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->port_index;
++
++ err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err);
++ return err;
++}
++
++static int ethsw_port_fdb_del_mc(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_fdb_multicast_cfg entry = {0};
++ int err;
++
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->port_index;
++
++ err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err);
++ return err;
++}
++
++static int _lookup_address(struct net_device *netdev, int is_uc,
++ const unsigned char *addr)
++{
++ struct netdev_hw_addr *ha;
++ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
++
++ netif_addr_lock_bh(netdev);
++ list_for_each_entry(ha, &list->list, list) {
++ if (ether_addr_equal(ha->addr, addr)) {
++ netif_addr_unlock_bh(netdev);
++ return 1;
++ }
++ }
++ netif_addr_unlock_bh(netdev);
++ return 0;
++}
++
++static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 vid,
++ u16 flags)
++{
++ struct list_head *pos;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ int err;
++
++ /* TODO: add replace support when added to iproute bridge */
++ if (!(flags & NLM_F_REQUEST)) {
++ netdev_err(netdev,
++ "ethsw_port_fdb_add unexpected flags value %08x\n",
++ flags);
++ return -EINVAL;
++ }
++
++ if (is_unicast_ether_addr(addr)) {
++ /* if entry cannot be replaced, return error if exists */
++ if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) {
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos,
++ struct ethsw_port_priv,
++ list);
++ if (_lookup_address(port_priv->netdev,
++ 1, addr))
++ return -EEXIST;
++ }
++ }
++
++ err = ethsw_port_fdb_add_uc(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n",
++ err);
++ return err;
++ }
++
++ /* we might have replaced an existing entry for a different
++ * switch port, make sure the address doesn't linger in any
++ * port address list
++ */
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv,
++ list);
++ dev_uc_del(port_priv->netdev, addr);
++ }
++
++ err = dev_uc_add(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "dev_uc_add err %d\n", err);
++ return err;
++ }
++ } else {
++ struct dpsw_fdb_multicast_cfg entry = {
++ .type = DPSW_FDB_ENTRY_STATIC,
++ .num_ifs = 0,
++ };
++
++ /* check if address is already set on this port */
++ if (_lookup_address(netdev, 0, addr))
++ return -EEXIST;
++
++ /* check if the address exists on other port */
++ ether_addr_copy(entry.mac_addr, addr);
++ err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
++ 0, &entry);
++ if (!err) {
++ /* entry exists, can we replace it? */
++ if (flags & NLM_F_EXCL)
++ return -EEXIST;
++ } else if (err != -ENAVAIL) {
++ netdev_err(netdev, "dpsw_fdb_get_unicast err %d\n",
++ err);
++ return err;
++ }
++
++ err = ethsw_port_fdb_add_mc(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
++ err);
++ return err;
++ }
++
++ err = dev_mc_add(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "dev_mc_add err %d\n", err);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 vid)
++{
++ int err;
++
++ if (is_unicast_ether_addr(addr)) {
++ err = ethsw_port_fdb_del_uc(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
++ err);
++ return err;
++ }
++
++ /* also delete if configured on port */
++ err = dev_uc_del(netdev, addr);
++ if (err && err != -ENOENT) {
++ netdev_err(netdev, "dev_uc_del err %d\n", err);
++ return err;
++ }
++ } else {
++ if (!_lookup_address(netdev, 0, addr))
++ return -ENOENT;
++
++ err = dev_mc_del(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "dev_mc_del err %d\n", err);
++ return err;
++ }
++
++ err = ethsw_port_fdb_del_mc(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
++ err);
++ return err;
++ }
++ }
++
++ return 0;
++}
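++
++/* Usage sketch (illustrative, not part of the driver): these ndo_fdb_*
++ * hooks are what userspace reaches through "bridge fdb", e.g.
++ *
++ *   bridge fdb add 02:00:c0:a8:45:01 dev sw0p1
++ *   bridge fdb del 02:00:c0:a8:45:01 dev sw0p1
++ *
++ * The MAC address and the "sw0p1" name are hypothetical; port names
++ * follow the "%sp%d" pattern set up in ethsw_probe() below.
++ */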
++
++void ethsw_port_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *storage)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u64 tmp;
++ int err;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_ING_FRAME, &storage->rx_packets);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_EGR_FRAME, &storage->tx_packets);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_ING_BYTE, &storage->rx_bytes);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_ING_FRAME_DISCARD,
++ &storage->rx_dropped);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_ING_FLTR_FRAME,
++ &tmp);
++ if (err)
++ goto error;
++ storage->rx_dropped += tmp;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_EGR_FRAME_DISCARD,
++ &storage->tx_dropped);
++ if (err)
++ goto error;
++
++ return;
++
++error:
++ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
++}
++
++static int ethsw_port_change_mtu(struct net_device *netdev, int mtu)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
++
++ if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) {
++ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
++ mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH);
++ return -EINVAL;
++ }
++
++ err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io,
++ 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ (u16)ETHSW_L2_MAX_FRM(mtu));
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_if_set_max_frame_length() err %d\n", err);
++ return err;
++ }
++
++ netdev->mtu = mtu;
++ return 0;
++}
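++
++/* Example (illustrative): the MTU is changed from userspace with
++ * "ip link set dev sw0p0 mtu 1500"; ETHSW_L2_MAX_FRM(), defined in the
++ * driver's header elsewhere in this patch, converts that L3 MTU into
++ * the L2 frame length the MC firmware expects.
++ */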
++
++static const struct net_device_ops ethsw_port_ops = {
++ .ndo_open = &ethsw_port_open,
++ .ndo_stop = &ethsw_port_stop,
++
++ .ndo_fdb_add = &ethsw_port_fdb_add,
++ .ndo_fdb_del = &ethsw_port_fdb_del,
++ .ndo_fdb_dump = &ndo_dflt_fdb_dump,
++
++ .ndo_get_stats64 = &ethsw_port_get_stats,
++ .ndo_change_mtu = &ethsw_port_change_mtu,
++
++ .ndo_start_xmit = &ethsw_dropframe,
++};
++
++static void ethsw_get_drvinfo(struct net_device *netdev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u16 version_major, version_minor;
++ int err;
++
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
++ strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version));
++
++ err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0,
++ &version_major,
++ &version_minor);
++ if (err)
++ strlcpy(drvinfo->fw_version, "N/A",
++ sizeof(drvinfo->fw_version));
++ else
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%u.%u", version_major, version_minor);
++
++ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
++}
++
++static int ethsw_get_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state = {0};
++ int err = 0;
++
++ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ &state);
++ if (err) {
++ netdev_err(netdev, "ERROR %d getting link state", err);
++ goto out;
++ }
++
++ /* At the moment, we have no way of interrogating the DPMAC
++ * from the DPSW side, and there may not be a DPMAC at all.
++ * Report only the autoneg state, duplex and speed.
++ */
++ if (state.options & DPSW_LINK_OPT_AUTONEG)
++ cmd->autoneg = AUTONEG_ENABLE;
++ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
++ cmd->autoneg = DUPLEX_FULL;
++ ethtool_cmd_speed_set(cmd, state.rate);
++
++out:
++ return err;
++}
++
++static int ethsw_set_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state = {0};
++ struct dpsw_link_cfg cfg = {0};
++ int err = 0;
++
++ netdev_dbg(netdev, "Setting link parameters...");
++
++ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ &state);
++ if (err) {
++ netdev_err(netdev, "ERROR %d getting link state", err);
++ goto out;
++ }
++
++ /* Due to a temporary MC limitation, the DPSW port must be down
++ * in order to change link settings, so tell the user to bring
++ * the interface down first.
++ */
++ if (netif_running(netdev)) {
++ netdev_info(netdev,
++ "Sorry, interface must be brought down first.\n");
++ return -EACCES;
++ }
++
++ cfg.options = state.options;
++ cfg.rate = ethtool_cmd_speed(cmd);
++ if (cmd->autoneg == AUTONEG_ENABLE)
++ cfg.options |= DPSW_LINK_OPT_AUTONEG;
++ else
++ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
++ if (cmd->duplex == DUPLEX_HALF)
++ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
++ else
++ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
++
++ err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ &cfg);
++ if (err)
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
++
++out:
++ return err;
++}
++
++static struct {
++ enum dpsw_counter id;
++ char name[ETH_GSTRING_LEN];
++} ethsw_ethtool_counters[] = {
++ {DPSW_CNT_ING_FRAME, "rx frames"},
++ {DPSW_CNT_ING_BYTE, "rx bytes"},
++ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
++ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
++ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
++ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
++ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
++ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
++ {DPSW_CNT_EGR_FRAME, "tx frames"},
++ {DPSW_CNT_EGR_BYTE, "tx bytes"},
++ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
++};
++
++static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(ethsw_ethtool_counters);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static void ethsw_ethtool_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data)
++{
++ u32 i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
++ break;
++ }
++}
++
++static void ethsw_ethtool_get_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u32 i;
++ int err;
++
++ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ ethsw_ethtool_counters[i].id,
++ &data[i]);
++ if (err)
++ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
++ ethsw_ethtool_counters[i].name, err);
++ }
++}
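++
++/* Usage note (illustrative): the counters above surface through the
++ * standard ethtool interface, e.g. "ethtool -S sw0p0" on a hypothetical
++ * port named sw0p0.
++ */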
++
++static const struct ethtool_ops ethsw_port_ethtool_ops = {
++ .get_drvinfo = &ethsw_get_drvinfo,
++ .get_link = &ethtool_op_get_link,
++ .get_settings = &ethsw_get_settings,
++ .set_settings = &ethsw_set_settings,
++ .get_strings = &ethsw_ethtool_get_strings,
++ .get_ethtool_stats = &ethsw_ethtool_get_stats,
++ .get_sset_count = &ethsw_ethtool_get_sset_count,
++};
++
++/* -------------------------------------------------------------------------- */
++/* ethsw driver functions */
++
++static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
++{
++ struct list_head *pos;
++ struct ethsw_port_priv *port_priv;
++ int err;
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv,
++ list);
++
++ err = _ethsw_port_carrier_state_sync(port_priv->netdev);
++ if (err)
++ netdev_err(port_priv->netdev,
++ "_ethsw_port_carrier_state_sync err %d\n",
++ err);
++ }
++
++ return 0;
++}
++
++static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++
++ struct fsl_mc_io *io = priv->mc_io;
++ u16 token = priv->dpsw_handle;
++ int irq_index = DPSW_IRQ_INDEX_IF;
++
++ /* Mask the events and the if_id reserved bits to be cleared on read */
++ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
++ int err;
++
++ err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
++ if (unlikely(err)) {
++ netdev_err(netdev, "Can't get irq status (err %d)", err);
++
++ err = dpsw_clear_irq_status(io, 0, token, irq_index,
++ 0xFFFFFFFF);
++ if (unlikely(err))
++ netdev_err(netdev, "Can't clear irq status (err %d)",
++ err);
++ goto out;
++ }
++
++ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
++ err = ethsw_links_state_update(priv);
++ if (unlikely(err))
++ goto out;
++ }
++
++out:
++ return IRQ_HANDLED;
++}
++
++static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ int err = 0;
++ struct fsl_mc_device_irq *irq;
++ const int irq_index = DPSW_IRQ_INDEX_IF;
++ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
++
++ err = fsl_mc_allocate_irqs(sw_dev);
++ if (unlikely(err)) {
++ dev_err(dev, "MC irqs allocation failed\n");
++ return err;
++ }
++
++ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) {
++ err = -EINVAL;
++ goto free_irq;
++ }
++
++ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
++ irq_index, 0);
++ if (unlikely(err)) {
++ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
++ goto free_irq;
++ }
++
++ irq = sw_dev->irqs[irq_index];
++
++ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
++ ethsw_irq0_handler,
++ _ethsw_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(dev), dev);
++ if (unlikely(err)) {
++ dev_err(dev, "devm_request_threaded_irq(): %d", err);
++ goto free_irq;
++ }
++
++ err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle,
++ irq_index, mask);
++ if (unlikely(err)) {
++ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
++ goto free_devm_irq;
++ }
++
++ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
++ irq_index, 1);
++ if (unlikely(err)) {
++ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
++ goto free_devm_irq;
++ }
++
++ return 0;
++
++free_devm_irq:
++ devm_free_irq(dev, irq->msi_desc->irq, dev);
++free_irq:
++ fsl_mc_free_irqs(sw_dev);
++ return err;
++}
++
++static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++
++ dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0);
++ devm_free_irq(dev,
++ sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq,
++ dev);
++ fsl_mc_free_irqs(sw_dev);
++}
++
++static int __cold
++ethsw_init(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_dev_priv *priv;
++ struct net_device *netdev;
++ int err = 0;
++ u16 i;
++ u16 version_major, version_minor;
++ const struct dpsw_stp_cfg stp_cfg = {
++ .vlan_id = 1,
++ .state = DPSW_STP_STATE_FORWARDING,
++ };
++
++ netdev = dev_get_drvdata(dev);
++ priv = netdev_priv(netdev);
++
++ priv->dev_id = sw_dev->obj_desc.id;
++
++ err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle);
++ if (err) {
++ dev_err(dev, "dpsw_open err %d\n", err);
++ goto err_exit;
++ }
++ if (!priv->dpsw_handle) {
++ dev_err(dev, "dpsw_open returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_exit;
++ }
++
++ err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
++ &priv->sw_attr);
++ if (err) {
++ dev_err(dev, "dpsw_get_attributes err %d\n", err);
++ goto err_close;
++ }
++
++ err = dpsw_get_api_version(priv->mc_io, 0,
++ &version_major,
++ &version_minor);
++ if (err) {
++ dev_err(dev, "dpsw_get_api_version err %d\n", err);
++ goto err_close;
++ }
++
++ /* Minimum supported DPSW version check */
++ if (version_major < DPSW_MIN_VER_MAJOR ||
++ (version_major == DPSW_MIN_VER_MAJOR &&
++ version_minor < DPSW_MIN_VER_MINOR)) {
++ dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
++ version_major,
++ version_minor,
++ DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
++ err = -ENOTSUPP;
++ goto err_close;
++ }
++
++ err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
++ if (err) {
++ dev_err(dev, "dpsw_reset err %d\n", err);
++ goto err_close;
++ }
++
++ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
++ DPSW_FDB_LEARNING_MODE_HW);
++ if (err) {
++ dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
++ goto err_close;
++ }
++
++ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
++ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
++ &stp_cfg);
++ if (err) {
++ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
++ err, i);
++ goto err_close;
++ }
++
++ err = dpsw_if_set_broadcast(priv->mc_io, 0,
++ priv->dpsw_handle, i, 1);
++ if (err) {
++ dev_err(dev,
++ "dpsw_if_set_broadcast err %d for port %d\n",
++ err, i);
++ goto err_close;
++ }
++ }
++
++ return 0;
++
++err_close:
++ dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
++err_exit:
++ return err;
++}
++
++static int __cold
++ethsw_takedown(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct net_device *netdev;
++ struct ethsw_dev_priv *priv;
++ int err;
++
++ netdev = dev_get_drvdata(dev);
++ priv = netdev_priv(netdev);
++
++ err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
++ if (err)
++ dev_warn(dev, "dpsw_close err %d\n", err);
++
++ return 0;
++}
++
++static int __cold
++ethsw_remove(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev;
++ struct net_device *netdev;
++ struct ethsw_dev_priv *priv;
++ struct ethsw_port_priv *port_priv;
++ struct list_head *pos;
++
++ dev = &sw_dev->dev;
++ netdev = dev_get_drvdata(dev);
++ priv = netdev_priv(netdev);
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv, list);
++
++ rtnl_lock();
++ netdev_upper_dev_unlink(port_priv->netdev, netdev);
++ rtnl_unlock();
++
++ unregister_netdev(port_priv->netdev);
++ free_netdev(port_priv->netdev);
++ }
++
++ ethsw_teardown_irqs(sw_dev);
++
++ unregister_netdev(netdev);
++
++ ethsw_takedown(sw_dev);
++ fsl_mc_portal_free(priv->mc_io);
++
++ dev_set_drvdata(dev, NULL);
++ free_netdev(netdev);
++
++ return 0;
++}
++
++static int __cold
++ethsw_probe(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev;
++ struct net_device *netdev = NULL;
++ struct ethsw_dev_priv *priv = NULL;
++ int err = 0;
++ u16 i;
++ const char def_mcast[ETH_ALEN] = {
++ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
++ };
++ char port_name[IFNAMSIZ];
++
++ dev = &sw_dev->dev;
++
++ /* register switch device, it's for management only - no I/O */
++ netdev = alloc_etherdev(sizeof(*priv));
++ if (!netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ return -ENOMEM;
++ }
++ netdev->netdev_ops = &ethsw_ops;
++
++ SET_NETDEV_DEV(netdev, dev);
++ dev_set_drvdata(dev, netdev);
++
++ priv = netdev_priv(netdev);
++ priv->netdev = netdev;
++
++ err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
++ if (err) {
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ goto err_free_netdev;
++ }
++ if (!priv->mc_io) {
++ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_free_netdev;
++ }
++
++ err = ethsw_init(sw_dev);
++ if (err) {
++ dev_err(dev, "switch init err %d\n", err);
++ goto err_free_cmdport;
++ }
++
++ netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
++
++ /* TODO: should we hold rtnl_lock here? We can't register_netdev under
++ * lock
++ */
++ dev_alloc_name(netdev, "sw%d");
++ err = register_netdev(netdev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev error %d\n", err);
++ goto err_takedown;
++ }
++
++ /* VLAN 1 is implicitly configured on the switch */
++ priv->vlans[1] = ETHSW_VLAN_MEMBER;
++ /* Flooding, learning are implicitly enabled */
++ priv->learning = true;
++ priv->flood = true;
++
++ /* register switch ports */
++ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
++
++ INIT_LIST_HEAD(&priv->port_list);
++ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
++ struct net_device *port_netdev;
++ struct ethsw_port_priv *port_priv;
++
++ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
++ if (!port_netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ err = -ENOMEM;
++ goto err_takedown;
++ }
++
++ port_priv = netdev_priv(port_netdev);
++ port_priv->netdev = port_netdev;
++ port_priv->ethsw_priv = priv;
++
++ port_priv->port_index = i;
++ port_priv->stp_state = BR_STATE_FORWARDING;
++ /* VLAN 1 is configured by default on all switch ports */
++ port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
++ ETHSW_VLAN_PVID;
++
++ SET_NETDEV_DEV(port_netdev, dev);
++ port_netdev->netdev_ops = &ethsw_port_ops;
++ port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
++
++ port_netdev->flags = port_netdev->flags |
++ IFF_PROMISC | IFF_SLAVE;
++
++ dev_alloc_name(port_netdev, port_name);
++ err = register_netdev(port_netdev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev error %d\n", err);
++ free_netdev(port_netdev);
++ goto err_takedown;
++ }
++
++ rtnl_lock();
++
++ err = netdev_master_upper_dev_link(port_netdev, netdev,
++ NULL, NULL);
++ if (err) {
++ dev_err(dev, "netdev_master_upper_dev_link error %d\n",
++ err);
++ unregister_netdev(port_netdev);
++ free_netdev(port_netdev);
++ rtnl_unlock();
++ goto err_takedown;
++ }
++
++ rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
++
++ rtnl_unlock();
++
++ list_add(&port_priv->list, &priv->port_list);
++
++ /* TODO: implement set_rm_mode instead of this */
++ err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
++ if (err)
++ dev_warn(&netdev->dev,
++ "ethsw_port_fdb_add_mc err %d\n", err);
++ }
++
++ /* the switch starts up enabled */
++ rtnl_lock();
++ err = dev_open(netdev);
++ rtnl_unlock();
++ if (err)
++ dev_warn(dev, "dev_open err %d\n", err);
++
++ /* setup irqs */
++ err = ethsw_setup_irqs(sw_dev);
++ if (unlikely(err)) {
++ dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
++ goto err_takedown;
++ }
++
++ dev_info(&netdev->dev,
++ "probed %d port switch\n", priv->sw_attr.num_ifs);
++ return 0;
++
++err_takedown:
++ ethsw_remove(sw_dev);
++err_free_cmdport:
++ fsl_mc_portal_free(priv->mc_io);
++err_free_netdev:
++ dev_set_drvdata(dev, NULL);
++ free_netdev(netdev);
++
++ return err;
++}
++
++static const struct fsl_mc_device_id ethsw_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpsw",
++ },
++ {}
++};
++
++static struct fsl_mc_driver eth_sw_drv = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = ethsw_probe,
++ .remove = ethsw_remove,
++ .match_id_table = ethsw_match_id_table,
++};
++
++module_fsl_mc_driver(eth_sw_drv);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)");
+diff --git a/drivers/staging/fsl-dpaa2/evb/Kconfig b/drivers/staging/fsl-dpaa2/evb/Kconfig
+new file mode 100644
+index 00000000..3534f697
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
+@@ -0,0 +1,7 @@
++config FSL_DPAA2_EVB
++ tristate "DPAA2 Edge Virtual Bridge"
++ depends on FSL_MC_BUS && FSL_DPAA2
++ select VLAN_8021Q
++ default y
++ ---help---
++ Prototype driver for DPAA2 Edge Virtual Bridge.
+diff --git a/drivers/staging/fsl-dpaa2/evb/Makefile b/drivers/staging/fsl-dpaa2/evb/Makefile
+new file mode 100644
+index 00000000..ecc529d7
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/Makefile
+@@ -0,0 +1,10 @@
++
++obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o
++
++dpaa2-evb-objs := evb.o dpdmux.o
++
++all:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++
++clean:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
+diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
+new file mode 100644
+index 00000000..66306804
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
+@@ -0,0 +1,279 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPDMUX_CMD_H
++#define _FSL_DPDMUX_CMD_H
++
++/* DPDMUX Version */
++#define DPDMUX_VER_MAJOR 6
++#define DPDMUX_VER_MINOR 1
++
++#define DPDMUX_CMD_BASE_VER 1
++#define DPDMUX_CMD_ID_OFFSET 4
++
++#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER)
++
++/* Command IDs */
++#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800)
++#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806)
++#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906)
++#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986)
++#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06)
++
++#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
++#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
++#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004)
++#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
++#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
++
++#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012)
++#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013)
++#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014)
++#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015)
++#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016)
++#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017)
++
++#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
++
++#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3)
++
++#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7)
++#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8)
++#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9)
++#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa)
++
++#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0)
++#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1)
++#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2)
++#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3)
++#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4)
++
++#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5)
++#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6)
++#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7)
++
++#define DPDMUX_MASK(field) \
++ GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \
++ DPDMUX_##field##_SHIFT)
++#define dpdmux_set_field(var, field, val) \
++ ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field)))
++#define dpdmux_get_field(var, field) \
++ (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT)
++
++struct dpdmux_cmd_open {
++ u32 dpdmux_id;
++};
++
++struct dpdmux_cmd_create {
++ u8 method;
++ u8 manip;
++ u16 num_ifs;
++ u32 pad;
++
++ u16 adv_max_dmat_entries;
++ u16 adv_max_mc_groups;
++ u16 adv_max_vlan_ids;
++ u16 pad1;
++
++ u64 options;
++};
++
++struct dpdmux_cmd_destroy {
++ u32 dpdmux_id;
++};
++
++#define DPDMUX_ENABLE_SHIFT 0
++#define DPDMUX_ENABLE_SIZE 1
++
++struct dpdmux_rsp_is_enabled {
++ u8 en;
++};
++
++struct dpdmux_cmd_set_irq_enable {
++ u8 enable;
++ u8 pad[3];
++ u8 irq_index;
++};
++
++struct dpdmux_cmd_get_irq_enable {
++ u32 pad;
++ u8 irq_index;
++};
++
++struct dpdmux_rsp_get_irq_enable {
++ u8 enable;
++};
++
++struct dpdmux_cmd_set_irq_mask {
++ u32 mask;
++ u8 irq_index;
++};
++
++struct dpdmux_cmd_get_irq_mask {
++ u32 pad;
++ u8 irq_index;
++};
++
++struct dpdmux_rsp_get_irq_mask {
++ u32 mask;
++};
++
++struct dpdmux_cmd_get_irq_status {
++ u32 status;
++ u8 irq_index;
++};
++
++struct dpdmux_rsp_get_irq_status {
++ u32 status;
++};
++
++struct dpdmux_cmd_clear_irq_status {
++ u32 status;
++ u8 irq_index;
++};
++
++struct dpdmux_rsp_get_attr {
++ u8 method;
++ u8 manip;
++ u16 num_ifs;
++ u16 mem_size;
++ u16 pad;
++
++ u64 pad1;
++
++ u32 id;
++ u32 pad2;
++
++ u64 options;
++};
++
++struct dpdmux_cmd_set_max_frame_length {
++ u16 max_frame_length;
++};
++
++#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0
++#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4
++#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4
++#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4
++
++struct dpdmux_cmd_if_set_accepted_frames {
++ u16 if_id;
++ u8 frames_options;
++};
++
++struct dpdmux_cmd_if {
++ u16 if_id;
++};
++
++struct dpdmux_rsp_if_get_attr {
++ u8 pad[3];
++ u8 enabled;
++ u8 pad1[3];
++ u8 accepted_frames_type;
++ u32 rate;
++};
++
++struct dpdmux_cmd_if_l2_rule {
++ u16 if_id;
++ u8 mac_addr5;
++ u8 mac_addr4;
++ u8 mac_addr3;
++ u8 mac_addr2;
++ u8 mac_addr1;
++ u8 mac_addr0;
++
++ u32 pad;
++ u16 vlan_id;
++};
++
++struct dpdmux_cmd_if_get_counter {
++ u16 if_id;
++ u8 counter_type;
++};
++
++struct dpdmux_rsp_if_get_counter {
++ u64 pad;
++ u64 counter;
++};
++
++struct dpdmux_cmd_if_set_link_cfg {
++ u16 if_id;
++ u16 pad[3];
++
++ u32 rate;
++ u32 pad1;
++
++ u64 options;
++};
++
++struct dpdmux_cmd_if_get_link_state {
++ u16 if_id;
++};
++
++struct dpdmux_rsp_if_get_link_state {
++ u32 pad;
++ u8 up;
++ u8 pad1[3];
++
++ u32 rate;
++ u32 pad2;
++
++ u64 options;
++};
++
++struct dpdmux_rsp_get_api_version {
++ u16 major;
++ u16 minor;
++};
++
++struct dpdmux_set_custom_key {
++ u64 pad[6];
++ u64 key_cfg_iova;
++};
++
++struct dpdmux_cmd_add_custom_cls_entry {
++ u8 pad[3];
++ u8 key_size;
++ u16 pad1;
++ u16 dest_if;
++ u64 key_iova;
++ u64 mask_iova;
++};
++
++struct dpdmux_cmd_remove_custom_cls_entry {
++ u8 pad[3];
++ u8 key_size;
++ u32 pad1;
++ u64 key_iova;
++ u64 mask_iova;
++};
++
++#endif /* _FSL_DPDMUX_CMD_H */
+diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux.c b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
+new file mode 100644
+index 00000000..f7a87633
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
+@@ -0,0 +1,1112 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dpdmux.h"
++#include "dpdmux-cmd.h"
++
++/**
++ * dpdmux_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpdmux_id: DPDMUX unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpdmux_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpdmux_id,
++ u16 *token)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_open *cmd_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ cmd_params = (struct dpdmux_cmd_open *)cmd.params;
++ cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
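++
++/* Minimal usage sketch (not part of the driver; error handling elided
++ * and 'mc_io'/'dpdmux_id' assumed to come from the caller's fsl-mc
++ * device):
++ *
++ *	u16 token;
++ *	int err = dpdmux_open(mc_io, 0, dpdmux_id, &token);
++ *
++ *	if (!err) {
++ *		... issue dpdmux_* commands using 'token' ...
++ *		dpdmux_close(mc_io, 0, token);
++ *	}
++ */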
++
++/**
++ * dpdmux_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_create() - Create the DPDMUX object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @dprc_token: Parent container token; '0' for default container
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @obj_id: returned object id
++ *
++ * Create the DPDMUX object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * The function accepts an authentication token of a parent
++ * container that this object should be assigned to. The token
++ * can be '0' so the object will be assigned to the default container.
++ * The newly created object can be opened with the returned
++ * object id and using the container's associated tokens and MC portals.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_create(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ const struct dpdmux_cfg *cfg,
++ u32 *obj_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_create *cmd_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
++ cmd_flags,
++ dprc_token);
++ cmd_params = (struct dpdmux_cmd_create *)cmd.params;
++ cmd_params->method = cfg->method;
++ cmd_params->manip = cfg->manip;
++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ cmd_params->adv_max_dmat_entries =
++ cpu_to_le16(cfg->adv.max_dmat_entries);
++ cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
++ cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
++ cmd_params->options = cpu_to_le64(cfg->adv.options);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *obj_id = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
++
++/**
++ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @dprc_token: Parent container token; '0' for default container
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @object_id: The object id; it must be a valid id within the container that
++ * created this object;
++ *
++ * The function accepts the authentication token of the parent container that
++ * created the object (not the one that currently owns the object). The object
++ * is searched within parent using the provided 'object_id'.
++ * All tokens to the object must be closed before calling destroy.
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpdmux_destroy(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ u32 object_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_destroy *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
++ cmd_flags,
++ dprc_token);
++ cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
++ cmd_params->dpdmux_id = cpu_to_le32(object_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_enable() - Enable DPDMUX functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_disable() - Disable DPDMUX functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_rsp_is_enabled *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
++ *en = dpdmux_get_field(rsp_params->en, ENABLE);
++
++ return 0;
++}
++
++/**
++ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state: if the interrupt is disabled, no cause will
++ * raise an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_set_irq_enable *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params;
++ cmd_params->enable = en;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_get_irq_enable() - Get overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_get_irq_enable *cmd_params;
++ struct dpdmux_rsp_get_irq_enable *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params;
++ *en = rsp_params->enable;
++
++ return 0;
++}
++
++/**
++ * dpdmux_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_set_irq_mask *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_get_irq_mask *cmd_params;
++ struct dpdmux_rsp_get_irq_mask *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params;
++ *mask = le32_to_cpu(rsp_params->mask);
++
++ return 0;
++}
++
++/**
++ * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_get_irq_status *cmd_params;
++ struct dpdmux_rsp_get_irq_status *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params;
++ *status = le32_to_cpu(rsp_params->status);
++
++ return 0;
++}
++
++/**
++ * dpdmux_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_clear_irq_status *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(status);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
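++
++/* Typical programming order (a sketch mirroring ethsw_setup_irqs()
++ * earlier in this patch; not a documented DPDMUX requirement):
++ *
++ *	dpdmux_set_irq_enable(mc_io, 0, token, irq_index, 0);
++ *	... request the Linux IRQ handler ...
++ *	dpdmux_set_irq_mask(mc_io, 0, token, irq_index, mask);
++ *	dpdmux_set_irq_enable(mc_io, 0, token, irq_index, 1);
++ */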
++
++/**
++ * dpdmux_get_attributes() - Retrieve DPDMUX attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpdmux_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_rsp_get_attr *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params;
++ attr->id = le32_to_cpu(rsp_params->id);
++ attr->options = le64_to_cpu(rsp_params->options);
++ attr->method = rsp_params->method;
++ attr->manip = rsp_params->manip;
++ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
++
++ return 0;
++}
++
++/**
++ * dpdmux_if_enable() - Enable Interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id)
++{
++ struct dpdmux_cmd_if *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_if_disable() - Disable Interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id)
++{
++ struct dpdmux_cmd_if *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @max_frame_length: The required maximum frame length
++ *
++ * Update the maximum frame length on all DPDMUX interfaces.
++ * In VEPA mode, the maximum frame length of all DPDMUX interfaces is
++ * updated to the minimum of the connected DPNIs' maximum frame lengths
++ * and the DPDMUX's own maximum frame length.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 max_frame_length)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_set_max_frame_length *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
++ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_ul_reset_counters() - Function resets the uplink counter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_if_set_accepted_frames() - Set the accepted frame types
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
++ * @cfg: Frame types configuration
++ *
++ * If 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set, untagged and
++ * priority-tagged frames are discarded.
++ * If 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set, only untagged and
++ * priority-tagged frames are accepted.
++ * If 'DPDMUX_ADMIT_ALL' is set (default mode), all VLAN-tagged,
++ * untagged and priority-tagged frames are accepted.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpdmux_accepted_frames *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE,
++ cfg->type);
++ dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION,
++ cfg->unaccept_act);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
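++
++/* Sketch (illustrative; assumes the DPDMUX_ADMIT_ONLY_VLAN_TAGGED and
++ * DPDMUX_ACTION_DROP enumerators from dpdmux.h): admit only VLAN-tagged
++ * frames on interface 1 and drop everything else:
++ *
++ *	struct dpdmux_accepted_frames cfg = {
++ *		.type = DPDMUX_ADMIT_ONLY_VLAN_TAGGED,
++ *		.unaccept_act = DPDMUX_ACTION_DROP,
++ *	};
++ *
++ *	err = dpdmux_if_set_accepted_frames(mc_io, 0, token, 1, &cfg);
++ */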
++
++/**
++ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
++ * @attr: Interface attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpdmux_if_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_if *cmd_params;
++ struct dpdmux_rsp_if_get_attr *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
++ attr->rate = le32_to_cpu(rsp_params->rate);
++ attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
++ attr->accept_frame_type =
++ dpdmux_get_field(rsp_params->accepted_frames_type,
++ ACCEPTED_FRAMES_TYPE);
++
++ return 0;
++}
++
++/**
++ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Destination interface ID
++ * @rule: L2 rule
++ *
++ * Function removes an L2 rule from the DPDMUX table
++ * or removes an interface from an existing multicast address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpdmux_l2_rule *rule)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_if_l2_rule *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
++ cmd_params->mac_addr5 = rule->mac_addr[5];
++ cmd_params->mac_addr4 = rule->mac_addr[4];
++ cmd_params->mac_addr3 = rule->mac_addr[3];
++ cmd_params->mac_addr2 = rule->mac_addr[2];
++ cmd_params->mac_addr1 = rule->mac_addr[1];
++ cmd_params->mac_addr0 = rule->mac_addr[0];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Destination interface ID
++ * @rule: L2 rule
++ *
++ * Function adds an L2 rule into the DPDMUX table
++ * or adds an interface to an existing multicast address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpdmux_l2_rule *rule)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_if_l2_rule *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
++ cmd_params->mac_addr5 = rule->mac_addr[5];
++ cmd_params->mac_addr4 = rule->mac_addr[4];
++ cmd_params->mac_addr3 = rule->mac_addr[3];
++ cmd_params->mac_addr2 = rule->mac_addr[2];
++ cmd_params->mac_addr1 = rule->mac_addr[1];
++ cmd_params->mac_addr0 = rule->mac_addr[0];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
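++
++/* Sketch (hypothetical values): steer frames for one MAC/VLAN pair to
++ * interface 1:
++ *
++ *	struct dpdmux_l2_rule rule = {
++ *		.mac_addr = { 0x02, 0x00, 0xc0, 0xa8, 0x45, 0x01 },
++ *		.vlan_id = 100,
++ *	};
++ *
++ *	err = dpdmux_if_add_l2_rule(mc_io, 0, token, 1, &rule);
++ */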
++
++/**
++ * dpdmux_if_get_counter() - Functions obtains specific counter of an interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface Id
++ * @counter_type: counter type
++ * @counter: Returned specific counter information
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpdmux_counter_type counter_type,
++ u64 *counter)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_if_get_counter *cmd_params;
++ struct dpdmux_rsp_if_get_counter *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->counter_type = counter_type;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
++ *counter = le64_to_cpu(rsp_params->counter);
++
++ return 0;
++}
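++
++/*
++ * Illustrative counter read (sketch only; placeholders as above):
++ *
++ *	u64 frames = 0;
++ *
++ *	err = dpdmux_if_get_counter(mc_io, 0, token, if_id,
++ *				    DPDMUX_CNT_ING_FRAME, &frames);
++ */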
++
++/**
++ * dpdmux_if_set_link_cfg() - set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMUX object
++ * @if_id: interface id
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpdmux_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_if_set_link_cfg *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->rate = cpu_to_le32(cfg->rate);
++ cmd_params->options = cpu_to_le64(cfg->options);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_if_get_link_state() - Return the link state
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMUX object
++ * @if_id:	interface id
++ * @state:	link state
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpdmux_link_state *state)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_cmd_if_get_link_state *cmd_params;
++ struct dpdmux_rsp_if_get_link_state *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
++ state->rate = le32_to_cpu(rsp_params->rate);
++ state->options = le64_to_cpu(rsp_params->options);
++ state->up = dpdmux_get_field(rsp_params->up, ENABLE);
++
++ return 0;
++}
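++
++/*
++ * Illustrative link management round trip (sketch only; placeholders as
++ * above). Reads the current state and re-applies it with auto-negotiation
++ * forced on:
++ *
++ *	struct dpdmux_link_state state;
++ *	struct dpdmux_link_cfg cfg;
++ *
++ *	err = dpdmux_if_get_link_state(mc_io, 0, token, if_id, &state);
++ *	cfg.rate = state.rate;
++ *	cfg.options = state.options | DPDMUX_LINK_OPT_AUTONEG;
++ *	err = dpdmux_if_set_link_cfg(mc_io, 0, token, if_id, &cfg);
++ */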
++
++/**
++ * dpdmux_set_custom_key() - Set a custom classification key.
++ *
++ * This API is only available for a DPDMUX instance created with
++ * DPDMUX_METHOD_CUSTOM. This API must be called before populating the
++ * classification table using dpdmux_add_custom_cls_entry.
++ *
++ * Calls to dpdmux_set_custom_key remove all existing classification entries
++ * that may have been added previously using dpdmux_add_custom_cls_entry.
++ *
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMUX object
++ * @key_cfg_iova: DMA address of a configuration structure set up using
++ *	dpkg_prepare_key_cfg. Maximum key size is 24 bytes.
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u64 key_cfg_iova)
++{
++ struct dpdmux_set_custom_key *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_set_custom_key *)cmd.params;
++ cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_add_custom_cls_entry() - Add a custom classification entry.
++ *
++ * This API is only available for DPDMUX instances created with
++ * DPDMUX_METHOD_CUSTOM. Before calling this function, a classification key
++ * composition rule must be set up using dpdmux_set_custom_key.
++ *
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMUX object
++ * @rule:	Classification rule to insert. Rules cannot be duplicated;
++ *	if a matching rule already exists, the action will be replaced.
++ * @action:	Action to perform for matching traffic.
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpdmux_rule_cfg *rule,
++ struct dpdmux_cls_action *action)
++{
++ struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
++ cmd_flags,
++ token);
++
++ cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params;
++ cmd_params->key_size = rule->key_size;
++ cmd_params->dest_if = cpu_to_le16(action->dest_if);
++ cmd_params->key_iova = cpu_to_le64(rule->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
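++
++/*
++ * Illustrative custom-classification flow (sketch only, not part of the
++ * MC API itself; mc_io, dpdmux_id, cfg_iova, key_iova and mask_iova are
++ * placeholders assumed to be prepared by the caller):
++ *
++ *	struct dpdmux_rule_cfg rule = { 0 };
++ *	struct dpdmux_cls_action action = { 0 };
++ *	u16 token;
++ *
++ *	err = dpdmux_open(mc_io, 0, dpdmux_id, &token);
++ *	err = dpdmux_set_custom_key(mc_io, 0, token, cfg_iova);
++ *	rule.key_iova = key_iova;
++ *	rule.mask_iova = mask_iova;
++ *	rule.key_size = 4;		(must match the configured key size)
++ *	action.dest_if = 1;		(0 is the uplink, 1+ are downlinks)
++ *	err = dpdmux_add_custom_cls_entry(mc_io, 0, token, &rule, &action);
++ */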
++
++/**
++ * dpdmux_remove_custom_cls_entry() - Remove a custom classification entry.
++ *
++ * This API is only available for DPDMUX instances created with
++ * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification
++ * entries previously inserted using dpdmux_add_custom_cls_entry.
++ *
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMUX object
++ * @rule:	Classification rule to remove
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpdmux_rule_cfg *rule)
++{
++ struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
++ cmd_params->key_size = rule->key_size;
++ cmd_params->key_iova = cpu_to_le64(rule->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpdmux_get_api_version() - Get Data Path Demux API version
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of data path demux API
++ * @minor_ver: Minor version of data path demux API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct mc_command cmd = { 0 };
++ struct dpdmux_rsp_get_api_version *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
++ cmd_flags,
++ 0);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
++ *major_ver = le16_to_cpu(rsp_params->major);
++ *minor_ver = le16_to_cpu(rsp_params->minor);
++
++ return 0;
++}
+diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux.h b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
+new file mode 100644
+index 00000000..a6ccc7ef
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
+@@ -0,0 +1,453 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPDMUX_H
++#define __FSL_DPDMUX_H
++
++struct fsl_mc_io;
++
++/* Data Path Demux API
++ * Contains API for handling DPDMUX topology and functionality
++ */
++
++int dpdmux_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpdmux_id,
++ u16 *token);
++
++int dpdmux_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * DPDMUX general options
++ */
++
++/**
++ * Enable bridging between internal interfaces
++ */
++#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
++
++/**
++ * Mask support for classification
++ */
++#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL
++
++#define DPDMUX_IRQ_INDEX_IF 0x0000
++#define DPDMUX_IRQ_INDEX 0x0001
++
++/**
++ * IRQ event - Indicates that the link state changed
++ */
++#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
++
++/**
++ * enum dpdmux_manip - DPDMUX manipulation operations
++ * @DPDMUX_MANIP_NONE: No manipulation on frames
++ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
++ */
++enum dpdmux_manip {
++ DPDMUX_MANIP_NONE = 0x0,
++ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
++};
++
++/**
++ * enum dpdmux_method - DPDMUX method options
++ * @DPDMUX_METHOD_NONE: no DPDMUX method
++ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
++ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
++ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
++ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
++ */
++enum dpdmux_method {
++ DPDMUX_METHOD_NONE = 0x0,
++ DPDMUX_METHOD_C_VLAN_MAC = 0x1,
++ DPDMUX_METHOD_MAC = 0x2,
++ DPDMUX_METHOD_C_VLAN = 0x3,
++ DPDMUX_METHOD_S_VLAN = 0x4,
++ DPDMUX_METHOD_CUSTOM = 0x5
++};
++
++/**
++ * struct dpdmux_cfg - DPDMUX configuration parameters
++ * @method: Defines the operation method for the DPDMUX address table
++ * @manip: Required manipulation operation
++ * @num_ifs: Number of interfaces (excluding the uplink interface)
++ * @adv: Advanced parameters; default is all zeros;
++ * use this structure to change default settings
++ */
++struct dpdmux_cfg {
++ enum dpdmux_method method;
++ enum dpdmux_manip manip;
++ u16 num_ifs;
++ /**
++ * struct adv - Advanced parameters
++ * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
++ * @max_dmat_entries: Maximum entries in DPDMUX address table
++ * 0 - indicates default: 64 entries per interface.
++ * @max_mc_groups: Number of multicast groups in DPDMUX table
++ * 0 - indicates default: 32 multicast groups
++ * @max_vlan_ids: Maximum VLAN IDs allowed in the system -
++ *	relevant only when working in the mac+vlan method.
++ *	0 - indicates default: 16 VLAN IDs.
++ */
++ struct {
++ u64 options;
++ u16 max_dmat_entries;
++ u16 max_mc_groups;
++ u16 max_vlan_ids;
++ } adv;
++};
++
++int dpdmux_create(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ const struct dpdmux_cfg *cfg,
++ u32 *obj_id);
++
++int dpdmux_destroy(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ u32 object_id);
++
++int dpdmux_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpdmux_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpdmux_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en);
++
++int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en);
++
++int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask);
++
++int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask);
++
++int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status);
++
++int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status);
++
++/**
++ * struct dpdmux_attr - Structure representing DPDMUX attributes
++ * @id: DPDMUX object ID
++ * @options: Configuration options (bitmap)
++ * @method: DPDMUX address table method
++ * @manip: DPDMUX manipulation type
++ * @num_ifs: Number of interfaces (excluding the uplink interface)
++ * @mem_size: DPDMUX frame storage memory size
++ */
++struct dpdmux_attr {
++ int id;
++ u64 options;
++ enum dpdmux_method method;
++ enum dpdmux_manip manip;
++ u16 num_ifs;
++ u16 mem_size;
++};
++
++int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpdmux_attr *attr);
++
++int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 max_frame_length);
++
++/**
++ * enum dpdmux_counter_type - Counter types
++ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
++ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
++ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
++ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
++ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
++ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
++ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
++ */
++enum dpdmux_counter_type {
++ DPDMUX_CNT_ING_FRAME = 0x0,
++ DPDMUX_CNT_ING_BYTE = 0x1,
++ DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
++ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
++ DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
++ DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
++ DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
++ DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
++ DPDMUX_CNT_EGR_FRAME = 0x8,
++ DPDMUX_CNT_EGR_BYTE = 0x9,
++ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
++};
++
++/**
++ * enum dpdmux_accepted_frames_type - DPDMUX frame types
++ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
++ * priority-tagged frames
++ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
++ * priority-tagged frames that are received on this
++ * interface
++ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
++ * received on this interface are accepted
++ */
++enum dpdmux_accepted_frames_type {
++ DPDMUX_ADMIT_ALL = 0,
++ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
++ DPDMUX_ADMIT_ONLY_UNTAGGED = 2
++};
++
++/**
++ * enum dpdmux_action - DPDMUX action for un-accepted frames
++ * @DPDMUX_ACTION_DROP: Drop un-accepted frames
++ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
++ * control interface
++ */
++enum dpdmux_action {
++ DPDMUX_ACTION_DROP = 0,
++ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
++};
++
++/**
++ * struct dpdmux_accepted_frames - Frame types configuration
++ * @type: Defines ingress accepted frames
++ * @unaccept_act: Defines action on frames not accepted
++ */
++struct dpdmux_accepted_frames {
++ enum dpdmux_accepted_frames_type type;
++ enum dpdmux_action unaccept_act;
++};
++
++int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpdmux_accepted_frames *cfg);
++
++/**
++ * struct dpdmux_if_attr - Structure representing frame types configuration
++ * @rate: Configured interface rate (in bits per second)
++ * @enabled: Indicates if interface is enabled
++ * @accept_frame_type: Indicates type of accepted frames for the interface
++ */
++struct dpdmux_if_attr {
++ u32 rate;
++ int enabled;
++ enum dpdmux_accepted_frames_type accept_frame_type;
++};
++
++int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpdmux_if_attr *attr);
++
++int dpdmux_if_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id);
++
++int dpdmux_if_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id);
++
++/**
++ * struct dpdmux_l2_rule - Structure representing L2 rule
++ * @mac_addr: MAC address
++ * @vlan_id: VLAN ID
++ */
++struct dpdmux_l2_rule {
++ u8 mac_addr[6];
++ u16 vlan_id;
++};
++
++int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpdmux_l2_rule *rule);
++
++int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpdmux_l2_rule *rule);
++
++int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpdmux_counter_type counter_type,
++ u64 *counter);
++
++int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable a-symmetric pause frames
++ */
++#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
++ */
++struct dpdmux_link_cfg {
++ u32 rate;
++ u64 options;
++};
++
++int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpdmux_link_cfg *cfg);
++/**
++ * struct dpdmux_link_state - Structure representing DPDMUX link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
++ * @up: 0 - down, 1 - up
++ */
++struct dpdmux_link_state {
++ u32 rate;
++ u64 options;
++ int up;
++};
++
++int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpdmux_link_state *state);
++
++int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u64 key_cfg_iova);
++
++/**
++ * struct dpdmux_rule_cfg - Custom classification rule.
++ *
++ * @key_iova: DMA address of buffer storing the look-up value
++ * @mask_iova: DMA address of the mask used for TCAM classification
++ * @key_size: size, in bytes, of the look-up value. This must match the size
++ * of the look-up key defined using dpdmux_set_custom_key, otherwise the
++ * entry will never be hit
++ */
++struct dpdmux_rule_cfg {
++ u64 key_iova;
++ u64 mask_iova;
++ u8 key_size;
++};
++
++/**
++ * struct dpdmux_cls_action - Action to execute for frames matching the
++ * classification entry
++ *
++ * @dest_if: Interface to forward the frames to. Port numbering is similar to
++ * the one used to connect interfaces:
++ * - 0 is the uplink port,
++ * - all others are downlink ports.
++ */
++struct dpdmux_cls_action {
++ u16 dest_if;
++};
++
++int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpdmux_rule_cfg *rule,
++ struct dpdmux_cls_action *action);
++
++int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpdmux_rule_cfg *rule);
++
++int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
++
++#endif /* __FSL_DPDMUX_H */
+diff --git a/drivers/staging/fsl-dpaa2/evb/evb.c b/drivers/staging/fsl-dpaa2/evb/evb.c
+new file mode 100644
+index 00000000..9ee09b42
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/evb.c
+@@ -0,0 +1,1352 @@
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/module.h>
++#include <linux/msi.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++
++#include <uapi/linux/if_bridge.h>
++#include <net/netlink.h>
++
++#include "../../fsl-mc/include/mc.h"
++
++#include "dpdmux.h"
++#include "dpdmux-cmd.h"
++
++static const char evb_drv_version[] = "0.1";
++
++/* Minimal supported DPDMUX version */
++#define DPDMUX_MIN_VER_MAJOR 6
++#define DPDMUX_MIN_VER_MINOR 0
++
++/* IRQ index */
++#define DPDMUX_MAX_IRQ_NUM 2
++
++/* MAX FRAME LENGTH (currently 10k) */
++#define EVB_MAX_FRAME_LENGTH (10 * 1024)
++/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
++#define EVB_MIN_FRAME_LENGTH 68
++
++struct evb_port_priv {
++ struct net_device *netdev;
++ struct list_head list;
++ u16 port_index;
++ struct evb_priv *evb_priv;
++ u8 vlans[VLAN_VID_MASK + 1];
++};
++
++struct evb_priv {
++ /* keep first */
++ struct evb_port_priv uplink;
++
++ struct fsl_mc_io *mc_io;
++ struct list_head port_list;
++ struct dpdmux_attr attr;
++ u16 mux_handle;
++ int dev_id;
++};
++
++static int _evb_port_carrier_state_sync(struct net_device *netdev)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct dpdmux_link_state state;
++ int err;
++
++ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index, &state);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
++ return err;
++ }
++
++ WARN_ONCE(state.up > 1, "Garbage read into link_state");
++
++ if (state.up)
++ netif_carrier_on(port_priv->netdev);
++ else
++ netif_carrier_off(port_priv->netdev);
++
++ return 0;
++}
++
++static int evb_port_open(struct net_device *netdev)
++{
++ int err;
++
++ /* FIXME: enable port when support added */
++
++ err = _evb_port_carrier_state_sync(netdev);
++ if (err) {
++		netdev_err(netdev, "_evb_port_carrier_state_sync err %d\n",
++			   err);
++ return err;
++ }
++
++ return 0;
++}
++
++static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
++{
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++}
++
++static int evb_links_state_update(struct evb_priv *priv)
++{
++ struct evb_port_priv *port_priv;
++ struct list_head *pos;
++ int err;
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct evb_port_priv, list);
++
++ err = _evb_port_carrier_state_sync(port_priv->netdev);
++ if (err)
++ netdev_err(port_priv->netdev,
++ "_evb_port_carrier_state_sync err %d\n",
++ err);
++ }
++
++ return 0;
++}
++
++static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ struct fsl_mc_io *io = priv->mc_io;
++ u16 token = priv->mux_handle;
++ int irq_index = DPDMUX_IRQ_INDEX_IF;
++
++ /* Mask the events and the if_id reserved bits to be cleared on read */
++ u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
++ int err;
++
++ /* Sanity check */
++ if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
++ goto out;
++ if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num))
++ goto out;
++
++ err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
++ if (unlikely(err)) {
++ netdev_err(netdev, "Can't get irq status (err %d)", err);
++ err = dpdmux_clear_irq_status(io, 0, token, irq_index,
++ 0xFFFFFFFF);
++ if (unlikely(err))
++ netdev_err(netdev, "Can't clear irq status (err %d)",
++ err);
++ goto out;
++ }
++
++ if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
++ err = evb_links_state_update(priv);
++ if (unlikely(err))
++ goto out;
++ }
++
++out:
++ return IRQ_HANDLED;
++}
++
++static int evb_setup_irqs(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ int err = 0;
++ struct fsl_mc_device_irq *irq;
++ const int irq_index = DPDMUX_IRQ_INDEX_IF;
++ u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED;
++
++ err = fsl_mc_allocate_irqs(evb_dev);
++ if (unlikely(err)) {
++ dev_err(dev, "MC irqs allocation failed\n");
++ return err;
++ }
++
++ if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) {
++ err = -EINVAL;
++ goto free_irq;
++ }
++
++ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
++ irq_index, 0);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_set_irq_enable err %d\n", err);
++ goto free_irq;
++ }
++
++ irq = evb_dev->irqs[irq_index];
++
++ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
++ evb_irq0_handler,
++ _evb_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(dev), dev);
++ if (unlikely(err)) {
++ dev_err(dev, "devm_request_threaded_irq(): %d", err);
++ goto free_irq;
++ }
++
++ err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle,
++ irq_index, mask);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_set_irq_mask(): %d", err);
++ goto free_devm_irq;
++ }
++
++ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
++ irq_index, 1);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_set_irq_enable(): %d", err);
++ goto free_devm_irq;
++ }
++
++ return 0;
++
++free_devm_irq:
++ devm_free_irq(dev, irq->msi_desc->irq, dev);
++free_irq:
++ fsl_mc_free_irqs(evb_dev);
++ return err;
++}
++
++static void evb_teardown_irqs(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++
++ dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
++ DPDMUX_IRQ_INDEX_IF, 0);
++
++ devm_free_irq(dev,
++ evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq,
++ dev);
++ fsl_mc_free_irqs(evb_dev);
++}
++
++static int evb_port_add_rule(struct net_device *netdev,
++ const unsigned char *addr, u16 vid)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct dpdmux_l2_rule rule = { .vlan_id = vid };
++ int err;
++
++ if (addr)
++ ether_addr_copy(rule.mac_addr, addr);
++
++ err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index, &rule);
++ if (unlikely(err))
++ netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err);
++ return err;
++}
++
++static int evb_port_del_rule(struct net_device *netdev,
++ const unsigned char *addr, u16 vid)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct dpdmux_l2_rule rule = { .vlan_id = vid };
++ int err;
++
++ if (addr)
++ ether_addr_copy(rule.mac_addr, addr);
++
++ err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index, &rule);
++ if (unlikely(err))
++ netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err);
++ return err;
++}
++
++static bool _lookup_address(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct netdev_hw_addr *ha;
++ struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ?
++ &netdev->uc : &netdev->mc;
++
++ netif_addr_lock_bh(netdev);
++ list_for_each_entry(ha, &list->list, list) {
++ if (ether_addr_equal(ha->addr, addr)) {
++ netif_addr_unlock_bh(netdev);
++ return true;
++ }
++ }
++ netif_addr_unlock_bh(netdev);
++ return false;
++}
++
++static inline int evb_port_fdb_prep(struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 *vid,
++ bool del)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++
++ *vid = 0;
++
++ if (evb_priv->attr.method != DPDMUX_METHOD_MAC &&
++ evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
++ netdev_err(netdev,
++ "EVB mode does not support MAC classification\n");
++ return -EOPNOTSUPP;
++ }
++
++ /* check if the address is configured on this port */
++ if (_lookup_address(netdev, addr)) {
++ if (!del)
++ return -EEXIST;
++ } else {
++ if (del)
++ return -ENOENT;
++ }
++
++ if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
++ if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
++ netdev_err(netdev, "invalid vlan size %d\n",
++ nla_len(tb[NDA_VLAN]));
++ return -EINVAL;
++ }
++
++ *vid = nla_get_u16(tb[NDA_VLAN]);
++
++ if (!*vid || *vid >= VLAN_VID_MASK) {
++ netdev_err(netdev, "invalid vid value 0x%04x\n", *vid);
++ return -EINVAL;
++ }
++ } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
++ netdev_err(netdev,
++ "EVB mode requires explicit VLAN configuration\n");
++ return -EINVAL;
++ } else if (tb[NDA_VLAN]) {
++ netdev_warn(netdev, "VLAN not supported, argument ignored\n");
++ }
++
++ return 0;
++}
++
++static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 vid, u16 flags)
++{
++ u16 _vid;
++ int err;
++
++ /* TODO: add replace support when added to iproute bridge */
++ if (!(flags & NLM_F_REQUEST)) {
++ netdev_err(netdev,
++ "evb_port_fdb_add unexpected flags value %08x\n",
++ flags);
++ return -EINVAL;
++ }
++
++ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0);
++ if (unlikely(err))
++ return err;
++
++ err = evb_port_add_rule(netdev, addr, _vid);
++ if (unlikely(err))
++ return err;
++
++ if (is_unicast_ether_addr(addr)) {
++ err = dev_uc_add(netdev, addr);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dev_uc_add err %d\n", err);
++ return err;
++ }
++ } else {
++ err = dev_mc_add(netdev, addr);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dev_mc_add err %d\n", err);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 vid)
++{
++ u16 _vid;
++ int err;
++
++ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1);
++ if (unlikely(err))
++ return err;
++
++ err = evb_port_del_rule(netdev, addr, _vid);
++ if (unlikely(err))
++ return err;
++
++ if (is_unicast_ether_addr(addr)) {
++ err = dev_uc_del(netdev, addr);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dev_uc_del err %d\n", err);
++ return err;
++ }
++ } else {
++ err = dev_mc_del(netdev, addr);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dev_mc_del err %d\n", err);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static int evb_change_mtu(struct net_device *netdev,
++ int mtu)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++ struct list_head *pos;
++ int err = 0;
++
++ /* This operation is not permitted on downlinks */
++ if (port_priv->port_index > 0)
++ return -EPERM;
++
++ if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
++ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
++ mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
++ return -EINVAL;
++ }
++
++ err = dpdmux_set_max_frame_length(evb_priv->mc_io,
++ 0,
++ evb_priv->mux_handle,
++ (uint16_t)mtu);
++
++ if (unlikely(err)) {
++		netdev_err(netdev, "dpdmux_set_max_frame_length err %d\n",
++			   err);
++ return err;
++ }
++
++ /* Update the max frame length for downlinks */
++ list_for_each(pos, &evb_priv->port_list) {
++ port_priv = list_entry(pos, struct evb_port_priv, list);
++ port_priv->netdev->mtu = mtu;
++ }
++
++ netdev->mtu = mtu;
++ return 0;
++}
++
++static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
++ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
++ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
++ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
++ .len = sizeof(struct bridge_vlan_info), },
++};
++
++static int evb_setlink_af_spec(struct net_device *netdev,
++ struct nlattr **tb)
++{
++ struct bridge_vlan_info *vinfo;
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ int err = 0;
++
++ if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
++ netdev_err(netdev, "no VLAN INFO in nlmsg\n");
++ return -EOPNOTSUPP;
++ }
++
++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++
++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
++ return -EINVAL;
++
++ err = evb_port_add_rule(netdev, NULL, vinfo->vid);
++ if (unlikely(err))
++ return err;
++
++ port_priv->vlans[vinfo->vid] = 1;
++
++ return 0;
++}
++
++static int evb_setlink(struct net_device *netdev,
++ struct nlmsghdr *nlh,
++ u16 flags)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++ struct nlattr *attr;
++	struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX ?
++			   IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
++ int err = 0;
++
++ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
++ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
++ netdev_err(netdev,
++ "EVB mode does not support VLAN only classification\n");
++ return -EOPNOTSUPP;
++ }
++
++ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (attr) {
++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
++ ifla_br_policy);
++ if (unlikely(err)) {
++ netdev_err(netdev,
++ "nla_parse_nested for br_policy err %d\n",
++ err);
++ return err;
++ }
++
++ err = evb_setlink_af_spec(netdev, tb);
++ return err;
++ }
++
++ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
++ return -EOPNOTSUPP;
++}
++
++static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++ u8 operstate = netif_running(netdev) ?
++ netdev->operstate : IF_OPER_DOWN;
++ int iflink;
++ int err;
++
++ err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
++ if (unlikely(err))
++ goto nla_put_err;
++ if (netdev->addr_len) {
++ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
++ netdev->dev_addr);
++ if (unlikely(err))
++ goto nla_put_err;
++ }
++
++ iflink = dev_get_iflink(netdev);
++ if (netdev->ifindex != iflink) {
++ err = nla_put_u32(skb, IFLA_LINK, iflink);
++ if (unlikely(err))
++ goto nla_put_err;
++ }
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ return err;
++}
++
++static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct nlattr *nest;
++ int err;
++
++ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
++ if (!nest) {
++ netdev_err(netdev, "nla_nest_start failed\n");
++ return -ENOMEM;
++ }
++
++ err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1);
++ if (unlikely(err))
++ goto nla_put_err;
++ nla_nest_end(skb, nest);
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ nla_nest_cancel(skb, nest);
++ return err;
++}
++
++static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct nlattr *nest;
++ struct bridge_vlan_info vinfo;
++ const u8 *vlans = port_priv->vlans;
++ u16 i;
++ int err;
++
++ nest = nla_nest_start(skb, IFLA_AF_SPEC);
++ if (!nest) {
++ netdev_err(netdev, "nla_nest_start failed");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < VLAN_VID_MASK + 1; i++) {
++ if (!vlans[i])
++ continue;
++
++ vinfo.flags = 0;
++ vinfo.vid = i;
++
++ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
++ sizeof(vinfo), &vinfo);
++ if (unlikely(err))
++ goto nla_put_err;
++ }
++
++ nla_nest_end(skb, nest);
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ nla_nest_cancel(skb, nest);
++ return err;
++}
++
++static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq,
++ struct net_device *netdev, u32 filter_mask, int nlflags)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++ struct ifinfomsg *hdr;
++ struct nlmsghdr *nlh;
++ int err;
++
++ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
++ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
++ return 0;
++ }
++
++ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
++ if (!nlh)
++ return -EMSGSIZE;
++
++ hdr = nlmsg_data(nlh);
++ memset(hdr, 0, sizeof(*hdr));
++ hdr->ifi_family = AF_BRIDGE;
++ hdr->ifi_type = netdev->type;
++ hdr->ifi_index = netdev->ifindex;
++ hdr->ifi_flags = dev_get_flags(netdev);
++
++ err = __nla_put_netdev(skb, netdev);
++ if (unlikely(err))
++ goto nla_put_err;
++
++ err = __nla_put_port(skb, netdev);
++ if (unlikely(err))
++ goto nla_put_err;
++
++ /* Check if the VID information is requested */
++ if (filter_mask & RTEXT_FILTER_BRVLAN) {
++ err = __nla_put_vlan(skb, netdev);
++ if (unlikely(err))
++ goto nla_put_err;
++ }
++
++ nlmsg_end(skb, nlh);
++ return skb->len;
++
++nla_put_err:
++ nlmsg_cancel(skb, nlh);
++ return -EMSGSIZE;
++}
++
++static int evb_dellink(struct net_device *netdev,
++ struct nlmsghdr *nlh,
++ u16 flags)
++{
++ struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
++ struct nlattr *spec;
++ struct bridge_vlan_info *vinfo;
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ int err = 0;
++
++ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (!spec)
++ return 0;
++
++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
++ if (unlikely(err))
++ return err;
++
++ if (!tb[IFLA_BRIDGE_VLAN_INFO])
++ return -EOPNOTSUPP;
++
++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++
++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
++ return -EINVAL;
++
++ err = evb_port_del_rule(netdev, NULL, vinfo->vid);
++ if (unlikely(err)) {
++ netdev_err(netdev, "evb_port_del_rule err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vinfo->vid] = 0;
++
++ return 0;
++}
++
++static void evb_port_get_stats(struct net_device *netdev,
++			       struct rtnl_link_stats64 *storage)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ u64 tmp;
++ int err;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_FRAME_DISCARD,
++ &storage->rx_dropped);
++ if (unlikely(err)) {
++ storage->rx_dropped = tmp;
++ goto error;
++ }
++ storage->rx_dropped += tmp;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_MCAST_FRAME,
++ &storage->multicast);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_EGR_FRAME_DISCARD,
++ &storage->tx_dropped);
++ if (unlikely(err))
++ goto error;
++
++ return;
++
++error:
++ netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
++}
++
++static const struct net_device_ops evb_port_ops = {
++ .ndo_open = &evb_port_open,
++
++ .ndo_start_xmit = &evb_dropframe,
++
++ .ndo_fdb_add = &evb_port_fdb_add,
++ .ndo_fdb_del = &evb_port_fdb_del,
++
++ .ndo_get_stats64 = &evb_port_get_stats,
++ .ndo_change_mtu = &evb_change_mtu,
++};
++
++static void evb_get_drvinfo(struct net_device *netdev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ u16 version_major, version_minor;
++ int err;
++
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
++ strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version));
++
++ err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0,
++ &version_major,
++ &version_minor);
++ if (err)
++ strlcpy(drvinfo->fw_version, "N/A",
++ sizeof(drvinfo->fw_version));
++ else
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%u.%u", version_major, version_minor);
++
++ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
++}
++
++static int evb_get_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct dpdmux_link_state state = {0};
++ int err = 0;
++
++ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ &state);
++ if (err) {
++ netdev_err(netdev, "ERROR %d getting link state", err);
++ goto out;
++ }
++
++	/* At the moment, we have no way of interrogating the DPMAC from
++	 * the DPDMUX side - and there may not even be a DPMAC at all.
++	 * Report only the autoneg state, duplex and speed.
++	 */
++ if (state.options & DPDMUX_LINK_OPT_AUTONEG)
++ cmd->autoneg = AUTONEG_ENABLE;
++ if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX))
++ cmd->duplex = DUPLEX_FULL;
++ ethtool_cmd_speed_set(cmd, state.rate);
++
++out:
++ return err;
++}
++
++static int evb_set_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct dpdmux_link_state state = {0};
++ struct dpdmux_link_cfg cfg = {0};
++ int err = 0;
++
++ netdev_dbg(netdev, "Setting link parameters...");
++
++ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ &state);
++ if (err) {
++ netdev_err(netdev, "ERROR %d getting link state", err);
++ goto out;
++ }
++
++ /* Due to a temporary MC limitation, the DPDMUX port must be down
++ * in order to be able to change link settings. Taking steps to let
++ * the user know that.
++ */
++ if (netif_running(netdev)) {
++ netdev_info(netdev,
++ "Sorry, interface must be brought down first.\n");
++ return -EACCES;
++ }
++
++ cfg.options = state.options;
++ cfg.rate = ethtool_cmd_speed(cmd);
++ if (cmd->autoneg == AUTONEG_ENABLE)
++ cfg.options |= DPDMUX_LINK_OPT_AUTONEG;
++ else
++ cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG;
++ if (cmd->duplex == DUPLEX_HALF)
++ cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX;
++ else
++ cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX;
++
++ err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ &cfg);
++ if (err)
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
++
++out:
++ return err;
++}
++
++static struct {
++ enum dpdmux_counter_type id;
++ char name[ETH_GSTRING_LEN];
++} evb_ethtool_counters[] = {
++ {DPDMUX_CNT_ING_FRAME, "rx frames"},
++ {DPDMUX_CNT_ING_BYTE, "rx bytes"},
++ {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"},
++ {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
++ {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
++ {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
++ {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
++ {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
++ {DPDMUX_CNT_EGR_FRAME, "tx frames"},
++ {DPDMUX_CNT_EGR_BYTE, "tx bytes"},
++ {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
++};
++
++static int evb_ethtool_get_sset_count(struct net_device *dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(evb_ethtool_counters);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static void evb_ethtool_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data)
++{
++ u32 i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ evb_ethtool_counters[i].name, ETH_GSTRING_LEN);
++ break;
++ }
++}
++
++static void evb_ethtool_get_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ u32 i;
++ int err;
++
++ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) {
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ evb_ethtool_counters[i].id,
++ &data[i]);
++ if (err)
++ netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n",
++ evb_ethtool_counters[i].name, err);
++ }
++}
++
++static const struct ethtool_ops evb_port_ethtool_ops = {
++ .get_drvinfo = &evb_get_drvinfo,
++ .get_link = &ethtool_op_get_link,
++ .get_settings = &evb_get_settings,
++ .set_settings = &evb_set_settings,
++ .get_strings = &evb_ethtool_get_strings,
++ .get_ethtool_stats = &evb_ethtool_get_stats,
++ .get_sset_count = &evb_ethtool_get_sset_count,
++};
++
++static int evb_open(struct net_device *netdev)
++{
++ struct evb_priv *priv = netdev_priv(netdev);
++ int err = 0;
++
++ err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle);
++ if (unlikely(err))
++ netdev_err(netdev, "dpdmux_enable err %d\n", err);
++
++ return err;
++}
++
++static int evb_close(struct net_device *netdev)
++{
++ struct evb_priv *priv = netdev_priv(netdev);
++ int err = 0;
++
++ err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle);
++ if (unlikely(err))
++ netdev_err(netdev, "dpdmux_disable err %d\n", err);
++
++ return err;
++}
++
++static const struct net_device_ops evb_ops = {
++ .ndo_start_xmit = &evb_dropframe,
++ .ndo_open = &evb_open,
++ .ndo_stop = &evb_close,
++
++ .ndo_bridge_setlink = &evb_setlink,
++ .ndo_bridge_getlink = &evb_getlink,
++ .ndo_bridge_dellink = &evb_dellink,
++
++ .ndo_get_stats64 = &evb_port_get_stats,
++ .ndo_change_mtu = &evb_change_mtu,
++};
++
++static int evb_takedown(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ int err;
++
++ err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
++ if (unlikely(err))
++ dev_warn(dev, "dpdmux_close err %d\n", err);
++
++ return 0;
++}
++
++static int evb_init(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ u16 version_major;
++ u16 version_minor;
++ int err = 0;
++
++ priv->dev_id = evb_dev->obj_desc.id;
++
++ err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_open err %d\n", err);
++ goto err_exit;
++ }
++ if (!priv->mux_handle) {
++ dev_err(dev, "dpdmux_open returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_exit;
++ }
++
++ err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
++ &priv->attr);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_get_attributes err %d\n", err);
++ goto err_close;
++ }
++
++ err = dpdmux_get_api_version(priv->mc_io, 0,
++ &version_major,
++ &version_minor);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_get_api_version err %d\n", err);
++ goto err_close;
++ }
++
++ /* Minimum supported DPDMUX version check */
++ if (version_major < DPDMUX_MIN_VER_MAJOR ||
++ (version_major == DPDMUX_MIN_VER_MAJOR &&
++ version_minor < DPDMUX_MIN_VER_MINOR)) {
++ dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
++ version_major, version_minor,
++			DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
++ err = -ENOTSUPP;
++ goto err_close;
++ }
++
++ err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_reset err %d\n", err);
++ goto err_close;
++ }
++
++ return 0;
++
++err_close:
++ dpdmux_close(priv->mc_io, 0, priv->mux_handle);
++err_exit:
++ return err;
++}
++
++static int evb_remove(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ struct evb_port_priv *port_priv;
++ struct list_head *pos;
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct evb_port_priv, list);
++
++ rtnl_lock();
++ netdev_upper_dev_unlink(port_priv->netdev, netdev);
++ rtnl_unlock();
++
++ unregister_netdev(port_priv->netdev);
++ free_netdev(port_priv->netdev);
++ }
++
++ evb_teardown_irqs(evb_dev);
++
++ unregister_netdev(netdev);
++
++ evb_takedown(evb_dev);
++ fsl_mc_portal_free(priv->mc_io);
++
++ dev_set_drvdata(dev, NULL);
++ free_netdev(netdev);
++
++ return 0;
++}
++
++static int evb_probe(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev;
++ struct evb_priv *priv = NULL;
++ struct net_device *netdev = NULL;
++ char port_name[IFNAMSIZ];
++ int i;
++ int err = 0;
++
++ dev = &evb_dev->dev;
++
++ /* register switch device, it's for management only - no I/O */
++ netdev = alloc_etherdev(sizeof(*priv));
++ if (!netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ return -ENOMEM;
++ }
++ netdev->netdev_ops = &evb_ops;
++
++ dev_set_drvdata(dev, netdev);
++
++ priv = netdev_priv(netdev);
++
++ err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
++ if (unlikely(err)) {
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ goto err_free_netdev;
++ }
++ if (!priv->mc_io) {
++ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_free_netdev;
++ }
++
++ err = evb_init(evb_dev);
++ if (unlikely(err)) {
++ dev_err(dev, "evb init err %d\n", err);
++ goto err_free_cmdport;
++ }
++
++ INIT_LIST_HEAD(&priv->port_list);
++ netdev->flags |= IFF_PROMISC | IFF_MASTER;
++
++ dev_alloc_name(netdev, "evb%d");
++
++ /* register switch ports */
++ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
++
++ /* only register downlinks? */
++ for (i = 0; i < priv->attr.num_ifs + 1; i++) {
++ struct net_device *port_netdev;
++ struct evb_port_priv *port_priv;
++
++ if (i) {
++ port_netdev =
++ alloc_etherdev(sizeof(struct evb_port_priv));
++ if (!port_netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ goto err_takedown;
++ }
++
++ port_priv = netdev_priv(port_netdev);
++
++ port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
++
++ dev_alloc_name(port_netdev, port_name);
++ } else {
++ port_netdev = netdev;
++ port_priv = &priv->uplink;
++ }
++
++ port_priv->netdev = port_netdev;
++ port_priv->evb_priv = priv;
++ port_priv->port_index = i;
++
++ SET_NETDEV_DEV(port_netdev, dev);
++
++ if (i) {
++ port_netdev->netdev_ops = &evb_port_ops;
++
++ err = register_netdev(port_netdev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev err %d\n", err);
++ free_netdev(port_netdev);
++ goto err_takedown;
++ }
++
++ rtnl_lock();
++ err = netdev_master_upper_dev_link(port_netdev, netdev,
++ NULL, NULL);
++ if (unlikely(err)) {
++ dev_err(dev, "netdev_master_upper_dev_link err %d\n",
++ err);
++ unregister_netdev(port_netdev);
++ free_netdev(port_netdev);
++ rtnl_unlock();
++ goto err_takedown;
++ }
++ rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
++ IFF_SLAVE, GFP_KERNEL);
++ rtnl_unlock();
++
++ list_add(&port_priv->list, &priv->port_list);
++ } else {
++ err = register_netdev(netdev);
++
++ if (err < 0) {
++ dev_err(dev, "register_netdev error %d\n", err);
++ goto err_takedown;
++ }
++ }
++
++ port_netdev->ethtool_ops = &evb_port_ethtool_ops;
++
++ /* ports are up from init */
++ rtnl_lock();
++ err = dev_open(port_netdev);
++ rtnl_unlock();
++ if (unlikely(err))
++ dev_warn(dev, "dev_open err %d\n", err);
++ }
++
++ /* setup irqs */
++ err = evb_setup_irqs(evb_dev);
++ if (unlikely(err)) {
++ dev_warn(dev, "evb_setup_irqs err %d\n", err);
++ goto err_takedown;
++ }
++
++ dev_info(dev, "probed evb device with %d ports\n",
++ priv->attr.num_ifs);
++ return 0;
++
++err_takedown:
++	evb_remove(evb_dev);
++	return err;
++err_free_cmdport:
++	fsl_mc_portal_free(priv->mc_io);
++err_free_netdev:
++	free_netdev(netdev);
++	return err;
++}
++
++static const struct fsl_mc_device_id evb_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpdmux",
++ },
++ {}
++};
++
++static struct fsl_mc_driver evb_drv = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = evb_probe,
++ .remove = evb_remove,
++ .match_id_table = evb_match_id_table,
++};
++
++module_fsl_mc_driver(evb_drv);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
+diff --git a/drivers/staging/fsl-dpaa2/mac/Kconfig b/drivers/staging/fsl-dpaa2/mac/Kconfig
+new file mode 100644
+index 00000000..c94f7c1b
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
+@@ -0,0 +1,23 @@
++config FSL_DPAA2_MAC
++ tristate "DPAA2 MAC / PHY interface"
++ depends on FSL_MC_BUS && FSL_DPAA2
++ select MDIO_BUS_MUX_MMIOREG
++ select FSL_XGMAC_MDIO
++ select FIXED_PHY
++ ---help---
++ Prototype driver for the DPAA2 MAC / PHY interface object.
++ This driver works as a proxy between phylib (including the PHY
++ drivers) and the MC firmware. It forwards link state updates from
++ phylib to the MC, and receives an interrupt from the MC whenever
++ a request is made to change the link state.
++
++
++config FSL_DPAA2_MAC_NETDEVS
++ bool "Expose net interfaces for PHYs"
++ default n
++ depends on FSL_DPAA2_MAC
++ ---help---
++ Exposes macX net interfaces which allow direct control over MACs and
++ PHYs.
++
++ Leave disabled if unsure.
+diff --git a/drivers/staging/fsl-dpaa2/mac/Makefile b/drivers/staging/fsl-dpaa2/mac/Makefile
+new file mode 100644
+index 00000000..bda94101
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/Makefile
+@@ -0,0 +1,10 @@
++
++obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
++
++dpaa2-mac-objs := mac.o dpmac.o
++
++all:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++
++clean:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
+diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
+new file mode 100644
+index 00000000..abdc3c0d
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
+@@ -0,0 +1,172 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPMAC_CMD_H
++#define _FSL_DPMAC_CMD_H
++
++/* DPMAC Version */
++#define DPMAC_VER_MAJOR 4
++#define DPMAC_VER_MINOR 2
++#define DPMAC_CMD_BASE_VERSION 1
++#define DPMAC_CMD_ID_OFFSET 4
++
++#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
++
++/* Command IDs */
++#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
++#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
++#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c)
++#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c)
++#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
++
++#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
++#define DPMAC_CMDID_RESET DPMAC_CMD(0x005)
++
++#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012)
++#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013)
++#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014)
++#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015)
++#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016)
++#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017)
++
++#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2)
++#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3)
++#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
++
++#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5)
++
++/* Macros for accessing command fields smaller than 1 byte */
++#define DPMAC_MASK(field) \
++ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
++ DPMAC_##field##_SHIFT)
++#define dpmac_set_field(var, field, val) \
++ ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
++#define dpmac_get_field(var, field) \
++ (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
++
++struct dpmac_cmd_open {
++ u32 dpmac_id;
++};
++
++struct dpmac_cmd_create {
++ u32 mac_id;
++};
++
++struct dpmac_cmd_destroy {
++ u32 dpmac_id;
++};
++
++struct dpmac_cmd_set_irq_enable {
++ u8 enable;
++ u8 pad[3];
++ u8 irq_index;
++};
++
++struct dpmac_cmd_get_irq_enable {
++ u32 pad;
++ u8 irq_index;
++};
++
++struct dpmac_rsp_get_irq_enable {
++ u8 enabled;
++};
++
++struct dpmac_cmd_set_irq_mask {
++ u32 mask;
++ u8 irq_index;
++};
++
++struct dpmac_cmd_get_irq_mask {
++ u32 pad;
++ u8 irq_index;
++};
++
++struct dpmac_rsp_get_irq_mask {
++ u32 mask;
++};
++
++struct dpmac_cmd_get_irq_status {
++ u32 status;
++ u8 irq_index;
++};
++
++struct dpmac_rsp_get_irq_status {
++ u32 status;
++};
++
++struct dpmac_cmd_clear_irq_status {
++ u32 status;
++ u8 irq_index;
++};
++
++struct dpmac_rsp_get_attributes {
++ u8 eth_if;
++ u8 link_type;
++ u16 id;
++ u32 max_rate;
++};
++
++struct dpmac_rsp_get_link_cfg {
++ u64 options;
++ u32 rate;
++};
++
++#define DPMAC_STATE_SIZE 1
++#define DPMAC_STATE_SHIFT 0
++
++struct dpmac_cmd_set_link_state {
++ u64 options;
++ u32 rate;
++ u32 pad;
++ /* only least significant bit is valid */
++ u8 up;
++};
++
++struct dpmac_cmd_get_counter {
++ u8 type;
++};
++
++struct dpmac_rsp_get_counter {
++ u64 pad;
++ u64 counter;
++};
++
++struct dpmac_rsp_get_api_version {
++ u16 major;
++ u16 minor;
++};
++
++struct dpmac_cmd_set_port_mac_addr {
++ u8 pad[2];
++ u8 addr[6];
++};
++
++#endif /* _FSL_DPMAC_CMD_H */
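The two macro families above do all of the wire formatting for this driver: DPMAC_CMD() packs a command ID together with the base command version (for example, DPMAC_CMDID_SET_LINK_STATE is (0x0c3 << DPMAC_CMD_ID_OFFSET) | 1 == 0x0c31), while dpmac_set_field()/dpmac_get_field() pack sub-byte fields via GENMASK(). A minimal sketch of the link-state bit, using only definitions from this header (the helper name is illustrative, not part of the patch):

static u8 dpmac_pack_link_up(int up)
{
        u8 field = 0;

        /* DPMAC_STATE_SHIFT is 0 and DPMAC_STATE_SIZE is 1, so only the
         * least significant bit of 'field' is used
         */
        dpmac_set_field(field, STATE, !!up);

        return field;   /* dpmac_get_field(field, STATE) == !!up */
}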
+diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.c b/drivers/staging/fsl-dpaa2/mac/dpmac.c
+new file mode 100644
+index 00000000..f7827423
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
+@@ -0,0 +1,620 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dpmac.h"
++#include "dpmac-cmd.h"
++
++/**
++ * dpmac_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpmac_id: DPMAC unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpmac_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpmac_id,
++ u16 *token)
++{
++ struct dpmac_cmd_open *cmd_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ cmd_params = (struct dpmac_cmd_open *)cmd.params;
++ cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return err;
++}
++
++/**
++ * dpmac_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_create() - Create the DPMAC object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @dprc_token: Parent container token; '0' for default container
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @obj_id: Returned object id
++ *
++ * Create the DPMAC object, allocate required resources and
++ * perform required initialization.
++ *
++ * The function accepts an authentication token of a parent
++ * container that this object should be assigned to. The token
++ * can be '0' so the object will be assigned to the default container.
++ * The newly created object can be opened with the returned
++ * object id and using the container's associated tokens and MC portals.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_create(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ const struct dpmac_cfg *cfg,
++ u32 *obj_id)
++{
++ struct dpmac_cmd_create *cmd_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
++ cmd_flags,
++ dprc_token);
++ cmd_params = (struct dpmac_cmd_create *)cmd.params;
++ cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *obj_id = mc_cmd_read_object_id(&cmd);
++
++ return 0;
++}
++
++/**
++ * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @dprc_token: Parent container token; '0' for default container
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @object_id: The object id; it must be a valid id within the container that
++ * created this object;
++ *
++ * The function accepts the authentication token of the parent container that
++ * created the object (not the one that currently owns the object). The object
++ * is searched within parent using the provided 'object_id'.
++ * All tokens to the object must be closed before calling destroy.
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpmac_destroy(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ u32 object_id)
++{
++ struct dpmac_cmd_destroy *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
++ cmd_flags,
++ dprc_token);
++ cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
++ cmd_params->dpmac_id = cpu_to_le32(object_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state: if the interrupt is disabled, no cause will raise
++ * an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en)
++{
++ struct dpmac_cmd_set_irq_enable *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ cmd_params->enable = en;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en)
++{
++ struct dpmac_cmd_get_irq_enable *cmd_params;
++ struct dpmac_rsp_get_irq_enable *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params;
++ *en = rsp_params->enabled;
++
++ return 0;
++}
++
++/**
++ * dpmac_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask)
++{
++ struct dpmac_cmd_set_irq_mask *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask)
++{
++ struct dpmac_cmd_get_irq_mask *cmd_params;
++ struct dpmac_rsp_get_irq_mask *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params;
++ *mask = le32_to_cpu(rsp_params->mask);
++
++ return 0;
++}
++
++/**
++ * dpmac_get_irq_status() - Get the current status of any pending interrupts.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status)
++{
++ struct dpmac_cmd_get_irq_status *cmd_params;
++ struct dpmac_rsp_get_irq_status *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params;
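++ /* the caller's incoming *status value is carried to the MC in the command */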
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params;
++ *status = le32_to_cpu(rsp_params->status);
++
++ return 0;
++}
++
++/**
++ * dpmac_clear_irq_status() - Clear a pending interrupt's status
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @status: Bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status)
++{
++ struct dpmac_cmd_clear_irq_status *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(status);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_get_attributes - Retrieve DPMAC attributes.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_attr *attr)
++{
++ struct dpmac_rsp_get_attributes *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
++ attr->eth_if = rsp_params->eth_if;
++ attr->link_type = rsp_params->link_type;
++ attr->id = le16_to_cpu(rsp_params->id);
++ attr->max_rate = le32_to_cpu(rsp_params->max_rate);
++
++ return 0;
++}
++
++/**
++ * dpmac_get_link_cfg() - Get Ethernet link configuration
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @cfg: Returned structure with the link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_link_cfg *cfg)
++{
++ struct dpmac_rsp_get_link_cfg *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err = 0;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
++ cfg->options = le64_to_cpu(rsp_params->options);
++ cfg->rate = le32_to_cpu(rsp_params->rate);
++
++ return 0;
++}
++
++/**
++ * dpmac_set_link_state() - Set the Ethernet link status
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @link_state: Link state configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_link_state *link_state)
++{
++ struct dpmac_cmd_set_link_state *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
++ cmd_params->options = cpu_to_le64(link_state->options);
++ cmd_params->rate = cpu_to_le32(link_state->rate);
++ cmd_params->up = dpmac_get_field(link_state->up, STATE);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_get_counter() - Read a specific DPMAC counter
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @type: The requested counter
++ * @counter: Returned counter value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpmac_counter type,
++ u64 *counter)
++{
++ struct dpmac_cmd_get_counter *dpmac_cmd;
++ struct dpmac_rsp_get_counter *dpmac_rsp;
++ struct mc_command cmd = { 0 };
++ int err = 0;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
++ cmd_flags,
++ token);
++ dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
++ dpmac_cmd->type = type;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
++ *counter = le64_to_cpu(dpmac_rsp->counter);
++
++ return 0;
++}
++
++/* untested */
++int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 addr[6])
++{
++ struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
++ cmd_flags,
++ token);
++ dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params;
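++ /* the address bytes are written into the command in reverse order */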
++ dpmac_cmd->addr[0] = addr[5];
++ dpmac_cmd->addr[1] = addr[4];
++ dpmac_cmd->addr[2] = addr[3];
++ dpmac_cmd->addr[3] = addr[2];
++ dpmac_cmd->addr[4] = addr[1];
++ dpmac_cmd->addr[5] = addr[0];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_get_api_version() - Get Data Path MAC version
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of data path mac API
++ * @minor_ver: Minor version of data path mac API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct dpmac_rsp_get_api_version *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
++ cmd_flags,
++ 0);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
++ *major_ver = le16_to_cpu(rsp_params->major);
++ *minor_ver = le16_to_cpu(rsp_params->minor);
++
++ return 0;
++}
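Every wrapper in this file follows the same shape: zero an mc_command, encode the header from the command ID, flags and token, cast cmd.params to the command-specific struct, then mc_send_command(). As a minimal sketch of that pattern — DPMAC_CMDID_RESET is defined in dpmac-cmd.h but has no wrapper in this file, so the function below is an assumption, not part of the patch:

int dpmac_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
        struct mc_command cmd = { 0 };

        /* prepare command; reset carries no parameters */
        cmd.header = mc_encode_cmd_header(DPMAC_CMDID_RESET,
                                          cmd_flags,
                                          token);

        /* send command to mc */
        return mc_send_command(mc_io, &cmd);
}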
+diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.h b/drivers/staging/fsl-dpaa2/mac/dpmac.h
+new file mode 100644
+index 00000000..32d4ada2
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
+@@ -0,0 +1,342 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPMAC_H
++#define __FSL_DPMAC_H
++
++/* Data Path MAC API
++ * Contains initialization APIs and runtime control APIs for DPMAC
++ */
++
++struct fsl_mc_io;
++
++int dpmac_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpmac_id,
++ u16 *token);
++
++int dpmac_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * enum dpmac_link_type - DPMAC link type
++ * @DPMAC_LINK_TYPE_NONE: No link
++ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
++ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
++ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
++ */
++enum dpmac_link_type {
++ DPMAC_LINK_TYPE_NONE,
++ DPMAC_LINK_TYPE_FIXED,
++ DPMAC_LINK_TYPE_PHY,
++ DPMAC_LINK_TYPE_BACKPLANE
++};
++
++/**
++ * enum dpmac_eth_if - DPMAC Ethernet interface
++ * @DPMAC_ETH_IF_MII: MII interface
++ * @DPMAC_ETH_IF_RMII: RMII interface
++ * @DPMAC_ETH_IF_SMII: SMII interface
++ * @DPMAC_ETH_IF_GMII: GMII interface
++ * @DPMAC_ETH_IF_RGMII: RGMII interface
++ * @DPMAC_ETH_IF_SGMII: SGMII interface
++ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
++ * @DPMAC_ETH_IF_XAUI: XAUI interface
++ * @DPMAC_ETH_IF_XFI: XFI interface
++ */
++enum dpmac_eth_if {
++ DPMAC_ETH_IF_MII,
++ DPMAC_ETH_IF_RMII,
++ DPMAC_ETH_IF_SMII,
++ DPMAC_ETH_IF_GMII,
++ DPMAC_ETH_IF_RGMII,
++ DPMAC_ETH_IF_SGMII,
++ DPMAC_ETH_IF_QSGMII,
++ DPMAC_ETH_IF_XAUI,
++ DPMAC_ETH_IF_XFI
++};
++
++/**
++ * struct dpmac_cfg - Structure representing DPMAC configuration
++ * @mac_id: Represents the hardware MAC ID; in case of multiple WRIOPs,
++ * the MAC IDs are contiguous.
++ * For example: 2 WRIOPs, 16 MACs in each:
++ * MAC IDs for the 1st WRIOP: 1-16,
++ * MAC IDs for the 2nd WRIOP: 17-32.
++ */
++struct dpmac_cfg {
++ u16 mac_id;
++};
++
++int dpmac_create(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ const struct dpmac_cfg *cfg,
++ u32 *obj_id);
++
++int dpmac_destroy(struct fsl_mc_io *mc_io,
++ u16 dprc_token,
++ u32 cmd_flags,
++ u32 object_id);
++
++/**
++ * DPMAC IRQ Index and Events
++ */
++
++/**
++ * IRQ index
++ */
++#define DPMAC_IRQ_INDEX 0
++/**
++ * IRQ event - indicates a request to change the link configuration
++ */
++#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
++/**
++ * IRQ event - Indicates that the link state changed
++ */
++#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
++
++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en);
++
++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en);
++
++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask);
++
++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask);
++
++int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status);
++
++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status);
++
++/**
++ * struct dpmac_attr - Structure representing DPMAC attributes
++ * @id: DPMAC object ID
++ * @max_rate: Maximum supported rate - in Mbps
++ * @eth_if: Ethernet interface
++ * @link_type: link type
++ */
++struct dpmac_attr {
++ u16 id;
++ u32 max_rate;
++ enum dpmac_eth_if eth_if;
++ enum dpmac_link_type link_type;
++};
++
++int dpmac_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_attr *attr);
++
++/**
++ * DPMAC link configuration/state options
++ */
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpmac_link_cfg - Structure representing DPMAC link configuration
++ * @rate: Link's rate - in Mbps
++ * @options: Enable/Disable DPMAC link cfg features (bitmap)
++ */
++struct dpmac_link_cfg {
++ u32 rate;
++ u64 options;
++};
++
++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_link_cfg *cfg);
++
++/**
++ * struct dpmac_link_state - DPMAC link configuration request
++ * @rate: Rate in Mbps
++ * @options: Enable/Disable DPMAC link cfg features (bitmap)
++ * @up: Link state
++ */
++struct dpmac_link_state {
++ u32 rate;
++ u64 options;
++ int up;
++};
++
++int dpmac_set_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_link_state *link_state);
++
++/**
++ * enum dpmac_counter - DPMAC counter types
++ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
++ * (up to max frame length specified),
++ * good or bad.
++ * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes
++ * with a bad CRC.
++ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
++ * specified, with a bad frame check sequence.
++ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
++ * Occurs when a receive FIFO overflows.
++ * Includes also frames truncated as a result of
++ * the receive FIFO overflow.
++ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
++ * (optional used for wrong SFD).
++ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than
++ * 64 bytes long, with a good CRC.
++ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
++ * specified, with a good frame check sequence.
++ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
++ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
++ * (regular and PFC).
++ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
++ * frames and valid pause frames.
++ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
++ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
++ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
++ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
++ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
++ * (except for undersized/fragment frame).
++ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
++ * frames and valid pause frames transmitted.
++ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
++ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
++ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
++ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
++ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
++ * pause frames.
++ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
++ * pause frames.
++ */
++enum dpmac_counter {
++ DPMAC_CNT_ING_FRAME_64,
++ DPMAC_CNT_ING_FRAME_127,
++ DPMAC_CNT_ING_FRAME_255,
++ DPMAC_CNT_ING_FRAME_511,
++ DPMAC_CNT_ING_FRAME_1023,
++ DPMAC_CNT_ING_FRAME_1518,
++ DPMAC_CNT_ING_FRAME_1519_MAX,
++ DPMAC_CNT_ING_FRAG,
++ DPMAC_CNT_ING_JABBER,
++ DPMAC_CNT_ING_FRAME_DISCARD,
++ DPMAC_CNT_ING_ALIGN_ERR,
++ DPMAC_CNT_EGR_UNDERSIZED,
++ DPMAC_CNT_ING_OVERSIZED,
++ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
++ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
++ DPMAC_CNT_ING_BYTE,
++ DPMAC_CNT_ING_MCAST_FRAME,
++ DPMAC_CNT_ING_BCAST_FRAME,
++ DPMAC_CNT_ING_ALL_FRAME,
++ DPMAC_CNT_ING_UCAST_FRAME,
++ DPMAC_CNT_ING_ERR_FRAME,
++ DPMAC_CNT_EGR_BYTE,
++ DPMAC_CNT_EGR_MCAST_FRAME,
++ DPMAC_CNT_EGR_BCAST_FRAME,
++ DPMAC_CNT_EGR_UCAST_FRAME,
++ DPMAC_CNT_EGR_ERR_FRAME,
++ DPMAC_CNT_ING_GOOD_FRAME,
++ DPMAC_CNT_ENG_GOOD_FRAME
++};
++
++int dpmac_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpmac_counter type,
++ u64 *counter);
++
++/**
++ * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical
++ * port. This is not used for filtering; the MAC is always in
++ * promiscuous mode. The address is passed to DPNIs through the DPNI
++ * API for application use.
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @addr: MAC address to set
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 addr[6]);
++
++int dpmac_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
++
++#endif /* __FSL_DPMAC_H */
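Taken together, the declarations above form the usual open/operate/close flow for an MC object. A hypothetical caller (the function name and error handling are illustrative; only APIs declared in this header are used) might read one counter like this:

static int dpmac_read_rx_bytes(struct fsl_mc_io *mc_io, int dpmac_id,
                               u64 *bytes)
{
        struct dpmac_attr attr;
        u16 token;
        int err;

        err = dpmac_open(mc_io, 0, dpmac_id, &token);
        if (err)
                return err;

        /* attributes carry the MAC id, max rate and interface type */
        err = dpmac_get_attributes(mc_io, 0, token, &attr);
        if (err)
                goto close;

        err = dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_BYTE, bytes);

close:
        dpmac_close(mc_io, 0, token);
        return err;
}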
+diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c
+new file mode 100644
+index 00000000..30169639
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/mac.c
+@@ -0,0 +1,666 @@
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/msi.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++
++#include <uapi/linux/if_bridge.h>
++#include <net/netlink.h>
++
++#include <linux/of.h>
++#include <linux/of_mdio.h>
++#include <linux/of_net.h>
++#include <linux/phy.h>
++#include <linux/phy_fixed.h>
++
++#include "../../fsl-mc/include/mc.h"
++#include "../../fsl-mc/include/mc-sys.h"
++
++#include "dpmac.h"
++#include "dpmac-cmd.h"
++
++struct dpaa2_mac_priv {
++ struct net_device *netdev;
++ struct fsl_mc_device *mc_dev;
++ struct dpmac_attr attr;
++ struct dpmac_link_state old_state;
++};
++
++/* TODO: fix the 10G modes, mapping can't be right:
++ * XGMII is parallel
++ * XAUI is serial, using 8b/10b encoding
++ * XFI is also serial but using 64b/66b encoding
++ * they can't all map to XGMII...
++ *
++ * This must be kept in sync with enum dpmac_eth_if.
++ */
++static phy_interface_t dpaa2_mac_iface_mode[] = {
++ PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */
++ PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */
++ PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */
++ PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */
++ PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */
++ PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */
++ PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */
++ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */
++ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */
++};
++
++static void dpaa2_mac_link_changed(struct net_device *netdev)
++{
++ struct phy_device *phydev;
++ struct dpmac_link_state state = { 0 };
++ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
++ int err;
++
++ /* the PHY just notified us of link state change */
++ phydev = netdev->phydev;
++
++ state.up = !!phydev->link;
++ if (phydev->link) {
++ state.rate = phydev->speed;
++
++ if (!phydev->duplex)
++ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
++ if (phydev->autoneg)
++ state.options |= DPMAC_LINK_OPT_AUTONEG;
++
++ netif_carrier_on(netdev);
++ } else {
++ netif_carrier_off(netdev);
++ }
++
++ if (priv->old_state.up != state.up ||
++ priv->old_state.rate != state.rate ||
++ priv->old_state.options != state.options) {
++ priv->old_state = state;
++ phy_print_status(phydev);
++ }
++
++ /* We must report the link state to the MC on every change, because
++ * we don't know when or whether a potential DPNI may read it.
++ */
++ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
++ priv->mc_dev->mc_handle, &state);
++ if (unlikely(err))
++ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
++}
++
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++}
++
++static int dpaa2_mac_open(struct net_device *netdev)
++{
++ /* start PHY state machine; the netdev is registered before
++ * of_phy_connect() runs in probe, so phydev may still be NULL here
++ */
++ if (netdev->phydev)
++ phy_start(netdev->phydev);
++
++ return 0;
++}
++
++static int dpaa2_mac_stop(struct net_device *netdev)
++{
++ if (!netdev->phydev)
++ goto done;
++
++ /* stop PHY state machine */
++ phy_stop(netdev->phydev);
++
++ /* signal link down to firmware */
++ netdev->phydev->link = 0;
++ dpaa2_mac_link_changed(netdev);
++
++done:
++ return 0;
++}
++
++static int dpaa2_mac_get_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ return phy_ethtool_gset(netdev->phydev, cmd);
++}
++
++static int dpaa2_mac_set_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ return phy_ethtool_sset(netdev->phydev, cmd);
++}
++
++static void dpaa2_mac_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *storage)
++{
++ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
++ u64 tmp;
++ int err;
++
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_MCAST_FRAME,
++ &storage->tx_packets);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_BCAST_FRAME, &tmp);
++ if (err)
++ goto error;
++ storage->tx_packets += tmp;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_UCAST_FRAME, &tmp);
++ if (err)
++ goto error;
++ storage->tx_packets += tmp;
++
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors);
++ if (err)
++ goto error;
++
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_FRAME_DISCARD,
++ &storage->rx_dropped);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_OVERSIZED, &tmp);
++ if (err)
++ goto error;
++ storage->rx_errors += tmp;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_BYTE, &storage->rx_bytes);
++ if (err)
++ goto error;
++
++ return;
++error:
++ netdev_err(netdev, "dpmac_get_counter err %d\n", err);
++}
++
++static struct {
++ enum dpmac_counter id;
++ char name[ETH_GSTRING_LEN];
++} dpaa2_mac_counters[] = {
++ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"},
++ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"},
++ {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"},
++ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"},
++ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"},
++ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"},
++ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"},
++ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"},
++ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"},
++ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"},
++ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"},
++ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"},
++ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"},
++ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"},
++ {DPMAC_CNT_ING_FRAG, "rx frags"},
++ {DPMAC_CNT_ING_JABBER, "rx jabber"},
++ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"},
++ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"},
++ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"},
++ {DPMAC_CNT_ING_BYTE, "rx bytes"},
++ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"},
++ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"},
++ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"},
++ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"},
++ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"},
++ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"},
++ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"},
++ {DPMAC_CNT_EGR_BYTE, "tx bytes"},
++
++};
++
++static void dpaa2_mac_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data)
++{
++ int i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ dpaa2_mac_counters[i].name,
++ ETH_GSTRING_LEN);
++ break;
++ }
++}
++
++static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
++ int i;
++ int err;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) {
++ err = dpmac_get_counter(priv->mc_dev->mc_io,
++ 0,
++ priv->mc_dev->mc_handle,
++ dpaa2_mac_counters[i].id, &data[i]);
++ if (err)
++ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n",
++ dpaa2_mac_counters[i].name, err);
++ }
++}
++
++static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(dpaa2_mac_counters);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static const struct net_device_ops dpaa2_mac_ndo_ops = {
++ .ndo_start_xmit = &dpaa2_mac_drop_frame,
++ .ndo_open = &dpaa2_mac_open,
++ .ndo_stop = &dpaa2_mac_stop,
++ .ndo_get_stats64 = &dpaa2_mac_get_stats,
++};
++
++static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
++ .get_settings = &dpaa2_mac_get_settings,
++ .set_settings = &dpaa2_mac_set_settings,
++ .get_strings = &dpaa2_mac_get_strings,
++ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
++ .get_sset_count = &dpaa2_mac_get_sset_count,
++};
++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
++
++static void configure_link(struct dpaa2_mac_priv *priv,
++ struct dpmac_link_cfg *cfg)
++{
++ struct phy_device *phydev = priv->netdev->phydev;
++
++ if (unlikely(!phydev))
++ return;
++
++ phydev->speed = cfg->rate;
++ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
++
++ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
++ phydev->autoneg = 1;
++ phydev->advertising |= ADVERTISED_Autoneg;
++ } else {
++ phydev->autoneg = 0;
++ phydev->advertising &= ~ADVERTISED_Autoneg;
++ }
++
++ phy_start_aneg(phydev);
++}
++
++static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
++ struct dpmac_link_cfg link_cfg;
++ u32 status;
++ int err;
++
++ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, &status);
++ if (unlikely(err || !status))
++ return IRQ_NONE;
++
++ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
++ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
++ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ &link_cfg);
++ if (unlikely(err))
++ goto out;
++
++ configure_link(priv, &link_cfg);
++ }
++
++out:
++ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, status);
++
++ return IRQ_HANDLED;
++}
++
++static int setup_irqs(struct fsl_mc_device *mc_dev)
++{
++ int err = 0;
++ struct fsl_mc_device_irq *irq;
++
++ err = fsl_mc_allocate_irqs(mc_dev);
++ if (err) {
++ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
++ return err;
++ }
++
++ irq = mc_dev->irqs[0];
++ err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq,
++ NULL, &dpaa2_mac_irq_handler,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(&mc_dev->dev), &mc_dev->dev);
++ if (err) {
++ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
++ err);
++ goto free_irq;
++ }
++
++ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ);
++ if (err) {
++ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
++ goto free_irq;
++ }
++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, 1);
++ if (err) {
++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
++ goto free_irq;
++ }
++
++ return 0;
++
++free_irq:
++ fsl_mc_free_irqs(mc_dev);
++
++ return err;
++}
++
++static void teardown_irqs(struct fsl_mc_device *mc_dev)
++{
++ int err;
++
++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, 0);
++ if (err)
++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
++
++ fsl_mc_free_irqs(mc_dev);
++}
++
++static struct device_node *find_dpmac_node(struct device *dev, u16 dpmac_id)
++{
++ struct device_node *dpmacs, *dpmac = NULL;
++ struct device_node *mc_node = dev->of_node;
++ u32 id;
++ int err;
++
++ dpmacs = of_find_node_by_name(mc_node, "dpmacs");
++ if (!dpmacs) {
++ dev_err(dev, "No dpmacs subnode in device-tree\n");
++ return NULL;
++ }
++
++ while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
++ err = of_property_read_u32(dpmac, "reg", &id);
++ if (err)
++ continue;
++ if (id == dpmac_id)
++ return dpmac;
++ }
++
++ return NULL;
++}
++
++static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
++{
++ struct device *dev;
++ struct dpaa2_mac_priv *priv = NULL;
++ struct device_node *phy_node, *dpmac_node;
++ struct net_device *netdev;
++ phy_interface_t if_mode;
++ int err = 0;
++
++ dev = &mc_dev->dev;
++
++ /* prepare a net_dev structure to make the phy lib API happy */
++ netdev = alloc_etherdev(sizeof(*priv));
++ if (!netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ err = -ENOMEM;
++ goto err_exit;
++ }
++ priv = netdev_priv(netdev);
++ priv->mc_dev = mc_dev;
++ priv->netdev = netdev;
++
++ SET_NETDEV_DEV(netdev, dev);
++
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
++#endif
++
++ dev_set_drvdata(dev, priv);
++
++ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
++ if (err || !mc_dev->mc_io) {
++ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
++ err = -ENODEV;
++ goto err_free_netdev;
++ }
++
++ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
++ &mc_dev->mc_handle);
++ if (err || !mc_dev->mc_handle) {
++ dev_err(dev, "dpmac_open error: %d\n", err);
++ err = -ENODEV;
++ goto err_free_mcp;
++ }
++
++ err = dpmac_get_attributes(mc_dev->mc_io, 0,
++ mc_dev->mc_handle, &priv->attr);
++ if (err) {
++ dev_err(dev, "dpmac_get_attributes err %d\n", err);
++ err = -EINVAL;
++ goto err_close;
++ }
++
++ /* Look up the DPMAC node in the device-tree. */
++ dpmac_node = find_dpmac_node(dev, priv->attr.id);
++ if (!dpmac_node) {
++ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
++ err = -ENODEV;
++ goto err_close;
++ }
++
++ err = setup_irqs(mc_dev);
++ if (err) {
++ err = -EFAULT;
++ goto err_close;
++ }
++
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++ /* OPTIONAL, register netdev just to make it visible to the user */
++ netdev->netdev_ops = &dpaa2_mac_ndo_ops;
++ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
++
++ /* phy starts up enabled so netdev should be up too */
++ netdev->flags |= IFF_UP;
++
++ err = register_netdev(priv->netdev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev error %d\n", err);
++ err = -ENODEV;
++ goto err_free_irq;
++ }
++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
++
++ /* probe the PHY as a fixed-link if the link type declared in DPC
++ * explicitly mandates this
++ */
++
++ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
++ if (!phy_node)
++ goto probe_fixed_link;
++
++ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
++ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
++ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
++ phy_modes(if_mode), priv->attr.eth_if);
++ } else {
++ dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n",
++ priv->attr.eth_if);
++ goto probe_fixed_link;
++ }
++
++ /* try to connect to the PHY */
++ netdev->phydev = of_phy_connect(netdev, phy_node,
++ &dpaa2_mac_link_changed, 0, if_mode);
++ if (!netdev->phydev) {
++ /* No need for dev_err(); the kernel's loud enough as it is. */
++ dev_dbg(dev, "Can't of_phy_connect() now.\n");
++ /* We might be waiting for the MDIO MUX to probe, so defer
++ * our own probing.
++ */
++ err = -EPROBE_DEFER;
++ goto err_defer;
++ }
++ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode));
++
++probe_fixed_link:
++ if (!netdev->phydev) {
++ struct fixed_phy_status status = {
++ .link = 1,
++ /* fixed-phys don't support 10Gbps speed for now */
++ .speed = 1000,
++ .duplex = 1,
++ };
++
++ /* try to register a fixed link phy */
++ netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1,
++ NULL);
++ if (!netdev->phydev || IS_ERR(netdev->phydev)) {
++ dev_err(dev, "error trying to register fixed PHY\n");
++ /* So we don't crash unregister_netdev() later on */
++ netdev->phydev = NULL;
++ err = -EFAULT;
++ goto err_no_phy;
++ }
++ dev_info(dev, "Registered fixed PHY.\n");
++ }
++
++ /* start PHY state machine */
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++ dpaa2_mac_open(netdev);
++#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
++ phy_start(netdev->phydev);
++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
++ return 0;
++
++err_defer:
++err_no_phy:
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++ unregister_netdev(netdev);
++err_free_irq:
++#endif
++ teardown_irqs(mc_dev);
++err_close:
++ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++err_free_mcp:
++ fsl_mc_portal_free(mc_dev->mc_io);
++err_free_netdev:
++ free_netdev(netdev);
++err_exit:
++ return err;
++}
++
++static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev)
++{
++ struct device *dev = &mc_dev->dev;
++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
++
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++ unregister_netdev(priv->netdev);
++#endif
++ teardown_irqs(priv->mc_dev);
++ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
++ fsl_mc_portal_free(priv->mc_dev->mc_io);
++ /* priv is part of the netdev allocation; free_netdev() releases it */
++ free_netdev(priv->netdev);
++
++ dev_set_drvdata(dev, NULL);
++
++ return 0;
++}
++
++static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpmac",
++ },
++ { .vendor = 0x0 }
++};
++MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table);
++
++static struct fsl_mc_driver dpaa2_mac_drv = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_mac_probe,
++ .remove = dpaa2_mac_remove,
++ .match_id_table = dpaa2_mac_match_id_table,
++};
++
++module_fsl_mc_driver(dpaa2_mac_drv);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver");
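dpaa2_mac_get_stats() above issues the same dpmac_get_counter() call a dozen times with hand-rolled error handling. A small accumulator helper along these lines (a sketch, not part of the patch) would shrink it considerably; a failed read degrades to a zero sample instead of aborting the whole snapshot:

static u64 dpaa2_mac_read_counter(struct dpaa2_mac_priv *priv,
                                  enum dpmac_counter id)
{
        u64 val = 0;
        int err;

        err = dpmac_get_counter(priv->mc_dev->mc_io, 0,
                                priv->mc_dev->mc_handle, id, &val);
        if (err)
                netdev_err(priv->netdev, "dpmac_get_counter err %d\n", err);

        return val;
}

With this, tx_packets would become the sum of three dpaa2_mac_read_counter() calls for the egress unicast, multicast and broadcast counters.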
+diff --git a/drivers/staging/fsl-dpaa2/rtc/Makefile b/drivers/staging/fsl-dpaa2/rtc/Makefile
+new file mode 100644
+index 00000000..541a7acd
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/rtc/Makefile
+@@ -0,0 +1,10 @@
++
++obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += dpaa2-rtc.o
++
++dpaa2-rtc-objs := rtc.o dprtc.o
++
++all:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++
++clean:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
+diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
+new file mode 100644
+index 00000000..618c7e54
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
+@@ -0,0 +1,160 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPRTC_CMD_H
++#define _FSL_DPRTC_CMD_H
++
++/* DPRTC Version */
++#define DPRTC_VER_MAJOR 2
++#define DPRTC_VER_MINOR 0
++
++/* Command versioning */
++#define DPRTC_CMD_BASE_VERSION 1
++#define DPRTC_CMD_ID_OFFSET 4
++
++#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
++
++/* Command IDs */
++#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
++#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
++#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910)
++#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990)
++#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10)
++
++#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002)
++#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003)
++#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004)
++#define DPRTC_CMDID_RESET DPRTC_CMD(0x005)
++#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006)
++
++#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
++#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
++#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
++#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
++#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
++#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
++
++#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0)
++#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
++#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
++#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
++#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
++#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5)
++#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6)
++#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7)
++#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8)
++#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9)
++#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA)
++
++/* Macros for accessing command fields smaller than 1 byte */
++#define DPRTC_MASK(field) \
++ GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
++ DPRTC_##field##_SHIFT)
++#define dprtc_get_field(var, field) \
++ (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
++
++#pragma pack(push, 1)
++struct dprtc_cmd_open {
++ uint32_t dprtc_id;
++};
++
++struct dprtc_cmd_destroy {
++ uint32_t object_id;
++};
++
++#define DPRTC_ENABLE_SHIFT 0
++#define DPRTC_ENABLE_SIZE 1
++
++struct dprtc_rsp_is_enabled {
++ uint8_t en;
++};
++
++struct dprtc_cmd_get_irq {
++ uint32_t pad;
++ uint8_t irq_index;
++};
++
++struct dprtc_cmd_set_irq_enable {
++ uint8_t en;
++ uint8_t pad[3];
++ uint8_t irq_index;
++};
++
++struct dprtc_rsp_get_irq_enable {
++ uint8_t en;
++};
++
++struct dprtc_cmd_set_irq_mask {
++ uint32_t mask;
++ uint8_t irq_index;
++};
++
++struct dprtc_rsp_get_irq_mask {
++ uint32_t mask;
++};
++
++struct dprtc_cmd_get_irq_status {
++ uint32_t status;
++ uint8_t irq_index;
++};
++
++struct dprtc_rsp_get_irq_status {
++ uint32_t status;
++};
++
++struct dprtc_cmd_clear_irq_status {
++ uint32_t status;
++ uint8_t irq_index;
++};
++
++struct dprtc_rsp_get_attributes {
++ uint32_t pad;
++ uint32_t id;
++};
++
++struct dprtc_cmd_set_clock_offset {
++ uint64_t offset;
++};
++
++struct dprtc_get_freq_compensation {
++ uint32_t freq_compensation;
++};
++
++struct dprtc_time {
++ uint64_t time;
++};
++
++struct dprtc_rsp_get_api_version {
++ uint16_t major;
++ uint16_t minor;
++};
++#pragma pack(pop)
++#endif /* _FSL_DPRTC_CMD_H */
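For orientation, DPRTC_CMD() above packs a command ID and the base version
into a single word, while DPRTC_MASK()/dprtc_get_field() extract fields
narrower than a byte. The standalone sketch below (illustrative only, not
part of the patch; GENMASK is re-created here since linux/bits.h is not
available outside the tree) shows the macros expanding on concrete values:

    /* Userspace re-creation of the dprtc-cmd.h macros for illustration. */
    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK(h, l)  ((~0U << (l)) & (~0U >> (31 - (h))))

    #define DPRTC_CMD_BASE_VERSION	1
    #define DPRTC_CMD_ID_OFFSET	4
    #define DPRTC_CMD(id)	(((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)

    #define DPRTC_ENABLE_SHIFT	0
    #define DPRTC_ENABLE_SIZE	1
    #define DPRTC_MASK(field) \
    	GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
    		DPRTC_##field##_SHIFT)
    #define dprtc_get_field(var, field) \
    	(((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)

    int main(void)
    {
    	/* DPRTC_CMDID_OPEN: (0x810 << 4) | 1 == 0x8101 */
    	printf("open cmdid = 0x%x\n", DPRTC_CMD(0x810));
    	/* ENABLE is bit 0 of the response byte: 0x01 -> 1 */
    	printf("en = %u\n", dprtc_get_field((uint8_t)0x01, ENABLE));
    	return 0;
    }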
+diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.c b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
+new file mode 100644
+index 00000000..399177e4
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
+@@ -0,0 +1,746 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dprtc.h"
++#include "dprtc-cmd.h"
++
++/**
++ * dprtc_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dprtc_id: DPRTC unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dprtc_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dprtc_id,
++ uint16_t *token)
++{
++ struct dprtc_cmd_open *cmd_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ cmd_params = (struct dprtc_cmd_open *)cmd.params;
++ cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return err;
++}
++
++/**
++ * dprtc_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dprtc_create() - Create the DPRTC object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @dprc_token: Parent container token; '0' for default container
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @obj_id: Returned object id
++ *
++ * Create the DPRTC object, allocate required resources and
++ * perform required initialization.
++ *
++ * The function accepts an authentication token of a parent
++ * container that this object should be assigned to. The token
++ * can be '0' so the object will be assigned to the default container.
++ * The newly created object can be opened with the returned
++ * object id and using the container's associated tokens and MC portals.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_create(struct fsl_mc_io *mc_io,
++ uint16_t dprc_token,
++ uint32_t cmd_flags,
++ const struct dprtc_cfg *cfg,
++ uint32_t *obj_id)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ (void)(cfg); /* unused */
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
++ cmd_flags,
++ dprc_token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *obj_id = mc_cmd_read_object_id(&cmd);
++
++ return 0;
++}
++
++/**
++ * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @dprc_token: Parent container token; '0' for default container
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @object_id: The object id; it must be a valid id within the container that
++ * created this object;
++ *
++ * The function accepts the authentication token of the parent container that
++ * created the object (not the one that currently owns the object). The object
++ * is searched within the parent using the provided 'object_id'.
++ * All tokens to the object must be closed before calling destroy.
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dprtc_destroy(struct fsl_mc_io *mc_io,
++ uint16_t dprc_token,
++ uint32_t cmd_flags,
++ uint32_t object_id)
++{
++ struct dprtc_cmd_destroy *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
++ cmd_flags,
++ dprc_token);
++ cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
++ cmd_params->object_id = cpu_to_le32(object_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct dprtc_rsp_is_enabled *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
++ *en = dprtc_get_field(rsp_params->en, ENABLE);
++
++ return 0;
++}
++
++int dprtc_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dprtc_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no causes will cause
++ * an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct dprtc_cmd_set_irq_enable *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ cmd_params->en = en;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dprtc_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct dprtc_rsp_get_irq_enable *rsp_params;
++ struct dprtc_cmd_get_irq *cmd_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
++ *en = rsp_params->en;
++
++ return 0;
++}
++
++/**
++ * dprtc_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct dprtc_cmd_set_irq_mask *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dprtc_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct dprtc_rsp_get_irq_mask *rsp_params;
++ struct dprtc_cmd_get_irq *cmd_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
++ *mask = le32_to_cpu(rsp_params->mask);
++
++ return 0;
++}
++
++/**
++ * dprtc_get_irq_status() - Get the current status of any pending interrupts.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct dprtc_cmd_get_irq_status *cmd_params;
++ struct dprtc_rsp_get_irq_status *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
++ *status = rsp_params->status;
++
++ return 0;
++}
++
++/**
++ * dprtc_clear_irq_status() - Clear a pending interrupt's status
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @status: Bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct dprtc_cmd_clear_irq_status *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ cmd_params->status = cpu_to_le32(status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dprtc_get_attributes() - Retrieve DPRTC attributes.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dprtc_attr *attr)
++{
++ struct dprtc_rsp_get_attributes *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
++ attr->id = le32_to_cpu(rsp_params->id);
++
++ return 0;
++}
++
++/**
++ * dprtc_set_clock_offset() - Sets the clock's offset
++ * (usually relative to another clock).
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @offset: New clock offset (in nanoseconds).
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int64_t offset)
++{
++ struct dprtc_cmd_set_clock_offset *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
++ cmd_params->offset = cpu_to_le64(offset);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @freq_compensation: The new frequency compensation value to set.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t freq_compensation)
++{
++ struct dprtc_get_freq_compensation *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
++ cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @freq_compensation: Frequency compensation value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t *freq_compensation)
++{
++ struct dprtc_get_freq_compensation *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
++ *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
++
++ return 0;
++}
++
++/**
++ * dprtc_get_time() - Returns the current RTC time.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @time: Current RTC time.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_time(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t *time)
++{
++ struct dprtc_time *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dprtc_time *)cmd.params;
++ *time = le64_to_cpu(rsp_params->time);
++
++ return 0;
++}
++
++/**
++ * dprtc_set_time() - Updates current RTC time.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @time: New RTC time.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_time(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t time)
++{
++ struct dprtc_time *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_time *)cmd.params;
++ cmd_params->time = cpu_to_le64(time);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dprtc_set_alarm() - Defines and sets alarm.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @time: In nanoseconds, the time when the alarm
++ * should go off - must be a multiple of
++ * 1 microsecond
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_alarm(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token, uint64_t time)
++{
++ struct dprtc_time *cmd_params;
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprtc_time *)cmd.params;
++ cmd_params->time = cpu_to_le64(time);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dprtc_get_api_version() - Get Data Path Real Time Counter API version
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of data path real time counter API
++ * @minor_ver: Minor version of data path real time counter API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_api_version(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t *major_ver,
++ uint16_t *minor_ver)
++{
++ struct dprtc_rsp_get_api_version *rsp_params;
++ struct mc_command cmd = { 0 };
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
++ cmd_flags,
++ 0);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
++ *major_ver = le16_to_cpu(rsp_params->major);
++ *minor_ver = le16_to_cpu(rsp_params->minor);
++
++ return 0;
++}
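Taken together, the calls above follow a simple open/command/close
discipline against the MC portal. A hypothetical helper tying them together
(the function name and message are invented for illustration; rtc_probe()
further down uses the same pattern for real):

    /* Hypothetical helper exercising the dprtc API above; error
     * handling is deliberately minimal. */
    #include "dprtc.h"

    static int dprtc_show_api_version(struct fsl_mc_io *mc_io, int dprtc_id)
    {
    	uint16_t token, major, minor;
    	int err;

    	err = dprtc_open(mc_io, 0, dprtc_id, &token);
    	if (err)
    		return err;

    	err = dprtc_get_api_version(mc_io, 0, &major, &minor);
    	if (!err)
    		pr_info("DPRTC API version %u.%u\n", major, minor);

    	dprtc_close(mc_io, 0, token);
    	return err;
    }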
+diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.h b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
+new file mode 100644
+index 00000000..fc96cac6
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
+@@ -0,0 +1,172 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPRTC_H
++#define __FSL_DPRTC_H
++
++/* Data Path Real Time Counter API
++ * Contains initialization APIs and runtime control APIs for RTC
++ */
++
++struct fsl_mc_io;
++
++/**
++ * Number of IRQs
++ */
++#define DPRTC_MAX_IRQ_NUM 1
++#define DPRTC_IRQ_INDEX 0
++
++/**
++ * Interrupt event masks:
++ */
++
++/**
++ * Interrupt event mask indicating alarm event had occurred
++ */
++#define DPRTC_EVENT_ALARM 0x40000000
++/**
++ * Interrupt event mask indicating periodic pulse event had occurred
++ */
++#define DPRTC_EVENT_PPS 0x08000000
++
++int dprtc_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dprtc_id,
++ uint16_t *token);
++
++int dprtc_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dprtc_cfg - Structure representing DPRTC configuration
++ * @options: placeholder
++ */
++struct dprtc_cfg {
++ uint32_t options;
++};
++
++int dprtc_create(struct fsl_mc_io *mc_io,
++ uint16_t dprc_token,
++ uint32_t cmd_flags,
++ const struct dprtc_cfg *cfg,
++ uint32_t *obj_id);
++
++int dprtc_destroy(struct fsl_mc_io *mc_io,
++ uint16_t dprc_token,
++ uint32_t cmd_flags,
++ uint32_t object_id);
++
++int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int64_t offset);
++
++int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t freq_compensation);
++
++int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t *freq_compensation);
++
++int dprtc_get_time(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t *time);
++
++int dprtc_set_time(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t time);
++
++int dprtc_set_alarm(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t time);
++
++int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dprtc_attr - Structure representing DPRTC attributes
++ * @id: DPRTC object ID
++ */
++struct dprtc_attr {
++ int id;
++};
++
++int dprtc_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dprtc_attr *attr);
++
++int dprtc_get_api_version(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t *major_ver,
++ uint16_t *minor_ver);
++
++#endif /* __FSL_DPRTC_H */
+diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/staging/fsl-dpaa2/rtc/rtc.c
+new file mode 100644
+index 00000000..0afc6538
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
+@@ -0,0 +1,243 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/ptp_clock_kernel.h>
++
++#include "../../fsl-mc/include/mc.h"
++#include "../../fsl-mc/include/mc-sys.h"
++
++#include "dprtc.h"
++#include "dprtc-cmd.h"
++
++#define N_EXT_TS 2
++
++struct ptp_clock *clock;
++struct fsl_mc_device *rtc_mc_dev;
++u32 freqCompensation;
++
++/* PTP clock operations */
++static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
++{
++ u64 adj;
++ u32 diff, tmr_add;
++ int neg_adj = 0;
++ int err = 0;
++ struct fsl_mc_device *mc_dev = rtc_mc_dev;
++ struct device *dev = &mc_dev->dev;
++
++ if (ppb < 0) {
++ neg_adj = 1;
++ ppb = -ppb;
++ }
++
++ tmr_add = freqCompensation;
++ adj = tmr_add;
++ adj *= ppb;
++ diff = div_u64(adj, 1000000000ULL);
++
++ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
++
++ err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
++ mc_dev->mc_handle, tmr_add);
++ if (err)
++ dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
++ return 0;
++}
++
++static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
++{
++ s64 now;
++ int err = 0;
++ struct fsl_mc_device *mc_dev = rtc_mc_dev;
++ struct device *dev = &mc_dev->dev;
++
++ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
++ if (err) {
++ dev_err(dev, "dprtc_get_time err %d\n", err);
++ return 0;
++ }
++
++ now += delta;
++
++ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
++ if (err) {
++ dev_err(dev, "dprtc_set_time err %d\n", err);
++ return 0;
++ }
++ return 0;
++}
++
++static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
++{
++ u64 ns;
++ u32 remainder;
++ int err = 0;
++ struct fsl_mc_device *mc_dev = rtc_mc_dev;
++ struct device *dev = &mc_dev->dev;
++
++ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
++ if (err) {
++ dev_err(dev, "dprtc_get_time err %d\n", err);
++ return 0;
++ }
++
++ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
++ ts->tv_nsec = remainder;
++ return 0;
++}
++
++static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
++ const struct timespec *ts)
++{
++ u64 ns;
++ int err = 0;
++ struct fsl_mc_device *mc_dev = rtc_mc_dev;
++ struct device *dev = &mc_dev->dev;
++
++ ns = ts->tv_sec * 1000000000ULL;
++ ns += ts->tv_nsec;
++
++ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
++ if (err)
++ dev_err(dev, "dprtc_set_time err %d\n", err);
++ return 0;
++}
++
++static struct ptp_clock_info ptp_dpaa2_caps = {
++ .owner = THIS_MODULE,
++ .name = "dpaa2 clock",
++ .max_adj = 512000,
++ .n_alarm = 0,
++ .n_ext_ts = N_EXT_TS,
++ .n_per_out = 0,
++ .n_pins = 0,
++ .pps = 1,
++ .adjfreq = ptp_dpaa2_adjfreq,
++ .adjtime = ptp_dpaa2_adjtime,
++ .gettime64 = ptp_dpaa2_gettime,
++ .settime64 = ptp_dpaa2_settime,
++};
++
++static int rtc_probe(struct fsl_mc_device *mc_dev)
++{
++ struct device *dev;
++ int err = 0;
++ int dpaa2_phc_index;
++ u32 tmr_add = 0;
++
++ if (!mc_dev)
++ return -EFAULT;
++
++ dev = &mc_dev->dev;
++
++ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
++ if (unlikely(err)) {
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ goto err_exit;
++ }
++ if (!mc_dev->mc_io) {
++ dev_err(dev,
++ "fsl_mc_portal_allocate returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_exit;
++ }
++
++ err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
++ &mc_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dprtc_open err %d\n", err);
++ goto err_free_mcp;
++ }
++ if (!mc_dev->mc_handle) {
++ dev_err(dev, "dprtc_open returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_free_mcp;
++ }
++
++ rtc_mc_dev = mc_dev;
++
++ err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
++ mc_dev->mc_handle, &tmr_add);
++ if (err) {
++ dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
++ goto err_close;
++ }
++ freqCompensation = tmr_add;
++
++ clock = ptp_clock_register(&ptp_dpaa2_caps, dev);
++ if (IS_ERR(clock)) {
++ err = PTR_ERR(clock);
++ goto err_close;
++ }
++ dpaa2_phc_index = ptp_clock_index(clock);
++
++ return 0;
++err_close:
++ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++err_free_mcp:
++ fsl_mc_portal_free(mc_dev->mc_io);
++err_exit:
++ return err;
++}
++
++static int rtc_remove(struct fsl_mc_device *mc_dev)
++{
++ ptp_clock_unregister(clock);
++ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++ fsl_mc_portal_free(mc_dev->mc_io);
++
++ return 0;
++}
++
++static const struct fsl_mc_device_id rtc_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dprtc",
++ },
++ {}
++};
++
++static struct fsl_mc_driver rtc_drv = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = rtc_probe,
++ .remove = rtc_remove,
++ .match_id_table = rtc_match_id_table,
++};
++
++module_fsl_mc_driver(rtc_drv);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");
+--
+2.14.1
+
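In ptp_dpaa2_adjfreq() above, the requested parts-per-billion offset scales
the stored timer addend: diff = addend * |ppb| / 10^9, which is then added
to or subtracted from the addend before dprtc_set_freq_compensation()
writes it back. A standalone sketch of just that arithmetic (the 0x80000000
starting addend is a made-up example value):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the addend update done in ptp_dpaa2_adjfreq(). */
    static uint32_t scale_addend(uint32_t tmr_add, int32_t ppb)
    {
    	int neg_adj = 0;
    	uint64_t adj;
    	uint32_t diff;

    	if (ppb < 0) {
    		neg_adj = 1;
    		ppb = -ppb;
    	}
    	adj = (uint64_t)tmr_add * (uint32_t)ppb;
    	diff = (uint32_t)(adj / 1000000000ULL);

    	return neg_adj ? tmr_add - diff : tmr_add + diff;
    }

    int main(void)
    {
    	/* +100 ppb on a 0x80000000 addend raises it by 214 counts */
    	printf("0x%x\n", scale_addend(0x80000000u, 100));
    	return 0;
    }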
diff --git a/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch b/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch
new file mode 100644
index 0000000000..bc7641015c
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch
@@ -0,0 +1,149 @@
+From 505eb62bdb7a4cc25b13491dd5c68d0741c5d6da Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:21:13 +0800
+Subject: [PATCH] ata: support layerscape
+
+This is an integrated patch for layerscape sata support.
+
+Signed-off-by: Tang Yuantian <Yuantian.Tang@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/ata/ahci_qoriq.c | 63 ++++++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 56 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
+index 1eba8dff..2f30a39f 100644
+--- a/drivers/ata/ahci_qoriq.c
++++ b/drivers/ata/ahci_qoriq.c
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale QorIQ AHCI SATA platform driver
+ *
+- * Copyright 2015 Freescale, Inc.
++ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Tang Yuantian <Yuantian.Tang@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -46,23 +46,32 @@
+ #define LS1021A_AXICC_ADDR 0xC0
+
+ #define SATA_ECC_DISABLE 0x00020000
++#define ECC_DIS_ARMV8_CH2 0x80000000
++#define ECC_DIS_LS1088A 0x40000000
+
+ enum ahci_qoriq_type {
+ AHCI_LS1021A,
+ AHCI_LS1043A,
+ AHCI_LS2080A,
++ AHCI_LS1046A,
++ AHCI_LS1088A,
++ AHCI_LS2088A,
+ };
+
+ struct ahci_qoriq_priv {
+ struct ccsr_ahci *reg_base;
+ enum ahci_qoriq_type type;
+ void __iomem *ecc_addr;
++ bool is_dmacoherent;
+ };
+
+ static const struct of_device_id ahci_qoriq_of_match[] = {
+ { .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
+ { .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
+ { .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
++ { .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
++ { .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
++ { .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
+@@ -154,6 +163,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
+
+ switch (qpriv->type) {
+ case AHCI_LS1021A:
++ if (!qpriv->ecc_addr)
++ return -EINVAL;
+ writel(SATA_ECC_DISABLE, qpriv->ecc_addr);
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(LS1021A_PORT_PHY2, reg_base + PORT_PHY2);
+@@ -161,19 +172,56 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
+ writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
+ writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+- writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR);
++ if (qpriv->is_dmacoherent)
++ writel(AHCI_PORT_AXICC_CFG,
++ reg_base + LS1021A_AXICC_ADDR);
+ break;
+
+ case AHCI_LS1043A:
++ if (!qpriv->ecc_addr)
++ return -EINVAL;
++ writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
++ qpriv->ecc_addr);
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
++ if (qpriv->is_dmacoherent)
++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ break;
+
+ case AHCI_LS2080A:
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
++ if (qpriv->is_dmacoherent)
++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
++ break;
++
++ case AHCI_LS1046A:
++ if (!qpriv->ecc_addr)
++ return -EINVAL;
++ writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
++ qpriv->ecc_addr);
++ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
++ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
++ if (qpriv->is_dmacoherent)
++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
++ break;
++
++ case AHCI_LS1088A:
++ if (!qpriv->ecc_addr)
++ return -EINVAL;
++ writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
++ qpriv->ecc_addr);
++ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
++ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
++ if (qpriv->is_dmacoherent)
++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
++ break;
++
++ case AHCI_LS2088A:
++ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
++ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
++ if (qpriv->is_dmacoherent)
++ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ break;
+ }
+
+@@ -204,13 +252,14 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
+
+ qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
+
+- if (qoriq_priv->type == AHCI_LS1021A) {
+- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+- "sata-ecc");
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++ "sata-ecc");
++ if (res) {
+ qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qoriq_priv->ecc_addr))
+ return PTR_ERR(qoriq_priv->ecc_addr);
+ }
++ qoriq_priv->is_dmacoherent = of_dma_is_coherent(np);
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+--
+2.14.1
+
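Each new SoC case in the patch above applies the same recipe: assert an
SoC-specific ECC-disable bit in the "sata-ecc" region, program the
PORT_PHY1/PORT_TRANS registers, and write the AXICC configuration only when
the device is DMA-coherent. A minimal sketch of the read-modify-write step
(a hypothetical helper, not code from the patch; it only builds inside the
kernel tree):

    #include <linux/io.h>
    #include <linux/errno.h>

    /* Hypothetical helper mirroring the ECC-disable idiom above;
     * ecc_addr comes from devm_ioremap_resource() in the real probe. */
    static int qoriq_sata_ecc_disable(void __iomem *ecc_addr, u32 dis_bit)
    {
    	if (!ecc_addr)
    		return -EINVAL;	/* DT must carry a "sata-ecc" region */

    	writel(readl(ecc_addr) | dis_bit, ecc_addr);
    	return 0;
    }

Per the patch, dis_bit would be ECC_DIS_ARMV8_CH2 on LS1043A/LS1046A and
ECC_DIS_LS1088A on LS1088A; LS1021A instead writes SATA_ECC_DISABLE outright.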
diff --git a/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch b/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch
new file mode 100644
index 0000000000..881e3848b4
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch
@@ -0,0 +1,312 @@
+From bd3df6d053a28d5aa630524c9087c21def30e764 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:09:35 +0800
+Subject: [PATCH] clk: support layerscape
+
+This is an integrated patch for layerscape clock support.
+
+Signed-off-by: Yuantian Tang <andy.tang@nxp.com>
+Signed-off-by: Mingkai Hu <mingkai.hu@nxp.com>
+Signed-off-by: Scott Wood <oss@buserror.net>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/clk/clk-qoriq.c | 170 ++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 156 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+index 80ae2a51..0e7de00a 100644
+--- a/drivers/clk/clk-qoriq.c
++++ b/drivers/clk/clk-qoriq.c
+@@ -12,6 +12,7 @@
+
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
++#include <linux/clkdev.h>
+ #include <linux/fsl/guts.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -87,7 +88,7 @@ struct clockgen {
+ struct device_node *node;
+ void __iomem *regs;
+ struct clockgen_chipinfo info; /* mutable copy */
+- struct clk *sysclk;
++ struct clk *sysclk, *coreclk;
+ struct clockgen_pll pll[6];
+ struct clk *cmux[NUM_CMUX];
+ struct clk *hwaccel[NUM_HWACCEL];
+@@ -266,6 +267,39 @@ static const struct clockgen_muxinfo ls1043a_hwa2 = {
+ },
+ };
+
++static const struct clockgen_muxinfo ls1046a_hwa1 = {
++ {
++ {},
++ {},
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
++ },
++};
++
++static const struct clockgen_muxinfo ls1046a_hwa2 = {
++ {
++ {},
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
++ {},
++ {},
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ },
++};
++
++static const struct clockgen_muxinfo ls1012a_cmux = {
++ {
++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ {},
++ [2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ }
++};
++
+ static const struct clockgen_muxinfo t1023_hwa1 = {
+ {
+ {},
+@@ -488,6 +522,42 @@ static const struct clockgen_chipinfo chipinfo[] = {
+ .pll_mask = 0x07,
+ .flags = CG_PLL_8BIT,
+ },
++ {
++ .compat = "fsl,ls1046a-clockgen",
++ .init_periph = t2080_init_periph,
++ .cmux_groups = {
++ &t1040_cmux
++ },
++ .hwaccel = {
++ &ls1046a_hwa1, &ls1046a_hwa2
++ },
++ .cmux_to_group = {
++ 0, -1
++ },
++ .pll_mask = 0x07,
++ .flags = CG_PLL_8BIT,
++ },
++ {
++ .compat = "fsl,ls1088a-clockgen",
++ .cmux_groups = {
++ &clockgen2_cmux_cga12
++ },
++ .cmux_to_group = {
++ 0, 0, -1
++ },
++ .pll_mask = 0x07,
++ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
++ },
++ {
++ .compat = "fsl,ls1012a-clockgen",
++ .cmux_groups = {
++ &ls1012a_cmux
++ },
++ .cmux_to_group = {
++ 0, -1
++ },
++ .pll_mask = 0x03,
++ },
+ {
+ .compat = "fsl,ls2080a-clockgen",
+ .cmux_groups = {
+@@ -846,7 +916,12 @@ static void __init create_muxes(struct clockgen *cg)
+
+ static void __init clockgen_init(struct device_node *np);
+
+-/* Legacy nodes may get probed before the parent clockgen node */
++/*
++ * Legacy nodes may get probed before the parent clockgen node.
++ * It is assumed that device trees with legacy nodes will not
++ * contain a "clocks" property -- otherwise the input clocks may
++ * not be initialized at this point.
++ */
+ static void __init legacy_init_clockgen(struct device_node *np)
+ {
+ if (!clockgen.node)
+@@ -887,18 +962,13 @@ static struct clk __init
+ return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
+ }
+
+-static struct clk *sysclk_from_parent(const char *name)
++static struct clk __init *input_clock(const char *name, struct clk *clk)
+ {
+- struct clk *clk;
+- const char *parent_name;
+-
+- clk = of_clk_get(clockgen.node, 0);
+- if (IS_ERR(clk))
+- return clk;
++ const char *input_name;
+
+ /* Register the input clock under the desired name. */
+- parent_name = __clk_get_name(clk);
+- clk = clk_register_fixed_factor(NULL, name, parent_name,
++ input_name = __clk_get_name(clk);
++ clk = clk_register_fixed_factor(NULL, name, input_name,
+ 0, 1, 1);
+ if (IS_ERR(clk))
+ pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
+@@ -907,6 +977,29 @@ static struct clk *sysclk_from_parent(const char *name)
+ return clk;
+ }
+
++static struct clk __init *input_clock_by_name(const char *name,
++ const char *dtname)
++{
++ struct clk *clk;
++
++ clk = of_clk_get_by_name(clockgen.node, dtname);
++ if (IS_ERR(clk))
++ return clk;
++
++ return input_clock(name, clk);
++}
++
++static struct clk __init *input_clock_by_index(const char *name, int idx)
++{
++ struct clk *clk;
++
++	clk = of_clk_get(clockgen.node, idx);
++ if (IS_ERR(clk))
++ return clk;
++
++ return input_clock(name, clk);
++}
++
+ static struct clk * __init create_sysclk(const char *name)
+ {
+ struct device_node *sysclk;
+@@ -916,7 +1009,11 @@ static struct clk * __init create_sysclk(const char *name)
+ if (!IS_ERR(clk))
+ return clk;
+
+- clk = sysclk_from_parent(name);
++ clk = input_clock_by_name(name, "sysclk");
++ if (!IS_ERR(clk))
++ return clk;
++
++ clk = input_clock_by_index(name, 0);
+ if (!IS_ERR(clk))
+ return clk;
+
+@@ -927,7 +1024,27 @@ static struct clk * __init create_sysclk(const char *name)
+ return clk;
+ }
+
+- pr_err("%s: No input clock\n", __func__);
++ pr_err("%s: No input sysclk\n", __func__);
++ return NULL;
++}
++
++static struct clk * __init create_coreclk(const char *name)
++{
++ struct clk *clk;
++
++ clk = input_clock_by_name(name, "coreclk");
++ if (!IS_ERR(clk))
++ return clk;
++
++ /*
++ * This indicates a mix of legacy nodes with the new coreclk
++ * mechanism, which should never happen. If this error occurs,
++ * don't use the wrong input clock just because coreclk isn't
++ * ready yet.
++ */
++ if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
++ return clk;
++
+ return NULL;
+ }
+
+@@ -950,11 +1067,19 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
+ u32 __iomem *reg;
+ u32 mult;
+ struct clockgen_pll *pll = &cg->pll[idx];
++ const char *input = "cg-sysclk";
+ int i;
+
+ if (!(cg->info.pll_mask & (1 << idx)))
+ return;
+
++ if (cg->coreclk && idx != PLATFORM_PLL) {
++ if (IS_ERR(cg->coreclk))
++ return;
++
++ input = "cg-coreclk";
++ }
++
+ if (cg->info.flags & CG_VER3) {
+ switch (idx) {
+ case PLATFORM_PLL:
+@@ -1000,12 +1125,13 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
+
+ for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
+ struct clk *clk;
++ int ret;
+
+ snprintf(pll->div[i].name, sizeof(pll->div[i].name),
+ "cg-pll%d-div%d", idx, i + 1);
+
+ clk = clk_register_fixed_factor(NULL,
+- pll->div[i].name, "cg-sysclk", 0, mult, i + 1);
++ pll->div[i].name, input, 0, mult, i + 1);
+ if (IS_ERR(clk)) {
+ pr_err("%s: %s: register failed %ld\n",
+ __func__, pll->div[i].name, PTR_ERR(clk));
+@@ -1013,6 +1139,11 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
+ }
+
+ pll->div[i].clk = clk;
++ ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
++ if (ret != 0)
++			pr_err("%s: %s: register to lookup table failed %d\n",
++			       __func__, pll->div[i].name, ret);
++
+ }
+ }
+
+@@ -1142,6 +1273,13 @@ static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
+ goto bad_args;
+ clk = pll->div[idx].clk;
+ break;
++ case 5:
++ if (idx != 0)
++ goto bad_args;
++ clk = cg->coreclk;
++ if (IS_ERR(clk))
++ clk = NULL;
++ break;
+ default:
+ goto bad_args;
+ }
+@@ -1253,6 +1391,7 @@ static void __init clockgen_init(struct device_node *np)
+ clockgen.info.flags |= CG_CMUX_GE_PLAT;
+
+ clockgen.sysclk = create_sysclk("cg-sysclk");
++ clockgen.coreclk = create_coreclk("cg-coreclk");
+ create_plls(&clockgen);
+ create_muxes(&clockgen);
+
+@@ -1273,8 +1412,11 @@ static void __init clockgen_init(struct device_node *np)
+
+ CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
+ CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
++CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
+ CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
+ CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
++CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
++CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
+ CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
+
+ /* Legacy nodes */
+--
+2.14.1
+
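The new "case 5" in clockgen_clk_get() above exposes the core PLL reference
as clock-output type 5, index 0, so a device tree consumer can reach it with
a clkspec such as "clocks = <&clockgen 5 0>". A hypothetical in-tree
consumer (illustrative only; it builds only against kernel headers):

    #include <linux/clk.h>
    #include <linux/of.h>

    /* Hypothetical consumer of the new coreclk output; assumes a node
     * whose first clocks entry is <&clockgen 5 0>. */
    static unsigned long example_coreclk_rate(struct device_node *np)
    {
    	struct clk *clk = of_clk_get(np, 0);

    	if (IS_ERR(clk))
    		return 0;	/* coreclk not provided on this SoC */

    	return clk_get_rate(clk);
    }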
diff --git a/target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch b/target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch
new file mode 100644
index 0000000000..9ea5c407dd
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch
@@ -0,0 +1,370 @@
+From a9ebdf9fa18fd317a4e97f46e8c5263898094864 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:20:10 +0800
+Subject: [PATCH] cpufreq: support layerscape
+
+This is an integrated patch for layerscape pm support.
+
+Signed-off-by: Tang Yuantian <Yuantian.Tang@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/cpufreq/Kconfig | 2 +-
+ drivers/cpufreq/qoriq-cpufreq.c | 176 +++++++++++++++-------------------------
+ drivers/firmware/psci.c | 12 ++-
+ 3 files changed, 77 insertions(+), 113 deletions(-)
+
+diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
+index d8b164a7..bc9264c7 100644
+--- a/drivers/cpufreq/Kconfig
++++ b/drivers/cpufreq/Kconfig
+@@ -332,7 +332,7 @@ endif
+
+ config QORIQ_CPUFREQ
+ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
+- depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
++ depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
+ depends on !CPU_THERMAL || THERMAL
+ select CLK_QORIQ
+ help
+diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
+index 53d8c3fb..e2ea433a 100644
+--- a/drivers/cpufreq/qoriq-cpufreq.c
++++ b/drivers/cpufreq/qoriq-cpufreq.c
+@@ -11,6 +11,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/clk.h>
++#include <linux/clk-provider.h>
+ #include <linux/cpufreq.h>
+ #include <linux/cpu_cooling.h>
+ #include <linux/errno.h>
+@@ -22,10 +23,6 @@
+ #include <linux/slab.h>
+ #include <linux/smp.h>
+
+-#if !defined(CONFIG_ARM)
+-#include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */
+-#endif
+-
+ /**
+ * struct cpu_data
+ * @pclk: the parent clock of cpu
+@@ -37,73 +34,51 @@ struct cpu_data {
+ struct thermal_cooling_device *cdev;
+ };
+
++/*
++ * Don't use cpufreq on this SoC -- used when the SoC would have otherwise
++ * matched a more generic compatible.
++ */
++#define SOC_BLACKLIST 1
++
+ /**
+ * struct soc_data - SoC specific data
+- * @freq_mask: mask the disallowed frequencies
+- * @flag: unique flags
++ * @flags: SOC_xxx
+ */
+ struct soc_data {
+- u32 freq_mask[4];
+- u32 flag;
+-};
+-
+-#define FREQ_MASK 1
+-/* see hardware specification for the allowed frqeuencies */
+-static const struct soc_data sdata[] = {
+- { /* used by p2041 and p3041 */
+- .freq_mask = {0x8, 0x8, 0x2, 0x2},
+- .flag = FREQ_MASK,
+- },
+- { /* used by p5020 */
+- .freq_mask = {0x8, 0x2},
+- .flag = FREQ_MASK,
+- },
+- { /* used by p4080, p5040 */
+- .freq_mask = {0},
+- .flag = 0,
+- },
++ u32 flags;
+ };
+
+-/*
+- * the minimum allowed core frequency, in Hz
+- * for chassis v1.0, >= platform frequency
+- * for chassis v2.0, >= platform frequency / 2
+- */
+-static u32 min_cpufreq;
+-static const u32 *fmask;
+-
+-#if defined(CONFIG_ARM)
+-static int get_cpu_physical_id(int cpu)
+-{
+- return topology_core_id(cpu);
+-}
+-#else
+-static int get_cpu_physical_id(int cpu)
+-{
+- return get_hard_smp_processor_id(cpu);
+-}
+-#endif
+-
+ static u32 get_bus_freq(void)
+ {
+ struct device_node *soc;
+ u32 sysfreq;
++ struct clk *pltclk;
++ int ret;
+
++ /* get platform freq by searching bus-frequency property */
+ soc = of_find_node_by_type(NULL, "soc");
+- if (!soc)
+- return 0;
+-
+- if (of_property_read_u32(soc, "bus-frequency", &sysfreq))
+- sysfreq = 0;
++ if (soc) {
++ ret = of_property_read_u32(soc, "bus-frequency", &sysfreq);
++ of_node_put(soc);
++ if (!ret)
++ return sysfreq;
++ }
+
+- of_node_put(soc);
++ /* get platform freq by its clock name */
++ pltclk = clk_get(NULL, "cg-pll0-div1");
++ if (IS_ERR(pltclk)) {
++ pr_err("%s: can't get bus frequency %ld\n",
++ __func__, PTR_ERR(pltclk));
++ return PTR_ERR(pltclk);
++ }
+
+- return sysfreq;
++ return clk_get_rate(pltclk);
+ }
+
+-static struct device_node *cpu_to_clk_node(int cpu)
++static struct clk *cpu_to_clk(int cpu)
+ {
+- struct device_node *np, *clk_np;
++ struct device_node *np;
++ struct clk *clk;
+
+ if (!cpu_present(cpu))
+ return NULL;
+@@ -112,37 +87,28 @@ static struct device_node *cpu_to_clk_node(int cpu)
+ if (!np)
+ return NULL;
+
+- clk_np = of_parse_phandle(np, "clocks", 0);
+- if (!clk_np)
+- return NULL;
+-
++ clk = of_clk_get(np, 0);
+ of_node_put(np);
+-
+- return clk_np;
++ return clk;
+ }
+
+ /* traverse cpu nodes to get cpu mask of sharing clock wire */
+ static void set_affected_cpus(struct cpufreq_policy *policy)
+ {
+- struct device_node *np, *clk_np;
+ struct cpumask *dstp = policy->cpus;
++ struct clk *clk;
+ int i;
+
+- np = cpu_to_clk_node(policy->cpu);
+- if (!np)
+- return;
+-
+ for_each_present_cpu(i) {
+- clk_np = cpu_to_clk_node(i);
+- if (!clk_np)
++ clk = cpu_to_clk(i);
++ if (IS_ERR(clk)) {
++ pr_err("%s: no clock for cpu %d\n", __func__, i);
+ continue;
++ }
+
+- if (clk_np == np)
++ if (clk_is_match(policy->clk, clk))
+ cpumask_set_cpu(i, dstp);
+-
+- of_node_put(clk_np);
+ }
+- of_node_put(np);
+ }
+
+ /* reduce the duplicated frequencies in frequency table */
+@@ -198,10 +164,11 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
+
+ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ {
+- struct device_node *np, *pnode;
++ struct device_node *np;
+ int i, count, ret;
+- u32 freq, mask;
++ u32 freq;
+ struct clk *clk;
++ const struct clk_hw *hwclk;
+ struct cpufreq_frequency_table *table;
+ struct cpu_data *data;
+ unsigned int cpu = policy->cpu;
+@@ -221,17 +188,13 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ goto err_nomem2;
+ }
+
+- pnode = of_parse_phandle(np, "clocks", 0);
+- if (!pnode) {
+- pr_err("%s: could not get clock information\n", __func__);
+- goto err_nomem2;
+- }
++ hwclk = __clk_get_hw(policy->clk);
++ count = clk_hw_get_num_parents(hwclk);
+
+- count = of_property_count_strings(pnode, "clock-names");
+ data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+ if (!data->pclk) {
+ pr_err("%s: no memory\n", __func__);
+- goto err_node;
++ goto err_nomem2;
+ }
+
+ table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
+@@ -240,23 +203,11 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ goto err_pclk;
+ }
+
+- if (fmask)
+- mask = fmask[get_cpu_physical_id(cpu)];
+- else
+- mask = 0x0;
+-
+ for (i = 0; i < count; i++) {
+- clk = of_clk_get(pnode, i);
++ clk = clk_hw_get_parent_by_index(hwclk, i)->clk;
+ data->pclk[i] = clk;
+ freq = clk_get_rate(clk);
+- /*
+- * the clock is valid if its frequency is not masked
+- * and large than minimum allowed frequency.
+- */
+- if (freq < min_cpufreq || (mask & (1 << i)))
+- table[i].frequency = CPUFREQ_ENTRY_INVALID;
+- else
+- table[i].frequency = freq / 1000;
++ table[i].frequency = freq / 1000;
+ table[i].driver_data = i;
+ }
+ freq_table_redup(table, count);
+@@ -282,7 +233,6 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ policy->cpuinfo.transition_latency = u64temp + 1;
+
+ of_node_put(np);
+- of_node_put(pnode);
+
+ return 0;
+
+@@ -290,10 +240,7 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ kfree(table);
+ err_pclk:
+ kfree(data->pclk);
+-err_node:
+- of_node_put(pnode);
+ err_nomem2:
+- policy->driver_data = NULL;
+ kfree(data);
+ err_np:
+ of_node_put(np);
+@@ -357,12 +304,25 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
+ .attr = cpufreq_generic_attr,
+ };
+
++static const struct soc_data blacklist = {
++ .flags = SOC_BLACKLIST,
++};
++
+ static const struct of_device_id node_matches[] __initconst = {
+- { .compatible = "fsl,p2041-clockgen", .data = &sdata[0], },
+- { .compatible = "fsl,p3041-clockgen", .data = &sdata[0], },
+- { .compatible = "fsl,p5020-clockgen", .data = &sdata[1], },
+- { .compatible = "fsl,p4080-clockgen", .data = &sdata[2], },
+- { .compatible = "fsl,p5040-clockgen", .data = &sdata[2], },
++ /* e6500 cannot use cpufreq due to erratum A-008083 */
++ { .compatible = "fsl,b4420-clockgen", &blacklist },
++ { .compatible = "fsl,b4860-clockgen", &blacklist },
++ { .compatible = "fsl,t2080-clockgen", &blacklist },
++ { .compatible = "fsl,t4240-clockgen", &blacklist },
++
++ { .compatible = "fsl,ls1012a-clockgen", },
++ { .compatible = "fsl,ls1021a-clockgen", },
++ { .compatible = "fsl,ls1043a-clockgen", },
++ { .compatible = "fsl,ls1046a-clockgen", },
++ { .compatible = "fsl,ls1088a-clockgen", },
++ { .compatible = "fsl,ls2080a-clockgen", },
++ { .compatible = "fsl,p4080-clockgen", },
++ { .compatible = "fsl,qoriq-clockgen-1.0", },
+ { .compatible = "fsl,qoriq-clockgen-2.0", },
+ {}
+ };
+@@ -380,16 +340,12 @@ static int __init qoriq_cpufreq_init(void)
+
+ match = of_match_node(node_matches, np);
+ data = match->data;
+- if (data) {
+- if (data->flag)
+- fmask = data->freq_mask;
+- min_cpufreq = get_bus_freq();
+- } else {
+- min_cpufreq = get_bus_freq() / 2;
+- }
+
+ of_node_put(np);
+
++ if (data && data->flags & SOC_BLACKLIST)
++ return -ENODEV;
++
+ ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
+ if (!ret)
+ pr_info("Freescale QorIQ CPU frequency scaling driver\n");
+diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
+index 8263429e..323c9fc0 100644
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -418,8 +418,12 @@ CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
+
+ static int psci_system_suspend(unsigned long unused)
+ {
+- return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+- virt_to_phys(cpu_resume), 0, 0);
++ u32 state;
++
++	state = (2 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) |
++ (1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT);
++
++ return psci_cpu_suspend(state, virt_to_phys(cpu_resume));
+ }
+
+ static int psci_system_suspend_enter(suspend_state_t state)
+@@ -439,6 +443,8 @@ static void __init psci_init_system_suspend(void)
+ if (!IS_ENABLED(CONFIG_SUSPEND))
+ return;
+
++ suspend_set_ops(&psci_suspend_ops);
++
+ ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND));
+
+ if (ret != PSCI_RET_NOT_SUPPORTED)
+@@ -516,6 +522,8 @@ static void __init psci_0_2_set_functions(void)
+ arm_pm_restart = psci_sys_reset;
+
+ pm_power_off = psci_sys_poweroff;
++ psci_init_system_suspend();
++ suspend_set_ops(&psci_suspend_ops);
+ }
+
+ /*
+--
+2.14.1
+
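[Editor's sketch] The psci.c change above replaces the direct SYSTEM_SUSPEND call with a CPU_SUSPEND request whose power_state parameter encodes a powerdown at affinity level 2. A small sketch of that encoding, using the PSCI 0.2 constants from include/uapi/linux/psci.h (the helper name is hypothetical):

	/*
	 * PSCI 0.2 power_state layout, per the PSCI spec:
	 *   [25:24] affinity level -> 2 (system-wide scope)
	 *   [16]    state type     -> 1 (powerdown, not standby)
	 *   [15:0]  state id       -> 0
	 */
	#include <uapi/linux/psci.h>

	static u32 make_system_suspend_state(void)
	{
		return (2 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) |
		       (1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT);
	}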
diff --git a/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch b/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch
new file mode 100644
index 0000000000..4340247d6e
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch
@@ -0,0 +1,26853 @@
+From 0a5b97d1f524c1769b4059e3c7123b52755f7121 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Wed, 27 Sep 2017 15:02:01 +0800
+Subject: [PATCH] crypto: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is an integrated patch for layerscape sec support.
+
+Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
+Signed-off-by: Fabio Estevam <festevam@gmail.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
+Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
+Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
+Signed-off-by: Andrew Lutomirski <luto@kernel.org>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
+Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ crypto/Kconfig | 30 +
+ crypto/Makefile | 4 +
+ crypto/acompress.c | 169 +
+ crypto/algboss.c | 12 +-
+ crypto/crypto_user.c | 19 +
+ crypto/scompress.c | 356 ++
+ crypto/tcrypt.c | 17 +-
+ crypto/testmgr.c | 1701 ++++----
+ crypto/testmgr.h | 1125 +++---
+ crypto/tls.c | 607 +++
+ drivers/crypto/caam/Kconfig | 72 +-
+ drivers/crypto/caam/Makefile | 15 +-
+ drivers/crypto/caam/caamalg.c | 2125 +++-------
+ drivers/crypto/caam/caamalg_desc.c | 1913 +++++++++
+ drivers/crypto/caam/caamalg_desc.h | 127 +
+ drivers/crypto/caam/caamalg_qi.c | 2877 +++++++++++++
+ drivers/crypto/caam/caamalg_qi2.c | 4428 +++++++++++++++++++++
+ drivers/crypto/caam/caamalg_qi2.h | 265 ++
+ drivers/crypto/caam/caamhash.c | 521 +--
+ drivers/crypto/caam/caampkc.c | 471 ++-
+ drivers/crypto/caam/caampkc.h | 58 +
+ drivers/crypto/caam/caamrng.c | 16 +-
+ drivers/crypto/caam/compat.h | 1 +
+ drivers/crypto/caam/ctrl.c | 356 +-
+ drivers/crypto/caam/ctrl.h | 2 +
+ drivers/crypto/caam/desc.h | 52 +-
+ drivers/crypto/caam/desc_constr.h | 139 +-
+ drivers/crypto/caam/dpseci.c | 859 ++++
+ drivers/crypto/caam/dpseci.h | 395 ++
+ drivers/crypto/caam/dpseci_cmd.h | 261 ++
+ drivers/crypto/caam/error.c | 127 +-
+ drivers/crypto/caam/error.h | 10 +-
+ drivers/crypto/caam/intern.h | 31 +-
+ drivers/crypto/caam/jr.c | 55 +-
+ drivers/crypto/caam/key_gen.c | 32 +-
+ drivers/crypto/caam/key_gen.h | 36 +-
+ drivers/crypto/caam/pdb.h | 62 +
+ drivers/crypto/caam/pkc_desc.c | 36 +
+ drivers/crypto/caam/qi.c | 797 ++++
+ drivers/crypto/caam/qi.h | 204 +
+ drivers/crypto/caam/regs.h | 63 +-
+ drivers/crypto/caam/sg_sw_qm.h | 126 +
+ drivers/crypto/caam/sg_sw_qm2.h | 81 +
+ drivers/crypto/caam/sg_sw_sec4.h | 60 +-
+ drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
+ drivers/staging/wilc1000/linux_wlan.c | 2 +-
+ drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
+ include/crypto/acompress.h | 269 ++
+ include/crypto/internal/acompress.h | 81 +
+ include/crypto/internal/scompress.h | 136 +
+ include/linux/crypto.h | 3 +
+ include/uapi/linux/cryptouser.h | 5 +
+ scripts/spelling.txt | 3 +
+ sound/soc/amd/acp-pcm-dma.c | 2 +-
+ 54 files changed, 17263 insertions(+), 3955 deletions(-)
+ create mode 100644 crypto/acompress.c
+ create mode 100644 crypto/scompress.c
+ create mode 100644 crypto/tls.c
+ create mode 100644 drivers/crypto/caam/caamalg_desc.c
+ create mode 100644 drivers/crypto/caam/caamalg_desc.h
+ create mode 100644 drivers/crypto/caam/caamalg_qi.c
+ create mode 100644 drivers/crypto/caam/caamalg_qi2.c
+ create mode 100644 drivers/crypto/caam/caamalg_qi2.h
+ create mode 100644 drivers/crypto/caam/dpseci.c
+ create mode 100644 drivers/crypto/caam/dpseci.h
+ create mode 100644 drivers/crypto/caam/dpseci_cmd.h
+ create mode 100644 drivers/crypto/caam/qi.c
+ create mode 100644 drivers/crypto/caam/qi.h
+ create mode 100644 drivers/crypto/caam/sg_sw_qm.h
+ create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
+ create mode 100644 include/crypto/acompress.h
+ create mode 100644 include/crypto/internal/acompress.h
+ create mode 100644 include/crypto/internal/scompress.h
+
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 17be110a..00e145e2 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -102,6 +102,15 @@ config CRYPTO_KPP
+ select CRYPTO_ALGAPI
+ select CRYPTO_KPP2
+
++config CRYPTO_ACOMP2
++ tristate
++ select CRYPTO_ALGAPI2
++
++config CRYPTO_ACOMP
++ tristate
++ select CRYPTO_ALGAPI
++ select CRYPTO_ACOMP2
++
+ config CRYPTO_RSA
+ tristate "RSA algorithm"
+ select CRYPTO_AKCIPHER
+@@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
+ select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
++ select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
+
+ config CRYPTO_USER
+ tristate "Userspace cryptographic algorithm configuration"
+@@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
+ a sequence number xored with a salt. This is the default
+ algorithm for CBC.
+
++config CRYPTO_TLS
++ tristate "TLS support"
++ select CRYPTO_AEAD
++ select CRYPTO_BLKCIPHER
++ select CRYPTO_MANAGER
++ select CRYPTO_HASH
++ select CRYPTO_NULL
++ select CRYPTO_AUTHENC
++ help
++ Support for TLS 1.0 record encryption and decryption
++
++ This module adds support for encryption/decryption of TLS 1.0 frames
++ using blockcipher algorithms. The name of the resulting algorithm is
++ "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
++ algorithms are used (e.g. aes-generic, sha1-generic), but hardware
++ accelerated versions will be used automatically if available.
++
++ User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
++	  operations through AF_ALG or cryptodev interfaces.
++
+ comment "Block modes"
+
+ config CRYPTO_CBC
+diff --git a/crypto/Makefile b/crypto/Makefile
+index 9e52b3c5..936d2b73 100644
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
+ rsa_generic-y += rsa-pkcs1pad.o
+ obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
+
++obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
++obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
++
+ cryptomgr-y := algboss.o testmgr.o
+
+ obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
+@@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
+ obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
+ obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
+ obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
++obj-$(CONFIG_CRYPTO_TLS) += tls.o
+ obj-$(CONFIG_CRYPTO_LZO) += lzo.o
+ obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
+ obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
+diff --git a/crypto/acompress.c b/crypto/acompress.c
+new file mode 100644
+index 00000000..887783d8
+--- /dev/null
++++ b/crypto/acompress.c
+@@ -0,0 +1,169 @@
++/*
++ * Asynchronous Compression operations
++ *
++ * Copyright (c) 2016, Intel Corporation
++ * Authors: Weigang Li <weigang.li@intel.com>
++ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/crypto.h>
++#include <crypto/algapi.h>
++#include <linux/cryptouser.h>
++#include <net/netlink.h>
++#include <crypto/internal/acompress.h>
++#include <crypto/internal/scompress.h>
++#include "internal.h"
++
++static const struct crypto_type crypto_acomp_type;
++
++#ifdef CONFIG_NET
++static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++ struct crypto_report_acomp racomp;
++
++ strncpy(racomp.type, "acomp", sizeof(racomp.type));
++
++ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
++ sizeof(struct crypto_report_acomp), &racomp))
++ goto nla_put_failure;
++ return 0;
++
++nla_put_failure:
++ return -EMSGSIZE;
++}
++#else
++static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++ return -ENOSYS;
++}
++#endif
++
++static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
++ __attribute__ ((unused));
++
++static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
++{
++ seq_puts(m, "type : acomp\n");
++}
++
++static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
++{
++ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
++ struct acomp_alg *alg = crypto_acomp_alg(acomp);
++
++ alg->exit(acomp);
++}
++
++static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
++{
++ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
++ struct acomp_alg *alg = crypto_acomp_alg(acomp);
++
++ if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
++ return crypto_init_scomp_ops_async(tfm);
++
++ acomp->compress = alg->compress;
++ acomp->decompress = alg->decompress;
++ acomp->dst_free = alg->dst_free;
++ acomp->reqsize = alg->reqsize;
++
++ if (alg->exit)
++ acomp->base.exit = crypto_acomp_exit_tfm;
++
++ if (alg->init)
++ return alg->init(acomp);
++
++ return 0;
++}
++
++static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
++{
++ int extsize = crypto_alg_extsize(alg);
++
++ if (alg->cra_type != &crypto_acomp_type)
++ extsize += sizeof(struct crypto_scomp *);
++
++ return extsize;
++}
++
++static const struct crypto_type crypto_acomp_type = {
++ .extsize = crypto_acomp_extsize,
++ .init_tfm = crypto_acomp_init_tfm,
++#ifdef CONFIG_PROC_FS
++ .show = crypto_acomp_show,
++#endif
++ .report = crypto_acomp_report,
++ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
++ .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
++ .type = CRYPTO_ALG_TYPE_ACOMPRESS,
++ .tfmsize = offsetof(struct crypto_acomp, base),
++};
++
++struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
++ u32 mask)
++{
++ return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
++}
++EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
++
++struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
++{
++ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
++ struct acomp_req *req;
++
++ req = __acomp_request_alloc(acomp);
++ if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
++ return crypto_acomp_scomp_alloc_ctx(req);
++
++ return req;
++}
++EXPORT_SYMBOL_GPL(acomp_request_alloc);
++
++void acomp_request_free(struct acomp_req *req)
++{
++ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
++ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
++
++ if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
++ crypto_acomp_scomp_free_ctx(req);
++
++ if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
++ acomp->dst_free(req->dst);
++ req->dst = NULL;
++ }
++
++ __acomp_request_free(req);
++}
++EXPORT_SYMBOL_GPL(acomp_request_free);
++
++int crypto_register_acomp(struct acomp_alg *alg)
++{
++ struct crypto_alg *base = &alg->base;
++
++ base->cra_type = &crypto_acomp_type;
++ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
++ base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
++
++ return crypto_register_alg(base);
++}
++EXPORT_SYMBOL_GPL(crypto_register_acomp);
++
++int crypto_unregister_acomp(struct acomp_alg *alg)
++{
++ return crypto_unregister_alg(&alg->base);
++}
++EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Asynchronous compression type");
+diff --git a/crypto/algboss.c b/crypto/algboss.c
+index 4bde25d6..ccb85e17 100644
+--- a/crypto/algboss.c
++++ b/crypto/algboss.c
+@@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
+ memcpy(param->alg, alg->cra_name, sizeof(param->alg));
+ type = alg->cra_flags;
+
+- /* This piece of crap needs to disappear into per-type test hooks. */
+-#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+- type |= CRYPTO_ALG_TESTED;
+-#else
+- if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
+- CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
+- ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+- CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+- alg->cra_ablkcipher.ivsize))
++ /* Do not test internal algorithms. */
++ if (type & CRYPTO_ALG_INTERNAL)
+ type |= CRYPTO_ALG_TESTED;
+-#endif
+
+ param->type = type;
+
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 1c570548..a90404a0 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -112,6 +112,21 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
+ return -EMSGSIZE;
+ }
+
++static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
++{
++ struct crypto_report_acomp racomp;
++
++ strncpy(racomp.type, "acomp", sizeof(racomp.type));
++
++ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
++ sizeof(struct crypto_report_acomp), &racomp))
++ goto nla_put_failure;
++ return 0;
++
++nla_put_failure:
++ return -EMSGSIZE;
++}
++
+ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_akcipher rakcipher;
+@@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg,
+ goto nla_put_failure;
+
+ break;
++ case CRYPTO_ALG_TYPE_ACOMPRESS:
++ if (crypto_report_acomp(skb, alg))
++ goto nla_put_failure;
+
++ break;
+ case CRYPTO_ALG_TYPE_AKCIPHER:
+ if (crypto_report_akcipher(skb, alg))
+ goto nla_put_failure;
+diff --git a/crypto/scompress.c b/crypto/scompress.c
+new file mode 100644
+index 00000000..35e396d1
+--- /dev/null
++++ b/crypto/scompress.c
+@@ -0,0 +1,356 @@
++/*
++ * Synchronous Compression operations
++ *
++ * Copyright 2015 LG Electronics Inc.
++ * Copyright (c) 2016, Intel Corporation
++ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/crypto.h>
++#include <linux/vmalloc.h>
++#include <crypto/algapi.h>
++#include <linux/cryptouser.h>
++#include <net/netlink.h>
++#include <linux/scatterlist.h>
++#include <crypto/scatterwalk.h>
++#include <crypto/internal/acompress.h>
++#include <crypto/internal/scompress.h>
++#include "internal.h"
++
++static const struct crypto_type crypto_scomp_type;
++static void * __percpu *scomp_src_scratches;
++static void * __percpu *scomp_dst_scratches;
++static int scomp_scratch_users;
++static DEFINE_MUTEX(scomp_lock);
++
++#ifdef CONFIG_NET
++static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++ struct crypto_report_comp rscomp;
++
++ strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
++
++ if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
++ sizeof(struct crypto_report_comp), &rscomp))
++ goto nla_put_failure;
++ return 0;
++
++nla_put_failure:
++ return -EMSGSIZE;
++}
++#else
++static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++ return -ENOSYS;
++}
++#endif
++
++static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
++ __attribute__ ((unused));
++
++static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
++{
++ seq_puts(m, "type : scomp\n");
++}
++
++static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
++{
++ return 0;
++}
++
++static void crypto_scomp_free_scratches(void * __percpu *scratches)
++{
++ int i;
++
++ if (!scratches)
++ return;
++
++ for_each_possible_cpu(i)
++ vfree(*per_cpu_ptr(scratches, i));
++
++ free_percpu(scratches);
++}
++
++static void * __percpu *crypto_scomp_alloc_scratches(void)
++{
++ void * __percpu *scratches;
++ int i;
++
++ scratches = alloc_percpu(void *);
++ if (!scratches)
++ return NULL;
++
++ for_each_possible_cpu(i) {
++ void *scratch;
++
++ scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
++ if (!scratch)
++ goto error;
++ *per_cpu_ptr(scratches, i) = scratch;
++ }
++
++ return scratches;
++
++error:
++ crypto_scomp_free_scratches(scratches);
++ return NULL;
++}
++
++static void crypto_scomp_free_all_scratches(void)
++{
++ if (!--scomp_scratch_users) {
++ crypto_scomp_free_scratches(scomp_src_scratches);
++ crypto_scomp_free_scratches(scomp_dst_scratches);
++ scomp_src_scratches = NULL;
++ scomp_dst_scratches = NULL;
++ }
++}
++
++static int crypto_scomp_alloc_all_scratches(void)
++{
++ if (!scomp_scratch_users++) {
++ scomp_src_scratches = crypto_scomp_alloc_scratches();
++ if (!scomp_src_scratches)
++ return -ENOMEM;
++ scomp_dst_scratches = crypto_scomp_alloc_scratches();
++ if (!scomp_dst_scratches)
++ return -ENOMEM;
++ }
++ return 0;
++}
++
++static void crypto_scomp_sg_free(struct scatterlist *sgl)
++{
++ int i, n;
++ struct page *page;
++
++ if (!sgl)
++ return;
++
++ n = sg_nents(sgl);
++ for_each_sg(sgl, sgl, n, i) {
++ page = sg_page(sgl);
++ if (page)
++ __free_page(page);
++ }
++
++ kfree(sgl);
++}
++
++static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
++{
++ struct scatterlist *sgl;
++ struct page *page;
++ int i, n;
++
++ n = ((size - 1) >> PAGE_SHIFT) + 1;
++
++ sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
++ if (!sgl)
++ return NULL;
++
++ sg_init_table(sgl, n);
++
++ for (i = 0; i < n; i++) {
++ page = alloc_page(gfp);
++ if (!page)
++ goto err;
++ sg_set_page(sgl + i, page, PAGE_SIZE, 0);
++ }
++
++ return sgl;
++
++err:
++ sg_mark_end(sgl + i);
++ crypto_scomp_sg_free(sgl);
++ return NULL;
++}
++
++static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
++{
++ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
++ void **tfm_ctx = acomp_tfm_ctx(tfm);
++ struct crypto_scomp *scomp = *tfm_ctx;
++ void **ctx = acomp_request_ctx(req);
++ const int cpu = get_cpu();
++ u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
++ u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
++ int ret;
++
++ if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (req->dst && !req->dlen) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
++ req->dlen = SCOMP_SCRATCH_SIZE;
++
++ scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
++ if (dir)
++ ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
++ scratch_dst, &req->dlen, *ctx);
++ else
++ ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
++ scratch_dst, &req->dlen, *ctx);
++ if (!ret) {
++ if (!req->dst) {
++ req->dst = crypto_scomp_sg_alloc(req->dlen,
++ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
++ GFP_KERNEL : GFP_ATOMIC);
++ if (!req->dst)
++ goto out;
++ }
++ scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
++ 1);
++ }
++out:
++ put_cpu();
++ return ret;
++}
++
++static int scomp_acomp_compress(struct acomp_req *req)
++{
++ return scomp_acomp_comp_decomp(req, 1);
++}
++
++static int scomp_acomp_decompress(struct acomp_req *req)
++{
++ return scomp_acomp_comp_decomp(req, 0);
++}
++
++static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
++{
++ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
++
++ crypto_free_scomp(*ctx);
++}
++
++int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
++{
++ struct crypto_alg *calg = tfm->__crt_alg;
++ struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
++ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
++ struct crypto_scomp *scomp;
++
++ if (!crypto_mod_get(calg))
++ return -EAGAIN;
++
++ scomp = crypto_create_tfm(calg, &crypto_scomp_type);
++ if (IS_ERR(scomp)) {
++ crypto_mod_put(calg);
++ return PTR_ERR(scomp);
++ }
++
++ *ctx = scomp;
++ tfm->exit = crypto_exit_scomp_ops_async;
++
++ crt->compress = scomp_acomp_compress;
++ crt->decompress = scomp_acomp_decompress;
++ crt->dst_free = crypto_scomp_sg_free;
++ crt->reqsize = sizeof(void *);
++
++ return 0;
++}
++
++struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
++{
++ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
++ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
++ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
++ struct crypto_scomp *scomp = *tfm_ctx;
++ void *ctx;
++
++ ctx = crypto_scomp_alloc_ctx(scomp);
++ if (IS_ERR(ctx)) {
++ kfree(req);
++ return NULL;
++ }
++
++ *req->__ctx = ctx;
++
++ return req;
++}
++
++void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
++{
++ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
++ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
++ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
++ struct crypto_scomp *scomp = *tfm_ctx;
++ void *ctx = *req->__ctx;
++
++ if (ctx)
++ crypto_scomp_free_ctx(scomp, ctx);
++}
++
++static const struct crypto_type crypto_scomp_type = {
++ .extsize = crypto_alg_extsize,
++ .init_tfm = crypto_scomp_init_tfm,
++#ifdef CONFIG_PROC_FS
++ .show = crypto_scomp_show,
++#endif
++ .report = crypto_scomp_report,
++ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
++ .maskset = CRYPTO_ALG_TYPE_MASK,
++ .type = CRYPTO_ALG_TYPE_SCOMPRESS,
++ .tfmsize = offsetof(struct crypto_scomp, base),
++};
++
++int crypto_register_scomp(struct scomp_alg *alg)
++{
++ struct crypto_alg *base = &alg->base;
++ int ret = -ENOMEM;
++
++ mutex_lock(&scomp_lock);
++ if (crypto_scomp_alloc_all_scratches())
++ goto error;
++
++ base->cra_type = &crypto_scomp_type;
++ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
++ base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
++
++ ret = crypto_register_alg(base);
++ if (ret)
++ goto error;
++
++ mutex_unlock(&scomp_lock);
++ return ret;
++
++error:
++ crypto_scomp_free_all_scratches();
++ mutex_unlock(&scomp_lock);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(crypto_register_scomp);
++
++int crypto_unregister_scomp(struct scomp_alg *alg)
++{
++ int ret;
++
++ mutex_lock(&scomp_lock);
++ ret = crypto_unregister_alg(&alg->base);
++ crypto_scomp_free_all_scratches();
++ mutex_unlock(&scomp_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Synchronous compression type");
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index ae22f05d..bbb35eed 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -74,7 +74,7 @@ static char *check[] = {
+ "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
+ "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
+ "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
+- NULL
++ "rsa", NULL
+ };
+
+ struct tcrypt_result {
+@@ -1329,6 +1329,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
+ ret += tcrypt_test("hmac(sha3-512)");
+ break;
+
++ case 115:
++ ret += tcrypt_test("rsa");
++ break;
++
+ case 150:
+ ret += tcrypt_test("ansi_cprng");
+ break;
+@@ -1390,6 +1394,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
+ case 190:
+ ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
+ break;
++ case 191:
++ ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
++ break;
+ case 200:
+ test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+@@ -1404,9 +1411,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
+ test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48);
+ test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
+- speed_template_32_48_64);
++ speed_template_32_64);
+ test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
+- speed_template_32_48_64);
++ speed_template_32_64);
+ test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+@@ -1837,9 +1844,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
+ test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48);
+ test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
+- speed_template_32_48_64);
++ speed_template_32_64);
+ test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
+- speed_template_32_48_64);
++ speed_template_32_64);
+ test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 62dffa00..73d91fba 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -33,6 +33,7 @@
+ #include <crypto/drbg.h>
+ #include <crypto/akcipher.h>
+ #include <crypto/kpp.h>
++#include <crypto/acompress.h>
+
+ #include "internal.h"
+
+@@ -62,7 +63,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
+ */
+ #define IDX1 32
+ #define IDX2 32400
+-#define IDX3 1
++#define IDX3 1511
+ #define IDX4 8193
+ #define IDX5 22222
+ #define IDX6 17101
+@@ -82,47 +83,54 @@ struct tcrypt_result {
+
+ struct aead_test_suite {
+ struct {
+- struct aead_testvec *vecs;
++ const struct aead_testvec *vecs;
+ unsigned int count;
+ } enc, dec;
+ };
+
+ struct cipher_test_suite {
+ struct {
+- struct cipher_testvec *vecs;
++ const struct cipher_testvec *vecs;
+ unsigned int count;
+ } enc, dec;
+ };
+
+ struct comp_test_suite {
+ struct {
+- struct comp_testvec *vecs;
++ const struct comp_testvec *vecs;
+ unsigned int count;
+ } comp, decomp;
+ };
+
+ struct hash_test_suite {
+- struct hash_testvec *vecs;
++ const struct hash_testvec *vecs;
+ unsigned int count;
+ };
+
+ struct cprng_test_suite {
+- struct cprng_testvec *vecs;
++ const struct cprng_testvec *vecs;
+ unsigned int count;
+ };
+
+ struct drbg_test_suite {
+- struct drbg_testvec *vecs;
++ const struct drbg_testvec *vecs;
+ unsigned int count;
+ };
+
++struct tls_test_suite {
++ struct {
++ struct tls_testvec *vecs;
++ unsigned int count;
++ } enc, dec;
++};
++
+ struct akcipher_test_suite {
+- struct akcipher_testvec *vecs;
++ const struct akcipher_testvec *vecs;
+ unsigned int count;
+ };
+
+ struct kpp_test_suite {
+- struct kpp_testvec *vecs;
++ const struct kpp_testvec *vecs;
+ unsigned int count;
+ };
+
+@@ -139,12 +147,14 @@ struct alg_test_desc {
+ struct hash_test_suite hash;
+ struct cprng_test_suite cprng;
+ struct drbg_test_suite drbg;
++ struct tls_test_suite tls;
+ struct akcipher_test_suite akcipher;
+ struct kpp_test_suite kpp;
+ } suite;
+ };
+
+-static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
++static const unsigned int IDX[8] = {
++ IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
+
+ static void hexdump(unsigned char *buf, unsigned int len)
+ {
+@@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_result *tr, int ret)
+ }
+
+ static int ahash_partial_update(struct ahash_request **preq,
+- struct crypto_ahash *tfm, struct hash_testvec *template,
++ struct crypto_ahash *tfm, const struct hash_testvec *template,
+ void *hash_buff, int k, int temp, struct scatterlist *sg,
+ const char *algo, char *result, struct tcrypt_result *tresult)
+ {
+@@ -259,11 +269,12 @@ static int ahash_partial_update(struct ahash_request **preq,
+ return ret;
+ }
+
+-static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+- unsigned int tcount, bool use_digest,
+- const int align_offset)
++static int __test_hash(struct crypto_ahash *tfm,
++ const struct hash_testvec *template, unsigned int tcount,
++ bool use_digest, const int align_offset)
+ {
+ const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
++ size_t digest_size = crypto_ahash_digestsize(tfm);
+ unsigned int i, j, k, temp;
+ struct scatterlist sg[8];
+ char *result;
+@@ -274,7 +285,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ char *xbuf[XBUFSIZE];
+ int ret = -ENOMEM;
+
+- result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
++ result = kmalloc(digest_size, GFP_KERNEL);
+ if (!result)
+ return ret;
+ key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
+@@ -304,7 +315,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ goto out;
+
+ j++;
+- memset(result, 0, MAX_DIGEST_SIZE);
++ memset(result, 0, digest_size);
+
+ hash_buff = xbuf[0];
+ hash_buff += align_offset;
+@@ -379,7 +390,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ continue;
+
+ j++;
+- memset(result, 0, MAX_DIGEST_SIZE);
++ memset(result, 0, digest_size);
+
+ temp = 0;
+ sg_init_table(sg, template[i].np);
+@@ -457,7 +468,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ continue;
+
+ j++;
+- memset(result, 0, MAX_DIGEST_SIZE);
++ memset(result, 0, digest_size);
+
+ ret = -EINVAL;
+ hash_buff = xbuf[0];
+@@ -536,7 +547,8 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ return ret;
+ }
+
+-static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
++static int test_hash(struct crypto_ahash *tfm,
++ const struct hash_testvec *template,
+ unsigned int tcount, bool use_digest)
+ {
+ unsigned int alignmask;
+@@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ }
+
+ static int __test_aead(struct crypto_aead *tfm, int enc,
+- struct aead_testvec *template, unsigned int tcount,
++ const struct aead_testvec *template, unsigned int tcount,
+ const bool diff_dst, const int align_offset)
+ {
+ const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
+@@ -955,7 +967,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
+ }
+
+ static int test_aead(struct crypto_aead *tfm, int enc,
+- struct aead_testvec *template, unsigned int tcount)
++ const struct aead_testvec *template, unsigned int tcount)
+ {
+ unsigned int alignmask;
+ int ret;
+@@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead *tfm, int enc,
+ return 0;
+ }
+
++static int __test_tls(struct crypto_aead *tfm, int enc,
++ struct tls_testvec *template, unsigned int tcount,
++ const bool diff_dst)
++{
++ const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
++ unsigned int i, k, authsize;
++ char *q;
++ struct aead_request *req;
++ struct scatterlist *sg;
++ struct scatterlist *sgout;
++ const char *e, *d;
++ struct tcrypt_result result;
++ void *input;
++ void *output;
++ void *assoc;
++ char *iv;
++ char *key;
++ char *xbuf[XBUFSIZE];
++ char *xoutbuf[XBUFSIZE];
++ char *axbuf[XBUFSIZE];
++ int ret = -ENOMEM;
++
++ if (testmgr_alloc_buf(xbuf))
++ goto out_noxbuf;
++
++ if (diff_dst && testmgr_alloc_buf(xoutbuf))
++ goto out_nooutbuf;
++
++ if (testmgr_alloc_buf(axbuf))
++ goto out_noaxbuf;
++
++ iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
++ if (!iv)
++ goto out_noiv;
++
++ key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
++ if (!key)
++ goto out_nokey;
++
++ sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
++ if (!sg)
++ goto out_nosg;
++
++ sgout = sg + 8;
++
++ d = diff_dst ? "-ddst" : "";
++ e = enc ? "encryption" : "decryption";
++
++ init_completion(&result.completion);
++
++ req = aead_request_alloc(tfm, GFP_KERNEL);
++ if (!req) {
++ pr_err("alg: tls%s: Failed to allocate request for %s\n",
++ d, algo);
++ goto out;
++ }
++
++ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++ tcrypt_complete, &result);
++
++ for (i = 0; i < tcount; i++) {
++ input = xbuf[0];
++ assoc = axbuf[0];
++
++ ret = -EINVAL;
++ if (WARN_ON(template[i].ilen > PAGE_SIZE ||
++ template[i].alen > PAGE_SIZE))
++ goto out;
++
++ memcpy(assoc, template[i].assoc, template[i].alen);
++ memcpy(input, template[i].input, template[i].ilen);
++
++ if (template[i].iv)
++ memcpy(iv, template[i].iv, MAX_IVLEN);
++ else
++ memset(iv, 0, MAX_IVLEN);
++
++ crypto_aead_clear_flags(tfm, ~0);
++
++ if (template[i].klen > MAX_KEYLEN) {
++ pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
++ d, i, algo, template[i].klen, MAX_KEYLEN);
++ ret = -EINVAL;
++ goto out;
++ }
++ memcpy(key, template[i].key, template[i].klen);
++
++ ret = crypto_aead_setkey(tfm, key, template[i].klen);
++ if (!ret == template[i].fail) {
++ pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
++ d, i, algo, crypto_aead_get_flags(tfm));
++ goto out;
++ } else if (ret)
++ continue;
++
++ authsize = 20;
++ ret = crypto_aead_setauthsize(tfm, authsize);
++ if (ret) {
++ pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
++ d, authsize, i, algo);
++ goto out;
++ }
++
++ k = !!template[i].alen;
++ sg_init_table(sg, k + 1);
++ sg_set_buf(&sg[0], assoc, template[i].alen);
++ sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
++ template[i].ilen));
++ output = input;
++
++ if (diff_dst) {
++ sg_init_table(sgout, k + 1);
++ sg_set_buf(&sgout[0], assoc, template[i].alen);
++
++ output = xoutbuf[0];
++ sg_set_buf(&sgout[k], output,
++ (enc ? template[i].rlen : template[i].ilen));
++ }
++
++ aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
++ template[i].ilen, iv);
++
++ aead_request_set_ad(req, template[i].alen);
++
++ ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
++
++ switch (ret) {
++ case 0:
++ if (template[i].novrfy) {
++ /* verification was supposed to fail */
++ pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
++ d, e, i, algo);
++ /* so really, we got a bad message */
++ ret = -EBADMSG;
++ goto out;
++ }
++ break;
++ case -EINPROGRESS:
++ case -EBUSY:
++ wait_for_completion(&result.completion);
++ reinit_completion(&result.completion);
++ ret = result.err;
++ if (!ret)
++ break;
++ case -EBADMSG:
++ /* verification failure was expected */
++ if (template[i].novrfy)
++ continue;
++ /* fall through */
++ default:
++ pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
++ d, e, i, algo, -ret);
++ goto out;
++ }
++
++ q = output;
++ if (memcmp(q, template[i].result, template[i].rlen)) {
++ pr_err("alg: tls%s: Test %d failed on %s for %s\n",
++ d, i, e, algo);
++ hexdump(q, template[i].rlen);
++ pr_err("should be:\n");
++ hexdump(template[i].result, template[i].rlen);
++ ret = -EINVAL;
++ goto out;
++ }
++ }
++
++out:
++ aead_request_free(req);
++
++ kfree(sg);
++out_nosg:
++ kfree(key);
++out_nokey:
++ kfree(iv);
++out_noiv:
++ testmgr_free_buf(axbuf);
++out_noaxbuf:
++ if (diff_dst)
++ testmgr_free_buf(xoutbuf);
++out_nooutbuf:
++ testmgr_free_buf(xbuf);
++out_noxbuf:
++ return ret;
++}
++
++static int test_tls(struct crypto_aead *tfm, int enc,
++ struct tls_testvec *template, unsigned int tcount)
++{
++ int ret;
++ /* test 'dst == src' case */
++ ret = __test_tls(tfm, enc, template, tcount, false);
++ if (ret)
++ return ret;
++ /* test 'dst != src' case */
++ return __test_tls(tfm, enc, template, tcount, true);
++}
++
++static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
++ u32 type, u32 mask)
++{
++ struct crypto_aead *tfm;
++ int err = 0;
++
++ tfm = crypto_alloc_aead(driver, type, mask);
++ if (IS_ERR(tfm)) {
++ pr_err("alg: aead: Failed to load transform for %s: %ld\n",
++ driver, PTR_ERR(tfm));
++ return PTR_ERR(tfm);
++ }
++
++ if (desc->suite.tls.enc.vecs) {
++ err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
++ desc->suite.tls.enc.count);
++ if (err)
++ goto out;
++ }
++
++ if (!err && desc->suite.tls.dec.vecs)
++ err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
++ desc->suite.tls.dec.count);
++
++out:
++ crypto_free_aead(tfm);
++ return err;
++}
++
+ static int test_cipher(struct crypto_cipher *tfm, int enc,
+- struct cipher_testvec *template, unsigned int tcount)
++ const struct cipher_testvec *template,
++ unsigned int tcount)
+ {
+ const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
+ unsigned int i, j, k;
+@@ -1066,7 +1306,8 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
+ }
+
+ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
+- struct cipher_testvec *template, unsigned int tcount,
++ const struct cipher_testvec *template,
++ unsigned int tcount,
+ const bool diff_dst, const int align_offset)
+ {
+ const char *algo =
+@@ -1330,7 +1571,8 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
+ }
+
+ static int test_skcipher(struct crypto_skcipher *tfm, int enc,
+- struct cipher_testvec *template, unsigned int tcount)
++ const struct cipher_testvec *template,
++ unsigned int tcount)
+ {
+ unsigned int alignmask;
+ int ret;
+@@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_skcipher *tfm, int enc,
+ return 0;
+ }
+
+-static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
+- struct comp_testvec *dtemplate, int ctcount, int dtcount)
++static int test_comp(struct crypto_comp *tfm,
++ const struct comp_testvec *ctemplate,
++ const struct comp_testvec *dtemplate,
++ int ctcount, int dtcount)
+ {
+ const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
+ unsigned int i;
+@@ -1442,7 +1686,154 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
+ return ret;
+ }
+
+-static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
++static int test_acomp(struct crypto_acomp *tfm,
++ const struct comp_testvec *ctemplate,
++ const struct comp_testvec *dtemplate,
++ int ctcount, int dtcount)
++{
++ const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
++ unsigned int i;
++ char *output;
++ int ret;
++ struct scatterlist src, dst;
++ struct acomp_req *req;
++ struct tcrypt_result result;
++
++ output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
++ if (!output)
++ return -ENOMEM;
++
++ for (i = 0; i < ctcount; i++) {
++ unsigned int dlen = COMP_BUF_SIZE;
++ int ilen = ctemplate[i].inlen;
++ void *input_vec;
++
++ input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
++ if (!input_vec) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ memset(output, 0, dlen);
++ init_completion(&result.completion);
++ sg_init_one(&src, input_vec, ilen);
++ sg_init_one(&dst, output, dlen);
++
++ req = acomp_request_alloc(tfm);
++ if (!req) {
++ pr_err("alg: acomp: request alloc failed for %s\n",
++ algo);
++ kfree(input_vec);
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ acomp_request_set_params(req, &src, &dst, ilen, dlen);
++ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++ tcrypt_complete, &result);
++
++ ret = wait_async_op(&result, crypto_acomp_compress(req));
++ if (ret) {
++ pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
++ i + 1, algo, -ret);
++ kfree(input_vec);
++ acomp_request_free(req);
++ goto out;
++ }
++
++ if (req->dlen != ctemplate[i].outlen) {
++ pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
++ i + 1, algo, req->dlen);
++ ret = -EINVAL;
++ kfree(input_vec);
++ acomp_request_free(req);
++ goto out;
++ }
++
++ if (memcmp(output, ctemplate[i].output, req->dlen)) {
++ pr_err("alg: acomp: Compression test %d failed for %s\n",
++ i + 1, algo);
++ hexdump(output, req->dlen);
++ ret = -EINVAL;
++ kfree(input_vec);
++ acomp_request_free(req);
++ goto out;
++ }
++
++ kfree(input_vec);
++ acomp_request_free(req);
++ }
++
++ for (i = 0; i < dtcount; i++) {
++ unsigned int dlen = COMP_BUF_SIZE;
++ int ilen = dtemplate[i].inlen;
++ void *input_vec;
++
++ input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
++ if (!input_vec) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ memset(output, 0, dlen);
++ init_completion(&result.completion);
++ sg_init_one(&src, input_vec, ilen);
++ sg_init_one(&dst, output, dlen);
++
++ req = acomp_request_alloc(tfm);
++ if (!req) {
++ pr_err("alg: acomp: request alloc failed for %s\n",
++ algo);
++ kfree(input_vec);
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ acomp_request_set_params(req, &src, &dst, ilen, dlen);
++ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++ tcrypt_complete, &result);
++
++ ret = wait_async_op(&result, crypto_acomp_decompress(req));
++ if (ret) {
++ pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
++ i + 1, algo, -ret);
++ kfree(input_vec);
++ acomp_request_free(req);
++ goto out;
++ }
++
++ if (req->dlen != dtemplate[i].outlen) {
++ pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
++ i + 1, algo, req->dlen);
++ ret = -EINVAL;
++ kfree(input_vec);
++ acomp_request_free(req);
++ goto out;
++ }
++
++ if (memcmp(output, dtemplate[i].output, req->dlen)) {
++ pr_err("alg: acomp: Decompression test %d failed for %s\n",
++ i + 1, algo);
++ hexdump(output, req->dlen);
++ ret = -EINVAL;
++ kfree(input_vec);
++ acomp_request_free(req);
++ goto out;
++ }
++
++ kfree(input_vec);
++ acomp_request_free(req);
++ }
++
++ ret = 0;
++
++out:
++ kfree(output);
++ return ret;
++}
++
++static int test_cprng(struct crypto_rng *tfm,
++ const struct cprng_testvec *template,
+ unsigned int tcount)
+ {
+ const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
+@@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
+ struct crypto_aead *tfm;
+ int err = 0;
+
+- tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
++ tfm = crypto_alloc_aead(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
+ "%ld\n", driver, PTR_ERR(tfm));
+@@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
+ struct crypto_cipher *tfm;
+ int err = 0;
+
+- tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
++ tfm = crypto_alloc_cipher(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ printk(KERN_ERR "alg: cipher: Failed to load transform for "
+ "%s: %ld\n", driver, PTR_ERR(tfm));
+@@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
+ struct crypto_skcipher *tfm;
+ int err = 0;
+
+- tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
++ tfm = crypto_alloc_skcipher(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ printk(KERN_ERR "alg: skcipher: Failed to load transform for "
+ "%s: %ld\n", driver, PTR_ERR(tfm));
+@@ -1593,22 +1984,38 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
+ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
+ u32 type, u32 mask)
+ {
+- struct crypto_comp *tfm;
++ struct crypto_comp *comp;
++ struct crypto_acomp *acomp;
+ int err;
++ u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
++
++ if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
++ acomp = crypto_alloc_acomp(driver, type, mask);
++ if (IS_ERR(acomp)) {
++ pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
++ driver, PTR_ERR(acomp));
++ return PTR_ERR(acomp);
++ }
++ err = test_acomp(acomp, desc->suite.comp.comp.vecs,
++ desc->suite.comp.decomp.vecs,
++ desc->suite.comp.comp.count,
++ desc->suite.comp.decomp.count);
++ crypto_free_acomp(acomp);
++ } else {
++ comp = crypto_alloc_comp(driver, type, mask);
++ if (IS_ERR(comp)) {
++ pr_err("alg: comp: Failed to load transform for %s: %ld\n",
++ driver, PTR_ERR(comp));
++ return PTR_ERR(comp);
++ }
+
+- tfm = crypto_alloc_comp(driver, type, mask);
+- if (IS_ERR(tfm)) {
+- printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
+- "%ld\n", driver, PTR_ERR(tfm));
+- return PTR_ERR(tfm);
+- }
+-
+- err = test_comp(tfm, desc->suite.comp.comp.vecs,
+- desc->suite.comp.decomp.vecs,
+- desc->suite.comp.comp.count,
+- desc->suite.comp.decomp.count);
++ err = test_comp(comp, desc->suite.comp.comp.vecs,
++ desc->suite.comp.decomp.vecs,
++ desc->suite.comp.comp.count,
++ desc->suite.comp.decomp.count);
+
+- crypto_free_comp(tfm);
++ crypto_free_comp(comp);
++ }
+ return err;
+ }
+
+@@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
+ struct crypto_ahash *tfm;
+ int err;
+
+- tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
++ tfm = crypto_alloc_ahash(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
+ "%ld\n", driver, PTR_ERR(tfm));
+@@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
+ if (err)
+ goto out;
+
+- tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
++ tfm = crypto_alloc_shash(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
+ "%ld\n", driver, PTR_ERR(tfm));
+@@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
+ struct crypto_rng *rng;
+ int err;
+
+- rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
++ rng = crypto_alloc_rng(driver, type, mask);
+ if (IS_ERR(rng)) {
+ printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
+ "%ld\n", driver, PTR_ERR(rng));
+@@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
+ }
+
+
+-static int drbg_cavs_test(struct drbg_testvec *test, int pr,
++static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
+ const char *driver, u32 type, u32 mask)
+ {
+ int ret = -EAGAIN;
+@@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
+ if (!buf)
+ return -ENOMEM;
+
+- drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
++ drng = crypto_alloc_rng(driver, type, mask);
+ if (IS_ERR(drng)) {
+ printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
+ "%s\n", driver);
+@@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
+ int err = 0;
+ int pr = 0;
+ int i = 0;
+- struct drbg_testvec *template = desc->suite.drbg.vecs;
++ const struct drbg_testvec *template = desc->suite.drbg.vecs;
+ unsigned int tcount = desc->suite.drbg.count;
+
+ if (0 == memcmp(driver, "drbg_pr_", 8))
+@@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
+
+ }
+
+-static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
++static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
+ const char *alg)
+ {
+ struct kpp_request *req;
+@@ -1888,7 +2295,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
+ }
+
+ static int test_kpp(struct crypto_kpp *tfm, const char *alg,
+- struct kpp_testvec *vecs, unsigned int tcount)
++ const struct kpp_testvec *vecs, unsigned int tcount)
+ {
+ int ret, i;
+
+@@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
+ struct crypto_kpp *tfm;
+ int err = 0;
+
+- tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
++ tfm = crypto_alloc_kpp(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
+ driver, PTR_ERR(tfm));
+@@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
+ }
+
+ static int test_akcipher_one(struct crypto_akcipher *tfm,
+- struct akcipher_testvec *vecs)
++ const struct akcipher_testvec *vecs)
+ {
+ char *xbuf[XBUFSIZE];
+ struct akcipher_request *req;
+@@ -2044,7 +2451,8 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
+ }
+
+ static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
+- struct akcipher_testvec *vecs, unsigned int tcount)
++ const struct akcipher_testvec *vecs,
++ unsigned int tcount)
+ {
+ const char *algo =
+ crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
+@@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
+ struct crypto_akcipher *tfm;
+ int err = 0;
+
+- tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
++ tfm = crypto_alloc_akcipher(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
+ driver, PTR_ERR(tfm));
+@@ -2088,112 +2496,23 @@ static int alg_test_null(const struct alg_test_desc *desc,
+ return 0;
+ }
+
++#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) }
++
+ /* Please keep this list sorted by algorithm name. */
+ static const struct alg_test_desc alg_test_descs[] = {
+ {
+- .alg = "__cbc-cast5-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__cbc-cast6-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__cbc-serpent-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__cbc-serpent-avx2",
+- .test = alg_test_null,
+- }, {
+- .alg = "__cbc-serpent-sse2",
+- .test = alg_test_null,
+- }, {
+- .alg = "__cbc-twofish-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-cbc-aes-aesni",
+- .test = alg_test_null,
+- .fips_allowed = 1,
+- }, {
+- .alg = "__driver-cbc-camellia-aesni",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-cbc-camellia-aesni-avx2",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-cbc-cast5-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-cbc-cast6-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-cbc-serpent-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-cbc-serpent-avx2",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-cbc-serpent-sse2",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-cbc-twofish-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-ecb-aes-aesni",
+- .test = alg_test_null,
+- .fips_allowed = 1,
+- }, {
+- .alg = "__driver-ecb-camellia-aesni",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-ecb-camellia-aesni-avx2",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-ecb-cast5-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-ecb-cast6-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-ecb-serpent-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-ecb-serpent-avx2",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-ecb-serpent-sse2",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-ecb-twofish-avx",
+- .test = alg_test_null,
+- }, {
+- .alg = "__driver-gcm-aes-aesni",
+- .test = alg_test_null,
+- .fips_allowed = 1,
+- }, {
+- .alg = "__ghash-pclmulqdqni",
+- .test = alg_test_null,
+- .fips_allowed = 1,
+- }, {
+ .alg = "ansi_cprng",
+ .test = alg_test_cprng,
+ .suite = {
+- .cprng = {
+- .vecs = ansi_cprng_aes_tv_template,
+- .count = ANSI_CPRNG_AES_TEST_VECTORS
+- }
++ .cprng = __VECS(ansi_cprng_aes_tv_template)
+ }
+ }, {
+ .alg = "authenc(hmac(md5),ecb(cipher_null))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
+- .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
+- .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
++ .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha1_aes_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA1_AES_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha1_des_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA1_DES_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha1_des3_ede_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha1_ecb_cipher_null_enc_tv_temp,
+- .count =
+- HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
+- },
+- .dec = {
+- .vecs =
+- hmac_sha1_ecb_cipher_null_dec_tv_temp,
+- .count =
+- HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
++ .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
+ }
+ }
+ }, {
+@@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha224_des_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA224_DES_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha224_des3_ede_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha256_aes_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA256_AES_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha256_des_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA256_DES_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha256_des3_ede_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha384_des_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA384_DES_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha384_des3_ede_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha512_aes_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA512_AES_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha512_des_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA512_DES_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs =
+- hmac_sha512_des3_ede_cbc_enc_tv_temp,
+- .count =
+- HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
+- }
++ .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
+ }
+ }
+ }, {
+@@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = aes_cbc_enc_tv_template,
+- .count = AES_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_cbc_dec_tv_template,
+- .count = AES_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_cbc_enc_tv_template),
++ .dec = __VECS(aes_cbc_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = anubis_cbc_enc_tv_template,
+- .count = ANUBIS_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = anubis_cbc_dec_tv_template,
+- .count = ANUBIS_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(anubis_cbc_enc_tv_template),
++ .dec = __VECS(anubis_cbc_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = bf_cbc_enc_tv_template,
+- .count = BF_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = bf_cbc_dec_tv_template,
+- .count = BF_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(bf_cbc_enc_tv_template),
++ .dec = __VECS(bf_cbc_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = camellia_cbc_enc_tv_template,
+- .count = CAMELLIA_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = camellia_cbc_dec_tv_template,
+- .count = CAMELLIA_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(camellia_cbc_enc_tv_template),
++ .dec = __VECS(camellia_cbc_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = cast5_cbc_enc_tv_template,
+- .count = CAST5_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = cast5_cbc_dec_tv_template,
+- .count = CAST5_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(cast5_cbc_enc_tv_template),
++ .dec = __VECS(cast5_cbc_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = cast6_cbc_enc_tv_template,
+- .count = CAST6_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = cast6_cbc_dec_tv_template,
+- .count = CAST6_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(cast6_cbc_enc_tv_template),
++ .dec = __VECS(cast6_cbc_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = des_cbc_enc_tv_template,
+- .count = DES_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = des_cbc_dec_tv_template,
+- .count = DES_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(des_cbc_enc_tv_template),
++ .dec = __VECS(des_cbc_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = des3_ede_cbc_enc_tv_template,
+- .count = DES3_EDE_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = des3_ede_cbc_dec_tv_template,
+- .count = DES3_EDE_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(des3_ede_cbc_enc_tv_template),
++ .dec = __VECS(des3_ede_cbc_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = serpent_cbc_enc_tv_template,
+- .count = SERPENT_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = serpent_cbc_dec_tv_template,
+- .count = SERPENT_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(serpent_cbc_enc_tv_template),
++ .dec = __VECS(serpent_cbc_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = tf_cbc_enc_tv_template,
+- .count = TF_CBC_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = tf_cbc_dec_tv_template,
+- .count = TF_CBC_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(tf_cbc_enc_tv_template),
++ .dec = __VECS(tf_cbc_dec_tv_template)
+ }
+ }
++ }, {
++ .alg = "cbcmac(aes)",
++ .fips_allowed = 1,
++ .test = alg_test_hash,
++ .suite = {
++ .hash = __VECS(aes_cbcmac_tv_template)
++ }
+ }, {
+ .alg = "ccm(aes)",
+ .test = alg_test_aead,
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs = aes_ccm_enc_tv_template,
+- .count = AES_CCM_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_ccm_dec_tv_template,
+- .count = AES_CCM_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_ccm_enc_tv_template),
++ .dec = __VECS(aes_ccm_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = chacha20_enc_tv_template,
+- .count = CHACHA20_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = chacha20_enc_tv_template,
+- .count = CHACHA20_ENC_TEST_VECTORS
+- },
++ .enc = __VECS(chacha20_enc_tv_template),
++ .dec = __VECS(chacha20_enc_tv_template),
+ }
+ }
+ }, {
+@@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = aes_cmac128_tv_template,
+- .count = CMAC_AES_TEST_VECTORS
+- }
++ .hash = __VECS(aes_cmac128_tv_template)
+ }
+ }, {
+ .alg = "cmac(des3_ede)",
+ .fips_allowed = 1,
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = des3_ede_cmac64_tv_template,
+- .count = CMAC_DES3_EDE_TEST_VECTORS
+- }
++ .hash = __VECS(des3_ede_cmac64_tv_template)
+ }
+ }, {
+ .alg = "compress_null",
+@@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .alg = "crc32",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = crc32_tv_template,
+- .count = CRC32_TEST_VECTORS
+- }
++ .hash = __VECS(crc32_tv_template)
+ }
+ }, {
+ .alg = "crc32c",
+ .test = alg_test_crc32c,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = crc32c_tv_template,
+- .count = CRC32C_TEST_VECTORS
+- }
++ .hash = __VECS(crc32c_tv_template)
+ }
+ }, {
+ .alg = "crct10dif",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = crct10dif_tv_template,
+- .count = CRCT10DIF_TEST_VECTORS
+- }
++ .hash = __VECS(crct10dif_tv_template)
+ }
+- }, {
+- .alg = "cryptd(__driver-cbc-aes-aesni)",
+- .test = alg_test_null,
+- .fips_allowed = 1,
+- }, {
+- .alg = "cryptd(__driver-cbc-camellia-aesni)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-cbc-serpent-avx2)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-ecb-aes-aesni)",
+- .test = alg_test_null,
+- .fips_allowed = 1,
+- }, {
+- .alg = "cryptd(__driver-ecb-camellia-aesni)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-ecb-cast5-avx)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-ecb-cast6-avx)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-ecb-serpent-avx)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-ecb-serpent-avx2)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-ecb-serpent-sse2)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-ecb-twofish-avx)",
+- .test = alg_test_null,
+- }, {
+- .alg = "cryptd(__driver-gcm-aes-aesni)",
+- .test = alg_test_null,
+- .fips_allowed = 1,
+- }, {
+- .alg = "cryptd(__ghash-pclmulqdqni)",
+- .test = alg_test_null,
+- .fips_allowed = 1,
+ }, {
+ .alg = "ctr(aes)",
+ .test = alg_test_skcipher,
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = aes_ctr_enc_tv_template,
+- .count = AES_CTR_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_ctr_dec_tv_template,
+- .count = AES_CTR_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_ctr_enc_tv_template),
++ .dec = __VECS(aes_ctr_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = bf_ctr_enc_tv_template,
+- .count = BF_CTR_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = bf_ctr_dec_tv_template,
+- .count = BF_CTR_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(bf_ctr_enc_tv_template),
++ .dec = __VECS(bf_ctr_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = camellia_ctr_enc_tv_template,
+- .count = CAMELLIA_CTR_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = camellia_ctr_dec_tv_template,
+- .count = CAMELLIA_CTR_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(camellia_ctr_enc_tv_template),
++ .dec = __VECS(camellia_ctr_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = cast5_ctr_enc_tv_template,
+- .count = CAST5_CTR_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = cast5_ctr_dec_tv_template,
+- .count = CAST5_CTR_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(cast5_ctr_enc_tv_template),
++ .dec = __VECS(cast5_ctr_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = cast6_ctr_enc_tv_template,
+- .count = CAST6_CTR_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = cast6_ctr_dec_tv_template,
+- .count = CAST6_CTR_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(cast6_ctr_enc_tv_template),
++ .dec = __VECS(cast6_ctr_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = des_ctr_enc_tv_template,
+- .count = DES_CTR_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = des_ctr_dec_tv_template,
+- .count = DES_CTR_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(des_ctr_enc_tv_template),
++ .dec = __VECS(des_ctr_dec_tv_template)
+ }
+ }
+ }, {
+ .alg = "ctr(des3_ede)",
+ .test = alg_test_skcipher,
++ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = des3_ede_ctr_enc_tv_template,
+- .count = DES3_EDE_CTR_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = des3_ede_ctr_dec_tv_template,
+- .count = DES3_EDE_CTR_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(des3_ede_ctr_enc_tv_template),
++ .dec = __VECS(des3_ede_ctr_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = serpent_ctr_enc_tv_template,
+- .count = SERPENT_CTR_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = serpent_ctr_dec_tv_template,
+- .count = SERPENT_CTR_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(serpent_ctr_enc_tv_template),
++ .dec = __VECS(serpent_ctr_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = tf_ctr_enc_tv_template,
+- .count = TF_CTR_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = tf_ctr_dec_tv_template,
+- .count = TF_CTR_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(tf_ctr_enc_tv_template),
++ .dec = __VECS(tf_ctr_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = cts_mode_enc_tv_template,
+- .count = CTS_MODE_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = cts_mode_dec_tv_template,
+- .count = CTS_MODE_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(cts_mode_enc_tv_template),
++ .dec = __VECS(cts_mode_dec_tv_template)
+ }
+ }
+ }, {
+@@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .comp = {
+- .comp = {
+- .vecs = deflate_comp_tv_template,
+- .count = DEFLATE_COMP_TEST_VECTORS
+- },
+- .decomp = {
+- .vecs = deflate_decomp_tv_template,
+- .count = DEFLATE_DECOMP_TEST_VECTORS
+- }
++ .comp = __VECS(deflate_comp_tv_template),
++ .decomp = __VECS(deflate_decomp_tv_template)
+ }
+ }
+ }, {
+@@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+- .kpp = {
+- .vecs = dh_tv_template,
+- .count = DH_TEST_VECTORS
+- }
++ .kpp = __VECS(dh_tv_template)
+ }
+ }, {
+ .alg = "digest_null",
+@@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_drbg,
+ .fips_allowed = 1,
+ .suite = {
+- .drbg = {
+- .vecs = drbg_nopr_ctr_aes128_tv_template,
+- .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
+- }
++ .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
+ }
+ }, {
+ .alg = "drbg_nopr_ctr_aes192",
+ .test = alg_test_drbg,
+ .fips_allowed = 1,
+ .suite = {
+- .drbg = {
+- .vecs = drbg_nopr_ctr_aes192_tv_template,
+- .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
+- }
++ .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
+ }
+ }, {
+ .alg = "drbg_nopr_ctr_aes256",
+ .test = alg_test_drbg,
+ .fips_allowed = 1,
+ .suite = {
+- .drbg = {
+- .vecs = drbg_nopr_ctr_aes256_tv_template,
+- .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
+- }
++ .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
+ }
+ }, {
+ /*
+@@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_drbg,
+ .fips_allowed = 1,
+ .suite = {
+- .drbg = {
+- .vecs = drbg_nopr_hmac_sha256_tv_template,
+- .count =
+- ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
+- }
++ .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
+ }
+ }, {
+ /* covered by drbg_nopr_hmac_sha256 test */
+@@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_drbg,
+ .fips_allowed = 1,
+ .suite = {
+- .drbg = {
+- .vecs = drbg_nopr_sha256_tv_template,
+- .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
+- }
++ .drbg = __VECS(drbg_nopr_sha256_tv_template)
+ }
+ }, {
+ /* covered by drbg_nopr_sha256 test */
+@@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_drbg,
+ .fips_allowed = 1,
+ .suite = {
+- .drbg = {
+- .vecs = drbg_pr_ctr_aes128_tv_template,
+- .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
+- }
++ .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
+ }
+ }, {
+ /* covered by drbg_pr_ctr_aes128 test */
+@@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_drbg,
+ .fips_allowed = 1,
+ .suite = {
+- .drbg = {
+- .vecs = drbg_pr_hmac_sha256_tv_template,
+- .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
+- }
++ .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
+ }
+ }, {
+ /* covered by drbg_pr_hmac_sha256 test */
+@@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_drbg,
+ .fips_allowed = 1,
+ .suite = {
+- .drbg = {
+- .vecs = drbg_pr_sha256_tv_template,
+- .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
+- }
++ .drbg = __VECS(drbg_pr_sha256_tv_template)
+ }
+ }, {
+ /* covered by drbg_pr_sha256 test */
+@@ -3033,24 +3055,14 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .alg = "drbg_pr_sha512",
+ .fips_allowed = 1,
+ .test = alg_test_null,
+- }, {
+- .alg = "ecb(__aes-aesni)",
+- .test = alg_test_null,
+- .fips_allowed = 1,
+ }, {
+ .alg = "ecb(aes)",
+ .test = alg_test_skcipher,
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = aes_enc_tv_template,
+- .count = AES_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_dec_tv_template,
+- .count = AES_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_enc_tv_template),
++ .dec = __VECS(aes_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = anubis_enc_tv_template,
+- .count = ANUBIS_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = anubis_dec_tv_template,
+- .count = ANUBIS_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(anubis_enc_tv_template),
++ .dec = __VECS(anubis_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = arc4_enc_tv_template,
+- .count = ARC4_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = arc4_dec_tv_template,
+- .count = ARC4_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(arc4_enc_tv_template),
++ .dec = __VECS(arc4_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = bf_enc_tv_template,
+- .count = BF_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = bf_dec_tv_template,
+- .count = BF_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(bf_enc_tv_template),
++ .dec = __VECS(bf_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = camellia_enc_tv_template,
+- .count = CAMELLIA_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = camellia_dec_tv_template,
+- .count = CAMELLIA_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(camellia_enc_tv_template),
++ .dec = __VECS(camellia_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = cast5_enc_tv_template,
+- .count = CAST5_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = cast5_dec_tv_template,
+- .count = CAST5_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(cast5_enc_tv_template),
++ .dec = __VECS(cast5_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = cast6_enc_tv_template,
+- .count = CAST6_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = cast6_dec_tv_template,
+- .count = CAST6_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(cast6_enc_tv_template),
++ .dec = __VECS(cast6_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = des_enc_tv_template,
+- .count = DES_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = des_dec_tv_template,
+- .count = DES_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(des_enc_tv_template),
++ .dec = __VECS(des_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = des3_ede_enc_tv_template,
+- .count = DES3_EDE_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = des3_ede_dec_tv_template,
+- .count = DES3_EDE_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(des3_ede_enc_tv_template),
++ .dec = __VECS(des3_ede_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = khazad_enc_tv_template,
+- .count = KHAZAD_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = khazad_dec_tv_template,
+- .count = KHAZAD_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(khazad_enc_tv_template),
++ .dec = __VECS(khazad_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = seed_enc_tv_template,
+- .count = SEED_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = seed_dec_tv_template,
+- .count = SEED_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(seed_enc_tv_template),
++ .dec = __VECS(seed_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = serpent_enc_tv_template,
+- .count = SERPENT_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = serpent_dec_tv_template,
+- .count = SERPENT_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(serpent_enc_tv_template),
++ .dec = __VECS(serpent_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = tea_enc_tv_template,
+- .count = TEA_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = tea_dec_tv_template,
+- .count = TEA_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(tea_enc_tv_template),
++ .dec = __VECS(tea_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = tnepres_enc_tv_template,
+- .count = TNEPRES_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = tnepres_dec_tv_template,
+- .count = TNEPRES_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(tnepres_enc_tv_template),
++ .dec = __VECS(tnepres_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = tf_enc_tv_template,
+- .count = TF_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = tf_dec_tv_template,
+- .count = TF_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(tf_enc_tv_template),
++ .dec = __VECS(tf_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = xeta_enc_tv_template,
+- .count = XETA_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = xeta_dec_tv_template,
+- .count = XETA_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(xeta_enc_tv_template),
++ .dec = __VECS(xeta_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = xtea_enc_tv_template,
+- .count = XTEA_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = xtea_dec_tv_template,
+- .count = XTEA_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(xtea_enc_tv_template),
++ .dec = __VECS(xtea_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+- .kpp = {
+- .vecs = ecdh_tv_template,
+- .count = ECDH_TEST_VECTORS
+- }
++ .kpp = __VECS(ecdh_tv_template)
+ }
+ }, {
+ .alg = "gcm(aes)",
+@@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs = aes_gcm_enc_tv_template,
+- .count = AES_GCM_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_gcm_dec_tv_template,
+- .count = AES_GCM_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_gcm_enc_tv_template),
++ .dec = __VECS(aes_gcm_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = ghash_tv_template,
+- .count = GHASH_TEST_VECTORS
+- }
++ .hash = __VECS(ghash_tv_template)
+ }
+ }, {
+ .alg = "hmac(crc32)",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = bfin_crc_tv_template,
+- .count = BFIN_CRC_TEST_VECTORS
+- }
++ .hash = __VECS(bfin_crc_tv_template)
+ }
+ }, {
+ .alg = "hmac(md5)",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = hmac_md5_tv_template,
+- .count = HMAC_MD5_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_md5_tv_template)
+ }
+ }, {
+ .alg = "hmac(rmd128)",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = hmac_rmd128_tv_template,
+- .count = HMAC_RMD128_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_rmd128_tv_template)
+ }
+ }, {
+ .alg = "hmac(rmd160)",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = hmac_rmd160_tv_template,
+- .count = HMAC_RMD160_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_rmd160_tv_template)
+ }
+ }, {
+ .alg = "hmac(sha1)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = hmac_sha1_tv_template,
+- .count = HMAC_SHA1_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_sha1_tv_template)
+ }
+ }, {
+ .alg = "hmac(sha224)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = hmac_sha224_tv_template,
+- .count = HMAC_SHA224_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_sha224_tv_template)
+ }
+ }, {
+ .alg = "hmac(sha256)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = hmac_sha256_tv_template,
+- .count = HMAC_SHA256_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_sha256_tv_template)
+ }
+ }, {
+ .alg = "hmac(sha3-224)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = hmac_sha3_224_tv_template,
+- .count = HMAC_SHA3_224_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_sha3_224_tv_template)
+ }
+ }, {
+ .alg = "hmac(sha3-256)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = hmac_sha3_256_tv_template,
+- .count = HMAC_SHA3_256_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_sha3_256_tv_template)
+ }
+ }, {
+ .alg = "hmac(sha3-384)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = hmac_sha3_384_tv_template,
+- .count = HMAC_SHA3_384_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_sha3_384_tv_template)
+ }
+ }, {
+ .alg = "hmac(sha3-512)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = hmac_sha3_512_tv_template,
+- .count = HMAC_SHA3_512_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_sha3_512_tv_template)
+ }
+ }, {
+ .alg = "hmac(sha384)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = hmac_sha384_tv_template,
+- .count = HMAC_SHA384_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_sha384_tv_template)
+ }
+ }, {
+ .alg = "hmac(sha512)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = hmac_sha512_tv_template,
+- .count = HMAC_SHA512_TEST_VECTORS
+- }
++ .hash = __VECS(hmac_sha512_tv_template)
+ }
+ }, {
+ .alg = "jitterentropy_rng",
+@@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = aes_kw_enc_tv_template,
+- .count = ARRAY_SIZE(aes_kw_enc_tv_template)
+- },
+- .dec = {
+- .vecs = aes_kw_dec_tv_template,
+- .count = ARRAY_SIZE(aes_kw_dec_tv_template)
+- }
++ .enc = __VECS(aes_kw_enc_tv_template),
++ .dec = __VECS(aes_kw_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = aes_lrw_enc_tv_template,
+- .count = AES_LRW_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_lrw_dec_tv_template,
+- .count = AES_LRW_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_lrw_enc_tv_template),
++ .dec = __VECS(aes_lrw_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = camellia_lrw_enc_tv_template,
+- .count = CAMELLIA_LRW_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = camellia_lrw_dec_tv_template,
+- .count = CAMELLIA_LRW_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(camellia_lrw_enc_tv_template),
++ .dec = __VECS(camellia_lrw_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = cast6_lrw_enc_tv_template,
+- .count = CAST6_LRW_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = cast6_lrw_dec_tv_template,
+- .count = CAST6_LRW_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(cast6_lrw_enc_tv_template),
++ .dec = __VECS(cast6_lrw_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = serpent_lrw_enc_tv_template,
+- .count = SERPENT_LRW_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = serpent_lrw_dec_tv_template,
+- .count = SERPENT_LRW_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(serpent_lrw_enc_tv_template),
++ .dec = __VECS(serpent_lrw_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = tf_lrw_enc_tv_template,
+- .count = TF_LRW_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = tf_lrw_dec_tv_template,
+- .count = TF_LRW_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(tf_lrw_enc_tv_template),
++ .dec = __VECS(tf_lrw_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .comp = {
+- .comp = {
+- .vecs = lz4_comp_tv_template,
+- .count = LZ4_COMP_TEST_VECTORS
+- },
+- .decomp = {
+- .vecs = lz4_decomp_tv_template,
+- .count = LZ4_DECOMP_TEST_VECTORS
+- }
++ .comp = __VECS(lz4_comp_tv_template),
++ .decomp = __VECS(lz4_decomp_tv_template)
+ }
+ }
+ }, {
+@@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .comp = {
+- .comp = {
+- .vecs = lz4hc_comp_tv_template,
+- .count = LZ4HC_COMP_TEST_VECTORS
+- },
+- .decomp = {
+- .vecs = lz4hc_decomp_tv_template,
+- .count = LZ4HC_DECOMP_TEST_VECTORS
+- }
++ .comp = __VECS(lz4hc_comp_tv_template),
++ .decomp = __VECS(lz4hc_decomp_tv_template)
+ }
+ }
+ }, {
+@@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .comp = {
+- .comp = {
+- .vecs = lzo_comp_tv_template,
+- .count = LZO_COMP_TEST_VECTORS
+- },
+- .decomp = {
+- .vecs = lzo_decomp_tv_template,
+- .count = LZO_DECOMP_TEST_VECTORS
+- }
++ .comp = __VECS(lzo_comp_tv_template),
++ .decomp = __VECS(lzo_decomp_tv_template)
+ }
+ }
+ }, {
+ .alg = "md4",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = md4_tv_template,
+- .count = MD4_TEST_VECTORS
+- }
++ .hash = __VECS(md4_tv_template)
+ }
+ }, {
+ .alg = "md5",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = md5_tv_template,
+- .count = MD5_TEST_VECTORS
+- }
++ .hash = __VECS(md5_tv_template)
+ }
+ }, {
+ .alg = "michael_mic",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = michael_mic_tv_template,
+- .count = MICHAEL_MIC_TEST_VECTORS
+- }
++ .hash = __VECS(michael_mic_tv_template)
+ }
+ }, {
+ .alg = "ofb(aes)",
+@@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = aes_ofb_enc_tv_template,
+- .count = AES_OFB_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_ofb_dec_tv_template,
+- .count = AES_OFB_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_ofb_enc_tv_template),
++ .dec = __VECS(aes_ofb_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = fcrypt_pcbc_enc_tv_template,
+- .count = FCRYPT_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = fcrypt_pcbc_dec_tv_template,
+- .count = FCRYPT_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(fcrypt_pcbc_enc_tv_template),
++ .dec = __VECS(fcrypt_pcbc_dec_tv_template)
+ }
+ }
+ }, {
+ .alg = "poly1305",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = poly1305_tv_template,
+- .count = POLY1305_TEST_VECTORS
+- }
++ .hash = __VECS(poly1305_tv_template)
+ }
+ }, {
+ .alg = "rfc3686(ctr(aes))",
+@@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = aes_ctr_rfc3686_enc_tv_template,
+- .count = AES_CTR_3686_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_ctr_rfc3686_dec_tv_template,
+- .count = AES_CTR_3686_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
++ .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs = aes_gcm_rfc4106_enc_tv_template,
+- .count = AES_GCM_4106_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_gcm_rfc4106_dec_tv_template,
+- .count = AES_GCM_4106_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
++ .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs = aes_ccm_rfc4309_enc_tv_template,
+- .count = AES_CCM_4309_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_ccm_rfc4309_dec_tv_template,
+- .count = AES_CCM_4309_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
++ .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
+ }
+ }
+ }, {
+@@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs = aes_gcm_rfc4543_enc_tv_template,
+- .count = AES_GCM_4543_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_gcm_rfc4543_dec_tv_template,
+- .count = AES_GCM_4543_DEC_TEST_VECTORS
+- },
++ .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
++ .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
+ }
+ }
+ }, {
+@@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs = rfc7539_enc_tv_template,
+- .count = RFC7539_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = rfc7539_dec_tv_template,
+- .count = RFC7539_DEC_TEST_VECTORS
+- },
++ .enc = __VECS(rfc7539_enc_tv_template),
++ .dec = __VECS(rfc7539_dec_tv_template),
+ }
+ }
+ }, {
+@@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+- .enc = {
+- .vecs = rfc7539esp_enc_tv_template,
+- .count = RFC7539ESP_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = rfc7539esp_dec_tv_template,
+- .count = RFC7539ESP_DEC_TEST_VECTORS
+- },
++ .enc = __VECS(rfc7539esp_enc_tv_template),
++ .dec = __VECS(rfc7539esp_dec_tv_template),
+ }
+ }
+ }, {
+ .alg = "rmd128",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = rmd128_tv_template,
+- .count = RMD128_TEST_VECTORS
+- }
++ .hash = __VECS(rmd128_tv_template)
+ }
+ }, {
+ .alg = "rmd160",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = rmd160_tv_template,
+- .count = RMD160_TEST_VECTORS
+- }
++ .hash = __VECS(rmd160_tv_template)
+ }
+ }, {
+ .alg = "rmd256",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = rmd256_tv_template,
+- .count = RMD256_TEST_VECTORS
+- }
++ .hash = __VECS(rmd256_tv_template)
+ }
+ }, {
+ .alg = "rmd320",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = rmd320_tv_template,
+- .count = RMD320_TEST_VECTORS
+- }
++ .hash = __VECS(rmd320_tv_template)
+ }
+ }, {
+ .alg = "rsa",
+ .test = alg_test_akcipher,
+ .fips_allowed = 1,
+ .suite = {
+- .akcipher = {
+- .vecs = rsa_tv_template,
+- .count = RSA_TEST_VECTORS
+- }
++ .akcipher = __VECS(rsa_tv_template)
+ }
+ }, {
+ .alg = "salsa20",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = salsa20_stream_enc_tv_template,
+- .count = SALSA20_STREAM_ENC_TEST_VECTORS
+- }
++ .enc = __VECS(salsa20_stream_enc_tv_template)
+ }
+ }
+ }, {
+@@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = sha1_tv_template,
+- .count = SHA1_TEST_VECTORS
+- }
++ .hash = __VECS(sha1_tv_template)
+ }
+ }, {
+ .alg = "sha224",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = sha224_tv_template,
+- .count = SHA224_TEST_VECTORS
+- }
++ .hash = __VECS(sha224_tv_template)
+ }
+ }, {
+ .alg = "sha256",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = sha256_tv_template,
+- .count = SHA256_TEST_VECTORS
+- }
++ .hash = __VECS(sha256_tv_template)
+ }
+ }, {
+ .alg = "sha3-224",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = sha3_224_tv_template,
+- .count = SHA3_224_TEST_VECTORS
+- }
++ .hash = __VECS(sha3_224_tv_template)
+ }
+ }, {
+ .alg = "sha3-256",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = sha3_256_tv_template,
+- .count = SHA3_256_TEST_VECTORS
+- }
++ .hash = __VECS(sha3_256_tv_template)
+ }
+ }, {
+ .alg = "sha3-384",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = sha3_384_tv_template,
+- .count = SHA3_384_TEST_VECTORS
+- }
++ .hash = __VECS(sha3_384_tv_template)
+ }
+ }, {
+ .alg = "sha3-512",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = sha3_512_tv_template,
+- .count = SHA3_512_TEST_VECTORS
+- }
++ .hash = __VECS(sha3_512_tv_template)
+ }
+ }, {
+ .alg = "sha384",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = sha384_tv_template,
+- .count = SHA384_TEST_VECTORS
+- }
++ .hash = __VECS(sha384_tv_template)
+ }
+ }, {
+ .alg = "sha512",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+- .hash = {
+- .vecs = sha512_tv_template,
+- .count = SHA512_TEST_VECTORS
+- }
++ .hash = __VECS(sha512_tv_template)
+ }
+ }, {
+ .alg = "tgr128",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = tgr128_tv_template,
+- .count = TGR128_TEST_VECTORS
+- }
++ .hash = __VECS(tgr128_tv_template)
+ }
+ }, {
+ .alg = "tgr160",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = tgr160_tv_template,
+- .count = TGR160_TEST_VECTORS
+- }
++ .hash = __VECS(tgr160_tv_template)
+ }
+ }, {
+ .alg = "tgr192",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = tgr192_tv_template,
+- .count = TGR192_TEST_VECTORS
++ .hash = __VECS(tgr192_tv_template)
++ }
++ }, {
++ .alg = "tls10(hmac(sha1),cbc(aes))",
++ .test = alg_test_tls,
++ .suite = {
++ .tls = {
++ .enc = __VECS(tls_enc_tv_template),
++ .dec = __VECS(tls_dec_tv_template)
+ }
+ }
+ }, {
+ .alg = "vmac(aes)",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = aes_vmac128_tv_template,
+- .count = VMAC_AES_TEST_VECTORS
+- }
++ .hash = __VECS(aes_vmac128_tv_template)
+ }
+ }, {
+ .alg = "wp256",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = wp256_tv_template,
+- .count = WP256_TEST_VECTORS
+- }
++ .hash = __VECS(wp256_tv_template)
+ }
+ }, {
+ .alg = "wp384",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = wp384_tv_template,
+- .count = WP384_TEST_VECTORS
+- }
++ .hash = __VECS(wp384_tv_template)
+ }
+ }, {
+ .alg = "wp512",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = wp512_tv_template,
+- .count = WP512_TEST_VECTORS
+- }
++ .hash = __VECS(wp512_tv_template)
+ }
+ }, {
+ .alg = "xcbc(aes)",
+ .test = alg_test_hash,
+ .suite = {
+- .hash = {
+- .vecs = aes_xcbc128_tv_template,
+- .count = XCBC_AES_TEST_VECTORS
+- }
++ .hash = __VECS(aes_xcbc128_tv_template)
+ }
+ }, {
+ .alg = "xts(aes)",
+@@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = aes_xts_enc_tv_template,
+- .count = AES_XTS_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = aes_xts_dec_tv_template,
+- .count = AES_XTS_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(aes_xts_enc_tv_template),
++ .dec = __VECS(aes_xts_dec_tv_template)
+ }
+ }
+ }, {
+@@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = camellia_xts_enc_tv_template,
+- .count = CAMELLIA_XTS_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = camellia_xts_dec_tv_template,
+- .count = CAMELLIA_XTS_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(camellia_xts_enc_tv_template),
++ .dec = __VECS(camellia_xts_dec_tv_template)
+ }
+ }
+ }, {
+@@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = cast6_xts_enc_tv_template,
+- .count = CAST6_XTS_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = cast6_xts_dec_tv_template,
+- .count = CAST6_XTS_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(cast6_xts_enc_tv_template),
++ .dec = __VECS(cast6_xts_dec_tv_template)
+ }
+ }
+ }, {
+@@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = serpent_xts_enc_tv_template,
+- .count = SERPENT_XTS_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = serpent_xts_dec_tv_template,
+- .count = SERPENT_XTS_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(serpent_xts_enc_tv_template),
++ .dec = __VECS(serpent_xts_dec_tv_template)
+ }
+ }
+ }, {
+@@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_test_descs[] = {
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+- .enc = {
+- .vecs = tf_xts_enc_tv_template,
+- .count = TF_XTS_ENC_TEST_VECTORS
+- },
+- .dec = {
+- .vecs = tf_xts_dec_tv_template,
+- .count = TF_XTS_DEC_TEST_VECTORS
+- }
++ .enc = __VECS(tf_xts_enc_tv_template),
++ .dec = __VECS(tf_xts_dec_tv_template)
+ }
+ }
+ }
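The testmgr.c hunks above all perform the same mechanical conversion: each open-coded { .vecs = ..., .count = *_TEST_VECTORS } initializer collapses into the __VECS() macro defined earlier in the file, which derives the element count from the array itself via ARRAY_SIZE(). The hand-maintained *_TEST_VECTORS constants can then be dropped from testmgr.h below and can no longer drift out of sync with their vector tables. A minimal standalone C sketch of the pattern — only __VECS() and ARRAY_SIZE() mirror the kernel code, the demo_* names are illustrative:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define __VECS(tv)      { .vecs = tv, .count = ARRAY_SIZE(tv) }

struct demo_testvec {
        const char *plaintext;
        const char *digest;
};

struct demo_suite {
        const struct demo_testvec *vecs;
        unsigned int count;
};

/* adding or removing entries automatically updates .count */
static const struct demo_testvec demo_tv_template[] = {
        { .plaintext = "",    .digest = "\x31\xd6" },
        { .plaintext = "abc", .digest = "\xa4\x48" },
};

int main(void)
{
        struct demo_suite suite = __VECS(demo_tv_template);

        printf("%u vectors\n", suite.count);    /* prints: 2 vectors */
        return 0;
}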
+diff --git a/crypto/testmgr.h b/crypto/testmgr.h
+index 9033088c..ce9f4334 100644
+--- a/crypto/testmgr.h
++++ b/crypto/testmgr.h
+@@ -34,9 +34,9 @@
+
+ struct hash_testvec {
+ /* only used with keyed hash algorithms */
+- char *key;
+- char *plaintext;
+- char *digest;
++ const char *key;
++ const char *plaintext;
++ const char *digest;
+ unsigned char tap[MAX_TAP];
+ unsigned short psize;
+ unsigned char np;
+@@ -63,11 +63,11 @@ struct hash_testvec {
+ */
+
+ struct cipher_testvec {
+- char *key;
+- char *iv;
+- char *iv_out;
+- char *input;
+- char *result;
++ const char *key;
++ const char *iv;
++ const char *iv_out;
++ const char *input;
++ const char *result;
+ unsigned short tap[MAX_TAP];
+ int np;
+ unsigned char also_non_np;
+@@ -80,11 +80,11 @@ struct cipher_testvec {
+ };
+
+ struct aead_testvec {
+- char *key;
+- char *iv;
+- char *input;
+- char *assoc;
+- char *result;
++ const char *key;
++ const char *iv;
++ const char *input;
++ const char *assoc;
++ const char *result;
+ unsigned char tap[MAX_TAP];
+ unsigned char atap[MAX_TAP];
+ int np;
+@@ -99,10 +99,10 @@ struct aead_testvec {
+ };
+
+ struct cprng_testvec {
+- char *key;
+- char *dt;
+- char *v;
+- char *result;
++ const char *key;
++ const char *dt;
++ const char *v;
++ const char *result;
+ unsigned char klen;
+ unsigned short dtlen;
+ unsigned short vlen;
+@@ -111,24 +111,38 @@ struct cprng_testvec {
+ };
+
+ struct drbg_testvec {
+- unsigned char *entropy;
++ const unsigned char *entropy;
+ size_t entropylen;
+- unsigned char *entpra;
+- unsigned char *entprb;
++ const unsigned char *entpra;
++ const unsigned char *entprb;
+ size_t entprlen;
+- unsigned char *addtla;
+- unsigned char *addtlb;
++ const unsigned char *addtla;
++ const unsigned char *addtlb;
+ size_t addtllen;
+- unsigned char *pers;
++ const unsigned char *pers;
+ size_t perslen;
+- unsigned char *expected;
++ const unsigned char *expected;
+ size_t expectedlen;
+ };
+
++struct tls_testvec {
++ char *key; /* wrapped keys for encryption and authentication */
++ char *iv; /* initialization vector */
++ char *input; /* input data */
++ char *assoc; /* associated data: seq num, type, version, input len */
++ char *result; /* result data */
++ unsigned char fail; /* the test failure is expected */
++ unsigned char novrfy; /* dec verification failure expected */
++ unsigned char klen; /* key length */
++ unsigned short ilen; /* input data length */
++ unsigned short alen; /* associated data length */
++ unsigned short rlen; /* result length */
++};
++
+ struct akcipher_testvec {
+- unsigned char *key;
+- unsigned char *m;
+- unsigned char *c;
++ const unsigned char *key;
++ const unsigned char *m;
++ const unsigned char *c;
+ unsigned int key_len;
+ unsigned int m_size;
+ unsigned int c_size;
+@@ -136,27 +150,227 @@ struct akcipher_testvec {
+ };
+
+ struct kpp_testvec {
+- unsigned char *secret;
+- unsigned char *b_public;
+- unsigned char *expected_a_public;
+- unsigned char *expected_ss;
++ const unsigned char *secret;
++ const unsigned char *b_public;
++ const unsigned char *expected_a_public;
++ const unsigned char *expected_ss;
+ unsigned short secret_size;
+ unsigned short b_public_size;
+ unsigned short expected_a_public_size;
+ unsigned short expected_ss_size;
+ };
+
+-static char zeroed_string[48];
++static const char zeroed_string[48];
+
+ /*
+- * RSA test vectors. Borrowed from openSSL.
++ * TLS1.0 synthetic test vectors
+ */
+-#ifdef CONFIG_CRYPTO_FIPS
+-#define RSA_TEST_VECTORS 2
++static struct tls_testvec tls_enc_tv_template[] = {
++ {
++#ifdef __LITTLE_ENDIAN
++ .key = "\x08\x00" /* rta length */
++ "\x01\x00" /* rta type */
++#else
++ .key = "\x00\x08" /* rta length */
++ "\x00\x01" /* rta type */
++#endif
++ "\x00\x00\x00\x10" /* enc key length */
++ "authenticationkey20benckeyis16_bytes",
++ .klen = 8 + 20 + 16,
++ .iv = "iv0123456789abcd",
++ .input = "Single block msg",
++ .ilen = 16,
++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
++ "\x00\x03\x01\x00\x10",
++ .alen = 13,
++ .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
++ "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
++ "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
++ "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
++ "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
++ "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
++ .rlen = 16 + 20 + 12,
++ }, {
++#ifdef __LITTLE_ENDIAN
++ .key = "\x08\x00" /* rta length */
++ "\x01\x00" /* rta type */
+ #else
+-#define RSA_TEST_VECTORS 5
++ .key = "\x00\x08" /* rta length */
++ "\x00\x01" /* rta type */
+ #endif
+-static struct akcipher_testvec rsa_tv_template[] = {
++ "\x00\x00\x00\x10" /* enc key length */
++ "authenticationkey20benckeyis16_bytes",
++ .klen = 8 + 20 + 16,
++ .iv = "iv0123456789abcd",
++ .input = "",
++ .ilen = 0,
++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
++ "\x00\x03\x01\x00\x00",
++ .alen = 13,
++ .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
++ "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
++ "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
++ "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
++ .rlen = 20 + 12,
++ }, {
++#ifdef __LITTLE_ENDIAN
++ .key = "\x08\x00" /* rta length */
++ "\x01\x00" /* rta type */
++#else
++ .key = "\x00\x08" /* rta length */
++ "\x00\x01" /* rta type */
++#endif
++ "\x00\x00\x00\x10" /* enc key length */
++ "authenticationkey20benckeyis16_bytes",
++ .klen = 8 + 20 + 16,
++ .iv = "iv0123456789abcd",
++ .input = "285 bytes plaintext285 bytes plaintext285 bytes"
++ " plaintext285 bytes plaintext285 bytes plaintext285"
++ " bytes plaintext285 bytes plaintext285 bytes"
++ " plaintext285 bytes plaintext285 bytes plaintext285"
++ " bytes plaintext285 bytes plaintext285 bytes"
++ " plaintext285 bytes plaintext285 bytes plaintext285"
++ " bytes plaintext285 bytes plaintext",
++ .ilen = 285,
++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
++ "\x00\x03\x01\x01\x1d",
++ .alen = 13,
++ .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
++ "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
++ "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
++ "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
++ "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
++ "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
++ "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
++ "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
++ "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
++ "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
++ "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
++ "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
++ "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
++ "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
++ "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
++ "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
++ "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
++ "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
++ "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
++ "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
++ "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
++ "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
++ "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
++ "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
++ "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
++ "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
++ "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
++ .rlen = 285 + 20 + 15,
++ }
++};
++
++static struct tls_testvec tls_dec_tv_template[] = {
++ {
++#ifdef __LITTLE_ENDIAN
++ .key = "\x08\x00" /* rta length */
++ "\x01\x00" /* rta type */
++#else
++ .key = "\x00\x08" /* rta length */
++ "\x00\x01" /* rta type */
++#endif
++ "\x00\x00\x00\x10" /* enc key length */
++ "authenticationkey20benckeyis16_bytes",
++ .klen = 8 + 20 + 16,
++ .iv = "iv0123456789abcd",
++ .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
++ "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
++ "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
++ "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
++ "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
++ "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
++ .ilen = 16 + 20 + 12,
++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
++ "\x00\x03\x01\x00\x30",
++ .alen = 13,
++ .result = "Single block msg",
++ .rlen = 16,
++ }, {
++#ifdef __LITTLE_ENDIAN
++ .key = "\x08\x00" /* rta length */
++ "\x01\x00" /* rta type */
++#else
++ .key = "\x00\x08" /* rta length */
++ "\x00\x01" /* rta type */
++#endif
++ "\x00\x00\x00\x10" /* enc key length */
++ "authenticationkey20benckeyis16_bytes",
++ .klen = 8 + 20 + 16,
++ .iv = "iv0123456789abcd",
++ .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
++ "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
++ "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
++ "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
++ .ilen = 20 + 12,
++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
++ "\x00\x03\x01\x00\x20",
++ .alen = 13,
++ .result = "",
++ .rlen = 0,
++ }, {
++#ifdef __LITTLE_ENDIAN
++ .key = "\x08\x00" /* rta length */
++ "\x01\x00" /* rta type */
++#else
++ .key = "\x00\x08" /* rta length */
++ "\x00\x01" /* rta type */
++#endif
++ "\x00\x00\x00\x10" /* enc key length */
++ "authenticationkey20benckeyis16_bytes",
++ .klen = 8 + 20 + 16,
++ .iv = "iv0123456789abcd",
++ .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
++ "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
++ "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
++ "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
++ "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
++ "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
++ "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
++ "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
++ "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
++ "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
++ "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
++ "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
++ "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
++ "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
++ "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
++ "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
++ "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
++ "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
++ "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
++ "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
++ "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
++ "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
++ "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
++ "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
++ "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
++ "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
++ "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
++
++ .ilen = 285 + 20 + 15,
++ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
++ "\x00\x03\x01\x01\x40",
++ .alen = 13,
++ .result = "285 bytes plaintext285 bytes plaintext285 bytes"
++ " plaintext285 bytes plaintext285 bytes plaintext285"
++ " bytes plaintext285 bytes plaintext285 bytes"
++ " plaintext285 bytes plaintext285 bytes plaintext285"
++ " bytes plaintext285 bytes plaintext285 bytes"
++ " plaintext285 bytes plaintext285 bytes plaintext",
++ .rlen = 285,
++ }
++};
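The .key blobs in the TLS vectors above use the authenc key layout parsed by crypto_authenc_extractkeys(): a struct rtattr header (length 8, type 1 = CRYPTO_AUTHENC_KEYA_PARAM) in native byte order, which is why the vectors carry __LITTLE_ENDIAN ifdefs, then the encryption key length as a big-endian 32-bit value, then the HMAC key and the cipher key back to back. A minimal user-space sketch of that layout (build_authenc_key() is illustrative, not a kernel helper):

#include <arpa/inet.h>	/* htonl() */
#include <stdint.h>
#include <string.h>

/* Lays out a key blob in the format of the .key/.klen fields above; see
 * crypto_authenc_extractkeys() in crypto/authenc.c for the in-kernel parser. */
static size_t build_authenc_key(uint8_t *out,
				const uint8_t *authkey, uint32_t authkeylen,
				const uint8_t *enckey, uint32_t enckeylen)
{
	uint16_t rta_len = 8;			/* rtattr header + 4-byte param */
	uint16_t rta_type = 1;			/* CRYPTO_AUTHENC_KEYA_PARAM */
	uint32_t be_enckeylen = htonl(enckeylen);
	uint8_t *p = out;

	memcpy(p, &rta_len, 2);  p += 2;	/* native endian, hence the ifdefs */
	memcpy(p, &rta_type, 2); p += 2;
	memcpy(p, &be_enckeylen, 4); p += 4;
	memcpy(p, authkey, authkeylen); p += authkeylen;
	memcpy(p, enckey, enckeylen); p += enckeylen;
	return (size_t)(p - out);
}

With the 20-byte "authenticationkey20b" MAC key and the 16-byte "enckeyis16_bytes" cipher key this returns 44, matching .klen = 8 + 20 + 16 in the vectors above.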
++
++/*
++ * RSA test vectors. Borrowed from openSSL.
++ */
++static const struct akcipher_testvec rsa_tv_template[] = {
+ {
+ #ifndef CONFIG_CRYPTO_FIPS
+ .key =
+@@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_template[] = {
+ .m_size = 8,
+ .c_size = 256,
+ .public_key_vec = true,
++#ifndef CONFIG_CRYPTO_FIPS
+ }, {
+ .key =
+ "\x30\x82\x09\x29" /* sequence of 2345 bytes */
+@@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_template[] = {
+ .key_len = 2349,
+ .m_size = 8,
+ .c_size = 512,
++#endif
+ }
+ };
+
+-#define DH_TEST_VECTORS 2
+-
+-struct kpp_testvec dh_tv_template[] = {
++static const struct kpp_testvec dh_tv_template[] = {
+ {
+ .secret =
+ #ifdef __LITTLE_ENDIAN
+@@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
+ }
+ };
+
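Almost everything below this point is mechanical cleanup: each *_TEST_VECTORS count macro is dropped and the corresponding table becomes static const. The counts are presumably recovered in testmgr.c with ARRAY_SIZE() (the __VECS() helper there pairs a table with its ARRAY_SIZE), so they can no longer drift out of sync with the tables. The pattern, reduced to a self-contained sketch with a simplified stand-in struct:

#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Simplified stand-in for the real struct in crypto/testmgr.h */
struct hash_testvec {
	const char *plaintext;
	size_t psize;
};

static const struct hash_testvec md4_tv_template[] = {
	{ .plaintext = "",  .psize = 0 },
	{ .plaintext = "a", .psize = 1 },
};

/* before: .count = MD4_TEST_VECTORS, a hand-maintained macro
 * after:  .count = ARRAY_SIZE(md4_tv_template), derived from the table */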
+-#ifdef CONFIG_CRYPTO_FIPS
+-#define ECDH_TEST_VECTORS 1
+-#else
+-#define ECDH_TEST_VECTORS 2
+-#endif
+-struct kpp_testvec ecdh_tv_template[] = {
++static const struct kpp_testvec ecdh_tv_template[] = {
+ {
+ #ifndef CONFIG_CRYPTO_FIPS
+ .secret =
+@@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] = {
+ /*
+ * MD4 test vectors from RFC1320
+ */
+-#define MD4_TEST_VECTORS 7
+-
+-static struct hash_testvec md4_tv_template [] = {
++static const struct hash_testvec md4_tv_template[] = {
+ {
+ .plaintext = "",
+ .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
+@@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_template [] = {
+ },
+ };
+
+-#define SHA3_224_TEST_VECTORS 3
+-static struct hash_testvec sha3_224_tv_template[] = {
++static const struct hash_testvec sha3_224_tv_template[] = {
+ {
+ .plaintext = "",
+ .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
+@@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_template[] = {
+ },
+ };
+
+-#define SHA3_256_TEST_VECTORS 3
+-static struct hash_testvec sha3_256_tv_template[] = {
++static const struct hash_testvec sha3_256_tv_template[] = {
+ {
+ .plaintext = "",
+ .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
+@@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_template[] = {
+ };
+
+
+-#define SHA3_384_TEST_VECTORS 3
+-static struct hash_testvec sha3_384_tv_template[] = {
++static const struct hash_testvec sha3_384_tv_template[] = {
+ {
+ .plaintext = "",
+ .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
+@@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_template[] = {
+ };
+
+
+-#define SHA3_512_TEST_VECTORS 3
+-static struct hash_testvec sha3_512_tv_template[] = {
++static const struct hash_testvec sha3_512_tv_template[] = {
+ {
+ .plaintext = "",
+ .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
+@@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_template[] = {
+ /*
+ * MD5 test vectors from RFC1321
+ */
+-#define MD5_TEST_VECTORS 7
+-
+-static struct hash_testvec md5_tv_template[] = {
++static const struct hash_testvec md5_tv_template[] = {
+ {
+ .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
+ "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
+@@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_template[] = {
+ /*
+ * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
+ */
+-#define RMD128_TEST_VECTORS 10
+-
+-static struct hash_testvec rmd128_tv_template[] = {
++static const struct hash_testvec rmd128_tv_template[] = {
+ {
+ .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
+ "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
+@@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_template[] = {
+ /*
+ * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
+ */
+-#define RMD160_TEST_VECTORS 10
+-
+-static struct hash_testvec rmd160_tv_template[] = {
++static const struct hash_testvec rmd160_tv_template[] = {
+ {
+ .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
+ "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
+@@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_template[] = {
+ /*
+ * RIPEMD-256 test vectors
+ */
+-#define RMD256_TEST_VECTORS 8
+-
+-static struct hash_testvec rmd256_tv_template[] = {
++static const struct hash_testvec rmd256_tv_template[] = {
+ {
+ .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
+ "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
+@@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_template[] = {
+ /*
+ * RIPEMD-320 test vectors
+ */
+-#define RMD320_TEST_VECTORS 8
+-
+-static struct hash_testvec rmd320_tv_template[] = {
++static const struct hash_testvec rmd320_tv_template[] = {
+ {
+ .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
+ "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
+@@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_template[] = {
+ }
+ };
+
+-#define CRCT10DIF_TEST_VECTORS 3
+-static struct hash_testvec crct10dif_tv_template[] = {
++static const struct hash_testvec crct10dif_tv_template[] = {
+ {
+- .plaintext = "abc",
+- .psize = 3,
+-#ifdef __LITTLE_ENDIAN
+- .digest = "\x3b\x44",
+-#else
+- .digest = "\x44\x3b",
+-#endif
+- }, {
+- .plaintext = "1234567890123456789012345678901234567890"
+- "123456789012345678901234567890123456789",
+- .psize = 79,
+-#ifdef __LITTLE_ENDIAN
+- .digest = "\x70\x4b",
+-#else
+- .digest = "\x4b\x70",
+-#endif
+- }, {
+- .plaintext =
+- "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+- .psize = 56,
+-#ifdef __LITTLE_ENDIAN
+- .digest = "\xe3\x9c",
+-#else
+- .digest = "\x9c\xe3",
+-#endif
+- .np = 2,
+- .tap = { 28, 28 }
++ .plaintext = "abc",
++ .psize = 3,
++ .digest = (u8 *)(u16 []){ 0x443b },
++ }, {
++ .plaintext = "1234567890123456789012345678901234567890"
++ "123456789012345678901234567890123456789",
++ .psize = 79,
++ .digest = (u8 *)(u16 []){ 0x4b70 },
++ .np = 2,
++ .tap = { 63, 16 },
++ }, {
++ .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
++ "ddddddddddddd",
++ .psize = 56,
++ .digest = (u8 *)(u16 []){ 0x9ce3 },
++ .np = 8,
++ .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
++ }, {
++ .plaintext = "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "123456789012345678901234567890123456789",
++ .psize = 319,
++ .digest = (u8 *)(u16 []){ 0x44c6 },
++ }, {
++ .plaintext = "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "1234567890123456789012345678901234567890"
++ "123456789012345678901234567890123456789",
++ .psize = 319,
++ .digest = (u8 *)(u16 []){ 0x44c6 },
++ .np = 4,
++ .tap = { 1, 255, 57, 6 },
+ }
+ };
+
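Two things change in the crct10dif vectors. The digests become native-endian u16 compound literals, (u8 *)(u16 []){ 0x443b }, which retires the per-endianness byte pairs because the drivers store the 16-bit CRC in CPU byte order; and the added 319-byte inputs and odd .tap splits exercise chunked updates and, likely, the vectorised (PCLMUL/PMULL) code paths that need longer buffers. The values follow the usual CRC-T10DIF parameters (polynomial 0x8bb7, zero initial value, no reflection); a plain bitwise sketch, useful for checking vectors offline, reproduces the first digest:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reference-only bitwise CRC-T10DIF (poly 0x8bb7, init 0, unreflected);
 * the kernel's crc_t10dif() is table- or carryless-multiply-based. */
static uint16_t crc_t10dif_bitwise(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	/* prints 443b, i.e. the digest of the "abc" vector above */
	printf("%04x\n", crc_t10dif_bitwise((const uint8_t *)"abc", 3));
	return 0;
}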
+@@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_template[] = {
+ * SHA1 test vectors from from FIPS PUB 180-1
+ * Long vector from CAVS 5.0
+ */
+-#define SHA1_TEST_VECTORS 6
+-
+-static struct hash_testvec sha1_tv_template[] = {
++static const struct hash_testvec sha1_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_template[] = {
+ /*
+ * SHA224 test vectors from from FIPS PUB 180-2
+ */
+-#define SHA224_TEST_VECTORS 5
+-
+-static struct hash_testvec sha224_tv_template[] = {
++static const struct hash_testvec sha224_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_template[] = {
+ /*
+ * SHA256 test vectors from from NIST
+ */
+-#define SHA256_TEST_VECTORS 5
+-
+-static struct hash_testvec sha256_tv_template[] = {
++static const struct hash_testvec sha256_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_template[] = {
+ /*
+ * SHA384 test vectors from from NIST and kerneli
+ */
+-#define SHA384_TEST_VECTORS 6
+-
+-static struct hash_testvec sha384_tv_template[] = {
++static const struct hash_testvec sha384_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_template[] = {
+ /*
+ * SHA512 test vectors from from NIST and kerneli
+ */
+-#define SHA512_TEST_VECTORS 6
+-
+-static struct hash_testvec sha512_tv_template[] = {
++static const struct hash_testvec sha512_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_template[] = {
+ * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
+ * submission
+ */
+-#define WP512_TEST_VECTORS 8
+-
+-static struct hash_testvec wp512_tv_template[] = {
++static const struct hash_testvec wp512_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_template[] = {
+ },
+ };
+
+-#define WP384_TEST_VECTORS 8
+-
+-static struct hash_testvec wp384_tv_template[] = {
++static const struct hash_testvec wp384_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_template[] = {
+ },
+ };
+
+-#define WP256_TEST_VECTORS 8
+-
+-static struct hash_testvec wp256_tv_template[] = {
++static const struct hash_testvec wp256_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_template[] = {
+ /*
+ * TIGER test vectors from Tiger website
+ */
+-#define TGR192_TEST_VECTORS 6
+-
+-static struct hash_testvec tgr192_tv_template[] = {
++static const struct hash_testvec tgr192_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_template[] = {
+ },
+ };
+
+-#define TGR160_TEST_VECTORS 6
+-
+-static struct hash_testvec tgr160_tv_template[] = {
++static const struct hash_testvec tgr160_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_template[] = {
+ },
+ };
+
+-#define TGR128_TEST_VECTORS 6
+-
+-static struct hash_testvec tgr128_tv_template[] = {
++static const struct hash_testvec tgr128_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+@@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_template[] = {
+ },
+ };
+
+-#define GHASH_TEST_VECTORS 6
+-
+-static struct hash_testvec ghash_tv_template[] =
++static const struct hash_testvec ghash_tv_template[] =
+ {
+ {
+ .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
+@@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_template[] =
+ * HMAC-MD5 test vectors from RFC2202
+ * (These need to be fixed to not use strlen).
+ */
+-#define HMAC_MD5_TEST_VECTORS 7
+-
+-static struct hash_testvec hmac_md5_tv_template[] =
++static const struct hash_testvec hmac_md5_tv_template[] =
+ {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+@@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_template[] =
+ /*
+ * HMAC-RIPEMD128 test vectors from RFC2286
+ */
+-#define HMAC_RMD128_TEST_VECTORS 7
+-
+-static struct hash_testvec hmac_rmd128_tv_template[] = {
++static const struct hash_testvec hmac_rmd128_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ksize = 16,
+@@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_tv_template[] = {
+ /*
+ * HMAC-RIPEMD160 test vectors from RFC2286
+ */
+-#define HMAC_RMD160_TEST_VECTORS 7
+-
+-static struct hash_testvec hmac_rmd160_tv_template[] = {
++static const struct hash_testvec hmac_rmd160_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ksize = 20,
+@@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_tv_template[] = {
+ /*
+ * HMAC-SHA1 test vectors from RFC2202
+ */
+-#define HMAC_SHA1_TEST_VECTORS 7
+-
+-static struct hash_testvec hmac_sha1_tv_template[] = {
++static const struct hash_testvec hmac_sha1_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ksize = 20,
+@@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_template[] = {
+ /*
+ * SHA224 HMAC test vectors from RFC4231
+ */
+-#define HMAC_SHA224_TEST_VECTORS 4
+-
+-static struct hash_testvec hmac_sha224_tv_template[] = {
++static const struct hash_testvec hmac_sha224_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+@@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_tv_template[] = {
+ * HMAC-SHA256 test vectors from
+ * draft-ietf-ipsec-ciph-sha-256-01.txt
+ */
+-#define HMAC_SHA256_TEST_VECTORS 10
+-
+-static struct hash_testvec hmac_sha256_tv_template[] = {
++static const struct hash_testvec hmac_sha256_tv_template[] = {
+ {
+ .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
+ "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
+@@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_tv_template[] = {
+ },
+ };
+
+-#define CMAC_AES_TEST_VECTORS 6
+-
+-static struct hash_testvec aes_cmac128_tv_template[] = {
++static const struct hash_testvec aes_cmac128_tv_template[] = {
+ { /* From NIST Special Publication 800-38B, AES-128 */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+@@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_tv_template[] = {
+ }
+ };
+
+-#define CMAC_DES3_EDE_TEST_VECTORS 4
++static const struct hash_testvec aes_cbcmac_tv_template[] = {
++ {
++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
++ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
++ .digest = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
++ "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
++ .psize = 16,
++ .ksize = 16,
++ }, {
++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
++ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
++ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
++ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
++ "\x30",
++ .digest = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
++ "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
++ .psize = 33,
++ .ksize = 16,
++ .np = 2,
++ .tap = { 7, 26 },
++ }, {
++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
++ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
++ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
++ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
++ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
++ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
++ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
++ "\xad\x2b\x41\x7b\xe6\x6c\x37",
++ .digest = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
++ "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
++ .psize = 63,
++ .ksize = 16,
++ }, {
++ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
++ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
++ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
++ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
++ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
++ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
++ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
++ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
++ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
++ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
++ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
++ "\x1c",
++ .digest = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
++ "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
++ .psize = 65,
++ .ksize = 32,
++ }
++};
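The cbcmac(aes) template is new in this patch, presumably for the CAAM driver's CCM support (CBC-MAC is the authentication half of CCM). A sanity check is built into the first vector: with a zero IV, CBC-MAC over a single 16-byte block reduces to one AES encryption, so its digest is exactly the SP 800-38A ECB-AES-128 ciphertext for that block. A cross-check sketch, assuming OpenSSL's legacy AES_* API is available (link with -lcrypto):

#include <openssl/aes.h>
#include <stdio.h>

int main(void)
{
	/* Key and plaintext of the first cbcmac(aes) vector above */
	static const unsigned char key[16] = {
		0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
		0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
	};
	static const unsigned char pt[16] = {
		0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
		0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
	};
	unsigned char mac[16];
	AES_KEY enc;

	AES_set_encrypt_key(key, 128, &enc);
	AES_encrypt(pt, mac, &enc);	/* one block, IV = 0: CBC-MAC == ECB */

	for (int i = 0; i < 16; i++)
		printf("%02x", mac[i]);	/* 3ad77bb40d7a3660a89ecaf32466ef97 */
	printf("\n");
	return 0;
}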
+
+-static struct hash_testvec des3_ede_cmac64_tv_template[] = {
++static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
+ /*
+ * From NIST Special Publication 800-38B, Three Key TDEA
+ * Corrected test vectors from:
+@@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac64_tv_template[] = {
+ }
+ };
+
+-#define XCBC_AES_TEST_VECTORS 6
+-
+-static struct hash_testvec aes_xcbc128_tv_template[] = {
++static const struct hash_testvec aes_xcbc128_tv_template[] = {
+ {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+@@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
+ }
+ };
+
+-#define VMAC_AES_TEST_VECTORS 11
+-static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
+- '\x02', '\x03', '\x02', '\x02',
+- '\x02', '\x04', '\x01', '\x07',
+- '\x04', '\x01', '\x04', '\x03',};
+-static char vmac_string2[128] = {'a', 'b', 'c',};
+-static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
+- 'a', 'b', 'c', 'a', 'b', 'c',
+- 'a', 'b', 'c', 'a', 'b', 'c',
+- 'a', 'b', 'c', 'a', 'b', 'c',
+- 'a', 'b', 'c', 'a', 'b', 'c',
+- 'a', 'b', 'c', 'a', 'b', 'c',
+- 'a', 'b', 'c', 'a', 'b', 'c',
+- 'a', 'b', 'c', 'a', 'b', 'c',
+- };
+-
+-static char vmac_string4[17] = {'b', 'c', 'e', 'f',
+- 'i', 'j', 'l', 'm',
+- 'o', 'p', 'r', 's',
+- 't', 'u', 'w', 'x', 'z'};
+-
+-static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
+- 'o', 'l', 'k', ']', '%',
+- '9', '2', '7', '!', 'A'};
+-
+-static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
+- 'i', '!', '#', 'w', '0',
+- 'z', '/', '4', 'A', 'n'};
+-
+-static struct hash_testvec aes_vmac128_tv_template[] = {
++static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
++ '\x02', '\x03', '\x02', '\x02',
++ '\x02', '\x04', '\x01', '\x07',
++ '\x04', '\x01', '\x04', '\x03',};
++static const char vmac_string2[128] = {'a', 'b', 'c',};
++static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
++ 'a', 'b', 'c', 'a', 'b', 'c',
++ 'a', 'b', 'c', 'a', 'b', 'c',
++ 'a', 'b', 'c', 'a', 'b', 'c',
++ 'a', 'b', 'c', 'a', 'b', 'c',
++ 'a', 'b', 'c', 'a', 'b', 'c',
++ 'a', 'b', 'c', 'a', 'b', 'c',
++ 'a', 'b', 'c', 'a', 'b', 'c',
++ };
++
++static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
++ 'i', 'j', 'l', 'm',
++ 'o', 'p', 'r', 's',
++ 't', 'u', 'w', 'x', 'z'};
++
++static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
++ 'o', 'l', 'k', ']', '%',
++ '9', '2', '7', '!', 'A'};
++
++static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
++ 'i', '!', '#', 'w', '0',
++ 'z', '/', '4', 'A', 'n'};
++
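The vmac_string buffers above declare a larger size than their initializers, so C zero-fills the remainder and each constant is a fixed-size, mostly-zero message; vmac_string6, at 129 bytes, appears chosen to run one byte past the 128-byte chunks VMAC hashes at a time. A compile-time restatement of the zero-fill assumption (the _copy name marks this as a local illustration, not the table's symbol):

#include <assert.h>

static const char vmac_string2_copy[128] = {'a', 'b', 'c',};

/* C11 6.7.9p21: elements without an explicit initializer are zero-filled,
 * so bytes 3..127 are 0 and the hash still covers all 128 bytes. */
static_assert(sizeof(vmac_string2_copy) == 128, "padded to full length");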
++static const struct hash_testvec aes_vmac128_tv_template[] = {
+ {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+@@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_tv_template[] = {
+ * SHA384 HMAC test vectors from RFC4231
+ */
+
+-#define HMAC_SHA384_TEST_VECTORS 4
+-
+-static struct hash_testvec hmac_sha384_tv_template[] = {
++static const struct hash_testvec hmac_sha384_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+@@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_tv_template[] = {
+ * SHA512 HMAC test vectors from RFC4231
+ */
+
+-#define HMAC_SHA512_TEST_VECTORS 4
+-
+-static struct hash_testvec hmac_sha512_tv_template[] = {
++static const struct hash_testvec hmac_sha512_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+@@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
+ },
+ };
+
+-#define HMAC_SHA3_224_TEST_VECTORS 4
+-
+-static struct hash_testvec hmac_sha3_224_tv_template[] = {
++static const struct hash_testvec hmac_sha3_224_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+@@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224_tv_template[] = {
+ },
+ };
+
+-#define HMAC_SHA3_256_TEST_VECTORS 4
+-
+-static struct hash_testvec hmac_sha3_256_tv_template[] = {
++static const struct hash_testvec hmac_sha3_256_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+@@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256_tv_template[] = {
+ },
+ };
+
+-#define HMAC_SHA3_384_TEST_VECTORS 4
+-
+-static struct hash_testvec hmac_sha3_384_tv_template[] = {
++static const struct hash_testvec hmac_sha3_384_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+@@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384_tv_template[] = {
+ },
+ };
+
+-#define HMAC_SHA3_512_TEST_VECTORS 4
+-
+-static struct hash_testvec hmac_sha3_512_tv_template[] = {
++static const struct hash_testvec hmac_sha3_512_tv_template[] = {
+ {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+@@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512_tv_template[] = {
+ * Poly1305 test vectors from RFC7539 A.3.
+ */
+
+-#define POLY1305_TEST_VECTORS 11
+-
+-static struct hash_testvec poly1305_tv_template[] = {
++static const struct hash_testvec poly1305_tv_template[] = {
+ { /* Test Vector #1 */
+ .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+@@ -4533,20 +4742,7 @@ static struct hash_testvec poly1305_tv_template[] = {
+ /*
+ * DES test vectors.
+ */
+-#define DES_ENC_TEST_VECTORS 11
+-#define DES_DEC_TEST_VECTORS 5
+-#define DES_CBC_ENC_TEST_VECTORS 6
+-#define DES_CBC_DEC_TEST_VECTORS 5
+-#define DES_CTR_ENC_TEST_VECTORS 2
+-#define DES_CTR_DEC_TEST_VECTORS 2
+-#define DES3_EDE_ENC_TEST_VECTORS 4
+-#define DES3_EDE_DEC_TEST_VECTORS 4
+-#define DES3_EDE_CBC_ENC_TEST_VECTORS 2
+-#define DES3_EDE_CBC_DEC_TEST_VECTORS 2
+-#define DES3_EDE_CTR_ENC_TEST_VECTORS 2
+-#define DES3_EDE_CTR_DEC_TEST_VECTORS 2
+-
+-static struct cipher_testvec des_enc_tv_template[] = {
++static const struct cipher_testvec des_enc_tv_template[] = {
+ { /* From Applied Cryptography */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .klen = 8,
+@@ -4720,7 +4916,7 @@ static struct cipher_testvec des_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des_dec_tv_template[] = {
++static const struct cipher_testvec des_dec_tv_template[] = {
+ { /* From Applied Cryptography */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .klen = 8,
+@@ -4830,7 +5026,7 @@ static struct cipher_testvec des_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des_cbc_enc_tv_template[] = {
++static const struct cipher_testvec des_cbc_enc_tv_template[] = {
+ { /* From OpenSSL */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .klen = 8,
+@@ -4956,7 +5152,7 @@ static struct cipher_testvec des_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des_cbc_dec_tv_template[] = {
++static const struct cipher_testvec des_cbc_dec_tv_template[] = {
+ { /* FIPS Pub 81 */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .klen = 8,
+@@ -5065,7 +5261,7 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des_ctr_enc_tv_template[] = {
++static const struct cipher_testvec des_ctr_enc_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+@@ -5211,7 +5407,7 @@ static struct cipher_testvec des_ctr_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des_ctr_dec_tv_template[] = {
++static const struct cipher_testvec des_ctr_dec_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+@@ -5357,7 +5553,7 @@ static struct cipher_testvec des_ctr_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des3_ede_enc_tv_template[] = {
++static const struct cipher_testvec des3_ede_enc_tv_template[] = {
+ { /* These are from openssl */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\x55\x55\x55\x55\x55\x55\x55\x55"
+@@ -5522,7 +5718,7 @@ static struct cipher_testvec des3_ede_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des3_ede_dec_tv_template[] = {
++static const struct cipher_testvec des3_ede_dec_tv_template[] = {
+ { /* These are from openssl */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\x55\x55\x55\x55\x55\x55\x55\x55"
+@@ -5687,7 +5883,7 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
++static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
+ { /* Generated from openssl */
+ .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
+ "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
+@@ -5867,7 +6063,7 @@ static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
++static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
+ { /* Generated from openssl */
+ .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
+ "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
+@@ -6047,7 +6243,7 @@ static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
++static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
+ "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
+@@ -6325,7 +6521,7 @@ static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
++static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
+ "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
+@@ -6606,14 +6802,7 @@ static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
+ /*
+ * Blowfish test vectors.
+ */
+-#define BF_ENC_TEST_VECTORS 7
+-#define BF_DEC_TEST_VECTORS 7
+-#define BF_CBC_ENC_TEST_VECTORS 2
+-#define BF_CBC_DEC_TEST_VECTORS 2
+-#define BF_CTR_ENC_TEST_VECTORS 2
+-#define BF_CTR_DEC_TEST_VECTORS 2
+-
+-static struct cipher_testvec bf_enc_tv_template[] = {
++static const struct cipher_testvec bf_enc_tv_template[] = {
+ { /* DES test vectors from OpenSSL */
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8,
+@@ -6805,7 +6994,7 @@ static struct cipher_testvec bf_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec bf_dec_tv_template[] = {
++static const struct cipher_testvec bf_dec_tv_template[] = {
+ { /* DES test vectors from OpenSSL */
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8,
+@@ -6997,7 +7186,7 @@ static struct cipher_testvec bf_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec bf_cbc_enc_tv_template[] = {
++static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
+ { /* From OpenSSL */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
+@@ -7154,7 +7343,7 @@ static struct cipher_testvec bf_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec bf_cbc_dec_tv_template[] = {
++static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
+ { /* From OpenSSL */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
+@@ -7311,7 +7500,7 @@ static struct cipher_testvec bf_cbc_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec bf_ctr_enc_tv_template[] = {
++static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -7723,7 +7912,7 @@ static struct cipher_testvec bf_ctr_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec bf_ctr_dec_tv_template[] = {
++static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -8138,18 +8327,7 @@ static struct cipher_testvec bf_ctr_dec_tv_template[] = {
+ /*
+ * Twofish test vectors.
+ */
+-#define TF_ENC_TEST_VECTORS 4
+-#define TF_DEC_TEST_VECTORS 4
+-#define TF_CBC_ENC_TEST_VECTORS 5
+-#define TF_CBC_DEC_TEST_VECTORS 5
+-#define TF_CTR_ENC_TEST_VECTORS 2
+-#define TF_CTR_DEC_TEST_VECTORS 2
+-#define TF_LRW_ENC_TEST_VECTORS 8
+-#define TF_LRW_DEC_TEST_VECTORS 8
+-#define TF_XTS_ENC_TEST_VECTORS 5
+-#define TF_XTS_DEC_TEST_VECTORS 5
+-
+-static struct cipher_testvec tf_enc_tv_template[] = {
++static const struct cipher_testvec tf_enc_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -8317,7 +8495,7 @@ static struct cipher_testvec tf_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tf_dec_tv_template[] = {
++static const struct cipher_testvec tf_dec_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -8485,7 +8663,7 @@ static struct cipher_testvec tf_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tf_cbc_enc_tv_template[] = {
++static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
+ { /* Generated with Nettle */
+ .key = zeroed_string,
+ .klen = 16,
+@@ -8668,7 +8846,7 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tf_cbc_dec_tv_template[] = {
++static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
+ { /* Reverse of the first four above */
+ .key = zeroed_string,
+ .klen = 16,
+@@ -8851,7 +9029,7 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tf_ctr_enc_tv_template[] = {
++static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -9262,7 +9440,7 @@ static struct cipher_testvec tf_ctr_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tf_ctr_dec_tv_template[] = {
++static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -9673,7 +9851,7 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tf_lrw_enc_tv_template[] = {
++static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
+ /* Generated from AES-LRW test vectors */
+ {
+ .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+@@ -9925,7 +10103,7 @@ static struct cipher_testvec tf_lrw_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tf_lrw_dec_tv_template[] = {
++static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
+ /* Generated from AES-LRW test vectors */
+ /* same as enc vectors with input and result reversed */
+ {
+@@ -10178,7 +10356,7 @@ static struct cipher_testvec tf_lrw_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tf_xts_enc_tv_template[] = {
++static const struct cipher_testvec tf_xts_enc_tv_template[] = {
+ /* Generated from AES-XTS test vectors */
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+@@ -10520,7 +10698,7 @@ static struct cipher_testvec tf_xts_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tf_xts_dec_tv_template[] = {
++static const struct cipher_testvec tf_xts_dec_tv_template[] = {
+ /* Generated from AES-XTS test vectors */
+ /* same as enc vectors with input and result reversed */
+ {
+@@ -10867,25 +11045,7 @@ static struct cipher_testvec tf_xts_dec_tv_template[] = {
+ * Serpent test vectors. These are backwards because Serpent writes
+ * octet sequences in right-to-left mode.
+ */
+-#define SERPENT_ENC_TEST_VECTORS 5
+-#define SERPENT_DEC_TEST_VECTORS 5
+-
+-#define TNEPRES_ENC_TEST_VECTORS 4
+-#define TNEPRES_DEC_TEST_VECTORS 4
+-
+-#define SERPENT_CBC_ENC_TEST_VECTORS 1
+-#define SERPENT_CBC_DEC_TEST_VECTORS 1
+-
+-#define SERPENT_CTR_ENC_TEST_VECTORS 2
+-#define SERPENT_CTR_DEC_TEST_VECTORS 2
+-
+-#define SERPENT_LRW_ENC_TEST_VECTORS 8
+-#define SERPENT_LRW_DEC_TEST_VECTORS 8
+-
+-#define SERPENT_XTS_ENC_TEST_VECTORS 5
+-#define SERPENT_XTS_DEC_TEST_VECTORS 5
+-
+-static struct cipher_testvec serpent_enc_tv_template[] = {
++static const struct cipher_testvec serpent_enc_tv_template[] = {
+ {
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+@@ -11061,7 +11221,7 @@ static struct cipher_testvec serpent_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tnepres_enc_tv_template[] = {
++static const struct cipher_testvec tnepres_enc_tv_template[] = {
+ { /* KeySize=128, PT=0, I=1 */
+ .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+@@ -11111,7 +11271,7 @@ static struct cipher_testvec tnepres_enc_tv_template[] = {
+ };
+
+
+-static struct cipher_testvec serpent_dec_tv_template[] = {
++static const struct cipher_testvec serpent_dec_tv_template[] = {
+ {
+ .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
+ "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
+@@ -11287,7 +11447,7 @@ static struct cipher_testvec serpent_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec tnepres_dec_tv_template[] = {
++static const struct cipher_testvec tnepres_dec_tv_template[] = {
+ {
+ .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
+ "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
+@@ -11328,7 +11488,7 @@ static struct cipher_testvec tnepres_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
++static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -11469,7 +11629,7 @@ static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
++static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -11610,7 +11770,7 @@ static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
++static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -12021,7 +12181,7 @@ static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
++static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -12432,7 +12592,7 @@ static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
++static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
+ /* Generated from AES-LRW test vectors */
+ {
+ .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+@@ -12684,7 +12844,7 @@ static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
++static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
+ /* Generated from AES-LRW test vectors */
+ /* same as enc vectors with input and result reversed */
+ {
+@@ -12937,7 +13097,7 @@ static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec serpent_xts_enc_tv_template[] = {
++static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
+ /* Generated from AES-XTS test vectors */
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+@@ -13279,7 +13439,7 @@ static struct cipher_testvec serpent_xts_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec serpent_xts_dec_tv_template[] = {
++static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
+ /* Generated from AES-XTS test vectors */
+ /* same as enc vectors with input and result reversed */
+ {
+@@ -13623,18 +13783,7 @@ static struct cipher_testvec serpent_xts_dec_tv_template[] = {
+ };
+
+ /* Cast6 test vectors from RFC 2612 */
+-#define CAST6_ENC_TEST_VECTORS 4
+-#define CAST6_DEC_TEST_VECTORS 4
+-#define CAST6_CBC_ENC_TEST_VECTORS 1
+-#define CAST6_CBC_DEC_TEST_VECTORS 1
+-#define CAST6_CTR_ENC_TEST_VECTORS 2
+-#define CAST6_CTR_DEC_TEST_VECTORS 2
+-#define CAST6_LRW_ENC_TEST_VECTORS 1
+-#define CAST6_LRW_DEC_TEST_VECTORS 1
+-#define CAST6_XTS_ENC_TEST_VECTORS 1
+-#define CAST6_XTS_DEC_TEST_VECTORS 1
+-
+-static struct cipher_testvec cast6_enc_tv_template[] = {
++static const struct cipher_testvec cast6_enc_tv_template[] = {
+ {
+ .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
+ "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
+@@ -13805,7 +13954,7 @@ static struct cipher_testvec cast6_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast6_dec_tv_template[] = {
++static const struct cipher_testvec cast6_dec_tv_template[] = {
+ {
+ .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
+ "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
+@@ -13976,7 +14125,7 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
++static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -14117,7 +14266,7 @@ static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
++static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -14258,7 +14407,7 @@ static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
++static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -14415,7 +14564,7 @@ static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
++static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -14572,7 +14721,7 @@ static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
++static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+ "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+@@ -14719,7 +14868,7 @@ static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
++static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+ "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+@@ -14866,7 +15015,7 @@ static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast6_xts_enc_tv_template[] = {
++static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+@@ -15015,7 +15164,7 @@ static struct cipher_testvec cast6_xts_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast6_xts_dec_tv_template[] = {
++static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+@@ -15168,39 +15317,7 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
+ /*
+ * AES test vectors.
+ */
+-#define AES_ENC_TEST_VECTORS 4
+-#define AES_DEC_TEST_VECTORS 4
+-#define AES_CBC_ENC_TEST_VECTORS 5
+-#define AES_CBC_DEC_TEST_VECTORS 5
+-#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
+-#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
+-#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
+-#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
+-#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
+-#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
+-#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
+-#define AES_LRW_ENC_TEST_VECTORS 8
+-#define AES_LRW_DEC_TEST_VECTORS 8
+-#define AES_XTS_ENC_TEST_VECTORS 5
+-#define AES_XTS_DEC_TEST_VECTORS 5
+-#define AES_CTR_ENC_TEST_VECTORS 5
+-#define AES_CTR_DEC_TEST_VECTORS 5
+-#define AES_OFB_ENC_TEST_VECTORS 1
+-#define AES_OFB_DEC_TEST_VECTORS 1
+-#define AES_CTR_3686_ENC_TEST_VECTORS 7
+-#define AES_CTR_3686_DEC_TEST_VECTORS 6
+-#define AES_GCM_ENC_TEST_VECTORS 9
+-#define AES_GCM_DEC_TEST_VECTORS 8
+-#define AES_GCM_4106_ENC_TEST_VECTORS 23
+-#define AES_GCM_4106_DEC_TEST_VECTORS 23
+-#define AES_GCM_4543_ENC_TEST_VECTORS 1
+-#define AES_GCM_4543_DEC_TEST_VECTORS 2
+-#define AES_CCM_ENC_TEST_VECTORS 8
+-#define AES_CCM_DEC_TEST_VECTORS 7
+-#define AES_CCM_4309_ENC_TEST_VECTORS 7
+-#define AES_CCM_4309_DEC_TEST_VECTORS 10
+-
+-static struct cipher_testvec aes_enc_tv_template[] = {
++static const struct cipher_testvec aes_enc_tv_template[] = {
+ { /* From FIPS-197 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+@@ -15372,7 +15489,7 @@ static struct cipher_testvec aes_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec aes_dec_tv_template[] = {
++static const struct cipher_testvec aes_dec_tv_template[] = {
+ { /* From FIPS-197 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+@@ -15544,7 +15661,7 @@ static struct cipher_testvec aes_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec aes_cbc_enc_tv_template[] = {
++static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
+ { /* From RFC 3602 */
+ .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
+ "\x51\x2e\x03\xd5\x34\x12\x00\x06",
+@@ -15766,7 +15883,7 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec aes_cbc_dec_tv_template[] = {
++static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
+ { /* From RFC 3602 */
+ .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
+ "\x51\x2e\x03\xd5\x34\x12\x00\x06",
+@@ -15988,7 +16105,7 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
+ },
+ };
+
+-static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
++static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
+ { /* Input data from RFC 2410 Case 1 */
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -16030,7 +16147,7 @@ static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
+ },
+ };
+
+-static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
++static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
+ {
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
+ },
+ };
+
+-static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
+ { /* RFC 3602 Case 1 */
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -16341,7 +16458,7 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
+ { /* Input data from RFC 2410 Case 1 */
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -16387,7 +16504,7 @@ static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
+ },
+ };
+
+-static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
++static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
+ {
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -16433,7 +16550,7 @@ static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
+ },
+ };
+
+-static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
+ { /* RFC 3602 Case 1 */
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -16716,7 +16833,7 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
+ { /* RFC 3602 Case 1 */
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17055,9 +17172,7 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17116,9 +17231,7 @@ static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17177,9 +17290,7 @@ static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17240,9 +17351,7 @@ static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17307,9 +17416,7 @@ static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17378,9 +17485,7 @@ static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17441,9 +17546,7 @@ static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17504,9 +17607,7 @@ static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17569,9 +17670,7 @@ static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17638,9 +17737,7 @@ static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC 1
+-
+-static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
++static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+ #ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+@@ -17711,7 +17808,7 @@ static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
+ },
+ };
+
+-static struct cipher_testvec aes_lrw_enc_tv_template[] = {
++static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
+ /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
+ { /* LRW-32-AES 1 */
+ .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+@@ -17964,7 +18061,7 @@ static struct cipher_testvec aes_lrw_enc_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec aes_lrw_dec_tv_template[] = {
++static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
+ /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
+ /* same as enc vectors with input and result reversed */
+ { /* LRW-32-AES 1 */
+@@ -18218,7 +18315,7 @@ static struct cipher_testvec aes_lrw_dec_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec aes_xts_enc_tv_template[] = {
++static const struct cipher_testvec aes_xts_enc_tv_template[] = {
+ /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
+ { /* XTS-AES 1 */
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+@@ -18561,7 +18658,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec aes_xts_dec_tv_template[] = {
++static const struct cipher_testvec aes_xts_dec_tv_template[] = {
+ /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
+ { /* XTS-AES 1 */
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+@@ -18905,7 +19002,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
+ };
+
+
+-static struct cipher_testvec aes_ctr_enc_tv_template[] = {
++static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
+ { /* From NIST Special Publication 800-38A, Appendix F.5 */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+@@ -19260,7 +19357,7 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec aes_ctr_dec_tv_template[] = {
++static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
+ { /* From NIST Special Publication 800-38A, Appendix F.5 */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+@@ -19615,7 +19712,7 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
++static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
+ { /* From RFC 3686 */
+ .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
+ "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
+@@ -20747,7 +20844,7 @@ static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
++static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
+ { /* From RFC 3686 */
+ .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
+ "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
+@@ -20838,7 +20935,7 @@ static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec aes_ofb_enc_tv_template[] = {
++static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
+ /* From NIST Special Publication 800-38A, Appendix F.5 */
+ {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+@@ -20867,7 +20964,7 @@ static struct cipher_testvec aes_ofb_enc_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec aes_ofb_dec_tv_template[] = {
++static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
+ /* From NIST Special Publication 800-38A, Appendix F.5 */
+ {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+@@ -20896,7 +20993,7 @@ static struct cipher_testvec aes_ofb_dec_tv_template[] = {
+ }
+ };
+
+-static struct aead_testvec aes_gcm_enc_tv_template[] = {
++static const struct aead_testvec aes_gcm_enc_tv_template[] = {
+ { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
+ .key = zeroed_string,
+ .klen = 16,
+@@ -21056,7 +21153,7 @@ static struct aead_testvec aes_gcm_enc_tv_template[] = {
+ }
+ };
+
+-static struct aead_testvec aes_gcm_dec_tv_template[] = {
++static const struct aead_testvec aes_gcm_dec_tv_template[] = {
+ { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
+ .key = zeroed_string,
+ .klen = 32,
+@@ -21258,7 +21355,7 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = {
+ }
+ };
+
+-static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
++static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
+ { /* Generated using Crypto++ */
+ .key = zeroed_string,
+ .klen = 20,
+@@ -21871,7 +21968,7 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
+ }
+ };
+
+-static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
++static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
+ { /* Generated using Crypto++ */
+ .key = zeroed_string,
+ .klen = 20,
+@@ -22485,7 +22582,7 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
+ }
+ };
+
+-static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
++static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
+ { /* From draft-mcgrew-gcm-test-01 */
+ .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
+ "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
+@@ -22516,7 +22613,7 @@ static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
+ }
+ };
+
+-static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
++static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
+ { /* From draft-mcgrew-gcm-test-01 */
+ .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
+ "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
+@@ -22575,7 +22672,7 @@ static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
+ },
+ };
+
+-static struct aead_testvec aes_ccm_enc_tv_template[] = {
++static const struct aead_testvec aes_ccm_enc_tv_template[] = {
+ { /* From RFC 3610 */
+ .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+@@ -22859,7 +22956,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {
+ }
+ };
+
+-static struct aead_testvec aes_ccm_dec_tv_template[] = {
++static const struct aead_testvec aes_ccm_dec_tv_template[] = {
+ { /* From RFC 3610 */
+ .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+@@ -23191,7 +23288,7 @@ static struct aead_testvec aes_ccm_dec_tv_template[] = {
+ * These vectors are copied/generated from the ones for rfc4106 with
+ * the key truncated by one byte..
+ */
+-static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
++static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
+ { /* Generated using Crypto++ */
+ .key = zeroed_string,
+ .klen = 19,
+@@ -23804,7 +23901,7 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
+ }
+ };
+
+-static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
++static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
+ { /* Generated using Crypto++ */
+ .key = zeroed_string,
+ .klen = 19,
+@@ -24420,9 +24517,7 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
+ /*
+ * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
+ */
+-#define RFC7539_ENC_TEST_VECTORS 2
+-#define RFC7539_DEC_TEST_VECTORS 2
+-static struct aead_testvec rfc7539_enc_tv_template[] = {
++static const struct aead_testvec rfc7539_enc_tv_template[] = {
+ {
+ .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+@@ -24554,7 +24649,7 @@ static struct aead_testvec rfc7539_enc_tv_template[] = {
+ },
+ };
+
+-static struct aead_testvec rfc7539_dec_tv_template[] = {
++static const struct aead_testvec rfc7539_dec_tv_template[] = {
+ {
+ .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+@@ -24689,9 +24784,7 @@ static struct aead_testvec rfc7539_dec_tv_template[] = {
+ /*
+ * draft-irtf-cfrg-chacha20-poly1305
+ */
+-#define RFC7539ESP_DEC_TEST_VECTORS 1
+-#define RFC7539ESP_ENC_TEST_VECTORS 1
+-static struct aead_testvec rfc7539esp_enc_tv_template[] = {
++static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
+ {
+ .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
+ "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
+@@ -24779,7 +24872,7 @@ static struct aead_testvec rfc7539esp_enc_tv_template[] = {
+ },
+ };
+
+-static struct aead_testvec rfc7539esp_dec_tv_template[] = {
++static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
+ {
+ .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
+ "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
+@@ -24875,7 +24968,7 @@ static struct aead_testvec rfc7539esp_dec_tv_template[] = {
+ * semiblock of the ciphertext from the test vector. For decryption, iv is
+ * the first semiblock of the ciphertext.
+ */
+-static struct cipher_testvec aes_kw_enc_tv_template[] = {
++static const struct cipher_testvec aes_kw_enc_tv_template[] = {
+ {
+ .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
+ "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
+@@ -24890,7 +24983,7 @@ static struct cipher_testvec aes_kw_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec aes_kw_dec_tv_template[] = {
++static const struct cipher_testvec aes_kw_dec_tv_template[] = {
+ {
+ .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
+ "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
+@@ -24913,9 +25006,7 @@ static struct cipher_testvec aes_kw_dec_tv_template[] = {
+ * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
+ * Only AES-128 is supported at this time.
+ */
+-#define ANSI_CPRNG_AES_TEST_VECTORS 6
+-
+-static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
++static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
+ {
+ .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
+ "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
+@@ -25011,7 +25102,7 @@ static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
+ * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
+ * w/o personalization string, w/ and w/o additional input string).
+ */
+-static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
++static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
+ {
+ .entropy = (unsigned char *)
+ "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
+@@ -25169,7 +25260,7 @@ static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
+ },
+ };
+
+-static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
++static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
+ {
+ .entropy = (unsigned char *)
+ "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
+@@ -25327,7 +25418,7 @@ static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
+ },
+ };
+
+-static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
++static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
+ {
+ .entropy = (unsigned char *)
+ "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
+@@ -25451,7 +25542,7 @@ static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
+ * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
+ * w/o personalization string, w/ and w/o additional input string).
+ */
+-static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
++static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
+ {
+ .entropy = (unsigned char *)
+ "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
+@@ -25573,7 +25664,7 @@ static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
+ },
+ };
+
+-static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
++static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
+ {
+ .entropy = (unsigned char *)
+ "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
+@@ -25695,7 +25786,7 @@ static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
+ },
+ };
+
+-static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
++static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
+ {
+ .entropy = (unsigned char *)
+ "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
+@@ -25719,7 +25810,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
+ },
+ };
+
+-static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
++static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
+ {
+ .entropy = (unsigned char *)
+ "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
+@@ -25743,7 +25834,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
+ },
+ };
+
+-static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
++static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
+ {
+ .entropy = (unsigned char *)
+ "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
+@@ -25832,14 +25923,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
+ };
+
+ /* Cast5 test vectors from RFC 2144 */
+-#define CAST5_ENC_TEST_VECTORS 4
+-#define CAST5_DEC_TEST_VECTORS 4
+-#define CAST5_CBC_ENC_TEST_VECTORS 1
+-#define CAST5_CBC_DEC_TEST_VECTORS 1
+-#define CAST5_CTR_ENC_TEST_VECTORS 2
+-#define CAST5_CTR_DEC_TEST_VECTORS 2
+-
+-static struct cipher_testvec cast5_enc_tv_template[] = {
++static const struct cipher_testvec cast5_enc_tv_template[] = {
+ {
+ .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
+ "\x23\x45\x67\x89\x34\x56\x78\x9a",
+@@ -26000,7 +26084,7 @@ static struct cipher_testvec cast5_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast5_dec_tv_template[] = {
++static const struct cipher_testvec cast5_dec_tv_template[] = {
+ {
+ .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
+ "\x23\x45\x67\x89\x34\x56\x78\x9a",
+@@ -26161,7 +26245,7 @@ static struct cipher_testvec cast5_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
++static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
+@@ -26299,7 +26383,7 @@ static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
++static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
+@@ -26437,7 +26521,7 @@ static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
++static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
+@@ -26588,7 +26672,7 @@ static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
++static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
+ { /* Generated from TF test vectors */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
+@@ -26742,10 +26826,7 @@ static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
+ /*
+ * ARC4 test vectors from OpenSSL
+ */
+-#define ARC4_ENC_TEST_VECTORS 7
+-#define ARC4_DEC_TEST_VECTORS 7
+-
+-static struct cipher_testvec arc4_enc_tv_template[] = {
++static const struct cipher_testvec arc4_enc_tv_template[] = {
+ {
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .klen = 8,
+@@ -26811,7 +26892,7 @@ static struct cipher_testvec arc4_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec arc4_dec_tv_template[] = {
++static const struct cipher_testvec arc4_dec_tv_template[] = {
+ {
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .klen = 8,
+@@ -26880,10 +26961,7 @@ static struct cipher_testvec arc4_dec_tv_template[] = {
+ /*
+ * TEA test vectors
+ */
+-#define TEA_ENC_TEST_VECTORS 4
+-#define TEA_DEC_TEST_VECTORS 4
+-
+-static struct cipher_testvec tea_enc_tv_template[] = {
++static const struct cipher_testvec tea_enc_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -26926,7 +27004,7 @@ static struct cipher_testvec tea_enc_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec tea_dec_tv_template[] = {
++static const struct cipher_testvec tea_dec_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -26972,10 +27050,7 @@ static struct cipher_testvec tea_dec_tv_template[] = {
+ /*
+ * XTEA test vectors
+ */
+-#define XTEA_ENC_TEST_VECTORS 4
+-#define XTEA_DEC_TEST_VECTORS 4
+-
+-static struct cipher_testvec xtea_enc_tv_template[] = {
++static const struct cipher_testvec xtea_enc_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -27018,7 +27093,7 @@ static struct cipher_testvec xtea_enc_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec xtea_dec_tv_template[] = {
++static const struct cipher_testvec xtea_dec_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -27064,10 +27139,7 @@ static struct cipher_testvec xtea_dec_tv_template[] = {
+ /*
+ * KHAZAD test vectors.
+ */
+-#define KHAZAD_ENC_TEST_VECTORS 5
+-#define KHAZAD_DEC_TEST_VECTORS 5
+-
+-static struct cipher_testvec khazad_enc_tv_template[] = {
++static const struct cipher_testvec khazad_enc_tv_template[] = {
+ {
+ .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+@@ -27113,7 +27185,7 @@ static struct cipher_testvec khazad_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec khazad_dec_tv_template[] = {
++static const struct cipher_testvec khazad_dec_tv_template[] = {
+ {
+ .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+@@ -27163,12 +27235,7 @@ static struct cipher_testvec khazad_dec_tv_template[] = {
+ * Anubis test vectors.
+ */
+
+-#define ANUBIS_ENC_TEST_VECTORS 5
+-#define ANUBIS_DEC_TEST_VECTORS 5
+-#define ANUBIS_CBC_ENC_TEST_VECTORS 2
+-#define ANUBIS_CBC_DEC_TEST_VECTORS 2
+-
+-static struct cipher_testvec anubis_enc_tv_template[] = {
++static const struct cipher_testvec anubis_enc_tv_template[] = {
+ {
+ .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
+ "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
+@@ -27231,7 +27298,7 @@ static struct cipher_testvec anubis_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec anubis_dec_tv_template[] = {
++static const struct cipher_testvec anubis_dec_tv_template[] = {
+ {
+ .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
+ "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
+@@ -27294,7 +27361,7 @@ static struct cipher_testvec anubis_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
++static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
+ {
+ .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
+ "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
+@@ -27329,7 +27396,7 @@ static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
++static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
+ {
+ .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
+ "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
+@@ -27367,10 +27434,7 @@ static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
+ /*
+ * XETA test vectors
+ */
+-#define XETA_ENC_TEST_VECTORS 4
+-#define XETA_DEC_TEST_VECTORS 4
+-
+-static struct cipher_testvec xeta_enc_tv_template[] = {
++static const struct cipher_testvec xeta_enc_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -27413,7 +27477,7 @@ static struct cipher_testvec xeta_enc_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec xeta_dec_tv_template[] = {
++static const struct cipher_testvec xeta_dec_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -27459,10 +27523,7 @@ static struct cipher_testvec xeta_dec_tv_template[] = {
+ /*
+ * FCrypt test vectors
+ */
+-#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
+-#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
+-
+-static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
++static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
+ { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8,
+@@ -27523,7 +27584,7 @@ static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
++static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
+ { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8,
+@@ -27587,18 +27648,7 @@ static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
+ /*
+ * CAMELLIA test vectors.
+ */
+-#define CAMELLIA_ENC_TEST_VECTORS 4
+-#define CAMELLIA_DEC_TEST_VECTORS 4
+-#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
+-#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
+-#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
+-#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
+-#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
+-#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
+-#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
+-#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
+-
+-static struct cipher_testvec camellia_enc_tv_template[] = {
++static const struct cipher_testvec camellia_enc_tv_template[] = {
+ {
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+@@ -27898,7 +27948,7 @@ static struct cipher_testvec camellia_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec camellia_dec_tv_template[] = {
++static const struct cipher_testvec camellia_dec_tv_template[] = {
+ {
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+@@ -28198,7 +28248,7 @@ static struct cipher_testvec camellia_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
++static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
+ {
+ .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
+ "\x51\x2e\x03\xd5\x34\x12\x00\x06",
+@@ -28494,7 +28544,7 @@ static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
++static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
+ {
+ .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
+ "\x51\x2e\x03\xd5\x34\x12\x00\x06",
+@@ -28790,7 +28840,7 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
++static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -29457,7 +29507,7 @@ static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
++static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+ "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+@@ -30124,7 +30174,7 @@ static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
++static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
+ /* Generated from AES-LRW test vectors */
+ {
+ .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+@@ -30376,7 +30426,7 @@ static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
++static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
+ /* Generated from AES-LRW test vectors */
+ /* same as enc vectors with input and result reversed */
+ {
+@@ -30629,7 +30679,7 @@ static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec camellia_xts_enc_tv_template[] = {
++static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
+ /* Generated from AES-XTS test vectors */
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+@@ -30971,7 +31021,7 @@ static struct cipher_testvec camellia_xts_enc_tv_template[] = {
+ },
+ };
+
+-static struct cipher_testvec camellia_xts_dec_tv_template[] = {
++static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
+ /* Generated from AES-XTS test vectors */
+ /* same as enc vectors with input and result reversed */
+ {
+@@ -31317,10 +31367,7 @@ static struct cipher_testvec camellia_xts_dec_tv_template[] = {
+ /*
+ * SEED test vectors
+ */
+-#define SEED_ENC_TEST_VECTORS 4
+-#define SEED_DEC_TEST_VECTORS 4
+-
+-static struct cipher_testvec seed_enc_tv_template[] = {
++static const struct cipher_testvec seed_enc_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -31362,7 +31409,7 @@ static struct cipher_testvec seed_enc_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec seed_dec_tv_template[] = {
++static const struct cipher_testvec seed_dec_tv_template[] = {
+ {
+ .key = zeroed_string,
+ .klen = 16,
+@@ -31404,8 +31451,7 @@ static struct cipher_testvec seed_dec_tv_template[] = {
+ }
+ };
+
+-#define SALSA20_STREAM_ENC_TEST_VECTORS 5
+-static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
++static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
+ /*
+ * Testvectors from verified.test-vectors submitted to ECRYPT.
+ * They are truncated to size 39, 64, 111, 129 to test a variety
+@@ -32574,8 +32620,7 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
+ },
+ };
+
+-#define CHACHA20_ENC_TEST_VECTORS 4
+-static struct cipher_testvec chacha20_enc_tv_template[] = {
++static const struct cipher_testvec chacha20_enc_tv_template[] = {
+ { /* RFC7539 A.2. Test Vector #1 */
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+@@ -33086,9 +33131,7 @@ static struct cipher_testvec chacha20_enc_tv_template[] = {
+ /*
+ * CTS (Cipher Text Stealing) mode tests
+ */
+-#define CTS_MODE_ENC_TEST_VECTORS 6
+-#define CTS_MODE_DEC_TEST_VECTORS 6
+-static struct cipher_testvec cts_mode_enc_tv_template[] = {
++static const struct cipher_testvec cts_mode_enc_tv_template[] = {
+ { /* from rfc3962 */
+ .klen = 16,
+ .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+@@ -33190,7 +33233,7 @@ static struct cipher_testvec cts_mode_enc_tv_template[] = {
+ }
+ };
+
+-static struct cipher_testvec cts_mode_dec_tv_template[] = {
++static const struct cipher_testvec cts_mode_dec_tv_template[] = {
+ { /* from rfc3962 */
+ .klen = 16,
+ .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+@@ -33308,10 +33351,7 @@ struct comp_testvec {
+ * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
+ */
+
+-#define DEFLATE_COMP_TEST_VECTORS 2
+-#define DEFLATE_DECOMP_TEST_VECTORS 2
+-
+-static struct comp_testvec deflate_comp_tv_template[] = {
++static const struct comp_testvec deflate_comp_tv_template[] = {
+ {
+ .inlen = 70,
+ .outlen = 38,
+@@ -33347,7 +33387,7 @@ static struct comp_testvec deflate_comp_tv_template[] = {
+ },
+ };
+
+-static struct comp_testvec deflate_decomp_tv_template[] = {
++static const struct comp_testvec deflate_decomp_tv_template[] = {
+ {
+ .inlen = 122,
+ .outlen = 191,
+@@ -33386,10 +33426,7 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
+ /*
+ * LZO test vectors (null-terminated strings).
+ */
+-#define LZO_COMP_TEST_VECTORS 2
+-#define LZO_DECOMP_TEST_VECTORS 2
+-
+-static struct comp_testvec lzo_comp_tv_template[] = {
++static const struct comp_testvec lzo_comp_tv_template[] = {
+ {
+ .inlen = 70,
+ .outlen = 57,
+@@ -33429,7 +33466,7 @@ static struct comp_testvec lzo_comp_tv_template[] = {
+ },
+ };
+
+-static struct comp_testvec lzo_decomp_tv_template[] = {
++static const struct comp_testvec lzo_decomp_tv_template[] = {
+ {
+ .inlen = 133,
+ .outlen = 159,
+@@ -33472,7 +33509,7 @@ static struct comp_testvec lzo_decomp_tv_template[] = {
+ */
+ #define MICHAEL_MIC_TEST_VECTORS 6
+
+-static struct hash_testvec michael_mic_tv_template[] = {
++static const struct hash_testvec michael_mic_tv_template[] = {
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ksize = 8,
+@@ -33520,9 +33557,7 @@ static struct hash_testvec michael_mic_tv_template[] = {
+ /*
+ * CRC32 test vectors
+ */
+-#define CRC32_TEST_VECTORS 14
+-
+-static struct hash_testvec crc32_tv_template[] = {
++static const struct hash_testvec crc32_tv_template[] = {
+ {
+ .key = "\x87\xa9\xcb\xed",
+ .ksize = 4,
+@@ -33954,9 +33989,7 @@ static struct hash_testvec crc32_tv_template[] = {
+ /*
+ * CRC32C test vectors
+ */
+-#define CRC32C_TEST_VECTORS 15
+-
+-static struct hash_testvec crc32c_tv_template[] = {
++static const struct hash_testvec crc32c_tv_template[] = {
+ {
+ .psize = 0,
+ .digest = "\x00\x00\x00\x00",
+@@ -34392,9 +34425,7 @@ static struct hash_testvec crc32c_tv_template[] = {
+ /*
+ * Blackfin CRC test vectors
+ */
+-#define BFIN_CRC_TEST_VECTORS 6
+-
+-static struct hash_testvec bfin_crc_tv_template[] = {
++static const struct hash_testvec bfin_crc_tv_template[] = {
+ {
+ .psize = 0,
+ .digest = "\x00\x00\x00\x00",
+@@ -34479,9 +34510,6 @@ static struct hash_testvec bfin_crc_tv_template[] = {
+
+ };
+
+-#define LZ4_COMP_TEST_VECTORS 1
+-#define LZ4_DECOMP_TEST_VECTORS 1
+-
+ static struct comp_testvec lz4_comp_tv_template[] = {
+ {
+ .inlen = 70,
+@@ -34512,9 +34540,6 @@ static struct comp_testvec lz4_decomp_tv_template[] = {
+ },
+ };
+
+-#define LZ4HC_COMP_TEST_VECTORS 1
+-#define LZ4HC_DECOMP_TEST_VECTORS 1
+-
+ static struct comp_testvec lz4hc_comp_tv_template[] = {
+ {
+ .inlen = 70,
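The testmgr.h hunks up to this point all make one mechanical change: each test-vector template becomes const, and the hand-counted *_TEST_VECTORS macros are dropped so that the array itself is the single source of truth for the count. A minimal sketch of the pattern, with a hypothetical use site (the .count field is illustrative of how testmgr-style code typically consumes these tables, not a quote from this patch):

    /* Before: a hand-maintained count that could drift from the array. */
    #define TEA_ENC_TEST_VECTORS 4
    static struct cipher_testvec tea_enc_tv_template[] = { /* 4 entries */ };

    /* After: const data; the consumer derives the count from the array. */
    static const struct cipher_testvec tea_enc_tv_template[] = { /* ... */ };
    /* at the (hypothetical) use site: */
    /* .count = ARRAY_SIZE(tea_enc_tv_template) */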
+diff --git a/crypto/tls.c b/crypto/tls.c
+new file mode 100644
+index 00000000..377226f5
+--- /dev/null
++++ b/crypto/tls.c
+@@ -0,0 +1,607 @@
++/*
++ * Copyright 2013 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++
++#include <crypto/internal/aead.h>
++#include <crypto/internal/hash.h>
++#include <crypto/internal/skcipher.h>
++#include <crypto/authenc.h>
++#include <crypto/null.h>
++#include <crypto/scatterwalk.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/rtnetlink.h>
++
++struct tls_instance_ctx {
++ struct crypto_ahash_spawn auth;
++ struct crypto_skcipher_spawn enc;
++};
++
++struct crypto_tls_ctx {
++ unsigned int reqoff;
++ struct crypto_ahash *auth;
++ struct crypto_skcipher *enc;
++ struct crypto_skcipher *null;
++};
++
++struct tls_request_ctx {
++ /*
++ * cryptlen holds the payload length in the case of encryption or
++ * payload_len + icv_len + padding_len in case of decryption
++ */
++ unsigned int cryptlen;
++ /* working space for partial results */
++ struct scatterlist tmp[2];
++ struct scatterlist cipher[2];
++ struct scatterlist dst[2];
++ char tail[];
++};
++
++struct async_op {
++ struct completion completion;
++ int err;
++};
++
++static void tls_async_op_done(struct crypto_async_request *req, int err)
++{
++ struct async_op *areq = req->data;
++
++ if (err == -EINPROGRESS)
++ return;
++
++ areq->err = err;
++ complete(&areq->completion);
++}
++
++static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
++ unsigned int keylen)
++{
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ struct crypto_ahash *auth = ctx->auth;
++ struct crypto_skcipher *enc = ctx->enc;
++ struct crypto_authenc_keys keys;
++ int err = -EINVAL;
++
++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
++ goto badkey;
++
++ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
++ crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
++ CRYPTO_TFM_REQ_MASK);
++ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
++ crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
++ CRYPTO_TFM_RES_MASK);
++
++ if (err)
++ goto out;
++
++ crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
++ CRYPTO_TFM_REQ_MASK);
++ err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
++ crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
++ CRYPTO_TFM_RES_MASK);
++
++out:
++ return err;
++
++badkey:
++ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ goto out;
++}
++
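crypto_tls_setkey() reuses the authenc() key format: crypto_authenc_extractkeys() expects a blob that starts with an rtattr carrying the encryption key length, followed by the authentication key and then the encryption key. A minimal sketch of a caller packing such a blob (the helper name is hypothetical; the layout follows include/crypto/authenc.h):

    #include <crypto/authenc.h>
    #include <linux/rtnetlink.h>
    #include <linux/string.h>

    /* Hypothetical helper: pack authkey||enckey with the enc key length
     * up front, the way crypto_authenc_extractkeys() parses it. */
    static int pack_authenc_key(u8 *buf, unsigned int buflen,
                                const u8 *authkey, unsigned int authkeylen,
                                const u8 *enckey, unsigned int enckeylen)
    {
            struct rtattr *rta = (struct rtattr *)buf;
            struct crypto_authenc_key_param *param;

            if (buflen < RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen)
                    return -EINVAL;

            rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
            rta->rta_len = RTA_LENGTH(sizeof(*param));
            param = RTA_DATA(rta);
            param->enckeylen = cpu_to_be32(enckeylen);

            memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
            memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen,
                   enckey, enckeylen);
            return 0;
    }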
++/**
++ * crypto_tls_genicv - Calculate hmac digest for a TLS record
++ * @hash: (output) buffer to save the digest into
++ * @src: (input) scatterlist with the assoc and payload data
++ * @srclen: (input) size of the source buffer (assoclen + cryptlen)
++ * @req: (input) aead request
++ **/
++static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
++ unsigned int srclen, struct aead_request *req)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
++ struct async_op ahash_op;
++ struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
++ unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
++ int err = -EBADMSG;
++
++ /* Bail out if the request assoc len is 0 */
++ if (!req->assoclen)
++ return err;
++
++ init_completion(&ahash_op.completion);
++
++ /* the hash transform to be executed comes from the original request */
++ ahash_request_set_tfm(ahreq, ctx->auth);
++ /* prepare the hash request with input data and result pointer */
++ ahash_request_set_crypt(ahreq, src, hash, srclen);
++ /* set the notifier for when the async hash function returns */
++ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
++ tls_async_op_done, &ahash_op);
++
++ /* Calculate the digest on the given data. The result is put in hash */
++ err = crypto_ahash_digest(ahreq);
++ if (err == -EINPROGRESS) {
++ err = wait_for_completion_interruptible(&ahash_op.completion);
++ if (!err)
++ err = ahash_op.err;
++ }
++
++ return err;
++}
++
++/**
++ * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
++ * @hash: (output) buffer to save the digest and padding into
++ * @phashlen: (output) the size of digest + padding
++ * @req: (input) aead request
++ **/
++static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
++ struct aead_request *req)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ unsigned int hash_size = crypto_aead_authsize(tls);
++ unsigned int block_size = crypto_aead_blocksize(tls);
++ unsigned int srclen = req->cryptlen + hash_size;
++ unsigned int icvlen = req->cryptlen + req->assoclen;
++ unsigned int padlen;
++ int err;
++
++ err = crypto_tls_genicv(hash, req->src, icvlen, req);
++ if (err)
++ goto out;
++
++ /* add padding after digest */
++ padlen = block_size - (srclen % block_size);
++ memset(hash + hash_size, padlen - 1, padlen);
++
++ *phashlen = hash_size + padlen;
++out:
++ return err;
++}
++
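Worked numbers make the pad arithmetic above concrete. A stand-alone sketch, assuming HMAC-SHA1 (20-byte digest), AES-CBC (16-byte blocks) and a 32-byte payload; none of these sizes are taken from the driver:

    #include <string.h>

    /* Illustration only; sizes are assumptions. */
    static unsigned int tls10_pad_example(unsigned char *hash_area)
    {
            unsigned int hash_size = 20, block_size = 16, cryptlen = 32;
            unsigned int srclen = cryptlen + hash_size;               /* 52 */
            unsigned int padlen = block_size - srclen % block_size;   /* 12 */

            /* Twelve bytes of 0x0b: eleven pad bytes plus the pad-length
             * byte, so payload + ICV + padding = 52 + 12 = 64 bytes,
             * a whole number of cipher blocks. */
            memset(hash_area + hash_size, padlen - 1, padlen);

            return hash_size + padlen;                  /* *phashlen = 32 */
    }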
++static int crypto_tls_copy_data(struct aead_request *req,
++ struct scatterlist *src,
++ struct scatterlist *dst,
++ unsigned int len)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
++
++ skcipher_request_set_tfm(skreq, ctx->null);
++ skcipher_request_set_callback(skreq, aead_request_flags(req),
++ NULL, NULL);
++ skcipher_request_set_crypt(skreq, src, dst, len, NULL);
++
++ return crypto_skcipher_encrypt(skreq);
++}
++
++static int crypto_tls_encrypt(struct aead_request *req)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
++ struct skcipher_request *skreq;
++ struct scatterlist *cipher = treq_ctx->cipher;
++ struct scatterlist *tmp = treq_ctx->tmp;
++ struct scatterlist *sg, *src, *dst;
++ unsigned int cryptlen, phashlen;
++ u8 *hash = treq_ctx->tail;
++ int err;
++
++ /*
++ * The hash result is saved at the beginning of the tls request ctx
++ * and is aligned as required by the hash transform. Enough space was
++ * allocated in crypto_tls_init_tfm to accommodate the difference. The
++ * requests themselves start later at treq_ctx->tail + ctx->reqoff so
++ * the result is not overwritten by the second (cipher) request.
++ */
++ hash = (u8 *)ALIGN((unsigned long)hash +
++ crypto_ahash_alignmask(ctx->auth),
++ crypto_ahash_alignmask(ctx->auth) + 1);
++
++ /*
++ * STEP 1: create ICV together with necessary padding
++ */
++ err = crypto_tls_gen_padicv(hash, &phashlen, req);
++ if (err)
++ return err;
++
++ /*
++ * STEP 2: Hash and padding are combined with the payload
++ * depending on the form in which it arrives. Scatter tables must have
++ * at least one page of data before chaining with another table and
++ * can't have an empty data page. The following code addresses these
++ * requirements.
++ *
++ * If the payload is empty, only the hash is encrypted, otherwise the
++ * payload scatterlist is merged with the hash. A special merging case
++ * is when the payload has only one page of data. In that case the
++ * payload page is moved to another scatterlist and prepared there for
++ * encryption.
++ */
++ if (req->cryptlen) {
++ src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
++
++ sg_init_table(cipher, 2);
++ sg_set_buf(cipher + 1, hash, phashlen);
++
++ if (sg_is_last(src)) {
++ sg_set_page(cipher, sg_page(src), req->cryptlen,
++ src->offset);
++ src = cipher;
++ } else {
++ unsigned int rem_len = req->cryptlen;
++
++ for (sg = src; rem_len > sg->length; sg = sg_next(sg))
++ rem_len -= min(rem_len, sg->length);
++
++ sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
++ sg_chain(sg, 1, cipher);
++ }
++ } else {
++ sg_init_one(cipher, hash, phashlen);
++ src = cipher;
++ }
++
++ /*
++ * If src != dst, copy the associated data from source to destination.
++ * In both cases fast-forward past the associated data in the dest.
++ */
++ if (req->src != req->dst) {
++ err = crypto_tls_copy_data(req, req->src, req->dst,
++ req->assoclen);
++ if (err)
++ return err;
++ }
++ dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
++
++ /*
++ * STEP 3: encrypt the frame and return the result
++ */
++ cryptlen = req->cryptlen + phashlen;
++
++ /*
++ * The hash and the cipher are applied at different times and their
++ * requests can use the same memory space without interference
++ */
++ skreq = (void *)(treq_ctx->tail + ctx->reqoff);
++ skcipher_request_set_tfm(skreq, ctx->enc);
++ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
++ skcipher_request_set_callback(skreq, aead_request_flags(req),
++ req->base.complete, req->base.data);
++ /*
++ * Apply the cipher transform. The result will be in req->dst when the
++ * asynchronous call terminates
++ */
++ err = crypto_skcipher_encrypt(skreq);
++
++ return err;
++}
++
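For orientation, the destination buffer produced by the three steps looks roughly like this (on a hedged reading, the associated data ends with the TLS record header, whose final two bytes are the length field the decrypt path rewrites):

    /*
     * req->dst after crypto_tls_encrypt() (MAC-then-encrypt):
     *
     *      assoclen bytes            cryptlen + phashlen bytes
     *   +-------------------+---------------------------------------+
     *   | associated data   |   ENC( payload | HMAC | padding )     |
     *   | (..., len field)  |                                       |
     *   +-------------------+---------------------------------------+
     */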
++static int crypto_tls_decrypt(struct aead_request *req)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
++ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
++ unsigned int cryptlen = req->cryptlen;
++ unsigned int hash_size = crypto_aead_authsize(tls);
++ unsigned int block_size = crypto_aead_blocksize(tls);
++ struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
++ struct scatterlist *tmp = treq_ctx->tmp;
++ struct scatterlist *src, *dst;
++
++ u8 padding[255]; /* padding can be 0-255 bytes */
++ u8 pad_size;
++ u16 *len_field;
++ u8 *ihash, *hash = treq_ctx->tail;
++
++ int paderr = 0;
++ int err = -EINVAL;
++ int i;
++ struct async_op ciph_op;
++
++ /*
++ * Rule out bad packets. The input must be longer than hash_size and
++ * a whole number of cipher blocks long.
++ */
++ if (cryptlen <= hash_size || cryptlen % block_size)
++ goto out;
++
++ /*
++ * Step 1 - Decrypt the source. Fast-forward past the associated data
++ * to the encrypted data. The result will be overwritten in place so
++ * that the decrypted data will be adjacent to the associated data. The
++ * last step (computing the hash) will have its input data already
++ * prepared and ready to be accessed at req->src.
++ */
++ src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
++ dst = src;
++
++ init_completion(&ciph_op.completion);
++ skcipher_request_set_tfm(skreq, ctx->enc);
++ skcipher_request_set_callback(skreq, aead_request_flags(req),
++ tls_async_op_done, &ciph_op);
++ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
++ err = crypto_skcipher_decrypt(skreq);
++ if (err == -EINPROGRESS) {
++ err = wait_for_completion_interruptible(&ciph_op.completion);
++ if (!err)
++ err = ciph_op.err;
++ }
++ if (err)
++ goto out;
++
++ /*
++ * Step 2 - Verify padding
++ * Retrieve the last byte of the payload; this is the padding size.
++ */
++ cryptlen -= 1;
++ scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
++
++ /*
++ * Invalid padding size: per the TLS RFC, behave as if there were no
++ * padding and report a MAC error (-EBADMSG) later on.
++ */
++ if (cryptlen < pad_size + hash_size) {
++ pad_size = 0;
++ paderr = -EBADMSG;
++ }
++ cryptlen -= pad_size;
++ scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
++
++ /* Each padding byte must equal pad_size. We verify them all. */
++ for (i = 0; i < pad_size; i++)
++ if (padding[i] != pad_size)
++ paderr = -EBADMSG;
++
++ /*
++ * Step 3 - Verify hash
++ * Align the digest result as required by the hash transform. Enough
++ * space was allocated in crypto_tls_init_tfm
++ */
++ hash = (u8 *)ALIGN((unsigned long)hash +
++ crypto_ahash_alignmask(ctx->auth),
++ crypto_ahash_alignmask(ctx->auth) + 1);
++ /*
++ * Two bytes at the end of the associated data make the length field.
++ * It must be updated with the length of the cleartext message before
++ * the hash is calculated.
++ */
++ len_field = sg_virt(req->src) + req->assoclen - 2;
++ cryptlen -= hash_size;
++ *len_field = htons(cryptlen);
++
++ /* This is the hash from the decrypted packet. Save it for later */
++ ihash = hash + hash_size;
++ scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
++
++ /* Now compute and compare our ICV with the one from the packet */
++ err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
++ if (!err)
++ err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
++
++ if (req->src != req->dst) {
++ err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
++ req->assoclen);
++ if (err)
++ goto out;
++ }
++
++ /* return the first found error */
++ if (paderr)
++ err = paderr;
++
++out:
++ aead_request_complete(req, err);
++ return err;
++}
++
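Tracing the length bookkeeping above with the figures from the earlier padding sketch (64 bytes of decrypted payload|ICV|padding, 20-byte digest, all assumed sizes):

    unsigned int cryptlen = 64;     /* payload | ICV | padding           */

    cryptlen -= 1;                  /* 63: drop the pad-length byte      */
    /* pad_size read from that byte = 11 */
    cryptlen -= 11;                 /* 52: drop the padding itself       */
    /* ... padding bytes verified, then: */
    cryptlen -= 20;                 /* 32: drop the ICV; this cleartext  */
                                    /* length is what htons() writes     */
                                    /* back into the TLS length field    */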
++static int crypto_tls_init_tfm(struct crypto_aead *tfm)
++{
++ struct aead_instance *inst = aead_alg_instance(tfm);
++ struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
++ struct crypto_ahash *auth;
++ struct crypto_skcipher *enc;
++ struct crypto_skcipher *null;
++ int err;
++
++ auth = crypto_spawn_ahash(&ictx->auth);
++ if (IS_ERR(auth))
++ return PTR_ERR(auth);
++
++ enc = crypto_spawn_skcipher(&ictx->enc);
++ err = PTR_ERR(enc);
++ if (IS_ERR(enc))
++ goto err_free_ahash;
++
++ null = crypto_get_default_null_skcipher2();
++ err = PTR_ERR(null);
++ if (IS_ERR(null))
++ goto err_free_skcipher;
++
++ ctx->auth = auth;
++ ctx->enc = enc;
++ ctx->null = null;
++
++ /*
++ * Allow enough space for two digests. The two digests will be compared
++ * during the decryption phase. One will come from the decrypted packet
++ * and the other will be calculated. For encryption, one digest is
++ * padded (up to a cipher blocksize) and chained with the payload
++ */
++ ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
++ crypto_ahash_alignmask(auth),
++ crypto_ahash_alignmask(auth) + 1) +
++ max(crypto_ahash_digestsize(auth),
++ crypto_skcipher_blocksize(enc));
++
++ crypto_aead_set_reqsize(tfm,
++ sizeof(struct tls_request_ctx) +
++ ctx->reqoff +
++ max_t(unsigned int,
++ crypto_ahash_reqsize(auth) +
++ sizeof(struct ahash_request),
++ crypto_skcipher_reqsize(enc) +
++ sizeof(struct skcipher_request)));
++
++ return 0;
++
++err_free_skcipher:
++ crypto_free_skcipher(enc);
++err_free_ahash:
++ crypto_free_ahash(auth);
++ return err;
++}
++
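With the sizes assumed earlier (HMAC-SHA1, AES) and an ahash alignmask of 0, the reqoff computation lays the request context out as sketched below; all figures are illustrative:

    /*
     * treq_ctx->tail for digestsize = 20, blocksize = 16, alignmask = 0:
     *
     *   tail[ 0..19]  computed digest ("hash")
     *   tail[20..39]  second digest on decrypt ("ihash"), or pad
     *                 spill-over on encrypt
     *   tail[40.. ]   ahash_request / skcipher_request; the two phases
     *                 never run concurrently, so they share this space
     *
     * reqoff = ALIGN(20 + 0, 1) + max(20, 16) = 40
     */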
++static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
++{
++ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
++
++ crypto_free_ahash(ctx->auth);
++ crypto_free_skcipher(ctx->enc);
++ crypto_put_default_null_skcipher2();
++}
++
++static void crypto_tls_free(struct aead_instance *inst)
++{
++ struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
++
++ crypto_drop_skcipher(&ctx->enc);
++ crypto_drop_ahash(&ctx->auth);
++ kfree(inst);
++}
++
++static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
++{
++ struct crypto_attr_type *algt;
++ struct aead_instance *inst;
++ struct hash_alg_common *auth;
++ struct crypto_alg *auth_base;
++ struct skcipher_alg *enc;
++ struct tls_instance_ctx *ctx;
++ const char *enc_name;
++ int err;
++
++ algt = crypto_get_attr_type(tb);
++ if (IS_ERR(algt))
++ return PTR_ERR(algt);
++
++ if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
++ return -EINVAL;
++
++ auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
++ CRYPTO_ALG_TYPE_AHASH_MASK |
++ crypto_requires_sync(algt->type, algt->mask));
++ if (IS_ERR(auth))
++ return PTR_ERR(auth);
++
++ auth_base = &auth->base;
++
++ enc_name = crypto_attr_alg_name(tb[2]);
++ err = PTR_ERR(enc_name);
++ if (IS_ERR(enc_name))
++ goto out_put_auth;
++
++ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
++ err = -ENOMEM;
++ if (!inst)
++ goto out_put_auth;
++
++ ctx = aead_instance_ctx(inst);
++
++ err = crypto_init_ahash_spawn(&ctx->auth, auth,
++ aead_crypto_instance(inst));
++ if (err)
++ goto err_free_inst;
++
++ crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
++ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
++ crypto_requires_sync(algt->type,
++ algt->mask));
++ if (err)
++ goto err_drop_auth;
++
++ enc = crypto_spawn_skcipher_alg(&ctx->enc);
++
++ err = -ENAMETOOLONG;
++ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
++ "tls10(%s,%s)", auth_base->cra_name,
++ enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
++ goto err_drop_enc;
++
++ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
++ "tls10(%s,%s)", auth_base->cra_driver_name,
++ enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
++ goto err_drop_enc;
++
++ inst->alg.base.cra_flags = (auth_base->cra_flags |
++ enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
++ inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
++ auth_base->cra_priority;
++ inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
++ inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
++ enc->base.cra_alignmask;
++ inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
++
++ inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
++ inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
++ inst->alg.maxauthsize = auth->digestsize;
++
++ inst->alg.init = crypto_tls_init_tfm;
++ inst->alg.exit = crypto_tls_exit_tfm;
++
++ inst->alg.setkey = crypto_tls_setkey;
++ inst->alg.encrypt = crypto_tls_encrypt;
++ inst->alg.decrypt = crypto_tls_decrypt;
++
++ inst->free = crypto_tls_free;
++
++ err = aead_register_instance(tmpl, inst);
++ if (err)
++ goto err_drop_enc;
++
++out:
++ crypto_mod_put(auth_base);
++ return err;
++
++err_drop_enc:
++ crypto_drop_skcipher(&ctx->enc);
++err_drop_auth:
++ crypto_drop_ahash(&ctx->auth);
++err_free_inst:
++ kfree(inst);
++out_put_auth:
++ goto out;
++}
++
++static struct crypto_template crypto_tls_tmpl = {
++ .name = "tls10",
++ .create = crypto_tls_create,
++ .module = THIS_MODULE,
++};
++
++static int __init crypto_tls_module_init(void)
++{
++ return crypto_register_template(&crypto_tls_tmpl);
++}
++
++static void __exit crypto_tls_module_exit(void)
++{
++ crypto_unregister_template(&crypto_tls_tmpl);
++}
++
++module_init(crypto_tls_module_init);
++module_exit(crypto_tls_module_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("TLS 1.0 record encryption");
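Once the template is registered, an instance is allocated by name like any other AEAD. A hedged usage sketch (the algorithm string and authsize are examples; keyblob is assumed packed as in the crypto_tls_setkey() discussion above):

    #include <crypto/aead.h>
    #include <linux/err.h>

    static struct crypto_aead *tls10_alloc_example(const u8 *keyblob,
                                                   unsigned int keylen)
    {
            struct crypto_aead *tfm;
            int err;

            tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
            if (IS_ERR(tfm))
                    return tfm;

            err = crypto_aead_setauthsize(tfm, 20);  /* HMAC-SHA1 ICV */
            if (!err)
                    err = crypto_aead_setkey(tfm, keyblob, keylen);
            if (err) {
                    crypto_free_aead(tfm);
                    return ERR_PTR(err);
            }
            return tfm;
    }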
+diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
+index 64bf3024..3831a6f7 100644
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -1,6 +1,11 @@
++config CRYPTO_DEV_FSL_CAAM_COMMON
++ tristate
++
+ config CRYPTO_DEV_FSL_CAAM
+- tristate "Freescale CAAM-Multicore driver backend"
++ tristate "Freescale CAAM-Multicore platform driver backend"
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
++ select CRYPTO_DEV_FSL_CAAM_COMMON
++ select SOC_BUS
+ help
+ Enables the driver module for Freescale's Cryptographic Accelerator
+ and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+@@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
+ To compile this driver as a module, choose M here: the module
+ will be called caam.
+
++if CRYPTO_DEV_FSL_CAAM
++
++config CRYPTO_DEV_FSL_CAAM_DEBUG
++ bool "Enable debug output in CAAM driver"
++ help
++ Selecting this will enable printing of various debug
++ information in the CAAM driver.
++
+ config CRYPTO_DEV_FSL_CAAM_JR
+ tristate "Freescale CAAM Job Ring driver backend"
+- depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ help
+ Enables the driver module for Job Rings which are part of
+@@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
+ To compile this driver as a module, choose M here: the module
+ will be called caam_jr.
+
++if CRYPTO_DEV_FSL_CAAM_JR
++
+ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+ int "Job Ring size"
+- depends on CRYPTO_DEV_FSL_CAAM_JR
+ range 2 9
+ default "9"
+ help
+@@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+
+ config CRYPTO_DEV_FSL_CAAM_INTC
+ bool "Job Ring interrupt coalescing"
+- depends on CRYPTO_DEV_FSL_CAAM_JR
+ help
+ Enable the Job Ring's interrupt coalescing feature.
+
+@@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+
+ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ tristate "Register algorithm implementations with the Crypto API"
+- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+@@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ To compile this as a module, choose M here: the module
+ will be called caamalg.
+
++config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
++ tristate "Queue Interface as Crypto API backend"
++ depends on FSL_SDK_DPA && NET
++ default y
++ select CRYPTO_AUTHENC
++ select CRYPTO_BLKCIPHER
++ help
++ Selecting this will use the CAAM Queue Interface (QI) for sending
++ and receiving crypto jobs to/from CAAM. This gives better
++ performance than the job ring interface when the number of cores
++ is greater than the number of job rings assigned to the kernel.
++ The number of portals assigned to the kernel should also exceed
++ the number of job rings.
++
++ To compile this as a module, choose M here: the module
++ will be called caamalg_qi.
++
+ config CRYPTO_DEV_FSL_CAAM_AHASH_API
+ tristate "Register hash algorithm implementations with Crypto API"
+- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_HASH
+ help
+@@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
+
+ config CRYPTO_DEV_FSL_CAAM_PKC_API
+ tristate "Register public key cryptography implementations with Crypto API"
+- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_RSA
+ help
+@@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
+
+ config CRYPTO_DEV_FSL_CAAM_RNG_API
+ tristate "Register caam device for hwrng API"
+- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_RNG
+ select HW_RANDOM
+@@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
+ To compile this as a module, choose M here: the module
+ will be called caamrng.
+
+-config CRYPTO_DEV_FSL_CAAM_IMX
+- def_bool SOC_IMX6 || SOC_IMX7D
+- depends on CRYPTO_DEV_FSL_CAAM
++endif # CRYPTO_DEV_FSL_CAAM_JR
+
+-config CRYPTO_DEV_FSL_CAAM_DEBUG
+- bool "Enable debug output in CAAM driver"
+- depends on CRYPTO_DEV_FSL_CAAM
+- help
+- Selecting this will enable printing of various debug
+- information in the CAAM driver.
++endif # CRYPTO_DEV_FSL_CAAM
++
++config CRYPTO_DEV_FSL_DPAA2_CAAM
++ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
++ depends on FSL_MC_DPIO
++ select CRYPTO_DEV_FSL_CAAM_COMMON
++ select CRYPTO_BLKCIPHER
++ select CRYPTO_AUTHENC
++ select CRYPTO_AEAD
++ ---help---
++ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
++ It handles DPSECI DPAA2 objects that sit on the Management Complex
++ (MC) fsl-mc bus.
++
++ To compile this as a module, choose M here: the module
++ will be called dpaa2_caam.
++
++config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
++ def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
++ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
++ CRYPTO_DEV_FSL_DPAA2_CAAM)
+diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
+index 08bf5515..01f73a25 100644
+--- a/drivers/crypto/caam/Makefile
++++ b/drivers/crypto/caam/Makefile
+@@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
+ ccflags-y := -DDEBUG
+ endif
+
++ccflags-y += -DVERSION=\"\"
++
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
+
+ caam-objs := ctrl.o
+-caam_jr-objs := jr.o key_gen.o error.o
++caam_jr-objs := jr.o key_gen.o
+ caam_pkc-y := caampkc.o pkc_desc.o
++ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
++ ccflags-y += -DCONFIG_CAAM_QI
++ caam-objs += qi.o
++endif
++
++obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
++
++dpaa2_caam-y := caamalg_qi2.o dpseci.o
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index 0d743c63..abf2f52b 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -2,6 +2,7 @@
+ * caam - Freescale FSL CAAM support for crypto API
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright 2016 NXP
+ *
+ * Based on talitos crypto API driver.
+ *
+@@ -53,6 +54,7 @@
+ #include "error.h"
+ #include "sg_sw_sec4.h"
+ #include "key_gen.h"
++#include "caamalg_desc.h"
+
+ /*
+ * crypto alg
+@@ -62,8 +64,6 @@
+ #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
+ CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+-#define CAAM_MAX_IV_LENGTH 16
+
+ #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
+ #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
+@@ -71,37 +71,6 @@
+ #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
+ CAAM_CMD_SZ * 5)
+
+-/* length of descriptors text */
+-#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
+-
+-/* Note: Nonce is counted in enckeylen */
+-#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
+-
+-#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+-
+-#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+-
+-#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+-
+-#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+-
+-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+- 20 * CAAM_CMD_SZ)
+-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+- 15 * CAAM_CMD_SZ)
+-
+ #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
+ #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+@@ -112,47 +81,11 @@
+ #define debug(format, arg...)
+ #endif
+
+-#ifdef DEBUG
+-#include <linux/highmem.h>
+-
+-static void dbg_dump_sg(const char *level, const char *prefix_str,
+- int prefix_type, int rowsize, int groupsize,
+- struct scatterlist *sg, size_t tlen, bool ascii,
+- bool may_sleep)
+-{
+- struct scatterlist *it;
+- void *it_page;
+- size_t len;
+- void *buf;
+-
+- for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
+- /*
+- * make sure the scatterlist's page
+- * has a valid virtual memory mapping
+- */
+- it_page = kmap_atomic(sg_page(it));
+- if (unlikely(!it_page)) {
+- printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
+- return;
+- }
+-
+- buf = it_page + it->offset;
+- len = min_t(size_t, tlen, it->length);
+- print_hex_dump(level, prefix_str, prefix_type, rowsize,
+- groupsize, buf, len, ascii);
+- tlen -= len;
+-
+- kunmap_atomic(it_page);
+- }
+-}
+-#endif
+-
+ static struct list_head alg_list;
+
+ struct caam_alg_entry {
+ int class1_alg_type;
+ int class2_alg_type;
+- int alg_op;
+ bool rfc3686;
+ bool geniv;
+ };
+@@ -163,302 +96,67 @@ struct caam_aead_alg {
+ bool registered;
+ };
+
+-/* Set DK bit in class 1 operation if shared */
+-static inline void append_dec_op1(u32 *desc, u32 type)
+-{
+- u32 *jump_cmd, *uncond_jump_cmd;
+-
+- /* DK bit is valid only for AES */
+- if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+- append_operation(desc, type | OP_ALG_AS_INITFINAL |
+- OP_ALG_DECRYPT);
+- return;
+- }
+-
+- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+- append_operation(desc, type | OP_ALG_AS_INITFINAL |
+- OP_ALG_DECRYPT);
+- uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+- set_jump_tgt_here(desc, jump_cmd);
+- append_operation(desc, type | OP_ALG_AS_INITFINAL |
+- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+- set_jump_tgt_here(desc, uncond_jump_cmd);
+-}
+-
+-/*
+- * For aead functions, read payload and write payload,
+- * both of which are specified in req->src and req->dst
+- */
+-static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+-{
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+- KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+-}
+-
+-/*
+- * For ablkcipher encrypt and decrypt, read from req->src and
+- * write to req->dst
+- */
+-static inline void ablkcipher_append_src_dst(u32 *desc)
+-{
+- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+- KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+-}
+-
+ /*
+ * per-session context
+ */
+ struct caam_ctx {
+- struct device *jrdev;
+ u32 sh_desc_enc[DESC_MAX_USED_LEN];
+ u32 sh_desc_dec[DESC_MAX_USED_LEN];
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
++ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t sh_desc_enc_dma;
+ dma_addr_t sh_desc_dec_dma;
+ dma_addr_t sh_desc_givenc_dma;
+- u32 class1_alg_type;
+- u32 class2_alg_type;
+- u32 alg_op;
+- u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
+- unsigned int enckeylen;
+- unsigned int split_key_len;
+- unsigned int split_key_pad_len;
++ struct device *jrdev;
++ struct alginfo adata;
++ struct alginfo cdata;
+ unsigned int authsize;
+ };
+
+-static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+- int keys_fit_inline, bool is_rfc3686)
+-{
+- u32 *nonce;
+- unsigned int enckeylen = ctx->enckeylen;
+-
+- /*
+- * RFC3686 specific:
+- * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
+- * | enckeylen = encryption key size + nonce size
+- */
+- if (is_rfc3686)
+- enckeylen -= CTR_RFC3686_NONCE_SIZE;
+-
+- if (keys_fit_inline) {
+- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+- ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+- append_key_as_imm(desc, (void *)ctx->key +
+- ctx->split_key_pad_len, enckeylen,
+- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- } else {
+- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+- append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- }
+-
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686) {
+- nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
+- enckeylen);
+- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+- LDST_CLASS_IND_CCB |
+- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+- append_move(desc,
+- MOVE_SRC_OUTFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (16 << MOVE_OFFSET_SHIFT) |
+- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+- }
+-}
+-
+-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+- int keys_fit_inline, bool is_rfc3686)
+-{
+- u32 *key_jump_cmd;
+-
+- /* Note: Context registers are saved. */
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+-
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+-
+- append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+-
+- set_jump_tgt_here(desc, key_jump_cmd);
+-}
+-
+ static int aead_null_set_sh_desc(struct crypto_aead *aead)
+ {
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline = false;
+- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
+ u32 *desc;
++ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
++ ctx->adata.keylen_pad;
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
++ if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
++ ctx->adata.key_inline = true;
++ ctx->adata.key_virt = ctx->key;
++ } else {
++ ctx->adata.key_inline = false;
++ ctx->adata.key_dma = ctx->key_dma;
++ }
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+- ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+- else
+- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* assoclen + cryptlen = seqinlen */
+- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Prepare to read and write cryptlen + assoclen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /*
+- * MOVE_LEN opcode is not available in all SEC HW revisions,
+- * thus need to do some magic, i.e. self-patch the descriptor
+- * buffer.
+- */
+- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+- MOVE_DEST_MATH3 |
+- (0x6 << MOVE_LEN_SHIFT));
+- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+- MOVE_DEST_DESCBUF |
+- MOVE_WAITCOMP |
+- (0x8 << MOVE_LEN_SHIFT));
+-
+- /* Class 2 operation */
+- append_operation(desc, ctx->class2_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Read and write cryptlen bytes */
+- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+-
+- set_move_tgt_here(desc, read_move_cmd);
+- set_move_tgt_here(desc, write_move_cmd);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+- MOVE_AUX_LS);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "aead null enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+- if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- desc = ctx->sh_desc_dec;
++ if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
++ ctx->adata.key_inline = true;
++ ctx->adata.key_virt = ctx->key;
++ } else {
++ ctx->adata.key_inline = false;
++ ctx->adata.key_dma = ctx->key_dma;
++ }
+
+ /* aead_decrypt shared descriptor */
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+- ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+- else
+- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Class 2 operation */
+- append_operation(desc, ctx->class2_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+-
+- /* assoclen + cryptlen = seqoutlen */
+- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Prepare to read and write cryptlen + assoclen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+-
+- /*
+- * MOVE_LEN opcode is not available in all SEC HW revisions,
+- * thus need to do some magic, i.e. self-patch the descriptor
+- * buffer.
+- */
+- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+- MOVE_DEST_MATH2 |
+- (0x6 << MOVE_LEN_SHIFT));
+- write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+- MOVE_DEST_DESCBUF |
+- MOVE_WAITCOMP |
+- (0x8 << MOVE_LEN_SHIFT));
+-
+- /* Read and write cryptlen bytes */
+- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+-
+- /*
+- * Insert a NOP here, since we need at least 4 instructions between
+- * code patching the descriptor buffer and the location being patched.
+- */
+- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+- set_jump_tgt_here(desc, jump_cmd);
+-
+- set_move_tgt_here(desc, read_move_cmd);
+- set_move_tgt_here(desc, write_move_cmd);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+- MOVE_AUX_LS);
+- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+-
+- /* Load ICV */
+- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "aead null dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ desc = ctx->sh_desc_dec;
++ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+ }
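The hunk above replaces the old per-descriptor "keys_fit_inline" arithmetic with a single rem_bytes budget: the 64-word (256-byte) descriptor buffer, minus the job/IO overhead and the padded split key, is computed once and compared against each descriptor body length. A key that fits is copied into the descriptor as immediate data (key_virt); otherwise the descriptor carries a bus address (key_dma). A minimal plain-C sketch of that decision follows; CAAM_DESC_BYTES_MAX matches the 64-word limit noted in the comments, while AEAD_DESC_JOB_IO_LEN here is an assumed stand-in value, not the driver's constant.

#include <stdbool.h>
#include <stdio.h>

#define CAAM_DESC_BYTES_MAX   256  /* 64 words x 4 bytes */
#define AEAD_DESC_JOB_IO_LEN   48  /* placeholder overhead, not the real value */

struct key_ref {
    bool inline_key;        /* true: key embedded in the descriptor */
    const void *key_virt;   /* valid when inline_key is true        */
    unsigned long key_dma;  /* valid when inline_key is false       */
};

static void choose_key_ref(struct key_ref *ref, unsigned int desc_len,
                           unsigned int keylen_pad, const void *key_virt,
                           unsigned long key_dma)
{
    /* bytes left in the shared descriptor once the padded key is inlined */
    int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - keylen_pad;

    if (rem_bytes >= (int)desc_len) {
        ref->inline_key = true;
        ref->key_virt = key_virt;   /* immediate copy into the descriptor */
    } else {
        ref->inline_key = false;
        ref->key_dma = key_dma;     /* descriptor holds a bus pointer */
    }
}

int main(void)
{
    static const unsigned char key[64];
    struct key_ref ref;

    choose_key_ref(&ref, 120, sizeof(key), key, 0x1000);
    printf("key inlined: %s\n", ref.inline_key ? "yes" : "no");
    return 0;
}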
+@@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline;
+- u32 geniv, moveiv;
+ u32 ctx1_iv_off = 0;
+- u32 *desc;
+- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
++ u32 *desc, *nonce = NULL;
++ u32 inl_mask;
++ unsigned int data_len[2];
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+@@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
+ return 0;
+
+ /* NULL encryption / decryption */
+- if (!ctx->enckeylen)
++ if (!ctx->cdata.keylen)
+ return aead_null_set_sh_desc(aead);
+
+ /*
+@@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+- if (is_rfc3686)
++ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
++ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
++ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
++ }
++
++ data_len[0] = ctx->adata.keylen_pad;
++ data_len[1] = ctx->cdata.keylen;
+
+ if (alg->caam.geniv)
+ goto skip_enc;
+@@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+- if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len + ctx->enckeylen +
+- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
+- CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- /* aead_encrypt shared descriptor */
+- desc = ctx->sh_desc_enc;
+-
+- /* Note: Context registers are saved. */
+- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+-
+- /* Class 2 operation */
+- append_operation(desc, ctx->class2_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
++ if (desc_inline_query(DESC_AEAD_ENC_LEN +
++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
++ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
++ ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
+
+- /* Read and write assoclen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
++ else
++ ctx->adata.key_dma = ctx->key_dma;
+
+- /* Skip assoc data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
++ else
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+- /* read assoc before reading payload */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+- FIFOLDST_VLF);
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Read and write cryptlen bytes */
+- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ /* aead_encrypt shared descriptor */
++ desc = ctx->sh_desc_enc;
++ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
++ false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ skip_enc:
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+- if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len + ctx->enckeylen +
+- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
+- CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- /* aead_decrypt shared descriptor */
+- desc = ctx->sh_desc_dec;
+-
+- /* Note: Context registers are saved. */
+- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+-
+- /* Class 2 operation */
+- append_operation(desc, ctx->class2_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
++ if (desc_inline_query(DESC_AEAD_DEC_LEN +
++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
++ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
++ ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
+
+- /* Read and write assoclen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+- if (alg->caam.geniv)
+- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
+ else
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /* Skip assoc data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
++ ctx->adata.key_dma = ctx->key_dma;
+
+- /* read assoc before reading payload */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+- KEY_VLF);
+-
+- if (alg->caam.geniv) {
+- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- (ctx1_iv_off << LDST_OFFSET_SHIFT));
+- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+- }
+-
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
+-
+- /* Choose operation */
+- if (ctr_mode)
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+- append_dec_op1(desc, ctx->class1_alg_type);
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+- /* Read and write cryptlen bytes */
+- append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+- aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+- /* Load ICV */
+- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ /* aead_decrypt shared descriptor */
++ desc = ctx->sh_desc_dec;
++ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, alg->caam.geniv, is_rfc3686,
++ nonce, ctx1_iv_off, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ if (!alg->caam.geniv)
+ goto skip_givenc;
+@@ -655,107 +277,32 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+- if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len + ctx->enckeylen +
+- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
+- CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
++ if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
++ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
++ ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
+
+- /* aead_givencrypt shared descriptor */
+- desc = ctx->sh_desc_enc;
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
++ else
++ ctx->adata.key_dma = ctx->key_dma;
+
+- /* Note: Context registers are saved. */
+- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
++ else
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+- if (is_rfc3686)
+- goto copy_iv;
+-
+- /* Generate IV */
+- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+- NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+- (ivsize << MOVE_LEN_SHIFT));
+- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+-
+-copy_iv:
+- /* Copy IV to class 1 context */
+- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+- (ivsize << MOVE_LEN_SHIFT));
+-
+- /* Return to encryption */
+- append_operation(desc, ctx->class2_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Read and write assoclen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /* ivsize + cryptlen = seqoutlen - authsize */
+- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+-
+- /* Skip assoc data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+-
+- /* read assoc before reading payload */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+- KEY_VLF);
+-
+- /* Copy iv from outfifo to class 2 fifo */
+- moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+- NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+- append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+- append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+- LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+-
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Will write ivsize + cryptlen */
+- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Not need to reload iv */
+- append_seq_fifo_load(desc, ivsize,
+- FIFOLD_CLASS_SKIP);
+-
+- /* Will read cryptlen */
+- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+- FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
++
++ /* aead_givencrypt shared descriptor */
++ desc = ctx->sh_desc_enc;
++ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, is_rfc3686, nonce,
++ ctx1_iv_off, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ skip_givenc:
+ return 0;
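For authenc algorithms two keys compete for the same descriptor space, so the rewrite queries both at once: desc_inline_query() fills a bitmask, bit 0 for the split authentication key and bit 1 for the cipher key, and each key is then referenced either inline or by DMA address accordingly. The snippet below is a simplified userspace model of that contract, not the driver's implementation — the real helper also has to reserve pointer-sized slots for keys that do not fit, which this version omits.

#include <stdio.h>

#define CAAM_DESC_BYTES_MAX 256

static int inline_query(unsigned int sd_base_len, unsigned int jd_len,
                        const unsigned int data_len[2], unsigned int *inl_mask)
{
    int rem = CAAM_DESC_BYTES_MAX - sd_base_len - jd_len;
    unsigned int i;

    if (rem < 0)
        return -1;              /* descriptor can never fit */

    *inl_mask = 0;
    for (i = 0; i < 2; i++) {
        /*
         * Prefer inlining: it saves one DMA read per job. A key is
         * inlined only while it still fits in the remaining space.
         */
        if ((int)data_len[i] <= rem) {
            *inl_mask |= 1u << i;
            rem -= data_len[i];
        }
    }
    return 0;
}

int main(void)
{
    /* data_len[0] = adata.keylen_pad, data_len[1] = cdata.keylen */
    unsigned int data_len[2] = { 64, 32 };
    unsigned int mask;

    if (!inline_query(100, 48, data_len, &mask))
        printf("auth key inline: %u, cipher key inline: %u\n",
               mask & 1, (mask >> 1) & 1);
    return 0;
}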
+@@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
+ {
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline = false;
+- u32 *key_jump_cmd, *zero_payload_jump_cmd,
+- *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
++ unsigned int ivsize = crypto_aead_ivsize(aead);
+ u32 *desc;
++ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
+
+- if (!ctx->enckeylen || !ctx->authsize)
++ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+@@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+- if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
++ if (rem_bytes >= DESC_GCM_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
+
+ desc = ctx->sh_desc_enc;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* skip key loading if they are loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD | JUMP_COND_SELF);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* if assoclen + cryptlen is ZERO, skip to ICV write */
+- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+-
+- /* if assoclen is ZERO, skip reading the assoc data */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+-
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /* skip assoc data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+-
+- /* cryptlen = seqinlen - assoclen */
+- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+-
+- /* if cryptlen is ZERO jump to zero-payload commands */
+- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+-
+- /* read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+-
+- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* write encrypted data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* read payload data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+-
+- /* jump the zero-payload commands */
+- append_jump(desc, JUMP_TEST_ALL | 2);
+-
+- /* zero-payload commands */
+- set_jump_tgt_here(desc, zero_payload_jump_cmd);
+-
+- /* read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+-
+- /* There is no input data */
+- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+-
+- /* write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+- if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
++ if (rem_bytes >= DESC_GCM_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
+
+ desc = ctx->sh_desc_dec;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* skip key loading if they are loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL |
+- JUMP_TEST_ALL | JUMP_COND_SHRD |
+- JUMP_COND_SELF);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+-
+- /* if assoclen is ZERO, skip reading the assoc data */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+-
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /* skip assoc data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+-
+- /* read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+-
+- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+-
+- /* cryptlen = seqoutlen - assoclen */
+- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+-
+- /* jump to zero-payload command if cryptlen is zero */
+- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+-
+- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+-
+- /* store encrypted data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* read payload data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+-
+- /* zero-payload command */
+- set_jump_tgt_here(desc, zero_payload_jump_cmd);
+-
+- /* read ICV */
+- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+ }
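Note also the DMA pattern change running through all of these setters: the old code mapped each shared descriptor with dma_map_single() on every rewrite (and had to unmap on error), while the new code only calls dma_sync_single_for_device(), which presumes the mapping was created once earlier — presumably at tfm init, in a part of the patch outside this excerpt. An illustrative kernel-style sketch of that map-once/sync-per-update lifecycle, not taken from the patch:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct my_ctx {
    struct device *dev;
    u32 sh_desc[64];            /* 64-word shared descriptor buffer */
    dma_addr_t sh_desc_dma;
};

static int my_ctx_init(struct my_ctx *ctx)
{
    /* one mapping for the context lifetime (old code mapped per setkey) */
    ctx->sh_desc_dma = dma_map_single(ctx->dev, ctx->sh_desc,
                                      sizeof(ctx->sh_desc), DMA_TO_DEVICE);
    return dma_mapping_error(ctx->dev, ctx->sh_desc_dma) ? -ENOMEM : 0;
}

static void my_ctx_update(struct my_ctx *ctx)
{
    /* ... rewrite ctx->sh_desc via a cnstr_shdsc_*() helper ... */

    /* hand the CPU's writes back to the device-visible copy */
    dma_sync_single_for_device(ctx->dev, ctx->sh_desc_dma,
                               sizeof(ctx->sh_desc), DMA_TO_DEVICE);
}

static void my_ctx_exit(struct my_ctx *ctx)
{
    dma_unmap_single(ctx->dev, ctx->sh_desc_dma, sizeof(ctx->sh_desc),
                     DMA_TO_DEVICE);
}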
+@@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+ {
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline = false;
+- u32 *key_jump_cmd;
++ unsigned int ivsize = crypto_aead_ivsize(aead);
+ u32 *desc;
++ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
+
+- if (!ctx->enckeylen || !ctx->authsize)
++ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+@@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+- if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
++ if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
+
+ desc = ctx->sh_desc_enc;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /* Read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+-
+- /* Skip IV */
+- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+-
+- /* Will read cryptlen bytes */
+- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+-
+- /* Skip assoc data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+-
+- /* cryptlen = seqoutlen - assoclen */
+- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Write encrypted data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* Read payload data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
++ false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+- if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
++ if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
+
+ desc = ctx->sh_desc_dec;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL |
+- JUMP_TEST_ALL | JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+-
+- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /* Read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+-
+- /* Skip IV */
+- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+-
+- /* Will read cryptlen bytes */
+- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+-
+- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+-
+- /* Skip assoc data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+-
+- /* Will write cryptlen bytes */
+- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Store payload data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* Read encrypted data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+-
+- /* Read ICV */
+- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
++ false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+ }
+@@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+ {
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline = false;
+- u32 *key_jump_cmd;
+- u32 *read_move_cmd, *write_move_cmd;
++ unsigned int ivsize = crypto_aead_ivsize(aead);
+ u32 *desc;
++ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
+
+- if (!ctx->enckeylen || !ctx->authsize)
++ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+@@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+- if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- desc = ctx->sh_desc_enc;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* assoclen + cryptlen = seqinlen */
+- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /*
+- * MOVE_LEN opcode is not available in all SEC HW revisions,
+- * thus need to do some magic, i.e. self-patch the descriptor
+- * buffer.
+- */
+- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+- (0x6 << MOVE_LEN_SHIFT));
+- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+- (0x8 << MOVE_LEN_SHIFT));
+-
+- /* Will read assoclen + cryptlen bytes */
+- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Will write assoclen + cryptlen bytes */
+- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Read and write assoclen + cryptlen bytes */
+- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+-
+- set_move_tgt_here(desc, read_move_cmd);
+- set_move_tgt_here(desc, write_move_cmd);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- /* Move payload data to OFIFO */
+- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
++ if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
+ }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++
++ desc = ctx->sh_desc_enc;
++ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
++ false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+- if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
++ if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
+
+ desc = ctx->sh_desc_dec;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL |
+- JUMP_TEST_ALL | JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+-
+- /* assoclen + cryptlen = seqoutlen */
+- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+-
+- /*
+- * MOVE_LEN opcode is not available in all SEC HW revisions,
+- * thus need to do some magic, i.e. self-patch the descriptor
+- * buffer.
+- */
+- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+- (0x6 << MOVE_LEN_SHIFT));
+- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+- (0x8 << MOVE_LEN_SHIFT));
+-
+- /* Will read assoclen + cryptlen bytes */
+- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Will write assoclen + cryptlen bytes */
+- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Store payload data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* In-snoop assoclen + cryptlen data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+-
+- set_move_tgt_here(desc, read_move_cmd);
+- set_move_tgt_here(desc, write_move_cmd);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- /* Move payload data to OFIFO */
+- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+-
+- /* Read ICV */
+- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
++ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
++ false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+ }
+@@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ return 0;
+ }
+
+-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
+- u32 authkeylen)
+-{
+- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+- ctx->split_key_pad_len, key_in, authkeylen,
+- ctx->alg_op);
+-}
+-
+ static int aead_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+ {
+- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ struct crypto_authenc_keys keys;
+@@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aead *aead,
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+- /* Pick class 2 key length from algorithm submask */
+- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+- OP_ALG_ALGSEL_SHIFT] * 2;
+- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+-
+- if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+- goto badkey;
+-
+ #ifdef DEBUG
+ printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+- ctx->split_key_len, ctx->split_key_pad_len);
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ #endif
+
+- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
++ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
++ keys.authkeylen, CAAM_MAX_KEY_SIZE -
++ keys.enckeylen);
+ if (ret) {
+ goto badkey;
+ }
+
+ /* postpend encryption key to auth split key */
+- memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+-
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+- keys.enckeylen, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+- ctx->split_key_pad_len + keys.enckeylen, 1);
++ ctx->adata.keylen_pad + keys.enckeylen, 1);
+ #endif
+-
+- ctx->enckeylen = keys.enckeylen;
+-
+- ret = aead_set_sh_desc(aead);
+- if (ret) {
+- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+- keys.enckeylen, DMA_TO_DEVICE);
+- }
+-
+- return ret;
++ ctx->cdata.keylen = keys.enckeylen;
++ return aead_set_sh_desc(aead);
+ badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
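aead_setkey() now delegates split-key sizing to gen_split_key(), passing &ctx->adata and the space remaining after the encryption key, instead of keeping the MDHA pad table locally. The resulting key buffer layout is unchanged: the split authentication key padded to keylen_pad, followed by the raw encryption key. A userspace sketch of that layout follows; the split-key generation itself is hardware-assisted in CAAM and is stubbed out here, and CAAM_MAX_KEY_SIZE is a placeholder value.

#include <string.h>
#include <stdio.h>

#define CAAM_MAX_KEY_SIZE 128   /* placeholder, not the driver's value */

struct alginfo {
    unsigned int keylen;
    unsigned int keylen_pad;
};

/* stub: a real implementation runs an MDHA split-key job on the SEC */
static int gen_split_key(unsigned char *out, struct alginfo *adata,
                         const unsigned char *authkey, unsigned int authkeylen,
                         int max_keylen)
{
    adata->keylen = 2 * 20;                         /* e.g. SHA-1 split key */
    adata->keylen_pad = (adata->keylen + 15) & ~15u; /* pad to 16 bytes     */
    if ((int)adata->keylen_pad > max_keylen)
        return -1;                                  /* would overflow buffer */
    memset(out, 0, adata->keylen_pad);
    memcpy(out, authkey,
           authkeylen < adata->keylen ? authkeylen : adata->keylen);
    return 0;
}

int main(void)
{
    unsigned char key[CAAM_MAX_KEY_SIZE];
    unsigned char authkey[20] = { 0 }, enckey[16] = { 0 };
    struct alginfo adata;

    if (gen_split_key(key, &adata, authkey, sizeof(authkey),
                      CAAM_MAX_KEY_SIZE - (int)sizeof(enckey)))
        return 1;
    /* append the encryption key right after the padded split key */
    memcpy(key + adata.keylen_pad, enckey, sizeof(enckey));
    printf("split key pad %u, total %zu\n", adata.keylen_pad,
           adata.keylen_pad + sizeof(enckey));
    return 0;
}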
+@@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead *aead,
+ {
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- int ret = 0;
+
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+@@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead *aead,
+ #endif
+
+ memcpy(ctx->key, key, keylen);
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
+- ctx->enckeylen = keylen;
+-
+- ret = gcm_set_sh_desc(aead);
+- if (ret) {
+- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- }
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
+
+- return ret;
++ return gcm_set_sh_desc(aead);
+ }
+
+ static int rfc4106_setkey(struct crypto_aead *aead,
+@@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_aead *aead,
+ {
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- int ret = 0;
+
+ if (keylen < 4)
+ return -EINVAL;
+@@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_aead *aead,
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+- ctx->enckeylen = keylen - 4;
+-
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
+-
+- ret = rfc4106_set_sh_desc(aead);
+- if (ret) {
+- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- }
+-
+- return ret;
++ ctx->cdata.keylen = keylen - 4;
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
++ DMA_TO_DEVICE);
++ return rfc4106_set_sh_desc(aead);
+ }
+
+ static int rfc4543_setkey(struct crypto_aead *aead,
+@@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_aead *aead,
+ {
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- int ret = 0;
+
+ if (keylen < 4)
+ return -EINVAL;
+@@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_aead *aead,
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+- ctx->enckeylen = keylen - 4;
+-
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
+-
+- ret = rfc4543_set_sh_desc(aead);
+- if (ret) {
+- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- }
+-
+- return ret;
++ ctx->cdata.keylen = keylen - 4;
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
++ DMA_TO_DEVICE);
++ return rfc4543_set_sh_desc(aead);
+ }
+
+ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+ {
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+- struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct device *jrdev = ctx->jrdev;
+- int ret = 0;
+- u32 *key_jump_cmd;
++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc;
+- u8 *nonce;
+- u32 geniv;
+ u32 ctx1_iv_off = 0;
+- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode &&
+ (strstr(alg_name, "rfc3686") != NULL));
+
++ memcpy(ctx->key, key, keylen);
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+@@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+- memcpy(ctx->key, key, keylen);
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
+- ctx->enckeylen = keylen;
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
+
+ /* ablkcipher_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+-
+- /* Load class1 key only */
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 |
+- KEY_DEST_CLASS_REG);
+-
+- /* Load nonce into CONTEXT1 reg */
+- if (is_rfc3686) {
+- nonce = (u8 *)key + keylen;
+- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+- LDST_CLASS_IND_CCB |
+- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_OUTFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (16 << MOVE_OFFSET_SHIFT) |
+- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+- }
+-
+- set_jump_tgt_here(desc, key_jump_cmd);
++ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
++ ctx1_iv_off);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+- /* Load iv */
+- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+-
+- /* Load counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
+-
+- /* Load operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Perform operation */
+- ablkcipher_append_src_dst(desc);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+ /* ablkcipher_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
++ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
++ ctx1_iv_off);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+-
+- /* Load class1 key only */
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 |
+- KEY_DEST_CLASS_REG);
+-
+- /* Load nonce into CONTEXT1 reg */
+- if (is_rfc3686) {
+- nonce = (u8 *)key + keylen;
+- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+- LDST_CLASS_IND_CCB |
+- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_OUTFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (16 << MOVE_OFFSET_SHIFT) |
+- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+- }
+-
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* load IV */
+- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+-
+- /* Load counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
+-
+- /* Choose operation */
+- if (ctr_mode)
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+- else
+- append_dec_op1(desc, ctx->class1_alg_type);
+-
+- /* Perform operation */
+- ablkcipher_append_src_dst(desc);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+ /* ablkcipher_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
++ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
++ ctx1_iv_off);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+-
+- /* Load class1 key only */
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 |
+- KEY_DEST_CLASS_REG);
+-
+- /* Load Nonce into CONTEXT1 reg */
+- if (is_rfc3686) {
+- nonce = (u8 *)key + keylen;
+- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+- LDST_CLASS_IND_CCB |
+- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_OUTFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (16 << MOVE_OFFSET_SHIFT) |
+- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+- }
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Generate IV */
+- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+- NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
+- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_INFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (crt->ivsize << MOVE_LEN_SHIFT) |
+- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+-
+- /* Copy generated IV to memory */
+- append_seq_store(desc, crt->ivsize,
+- LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+- (ctx1_iv_off << LDST_OFFSET_SHIFT));
+-
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
+-
+- if (ctx1_iv_off)
+- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+- (1 << JUMP_OFFSET_SHIFT));
+-
+- /* Load operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Perform operation */
+- ablkcipher_append_src_dst(desc);
+-
+- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- return ret;
++ return 0;
+ }
+
+ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
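For rfc3686(ctr(aes)), the key blob carries a 4-byte nonce after the AES key; setkey strips it off, and the descriptor templates rebuild CONTEXT1[255:128] as {NONCE, IV, COUNTER}, which is why ctx1_iv_off is threaded through these helpers. Below is a userspace model of that register layout, using the kernel's CTR_RFC3686_* sizes; the 32-byte buffer only stands in for the hardware context register, and the IV offset is taken from the aead path shown earlier.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE    8

int main(void)
{
    /* rfc3686 key blob = {AES key, 4-byte nonce} */
    uint8_t key[16 + CTR_RFC3686_NONCE_SIZE] = { 0 };
    unsigned int keylen = sizeof(key) - CTR_RFC3686_NONCE_SIZE;
    const uint8_t *nonce = key + keylen;

    uint8_t context1[32] = { 0 };   /* model of the first 32B of CONTEXT1 */
    unsigned int ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
    uint8_t iv[CTR_RFC3686_IV_SIZE] = { 0 };
    uint8_t ctr_one[4] = { 0, 0, 0, 1 };    /* big-endian 1 */

    memcpy(&context1[16], nonce, CTR_RFC3686_NONCE_SIZE);
    memcpy(&context1[ctx1_iv_off], iv, sizeof(iv));
    memcpy(&context1[ctx1_iv_off + CTR_RFC3686_IV_SIZE], ctr_one, 4);

    printf("IV offset %u, counter offset %u\n",
           ctx1_iv_off, ctx1_iv_off + CTR_RFC3686_IV_SIZE);
    return 0;
}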
+@@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ {
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+- u32 *key_jump_cmd, *desc;
+- __be64 sector_size = cpu_to_be64(512);
++ u32 *desc;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(ablkcipher,
+@@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ }
+
+ memcpy(ctx->key, key, keylen);
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
+- ctx->enckeylen = keylen;
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
+
+ /* xts_ablkcipher_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+-
+- /* Load class1 keys only */
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+-
+- /* Load sector size with index 40 bytes (0x28) */
+- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
+- append_data(desc, (void *)&sector_size, 8);
+-
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /*
+- * create sequence for loading the sector index
+- * Upper 8B of IV - will be used as sector index
+- * Lower 8B of IV - will be discarded
+- */
+- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
+- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+-
+- /* Load operation */
+- append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
+- OP_ALG_ENCRYPT);
+-
+- /* Perform operation */
+- ablkcipher_append_src_dst(desc);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+-#endif
++ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* xts_ablkcipher_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+-
+- /* Load class1 key only */
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+-
+- /* Load sector size with index 40 bytes (0x28) */
+- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
+- append_data(desc, (void *)&sector_size, 8);
+-
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /*
+- * create sequence for loading the sector index
+- * Upper 8B of IV - will be used as sector index
+- * Lower 8B of IV - will be discarded
+- */
+- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
+- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+-
+- /* Load operation */
+- append_dec_op1(desc, ctx->class1_alg_type);
+-
+- /* Perform operation */
+- ablkcipher_append_src_dst(desc);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
+- desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+-#endif
++ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+ }
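The deleted XTS descriptor bodies above also document the context layout the new cnstr_shdsc_xts_ablkcipher_*() helpers must reproduce: a big-endian 512-byte sector size loaded at context offset 0x28, and the upper 8 bytes of the IV loaded at 0x20 as the sector index (the lower 8 are discarded). A small stand-alone illustration, with to_be64() as a userspace stand-in for cpu_to_be64():

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t to_be64(uint64_t v)
{
    uint8_t b[8];
    for (int i = 0; i < 8; i++)
        b[i] = (uint8_t)(v >> (56 - 8 * i));   /* MSB first */
    memcpy(&v, b, 8);
    return v;
}

int main(void)
{
    uint8_t context1[64] = { 0 };          /* model of the context register */
    uint8_t iv[16] = { 0 };                /* upper 8B = sector index       */
    uint64_t sector_size = to_be64(512);

    memcpy(&context1[0x20], iv, 8);        /* sector index at 0x20 */
    memcpy(&context1[0x28], &sector_size, 8);

    printf("ctx[0x28..]: %02x %02x ... %02x %02x\n",
           context1[0x28], context1[0x29], context1[0x2e], context1[0x2f]);
    return 0;
}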
+
+ /*
+ * aead_edesc - s/w-extended aead descriptor
+- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+- * @src_nents: number of segments in input scatterlist
+- * @dst_nents: number of segments in output scatterlist
+- * @iv_dma: dma address of iv for checking continuity and link table
+- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
++ * @src_nents: number of segments in input s/w scatterlist
++ * @dst_nents: number of segments in output s/w scatterlist
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
++ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+ struct aead_edesc {
+- int assoc_nents;
+ int src_nents;
+ int dst_nents;
+- dma_addr_t iv_dma;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
+@@ -1899,12 +739,12 @@ struct aead_edesc {
+
+ /*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+- * @src_nents: number of segments in input scatterlist
+- * @dst_nents: number of segments in output scatterlist
++ * @src_nents: number of segments in input s/w scatterlist
++ * @dst_nents: number of segments in output s/w scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
++ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+ struct ablkcipher_edesc {
+@@ -1924,10 +764,11 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
+ int sec4_sg_bytes)
+ {
+ if (dst != src) {
+- dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
+- dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
++ if (src_nents)
++ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
++ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+- dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
++ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
+ if (iv_dma)
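caam_unmap() also reflects a convention change: the old code used "nents ? : 1" because a count of zero encoded a single flat buffer, whereas the reworked driver always stores the real segment count (>= 1) — visible again further down, where dst_nents is compared against 1 instead of 0. A trivial sketch of the two conventions:

#include <stdio.h>

static int unmap_segments_old(int nents)
{
    return nents ? nents : 1;   /* 0 meant "one contiguous buffer" */
}

static int unmap_segments_new(int nents)
{
    return nents;               /* count is already 1-based */
}

int main(void)
{
    printf("old: %d, new: %d\n", unmap_segments_old(0), unmap_segments_new(1));
    return 0;
}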
+@@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+
+- edesc = (struct ablkcipher_edesc *)((char *)desc -
+- offsetof(struct ablkcipher_edesc, hw_desc));
++ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+
+ if (err)
+ caam_jr_strstatus(jrdev, err);
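The completion callbacks now recover the software descriptor with container_of() instead of open-coded offsetof() pointer arithmetic — the same address computation, but expressed against the member it belongs to. A minimal plain-C illustration (the macro below mirrors the kernel's definition):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct edesc {
    int src_nents;
    unsigned int hw_desc[16];   /* h/w job descriptor lives inline */
};

int main(void)
{
    struct edesc e = { .src_nents = 3 };
    unsigned int *desc = e.hw_desc;      /* what the done callback receives */
    struct edesc *back = container_of(desc, struct edesc, hw_desc[0]);

    printf("src_nents via container_of: %d\n", back->src_nents);
    return 0;
}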
+@@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ #endif
++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+
+ ablkcipher_unmap(jrdev, edesc, req);
+
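
[Editor's note: the completion callbacks here switch from open-coded pointer arithmetic to container_of(). A minimal, userspace-compilable sketch of the idiom, with struct fields abbreviated — the kernel provides container_of() in <linux/kernel.h>:]

    #include <stddef.h>

    struct ablk_edesc {
    	int src_nents;
    	unsigned int hw_desc[16];	/* flexible array in the real driver */
    };

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    static struct ablk_edesc *edesc_from_desc(unsigned int *desc)
    {
    	/* desc points at hw_desc[0]; step back to the enclosing struct */
    	return container_of(desc, struct ablk_edesc, hw_desc[0]);
    }
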
+@@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+
+- edesc = (struct ablkcipher_edesc *)((char *)desc -
+- offsetof(struct ablkcipher_edesc, hw_desc));
++ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+@@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ #endif
++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+
+ ablkcipher_unmap(jrdev, edesc, req);
+
+@@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_request *req,
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (all_contig) {
+- src_dma = sg_dma_address(req->src);
++ src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+@@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_request *req,
+ out_options = in_options;
+
+ if (unlikely(req->src != req->dst)) {
+- if (!edesc->dst_nents) {
++ if (edesc->dst_nents == 1) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+@@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_request *req,
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
+ /* Append Salt */
+ if (!generic_gcm)
+- append_data(desc, ctx->key + ctx->enckeylen, 4);
++ append_data(desc, ctx->key + ctx->cdata.keylen, 4);
+ /* Append IV */
+ append_data(desc, req->iv, ivsize);
+ /* End of blank commands */
+@@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead_request *req,
+ struct caam_aead_alg, aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+ u32 *desc = edesc->hw_desc;
+@@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ int len, sec4_sg_index = 0;
+
+ #ifdef DEBUG
+- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
+ print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+- printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
+- dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
++ pr_err("asked=%d, nbytes%d\n",
++ (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+ #endif
++ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
++ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+@@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+
+ if (likely(req->src == req->dst)) {
+- if (!edesc->src_nents && iv_contig) {
++ if (edesc->src_nents == 1 && iv_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+@@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ out_options = LDST_SGF;
+ }
+ } else {
+- if (!edesc->dst_nents) {
++ if (edesc->dst_nents == 1) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+@@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ int len, sec4_sg_index = 0;
+
+ #ifdef DEBUG
+- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
+ print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+- dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ #endif
++ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
++ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+- if (!edesc->src_nents) {
++ if (edesc->src_nents == 1) {
+ src_dma = sg_dma_address(req->src);
+ in_options = 0;
+ } else {
+@@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+- int src_nents, dst_nents = 0;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct aead_edesc *edesc;
+- int sgc;
+- bool all_contig = true;
+- int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
++ int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
+ unsigned int authsize = ctx->authsize;
+
+ if (unlikely(req->dst != req->src)) {
+- src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
+- dst_nents = sg_count(req->dst,
+- req->assoclen + req->cryptlen +
+- (encrypt ? authsize : (-authsize)));
+- } else {
+- src_nents = sg_count(req->src,
+- req->assoclen + req->cryptlen +
+- (encrypt ? authsize : 0));
+- }
+-
+- /* Check if data are contiguous. */
+- all_contig = !src_nents;
+- if (!all_contig) {
+- src_nents = src_nents ? : 1;
+- sec4_sg_len = src_nents;
+- }
+-
+- sec4_sg_len += dst_nents;
+-
+- sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen);
++ if (unlikely(src_nents < 0)) {
++ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen);
++ return ERR_PTR(src_nents);
++ }
+
+- /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+- GFP_DMA | flags);
+- if (!edesc) {
+- dev_err(jrdev, "could not allocate extended descriptor\n");
+- return ERR_PTR(-ENOMEM);
++ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize :
++ (-authsize)));
++ if (unlikely(dst_nents < 0)) {
++ dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : (-authsize)));
++ return ERR_PTR(dst_nents);
++ }
++ } else {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize : 0));
++ if (unlikely(src_nents < 0)) {
++ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : 0));
++ return ERR_PTR(src_nents);
++ }
+ }
+
+ if (likely(req->src == req->dst)) {
+- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+- DMA_BIDIRECTIONAL);
+- if (unlikely(!sgc)) {
++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
+ dev_err(jrdev, "unable to map source\n");
+- kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+- DMA_TO_DEVICE);
+- if (unlikely(!sgc)) {
+- dev_err(jrdev, "unable to map source\n");
+- kfree(edesc);
+- return ERR_PTR(-ENOMEM);
++ /* Cover also the case of null (zero length) input data */
++ if (src_nents) {
++ mapped_src_nents = dma_map_sg(jrdev, req->src,
++ src_nents, DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(jrdev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = 0;
+ }
+
+- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+- DMA_FROM_DEVICE);
+- if (unlikely(!sgc)) {
++ mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
+ dev_err(jrdev, "unable to map destination\n");
+- dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+- DMA_TO_DEVICE);
+- kfree(edesc);
++ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
++ sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
++ sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
++
++ /* allocate space for base edesc and hw desc commands, link tables */
++ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
++ GFP_DMA | flags);
++ if (!edesc) {
++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+- *all_contig_ptr = all_contig;
++ *all_contig_ptr = !(mapped_src_nents > 1);
+
+ sec4_sg_index = 0;
+- if (!all_contig) {
+- sg_to_sec4_sg_last(req->src, src_nents,
+- edesc->sec4_sg + sec4_sg_index, 0);
+- sec4_sg_index += src_nents;
++ if (mapped_src_nents > 1) {
++ sg_to_sec4_sg_last(req->src, mapped_src_nents,
++ edesc->sec4_sg + sec4_sg_index, 0);
++ sec4_sg_index += mapped_src_nents;
+ }
+- if (dst_nents) {
+- sg_to_sec4_sg_last(req->dst, dst_nents,
++ if (mapped_dst_nents > 1) {
++ sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+
+@@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_request *req)
+ u32 *desc;
+ int ret = 0;
+
+-#ifdef DEBUG
+- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
+- dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+- req->assoclen + req->cryptlen, 1, may_sleep);
+-#endif
++ caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
++ req->assoclen + req->cryptlen, 1);
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+@@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+- int src_nents, dst_nents = 0, sec4_sg_bytes;
++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+- bool iv_contig = false;
+- int sgc;
++ bool in_contig;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+- int sec4_sg_index;
++ int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+
+- src_nents = sg_count(req->src, req->nbytes);
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (unlikely(src_nents < 0)) {
++ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
++ req->nbytes);
++ return ERR_PTR(src_nents);
++ }
+
+- if (req->dst != req->src)
+- dst_nents = sg_count(req->dst, req->nbytes);
++ if (req->dst != req->src) {
++ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
++ if (unlikely(dst_nents < 0)) {
++ dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
++ req->nbytes);
++ return ERR_PTR(dst_nents);
++ }
++ }
+
+ if (likely(req->src == req->dst)) {
+- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+- DMA_BIDIRECTIONAL);
++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(jrdev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
+ } else {
+- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+- DMA_TO_DEVICE);
+- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+- DMA_FROM_DEVICE);
++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(jrdev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(jrdev, "unable to map destination\n");
++ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
++ return ERR_PTR(-ENOMEM);
++ }
+ }
+
+ iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+- /*
+- * Check if iv can be contiguous with source and destination.
+- * If so, include it. If not, create scatterlist.
+- */
+- if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+- iv_contig = true;
+- else
+- src_nents = src_nents ? : 1;
+- sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+- sizeof(struct sec4_sg_entry);
++ if (mapped_src_nents == 1 &&
++ iv_dma + ivsize == sg_dma_address(req->src)) {
++ in_contig = true;
++ sec4_sg_ents = 0;
++ } else {
++ in_contig = false;
++ sec4_sg_ents = 1 + mapped_src_nents;
++ }
++ dst_sg_idx = sec4_sg_ents;
++ sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+- sec4_sg_index = 0;
+- if (!iv_contig) {
++ if (!in_contig) {
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+- sg_to_sec4_sg_last(req->src, src_nents,
++ sg_to_sec4_sg_last(req->src, mapped_src_nents,
+ edesc->sec4_sg + 1, 0);
+- sec4_sg_index += 1 + src_nents;
+ }
+
+- if (dst_nents) {
+- sg_to_sec4_sg_last(req->dst, dst_nents,
+- edesc->sec4_sg + sec4_sg_index, 0);
++ if (mapped_dst_nents > 1) {
++ sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
++ edesc->sec4_sg + dst_sg_idx, 0);
+ }
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, 0, 0);
++ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ sec4_sg_bytes, 1);
+ #endif
+
+- *iv_contig_out = iv_contig;
++ *iv_contig_out = in_contig;
+ return edesc;
+ }
+
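
[Editor's note: the in_contig test above replaces the old !src_nents heuristic. The link table can be skipped only when DMA mapping yields a single source segment *and* the mapped IV ends exactly where that segment starts. As a standalone predicate — a sketch, with iv_dma from dma_map_single() and req->src already mapped:]

    static bool iv_and_src_contiguous(dma_addr_t iv_dma, unsigned int ivsize,
    				      int mapped_src_nents,
    				      struct scatterlist *src)
    {
    	return mapped_src_nents == 1 &&
    	       iv_dma + ivsize == sg_dma_address(src);
    }
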
+@@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ?
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+- int src_nents, dst_nents = 0, sec4_sg_bytes;
++ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+- bool iv_contig = false;
+- int sgc;
++ bool out_contig;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+- int sec4_sg_index;
++ int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+
+- src_nents = sg_count(req->src, req->nbytes);
+-
+- if (unlikely(req->dst != req->src))
+- dst_nents = sg_count(req->dst, req->nbytes);
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (unlikely(src_nents < 0)) {
++ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
++ req->nbytes);
++ return ERR_PTR(src_nents);
++ }
+
+ if (likely(req->src == req->dst)) {
+- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+- DMA_BIDIRECTIONAL);
++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(jrdev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ dst_nents = src_nents;
++ mapped_dst_nents = src_nents;
+ } else {
+- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+- DMA_TO_DEVICE);
+- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+- DMA_FROM_DEVICE);
++ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(jrdev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
++ if (unlikely(dst_nents < 0)) {
++ dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
++ req->nbytes);
++ return ERR_PTR(dst_nents);
++ }
++
++ mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(jrdev, "unable to map destination\n");
++ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
++ return ERR_PTR(-ENOMEM);
++ }
+ }
+
+ /*
+@@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+- if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
+- iv_contig = true;
+- else
+- dst_nents = dst_nents ? : 1;
+- sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+- sizeof(struct sec4_sg_entry);
++ sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
++ dst_sg_idx = sec4_sg_ents;
++ if (mapped_dst_nents == 1 &&
++ iv_dma + ivsize == sg_dma_address(req->dst)) {
++ out_contig = true;
++ } else {
++ out_contig = false;
++ sec4_sg_ents += 1 + mapped_dst_nents;
++ }
+
+ /* allocate space for base edesc and hw desc commands, link tables */
++ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+- sec4_sg_index = 0;
+- if (src_nents) {
+- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+- sec4_sg_index += src_nents;
+- }
++ if (mapped_src_nents > 1)
++ sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
++ 0);
+
+- if (!iv_contig) {
+- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
++ if (!out_contig) {
++ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
+ iv_dma, ivsize, 0);
+- sec4_sg_index += 1;
+- sg_to_sec4_sg_last(req->dst, dst_nents,
+- edesc->sec4_sg + sec4_sg_index, 0);
++ sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
++ edesc->sec4_sg + dst_sg_idx + 1, 0);
+ }
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
++ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, 0, 0);
++ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ edesc->iv_dma = iv_dma;
+@@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ sec4_sg_bytes, 1);
+ #endif
+
+- *iv_contig_out = iv_contig;
++ *iv_contig_out = out_contig;
+ return edesc;
+ }
+
+@@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+- bool iv_contig;
++ bool iv_contig = false;
+ u32 *desc;
+ int ret = 0;
+
+@@ -2933,7 +1840,6 @@ struct caam_alg_template {
+ } template_u;
+ u32 class1_alg_type;
+ u32 class2_alg_type;
+- u32 alg_op;
+ };
+
+ static struct caam_alg_template driver_algs[] = {
+@@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ }
+ },
+ {
+@@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ }
+ },
+@@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+@@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
+ },
+@@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
+ },
+@@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+@@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
+ },
+@@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+@@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
+ },
+@@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+@@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
+ },
+@@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+@@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
+ },
+@@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+@@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
+ },
+@@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads[] = {
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+@@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
+
+ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+ {
++ dma_addr_t dma_addr;
++
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
+
++ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
++ offsetof(struct caam_ctx,
++ sh_desc_enc_dma),
++ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ if (dma_mapping_error(ctx->jrdev, dma_addr)) {
++ dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
++ caam_jr_free(ctx->jrdev);
++ return -ENOMEM;
++ }
++
++ ctx->sh_desc_enc_dma = dma_addr;
++ ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
++ sh_desc_dec);
++ ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
++ sh_desc_givenc);
++ ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
++
+ /* copy descriptor header template value */
+- ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+- ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
++ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+ return 0;
+ }
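
[Editor's note: caam_init_common() now makes a single DMA mapping that covers all three shared descriptors plus the key, and derives the individual bus addresses with offsetof(). This relies on the field order in struct caam_ctx, which — abbreviated, as implied by the offsets used above — is:]

    /*
     *	u32        sh_desc_enc[...];	<- dma_addr (start of mapping)
     *	u32        sh_desc_dec[...];	<- dma_addr + offsetof(.., sh_desc_dec)
     *	u32        sh_desc_givenc[...];
     *	u8         key[...];
     *	dma_addr_t sh_desc_enc_dma;	<- offsetof() here = mapping size
     *	...
     *
     * DMA_ATTR_SKIP_CPU_SYNC defers cache maintenance to the explicit
     * dma_sync_single_for_device() calls made after each descriptor is
     * (re)built.
     */
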
+@@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_aead *tfm)
+
+ static void caam_exit_common(struct caam_ctx *ctx)
+ {
+- if (ctx->sh_desc_enc_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
+- dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
+- desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
+- if (ctx->sh_desc_dec_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
+- dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
+- desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
+- if (ctx->sh_desc_givenc_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
+- dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
+- desc_bytes(ctx->sh_desc_givenc),
+- DMA_TO_DEVICE);
+- if (ctx->key_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->key_dma))
+- dma_unmap_single(ctx->jrdev, ctx->key_dma,
+- ctx->enckeylen + ctx->split_key_pad_len,
+- DMA_TO_DEVICE);
+-
++ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
++ offsetof(struct caam_ctx, sh_desc_enc_dma),
++ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ caam_jr_free(ctx->jrdev);
+ }
+
+@@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+
+ t_alg->caam.class1_alg_type = template->class1_alg_type;
+ t_alg->caam.class2_alg_type = template->class2_alg_type;
+- t_alg->caam.alg_op = template->alg_op;
+
+ return t_alg;
+ }
+diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
+new file mode 100644
+index 00000000..d162120a
+--- /dev/null
++++ b/drivers/crypto/caam/caamalg_desc.c
+@@ -0,0 +1,1913 @@
++/*
++ * Shared descriptors for aead, ablkcipher algorithms
++ *
++ * Copyright 2016 NXP
++ */
++
++#include "compat.h"
++#include "desc_constr.h"
++#include "caamalg_desc.h"
++
++/*
++ * For aead functions, read payload and write payload,
++ * both of which are specified in req->src and req->dst
++ */
++static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
++{
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
++ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
++}
++
++/* Set DK bit in class 1 operation if shared */
++static inline void append_dec_op1(u32 *desc, u32 type)
++{
++ u32 *jump_cmd, *uncond_jump_cmd;
++
++ /* DK bit is valid only for AES */
++ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
++ append_operation(desc, type | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT);
++ return;
++ }
++
++ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
++ append_operation(desc, type | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT);
++ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
++ set_jump_tgt_here(desc, jump_cmd);
++ append_operation(desc, type | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
++ set_jump_tgt_here(desc, uncond_jump_cmd);
++}
++
++/**
++ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
++ * (non-protocol) with no (null) encryption.
++ * @desc: pointer to buffer used for descriptor construction
++ * @adata: pointer to authentication transform definitions. Note that since a
++ * split key is to be used, the size of the split key itself is
++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ *
++ * Note: Requires an MDHA split key.
++ */
++void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
++ unsigned int icvsize)
++{
++ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Skip if already shared */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
++ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* assoclen + cryptlen = seqinlen */
++ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
++
++ /* Prepare to read and write cryptlen + assoclen bytes */
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++
++ /*
++ * MOVE_LEN opcode is not available in all SEC HW revisions,
++ * so we need to do some magic, i.e. self-patch the descriptor
++ * buffer.
++ */
++ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH3 |
++ (0x6 << MOVE_LEN_SHIFT));
++ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
++ MOVE_DEST_DESCBUF |
++ MOVE_WAITCOMP |
++ (0x8 << MOVE_LEN_SHIFT));
++
++ /* Class 2 operation */
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ /* Read and write cryptlen bytes */
++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
++
++ set_move_tgt_here(desc, read_move_cmd);
++ set_move_tgt_here(desc, write_move_cmd);
++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
++ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
++ MOVE_AUX_LS);
++
++ /* Write ICV */
++ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "aead null enc shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
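
[Editor's note: a constructor like this writes into a buffer the caller has already DMA-mapped, so a typical caller rebuilds the descriptor and then syncs it out, as the set_sh_desc conversions earlier in this patch do. A hedged sketch — rebuild_null_enc_shdesc and the key_inline choice are illustrative; the ctx field names follow the caamalg conversion above:]

    static void rebuild_null_enc_shdesc(struct caam_ctx *ctx)
    {
    	u32 *desc = ctx->sh_desc_enc;

    	ctx->adata.key_inline = true;	/* assume the split key fits in-line */
    	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
    	dma_sync_single_for_device(ctx->jrdev, ctx->sh_desc_enc_dma,
    				   desc_bytes(desc), DMA_TO_DEVICE);
    }
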
++
++/**
++ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
++ * (non-protocol) with no (null) decryption.
++ * @desc: pointer to buffer used for descriptor construction
++ * @adata: pointer to authentication transform definitions. Note that since a
++ * split key is to be used, the size of the split key itself is
++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ *
++ * Note: Requires an MDHA split key.
++ */
++void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
++ unsigned int icvsize)
++{
++ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Skip if already shared */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
++ adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* Class 2 operation */
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
++
++ /* assoclen + cryptlen = seqoutlen */
++ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++
++ /* Prepare to read and write cryptlen + assoclen bytes */
++ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
++
++ /*
++ * MOVE_LEN opcode is not available in all SEC HW revisions,
++ * so we need to do some magic, i.e. self-patch the descriptor
++ * buffer.
++ */
++ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH2 |
++ (0x6 << MOVE_LEN_SHIFT));
++ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
++ MOVE_DEST_DESCBUF |
++ MOVE_WAITCOMP |
++ (0x8 << MOVE_LEN_SHIFT));
++
++ /* Read and write cryptlen bytes */
++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
++
++ /*
++ * Insert a NOP here, since we need at least 4 instructions between
++ * code patching the descriptor buffer and the location being patched.
++ */
++ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
++ set_jump_tgt_here(desc, jump_cmd);
++
++ set_move_tgt_here(desc, read_move_cmd);
++ set_move_tgt_here(desc, write_move_cmd);
++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
++ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
++ MOVE_AUX_LS);
++ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
++
++ /* Load ICV */
++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
++ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "aead null dec shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
++
++static void init_sh_desc_key_aead(u32 * const desc,
++ struct alginfo * const cdata,
++ struct alginfo * const adata,
++ const bool is_rfc3686, u32 *nonce)
++{
++ u32 *key_jump_cmd;
++ unsigned int enckeylen = cdata->keylen;
++
++ /* Note: Context registers are saved. */
++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++
++ /* Skip if already shared */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ /*
++ * RFC3686 specific:
++ * | key = {AUTH_KEY, ENC_KEY, NONCE}
++ * | enckeylen = encryption key size + nonce size
++ */
++ if (is_rfc3686)
++ enckeylen -= CTR_RFC3686_NONCE_SIZE;
++
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
++ adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++
++ if (cdata->key_inline)
++ append_key_as_imm(desc, cdata->key_virt, enckeylen,
++ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
++ else
++ append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++
++ /* Load Counter into CONTEXT1 reg */
++ if (is_rfc3686) {
++ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
++ LDST_CLASS_IND_CCB |
++ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
++ append_move(desc,
++ MOVE_SRC_OUTFIFO |
++ MOVE_DEST_CLASS1CTX |
++ (16 << MOVE_OFFSET_SHIFT) |
++ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
++ }
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++}
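
[Editor's note: per the comment in the function above, setkey hands the constructor one blob with the 4-byte rfc3686 nonce appended to the cipher key, which is why enckeylen is trimmed before the key is loaded. A sketch of locating the nonce in that blob — CTR_RFC3686_NONCE_SIZE comes from crypto/ctr.h:]

    static const u8 *rfc3686_nonce(const struct alginfo *cdata)
    {
    	/* blob layout: [ AES key | 4-byte nonce ]; the nonce is the tail */
    	return (const u8 *)cdata->key_virt + cdata->keylen -
    	       CTR_RFC3686_NONCE_SIZE;
    }
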
++
++/**
++ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
++ * (non-protocol).
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
++ * @adata: pointer to authentication transform definitions. Note that since a
++ * split key is to be used, the size of the split key itself is
++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
++ * @nonce: pointer to rfc3686 nonce
++ * @ctx1_iv_off: IV offset in CONTEXT1 register
++ * @is_qi: true when called from caam/qi
++ *
++ * Note: Requires an MDHA split key.
++ */
++void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool is_rfc3686,
++ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
++{
++ /* Note: Context registers are saved. */
++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++
++ /* Class 2 operation */
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ if (is_qi) {
++ u32 *wait_load_cmd;
++
++ /* REG3 = assoclen */
++ append_seq_load(desc, 4, LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH3 |
++ (4 << LDST_OFFSET_SHIFT));
++
++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_CALM | JUMP_COND_NCP |
++ JUMP_COND_NOP | JUMP_COND_NIP |
++ JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_load_cmd);
++
++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ (ctx1_iv_off << LDST_OFFSET_SHIFT));
++ }
++
++ /* Read and write assoclen bytes */
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++
++ /* Skip assoc data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
++
++ /* read assoc before reading payload */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
++ FIFOLDST_VLF);
++
++ /* Load Counter into CONTEXT1 reg */
++ if (is_rfc3686)
++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
++ LDST_OFFSET_SHIFT));
++
++ /* Class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ /* Read and write cryptlen bytes */
++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
++
++ /* Write ICV */
++ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
++
++/**
++ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
++ * (non-protocol).
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
++ * @adata: pointer to authentication transform definitions. Note that since a
++ * split key is to be used, the size of the split key itself is
++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
++ * @nonce: pointer to rfc3686 nonce
++ * @ctx1_iv_off: IV offset in CONTEXT1 register
++ * @is_qi: true when called from caam/qi
++ *
++ * Note: Requires an MDHA split key.
++ */
++void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool geniv,
++ const bool is_rfc3686, u32 *nonce,
++ const u32 ctx1_iv_off, const bool is_qi)
++{
++ /* Note: Context registers are saved. */
++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++
++ /* Class 2 operation */
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
++
++ if (is_qi) {
++ u32 *wait_load_cmd;
++
++ /* REG3 = assoclen */
++ append_seq_load(desc, 4, LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH3 |
++ (4 << LDST_OFFSET_SHIFT));
++
++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_CALM | JUMP_COND_NCP |
++ JUMP_COND_NOP | JUMP_COND_NIP |
++ JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_load_cmd);
++
++ if (!geniv)
++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ (ctx1_iv_off << LDST_OFFSET_SHIFT));
++ }
++
++ /* Read and write assoclen bytes */
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (geniv)
++ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
++ else
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++
++ /* Skip assoc data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
++
++ /* read assoc before reading payload */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
++ KEY_VLF);
++
++ if (geniv) {
++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ (ctx1_iv_off << LDST_OFFSET_SHIFT));
++ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
++ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
++ }
++
++ /* Load Counter into CONTEXT1 reg */
++ if (is_rfc3686)
++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
++ LDST_OFFSET_SHIFT));
++
++ /* Choose operation */
++ if (ctx1_iv_off)
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT);
++ else
++ append_dec_op1(desc, cdata->algtype);
++
++ /* Read and write cryptlen bytes */
++ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
++
++ /* Load ICV */
++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
++ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
++
++/**
++ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
++ * (non-protocol) with HW-generated initialization
++ * vector.
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
++ * @adata: pointer to authentication transform definitions. Note that since a
++ * split key is to be used, the size of the split key itself is
++ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
++ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
++ * @nonce: pointer to rfc3686 nonce
++ * @ctx1_iv_off: IV offset in CONTEXT1 register
++ * @is_qi: true when called from caam/qi
++ *
++ * Note: Requires an MDHA split key.
++ */
++void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool is_rfc3686,
++ u32 *nonce, const u32 ctx1_iv_off,
++ const bool is_qi)
++{
++ u32 geniv, moveiv;
++
++ /* Note: Context registers are saved. */
++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++
++ if (is_qi) {
++ u32 *wait_load_cmd;
++
++ /* REG3 = assoclen */
++ append_seq_load(desc, 4, LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH3 |
++ (4 << LDST_OFFSET_SHIFT));
++
++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_CALM | JUMP_COND_NCP |
++ JUMP_COND_NOP | JUMP_COND_NIP |
++ JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_load_cmd);
++ }
++
++ if (is_rfc3686) {
++ if (is_qi)
++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ (ctx1_iv_off << LDST_OFFSET_SHIFT));
++
++ goto copy_iv;
++ }
++
++ /* Generate IV */
++ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
++ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
++ NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
++ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
++ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
++ append_move(desc, MOVE_WAITCOMP |
++ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
++ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
++ (ivsize << MOVE_LEN_SHIFT));
++ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
++
++copy_iv:
++ /* Copy IV to class 1 context */
++ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
++ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
++ (ivsize << MOVE_LEN_SHIFT));
++
++ /* Return to encryption */
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ /* Read and write assoclen bytes */
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++
++ /* Skip assoc data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
++
++ /* read assoc before reading payload */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
++ KEY_VLF);
++
++ /* Copy iv from outfifo to class 2 fifo */
++ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
++ NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
++ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
++ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
++ append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
++
++ /* Load Counter into CONTEXT1 reg */
++ if (is_rfc3686)
++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
++ LDST_OFFSET_SHIFT));
++
++ /* Class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ /* Will write ivsize + cryptlen */
++ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++
++	/* No need to reload the IV */
++ append_seq_fifo_load(desc, ivsize,
++ FIFOLD_CLASS_SKIP);
++
++ /* Will read cryptlen */
++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
++ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
++
++ /* Write ICV */
++ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "aead givenc shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
++
++/**
++ * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
++ * with OP_ALG_AAI_CBC
++ * @adata: pointer to authentication transform definitions. Note that since a
++ * split key is to be used, the size of the split key itself is
++ * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
++ * OP_ALG_AAI_HMAC_PRECOMP.
++ * @assoclen: associated data length
++ * @ivsize: initialization vector size
++ * @authsize: authentication data size
++ * @blocksize: block cipher size
++ */
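++/*
++ * Illustrative parameters: a TLS 1.0 AES-128-CBC / HMAC-SHA1 session
++ * would use assoclen = 13, ivsize = 16, authsize = 20, blocksize = 16.
++ */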
++void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int assoclen,
++ unsigned int ivsize, unsigned int authsize,
++ unsigned int blocksize)
++{
++ u32 *key_jump_cmd, *zero_payload_jump_cmd;
++ u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
++
++ /*
++ * Compute the index (in bytes) for the LOAD with destination of
++ * Class 1 Data Size Register and for the LOAD that generates padding
++ */
++ if (adata->key_inline) {
++ idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
++ cdata->keylen - 4 * CAAM_CMD_SZ;
++ idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
++ cdata->keylen - 2 * CAAM_CMD_SZ;
++ } else {
++ idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
++ 4 * CAAM_CMD_SZ;
++ idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
++ 2 * CAAM_CMD_SZ;
++ }
++
++ stidx = 1 << HDR_START_IDX_SHIFT;
++ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
++
++ /* skip key loading if they are loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
++ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++
++ if (cdata->key_inline)
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++ else
++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* class 2 operation */
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++ /* class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ /* payloadlen = input data length - (assoclen + ivlen) */
++ append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
++
++ /* math1 = payloadlen + icvlen */
++ append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
++
++ /* padlen = block_size - math1 % block_size */
++ append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
++ append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
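++	/*
++	 * the AND with (blocksize - 1) stands in for the modulo because
++	 * blocksize is a power of two (16 for AES-CBC); padlen thus falls
++	 * in the [1, blocksize] range that TLS 1.0 expects
++	 */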
++
++ /* cryptlen = payloadlen + icvlen + padlen */
++ append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
++
++ /*
++ * update immediate data with the padding length value
++ * for the LOAD in the class 1 data size register.
++ */
++ append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
++ (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
++ (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
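++	/*
++	 * the MOVE pair above works because MATH2 already holds padlen
++	 * (REG2): moving in just 7 command bytes preserves its low byte,
++	 * so the 8-byte write-back patches the LOAD's immediate
++	 */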
++
++	/* overwrite PL field for the padding info FIFO entry */
++ append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
++ (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
++ (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
++
++ /* store encrypted payload, icv and padding */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
++
++	/*
++	 * if payload length is zero, jump to zero-payload commands
++	 * (the ADD below updates the MATH Z flag that the JUMP tests)
++	 */
++ append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
++ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
++ JUMP_COND_MATH_Z);
++
++ /* load iv in context1 */
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
++ LDST_CLASS_1_CCB | ivsize);
++
++ /* read assoc for authentication */
++ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
++ FIFOLD_TYPE_MSG);
++	/* in-snoop payload (read into both the cipher and the MAC) */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
++ FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
++
++ /* jump the zero-payload commands */
++	/* jump over the zero-payload commands */
++
++ /* zero-payload commands */
++ set_jump_tgt_here(desc, zero_payload_jump_cmd);
++
++ /* load iv in context1 */
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
++ LDST_CLASS_1_CCB | ivsize);
++
++ /* assoc data is the only data for authentication */
++ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
++
++ /* send icv to encryption */
++ append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
++ authsize);
++
++	/*
++	 * update class 1 data size register with the padding length; the
++	 * zero immediate below is the placeholder patched at run time by
++	 * the MOVEs above
++	 */
++ append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
++ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
++
++ /* generate padding and send it to encryption */
++ genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
++ NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
++ append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
++ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc,
++ desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
++
++/**
++ * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
++ * with OP_ALG_AAI_CBC
++ * @adata: pointer to authentication transform definitions. Note that since a
++ * split key is to be used, the size of the split key itself is
++ * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
++ * OP_ALG_AAI_HMAC_PRECOMP.
++ * @assoclen: associated data length
++ * @ivsize: initialization vector size
++ * @authsize: authentication data size
++ * @blocksize: block cipher size
++ */
++void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int assoclen,
++ unsigned int ivsize, unsigned int authsize,
++ unsigned int blocksize)
++{
++ u32 stidx, jumpback;
++ u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
++ /*
++ * Pointer Size bool determines the size of address pointers.
++ * false - Pointers fit in one 32-bit word.
++ * true - Pointers fit in two 32-bit words.
++ */
++ static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
++
++ stidx = 1 << HDR_START_IDX_SHIFT;
++ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
++
++ /* skip key loading if they are loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++
++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* class 2 operation */
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
++ /* class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT);
++
++ /* VSIL = input data length - 2 * block_size */
++ append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
++ blocksize);
++
++ /*
++ * payloadlen + icvlen + padlen = input data length - (assoclen +
++ * ivsize)
++ */
++ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
++
++ /* skip data to the last but one cipher block */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
++
++ /* load iv for the last cipher block */
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
++ LDST_CLASS_1_CCB | ivsize);
++
++ /* read last cipher block */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
++ FIFOLD_TYPE_LAST1 | blocksize);
++
++ /* move decrypted block into math0 and math1 */
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
++ blocksize);
++
++ /* reset AES CHA */
++ append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
++ LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
++
++ /* rewind input sequence */
++ append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
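++	/*
++	 * (SQIN_RTO = return to origin: the input sequence reopens at its
++	 * start; 65535 is just a generous upper bound on the re-read length)
++	 */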
++
++ /* key1 is in decryption form */
++ append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
++ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
++
++ /* load iv in context1 */
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
++ LDST_SRCDST_WORD_CLASS_CTX | ivsize);
++
++ /* read sequence number */
++ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
++ /* load Type, Version and Len fields in math0 */
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
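++	/* (type 1 + version 2 + length 2 = 5 bytes, right-aligned in MATH0) */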
++
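++	/*
++	 * the low byte of MATH1 holds the last byte of the decrypted block,
++	 * i.e. the TLS pad-length byte (padlen - 1); mask everything else
++	 */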
++ /* compute (padlen - 1) */
++ append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
++
++ /* math2 = icvlen + (padlen - 1) + 1 */
++ append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
++
++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
++
++ /* VSOL = payloadlen + icvlen + padlen */
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
++
++#ifdef __LITTLE_ENDIAN
++ append_moveb(desc, MOVE_WAITCOMP |
++ MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
++#endif
++ /* update Len field */
++ append_math_sub(desc, REG0, REG0, REG2, 8);
++
++ /* store decrypted payload, icv and padding */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
++
++	/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
++ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
++
++ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
++ JUMP_COND_MATH_Z);
++
++ /* send Type, Version and Len(pre ICV) fields to authentication */
++ append_move(desc, MOVE_WAITCOMP |
++ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
++ (3 << MOVE_OFFSET_SHIFT) | 5);
++
++	/* out-snoop payload (class 1 plaintext output also feeds the MAC) */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
++ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
++ FIFOLDST_VLF);
++ skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
++
++ set_jump_tgt_here(desc, zero_payload_jump_cmd);
++ /* send Type, Version and Len(pre ICV) fields to authentication */
++ append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
++ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
++ (3 << MOVE_OFFSET_SHIFT) | 5);
++
++ set_jump_tgt_here(desc, skip_zero_jump_cmd);
++ append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
++
++ /* load icvlen and padlen */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
++ FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
++
++	/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
++ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
++
++ /*
++ * Start a new input sequence using the SEQ OUT PTR command options,
++ * pointer and length used when the current output sequence was defined.
++ */
++ if (ps) {
++ /*
++ * Move the lower 32 bits of Shared Descriptor address, the
++ * SEQ OUT PTR command, Output Pointer (2 words) and
++ * Output Length into math registers.
++ */
++#ifdef __LITTLE_ENDIAN
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
++ 20);
++#else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
++ 20);
++#endif
++		/* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
++ append_math_and_imm_u32(desc, REG0, REG0, IMM,
++ ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
++ /* Append a JUMP command after the copied fields */
++ jumpback = CMD_JUMP | (char)-9;
++ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
++ LDST_SRCDST_WORD_DECO_MATH2 |
++ (4 << LDST_OFFSET_SHIFT));
++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
++ /* Move the updated fields back to the Job Descriptor */
++#ifdef __LITTLE_ENDIAN
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
++ 24);
++#else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
++ 24);
++#endif
++ /*
++ * Read the new SEQ IN PTR command, Input Pointer, Input Length
++ * and then jump back to the next command from the
++ * Shared Descriptor.
++ */
++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
++ } else {
++ /*
++ * Move the SEQ OUT PTR command, Output Pointer (1 word) and
++ * Output Length into math registers.
++ */
++#ifdef __LITTLE_ENDIAN
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
++ 12);
++#else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
++ 12);
++#endif
++		/* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
++ append_math_and_imm_u64(desc, REG0, REG0, IMM,
++ ~(((u64)(CMD_SEQ_IN_PTR ^
++ CMD_SEQ_OUT_PTR)) << 32));
++ /* Append a JUMP command after the copied fields */
++ jumpback = CMD_JUMP | (char)-7;
++ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
++ LDST_SRCDST_WORD_DECO_MATH1 |
++ (4 << LDST_OFFSET_SHIFT));
++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
++ /* Move the updated fields back to the Job Descriptor */
++#ifdef __LITTLE_ENDIAN
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
++ 16);
++#else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
++ 16);
++#endif
++ /*
++ * Read the new SEQ IN PTR command, Input Pointer, Input Length
++ * and then jump back to the next command from the
++ * Shared Descriptor.
++ */
++ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
++ }
++
++ /* skip payload */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
++ /* check icv */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
++ FIFOLD_TYPE_LAST2 | authsize);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc,
++ desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
++
++/**
++ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @is_qi: true when called from caam/qi
++ */
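++/*
++ * Illustrative parameters: GCM is normally used with ivsize = 12 (the
++ * recommended GCM IV length) and an icvsize between 4 and 16 bytes.
++ */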
++void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi)
++{
++ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
++ *zero_assoc_jump_cmd2;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* skip key loading if they are loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++ if (cdata->key_inline)
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++ else
++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ if (is_qi) {
++ u32 *wait_load_cmd;
++
++ /* REG3 = assoclen */
++ append_seq_load(desc, 4, LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH3 |
++ (4 << LDST_OFFSET_SHIFT));
++
++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_CALM | JUMP_COND_NCP |
++ JUMP_COND_NOP | JUMP_COND_NIP |
++ JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_load_cmd);
++
++ append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
++ ivsize);
++ } else {
++ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
++ CAAM_CMD_SZ);
++ }
++
++ /* if assoclen + cryptlen is ZERO, skip to ICV write */
++ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
++ JUMP_COND_MATH_Z);
++
++ if (is_qi)
++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
++
++ /* if assoclen is ZERO, skip reading the assoc data */
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
++ JUMP_COND_MATH_Z);
++
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++
++ /* skip assoc data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
++
++ /* cryptlen = seqinlen - assoclen */
++ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
++
++ /* if cryptlen is ZERO jump to zero-payload commands */
++ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
++ JUMP_COND_MATH_Z);
++
++ /* read assoc data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
++ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
++
++ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++
++ /* write encrypted data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
++
++ /* read payload data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
++
++ /* jump to ICV writing */
++ if (is_qi)
++ append_jump(desc, JUMP_TEST_ALL | 4);
++ else
++ append_jump(desc, JUMP_TEST_ALL | 2);
++
++ /* zero-payload commands */
++ set_jump_tgt_here(desc, zero_payload_jump_cmd);
++
++ /* read assoc data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
++ if (is_qi)
++ /* jump to ICV writing */
++ append_jump(desc, JUMP_TEST_ALL | 2);
++
++ /* There is no input data */
++ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
++
++ if (is_qi)
++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
++ FIFOLD_TYPE_LAST1);
++
++ /* write ICV */
++ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
++
++/**
++ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @is_qi: true when called from caam/qi
++ */
++void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi)
++{
++ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* skip key loading if they are loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL |
++ JUMP_TEST_ALL | JUMP_COND_SHRD);
++ if (cdata->key_inline)
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++ else
++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
++
++ if (is_qi) {
++ u32 *wait_load_cmd;
++
++ /* REG3 = assoclen */
++ append_seq_load(desc, 4, LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH3 |
++ (4 << LDST_OFFSET_SHIFT));
++
++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_CALM | JUMP_COND_NCP |
++ JUMP_COND_NOP | JUMP_COND_NIP |
++ JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_load_cmd);
++
++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
++ }
++
++ /* if assoclen is ZERO, skip reading the assoc data */
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
++ JUMP_COND_MATH_Z);
++
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++
++ /* skip assoc data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
++
++ /* read assoc data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
++
++ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
++
++ /* cryptlen = seqoutlen - assoclen */
++ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++
++ /* jump to zero-payload command if cryptlen is zero */
++ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
++ JUMP_COND_MATH_Z);
++
++ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++
++	/* store decrypted data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
++
++ /* read payload data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
++
++ /* zero-payload command */
++ set_jump_tgt_here(desc, zero_payload_jump_cmd);
++
++ /* read ICV */
++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
++
++/**
++ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
++ * (non-protocol).
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @is_qi: true when called from caam/qi
++ */
++void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi)
++{
++ u32 *key_jump_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Skip key loading if it is loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++ if (cdata->key_inline)
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++ else
++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* Class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ if (is_qi) {
++ u32 *wait_load_cmd;
++
++ /* REG3 = assoclen */
++ append_seq_load(desc, 4, LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH3 |
++ (4 << LDST_OFFSET_SHIFT));
++
++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_CALM | JUMP_COND_NCP |
++ JUMP_COND_NOP | JUMP_COND_NIP |
++ JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_load_cmd);
++
++ /* Read salt and IV */
++ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
++ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV);
++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
++ }
++
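++	/*
++	 * rfc4106 counts the 8-byte IV as part of assoclen, so the AAD
++	 * actually read below is assoclen - ivsize bytes
++	 */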
++ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++
++ /* Read assoc data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
++
++ /* Skip IV */
++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
++
++ /* Will read cryptlen bytes */
++ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++
++ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
++
++ /* Skip assoc data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
++
++ /* cryptlen = seqoutlen - assoclen */
++ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
++
++ /* Write encrypted data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
++
++ /* Read payload data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
++
++ /* Write ICV */
++ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
++
++/**
++ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
++ * (non-protocol).
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @is_qi: true when called from caam/qi
++ */
++void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi)
++{
++ u32 *key_jump_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Skip key loading if it is loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++ if (cdata->key_inline)
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++ else
++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* Class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
++
++ if (is_qi) {
++ u32 *wait_load_cmd;
++
++ /* REG3 = assoclen */
++ append_seq_load(desc, 4, LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH3 |
++ (4 << LDST_OFFSET_SHIFT));
++
++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_CALM | JUMP_COND_NCP |
++ JUMP_COND_NOP | JUMP_COND_NIP |
++ JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_load_cmd);
++
++ /* Read salt and IV */
++ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
++ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV);
++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
++ }
++
++ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++
++ /* Read assoc data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
++
++ /* Skip IV */
++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
++
++ /* Will read cryptlen bytes */
++ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
++
++ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
++
++ /* Skip assoc data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
++
++ /* Will write cryptlen bytes */
++ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++
++ /* Store payload data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
++
++ /* Read encrypted data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
++
++ /* Read ICV */
++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
++
++/**
++ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
++ * (non-protocol).
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @is_qi: true when called from caam/qi
++ */
++void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi)
++{
++ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Skip key loading if it is loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++ if (cdata->key_inline)
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++ else
++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* Class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ if (is_qi) {
++ /* assoclen is not needed, skip it */
++ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
++
++ /* Read salt and IV */
++ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
++ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV);
++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
++ }
++
++ /* assoclen + cryptlen = seqinlen */
++ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
++
++ /*
++ * MOVE_LEN opcode is not available in all SEC HW revisions,
++	 * thus we need to do some magic, i.e. self-patch the descriptor
++	 * buffer.
++ */
++ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
++ (0x6 << MOVE_LEN_SHIFT));
++ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
++ (0x8 << MOVE_LEN_SHIFT));
++
++ /* Will read assoclen + cryptlen bytes */
++ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++
++ /* Will write assoclen + cryptlen bytes */
++ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++
++ /* Read and write assoclen + cryptlen bytes */
++ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
++
++ set_move_tgt_here(desc, read_move_cmd);
++ set_move_tgt_here(desc, write_move_cmd);
++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
++ /* Move payload data to OFIFO */
++ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
++
++ /* Write ICV */
++ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
++
++/**
++ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
++ * (non-protocol).
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @is_qi: true when called from caam/qi
++ */
++void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi)
++{
++ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Skip key loading if it is loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++ if (cdata->key_inline)
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++ else
++ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* Class 1 operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
++
++ if (is_qi) {
++ /* assoclen is not needed, skip it */
++ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
++
++ /* Read salt and IV */
++ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
++ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV);
++ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
++ }
++
++ /* assoclen + cryptlen = seqoutlen */
++ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++
++ /*
++ * MOVE_LEN opcode is not available in all SEC HW revisions,
++	 * thus we need to do some magic, i.e. self-patch the descriptor
++	 * buffer.
++ */
++ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
++ (0x6 << MOVE_LEN_SHIFT));
++ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
++ (0x8 << MOVE_LEN_SHIFT));
++
++ /* Will read assoclen + cryptlen bytes */
++ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++
++ /* Will write assoclen + cryptlen bytes */
++ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++
++ /* Store payload data */
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
++
++ /* In-snoop assoclen + cryptlen data */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
++ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
++
++ set_move_tgt_here(desc, read_move_cmd);
++ set_move_tgt_here(desc, write_move_cmd);
++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
++ /* Move payload data to OFIFO */
++ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
++ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
++
++ /* Read ICV */
++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
++
++/*
++ * For ablkcipher encrypt and decrypt, read from req->src and
++ * write to req->dst
++ */
++static inline void ablkcipher_append_src_dst(u32 *desc)
++{
++ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
++ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
++}
++
++/**
++ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
++ * @ivsize: initialization vector size
++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
++ * @ctx1_iv_off: IV offset in CONTEXT1 register
++ */
++void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, const bool is_rfc3686,
++ const u32 ctx1_iv_off)
++{
++ u32 *key_jump_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++ /* Skip if already shared */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ /* Load class1 key only */
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++
++ /* Load nonce into CONTEXT1 reg */
++ if (is_rfc3686) {
++ u8 *nonce = cdata->key_virt + cdata->keylen;
++
++ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
++ LDST_CLASS_IND_CCB |
++ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
++ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
++ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
++ }
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* Load iv */
++ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
++
++ /* Load counter into CONTEXT1 reg */
++ if (is_rfc3686)
++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
++ LDST_OFFSET_SHIFT));
++
++ /* Load operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ /* Perform operation */
++ ablkcipher_append_src_dst(desc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
++
++/**
++ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
++ * @ivsize: initialization vector size
++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
++ * @ctx1_iv_off: IV offset in CONTEXT1 register
++ */
++void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, const bool is_rfc3686,
++ const u32 ctx1_iv_off)
++{
++ u32 *key_jump_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++ /* Skip if already shared */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ /* Load class1 key only */
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++
++ /* Load nonce into CONTEXT1 reg */
++ if (is_rfc3686) {
++ u8 *nonce = cdata->key_virt + cdata->keylen;
++
++ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
++ LDST_CLASS_IND_CCB |
++ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
++ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
++ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
++ }
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* load IV */
++ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
++
++ /* Load counter into CONTEXT1 reg */
++ if (is_rfc3686)
++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
++ LDST_OFFSET_SHIFT));
++
++ /* Choose operation */
++ if (ctx1_iv_off)
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT);
++ else
++ append_dec_op1(desc, cdata->algtype);
++
++ /* Perform operation */
++ ablkcipher_append_src_dst(desc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
++
++/**
++ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
++ * with HW-generated initialization vector.
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
++ * with OP_ALG_AAI_CBC.
++ * @ivsize: initialization vector size
++ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
++ * @ctx1_iv_off: IV offset in CONTEXT1 register
++ */
++void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, const bool is_rfc3686,
++ const u32 ctx1_iv_off)
++{
++ u32 *key_jump_cmd, geniv;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++ /* Skip if already shared */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ /* Load class1 key only */
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++
++ /* Load Nonce into CONTEXT1 reg */
++ if (is_rfc3686) {
++ u8 *nonce = cdata->key_virt + cdata->keylen;
++
++ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
++ LDST_CLASS_IND_CCB |
++ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
++ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
++ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
++ }
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* Generate IV */
++ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
++ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
++ (ivsize << NFIFOENTRY_DLEN_SHIFT);
++ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
++ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
++ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
++ MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
++ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
++ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
++
++ /* Copy generated IV to memory */
++ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
++
++ /* Load Counter into CONTEXT1 reg */
++ if (is_rfc3686)
++ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
++ LDST_OFFSET_SHIFT));
++
++ if (ctx1_iv_off)
++ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
++ (1 << JUMP_OFFSET_SHIFT));
++
++ /* Load operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ /* Perform operation */
++ ablkcipher_append_src_dst(desc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
++
++/**
++ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
++ * descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
++ */
++void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
++{
++ __be64 sector_size = cpu_to_be64(512);
++ u32 *key_jump_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++ /* Skip if already shared */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ /* Load class1 keys only */
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++
++ /* Load sector size with index 40 bytes (0x28) */
++ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ (0x28 << LDST_OFFSET_SHIFT));
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /*
++ * create sequence for loading the sector index
++ * Upper 8B of IV - will be used as sector index
++ * Lower 8B of IV - will be discarded
++ */
++ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
++ (0x20 << LDST_OFFSET_SHIFT));
++ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
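++	/* the tweak is then derived in HW from the sector index and size */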
++
++ /* Load operation */
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++
++ /* Perform operation */
++ ablkcipher_append_src_dst(desc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
++
++/**
++ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
++ * descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
++ */
++void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
++{
++ __be64 sector_size = cpu_to_be64(512);
++ u32 *key_jump_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++ /* Skip if already shared */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ /* Load class1 key only */
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
++ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
++
++ /* Load sector size with index 40 bytes (0x28) */
++ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ (0x28 << LDST_OFFSET_SHIFT));
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /*
++ * create sequence for loading the sector index
++ * Upper 8B of IV - will be used as sector index
++ * Lower 8B of IV - will be discarded
++ */
++ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
++ (0x20 << LDST_OFFSET_SHIFT));
++ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
++
++ /* Load operation */
++ append_dec_op1(desc, cdata->algtype);
++
++ /* Perform operation */
++ ablkcipher_append_src_dst(desc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
++EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("FSL CAAM descriptor support");
++MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
+diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
+new file mode 100644
+index 00000000..6b436f65
+--- /dev/null
++++ b/drivers/crypto/caam/caamalg_desc.h
+@@ -0,0 +1,127 @@
++/*
++ * Shared descriptors for aead, ablkcipher algorithms
++ *
++ * Copyright 2016 NXP
++ */
++
++#ifndef _CAAMALG_DESC_H_
++#define _CAAMALG_DESC_H_
++
++/* length of descriptors text */
++#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
++#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
++#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
++#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
++#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
++#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
++#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
++
++#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
++#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
++
++/* Note: Nonce is counted in cdata.keylen */
++#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
++
++#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
++#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
++#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
++
++#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
++#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
++#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
++#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
++#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
++
++#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
++#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
++#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
++#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
++#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
++
++#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
++#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
++#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
++#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
++#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
++
++#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
++#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
++ 20 * CAAM_CMD_SZ)
++#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
++ 15 * CAAM_CMD_SZ)
++
++void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
++ unsigned int icvsize);
++
++void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
++ unsigned int icvsize);
++
++void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool is_rfc3686,
++ u32 *nonce, const u32 ctx1_iv_off,
++ const bool is_qi);
++
++void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool geniv,
++ const bool is_rfc3686, u32 *nonce,
++ const u32 ctx1_iv_off, const bool is_qi);
++
++void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool is_rfc3686,
++ u32 *nonce, const u32 ctx1_iv_off,
++ const bool is_qi);
++
++void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int assoclen,
++ unsigned int ivsize, unsigned int authsize,
++ unsigned int blocksize);
++
++void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int assoclen,
++ unsigned int ivsize, unsigned int authsize,
++ unsigned int blocksize);
++
++void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi);
++
++void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi);
++
++void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi);
++
++void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi);
++
++void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi);
++
++void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, unsigned int icvsize,
++ const bool is_qi);
++
++void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, const bool is_rfc3686,
++ const u32 ctx1_iv_off);
++
++void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, const bool is_rfc3686,
++ const u32 ctx1_iv_off);
++
++void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
++ unsigned int ivsize, const bool is_rfc3686,
++ const u32 ctx1_iv_off);
++
++void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
++
++void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
++
++#endif /* _CAAMALG_DESC_H_ */
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+new file mode 100644
+index 00000000..d6a9b0c5
+--- /dev/null
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -0,0 +1,2877 @@
++/*
++ * Freescale FSL CAAM support for crypto API over QI backend.
++ * Based on caamalg.c
++ *
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016-2017 NXP
++ */
++
++#include "compat.h"
++#include "ctrl.h"
++#include "regs.h"
++#include "intern.h"
++#include "desc_constr.h"
++#include "error.h"
++#include "sg_sw_qm.h"
++#include "key_gen.h"
++#include "qi.h"
++#include "jr.h"
++#include "caamalg_desc.h"
++
++/*
++ * crypto alg
++ */
++#define CAAM_CRA_PRIORITY 2000
++/* max key is the sum of AES_MAX_KEY_SIZE and the max split key size */
++#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
++ SHA512_DIGEST_SIZE * 2)
++
++#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
++ CAAM_MAX_KEY_SIZE)
++#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
++
++struct caam_alg_entry {
++ int class1_alg_type;
++ int class2_alg_type;
++ bool rfc3686;
++ bool geniv;
++};
++
++struct caam_aead_alg {
++ struct aead_alg aead;
++ struct caam_alg_entry caam;
++ bool registered;
++};
++
++/*
++ * per-session context
++ */
++struct caam_ctx {
++ struct device *jrdev;
++ u32 sh_desc_enc[DESC_MAX_USED_LEN];
++ u32 sh_desc_dec[DESC_MAX_USED_LEN];
++ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
++ u8 key[CAAM_MAX_KEY_SIZE];
++ dma_addr_t key_dma;
++ struct alginfo adata;
++ struct alginfo cdata;
++ unsigned int authsize;
++ struct device *qidev;
++ spinlock_t lock; /* Protects multiple init of driver context */
++ struct caam_drv_ctx *drv_ctx[NUM_OP];
++};
++
++static int aead_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
++ typeof(*alg), aead);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ u32 ctx1_iv_off = 0;
++ u32 *nonce = NULL;
++ unsigned int data_len[2];
++ u32 inl_mask;
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_CTR_MOD128);
++ const bool is_rfc3686 = alg->caam.rfc3686;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ /*
++ * AES-CTR needs to load IV in CONTEXT1 reg
++ * at an offset of 128bits (16bytes)
++ * CONTEXT1[255:128] = IV
++ */
++ if (ctr_mode)
++ ctx1_iv_off = 16;
++
++ /*
++ * RFC3686 specific:
++ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
++ */
++ if (is_rfc3686) {
++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
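++		/*
++		 * the rfc3686 template appends the 4-byte nonce to the
++		 * cipher key, so it sits at the very end of ctx->key and
++		 * is already counted in cdata.keylen
++		 */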
++ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
++ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
++ }
++
++ data_len[0] = ctx->adata.keylen_pad;
++ data_len[1] = ctx->cdata.keylen;
++
++ if (alg->caam.geniv)
++ goto skip_enc;
++
++ /* aead_encrypt shared descriptor */
++ if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
++ DESC_JOB_IO_LEN, data_len, &inl_mask,
++ ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
++
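++	/* inl_mask bit 0: auth key fits inline; bit 1: cipher key fits */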
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
++ else
++ ctx->adata.key_dma = ctx->key_dma;
++
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
++ else
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
++
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
++
++ cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
++ ivsize, ctx->authsize, is_rfc3686, nonce,
++ ctx1_iv_off, true);
++
++skip_enc:
++ /* aead_decrypt shared descriptor */
++ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
++ DESC_JOB_IO_LEN, data_len, &inl_mask,
++ ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
++
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
++ else
++ ctx->adata.key_dma = ctx->key_dma;
++
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
++ else
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
++
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
++
++ cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
++ ivsize, ctx->authsize, alg->caam.geniv,
++ is_rfc3686, nonce, ctx1_iv_off, true);
++
++ if (!alg->caam.geniv)
++ goto skip_givenc;
++
++ /* aead_givencrypt shared descriptor */
++ if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
++ DESC_JOB_IO_LEN, data_len, &inl_mask,
++ ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
++
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
++ else
++ ctx->adata.key_dma = ctx->key_dma;
++
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
++ else
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
++
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
++
++ cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
++ ivsize, ctx->authsize, is_rfc3686, nonce,
++ ctx1_iv_off, true);
++
++skip_givenc:
++ return 0;
++}
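++
++/*
++ * Editor's note (illustrative sketch, not part of the original driver):
++ * desc_inline_query() reports, per key, whether it still fits in the shared
++ * descriptor budget.  Bit 0 of inl_mask covers data_len[0] (the split
++ * authentication key) and bit 1 covers data_len[1] (the cipher key), which
++ * is why every branch above repeats the same pattern:
++ *
++ *	if (inl_mask & BIT(n))
++ *		adata/cdata.key_virt = ...;	// key copied into descriptor
++ *	else
++ *		adata/cdata.key_dma = ...;	// key fetched by CAAM via DMA
++ */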
++
++static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ aead_set_sh_desc(authenc);
++
++ return 0;
++}
++
++static int aead_setkey(struct crypto_aead *aead, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ struct crypto_authenc_keys keys;
++ int ret = 0;
++
++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
++ goto badkey;
++
++#ifdef DEBUG
++ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
++ keys.authkeylen + keys.enckeylen, keys.enckeylen,
++ keys.authkeylen);
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
++ keys.authkeylen, CAAM_MAX_KEY_SIZE -
++ keys.enckeylen);
++ if (ret)
++ goto badkey;
++
++	/* append the encryption key after the auth split key */
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
++ ctx->adata.keylen_pad + keys.enckeylen, 1);
++#endif
++
++ ctx->cdata.keylen = keys.enckeylen;
++
++ ret = aead_set_sh_desc(aead);
++ if (ret)
++ goto badkey;
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
++ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
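++
++/*
++ * Editor's note, an assumed-size illustration of the key layout built by
++ * aead_setkey():
++ *
++ *	ctx->key: [ split auth key, padded to adata.keylen_pad ][ enc key ]
++ *
++ * e.g. an HMAC-SHA256 split key (64 bytes, already 16-byte aligned) followed
++ * by a 16-byte AES key uses 80 of the CAAM_MAX_KEY_SIZE bytes; ctx->key_dma
++ * points at the start, the cipher key at key_dma + adata.keylen_pad.
++ */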
++
++static int tls_set_sh_desc(struct crypto_aead *tls)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++ unsigned int ivsize = crypto_aead_ivsize(tls);
++ unsigned int blocksize = crypto_aead_blocksize(tls);
++ unsigned int assoclen = 13; /* always 13 bytes for TLS */
++ unsigned int data_len[2];
++ u32 inl_mask;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ /*
++ * TLS 1.0 encrypt shared descriptor
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ data_len[0] = ctx->adata.keylen_pad;
++ data_len[1] = ctx->cdata.keylen;
++
++ if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
++ &inl_mask, ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
++
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
++ else
++ ctx->adata.key_dma = ctx->key_dma;
++
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
++ else
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
++
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
++
++ cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
++ assoclen, ivsize, ctx->authsize, blocksize);
++
++ /*
++ * TLS 1.0 decrypt shared descriptor
++ * Keys do not fit inline, regardless of algorithms used
++ */
++ ctx->adata.key_dma = ctx->key_dma;
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
++
++ cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
++ assoclen, ivsize, ctx->authsize, blocksize);
++
++ return 0;
++}
++
++static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++
++ ctx->authsize = authsize;
++ tls_set_sh_desc(tls);
++
++ return 0;
++}
++
++static int tls_setkey(struct crypto_aead *tls, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++ struct device *jrdev = ctx->jrdev;
++ struct crypto_authenc_keys keys;
++ int ret = 0;
++
++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
++ goto badkey;
++
++#ifdef DEBUG
++ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
++ keys.authkeylen + keys.enckeylen, keys.enckeylen,
++ keys.authkeylen);
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
++ keys.authkeylen, CAAM_MAX_KEY_SIZE -
++ keys.enckeylen);
++ if (ret)
++ goto badkey;
++
++	/* append the encryption key after the auth split key */
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
++
++#ifdef DEBUG
++ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
++ ctx->adata.keylen, ctx->adata.keylen_pad);
++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
++ ctx->adata.keylen_pad + keys.enckeylen, 1);
++#endif
++
++ ctx->cdata.keylen = keys.enckeylen;
++
++ ret = tls_set_sh_desc(tls);
++ if (ret)
++ goto badkey;
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
++ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
++
++static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
++ const char *alg_name = crypto_tfm_alg_name(tfm);
++ struct device *jrdev = ctx->jrdev;
++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ u32 ctx1_iv_off = 0;
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_CTR_MOD128);
++ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
++ int ret = 0;
++
++ memcpy(ctx->key, key, keylen);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++ /*
++ * AES-CTR needs to load IV in CONTEXT1 reg
++ * at an offset of 128 bits (16 bytes)
++ * CONTEXT1[255:128] = IV
++ */
++ if (ctr_mode)
++ ctx1_iv_off = 16;
++
++ /*
++ * RFC3686 specific:
++ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
++ * | *key = {KEY, NONCE}
++ */
++ if (is_rfc3686) {
++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
++ keylen -= CTR_RFC3686_NONCE_SIZE;
++ }
++
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
++
++ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
++ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
++ ivsize, is_rfc3686, ctx1_iv_off);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[GIVENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
++ ctx->sh_desc_givenc);
++ if (ret) {
++ dev_err(jrdev, "driver givenc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
++ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
++
++static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *jrdev = ctx->jrdev;
++ int ret = 0;
++
++ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
++ crypto_ablkcipher_set_flags(ablkcipher,
++ CRYPTO_TFM_RES_BAD_KEY_LEN);
++ dev_err(jrdev, "key size mismatch\n");
++ return -EINVAL;
++ }
++
++ memcpy(ctx->key, key, keylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
++
++ /* xts ablkcipher encrypt, decrypt shared descriptors */
++ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
++ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
++ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++	return -EINVAL;
++}
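++
++/*
++ * Editor's note: XTS consumes two equally sized AES keys concatenated in
++ * 'key', so the check above accepts only keylen 32 (2 x AES-128) and 64
++ * (2 x AES-256) bytes; 2 x AES-192 is rejected along with everything else.
++ */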
++
++/*
++ * aead_edesc - s/w-extended aead descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @assoclen: associated data length, in CAAM endianness
++ * @assoclen_dma: bus physical mapped address of req->assoclen
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table
++ */
++struct aead_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ unsigned int assoclen;
++ dma_addr_t assoclen_dma;
++ struct caam_drv_req drv_req;
++#define CAAM_QI_MAX_AEAD_SG \
++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
++ sizeof(struct qm_sg_entry))
++ struct qm_sg_entry sgt[0];
++};
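++
++/*
++ * Editor's note (sizes assumed for illustration): CAAM_QI_MAX_AEAD_SG is
++ * simply how many 16-byte struct qm_sg_entry slots remain in one qi_cache
++ * object once the edesc header is accounted for, e.g. with a 768-byte cache
++ * object and a 64-byte header, (768 - 64) / 16 = 44 S/G entries fit.
++ */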
++
++/*
++ * tls_edesc - s/w-extended tls descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
++ * @dst: pointer to output scatterlist, usable for in-place operation
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table
++ */
++struct tls_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ struct scatterlist tmp[2];
++ struct scatterlist *dst;
++ struct caam_drv_req drv_req;
++ struct qm_sg_entry sgt[0];
++};
++
++/*
++ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table
++ */
++struct ablkcipher_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ struct caam_drv_req drv_req;
++#define CAAM_QI_MAX_ABLKCIPHER_SG \
++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
++ sizeof(struct qm_sg_entry))
++ struct qm_sg_entry sgt[0];
++};
++
++static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
++ enum optype type)
++{
++ /*
++ * This function is called on the fast path with values of 'type'
++ * known at compile time. Invalid arguments are not expected and
++ * thus no checks are made.
++ */
++ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
++ u32 *desc;
++
++ if (unlikely(!drv_ctx)) {
++ spin_lock(&ctx->lock);
++
++		/* Read again to check if some other core initialized drv_ctx */
++ drv_ctx = ctx->drv_ctx[type];
++ if (!drv_ctx) {
++ int cpu;
++
++ if (type == ENCRYPT)
++ desc = ctx->sh_desc_enc;
++ else if (type == DECRYPT)
++ desc = ctx->sh_desc_dec;
++ else /* (type == GIVENCRYPT) */
++ desc = ctx->sh_desc_givenc;
++
++ cpu = smp_processor_id();
++ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
++ if (likely(!IS_ERR_OR_NULL(drv_ctx)))
++ drv_ctx->op_type = type;
++
++ ctx->drv_ctx[type] = drv_ctx;
++ }
++
++ spin_unlock(&ctx->lock);
++ }
++
++ return drv_ctx;
++}
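++
++/*
++ * Editor's note: get_drv_ctx() is a double-checked lazy initializer.  The
++ * unlocked fast-path read is safe because a driver context is published at
++ * most once, under ctx->lock, and is not torn down while the tfm is live;
++ * a core that loses the race simply finds and reuses the winner's context.
++ */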
++
++static void caam_unmap(struct device *dev, struct scatterlist *src,
++ struct scatterlist *dst, int src_nents,
++ int dst_nents, dma_addr_t iv_dma, int ivsize,
++ enum optype op_type, dma_addr_t qm_sg_dma,
++ int qm_sg_bytes)
++{
++ if (dst != src) {
++ if (src_nents)
++ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
++ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
++ } else {
++ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
++ }
++
++ if (iv_dma)
++ dma_unmap_single(dev, iv_dma, ivsize,
++ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
++ DMA_TO_DEVICE);
++ if (qm_sg_bytes)
++ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
++}
++
++static void aead_unmap(struct device *dev,
++ struct aead_edesc *edesc,
++ struct aead_request *req)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ int ivsize = crypto_aead_ivsize(aead);
++
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++}
++
++static void tls_unmap(struct device *dev,
++ struct tls_edesc *edesc,
++ struct aead_request *req)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ int ivsize = crypto_aead_ivsize(aead);
++
++ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
++ edesc->dst_nents, edesc->iv_dma, ivsize,
++ edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
++ edesc->qm_sg_bytes);
++}
++
++static void ablkcipher_unmap(struct device *dev,
++ struct ablkcipher_edesc *edesc,
++ struct ablkcipher_request *req)
++{
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++}
++
++static void aead_done(struct caam_drv_req *drv_req, u32 status)
++{
++ struct device *qidev;
++ struct aead_edesc *edesc;
++ struct aead_request *aead_req = drv_req->app_ctx;
++ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
++ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
++ int ecode = 0;
++
++ qidev = caam_ctx->qidev;
++
++ if (unlikely(status)) {
++ caam_jr_strstatus(qidev, status);
++ ecode = -EIO;
++ }
++
++ edesc = container_of(drv_req, typeof(*edesc), drv_req);
++ aead_unmap(qidev, edesc, aead_req);
++
++ aead_request_complete(aead_req, ecode);
++ qi_cache_free(edesc);
++}
++
++/*
++ * allocate and map the aead extended descriptor
++ */
++static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
++ bool encrypt)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
++ typeof(*alg), aead);
++ struct device *qidev = ctx->qidev;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
++ struct aead_edesc *edesc;
++ dma_addr_t qm_sg_dma, iv_dma = 0;
++ int ivsize = 0;
++ unsigned int authsize = ctx->authsize;
++ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
++ int in_len, out_len;
++ struct qm_sg_entry *sg_table, *fd_sgt;
++ struct caam_drv_ctx *drv_ctx;
++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++
++ drv_ctx = get_drv_ctx(ctx, op_type);
++ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
++ return (struct aead_edesc *)drv_ctx;
++
++ /* allocate space for base edesc and hw desc commands, link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (unlikely(!edesc)) {
++ dev_err(qidev, "could not allocate extended descriptor\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ if (likely(req->src == req->dst)) {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize : 0));
++ if (unlikely(src_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : 0));
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
++ }
++
++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen);
++ if (unlikely(src_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen);
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
++ }
++
++ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize :
++ (-authsize)));
++ if (unlikely(dst_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : (-authsize)));
++ qi_cache_free(edesc);
++ return ERR_PTR(dst_nents);
++ }
++
++ if (src_nents) {
++ mapped_src_nents = dma_map_sg(qidev, req->src,
++ src_nents, DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = 0;
++ }
++
++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(qidev, "unable to map destination\n");
++ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
++ ivsize = crypto_aead_ivsize(aead);
++ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents,
++ dst_nents, 0, 0, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ /*
++ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
++ * Input is not contiguous.
++ */
++ qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
++ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
++ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
++ qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ sg_table = &edesc->sgt[0];
++ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++
++ edesc->src_nents = src_nents;
++ edesc->dst_nents = dst_nents;
++ edesc->iv_dma = iv_dma;
++ edesc->drv_req.app_ctx = req;
++ edesc->drv_req.cbk = aead_done;
++ edesc->drv_req.drv_ctx = drv_ctx;
++
++ edesc->assoclen = cpu_to_caam32(req->assoclen);
++ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
++ dev_err(qidev, "unable to map assoclen\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
++ qm_sg_index++;
++ if (ivsize) {
++ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
++ qm_sg_index++;
++ }
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
++ qm_sg_index += mapped_src_nents;
++
++ if (mapped_dst_nents > 1)
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ qm_sg_index, 0);
++
++ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, qm_sg_dma)) {
++ dev_err(qidev, "unable to map S/G table\n");
++ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ edesc->qm_sg_dma = qm_sg_dma;
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ out_len = req->assoclen + req->cryptlen +
++ (encrypt ? ctx->authsize : (-ctx->authsize));
++ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
++
++ fd_sgt = &edesc->drv_req.fd_sgt[0];
++ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
++
++ if (req->dst == req->src) {
++ if (mapped_src_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
++ out_len, 0);
++ else
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (1 + !!ivsize) * sizeof(*sg_table),
++ out_len, 0);
++ } else if (mapped_dst_nents == 1) {
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
++ 0);
++ } else {
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
++ qm_sg_index, out_len, 0);
++ }
++
++ return edesc;
++}
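++
++/*
++ * Editor's note, a sketch of the qm_sg table laid out above for the
++ * scattered, non-in-place case (indices follow the code, not the hardware
++ * spec):
++ *
++ *	sg_table[0]		assoclen (4 bytes, CAAM endianness)
++ *	sg_table[1]		IV, present only when ivsize != 0
++ *	sg_table[2..]		req->src segments, last entry marked final
++ *	sg_table[qm_sg_index..]	req->dst segments, only if dst is scattered
++ *
++ * fd_sgt[1] (input) always references this table; fd_sgt[0] (output) points
++ * either at a lone dst segment directly or into the table at qm_sg_index.
++ */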
++
++static inline int aead_crypt(struct aead_request *req, bool encrypt)
++{
++ struct aead_edesc *edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ int ret;
++
++ if (unlikely(caam_congested))
++ return -EAGAIN;
++
++ /* allocate extended descriptor */
++ edesc = aead_edesc_alloc(req, encrypt);
++ if (IS_ERR_OR_NULL(edesc))
++ return PTR_ERR(edesc);
++
++ /* Create and submit job descriptor */
++ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
++ if (!ret) {
++ ret = -EINPROGRESS;
++ } else {
++ aead_unmap(ctx->qidev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static int aead_encrypt(struct aead_request *req)
++{
++ return aead_crypt(req, true);
++}
++
++static int aead_decrypt(struct aead_request *req)
++{
++ return aead_crypt(req, false);
++}
++
++static void tls_done(struct caam_drv_req *drv_req, u32 status)
++{
++ struct device *qidev;
++ struct tls_edesc *edesc;
++ struct aead_request *aead_req = drv_req->app_ctx;
++ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
++ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
++ int ecode = 0;
++
++ qidev = caam_ctx->qidev;
++
++ if (unlikely(status)) {
++ caam_jr_strstatus(qidev, status);
++ ecode = -EIO;
++ }
++
++ edesc = container_of(drv_req, typeof(*edesc), drv_req);
++ tls_unmap(qidev, edesc, aead_req);
++
++ aead_request_complete(aead_req, ecode);
++ qi_cache_free(edesc);
++}
++
++/*
++ * allocate and map the tls extended descriptor
++ */
++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int blocksize = crypto_aead_blocksize(aead);
++ unsigned int padsize, authsize;
++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
++ typeof(*alg), aead);
++ struct device *qidev = ctx->qidev;
++ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
++ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
++ struct tls_edesc *edesc;
++ dma_addr_t qm_sg_dma, iv_dma = 0;
++ int ivsize = 0;
++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
++ int in_len, out_len;
++ struct qm_sg_entry *sg_table, *fd_sgt;
++ struct caam_drv_ctx *drv_ctx;
++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++ struct scatterlist *dst;
++
++ if (encrypt) {
++ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
++ blocksize);
++ authsize = ctx->authsize + padsize;
++ } else {
++ authsize = ctx->authsize;
++ }
++
++ drv_ctx = get_drv_ctx(ctx, op_type);
++ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
++ return (struct tls_edesc *)drv_ctx;
++
++ /* allocate space for base edesc and hw desc commands, link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (unlikely(!edesc)) {
++ dev_err(qidev, "could not allocate extended descriptor\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ if (likely(req->src == req->dst)) {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize : 0));
++ if (unlikely(src_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : 0));
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
++ }
++
++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ dst = req->dst;
++ } else {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen);
++ if (unlikely(src_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen);
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
++ }
++
++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
++ dst_nents = sg_nents_for_len(dst, req->cryptlen +
++ (encrypt ? authsize : 0));
++ if (unlikely(dst_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
++ req->cryptlen +
++ (encrypt ? authsize : 0));
++ qi_cache_free(edesc);
++ return ERR_PTR(dst_nents);
++ }
++
++ if (src_nents) {
++ mapped_src_nents = dma_map_sg(qidev, req->src,
++ src_nents, DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = 0;
++ }
++
++ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(qidev, "unable to map destination\n");
++ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ ivsize = crypto_aead_ivsize(aead);
++ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
++ op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ /*
++ * Create S/G table: IV, src, dst.
++ * Input is not contiguous.
++ */
++ qm_sg_ents = 1 + mapped_src_nents +
++ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ sg_table = &edesc->sgt[0];
++ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++
++ edesc->src_nents = src_nents;
++ edesc->dst_nents = dst_nents;
++ edesc->dst = dst;
++ edesc->iv_dma = iv_dma;
++ edesc->drv_req.app_ctx = req;
++ edesc->drv_req.cbk = tls_done;
++ edesc->drv_req.drv_ctx = drv_ctx;
++
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ qm_sg_index = 1;
++
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
++ qm_sg_index += mapped_src_nents;
++
++ if (mapped_dst_nents > 1)
++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
++ qm_sg_index, 0);
++
++ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, qm_sg_dma)) {
++ dev_err(qidev, "unable to map S/G table\n");
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
++ ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ edesc->qm_sg_dma = qm_sg_dma;
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ out_len = req->cryptlen + (encrypt ? authsize : 0);
++ in_len = ivsize + req->assoclen + req->cryptlen;
++
++ fd_sgt = &edesc->drv_req.fd_sgt[0];
++
++ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
++
++ if (req->dst == req->src)
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (sg_nents_for_len(req->src, req->assoclen) +
++ 1) * sizeof(*sg_table), out_len, 0);
++ else if (mapped_dst_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
++ else
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
++ qm_sg_index, out_len, 0);
++
++ return edesc;
++}
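++
++/*
++ * Editor's note, a worked example of the padding arithmetic at the top of
++ * tls_edesc_alloc() (values assumed): with AES-CBC (blocksize 16), cryptlen
++ * of 100 bytes and an HMAC-SHA1 tag of 20 bytes, (100 + 20) % 16 = 8, so
++ * padsize = 8 and the trailer grows to authsize = 20 + 8 = 28 bytes.  TLS
++ * 1.0 padding always adds between 1 and blocksize bytes, keeping the record
++ * a whole number of cipher blocks.
++ */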
++
++static int tls_crypt(struct aead_request *req, bool encrypt)
++{
++ struct tls_edesc *edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ int ret;
++
++ if (unlikely(caam_congested))
++ return -EAGAIN;
++
++ edesc = tls_edesc_alloc(req, encrypt);
++ if (IS_ERR_OR_NULL(edesc))
++ return PTR_ERR(edesc);
++
++ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
++ if (!ret) {
++ ret = -EINPROGRESS;
++ } else {
++ tls_unmap(ctx->qidev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static int tls_encrypt(struct aead_request *req)
++{
++ return tls_crypt(req, true);
++}
++
++static int tls_decrypt(struct aead_request *req)
++{
++ return tls_crypt(req, false);
++}
++
++static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
++{
++ struct ablkcipher_edesc *edesc;
++ struct ablkcipher_request *req = drv_req->app_ctx;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *qidev = caam_ctx->qidev;
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++
++#ifdef DEBUG
++ dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ edesc = container_of(drv_req, typeof(*edesc), drv_req);
++
++ if (status)
++ caam_jr_strstatus(qidev, status);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
++ edesc->src_nents > 1 ? 100 : ivsize, 1);
++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
++#endif
++
++ ablkcipher_unmap(qidev, edesc, req);
++ qi_cache_free(edesc);
++
++ /*
++ * The crypto API expects us to set the IV (req->info) to the last
++ * ciphertext block. This is used e.g. by the CTS mode.
++ */
++ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
++ ivsize, 0);
++
++ ablkcipher_request_complete(req, status);
++}
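++
++/*
++ * Editor's note: propagating the last ciphertext block into req->info lets
++ * callers chain requests CBC-style, e.g. encrypt bytes [0, n) and then issue
++ * a second request for [n, 2n) reusing req->info as the IV, with no extra
++ * state kept by the caller.
++ */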
++
++static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
++ *req, bool encrypt)
++{
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *qidev = ctx->qidev;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
++ struct ablkcipher_edesc *edesc;
++ dma_addr_t iv_dma;
++ bool in_contig;
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ int dst_sg_idx, qm_sg_ents;
++ struct qm_sg_entry *sg_table, *fd_sgt;
++ struct caam_drv_ctx *drv_ctx;
++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++
++ drv_ctx = get_drv_ctx(ctx, op_type);
++ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
++ return (struct ablkcipher_edesc *)drv_ctx;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (unlikely(src_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++ req->nbytes);
++ return ERR_PTR(src_nents);
++ }
++
++ if (unlikely(req->src != req->dst)) {
++ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
++ if (unlikely(dst_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
++ req->nbytes);
++ return ERR_PTR(dst_nents);
++ }
++
++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(qidev, "unable to map destination\n");
++ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ if (mapped_src_nents == 1 &&
++ iv_dma + ivsize == sg_dma_address(req->src)) {
++ in_contig = true;
++ qm_sg_ents = 0;
++ } else {
++ in_contig = false;
++ qm_sg_ents = 1 + mapped_src_nents;
++ }
++ dst_sg_idx = qm_sg_ents;
++
++ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
++ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
++ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (unlikely(!edesc)) {
++ dev_err(qidev, "could not allocate extended descriptor\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ edesc->src_nents = src_nents;
++ edesc->dst_nents = dst_nents;
++ edesc->iv_dma = iv_dma;
++ sg_table = &edesc->sgt[0];
++ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++ edesc->drv_req.app_ctx = req;
++ edesc->drv_req.cbk = ablkcipher_done;
++ edesc->drv_req.drv_ctx = drv_ctx;
++
++ if (!in_contig) {
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
++ }
++
++ if (mapped_dst_nents > 1)
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ dst_sg_idx, 0);
++
++ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
++ dev_err(qidev, "unable to map S/G table\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ fd_sgt = &edesc->drv_req.fd_sgt[0];
++
++ if (!in_contig)
++ dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
++ ivsize + req->nbytes, 0);
++ else
++ dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
++ 0);
++
++ if (req->src == req->dst) {
++ if (!in_contig)
++ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
++ sizeof(*sg_table), req->nbytes, 0);
++ else
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
++ req->nbytes, 0);
++ } else if (mapped_dst_nents > 1) {
++ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
++ sizeof(*sg_table), req->nbytes, 0);
++ } else {
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
++ req->nbytes, 0);
++ }
++
++ return edesc;
++}
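++
++/*
++ * Editor's note: the in_contig test above spots the lucky layout where the
++ * DMA-mapped IV ends exactly where the lone source segment begins
++ * (iv_dma + ivsize == sg_dma_address(req->src)).  The hardware is then fed
++ * one contiguous [IV | data] input and the input side of the S/G table is
++ * skipped entirely.
++ */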
++
++static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
++ struct skcipher_givcrypt_request *creq)
++{
++ struct ablkcipher_request *req = &creq->creq;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *qidev = ctx->qidev;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
++ struct ablkcipher_edesc *edesc;
++ dma_addr_t iv_dma;
++ bool out_contig;
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ struct qm_sg_entry *sg_table, *fd_sgt;
++ int dst_sg_idx, qm_sg_ents;
++ struct caam_drv_ctx *drv_ctx;
++
++ drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
++ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
++ return (struct ablkcipher_edesc *)drv_ctx;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (unlikely(src_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++ req->nbytes);
++ return ERR_PTR(src_nents);
++ }
++
++ if (unlikely(req->src != req->dst)) {
++ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
++ if (unlikely(dst_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
++ req->nbytes);
++ return ERR_PTR(dst_nents);
++ }
++
++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(qidev, "unable to map destination\n");
++ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ dst_nents = src_nents;
++ mapped_dst_nents = src_nents;
++ }
++
++ iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
++ dst_sg_idx = qm_sg_ents;
++ if (mapped_dst_nents == 1 &&
++ iv_dma + ivsize == sg_dma_address(req->dst)) {
++ out_contig = true;
++ } else {
++ out_contig = false;
++ qm_sg_ents += 1 + mapped_dst_nents;
++ }
++
++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
++ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
++ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, GIVENCRYPT, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (!edesc) {
++ dev_err(qidev, "could not allocate extended descriptor\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, GIVENCRYPT, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ edesc->src_nents = src_nents;
++ edesc->dst_nents = dst_nents;
++ edesc->iv_dma = iv_dma;
++ sg_table = &edesc->sgt[0];
++ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++ edesc->drv_req.app_ctx = req;
++ edesc->drv_req.cbk = ablkcipher_done;
++ edesc->drv_req.drv_ctx = drv_ctx;
++
++ if (mapped_src_nents > 1)
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
++
++ if (!out_contig) {
++ dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ dst_sg_idx + 1, 0);
++ }
++
++ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
++ dev_err(qidev, "unable to map S/G table\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, GIVENCRYPT, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ fd_sgt = &edesc->drv_req.fd_sgt[0];
++
++ if (mapped_src_nents > 1)
++ dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
++ 0);
++ else
++ dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
++ req->nbytes, 0);
++
++ if (!out_contig)
++ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
++ sizeof(*sg_table), ivsize + req->nbytes,
++ 0);
++ else
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
++ ivsize + req->nbytes, 0);
++
++ return edesc;
++}
++
++static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
++{
++ struct ablkcipher_edesc *edesc;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ int ret;
++
++ if (unlikely(caam_congested))
++ return -EAGAIN;
++
++ /* allocate extended descriptor */
++ edesc = ablkcipher_edesc_alloc(req, encrypt);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
++ if (!ret) {
++ ret = -EINPROGRESS;
++ } else {
++ ablkcipher_unmap(ctx->qidev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static int ablkcipher_encrypt(struct ablkcipher_request *req)
++{
++ return ablkcipher_crypt(req, true);
++}
++
++static int ablkcipher_decrypt(struct ablkcipher_request *req)
++{
++ return ablkcipher_crypt(req, false);
++}
++
++static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
++{
++ struct ablkcipher_request *req = &creq->creq;
++ struct ablkcipher_edesc *edesc;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ int ret;
++
++ if (unlikely(caam_congested))
++ return -EAGAIN;
++
++ /* allocate extended descriptor */
++ edesc = ablkcipher_giv_edesc_alloc(creq);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
++ if (!ret) {
++ ret = -EINPROGRESS;
++ } else {
++ ablkcipher_unmap(ctx->qidev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++#define template_ablkcipher template_u.ablkcipher
++struct caam_alg_template {
++ char name[CRYPTO_MAX_ALG_NAME];
++ char driver_name[CRYPTO_MAX_ALG_NAME];
++ unsigned int blocksize;
++ u32 type;
++ union {
++ struct ablkcipher_alg ablkcipher;
++ } template_u;
++ u32 class1_alg_type;
++ u32 class2_alg_type;
++};
++
++static struct caam_alg_template driver_algs[] = {
++ /* ablkcipher descriptor */
++ {
++ .name = "cbc(aes)",
++ .driver_name = "cbc-aes-caam-qi",
++ .blocksize = AES_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .givencrypt = ablkcipher_givencrypt,
++ .geniv = "<built-in>",
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ },
++ {
++ .name = "cbc(des3_ede)",
++ .driver_name = "cbc-3des-caam-qi",
++ .blocksize = DES3_EDE_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .givencrypt = ablkcipher_givencrypt,
++ .geniv = "<built-in>",
++ .min_keysize = DES3_EDE_KEY_SIZE,
++ .max_keysize = DES3_EDE_KEY_SIZE,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ },
++ {
++ .name = "cbc(des)",
++ .driver_name = "cbc-des-caam-qi",
++ .blocksize = DES_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .givencrypt = ablkcipher_givencrypt,
++ .geniv = "<built-in>",
++ .min_keysize = DES_KEY_SIZE,
++ .max_keysize = DES_KEY_SIZE,
++ .ivsize = DES_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ },
++ {
++ .name = "ctr(aes)",
++ .driver_name = "ctr-aes-caam-qi",
++ .blocksize = 1,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .geniv = "chainiv",
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
++ },
++ {
++ .name = "rfc3686(ctr(aes))",
++ .driver_name = "rfc3686-ctr-aes-caam-qi",
++ .blocksize = 1,
++ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .givencrypt = ablkcipher_givencrypt,
++ .geniv = "<built-in>",
++ .min_keysize = AES_MIN_KEY_SIZE +
++ CTR_RFC3686_NONCE_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE +
++ CTR_RFC3686_NONCE_SIZE,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
++ },
++ {
++ .name = "xts(aes)",
++ .driver_name = "xts-aes-caam-qi",
++ .blocksize = AES_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
++ .template_ablkcipher = {
++ .setkey = xts_ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .geniv = "eseqiv",
++ .min_keysize = 2 * AES_MIN_KEY_SIZE,
++ .max_keysize = 2 * AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
++ },
++};
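++
++/*
++ * Editor's note, a hedged usage sketch: once these templates are registered
++ * with the crypto core, any in-kernel user reaches them through the generic
++ * symmetric-cipher API and priority-based selection, e.g.:
++ *
++ *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
++ *
++ *	if (!IS_ERR(tfm))
++ *		crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
++ */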
++
++static struct caam_aead_alg driver_aeads[] = {
++ /* single-pass ipsec_esp descriptor */
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(md5),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-md5-"
++ "cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(md5),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-hmac-md5-"
++ "cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha1),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha1-"
++ "cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha1),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha1-cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha224),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha224-"
++ "cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha224),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha224-cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha256),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha256-"
++ "cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha256),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha256-cbc-aes-"
++ "caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha384),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha384-"
++ "cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha384),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha384-cbc-aes-"
++ "caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha512),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha512-"
++ "cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha512),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha512-cbc-aes-"
++ "caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-md5-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(md5),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-hmac-md5-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha1),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha1-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha1),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha1-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha224),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha224-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha224),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha224-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha256),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha256-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha256),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha256-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha384),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha384-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha384),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha384-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha512),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha512-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha512),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha512-"
++ "cbc-des3_ede-caam-qi",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(md5),cbc(des))",
++ .cra_driver_name = "authenc-hmac-md5-"
++ "cbc-des-caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(md5),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-hmac-md5-"
++ "cbc-des-caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha1),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha1-"
++ "cbc-des-caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha1),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha1-cbc-des-caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha224),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha224-"
++ "cbc-des-caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha224),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha224-cbc-des-"
++ "caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha256),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha256-"
++ "cbc-des-caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha256),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha256-cbc-des-"
++ "caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha384),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha384-"
++ "cbc-des-caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha384),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha384-cbc-des-"
++ "caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha512),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha512-"
++ "cbc-des-caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha512),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha512-cbc-des-"
++ "caam-qi",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "tls10(hmac(sha1),cbc(aes))",
++ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = tls_setkey,
++ .setauthsize = tls_setauthsize,
++ .encrypt = tls_encrypt,
++ .decrypt = tls_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ }
++};
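++
++/*
++ * Editor's note: each driver_aeads entry pairs a generic crypto API
++ * aead_alg template with the CAAM class 1 (cipher) and class 2
++ * (authentication) descriptor types; caam_aead_alg_init() below fills
++ * in the fields common to all entries before registration.
++ */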
++
++struct caam_crypto_alg {
++ struct list_head entry;
++ struct crypto_alg crypto_alg;
++ struct caam_alg_entry caam;
++};
++
++static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
++{
++ struct caam_drv_private *priv;
++ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
++ static const u8 digest_size[] = {
++ MD5_DIGEST_SIZE,
++ SHA1_DIGEST_SIZE,
++ SHA224_DIGEST_SIZE,
++ SHA256_DIGEST_SIZE,
++ SHA384_DIGEST_SIZE,
++ SHA512_DIGEST_SIZE
++ };
++ u8 op_id;
++
++ /*
++ * distribute tfms across job rings to ensure in-order
++ * crypto request processing per tfm
++ */
++ ctx->jrdev = caam_jr_alloc();
++ if (IS_ERR(ctx->jrdev)) {
++ pr_err("Job Ring Device allocation for transform failed\n");
++ return PTR_ERR(ctx->jrdev);
++ }
++
++ ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
++ dev_err(ctx->jrdev, "unable to map key\n");
++ caam_jr_free(ctx->jrdev);
++ return -ENOMEM;
++ }
++
++ /* copy descriptor header template value */
++ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
++
++ if (ctx->adata.algtype) {
++ op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
++ >> OP_ALG_ALGSEL_SHIFT;
++ if (op_id < ARRAY_SIZE(digest_size)) {
++ ctx->authsize = digest_size[op_id];
++ } else {
++ dev_err(ctx->jrdev,
++ "incorrect op_id %d; must be less than %zu\n",
++ op_id, ARRAY_SIZE(digest_size));
++ caam_jr_free(ctx->jrdev);
++ return -EINVAL;
++ }
++ } else {
++ ctx->authsize = 0;
++ }
++
++ priv = dev_get_drvdata(ctx->jrdev->parent);
++ ctx->qidev = priv->qidev;
++
++ spin_lock_init(&ctx->lock);
++ ctx->drv_ctx[ENCRYPT] = NULL;
++ ctx->drv_ctx[DECRYPT] = NULL;
++ ctx->drv_ctx[GIVENCRYPT] = NULL;
++
++ return 0;
++}
++
++static int caam_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_alg *alg = tfm->__crt_alg;
++ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
++ crypto_alg);
++ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ return caam_init_common(ctx, &caam_alg->caam);
++}
++
++static int caam_aead_init(struct crypto_aead *tfm)
++{
++ struct aead_alg *alg = crypto_aead_alg(tfm);
++ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
++ aead);
++ struct caam_ctx *ctx = crypto_aead_ctx(tfm);
++
++ return caam_init_common(ctx, &caam_alg->caam);
++}
++
++static void caam_exit_common(struct caam_ctx *ctx)
++{
++ caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
++ caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
++ caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
++
++ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
++ DMA_TO_DEVICE);
++
++ caam_jr_free(ctx->jrdev);
++}
++
++static void caam_cra_exit(struct crypto_tfm *tfm)
++{
++ caam_exit_common(crypto_tfm_ctx(tfm));
++}
++
++static void caam_aead_exit(struct crypto_aead *tfm)
++{
++ caam_exit_common(crypto_aead_ctx(tfm));
++}
++
++static struct list_head alg_list;
++static void __exit caam_qi_algapi_exit(void)
++{
++ struct caam_crypto_alg *t_alg, *n;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
++ struct caam_aead_alg *t_alg = driver_aeads + i;
++
++ if (t_alg->registered)
++ crypto_unregister_aead(&t_alg->aead);
++ }
++
++ if (!alg_list.next)
++ return;
++
++ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
++ crypto_unregister_alg(&t_alg->crypto_alg);
++ list_del(&t_alg->entry);
++ kfree(t_alg);
++ }
++}
++
++static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
++ *template)
++{
++ struct caam_crypto_alg *t_alg;
++ struct crypto_alg *alg;
++
++ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
++ if (!t_alg)
++ return ERR_PTR(-ENOMEM);
++
++ alg = &t_alg->crypto_alg;
++
++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->driver_name);
++ alg->cra_module = THIS_MODULE;
++ alg->cra_init = caam_cra_init;
++ alg->cra_exit = caam_cra_exit;
++ alg->cra_priority = CAAM_CRA_PRIORITY;
++ alg->cra_blocksize = template->blocksize;
++ alg->cra_alignmask = 0;
++ alg->cra_ctxsize = sizeof(struct caam_ctx);
++ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
++ template->type;
++ switch (template->type) {
++ case CRYPTO_ALG_TYPE_GIVCIPHER:
++ alg->cra_type = &crypto_givcipher_type;
++ alg->cra_ablkcipher = template->template_ablkcipher;
++ break;
++ case CRYPTO_ALG_TYPE_ABLKCIPHER:
++ alg->cra_type = &crypto_ablkcipher_type;
++ alg->cra_ablkcipher = template->template_ablkcipher;
++ break;
++ }
++
++ t_alg->caam.class1_alg_type = template->class1_alg_type;
++ t_alg->caam.class2_alg_type = template->class2_alg_type;
++
++ return t_alg;
++}
++
++static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
++{
++ struct aead_alg *alg = &t_alg->aead;
++
++ alg->base.cra_module = THIS_MODULE;
++ alg->base.cra_priority = CAAM_CRA_PRIORITY;
++ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
++ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
++
++ alg->init = caam_aead_init;
++ alg->exit = caam_aead_exit;
++}
++
++static int __init caam_qi_algapi_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++ struct device *ctrldev;
++ struct caam_drv_private *priv;
++ int i = 0, err = 0;
++ u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
++ unsigned int md_limit = SHA512_DIGEST_SIZE;
++ bool registered = false;
++
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ of_node_put(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ ctrldev = &pdev->dev;
++ priv = dev_get_drvdata(ctrldev);
++
++ /*
++ * If priv is NULL, it's probably because the caam driver wasn't
++ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
++ */
++ if (!priv || !priv->qi_present)
++ return -ENODEV;
++
++ if (caam_dpaa2) {
++ dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
++ return -ENODEV;
++ }
++
++ INIT_LIST_HEAD(&alg_list);
++
++ /*
++ * Register crypto algorithms the device supports.
++ * First, detect presence and attributes of DES, AES, and MD blocks.
++ */
++ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
++ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
++ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
++ aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
++ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++
++ /* If MD is present, limit digest size based on LP256 */
++ if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
++ md_limit = SHA256_DIGEST_SIZE;
++
++ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
++ struct caam_crypto_alg *t_alg;
++ struct caam_alg_template *alg = driver_algs + i;
++ u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
++
++ /* Skip DES algorithms if not supported by device */
++ if (!des_inst &&
++ ((alg_sel == OP_ALG_ALGSEL_3DES) ||
++ (alg_sel == OP_ALG_ALGSEL_DES)))
++ continue;
++
++ /* Skip AES algorithms if not supported by device */
++ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
++ continue;
++
++ t_alg = caam_alg_alloc(alg);
++ if (IS_ERR(t_alg)) {
++ err = PTR_ERR(t_alg);
++ dev_warn(priv->qidev, "%s alg allocation failed\n",
++ alg->driver_name);
++ continue;
++ }
++
++ err = crypto_register_alg(&t_alg->crypto_alg);
++ if (err) {
++ dev_warn(priv->qidev, "%s alg registration failed\n",
++ t_alg->crypto_alg.cra_driver_name);
++ kfree(t_alg);
++ continue;
++ }
++
++ list_add_tail(&t_alg->entry, &alg_list);
++ registered = true;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
++ struct caam_aead_alg *t_alg = driver_aeads + i;
++ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
++ OP_ALG_ALGSEL_MASK;
++ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
++ OP_ALG_ALGSEL_MASK;
++ u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++
++ /* Skip DES algorithms if not supported by device */
++ if (!des_inst &&
++ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
++ (c1_alg_sel == OP_ALG_ALGSEL_DES)))
++ continue;
++
++ /* Skip AES algorithms if not supported by device */
++ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
++ continue;
++
++ /*
++ * Check support for AES algorithms not available
++ * on LP devices.
++ */
++ if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
++ (alg_aai == OP_ALG_AAI_GCM))
++ continue;
++
++ /*
++ * Skip algorithms requiring message digests
++ * if MD or MD size is not supported by device.
++ */
++ if (c2_alg_sel &&
++ (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
++ continue;
++
++ caam_aead_alg_init(t_alg);
++
++ err = crypto_register_aead(&t_alg->aead);
++ if (err) {
++ pr_warn("%s alg registration failed\n",
++ t_alg->aead.base.cra_driver_name);
++ continue;
++ }
++
++ t_alg->registered = true;
++ registered = true;
++ }
++
++ if (registered)
++ dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
++
++ return err;
++}
++
++module_init(caam_qi_algapi_init);
++module_exit(caam_qi_algapi_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
++MODULE_AUTHOR("Freescale Semiconductor");
+diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
+new file mode 100644
+index 00000000..f0316346
+--- /dev/null
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -0,0 +1,4428 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "compat.h"
++#include "regs.h"
++#include "caamalg_qi2.h"
++#include "dpseci_cmd.h"
++#include "desc_constr.h"
++#include "error.h"
++#include "sg_sw_sec4.h"
++#include "sg_sw_qm2.h"
++#include "key_gen.h"
++#include "caamalg_desc.h"
++#include "../../../drivers/staging/fsl-mc/include/mc.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
++
++#define CAAM_CRA_PRIORITY 2000
++
++/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
++#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
++ SHA512_DIGEST_SIZE * 2)
++
++#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
++bool caam_little_end;
++EXPORT_SYMBOL(caam_little_end);
++bool caam_imx;
++EXPORT_SYMBOL(caam_imx);
++#endif
++
++/*
++ * This is a cache of buffers, from which users of the CAAM QI driver
++ * can allocate short buffers. It's speedier than doing kmalloc on the
++ * hotpath.
++ * NOTE: A more elegant solution would be to have some headroom in the
++ *	 frames being processed. This could be added by the dpaa2-eth
++ *	 driver, but it would pose a problem for userspace applications,
++ *	 which cannot know of this limitation. So for now, this will work.
++ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
++ */
++static struct kmem_cache *qi_cache;
++
++struct caam_alg_entry {
++ struct device *dev;
++ int class1_alg_type;
++ int class2_alg_type;
++ bool rfc3686;
++ bool geniv;
++};
++
++struct caam_aead_alg {
++ struct aead_alg aead;
++ struct caam_alg_entry caam;
++ bool registered;
++};
++
++/**
++ * struct caam_ctx - per-session context
++ * @flc: Flow Contexts array
++ * @key: virtual address of the key(s): [authentication key], encryption key
++ * @key_dma: I/O virtual address of the key
++ * @dev: dpseci device
++ * @adata: authentication algorithm details
++ * @cdata: encryption algorithm details
++ * @authsize: authentication tag (a.k.a. ICV / MAC) size
++ */
++struct caam_ctx {
++ struct caam_flc flc[NUM_OP];
++ u8 key[CAAM_MAX_KEY_SIZE];
++ dma_addr_t key_dma;
++ struct device *dev;
++ struct alginfo adata;
++ struct alginfo cdata;
++ unsigned int authsize;
++};
++
++void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
++ dma_addr_t iova_addr)
++{
++ phys_addr_t phys_addr;
++
++ phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
++ iova_addr;
++
++ return phys_to_virt(phys_addr);
++}
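++
++/*
++ * Editor's note: when an IOMMU domain is attached (priv->domain set),
++ * addresses carried in dequeued frame descriptors are I/O virtual
++ * addresses and must be translated back to physical through
++ * iommu_iova_to_phys() before phys_to_virt(); without a domain the FD
++ * already carries the physical address, hence the fallthrough above.
++ */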
++
++/*
++ * qi_cache_alloc - Allocate buffers from CAAM-QI cache
++ *
++ * Allocate data on the hotpath. Instead of using kmalloc, one can use the
++ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
++ * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
++ * hosting 16 SG entries.
++ *
++ * @flags - flags that would be used for the equivalent kmalloc(..) call
++ *
++ * Returns a pointer to a retrieved buffer on success or NULL on failure.
++ */
++static inline void *qi_cache_alloc(gfp_t flags)
++{
++ return kmem_cache_alloc(qi_cache, flags);
++}
++
++/*
++ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
++ *
++ * @obj - buffer previously allocated by qi_cache_alloc
++ *
++ * No checking is done; the call is a passthrough to
++ * kmem_cache_free(...).
++ */
++static inline void qi_cache_free(void *obj)
++{
++ kmem_cache_free(qi_cache, obj);
++}
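++
++/*
++ * Illustrative usage sketch (editor's addition, not part of the
++ * original patch), assuming the caller's extended descriptor plus its
++ * S/G table fit in CAAM_QI_MEMCACHE_SIZE, as in the edesc allocators
++ * below:
++ *
++ *	struct aead_edesc *edesc;
++ *
++ *	edesc = qi_cache_alloc(GFP_DMA | GFP_ATOMIC);
++ *	if (unlikely(!edesc))
++ *		return ERR_PTR(-ENOMEM);
++ *	... build the S/G table, enqueue, and on completion ...
++ *	qi_cache_free(edesc);
++ */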
++
++static struct caam_request *to_caam_req(struct crypto_async_request *areq)
++{
++ switch (crypto_tfm_alg_type(areq->tfm)) {
++ case CRYPTO_ALG_TYPE_ABLKCIPHER:
++ case CRYPTO_ALG_TYPE_GIVCIPHER:
++ return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
++ case CRYPTO_ALG_TYPE_AEAD:
++ return aead_request_ctx(container_of(areq, struct aead_request,
++ base));
++ default:
++ return ERR_PTR(-EINVAL);
++ }
++}
++
++static void caam_unmap(struct device *dev, struct scatterlist *src,
++ struct scatterlist *dst, int src_nents,
++ int dst_nents, dma_addr_t iv_dma, int ivsize,
++ enum optype op_type, dma_addr_t qm_sg_dma,
++ int qm_sg_bytes)
++{
++ if (dst != src) {
++ if (src_nents)
++ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
++ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
++ } else {
++ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
++ }
++
++ if (iv_dma)
++ dma_unmap_single(dev, iv_dma, ivsize,
++ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
++ DMA_TO_DEVICE);
++
++ if (qm_sg_bytes)
++ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
++}
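++
++/*
++ * Editor's note: for GIVENCRYPT the IV buffer is written back by the
++ * accelerator (the IV is generated on the device), hence the
++ * DMA_FROM_DEVICE direction above; for plain encrypt/decrypt the
++ * caller supplies the IV and it is mapped DMA_TO_DEVICE.
++ */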
++
++static int aead_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
++ typeof(*alg), aead);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ struct device *dev = ctx->dev;
++ struct caam_flc *flc;
++ u32 *desc;
++ u32 ctx1_iv_off = 0;
++ u32 *nonce = NULL;
++ unsigned int data_len[2];
++ u32 inl_mask;
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_CTR_MOD128);
++ const bool is_rfc3686 = alg->caam.rfc3686;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ /*
++	 * AES-CTR needs to load the IV in the CONTEXT1 reg
++	 * at an offset of 128 bits (16 bytes):
++ * CONTEXT1[255:128] = IV
++ */
++ if (ctr_mode)
++ ctx1_iv_off = 16;
++
++ /*
++ * RFC3686 specific:
++ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
++ */
++ if (is_rfc3686) {
++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
++ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
++ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
++ }
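++
++	/*
++	 * Editor's sketch of the resulting CONTEXT1 layout (assuming the
++	 * usual RFC3686 sizes, 4-byte nonce and 8-byte IV):
++	 *
++	 *	bytes 16..31 = | NONCE (4) | IV (8) | COUNTER (4) |
++	 *
++	 * so the IV itself is loaded at byte offset ctx1_iv_off = 20.
++	 */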
++
++ data_len[0] = ctx->adata.keylen_pad;
++ data_len[1] = ctx->cdata.keylen;
++
++ /* aead_encrypt shared descriptor */
++ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
++ DESC_QI_AEAD_ENC_LEN) +
++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
++ DESC_JOB_IO_LEN, data_len, &inl_mask,
++ ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
++
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
++ else
++ ctx->adata.key_dma = ctx->key_dma;
++
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
++ else
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
++
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
++
++ flc = &ctx->flc[ENCRYPT];
++ desc = flc->sh_desc;
++
++ if (alg->caam.geniv)
++ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
++ ivsize, ctx->authsize, is_rfc3686,
++ nonce, ctx1_iv_off, true);
++ else
++ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
++ ivsize, ctx->authsize, is_rfc3686, nonce,
++ ctx1_iv_off, true);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ /* aead_decrypt shared descriptor */
++ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
++ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
++ DESC_JOB_IO_LEN, data_len, &inl_mask,
++ ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
++
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
++ else
++ ctx->adata.key_dma = ctx->key_dma;
++
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
++ else
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
++
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
++
++ flc = &ctx->flc[DECRYPT];
++ desc = flc->sh_desc;
++
++ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
++ ivsize, ctx->authsize, alg->caam.geniv,
++ is_rfc3686, nonce, ctx1_iv_off, true);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ aead_set_sh_desc(authenc);
++
++ return 0;
++}
++
++struct split_key_sh_result {
++ struct completion completion;
++ int err;
++ struct device *dev;
++};
++
++static void split_key_sh_done(void *cbk_ctx, u32 err)
++{
++ struct split_key_sh_result *res = cbk_ctx;
++
++#ifdef DEBUG
++ dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
++#endif
++
++ if (err)
++ caam_qi2_strstatus(res->dev, err);
++
++ res->err = err;
++ complete(&res->completion);
++}
++
++static int gen_split_key_sh(struct device *dev, u8 *key_out,
++ struct alginfo * const adata, const u8 *key_in,
++ u32 keylen)
++{
++ struct caam_request *req_ctx;
++ u32 *desc;
++ struct split_key_sh_result result;
++ dma_addr_t dma_addr_in, dma_addr_out;
++ struct caam_flc *flc;
++ struct dpaa2_fl_entry *in_fle, *out_fle;
++ int ret = -ENOMEM;
++
++ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
++ if (!req_ctx)
++ return -ENOMEM;
++
++ in_fle = &req_ctx->fd_flt[1];
++ out_fle = &req_ctx->fd_flt[0];
++
++ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
++ if (!flc)
++ goto err_flc;
++
++ dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, dma_addr_in)) {
++ dev_err(dev, "unable to map key input memory\n");
++ goto err_dma_addr_in;
++ }
++
++ dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, dma_addr_out)) {
++ dev_err(dev, "unable to map key output memory\n");
++ goto err_dma_addr_out;
++ }
++
++ desc = flc->sh_desc;
++
++ init_sh_desc(desc, 0);
++ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
++
++ /* Sets MDHA up into an HMAC-INIT */
++ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
++ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
++ OP_ALG_AS_INIT);
++
++ /*
++	 * do a FIFO_LOAD of zero; this will trigger the internal key expansion
++ * into both pads inside MDHA
++ */
++ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
++
++ /*
++ * FIFO_STORE with the explicit split-key content store
++ * (0x26 output type)
++ */
++ append_fifo_store(desc, dma_addr_out, adata->keylen,
++ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ goto err_flc_dma;
++ }
++
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, dma_addr_in);
++ dpaa2_fl_set_len(in_fle, keylen);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, dma_addr_out);
++ dpaa2_fl_set_len(out_fle, adata->keylen_pad);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
++ print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ result.err = 0;
++ init_completion(&result.completion);
++ result.dev = dev;
++
++ req_ctx->flc = flc;
++ req_ctx->cbk = split_key_sh_done;
++ req_ctx->ctx = &result;
++
++ ret = dpaa2_caam_enqueue(dev, req_ctx);
++ if (ret == -EINPROGRESS) {
++ /* in progress */
++ wait_for_completion(&result.completion);
++ ret = result.err;
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
++ adata->keylen_pad, 1);
++#endif
++ }
++
++ dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
++ DMA_TO_DEVICE);
++err_flc_dma:
++ dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
++err_dma_addr_out:
++ dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
++err_dma_addr_in:
++ kfree(flc);
++err_flc:
++ kfree(req_ctx);
++ return ret;
++}
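++
++/*
++ * Editor's note (background): the "split key" generated above is the
++ * pair of precomputed inner/outer HMAC pad states (ipad/opad digests),
++ * which lets the per-request descriptors skip the two extra hash
++ * passes that keying HMAC from the raw authentication key would cost.
++ */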
++
++static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
++ u32 authkeylen)
++{
++ return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
++ authkeylen);
++}
++
++static int aead_setkey(struct crypto_aead *aead, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *dev = ctx->dev;
++ struct crypto_authenc_keys keys;
++ int ret;
++
++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
++ goto badkey;
++
++#ifdef DEBUG
++ dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
++ keys.authkeylen + keys.enckeylen, keys.enckeylen,
++ keys.authkeylen);
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ ctx->adata.keylen = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++
++#ifdef DEBUG
++ dev_err(dev, "split keylen %d split keylen padded %d\n",
++ ctx->adata.keylen, ctx->adata.keylen_pad);
++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
++#endif
++
++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++ goto badkey;
++
++ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
++ if (ret)
++ goto badkey;
++
++	/* append the encryption key after the auth split key */
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
++
++ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, ctx->key_dma)) {
++ dev_err(dev, "unable to map key i/o memory\n");
++ return -ENOMEM;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
++ ctx->adata.keylen_pad + keys.enckeylen, 1);
++#endif
++
++ ctx->cdata.keylen = keys.enckeylen;
++
++ ret = aead_set_sh_desc(aead);
++ if (ret)
++ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
++
++ return ret;
++badkey:
++ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
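++
++/*
++ * Resulting ctx->key layout after aead_setkey() (editor's sketch,
++ * reconstructed from the code above):
++ *
++ *	offset 0:                auth split key, padded to adata.keylen_pad
++ *	offset adata.keylen_pad: encryption key (cdata.keylen bytes)
++ */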
++
++static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
++ bool encrypt)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_request *req_ctx = aead_request_ctx(req);
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
++ typeof(*alg), aead);
++ struct device *dev = ctx->dev;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
++ struct aead_edesc *edesc;
++ dma_addr_t qm_sg_dma, iv_dma = 0;
++ int ivsize = 0;
++ unsigned int authsize = ctx->authsize;
++ int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
++ int in_len, out_len;
++ struct dpaa2_sg_entry *sg_table;
++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (unlikely(!edesc)) {
++ dev_err(dev, "could not allocate extended descriptor\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ if (unlikely(req->dst != req->src)) {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen);
++ if (unlikely(src_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen);
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
++ }
++
++ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize :
++ (-authsize)));
++ if (unlikely(dst_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : (-authsize)));
++ qi_cache_free(edesc);
++ return ERR_PTR(dst_nents);
++ }
++
++ if (src_nents) {
++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(dev, "unable to map source\n");
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = 0;
++ }
++
++ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(dev, "unable to map destination\n");
++ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize : 0));
++ if (unlikely(src_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : 0));
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
++ }
++
++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(dev, "unable to map source\n");
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
++ ivsize = crypto_aead_ivsize(aead);
++ iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, iv_dma)) {
++ dev_err(dev, "unable to map IV\n");
++ caam_unmap(dev, req->src, req->dst, src_nents,
++ dst_nents, 0, 0, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ /*
++ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
++ * Input is not contiguous.
++ */
++ qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
++ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
++ dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
++ qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ sg_table = &edesc->sgt[0];
++ qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
++
++ edesc->src_nents = src_nents;
++ edesc->dst_nents = dst_nents;
++ edesc->iv_dma = iv_dma;
++
++ edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
++ dev_err(dev, "unable to map assoclen\n");
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
++ qm_sg_index++;
++ if (ivsize) {
++ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
++ qm_sg_index++;
++ }
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
++ qm_sg_index += mapped_src_nents;
++
++ if (mapped_dst_nents > 1)
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ qm_sg_index, 0);
++
++ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, qm_sg_dma)) {
++ dev_err(dev, "unable to map S/G table\n");
++ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ edesc->qm_sg_dma = qm_sg_dma;
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ out_len = req->assoclen + req->cryptlen +
++ (encrypt ? ctx->authsize : (-ctx->authsize));
++ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, in_len);
++
++ if (req->dst == req->src) {
++ if (mapped_src_nents == 1) {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
++ } else {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
++ (1 + !!ivsize) * sizeof(*sg_table));
++ }
++ } else if (mapped_dst_nents == 1) {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
++ } else {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
++ sizeof(*sg_table));
++ }
++
++ dpaa2_fl_set_len(out_fle, out_len);
++
++ return edesc;
++}
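++
++/*
++ * Editor's sketch of the qm S/G table built above:
++ *
++ *	[0]             req->assoclen (4 bytes, via assoclen_dma)
++ *	[1]             IV, only when ivsize != 0
++ *	[...]           mapped_src_nents entries for req->src
++ *	[qm_sg_index..] mapped_dst_nents entries for req->dst, only when
++ *	                dst != src and more than one entry is needed
++ */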
++
++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
++ bool encrypt)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ unsigned int blocksize = crypto_aead_blocksize(tls);
++ unsigned int padsize, authsize;
++ struct caam_request *req_ctx = aead_request_ctx(req);
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
++ typeof(*alg), aead);
++ struct device *dev = ctx->dev;
++ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
++ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
++ struct tls_edesc *edesc;
++ dma_addr_t qm_sg_dma, iv_dma = 0;
++ int ivsize = 0;
++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
++ int in_len, out_len;
++ struct dpaa2_sg_entry *sg_table;
++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++ struct scatterlist *dst;
++
++ if (encrypt) {
++ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
++ blocksize);
++ authsize = ctx->authsize + padsize;
++ } else {
++ authsize = ctx->authsize;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (unlikely(!edesc)) {
++ dev_err(dev, "could not allocate extended descriptor\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ if (likely(req->src == req->dst)) {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize : 0));
++ if (unlikely(src_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : 0));
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
++ }
++
++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(dev, "unable to map source\n");
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ dst = req->dst;
++ } else {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen);
++ if (unlikely(src_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen);
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
++ }
++
++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
++ dst_nents = sg_nents_for_len(dst, req->cryptlen +
++ (encrypt ? authsize : 0));
++ if (unlikely(dst_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
++ req->cryptlen +
++ (encrypt ? authsize : 0));
++ qi_cache_free(edesc);
++ return ERR_PTR(dst_nents);
++ }
++
++ if (src_nents) {
++ mapped_src_nents = dma_map_sg(dev, req->src,
++ src_nents, DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(dev, "unable to map source\n");
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = 0;
++ }
++
++ mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(dev, "unable to map destination\n");
++ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ ivsize = crypto_aead_ivsize(tls);
++ iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, iv_dma)) {
++ dev_err(dev, "unable to map IV\n");
++ caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
++ op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ /*
++ * Create S/G table: IV, src, dst.
++ * Input is not contiguous.
++ */
++ qm_sg_ents = 1 + mapped_src_nents +
++ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ sg_table = &edesc->sgt[0];
++ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++
++ edesc->src_nents = src_nents;
++ edesc->dst_nents = dst_nents;
++ edesc->dst = dst;
++ edesc->iv_dma = iv_dma;
++
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ qm_sg_index = 1;
++
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
++ qm_sg_index += mapped_src_nents;
++
++ if (mapped_dst_nents > 1)
++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
++ qm_sg_index, 0);
++
++ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, qm_sg_dma)) {
++ dev_err(dev, "unable to map S/G table\n");
++ caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
++ ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ edesc->qm_sg_dma = qm_sg_dma;
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ out_len = req->cryptlen + (encrypt ? authsize : 0);
++ in_len = ivsize + req->assoclen + req->cryptlen;
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, in_len);
++
++ if (req->dst == req->src) {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
++ (sg_nents_for_len(req->src, req->assoclen) +
++ 1) * sizeof(*sg_table));
++ } else if (mapped_dst_nents == 1) {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
++ } else {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
++ sizeof(*sg_table));
++ }
++
++ dpaa2_fl_set_len(out_fle, out_len);
++
++ return edesc;
++}
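++
++/*
++ * Worked example of the TLS 1.0 CBC padding computed above (editor's
++ * illustration): with AES (blocksize 16), req->cryptlen = 32 and a
++ * SHA-1 ICV (ctx->authsize = 20), padsize = 16 - ((32 + 20) % 16) = 12,
++ * so the ciphertext grows by authsize = 20 + 12 = 32 bytes and the
++ * total payload stays block-aligned.
++ */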
++
++static int tls_set_sh_desc(struct crypto_aead *tls)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++ unsigned int ivsize = crypto_aead_ivsize(tls);
++ unsigned int blocksize = crypto_aead_blocksize(tls);
++ struct device *dev = ctx->dev;
++ struct caam_flc *flc;
++ u32 *desc;
++ unsigned int assoclen = 13; /* always 13 bytes for TLS */
++ unsigned int data_len[2];
++ u32 inl_mask;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ /*
++ * TLS 1.0 encrypt shared descriptor
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ data_len[0] = ctx->adata.keylen_pad;
++ data_len[1] = ctx->cdata.keylen;
++
++ if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
++ &inl_mask, ARRAY_SIZE(data_len)) < 0)
++ return -EINVAL;
++
++ if (inl_mask & 1)
++ ctx->adata.key_virt = ctx->key;
++ else
++ ctx->adata.key_dma = ctx->key_dma;
++
++ if (inl_mask & 2)
++ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
++ else
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
++
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
++
++ flc = &ctx->flc[ENCRYPT];
++ desc = flc->sh_desc;
++
++ cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
++ assoclen, ivsize, ctx->authsize, blocksize);
++
++ flc->flc[1] = desc_len(desc);
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * TLS 1.0 decrypt shared descriptor
++ * Keys do not fit inline, regardless of algorithms used
++ */
++ ctx->adata.key_dma = ctx->key_dma;
++ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
++
++ flc = &ctx->flc[DECRYPT];
++ desc = flc->sh_desc;
++
++ cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
++ ctx->authsize, blocksize);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int tls_setkey(struct crypto_aead *tls, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++ struct device *dev = ctx->dev;
++ struct crypto_authenc_keys keys;
++ int ret;
++
++ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
++ goto badkey;
++
++#ifdef DEBUG
++ dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
++ keys.authkeylen + keys.enckeylen, keys.enckeylen,
++ keys.authkeylen);
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ ctx->adata.keylen = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++
++#ifdef DEBUG
++ dev_err(dev, "split keylen %d split keylen padded %d\n",
++ ctx->adata.keylen, ctx->adata.keylen_pad);
++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
++ keys.authkeylen + keys.enckeylen, 1);
++#endif
++
++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++ goto badkey;
++
++ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
++ if (ret)
++ goto badkey;
++
++	/* append the encryption key after the auth split key */
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
++
++ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, ctx->key_dma)) {
++ dev_err(dev, "unable to map key i/o memory\n");
++ return -ENOMEM;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
++ ctx->adata.keylen_pad + keys.enckeylen, 1);
++#endif
++
++ ctx->cdata.keylen = keys.enckeylen;
++
++ ret = tls_set_sh_desc(tls);
++ if (ret)
++ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
++
++ return ret;
++badkey:
++ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
++
++static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++
++ ctx->authsize = authsize;
++ tls_set_sh_desc(tls);
++
++ return 0;
++}
++
++static int gcm_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *dev = ctx->dev;
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ struct caam_flc *flc;
++ u32 *desc;
++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ /*
++ * AES GCM encrypt shared descriptor
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ flc = &ctx->flc[ENCRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Job Descriptor and Shared Descriptors
++ * must all fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ flc = &ctx->flc[DECRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
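++
++/*
++ * Editor's note: rem_bytes is the space left in the 64-word descriptor
++ * buffer after the job descriptor I/O commands are accounted for. If
++ * the shared descriptor fits with the key inlined, the key is copied
++ * into the descriptor; otherwise it is referenced through ctx->key_dma.
++ * The RFC4106/RFC4543 descriptors below follow the same pattern.
++ */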
++
++static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ gcm_set_sh_desc(authenc);
++
++ return 0;
++}
++
++static int gcm_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *dev = ctx->dev;
++ int ret;
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ memcpy(ctx->key, key, keylen);
++ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, ctx->key_dma)) {
++ dev_err(dev, "unable to map key i/o memory\n");
++ return -ENOMEM;
++ }
++ ctx->cdata.keylen = keylen;
++
++ ret = gcm_set_sh_desc(aead);
++ if (ret)
++ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
++ DMA_TO_DEVICE);
++
++ return ret;
++}
++
++static int rfc4106_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *dev = ctx->dev;
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ struct caam_flc *flc;
++ u32 *desc;
++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ ctx->cdata.key_virt = ctx->key;
++
++ /*
++ * RFC4106 encrypt shared descriptor
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ flc = &ctx->flc[ENCRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
++ true);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Job Descriptor and Shared Descriptors
++ * must all fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ flc = &ctx->flc[DECRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
++ true);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int rfc4106_setauthsize(struct crypto_aead *authenc,
++ unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ rfc4106_set_sh_desc(authenc);
++
++ return 0;
++}
++
++static int rfc4106_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *dev = ctx->dev;
++ int ret;
++
++ if (keylen < 4)
++ return -EINVAL;
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ memcpy(ctx->key, key, keylen);
++ /*
++ * The last four bytes of the key material are used as the salt value
++ * in the nonce. Update the AES key length.
++ */
++ ctx->cdata.keylen = keylen - 4;
++ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, ctx->key_dma)) {
++ dev_err(dev, "unable to map key i/o memory\n");
++ return -ENOMEM;
++ }
++
++ ret = rfc4106_set_sh_desc(aead);
++ if (ret)
++ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
++ DMA_TO_DEVICE);
++
++ return ret;
++}
++
++static int rfc4543_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *dev = ctx->dev;
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ struct caam_flc *flc;
++ u32 *desc;
++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ ctx->cdata.key_virt = ctx->key;
++
++ /*
++ * RFC4543 encrypt shared descriptor
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ flc = &ctx->flc[ENCRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
++ true);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Job Descriptor and Shared Descriptors
++ * must all fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ flc = &ctx->flc[DECRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
++ true);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int rfc4543_setauthsize(struct crypto_aead *authenc,
++ unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ rfc4543_set_sh_desc(authenc);
++
++ return 0;
++}
++
++static int rfc4543_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *dev = ctx->dev;
++ int ret;
++
++ if (keylen < 4)
++ return -EINVAL;
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ memcpy(ctx->key, key, keylen);
++ /*
++ * The last four bytes of the key material are used as the salt value
++ * in the nonce. Update the AES key length.
++ */
++ ctx->cdata.keylen = keylen - 4;
++ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, ctx->key_dma)) {
++ dev_err(dev, "unable to map key i/o memory\n");
++ return -ENOMEM;
++ }
++
++ ret = rfc4543_set_sh_desc(aead);
++ if (ret)
++ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
++ DMA_TO_DEVICE);
++
++ return ret;
++}
++
++static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
++ const char *alg_name = crypto_tfm_alg_name(tfm);
++ struct device *dev = ctx->dev;
++ struct caam_flc *flc;
++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ u32 *desc;
++ u32 ctx1_iv_off = 0;
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_CTR_MOD128);
++ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
++
++ memcpy(ctx->key, key, keylen);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++ /*
++ * AES-CTR needs to load IV in CONTEXT1 reg
++	 * at an offset of 128 bits (16 bytes)
++ * CONTEXT1[255:128] = IV
++ */
++ if (ctr_mode)
++ ctx1_iv_off = 16;
++
++ /*
++ * RFC3686 specific:
++ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
++ * | *key = {KEY, NONCE}
++ */
++ if (is_rfc3686) {
++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
++ keylen -= CTR_RFC3686_NONCE_SIZE;
++ }
++
++ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, ctx->key_dma)) {
++ dev_err(dev, "unable to map key i/o memory\n");
++ return -ENOMEM;
++ }
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
++
++ /* ablkcipher_encrypt shared descriptor */
++ flc = &ctx->flc[ENCRYPT];
++ desc = flc->sh_desc;
++
++ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ /* ablkcipher_decrypt shared descriptor */
++ flc = &ctx->flc[DECRYPT];
++ desc = flc->sh_desc;
++
++ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ /* ablkcipher_givencrypt shared descriptor */
++ flc = &ctx->flc[GIVENCRYPT];
++ desc = flc->sh_desc;
++
++ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
++ ivsize, is_rfc3686, ctx1_iv_off);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *dev = ctx->dev;
++ struct caam_flc *flc;
++ u32 *desc;
++
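++	/* XTS takes two concatenated AES keys (data key + tweak key) of equal size */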
++ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
++ dev_err(dev, "key size mismatch\n");
++ crypto_ablkcipher_set_flags(ablkcipher,
++ CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++ }
++
++ memcpy(ctx->key, key, keylen);
++ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, ctx->key_dma)) {
++ dev_err(dev, "unable to map key i/o memory\n");
++ return -ENOMEM;
++ }
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
++
++ /* xts_ablkcipher_encrypt shared descriptor */
++ flc = &ctx->flc[ENCRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ /* xts_ablkcipher_decrypt shared descriptor */
++ flc = &ctx->flc[DECRYPT];
++ desc = flc->sh_desc;
++
++ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, flc->flc_dma)) {
++ dev_err(dev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static struct ablkcipher_edesc *
++ablkcipher_edesc_alloc(struct ablkcipher_request *req, bool encrypt)
++{
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_request *req_ctx = ablkcipher_request_ctx(req);
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *dev = ctx->dev;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
++ struct ablkcipher_edesc *edesc;
++ dma_addr_t iv_dma;
++ bool in_contig;
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ int dst_sg_idx, qm_sg_ents;
++ struct dpaa2_sg_entry *sg_table;
++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (unlikely(src_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
++ req->nbytes);
++ return ERR_PTR(src_nents);
++ }
++
++ if (unlikely(req->dst != req->src)) {
++ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
++ if (unlikely(dst_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
++ req->nbytes);
++ return ERR_PTR(dst_nents);
++ }
++
++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(dev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(dev, "unable to map destination\n");
++ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(dev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, iv_dma)) {
++ dev_err(dev, "unable to map IV\n");
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
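++	/*
++	 * If the IV buffer happens to sit immediately before a single,
++	 * contiguous source segment, the input can be handed to hardware
++	 * as one frame and no S/G table is needed for it.
++	 */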
++ if (mapped_src_nents == 1 &&
++ iv_dma + ivsize == sg_dma_address(req->src)) {
++ in_contig = true;
++ qm_sg_ents = 0;
++ } else {
++ in_contig = false;
++ qm_sg_ents = 1 + mapped_src_nents;
++ }
++ dst_sg_idx = qm_sg_ents;
++
++ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
++ dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
++ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (unlikely(!edesc)) {
++ dev_err(dev, "could not allocate extended descriptor\n");
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ edesc->src_nents = src_nents;
++ edesc->dst_nents = dst_nents;
++ edesc->iv_dma = iv_dma;
++ sg_table = &edesc->sgt[0];
++ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++
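++	/* Table layout: entry 0 is the IV, entries 1..n the source segments */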
++ if (!in_contig) {
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
++ }
++
++ if (mapped_dst_nents > 1)
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ dst_sg_idx, 0);
++
++ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
++ dev_err(dev, "unable to map S/G table\n");
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
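++	/* Populate the frame list: fd_flt[1] is the input, fd_flt[0] the output */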
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
++ dpaa2_fl_set_len(out_fle, req->nbytes);
++
++ if (!in_contig) {
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ } else {
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, iv_dma);
++ }
++
++ if (req->src == req->dst) {
++ if (!in_contig) {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
++ sizeof(*sg_table));
++ } else {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
++ }
++ } else if (mapped_dst_nents > 1) {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
++ sizeof(*sg_table));
++ } else {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
++ }
++
++ return edesc;
++}
++
++static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
++ struct skcipher_givcrypt_request *greq)
++{
++ struct ablkcipher_request *req = &greq->creq;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_request *req_ctx = ablkcipher_request_ctx(req);
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *dev = ctx->dev;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
++ struct ablkcipher_edesc *edesc;
++ dma_addr_t iv_dma;
++ bool out_contig;
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ struct dpaa2_sg_entry *sg_table;
++ int dst_sg_idx, qm_sg_ents;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (unlikely(src_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
++ req->nbytes);
++ return ERR_PTR(src_nents);
++ }
++
++ if (unlikely(req->dst != req->src)) {
++ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
++ if (unlikely(dst_nents < 0)) {
++ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
++ req->nbytes);
++ return ERR_PTR(dst_nents);
++ }
++
++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(dev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(dev, "unable to map destination\n");
++ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(dev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ dst_nents = src_nents;
++ mapped_dst_nents = src_nents;
++ }
++
++ iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, iv_dma)) {
++ dev_err(dev, "unable to map IV\n");
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
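++	/*
++	 * The generated IV is returned in front of the ciphertext, so the
++	 * output is contiguous only when dst is a single segment placed
++	 * right after the IV buffer.
++	 */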
++ qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
++ dst_sg_idx = qm_sg_ents;
++ if (mapped_dst_nents == 1 &&
++ iv_dma + ivsize == sg_dma_address(req->dst)) {
++ out_contig = true;
++ } else {
++ out_contig = false;
++ qm_sg_ents += 1 + mapped_dst_nents;
++ }
++
++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
++ dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
++ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, GIVENCRYPT, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (!edesc) {
++ dev_err(dev, "could not allocate extended descriptor\n");
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, GIVENCRYPT, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ edesc->src_nents = src_nents;
++ edesc->dst_nents = dst_nents;
++ edesc->iv_dma = iv_dma;
++ sg_table = &edesc->sgt[0];
++ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++
++ if (mapped_src_nents > 1)
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
++
++ if (!out_contig) {
++ dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ dst_sg_idx + 1, 0);
++ }
++
++ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
++ dev_err(dev, "unable to map S/G table\n");
++ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, GIVENCRYPT, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_len(in_fle, req->nbytes);
++ dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
++
++ if (mapped_src_nents > 1) {
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ } else {
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
++ }
++
++ if (!out_contig) {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
++ sizeof(*sg_table));
++ } else {
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
++ }
++
++ return edesc;
++}
++
++static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
++ struct aead_request *req)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ int ivsize = crypto_aead_ivsize(aead);
++ struct caam_request *caam_req = aead_request_ctx(req);
++
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, caam_req->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++}
++
++static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
++ struct aead_request *req)
++{
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ int ivsize = crypto_aead_ivsize(tls);
++ struct caam_request *caam_req = aead_request_ctx(req);
++
++ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
++ edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++}
++
++static void ablkcipher_unmap(struct device *dev,
++ struct ablkcipher_edesc *edesc,
++ struct ablkcipher_request *req)
++{
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ struct caam_request *caam_req = ablkcipher_request_ctx(req);
++
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, caam_req->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++}
++
++static void aead_encrypt_done(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct aead_request *req = container_of(areq, struct aead_request,
++ base);
++ struct caam_request *req_ctx = to_caam_req(areq);
++ struct aead_edesc *edesc = req_ctx->edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ int ecode = 0;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ aead_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ aead_request_complete(req, ecode);
++}
++
++static void aead_decrypt_done(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct aead_request *req = container_of(areq, struct aead_request,
++ base);
++ struct caam_request *req_ctx = to_caam_req(areq);
++ struct aead_edesc *edesc = req_ctx->edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ int ecode = 0;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++		/*
++		 * If the hardware's ICV (integrity) check failed, return
++		 * -EBADMSG to signal an authentication failure; any other
++		 * error maps to -EIO.
++		 */
++ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
++ JRSTA_CCBERR_ERRID_ICVCHK)
++ ecode = -EBADMSG;
++ else
++ ecode = -EIO;
++ }
++
++ aead_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ aead_request_complete(req, ecode);
++}
++
++static int aead_encrypt(struct aead_request *req)
++{
++ struct aead_edesc *edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct caam_request *caam_req = aead_request_ctx(req);
++ int ret;
++
++ /* allocate extended descriptor */
++ edesc = aead_edesc_alloc(req, true);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ caam_req->flc = &ctx->flc[ENCRYPT];
++ caam_req->op_type = ENCRYPT;
++ caam_req->cbk = aead_encrypt_done;
++ caam_req->ctx = &req->base;
++ caam_req->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
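++	/*
++	 * -EINPROGRESS means the request was accepted; -EBUSY with
++	 * MAY_BACKLOG set means it was queued on the backlog. Any other
++	 * return is a failure, so undo the mappings here.
++	 */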
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ aead_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static int aead_decrypt(struct aead_request *req)
++{
++ struct aead_edesc *edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct caam_request *caam_req = aead_request_ctx(req);
++ int ret;
++
++ /* allocate extended descriptor */
++ edesc = aead_edesc_alloc(req, false);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ caam_req->flc = &ctx->flc[DECRYPT];
++ caam_req->op_type = DECRYPT;
++ caam_req->cbk = aead_decrypt_done;
++ caam_req->ctx = &req->base;
++ caam_req->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ aead_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static void tls_encrypt_done(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct aead_request *req = container_of(areq, struct aead_request,
++ base);
++ struct caam_request *req_ctx = to_caam_req(areq);
++ struct tls_edesc *edesc = req_ctx->edesc;
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++ int ecode = 0;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ tls_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ aead_request_complete(req, ecode);
++}
++
++static void tls_decrypt_done(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct aead_request *req = container_of(areq, struct aead_request,
++ base);
++ struct caam_request *req_ctx = to_caam_req(areq);
++ struct tls_edesc *edesc = req_ctx->edesc;
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++ int ecode = 0;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++		/*
++		 * If the hardware's ICV (integrity) check failed, return
++		 * -EBADMSG to signal an authentication failure; any other
++		 * error maps to -EIO.
++		 */
++ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
++ JRSTA_CCBERR_ERRID_ICVCHK)
++ ecode = -EBADMSG;
++ else
++ ecode = -EIO;
++ }
++
++ tls_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ aead_request_complete(req, ecode);
++}
++
++static int tls_encrypt(struct aead_request *req)
++{
++ struct tls_edesc *edesc;
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++ struct caam_request *caam_req = aead_request_ctx(req);
++ int ret;
++
++ /* allocate extended descriptor */
++ edesc = tls_edesc_alloc(req, true);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ caam_req->flc = &ctx->flc[ENCRYPT];
++ caam_req->op_type = ENCRYPT;
++ caam_req->cbk = tls_encrypt_done;
++ caam_req->ctx = &req->base;
++ caam_req->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ tls_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static int tls_decrypt(struct aead_request *req)
++{
++ struct tls_edesc *edesc;
++ struct crypto_aead *tls = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(tls);
++ struct caam_request *caam_req = aead_request_ctx(req);
++ int ret;
++
++ /* allocate extended descriptor */
++ edesc = tls_edesc_alloc(req, false);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ caam_req->flc = &ctx->flc[DECRYPT];
++ caam_req->op_type = DECRYPT;
++ caam_req->cbk = tls_decrypt_done;
++ caam_req->ctx = &req->base;
++ caam_req->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ tls_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static int ipsec_gcm_encrypt(struct aead_request *req)
++{
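++	/* RFC4106/RFC4543 requests must carry at least 8 bytes of associated data */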
++ if (req->assoclen < 8)
++ return -EINVAL;
++
++ return aead_encrypt(req);
++}
++
++static int ipsec_gcm_decrypt(struct aead_request *req)
++{
++ if (req->assoclen < 8)
++ return -EINVAL;
++
++ return aead_decrypt(req);
++}
++
++static void ablkcipher_done(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct ablkcipher_request *req = ablkcipher_request_cast(areq);
++ struct caam_request *req_ctx = to_caam_req(areq);
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct ablkcipher_edesc *edesc = req_ctx->edesc;
++ int ecode = 0;
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
++ edesc->src_nents > 1 ? 100 : ivsize, 1);
++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
++#endif
++
++ ablkcipher_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++
++ /*
++ * The crypto API expects us to set the IV (req->info) to the last
++ * ciphertext block. This is used e.g. by the CTS mode.
++ */
++ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
++ ivsize, 0);
++
++ ablkcipher_request_complete(req, ecode);
++}
++
++static int ablkcipher_encrypt(struct ablkcipher_request *req)
++{
++ struct ablkcipher_edesc *edesc;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct caam_request *caam_req = ablkcipher_request_ctx(req);
++ int ret;
++
++ /* allocate extended descriptor */
++ edesc = ablkcipher_edesc_alloc(req, true);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ caam_req->flc = &ctx->flc[ENCRYPT];
++ caam_req->op_type = ENCRYPT;
++ caam_req->cbk = ablkcipher_done;
++ caam_req->ctx = &req->base;
++ caam_req->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ ablkcipher_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
++{
++ struct ablkcipher_request *req = &greq->creq;
++ struct ablkcipher_edesc *edesc;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct caam_request *caam_req = ablkcipher_request_ctx(req);
++ int ret;
++
++ /* allocate extended descriptor */
++ edesc = ablkcipher_giv_edesc_alloc(greq);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ caam_req->flc = &ctx->flc[GIVENCRYPT];
++ caam_req->op_type = GIVENCRYPT;
++ caam_req->cbk = ablkcipher_done;
++ caam_req->ctx = &req->base;
++ caam_req->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ ablkcipher_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static int ablkcipher_decrypt(struct ablkcipher_request *req)
++{
++ struct ablkcipher_edesc *edesc;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct caam_request *caam_req = ablkcipher_request_ctx(req);
++ int ret;
++
++ /* allocate extended descriptor */
++ edesc = ablkcipher_edesc_alloc(req, false);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ caam_req->flc = &ctx->flc[DECRYPT];
++ caam_req->op_type = DECRYPT;
++ caam_req->cbk = ablkcipher_done;
++ caam_req->ctx = &req->base;
++ caam_req->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ ablkcipher_unmap(ctx->dev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++struct caam_crypto_alg {
++ struct list_head entry;
++ struct crypto_alg crypto_alg;
++ struct caam_alg_entry caam;
++};
++
++static int caam_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_alg *alg = tfm->__crt_alg;
++ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
++ crypto_alg);
++ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ /* copy descriptor header template value */
++ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
++ caam_alg->caam.class1_alg_type;
++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
++ caam_alg->caam.class2_alg_type;
++
++ ctx->dev = caam_alg->caam.dev;
++
++ return 0;
++}
++
++static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
++{
++ struct ablkcipher_tfm *ablkcipher_tfm =
++ crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
++
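++	/* Reserve per-request space for the driver's caam_request state */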
++ ablkcipher_tfm->reqsize = sizeof(struct caam_request);
++ return caam_cra_init(tfm);
++}
++
++static int caam_cra_init_aead(struct crypto_aead *tfm)
++{
++ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
++ return caam_cra_init(crypto_aead_tfm(tfm));
++}
++
++static void caam_exit_common(struct caam_ctx *ctx)
++{
++ int i;
++
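++	/* Unmap every shared descriptor that was DMA-mapped for this tfm */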
++ for (i = 0; i < NUM_OP; i++) {
++ if (!ctx->flc[i].flc_dma)
++ continue;
++ dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
++ sizeof(ctx->flc[i].flc) +
++ desc_bytes(ctx->flc[i].sh_desc),
++ DMA_TO_DEVICE);
++ }
++
++ if (ctx->key_dma)
++ dma_unmap_single(ctx->dev, ctx->key_dma,
++ ctx->cdata.keylen + ctx->adata.keylen_pad,
++ DMA_TO_DEVICE);
++}
++
++static void caam_cra_exit(struct crypto_tfm *tfm)
++{
++ caam_exit_common(crypto_tfm_ctx(tfm));
++}
++
++static void caam_cra_exit_aead(struct crypto_aead *tfm)
++{
++ caam_exit_common(crypto_aead_ctx(tfm));
++}
++
++#define template_ablkcipher template_u.ablkcipher
++struct caam_alg_template {
++ char name[CRYPTO_MAX_ALG_NAME];
++ char driver_name[CRYPTO_MAX_ALG_NAME];
++ unsigned int blocksize;
++ u32 type;
++ union {
++ struct ablkcipher_alg ablkcipher;
++ } template_u;
++ u32 class1_alg_type;
++ u32 class2_alg_type;
++};
++
++static struct caam_alg_template driver_algs[] = {
++ /* ablkcipher descriptor */
++ {
++ .name = "cbc(aes)",
++ .driver_name = "cbc-aes-caam-qi2",
++ .blocksize = AES_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .givencrypt = ablkcipher_givencrypt,
++ .geniv = "<built-in>",
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ },
++ {
++ .name = "cbc(des3_ede)",
++ .driver_name = "cbc-3des-caam-qi2",
++ .blocksize = DES3_EDE_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .givencrypt = ablkcipher_givencrypt,
++ .geniv = "<built-in>",
++ .min_keysize = DES3_EDE_KEY_SIZE,
++ .max_keysize = DES3_EDE_KEY_SIZE,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ },
++ {
++ .name = "cbc(des)",
++ .driver_name = "cbc-des-caam-qi2",
++ .blocksize = DES_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .givencrypt = ablkcipher_givencrypt,
++ .geniv = "<built-in>",
++ .min_keysize = DES_KEY_SIZE,
++ .max_keysize = DES_KEY_SIZE,
++ .ivsize = DES_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ },
++ {
++ .name = "ctr(aes)",
++ .driver_name = "ctr-aes-caam-qi2",
++ .blocksize = 1,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .geniv = "chainiv",
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
++ },
++ {
++ .name = "rfc3686(ctr(aes))",
++ .driver_name = "rfc3686-ctr-aes-caam-qi2",
++ .blocksize = 1,
++ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .givencrypt = ablkcipher_givencrypt,
++ .geniv = "<built-in>",
++ .min_keysize = AES_MIN_KEY_SIZE +
++ CTR_RFC3686_NONCE_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE +
++ CTR_RFC3686_NONCE_SIZE,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
++ },
++ {
++ .name = "xts(aes)",
++ .driver_name = "xts-aes-caam-qi2",
++ .blocksize = AES_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
++ .template_ablkcipher = {
++ .setkey = xts_ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .geniv = "eseqiv",
++ .min_keysize = 2 * AES_MIN_KEY_SIZE,
++ .max_keysize = 2 * AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
++ }
++};
++
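++/* AEAD algorithm descriptors registered by this driver */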
++static struct caam_aead_alg driver_aeads[] = {
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc4106(gcm(aes))",
++ .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = rfc4106_setkey,
++ .setauthsize = rfc4106_setauthsize,
++ .encrypt = ipsec_gcm_encrypt,
++ .decrypt = ipsec_gcm_decrypt,
++ .ivsize = 8,
++ .maxauthsize = AES_BLOCK_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc4543(gcm(aes))",
++ .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = rfc4543_setkey,
++ .setauthsize = rfc4543_setauthsize,
++ .encrypt = ipsec_gcm_encrypt,
++ .decrypt = ipsec_gcm_decrypt,
++ .ivsize = 8,
++ .maxauthsize = AES_BLOCK_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++ },
++ },
++ /* Galois Counter Mode */
++ {
++ .aead = {
++ .base = {
++ .cra_name = "gcm(aes)",
++ .cra_driver_name = "gcm-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = gcm_setkey,
++ .setauthsize = gcm_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = 12,
++ .maxauthsize = AES_BLOCK_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++ }
++ },
++ /* single-pass ipsec_esp descriptor */
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(md5),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-md5-"
++ "cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(md5),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-hmac-md5-"
++ "cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha1),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha1-"
++ "cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha1),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha1-cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha224),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha224-"
++ "cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha224),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha224-cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha256),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha256-"
++ "cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha256),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha256-cbc-aes-"
++ "caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha384),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha384-"
++ "cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha384),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha384-cbc-aes-"
++ "caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha512),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-sha512-"
++ "cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha512),"
++ "cbc(aes)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha512-cbc-aes-"
++ "caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-md5-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(md5),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-hmac-md5-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha1),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha1-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha1),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha1-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha224),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha224-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha224),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha224-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha256),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha256-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha256),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha256-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha384),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha384-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha384),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha384-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha512),"
++ "cbc(des3_ede))",
++ .cra_driver_name = "authenc-hmac-sha512-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha512),"
++ "cbc(des3_ede)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha512-"
++ "cbc-des3_ede-caam-qi2",
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(md5),cbc(des))",
++ .cra_driver_name = "authenc-hmac-md5-"
++ "cbc-des-caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(md5),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-hmac-md5-"
++ "cbc-des-caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha1),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha1-"
++ "cbc-des-caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha1),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha1-cbc-des-caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha224),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha224-"
++ "cbc-des-caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha224),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha224-cbc-des-"
++ "caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha256),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha256-"
++ "cbc-des-caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha256),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++					   "hmac-sha256-cbc-des-"
++ "caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha384),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha384-"
++ "cbc-des-caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha384),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha384-cbc-des-"
++ "caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha512),cbc(des))",
++ .cra_driver_name = "authenc-hmac-sha512-"
++ "cbc-des-caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "echainiv(authenc(hmac(sha512),"
++ "cbc(des)))",
++ .cra_driver_name = "echainiv-authenc-"
++ "hmac-sha512-cbc-des-"
++ "caam-qi2",
++ .cra_blocksize = DES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .geniv = true,
++ }
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(md5),"
++ "rfc3686(ctr(aes)))",
++ .cra_driver_name = "authenc-hmac-md5-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "seqiv(authenc("
++ "hmac(md5),rfc3686(ctr(aes))))",
++ .cra_driver_name = "seqiv-authenc-hmac-md5-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha1),"
++ "rfc3686(ctr(aes)))",
++ .cra_driver_name = "authenc-hmac-sha1-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "seqiv(authenc("
++ "hmac(sha1),rfc3686(ctr(aes))))",
++ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha224),"
++ "rfc3686(ctr(aes)))",
++ .cra_driver_name = "authenc-hmac-sha224-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "seqiv(authenc("
++ "hmac(sha224),rfc3686(ctr(aes))))",
++ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha256),"
++ "rfc3686(ctr(aes)))",
++ .cra_driver_name = "authenc-hmac-sha256-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "seqiv(authenc(hmac(sha256),"
++ "rfc3686(ctr(aes))))",
++ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha384),"
++ "rfc3686(ctr(aes)))",
++ .cra_driver_name = "authenc-hmac-sha384-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "seqiv(authenc(hmac(sha384),"
++ "rfc3686(ctr(aes))))",
++ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha512),"
++ "rfc3686(ctr(aes)))",
++ .cra_driver_name = "authenc-hmac-sha512-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "seqiv(authenc(hmac(sha512),"
++ "rfc3686(ctr(aes))))",
++ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "tls10(hmac(sha1),cbc(aes))",
++ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = tls_setkey,
++ .setauthsize = tls_setauthsize,
++ .encrypt = tls_encrypt,
++ .decrypt = tls_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++};
++
++static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
++ *template)
++{
++ struct caam_crypto_alg *t_alg;
++ struct crypto_alg *alg;
++
++ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
++ if (!t_alg)
++ return ERR_PTR(-ENOMEM);
++
++ alg = &t_alg->crypto_alg;
++
++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->driver_name);
++ alg->cra_module = THIS_MODULE;
++ alg->cra_exit = caam_cra_exit;
++ alg->cra_priority = CAAM_CRA_PRIORITY;
++ alg->cra_blocksize = template->blocksize;
++ alg->cra_alignmask = 0;
++ alg->cra_ctxsize = sizeof(struct caam_ctx);
++ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
++ template->type;
++ switch (template->type) {
++ case CRYPTO_ALG_TYPE_GIVCIPHER:
++ alg->cra_init = caam_cra_init_ablkcipher;
++ alg->cra_type = &crypto_givcipher_type;
++ alg->cra_ablkcipher = template->template_ablkcipher;
++ break;
++ case CRYPTO_ALG_TYPE_ABLKCIPHER:
++ alg->cra_init = caam_cra_init_ablkcipher;
++ alg->cra_type = &crypto_ablkcipher_type;
++ alg->cra_ablkcipher = template->template_ablkcipher;
++ break;
++ }
++
++ t_alg->caam.class1_alg_type = template->class1_alg_type;
++ t_alg->caam.class2_alg_type = template->class2_alg_type;
++
++ return t_alg;
++}
++
++static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
++{
++ struct aead_alg *alg = &t_alg->aead;
++
++ alg->base.cra_module = THIS_MODULE;
++ alg->base.cra_priority = CAAM_CRA_PRIORITY;
++ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
++ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
++
++ alg->init = caam_cra_init_aead;
++ alg->exit = caam_cra_exit_aead;
++}
++
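++/*
++ * FQDAN (frame queue data availability notification) callback, invoked from
++ * the DPIO interrupt path; defer the actual dequeue work to NAPI.
++ */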
++static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
++{
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++
++ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
++ napi_schedule_irqoff(&ppriv->napi);
++}
++
++static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
++{
++ struct device *dev = priv->dev;
++ struct dpaa2_io_notification_ctx *nctx;
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int err, i = 0, cpu;
++
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++ ppriv->priv = priv;
++ nctx = &ppriv->nctx;
++ nctx->is_cdan = 0;
++ nctx->id = ppriv->rsp_fqid;
++ nctx->desired_cpu = cpu;
++ nctx->cb = dpaa2_caam_fqdan_cb;
++
++ /* Register notification callbacks */
++ err = dpaa2_io_service_register(NULL, nctx);
++ if (unlikely(err)) {
++ dev_err(dev, "notification register failed\n");
++ nctx->cb = NULL;
++ goto err;
++ }
++
++ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
++ dev);
++ if (unlikely(!ppriv->store)) {
++ dev_err(dev, "dpaa2_io_store_create() failed\n");
++ goto err;
++ }
++
++ if (++i == priv->num_pairs)
++ break;
++ }
++
++ return 0;
++
++err:
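++	/*
++	 * Unwind only what was set up: both loops stop at the first CPU
++	 * whose context was never initialized.
++	 */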
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++ if (!ppriv->nctx.cb)
++ break;
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ }
++
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++ if (!ppriv->store)
++ break;
++ dpaa2_io_store_destroy(ppriv->store);
++ }
++
++ return err;
++}
++
++static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
++{
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int i = 0, cpu;
++
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_store_destroy(ppriv->store);
++
++ if (++i == priv->num_pairs)
++ return;
++ }
++}
++
++static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
++{
++ struct dpseci_rx_queue_cfg rx_queue_cfg;
++ struct device *dev = priv->dev;
++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int err = 0, i = 0, cpu;
++
++ /* Configure Rx queues */
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++
++ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
++ DPSECI_QUEUE_OPT_USER_CTX;
++ rx_queue_cfg.order_preservation_en = 0;
++ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
++ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
++ /*
++ * Rx priority (WQ) doesn't really matter, since we use
++ * pull mode, i.e. volatile dequeues from specific FQs
++ */
++ rx_queue_cfg.dest_cfg.priority = 0;
++ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
++
++ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
++ &rx_queue_cfg);
++ if (err) {
++ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
++ err);
++ return err;
++ }
++
++ if (++i == priv->num_pairs)
++ break;
++ }
++
++ return err;
++}
++
++static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
++{
++ struct device *dev = priv->dev;
++
++ if (!priv->cscn_mem)
++ return;
++
++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ kfree(priv->cscn_mem);
++}
++
++static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
++{
++ struct device *dev = priv->dev;
++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
++
++ dpaa2_dpseci_congestion_free(priv);
++ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
++}
++
++static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
++ const struct dpaa2_fd *fd)
++{
++ struct caam_request *req;
++ u32 fd_err;
++
++ if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
++ dev_err(priv->dev, "Only Frame List FD format is supported!\n");
++ return;
++ }
++
++ fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
++ if (unlikely(fd_err))
++ dev_err(priv->dev, "FD error: %08x\n", fd_err);
++
++ /*
++ * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
++ * in FD[ERR] or FD[FRC].
++ */
++ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
++ dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
++ DMA_BIDIRECTIONAL);
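++	/* FD[FRC] carries the SEC job status; hand it to the completion callback */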
++ req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
++}
++
++static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
++{
++ int err;
++
++ /* Retry while portal is busy */
++ do {
++ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
++ ppriv->store);
++ } while (err == -EBUSY);
++
++ if (unlikely(err))
++ dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
++
++ return err;
++}
++
++static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
++{
++ struct dpaa2_dq *dq;
++ int cleaned = 0, is_last;
++
++ do {
++ dq = dpaa2_io_store_next(ppriv->store, &is_last);
++ if (unlikely(!dq)) {
++ if (unlikely(!is_last)) {
++ dev_dbg(ppriv->priv->dev,
++ "FQ %d returned no valid frames\n",
++ ppriv->rsp_fqid);
++ /*
++ * MUST retry until we get some sort of
++ * valid response token (be it "empty dequeue"
++ * or a valid frame).
++ */
++ continue;
++ }
++ break;
++ }
++
++ /* Process FD */
++ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
++ cleaned++;
++ } while (!is_last);
++
++ return cleaned;
++}
++
++static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
++{
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ struct dpaa2_caam_priv *priv;
++ int err, cleaned = 0, store_cleaned;
++
++ ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
++ priv = ppriv->priv;
++
++ if (unlikely(dpaa2_caam_pull_fq(ppriv)))
++ return 0;
++
++ do {
++ store_cleaned = dpaa2_caam_store_consume(ppriv);
++ cleaned += store_cleaned;
++
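++		/*
++		 * Stop when the FQ has drained, or when one more full store
++		 * (up to DPAA2_CAAM_STORE_SIZE frames) could overrun the
++		 * NAPI budget.
++		 */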
++ if (store_cleaned == 0 ||
++ cleaned > budget - DPAA2_CAAM_STORE_SIZE)
++ break;
++
++ /* Try to dequeue some more */
++ err = dpaa2_caam_pull_fq(ppriv);
++ if (unlikely(err))
++ break;
++ } while (1);
++
++ if (cleaned < budget) {
++ napi_complete_done(napi, cleaned);
++ err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
++ if (unlikely(err))
++ dev_err(priv->dev, "Notification rearm failed: %d\n",
++ err);
++ }
++
++ return cleaned;
++}
++
++static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
++ u16 token)
++{
++ struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
++ struct device *dev = priv->dev;
++ int err;
++
++ /*
++ * Congestion group feature supported starting with DPSECI API v5.1
++ * and only when object has been created with this capability.
++ */
++ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
++ !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
++ return 0;
++
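++	/*
++	 * kzalloc() makes no DPAA2_CSCN_ALIGN alignment guarantee, so
++	 * over-allocate and align the CSCN area by hand.
++	 */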
++ priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
++ GFP_KERNEL | GFP_DMA);
++ if (!priv->cscn_mem)
++ return -ENOMEM;
++
++ priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
++ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, priv->cscn_dma)) {
++ dev_err(dev, "Error mapping CSCN memory area\n");
++ err = -ENOMEM;
++ goto err_dma_map;
++ }
++
++ cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
++ cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
++ cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
++ cong_notif_cfg.message_ctx = (u64)priv;
++ cong_notif_cfg.message_iova = priv->cscn_dma;
++ cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
++ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
++ DPSECI_CGN_MODE_COHERENT_WRITE;
++
++ err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
++ &cong_notif_cfg);
++ if (err) {
++ dev_err(dev, "dpseci_set_congestion_notification failed\n");
++ goto err_set_cong;
++ }
++
++ return 0;
++
++err_set_cong:
++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++err_dma_map:
++ kfree(priv->cscn_mem);
++
++ return err;
++}
++
++static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev = &ls_dev->dev;
++ struct dpaa2_caam_priv *priv;
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int err, cpu;
++ u8 i;
++
++ priv = dev_get_drvdata(dev);
++
++ priv->dev = dev;
++ priv->dpsec_id = ls_dev->obj_desc.id;
++
++	/* Get a handle for the DPSECI object this interface is associated with */
++ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpsec_open() failed: %d\n", err);
++ goto err_open;
++ }
++
++ dev_info(dev, "Opened dpseci object successfully\n");
++
++ err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
++ &priv->minor_ver);
++ if (err) {
++ dev_err(dev, "dpseci_get_api_version() failed\n");
++ goto err_get_vers;
++ }
++
++ err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
++ &priv->dpseci_attr);
++ if (err) {
++ dev_err(dev, "dpseci_get_attributes() failed\n");
++ goto err_get_vers;
++ }
++
++ err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
++ &priv->sec_attr);
++ if (err) {
++ dev_err(dev, "dpseci_get_sec_attr() failed\n");
++ goto err_get_vers;
++ }
++
++ err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "setup_congestion() failed\n");
++ goto err_get_vers;
++ }
++
++ priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
++ priv->dpseci_attr.num_tx_queues);
++ if (priv->num_pairs > num_online_cpus()) {
++ dev_warn(dev, "%d queues won't be used\n",
++ priv->num_pairs - num_online_cpus());
++ priv->num_pairs = num_online_cpus();
++ }
++
++ for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
++ err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
++ &priv->rx_queue_attr[i]);
++ if (err) {
++ dev_err(dev, "dpseci_get_rx_queue() failed\n");
++ goto err_get_rx_queue;
++ }
++ }
++
++ for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
++ err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
++ &priv->tx_queue_attr[i]);
++ if (err) {
++ dev_err(dev, "dpseci_get_tx_queue() failed\n");
++ goto err_get_rx_queue;
++ }
++ }
++
++ i = 0;
++ for_each_online_cpu(cpu) {
++ dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
++ priv->rx_queue_attr[i].fqid,
++ priv->tx_queue_attr[i].fqid);
++
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
++ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
++ ppriv->prio = i;
++
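++		/*
++		 * NAPI needs a backing net_device; use a dummy one, since
++		 * this is a crypto driver rather than a network driver.
++		 */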
++ ppriv->net_dev.dev = *dev;
++ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
++ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
++ DPAA2_CAAM_NAPI_WEIGHT);
++ if (++i == priv->num_pairs)
++ break;
++ }
++
++ return 0;
++
++err_get_rx_queue:
++ dpaa2_dpseci_congestion_free(priv);
++err_get_vers:
++ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
++err_open:
++ return err;
++}
++
++static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
++{
++ struct device *dev = priv->dev;
++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int err, i;
++
++ for (i = 0; i < priv->num_pairs; i++) {
++ ppriv = per_cpu_ptr(priv->ppriv, i);
++ napi_enable(&ppriv->napi);
++ }
++
++ err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpseci_enable() failed\n");
++ return err;
++ }
++
++ dev_info(dev, "DPSECI version %d.%d\n",
++ priv->major_ver,
++ priv->minor_ver);
++
++ return 0;
++}
++
++static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
++{
++ struct device *dev = priv->dev;
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
++ int i, err = 0, enabled;
++
++ err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpseci_disable() failed\n");
++ return err;
++ }
++
++ err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
++ if (err) {
++ dev_err(dev, "dpseci_is_enabled() failed\n");
++ return err;
++ }
++
++ dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
++
++ for (i = 0; i < priv->num_pairs; i++) {
++ ppriv = per_cpu_ptr(priv->ppriv, i);
++ napi_disable(&ppriv->napi);
++ netif_napi_del(&ppriv->napi);
++ }
++
++ return 0;
++}
++
++static struct list_head alg_list;
++
++static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
++{
++ struct device *dev;
++ struct dpaa2_caam_priv *priv;
++ int i, err = 0;
++ bool registered = false;
++
++ /*
++ * There is no way to get CAAM endianness - there is no direct register
++ * space access and MC f/w does not provide this attribute.
++	 * All DPAA2-based SoCs have a little-endian CAAM, thus hard-code this
++ * property.
++ */
++ caam_little_end = true;
++
++ caam_imx = false;
++
++ dev = &dpseci_dev->dev;
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ dev_set_drvdata(dev, priv);
++
++ priv->domain = iommu_get_domain_for_dev(dev);
++
++ qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
++ 0, SLAB_CACHE_DMA, NULL);
++ if (!qi_cache) {
++ dev_err(dev, "Can't allocate SEC cache\n");
++ err = -ENOMEM;
++ goto err_qicache;
++ }
++
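++	/* Limit DMA to the 49-bit address space the DPAA2 fabric supports */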
++ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
++ if (err) {
++ dev_err(dev, "dma_set_mask_and_coherent() failed\n");
++ goto err_dma_mask;
++ }
++
++ /* Obtain a MC portal */
++ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
++ if (err) {
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_dma_mask;
++ }
++
++ priv->ppriv = alloc_percpu(*priv->ppriv);
++ if (!priv->ppriv) {
++ dev_err(dev, "alloc_percpu() failed\n");
++ goto err_alloc_ppriv;
++ }
++
++ /* DPSECI initialization */
++ err = dpaa2_dpseci_setup(dpseci_dev);
++ if (err < 0) {
++ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
++ goto err_dpseci_setup;
++ }
++
++ /* DPIO */
++ err = dpaa2_dpseci_dpio_setup(priv);
++ if (err) {
++ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
++ goto err_dpio_setup;
++ }
++
++ /* DPSECI binding to DPIO */
++ err = dpaa2_dpseci_bind(priv);
++ if (err) {
++ dev_err(dev, "dpaa2_dpseci_bind() failed\n");
++ goto err_bind;
++ }
++
++ /* DPSECI enable */
++ err = dpaa2_dpseci_enable(priv);
++ if (err) {
++		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
++ goto err_bind;
++ }
++
++ /* register crypto algorithms the device supports */
++ INIT_LIST_HEAD(&alg_list);
++ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
++ struct caam_crypto_alg *t_alg;
++ struct caam_alg_template *alg = driver_algs + i;
++ u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
++
++ /* Skip DES algorithms if not supported by device */
++ if (!priv->sec_attr.des_acc_num &&
++ ((alg_sel == OP_ALG_ALGSEL_3DES) ||
++ (alg_sel == OP_ALG_ALGSEL_DES)))
++ continue;
++
++ /* Skip AES algorithms if not supported by device */
++ if (!priv->sec_attr.aes_acc_num &&
++ (alg_sel == OP_ALG_ALGSEL_AES))
++ continue;
++
++ t_alg = caam_alg_alloc(alg);
++ if (IS_ERR(t_alg)) {
++ err = PTR_ERR(t_alg);
++ dev_warn(dev, "%s alg allocation failed: %d\n",
++ alg->driver_name, err);
++ continue;
++ }
++ t_alg->caam.dev = dev;
++
++ err = crypto_register_alg(&t_alg->crypto_alg);
++ if (err) {
++ dev_warn(dev, "%s alg registration failed: %d\n",
++ t_alg->crypto_alg.cra_driver_name, err);
++ kfree(t_alg);
++ continue;
++ }
++
++ list_add_tail(&t_alg->entry, &alg_list);
++ registered = true;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
++ struct caam_aead_alg *t_alg = driver_aeads + i;
++ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
++ OP_ALG_ALGSEL_MASK;
++ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
++ OP_ALG_ALGSEL_MASK;
++
++ /* Skip DES algorithms if not supported by device */
++ if (!priv->sec_attr.des_acc_num &&
++ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
++ (c1_alg_sel == OP_ALG_ALGSEL_DES)))
++ continue;
++
++ /* Skip AES algorithms if not supported by device */
++ if (!priv->sec_attr.aes_acc_num &&
++ (c1_alg_sel == OP_ALG_ALGSEL_AES))
++ continue;
++
++ /*
++ * Skip algorithms requiring message digests
++ * if MD not supported by device.
++ */
++ if (!priv->sec_attr.md_acc_num && c2_alg_sel)
++ continue;
++
++ t_alg->caam.dev = dev;
++ caam_aead_alg_init(t_alg);
++
++ err = crypto_register_aead(&t_alg->aead);
++ if (err) {
++ dev_warn(dev, "%s alg registration failed: %d\n",
++ t_alg->aead.base.cra_driver_name, err);
++ continue;
++ }
++
++ t_alg->registered = true;
++ registered = true;
++ }
++ if (registered)
++ dev_info(dev, "algorithms registered in /proc/crypto\n");
++
++ return err;
++
++err_bind:
++ dpaa2_dpseci_dpio_free(priv);
++err_dpio_setup:
++ dpaa2_dpseci_free(priv);
++err_dpseci_setup:
++ free_percpu(priv->ppriv);
++err_alloc_ppriv:
++ fsl_mc_portal_free(priv->mc_io);
++err_dma_mask:
++ kmem_cache_destroy(qi_cache);
++err_qicache:
++ dev_set_drvdata(dev, NULL);
++
++ return err;
++}
++
++static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev;
++ struct dpaa2_caam_priv *priv;
++ int i;
++
++ dev = &ls_dev->dev;
++ priv = dev_get_drvdata(dev);
++
++ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
++ struct caam_aead_alg *t_alg = driver_aeads + i;
++
++ if (t_alg->registered)
++ crypto_unregister_aead(&t_alg->aead);
++ }
++
++ if (alg_list.next) {
++ struct caam_crypto_alg *t_alg, *n;
++
++ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
++ crypto_unregister_alg(&t_alg->crypto_alg);
++ list_del(&t_alg->entry);
++ kfree(t_alg);
++ }
++ }
++
++ dpaa2_dpseci_disable(priv);
++ dpaa2_dpseci_dpio_free(priv);
++ dpaa2_dpseci_free(priv);
++ free_percpu(priv->ppriv);
++ fsl_mc_portal_free(priv->mc_io);
++ dev_set_drvdata(dev, NULL);
++ kmem_cache_destroy(qi_cache);
++
++ return 0;
++}
++
++int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
++{
++ struct dpaa2_fd fd;
++ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
++ int err = 0, i, id;
++
++ if (IS_ERR(req))
++ return PTR_ERR(req);
++
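++	/*
++	 * If congestion notifications are enabled, read back the latest CSCN
++	 * written by QMan and shed load while the group is congested.
++	 */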
++ if (priv->cscn_mem) {
++ dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
++ DPAA2_CSCN_SIZE,
++ DMA_FROM_DEVICE);
++ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
++ dev_dbg_ratelimited(dev, "Dropping request\n");
++ return -EBUSY;
++ }
++ }
++
++ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
++
++ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
++ DMA_BIDIRECTIONAL);
++	if (dma_mapping_error(dev, req->fd_flt_dma)) {
++		dev_err(dev, "DMA mapping error for QI enqueue request\n");
++		/* Nothing is mapped yet, so skip the unmap in err_out */
++		return -EIO;
++	}
++
++ memset(&fd, 0, sizeof(fd));
++ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
++ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
++ dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
++ dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
++
++ /*
++ * There is no guarantee that preemption is disabled here,
++ * thus take action.
++ */
++ preempt_disable();
++ id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
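++	/* Retry while the portal is busy, up to twice the number of Tx queues */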
++ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
++ err = dpaa2_io_service_enqueue_fq(NULL,
++ priv->tx_queue_attr[id].fqid,
++ &fd);
++ if (err != -EBUSY)
++ break;
++ }
++ preempt_enable();
++
++ if (unlikely(err < 0)) {
++ dev_err(dev, "Error enqueuing frame: %d\n", err);
++ goto err_out;
++ }
++
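++	/* The request is now owned by the hardware; completion is asynchronous */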
++ return -EINPROGRESS;
++
++err_out:
++ dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
++ DMA_BIDIRECTIONAL);
++ return -EIO;
++}
++EXPORT_SYMBOL(dpaa2_caam_enqueue);
++
++const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpseci",
++ },
++ { .vendor = 0x0 }
++};
++
++static struct fsl_mc_driver dpaa2_caam_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_caam_probe,
++ .remove = dpaa2_caam_remove,
++ .match_id_table = dpaa2_caam_match_id_table
++};
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Freescale Semiconductor, Inc");
++MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
++
++module_fsl_mc_driver(dpaa2_caam_driver);
+diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
+new file mode 100644
+index 00000000..2ba179db
+--- /dev/null
++++ b/drivers/crypto/caam/caamalg_qi2.h
+@@ -0,0 +1,265 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _CAAMALG_QI2_H_
++#define _CAAMALG_QI2_H_
++
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
++#include <linux/threads.h>
++#include "dpseci.h"
++#include "desc_constr.h"
++
++#define DPAA2_CAAM_STORE_SIZE 16
++/* NAPI weight *must* be a multiple of the store size. */
++#define DPAA2_CAAM_NAPI_WEIGHT 64
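++/*
++ * Keeping the weight a multiple of the store size lets the poll loop pull
++ * one more full store without overshooting its budget.
++ */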
++
++/*
++ * The congestion entrance threshold was chosen so that on LS2088
++ * we support the maximum throughput for the available memory.
++ */
++#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
++#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
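++/* Exiting at 90% of the entry threshold provides hysteresis against flapping */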
++
++/**
++ * dpaa2_caam_priv - driver private data
++ * @dpsec_id: DPSECI object unique ID
++ * @major_ver: DPSECI major version
++ * @minor_ver: DPSECI minor version
++ * @dpseci_attr: DPSECI attributes
++ * @sec_attr: SEC engine attributes
++ * @rx_queue_attr: array of Rx queue attributes
++ * @tx_queue_attr: array of Tx queue attributes
++ * @cscn_mem: pointer to memory region containing the
++ *		dpaa2_cscn struct; its size is larger than
++ * sizeof(struct dpaa2_cscn) to accommodate alignment
++ * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
++ * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
++ * @cscn_dma: dma address used by the QMAN to write CSCN messages
++ * @dev: device associated with the DPSECI object
++ * @mc_io: pointer to MC portal's I/O object
++ * @domain: IOMMU domain
++ * @ppriv: per-CPU pointers to private data
++ */
++struct dpaa2_caam_priv {
++ int dpsec_id;
++
++ u16 major_ver;
++ u16 minor_ver;
++
++ struct dpseci_attr dpseci_attr;
++ struct dpseci_sec_attr sec_attr;
++ struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
++ struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
++ int num_pairs;
++
++ /* congestion */
++ void *cscn_mem;
++ void *cscn_mem_aligned;
++ dma_addr_t cscn_dma;
++
++ struct device *dev;
++ struct fsl_mc_io *mc_io;
++ struct iommu_domain *domain;
++
++ struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
++};
++
++/**
++ * dpaa2_caam_priv_per_cpu - per CPU private data
++ * @napi: napi structure
++ * @net_dev: netdev used by napi
++ * @req_fqid: (virtual) request (Tx / enqueue) FQID
++ * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
++ * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
++ * @nctx: notification context of response FQ
++ * @store: where dequeued frames are stored
++ * @priv: backpointer to dpaa2_caam_priv
++ */
++struct dpaa2_caam_priv_per_cpu {
++ struct napi_struct napi;
++ struct net_device net_dev;
++ int req_fqid;
++ int rsp_fqid;
++ int prio;
++ struct dpaa2_io_notification_ctx nctx;
++ struct dpaa2_io_store *store;
++ struct dpaa2_caam_priv *priv;
++};
++
++/*
++ * The CAAM QI hardware constructs a job descriptor which points
++ * to shared descriptor (as pointed by context_a of FQ to CAAM).
++ * When the job descriptor is executed by deco, the whole job
++ * descriptor together with shared descriptor gets loaded in
++ * deco buffer which is 64 words long (each 32-bit).
++ *
++ * The job descriptor constructed by QI hardware has layout:
++ *
++ * HEADER (1 word)
++ * Shdesc ptr (1 or 2 words)
++ * SEQ_OUT_PTR (1 word)
++ * Out ptr (1 or 2 words)
++ * Out length (1 word)
++ * SEQ_IN_PTR (1 word)
++ * In ptr (1 or 2 words)
++ * In length (1 word)
++ *
++ * The shdesc ptr is used to fetch shared descriptor contents
++ * into deco buffer.
++ *
++ * Apart from shdesc contents, the total number of words that
++ * get loaded in deco buffer are '8' or '11'. The remaining words
++ * in deco buffer can be used for storing shared descriptor.
++ */
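++/*
++ * For example, assuming 64-bit pointers: the 64-word deco buffer holds
++ * 256 bytes, the 11 job descriptor words take 44 bytes, leaving up to
++ * 53 4-byte command words for the shared descriptor.
++ */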
++#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
++
++/* Length of a single buffer in the QI driver memory cache */
++#define CAAM_QI_MEMCACHE_SIZE 512
++
++/*
++ * aead_edesc - s/w-extended aead descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @assoclen_dma: bus physical mapped address of req->assoclen
++ * @sgt: the h/w link table
++ */
++struct aead_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ dma_addr_t assoclen_dma;
++#define CAAM_QI_MAX_AEAD_SG \
++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
++ sizeof(struct dpaa2_sg_entry))
++ struct dpaa2_sg_entry sgt[0];
++};
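++/*
++ * sgt[] above shares the allocation drawn from the CAAM_QI_MEMCACHE_SIZE
++ * cache; CAAM_QI_MAX_AEAD_SG is how many S/G entries fit after the fixed
++ * header.
++ */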
++
++/*
++ * tls_edesc - s/w-extended tls descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
++ * @dst: pointer to output scatterlist, useful for unmapping
++ * @sgt: the h/w link table
++ */
++struct tls_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ struct scatterlist tmp[2];
++ struct scatterlist *dst;
++ struct dpaa2_sg_entry sgt[0];
++};
++
++/*
++ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped qm_sg space
++ * @qm_sg_dma: I/O virtual address of h/w link table
++ * @sgt: the h/w link table
++ */
++struct ablkcipher_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++#define CAAM_QI_MAX_ABLKCIPHER_SG \
++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
++ sizeof(struct dpaa2_sg_entry))
++ struct dpaa2_sg_entry sgt[0];
++};
++
++/**
++ * caam_flc - Flow Context (FLC)
++ * @flc: Flow Context options
++ * @sh_desc: Shared Descriptor
++ * @flc_dma: DMA address of the Flow Context
++ */
++struct caam_flc {
++ u32 flc[16];
++ u32 sh_desc[MAX_SDLEN];
++ dma_addr_t flc_dma;
++} ____cacheline_aligned;
++
++enum optype {
++ ENCRYPT = 0,
++ DECRYPT,
++ GIVENCRYPT,
++ NUM_OP
++};
++
++/**
++ * caam_request - the request structure an application fills in when
++ * submitting a job to the driver.
++ * @fd_flt: Frame list table defining input and output
++ * fd_flt[0] - FLE pointing to output buffer
++ * fd_flt[1] - FLE pointing to input buffer
++ * @fd_flt_dma: DMA address for the frame list table
++ * @flc: Flow Context
++ * @op_type: operation type
++ * @cbk: Callback function to invoke when job is completed
++ * @ctx: arbitrary context attached to the request by the application
++ * @edesc: extended descriptor; points to one of {ablkcipher,tls,aead}_edesc
++ */
++struct caam_request {
++ struct dpaa2_fl_entry fd_flt[2];
++ dma_addr_t fd_flt_dma;
++ struct caam_flc *flc;
++ enum optype op_type;
++ void (*cbk)(void *ctx, u32 err);
++ void *ctx;
++ void *edesc;
++};
++
++/**
++ * dpaa2_caam_enqueue() - enqueue a crypto request
++ * @dev: device associated with the DPSECI object
++ * @req: pointer to caam_request
++ */
++int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
++
++#endif /* _CAAMALG_QI2_H_ */
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 631337c2..698580b6 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -72,7 +72,7 @@
+ #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+ /* length of descriptors text */
+-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
++#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
+ #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+ #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+ #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+@@ -103,20 +103,14 @@ struct caam_hash_ctx {
+ u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ dma_addr_t sh_desc_update_dma ____cacheline_aligned;
+ dma_addr_t sh_desc_update_first_dma;
+ dma_addr_t sh_desc_fin_dma;
+ dma_addr_t sh_desc_digest_dma;
+- dma_addr_t sh_desc_finup_dma;
+ struct device *jrdev;
+- u32 alg_type;
+- u32 alg_op;
+ u8 key[CAAM_MAX_HASH_KEY_SIZE];
+- dma_addr_t key_dma;
+ int ctx_len;
+- unsigned int split_key_len;
+- unsigned int split_key_pad_len;
++ struct alginfo adata;
+ };
+
+ /* ahash state */
+@@ -143,6 +137,31 @@ struct caam_export_state {
+ int (*finup)(struct ahash_request *req);
+ };
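++/*
++ * The hash state ping-pongs between two bounce buffers: "current" holds the
++ * data being fed to the pending job, while "alt" accumulates the tail
++ * carried over to the next one; switch_buf() flips the roles on completion.
++ */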
+
++static inline void switch_buf(struct caam_hash_state *state)
++{
++ state->current_buf ^= 1;
++}
++
++static inline u8 *current_buf(struct caam_hash_state *state)
++{
++ return state->current_buf ? state->buf_1 : state->buf_0;
++}
++
++static inline u8 *alt_buf(struct caam_hash_state *state)
++{
++ return state->current_buf ? state->buf_0 : state->buf_1;
++}
++
++static inline int *current_buflen(struct caam_hash_state *state)
++{
++ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
++}
++
++static inline int *alt_buflen(struct caam_hash_state *state)
++{
++ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
++}
++
+ /* Common job descriptor seq in/out ptr routines */
+
+ /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
+@@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
+ return dst_dma;
+ }
+
+-/* Map current buffer in state and put it in link table */
+-static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
+- struct sec4_sg_entry *sec4_sg,
+- u8 *buf, int buflen)
++/* Map current buffer in state (if length > 0) and put it in link table */
++static inline int buf_map_to_sec4_sg(struct device *jrdev,
++ struct sec4_sg_entry *sec4_sg,
++ struct caam_hash_state *state)
+ {
+- dma_addr_t buf_dma;
++ int buflen = *current_buflen(state);
+
+- buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+- dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
++ if (!buflen)
++ return 0;
+
+- return buf_dma;
+-}
++ state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, state->buf_dma)) {
++ dev_err(jrdev, "unable to map buf\n");
++ state->buf_dma = 0;
++ return -ENOMEM;
++ }
+
+-/*
+- * Only put buffer in link table if it contains data, which is possible,
+- * since a buffer has previously been used, and needs to be unmapped,
+- */
+-static inline dma_addr_t
+-try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
+- u8 *buf, dma_addr_t buf_dma, int buflen,
+- int last_buflen)
+-{
+- if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
+- dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
+- if (buflen)
+- buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
+- else
+- buf_dma = 0;
+-
+- return buf_dma;
++ dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
++
++ return 0;
+ }
+
+ /* Map state->caam_ctx, and add it to link table */
+@@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+ return 0;
+ }
+
+-/* Common shared descriptor commands */
+-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+-{
+- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+- ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+-}
+-
+-/* Append key if it has been set */
+-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+-{
+- u32 *key_jump_cmd;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- if (ctx->split_key_len) {
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+-
+- append_key_ahash(desc, ctx);
+-
+- set_jump_tgt_here(desc, key_jump_cmd);
+- }
+-
+- /* Propagate errors from shared to job descriptor */
+- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+-}
+-
+ /*
+- * For ahash read data from seqin following state->caam_ctx,
+- * and write resulting class2 context to seqout, which may be state->caam_ctx
+- * or req->result
++ * For ahash update, final and finup (import_ctx = true)
++ * import context, read and write to seqout
++ * For ahash firsts and digest (import_ctx = false)
++ * read and write to seqout
+ */
+-static inline void ahash_append_load_str(u32 *desc, int digestsize)
++static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
++ struct caam_hash_ctx *ctx, bool import_ctx)
+ {
+- /* Calculate remaining bytes to read */
+- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Read remaining bytes */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+- FIFOLD_TYPE_MSG | KEY_VLF);
++ u32 op = ctx->adata.algtype;
++ u32 *skip_key_load;
+
+- /* Store class2 context bytes */
+- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-}
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+-/*
+- * For ahash update, final and finup, import context, read and write to seqout
+- */
+-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
+- int digestsize,
+- struct caam_hash_ctx *ctx)
+-{
+- init_sh_desc_key_ahash(desc, ctx);
++ /* Append key if it has been set; ahash update excluded */
++ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
++ /* Skip key loading if already shared */
++ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
+
+- /* Import context from software */
+- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_2_CCB | ctx->ctx_len);
++ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
++ ctx->adata.keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+- /* Class 2 operation */
+- append_operation(desc, op | state | OP_ALG_ENCRYPT);
++ set_jump_tgt_here(desc, skip_key_load);
+
+- /*
+- * Load from buf and/or src and write to req->result or state->context
+- */
+- ahash_append_load_str(desc, digestsize);
+-}
++ op |= OP_ALG_AAI_HMAC_PRECOMP;
++ }
+
+-/* For ahash firsts and digest, read and write to seqout */
+-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
+- int digestsize, struct caam_hash_ctx *ctx)
+-{
+- init_sh_desc_key_ahash(desc, ctx);
++ /* If needed, import context from software */
++ if (import_ctx)
++ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
++ * Calculate remaining bytes to read
+ */
+- ahash_append_load_str(desc, digestsize);
++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ /* Read remaining bytes */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
++ FIFOLD_TYPE_MSG | KEY_VLF);
++ /* Store class2 context bytes */
++ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
+ }
+
+ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+@@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+- u32 have_key = 0;
+ u32 *desc;
+
+- if (ctx->split_key_len)
+- have_key = OP_ALG_AAI_HMAC_PRECOMP;
+-
+ /* ahash_update shared descriptor */
+ desc = ctx->sh_desc_update;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Import context from software */
+- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_2_CCB | ctx->ctx_len);
+-
+- /* Class 2 operation */
+- append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
+- OP_ALG_ENCRYPT);
+-
+- /* Load data and write to result or context */
+- ahash_append_load_str(desc, ctx->ctx_len);
+-
+- ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
++ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ahash update shdesc@"__stringify(__LINE__)": ",
+@@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+
+ /* ahash_update_first shared descriptor */
+ desc = ctx->sh_desc_update_first;
+-
+- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
+- ctx->ctx_len, ctx);
+-
+- ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
++ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ahash update first shdesc@"__stringify(__LINE__)": ",
+@@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+
+ /* ahash_final shared descriptor */
+ desc = ctx->sh_desc_fin;
+-
+- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
+- OP_ALG_AS_FINALIZE, digestsize, ctx);
+-
+- ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
++ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
+
+- /* ahash_finup shared descriptor */
+- desc = ctx->sh_desc_finup;
+-
+- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
+- OP_ALG_AS_FINALIZE, digestsize, ctx);
+-
+- ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+ /* ahash_digest shared descriptor */
+ desc = ctx->sh_desc_digest;
+-
+- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
+- digestsize, ctx);
+-
+- ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
++ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ahash digest shdesc@"__stringify(__LINE__)": ",
+@@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+ return 0;
+ }
+
+-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+- u32 keylen)
+-{
+- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+- ctx->split_key_pad_len, key_in, keylen,
+- ctx->alg_op);
+-}
+-
+ /* Digest hash size if it is too large */
+ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 *keylen, u8 *key_out, u32 digestsize)
+@@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ }
+
+ /* Job descriptor to perform unkeyed hash on key_in */
+- append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
++ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
+ OP_ALG_AS_INITFINAL);
+ append_seq_in_ptr(desc, src_dma, *keylen, 0);
+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+@@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ static int ahash_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+ {
+- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+- struct device *jrdev = ctx->jrdev;
+ int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ret;
+@@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ahash *ahash,
+ key = hashed_key;
+ }
+
+- /* Pick class 2 key length from algorithm submask */
+- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+- OP_ALG_ALGSEL_SHIFT] * 2;
+- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+-
+-#ifdef DEBUG
+- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+- ctx->split_key_len, ctx->split_key_pad_len);
+- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+-#endif
+-
+- ret = gen_split_hash_key(ctx, key, keylen);
++ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
++ CAAM_MAX_HASH_KEY_SIZE);
+ if (ret)
+ goto bad_free_key;
+
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- ret = -ENOMEM;
+- goto error_free_key;
+- }
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+- ctx->split_key_pad_len, 1);
++ ctx->adata.keylen_pad, 1);
+ #endif
+
+- ret = ahash_set_sh_desc(ahash);
+- if (ret) {
+- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+- DMA_TO_DEVICE);
+- }
+- error_free_key:
+ kfree(hashed_key);
+- return ret;
++ return ahash_set_sh_desc(ahash);
+ bad_free_key:
+ kfree(hashed_key);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+@@ -604,6 +482,8 @@ static inline void ahash_unmap(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len)
+ {
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
+ if (edesc->src_nents)
+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+ if (edesc->dst_dma)
+@@ -612,6 +492,12 @@ static inline void ahash_unmap(struct device *dev,
+ if (edesc->sec4_sg_bytes)
+ dma_unmap_single(dev, edesc->sec4_sg_dma,
+ edesc->sec4_sg_bytes, DMA_TO_DEVICE);
++
++ if (state->buf_dma) {
++ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
++ DMA_TO_DEVICE);
++ state->buf_dma = 0;
++ }
+ }
+
+ static inline void ahash_unmap_ctx(struct device *dev,
+@@ -643,8 +529,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+
+- edesc = (struct ahash_edesc *)((char *)desc -
+- offsetof(struct ahash_edesc, hw_desc));
++ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+@@ -671,19 +556,19 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+-#ifdef DEBUG
+ struct caam_hash_state *state = ahash_request_ctx(req);
++#ifdef DEBUG
+ int digestsize = crypto_ahash_digestsize(ahash);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+
+- edesc = (struct ahash_edesc *)((char *)desc -
+- offsetof(struct ahash_edesc, hw_desc));
++ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
++ switch_buf(state);
+ kfree(edesc);
+
+ #ifdef DEBUG
+@@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+
+- edesc = (struct ahash_edesc *)((char *)desc -
+- offsetof(struct ahash_edesc, hw_desc));
++ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+@@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+-#ifdef DEBUG
+ struct caam_hash_state *state = ahash_request_ctx(req);
++#ifdef DEBUG
+ int digestsize = crypto_ahash_digestsize(ahash);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+
+- edesc = (struct ahash_edesc *)((char *)desc -
+- offsetof(struct ahash_edesc, hw_desc));
++ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
++ switch_buf(state);
+ kfree(edesc);
+
+ #ifdef DEBUG
+@@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash_request *req)
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+- int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
+- u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
+- int *next_buflen = state->current_buf ? &state->buflen_0 :
+- &state->buflen_1, last_buflen;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *buf = current_buf(state);
++ int *buflen = current_buflen(state);
++ u8 *next_buf = alt_buf(state);
++ int *next_buflen = alt_buflen(state), last_buflen;
+ int in_len = *buflen + req->nbytes, to_hash;
+ u32 *desc;
+ int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
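The open-coded "state->current_buf ? ... : ..." ternaries removed in the hunk above are replaced throughout caamhash.c by small accessors — current_buf(), alt_buf(), current_buflen(), alt_buflen() — plus an explicit switch_buf() that flips the pair only on job completion. A stand-alone user-space sketch of the same ping-pong scheme (hypothetical state layout; the real struct caam_hash_state also carries DMA addresses and descriptor state):

#include <stdio.h>

struct hash_state {
	unsigned char buf_0[64];
	unsigned char buf_1[64];
	int buflen_0, buflen_1;
	int current_buf;	/* which buffer holds the pending tail */
};

static unsigned char *current_buf(struct hash_state *s)
{
	return s->current_buf ? s->buf_1 : s->buf_0;
}

static unsigned char *alt_buf(struct hash_state *s)
{
	return s->current_buf ? s->buf_0 : s->buf_1;
}

static int *current_buflen(struct hash_state *s)
{
	return s->current_buf ? &s->buflen_1 : &s->buflen_0;
}

static int *alt_buflen(struct hash_state *s)
{
	return s->current_buf ? &s->buflen_0 : &s->buflen_1;
}

/* Flip the pair once, on completion, rather than at submit time. */
static void switch_buf(struct hash_state *s)
{
	s->current_buf = !s->current_buf;
}

int main(void)
{
	struct hash_state s = { .current_buf = 0 };

	alt_buf(&s)[0] = 0xaa;	/* stash a tail byte for the next update */
	*alt_buflen(&s) = 1;
	switch_buf(&s);		/* completion callback flips the pair */
	printf("tail: %d byte(s), first 0x%02x\n",
	       *current_buflen(&s), current_buf(&s)[0]);	/* 1, 0xaa */
	return 0;
}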
+@@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash_request *req)
+ if (ret)
+ goto unmap_ctx;
+
+- state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
+- edesc->sec4_sg + 1,
+- buf, state->buf_dma,
+- *buflen, last_buflen);
++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
++ if (ret)
++ goto unmap_ctx;
+
+ if (mapped_nents) {
+ sg_to_sec4_sg_last(req->src, mapped_nents,
+@@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash_request *req)
+ to_hash - *buflen,
+ *next_buflen, 0);
+ } else {
+- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
+- cpu_to_caam32(SEC4_SG_LEN_FIN);
++ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
++ 1);
+ }
+
+- state->current_buf = !state->current_buf;
+-
+ desc = edesc->hw_desc;
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+@@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_request *req)
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+- int last_buflen = state->current_buf ? state->buflen_0 :
+- state->buflen_1;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int buflen = *current_buflen(state);
+ u32 *desc;
+ int sec4_sg_bytes, sec4_sg_src_index;
+ int digestsize = crypto_ahash_digestsize(ahash);
+@@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_request *req)
+ if (ret)
+ goto unmap_ctx;
+
+- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+- buf, state->buf_dma, buflen,
+- last_buflen);
+- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
+- cpu_to_caam32(SEC4_SG_LEN_FIN);
++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
++ if (ret)
++ goto unmap_ctx;
++
++ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+@@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+- int last_buflen = state->current_buf ? state->buflen_0 :
+- state->buflen_1;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int buflen = *current_buflen(state);
+ u32 *desc;
+ int sec4_sg_src_index;
+ int src_nents, mapped_nents;
+@@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+- ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
++ ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
+ flags);
+ if (!edesc) {
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+@@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
+ if (ret)
+ goto unmap_ctx;
+
+- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+- buf, state->buf_dma, buflen,
+- last_buflen);
++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
++ if (ret)
++ goto unmap_ctx;
+
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
+ sec4_sg_src_index, ctx->ctx_len + buflen,
+@@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_request *req)
+ {
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
+ u32 *desc;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret;
+
++ state->buf_dma = 0;
++
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(jrdev, "Invalid number of src SG.\n");
+@@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct ahash_request *req)
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *buf = current_buf(state);
++ int buflen = *current_buflen(state);
+ u32 *desc;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+@@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ahash_request *req)
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+- int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
+- u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
+- int *next_buflen = state->current_buf ? &state->buflen_0 :
+- &state->buflen_1;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *buf = current_buf(state);
++ int *buflen = current_buflen(state);
++ u8 *next_buf = alt_buf(state);
++ int *next_buflen = alt_buflen(state);
+ int in_len = *buflen + req->nbytes, to_hash;
+ int sec4_sg_bytes, src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+@@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->dst_dma = 0;
+
+- state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
+- buf, *buflen);
++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
++ if (ret)
++ goto unmap_ctx;
++
+ sg_to_sec4_sg_last(req->src, mapped_nents,
+ edesc->sec4_sg + 1, 0);
+
+@@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
+ *next_buflen, 0);
+ }
+
+- state->current_buf = !state->current_buf;
+-
+ desc = edesc->hw_desc;
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+@@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+- int last_buflen = state->current_buf ? state->buflen_0 :
+- state->buflen_1;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int buflen = *current_buflen(state);
+ u32 *desc;
+ int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+@@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
+ edesc->src_nents = src_nents;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
+- state->buf_dma, buflen,
+- last_buflen);
++ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
++ if (ret)
++ goto unmap;
+
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
+ req->nbytes);
+@@ -1496,11 +1369,10 @@ static int ahash_update_first(struct ahash_request *req)
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+- u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
+- int *next_buflen = state->current_buf ?
+- &state->buflen_1 : &state->buflen_0;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *next_buf = alt_buf(state);
++ int *next_buflen = alt_buflen(state);
+ int to_hash;
+ u32 *desc;
+ int src_nents, mapped_nents;
+@@ -1582,6 +1454,7 @@ static int ahash_update_first(struct ahash_request *req)
+ state->final = ahash_final_no_ctx;
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
+ req->nbytes, 0);
++ switch_buf(state);
+ }
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+@@ -1688,7 +1561,6 @@ struct caam_hash_template {
+ unsigned int blocksize;
+ struct ahash_alg template_ahash;
+ u32 alg_type;
+- u32 alg_op;
+ };
+
+ /* ahash descriptors */
+@@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_hash[] = {
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha224",
+ .driver_name = "sha224-caam",
+@@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_hash[] = {
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA224,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha256",
+ .driver_name = "sha256-caam",
+@@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_hash[] = {
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha384",
+ .driver_name = "sha384-caam",
+@@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_hash[] = {
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA384,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha512",
+ .driver_name = "sha512-caam",
+@@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_hash[] = {
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA512,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "md5",
+ .driver_name = "md5-caam",
+@@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_hash[] = {
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_MD5,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ };
+
+ struct caam_hash_alg {
+ struct list_head entry;
+ int alg_type;
+- int alg_op;
+ struct ahash_alg ahash_alg;
+ };
+
+@@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
++ dma_addr_t dma_addr;
+
+ /*
+ * Get a Job ring from Job Ring driver to ensure in-order
+@@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
++
++ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
++ offsetof(struct caam_hash_ctx,
++ sh_desc_update_dma),
++ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ if (dma_mapping_error(ctx->jrdev, dma_addr)) {
++ dev_err(ctx->jrdev, "unable to map shared descriptors\n");
++ caam_jr_free(ctx->jrdev);
++ return -ENOMEM;
++ }
++
++ ctx->sh_desc_update_dma = dma_addr;
++ ctx->sh_desc_update_first_dma = dma_addr +
++ offsetof(struct caam_hash_ctx,
++ sh_desc_update_first);
++ ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
++ sh_desc_fin);
++ ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
++ sh_desc_digest);
++
+ /* copy descriptor header template value */
+- ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+
+- ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
++ ctx->ctx_len = runninglen[(ctx->adata.algtype &
++ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+@@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+ {
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+- if (ctx->sh_desc_update_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
+- dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
+- desc_bytes(ctx->sh_desc_update),
+- DMA_TO_DEVICE);
+- if (ctx->sh_desc_update_first_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
+- dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
+- desc_bytes(ctx->sh_desc_update_first),
+- DMA_TO_DEVICE);
+- if (ctx->sh_desc_fin_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
+- dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
+- desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
+- if (ctx->sh_desc_digest_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
+- dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
+- desc_bytes(ctx->sh_desc_digest),
+- DMA_TO_DEVICE);
+- if (ctx->sh_desc_finup_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
+- dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
+- desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+-
++ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
++ offsetof(struct caam_hash_ctx,
++ sh_desc_update_dma),
++ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ caam_jr_free(ctx->jrdev);
+ }
+
+@@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_template *template,
+ alg->cra_type = &crypto_ahash_type;
+
+ t_alg->alg_type = template->alg_type;
+- t_alg->alg_op = template->alg_op;
+
+ return t_alg;
+ }
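The completion-callback hunks above replace manual "(char *)desc - offsetof(...)" arithmetic with container_of(). A minimal user-space model of that idiom, with the kernel macro's typeof()-based type checking stripped and a hypothetical edesc layout standing in for struct ahash_edesc:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(); the kernel version also type-checks ptr. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct edesc {
	int src_nents;
	unsigned int hw_desc[4];	/* descriptor handed to the job ring */
};

int main(void)
{
	struct edesc e = { .src_nents = 3 };
	unsigned int *desc = &e.hw_desc[0];	/* all the callback receives */

	/* Recover the enclosing edesc, as ahash_done() now does. */
	struct edesc *back = container_of(desc, struct edesc, hw_desc[0]);

	printf("src_nents = %d\n", back->src_nents);	/* prints 3 */
	return 0;
}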
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 354a16ab..4fcb378e 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -18,6 +18,10 @@
+ #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
+ #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
+ sizeof(struct rsa_priv_f1_pdb))
++#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
++ sizeof(struct rsa_priv_f2_pdb))
++#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
++ sizeof(struct rsa_priv_f3_pdb))
+
+ static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
+ struct akcipher_request *req)
+@@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
+ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+ }
+
++static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
++ struct akcipher_request *req)
++{
++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct caam_rsa_key *key = &ctx->key;
++ struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
++ size_t p_sz = key->p_sz;
++ size_t q_sz = key->q_sz;
++
++ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
++}
++
++static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
++ struct akcipher_request *req)
++{
++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct caam_rsa_key *key = &ctx->key;
++ struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
++ size_t p_sz = key->p_sz;
++ size_t q_sz = key->q_sz;
++
++ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
++ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
++}
++
+ /* RSA Job Completion handler */
+ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
+ {
+@@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
+ akcipher_request_complete(req, err);
+ }
+
++static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
++ void *context)
++{
++ struct akcipher_request *req = context;
++ struct rsa_edesc *edesc;
++
++ if (err)
++ caam_jr_strstatus(dev, err);
++
++ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
++
++ rsa_priv_f2_unmap(dev, edesc, req);
++ rsa_io_unmap(dev, edesc, req);
++ kfree(edesc);
++
++ akcipher_request_complete(req, err);
++}
++
++static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
++ void *context)
++{
++ struct akcipher_request *req = context;
++ struct rsa_edesc *edesc;
++
++ if (err)
++ caam_jr_strstatus(dev, err);
++
++ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
++
++ rsa_priv_f3_unmap(dev, edesc, req);
++ rsa_io_unmap(dev, edesc, req);
++ kfree(edesc);
++
++ akcipher_request_complete(req, err);
++}
++
+ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
+ size_t desclen)
+ {
+@@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct rsa_edesc *edesc;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
+ int sgc;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ int src_nents, dst_nents;
+@@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
+ return 0;
+ }
+
++static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
++ struct rsa_edesc *edesc)
++{
++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct caam_rsa_key *key = &ctx->key;
++ struct device *dev = ctx->dev;
++ struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
++ int sec4_sg_index = 0;
++ size_t p_sz = key->p_sz;
++ size_t q_sz = key->q_sz;
++
++ pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->d_dma)) {
++ dev_err(dev, "Unable to map RSA private exponent memory\n");
++ return -ENOMEM;
++ }
++
++ pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->p_dma)) {
++ dev_err(dev, "Unable to map RSA prime factor p memory\n");
++ goto unmap_d;
++ }
++
++ pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->q_dma)) {
++ dev_err(dev, "Unable to map RSA prime factor q memory\n");
++ goto unmap_p;
++ }
++
++ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->tmp1_dma)) {
++ dev_err(dev, "Unable to map RSA tmp1 memory\n");
++ goto unmap_q;
++ }
++
++ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->tmp2_dma)) {
++ dev_err(dev, "Unable to map RSA tmp2 memory\n");
++ goto unmap_tmp1;
++ }
++
++ if (edesc->src_nents > 1) {
++ pdb->sgf |= RSA_PRIV_PDB_SGF_G;
++ pdb->g_dma = edesc->sec4_sg_dma;
++ sec4_sg_index += edesc->src_nents;
++ } else {
++ pdb->g_dma = sg_dma_address(req->src);
++ }
++
++ if (edesc->dst_nents > 1) {
++ pdb->sgf |= RSA_PRIV_PDB_SGF_F;
++ pdb->f_dma = edesc->sec4_sg_dma +
++ sec4_sg_index * sizeof(struct sec4_sg_entry);
++ } else {
++ pdb->f_dma = sg_dma_address(req->dst);
++ }
++
++ pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
++ pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
++
++ return 0;
++
++unmap_tmp1:
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
++unmap_q:
++ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
++unmap_p:
++ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
++unmap_d:
++ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
++
++ return -ENOMEM;
++}
++
++static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
++ struct rsa_edesc *edesc)
++{
++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct caam_rsa_key *key = &ctx->key;
++ struct device *dev = ctx->dev;
++ struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
++ int sec4_sg_index = 0;
++ size_t p_sz = key->p_sz;
++ size_t q_sz = key->q_sz;
++
++ pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->p_dma)) {
++ dev_err(dev, "Unable to map RSA prime factor p memory\n");
++ return -ENOMEM;
++ }
++
++ pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->q_dma)) {
++ dev_err(dev, "Unable to map RSA prime factor q memory\n");
++ goto unmap_p;
++ }
++
++ pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->dp_dma)) {
++ dev_err(dev, "Unable to map RSA exponent dp memory\n");
++ goto unmap_q;
++ }
++
++ pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->dq_dma)) {
++ dev_err(dev, "Unable to map RSA exponent dq memory\n");
++ goto unmap_dp;
++ }
++
++ pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->c_dma)) {
++ dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
++ goto unmap_dq;
++ }
++
++ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->tmp1_dma)) {
++ dev_err(dev, "Unable to map RSA tmp1 memory\n");
++ goto unmap_qinv;
++ }
++
++ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, pdb->tmp2_dma)) {
++ dev_err(dev, "Unable to map RSA tmp2 memory\n");
++ goto unmap_tmp1;
++ }
++
++ if (edesc->src_nents > 1) {
++ pdb->sgf |= RSA_PRIV_PDB_SGF_G;
++ pdb->g_dma = edesc->sec4_sg_dma;
++ sec4_sg_index += edesc->src_nents;
++ } else {
++ pdb->g_dma = sg_dma_address(req->src);
++ }
++
++ if (edesc->dst_nents > 1) {
++ pdb->sgf |= RSA_PRIV_PDB_SGF_F;
++ pdb->f_dma = edesc->sec4_sg_dma +
++ sec4_sg_index * sizeof(struct sec4_sg_entry);
++ } else {
++ pdb->f_dma = sg_dma_address(req->dst);
++ }
++
++ pdb->sgf |= key->n_sz;
++ pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
++
++ return 0;
++
++unmap_tmp1:
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
++unmap_qinv:
++ dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
++unmap_dq:
++ dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
++unmap_dp:
++ dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
++unmap_q:
++ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
++unmap_p:
++ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
++
++ return -ENOMEM;
++}
++
+ static int caam_rsa_enc(struct akcipher_request *req)
+ {
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+@@ -301,24 +543,14 @@ static int caam_rsa_enc(struct akcipher_request *req)
+ return ret;
+ }
+
+-static int caam_rsa_dec(struct akcipher_request *req)
++static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
+ {
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+- struct caam_rsa_key *key = &ctx->key;
+ struct device *jrdev = ctx->dev;
+ struct rsa_edesc *edesc;
+ int ret;
+
+- if (unlikely(!key->n || !key->d))
+- return -EINVAL;
+-
+- if (req->dst_len < key->n_sz) {
+- req->dst_len = key->n_sz;
+- dev_err(jrdev, "Output buffer length less than parameter n\n");
+- return -EOVERFLOW;
+- }
+-
+ /* Allocate extended descriptor */
+ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
+ if (IS_ERR(edesc))
+@@ -344,17 +576,147 @@ static int caam_rsa_dec(struct akcipher_request *req)
+ return ret;
+ }
+
++static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
++{
++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct device *jrdev = ctx->dev;
++ struct rsa_edesc *edesc;
++ int ret;
++
++ /* Allocate extended descriptor */
++ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
++ ret = set_rsa_priv_f2_pdb(req, edesc);
++ if (ret)
++ goto init_fail;
++
++ /* Initialize Job Descriptor */
++ init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
++
++ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
++ if (!ret)
++ return -EINPROGRESS;
++
++ rsa_priv_f2_unmap(jrdev, edesc, req);
++
++init_fail:
++ rsa_io_unmap(jrdev, edesc, req);
++ kfree(edesc);
++ return ret;
++}
++
++static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
++{
++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct device *jrdev = ctx->dev;
++ struct rsa_edesc *edesc;
++ int ret;
++
++ /* Allocate extended descriptor */
++ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
++ ret = set_rsa_priv_f3_pdb(req, edesc);
++ if (ret)
++ goto init_fail;
++
++ /* Initialize Job Descriptor */
++ init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
++
++ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
++ if (!ret)
++ return -EINPROGRESS;
++
++ rsa_priv_f3_unmap(jrdev, edesc, req);
++
++init_fail:
++ rsa_io_unmap(jrdev, edesc, req);
++ kfree(edesc);
++ return ret;
++}
++
++static int caam_rsa_dec(struct akcipher_request *req)
++{
++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
++ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct caam_rsa_key *key = &ctx->key;
++ int ret;
++
++ if (unlikely(!key->n || !key->d))
++ return -EINVAL;
++
++ if (req->dst_len < key->n_sz) {
++ req->dst_len = key->n_sz;
++ dev_err(ctx->dev, "Output buffer length less than parameter n\n");
++ return -EOVERFLOW;
++ }
++
++ if (key->priv_form == FORM3)
++ ret = caam_rsa_dec_priv_f3(req);
++ else if (key->priv_form == FORM2)
++ ret = caam_rsa_dec_priv_f2(req);
++ else
++ ret = caam_rsa_dec_priv_f1(req);
++
++ return ret;
++}
++
+ static void caam_rsa_free_key(struct caam_rsa_key *key)
+ {
+ kzfree(key->d);
++ kzfree(key->p);
++ kzfree(key->q);
++ kzfree(key->dp);
++ kzfree(key->dq);
++ kzfree(key->qinv);
++ kzfree(key->tmp1);
++ kzfree(key->tmp2);
+ kfree(key->e);
+ kfree(key->n);
+- key->d = NULL;
+- key->e = NULL;
+- key->n = NULL;
+- key->d_sz = 0;
+- key->e_sz = 0;
+- key->n_sz = 0;
++ memset(key, 0, sizeof(*key));
++}
++
++static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
++{
++ while (!**ptr && *nbytes) {
++ (*ptr)++;
++ (*nbytes)--;
++ }
++}
++
++/**
++ * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
++ * dP, dQ and qInv could decode to less than the corresponding p, q length, as the
++ * BER-encoding requires that the minimum number of bytes be used to encode the
++ * integer. dP, dQ, qInv decoded values have to be zero-padded to the appropriate
++ * length.
++ *
++ * @ptr : pointer to {dP, dQ, qInv} CRT member
++ * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
++ * @dstlen: length in bytes of corresponding p or q prime factor
++ */
++static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
++{
++ u8 *dst;
++
++ caam_rsa_drop_leading_zeros(&ptr, &nbytes);
++ if (!nbytes)
++ return NULL;
++
++ dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
++ if (!dst)
++ return NULL;
++
++ memcpy(dst + (dstlen - nbytes), ptr, nbytes);
++
++ return dst;
+ }
+
+ /**
+@@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
+ {
+ u8 *val;
+
+- while (!*buf && *nbytes) {
+- buf++;
+- (*nbytes)--;
+- }
++ caam_rsa_drop_leading_zeros(&buf, nbytes);
++ if (!*nbytes)
++ return NULL;
+
+ val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
+ if (!val)
+@@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+ {
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+- struct rsa_key raw_key = {0};
++ struct rsa_key raw_key = {NULL};
+ struct caam_rsa_key *rsa_key = &ctx->key;
+ int ret;
+
+@@ -437,11 +798,69 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ return -ENOMEM;
+ }
+
++static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
++ struct rsa_key *raw_key)
++{
++ struct caam_rsa_key *rsa_key = &ctx->key;
++ size_t p_sz = raw_key->p_sz;
++ size_t q_sz = raw_key->q_sz;
++
++ rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
++ if (!rsa_key->p)
++ return;
++ rsa_key->p_sz = p_sz;
++
++ rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
++ if (!rsa_key->q)
++ goto free_p;
++ rsa_key->q_sz = q_sz;
++
++ rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
++ if (!rsa_key->tmp1)
++ goto free_q;
++
++ rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
++ if (!rsa_key->tmp2)
++ goto free_tmp1;
++
++ rsa_key->priv_form = FORM2;
++
++ rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
++ if (!rsa_key->dp)
++ goto free_tmp2;
++
++ rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
++ if (!rsa_key->dq)
++ goto free_dp;
++
++ rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
++ q_sz);
++ if (!rsa_key->qinv)
++ goto free_dq;
++
++ rsa_key->priv_form = FORM3;
++
++ return;
++
++free_dq:
++ kzfree(rsa_key->dq);
++free_dp:
++ kzfree(rsa_key->dp);
++free_tmp2:
++ kzfree(rsa_key->tmp2);
++free_tmp1:
++ kzfree(rsa_key->tmp1);
++free_q:
++ kzfree(rsa_key->q);
++free_p:
++ kzfree(rsa_key->p);
++}
++
+ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+ {
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+- struct rsa_key raw_key = {0};
++ struct rsa_key raw_key = {NULL};
+ struct caam_rsa_key *rsa_key = &ctx->key;
+ int ret;
+
+@@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
+ memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+
++ caam_rsa_set_priv_key_form(ctx, &raw_key);
++
+ return 0;
+
+ err:
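caam_read_rsa_crt() above strips the BER leading zeros and then left-pads the value back out to the prime-factor length the hardware PDB expects. A runnable user-space sketch of the same logic (calloc/free stand in for kzalloc/kzfree, GFP flags omitted, and an extra bounds guard added that the kernel caller guarantees implicitly):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *read_crt(const unsigned char *ptr, size_t nbytes,
			       size_t dstlen)
{
	unsigned char *dst;

	while (nbytes && !*ptr) {	/* minimal BER encoding: strip zeros */
		ptr++;
		nbytes--;
	}
	if (!nbytes || nbytes > dstlen)
		return NULL;

	dst = calloc(1, dstlen);	/* zeroed, so padding comes for free */
	if (!dst)
		return NULL;
	memcpy(dst + (dstlen - nbytes), ptr, nbytes);	/* right-align value */
	return dst;
}

int main(void)
{
	/* dP decoded to 2 significant bytes, but p is 4 bytes long. */
	const unsigned char dp[] = { 0x00, 0x12, 0x34 };
	unsigned char *out = read_crt(dp, sizeof(dp), 4);

	if (!out)
		return 1;
	for (int i = 0; i < 4; i++)
		printf("%02x ", out[i]);	/* prints: 00 00 12 34 */
	printf("\n");
	free(out);
	return 0;
}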
+diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
+index f595d159..87ab75e9 100644
+--- a/drivers/crypto/caam/caampkc.h
++++ b/drivers/crypto/caam/caampkc.h
+@@ -12,22 +12,76 @@
+ #include "compat.h"
+ #include "pdb.h"
+
++/**
++ * caam_priv_key_form - CAAM RSA private key representation
++ * CAAM RSA private key may have any of three forms.
++ *
++ * 1. The first representation consists of the pair (n, d), where the
++ * components have the following meanings:
++ * n the RSA modulus
++ * d the RSA private exponent
++ *
++ * 2. The second representation consists of the triplet (p, q, d), where the
++ * components have the following meanings:
++ * p the first prime factor of the RSA modulus n
++ * q the second prime factor of the RSA modulus n
++ * d the RSA private exponent
++ *
++ * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
++ * where the components have the following meanings:
++ * p the first prime factor of the RSA modulus n
++ * q the second prime factor of the RSA modulus n
++ * dP the first factor's CRT exponent
++ * dQ the second factor's CRT exponent
++ * qInv the (first) CRT coefficient
++ *
++ * The benefit of using the third or the second key form is lower computational
++ * cost for the decryption and signature operations.
++ */
++enum caam_priv_key_form {
++ FORM1,
++ FORM2,
++ FORM3
++};
++
+ /**
+ * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
+ * @n : RSA modulus raw byte stream
+ * @e : RSA public exponent raw byte stream
+ * @d : RSA private exponent raw byte stream
++ * @p : RSA prime factor p of RSA modulus n
++ * @q : RSA prime factor q of RSA modulus n
++ * @dp : RSA CRT exponent of p
++ * @dq : RSA CRT exponent of q
++ * @qinv : RSA CRT coefficient
++ * @tmp1 : CAAM uses this temporary buffer as internal state buffer.
++ * It is assumed to be as long as p.
++ * @tmp2 : CAAM uses this temporary buffer as internal state buffer.
++ * It is assumed to be as long as q.
+ * @n_sz : length in bytes of RSA modulus n
+ * @e_sz : length in bytes of RSA public exponent
+ * @d_sz : length in bytes of RSA private exponent
++ * @p_sz : length in bytes of RSA prime factor p of RSA modulus n
++ * @q_sz : length in bytes of RSA prime factor q of RSA modulus n
++ * @priv_form : CAAM RSA private key representation
+ */
+ struct caam_rsa_key {
+ u8 *n;
+ u8 *e;
+ u8 *d;
++ u8 *p;
++ u8 *q;
++ u8 *dp;
++ u8 *dq;
++ u8 *qinv;
++ u8 *tmp1;
++ u8 *tmp2;
+ size_t n_sz;
+ size_t e_sz;
+ size_t d_sz;
++ size_t p_sz;
++ size_t q_sz;
++ enum caam_priv_key_form priv_form;
+ };
+
+ /**
+@@ -59,6 +113,8 @@ struct rsa_edesc {
+ union {
+ struct rsa_pub_pdb pub;
+ struct rsa_priv_f1_pdb priv_f1;
++ struct rsa_priv_f2_pdb priv_f2;
++ struct rsa_priv_f3_pdb priv_f3;
+ } pdb;
+ u32 hw_desc[];
+ };
+@@ -66,5 +122,7 @@ struct rsa_edesc {
+ /* Descriptor construction primitives. */
+ void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
+ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
++void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
++void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
+
+ #endif
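The caam_priv_key_form comment above is easiest to see with toy numbers. The sketch below decrypts one ciphertext both ways — form 1 with the full private exponent d, and form 3 with the CRT quintuple — using deliberately tiny, insecure parameters (p = 11, q = 13) chosen only so the arithmetic fits in a long; the two half-size exponentiations are why the later forms are cheaper on real key sizes:

#include <stdio.h>

/* Square-and-multiply modular exponentiation on small integers. */
static long modexp(long b, long e, long m)
{
	long r = 1;

	b %= m;
	while (e > 0) {
		if (e & 1)
			r = r * b % m;
		b = b * b % m;
		e >>= 1;
	}
	return r;
}

int main(void)
{
	/* Toy key: n = p*q = 143, e = 7, d = 43. */
	long p = 11, q = 13, n = 143, d = 43;
	long dP = 3, dQ = 7, qInv = 6;	/* d mod (p-1), d mod (q-1), q^-1 mod p */
	long c = 48;			/* ciphertext of m = 9 under e = 7 */

	/* Form 1: one full-size exponentiation with d. */
	long m1 = modexp(c, d, n);

	/* Form 3 (CRT): two half-size exponentiations plus recombination. */
	long mp = modexp(c, dP, p);
	long mq = modexp(c, dQ, q);
	long h = qInv * ((mp - mq) % p + p) % p;
	long m3 = mq + h * q;

	printf("form1 = %ld, form3 = %ld\n", m1, m3);	/* both print 9 */
	return 0;
}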
+diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
+index 9b92af2c..fde07d4f 100644
+--- a/drivers/crypto/caam/caamrng.c
++++ b/drivers/crypto/caam/caamrng.c
+@@ -52,7 +52,7 @@
+
+ /* length of descriptors */
+ #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
+-#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
++#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
+
+ /* Buffer, its dma address and lock */
+ struct buf_data {
+@@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
+ {
+ struct buf_data *bd;
+
+- bd = (struct buf_data *)((char *)desc -
+- offsetof(struct buf_data, hw_desc));
++ bd = container_of(desc, struct buf_data, hw_desc[0]);
+
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+- /* Propagate errors from shared to job descriptor */
+- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+-
+ /* Generate random bytes */
+ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
+
+@@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+ if (err)
+ return err;
+
+- err = caam_init_buf(ctx, 1);
+- if (err)
+- return err;
+-
+- return 0;
++ return caam_init_buf(ctx, 1);
+ }
+
+ static struct hwrng caam_rng = {
+@@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(dev);
+ }
+- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
++ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
+ if (!rng_ctx) {
+ err = -ENOMEM;
+ goto free_caam_alloc;
+diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
+index 7149cd24..4e084f51 100644
+--- a/drivers/crypto/caam/compat.h
++++ b/drivers/crypto/caam/compat.h
+@@ -16,6 +16,7 @@
+ #include <linux/of_platform.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/io.h>
++#include <linux/iommu.h>
+ #include <linux/spinlock.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/in.h>
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index 98468b96..8f9642c6 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -2,40 +2,41 @@
+ * Controller-level driver, kernel property detection, initialization
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
+ */
+
+ #include <linux/device.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <linux/sys_soc.h>
+
+ #include "compat.h"
+ #include "regs.h"
+ #include "intern.h"
+ #include "jr.h"
+ #include "desc_constr.h"
+-#include "error.h"
+ #include "ctrl.h"
+
+ bool caam_little_end;
+ EXPORT_SYMBOL(caam_little_end);
++bool caam_imx;
++EXPORT_SYMBOL(caam_imx);
++bool caam_dpaa2;
++EXPORT_SYMBOL(caam_dpaa2);
++
++#ifdef CONFIG_CAAM_QI
++#include "qi.h"
++#endif
+
+ /*
+ * i.MX targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device.
+ */
+-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+-static inline struct clk *caam_drv_identify_clk(struct device *dev,
+- char *clk_name)
+-{
+- return devm_clk_get(dev, clk_name);
+-}
+-#else
+ static inline struct clk *caam_drv_identify_clk(struct device *dev,
+ char *clk_name)
+ {
+- return NULL;
++ return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
+ }
+-#endif
+
+ /*
+ * Descriptor to instantiate RNG State Handle 0 in normal mode and
+@@ -270,7 +271,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+- * deintialized as well
++ * deinitialized as well
+ */
+ if ((1 << sh_idx) & state_handle_mask) {
+ /*
+@@ -303,20 +304,24 @@ static int caam_remove(struct platform_device *pdev)
+ struct device *ctrldev;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_ctrl __iomem *ctrl;
+- int ring;
+
+ ctrldev = &pdev->dev;
+ ctrlpriv = dev_get_drvdata(ctrldev);
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+
+- /* Remove platform devices for JobRs */
+- for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+- if (ctrlpriv->jrpdev[ring])
+- of_device_unregister(ctrlpriv->jrpdev[ring]);
+- }
++ /* Remove platform devices under the crypto node */
++ of_platform_depopulate(ctrldev);
++
++#ifdef CONFIG_CAAM_QI
++ if (ctrlpriv->qidev)
++ caam_qi_shutdown(ctrlpriv->qidev);
++#endif
+
+- /* De-initialize RNG state handles initialized by this driver. */
+- if (ctrlpriv->rng4_sh_init)
++ /*
++ * De-initialize RNG state handles initialized by this driver.
++ * In case of DPAA 2.x, RNG is managed by MC firmware.
++ */
++ if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+ /* Shut down debug views */
+@@ -331,8 +336,8 @@ static int caam_remove(struct platform_device *pdev)
+ clk_disable_unprepare(ctrlpriv->caam_ipg);
+ clk_disable_unprepare(ctrlpriv->caam_mem);
+ clk_disable_unprepare(ctrlpriv->caam_aclk);
+- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+-
++ if (ctrlpriv->caam_emi_slow)
++ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+ return 0;
+ }
+
+@@ -366,11 +371,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
+ */
+ val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+ >> RTSDCTL_ENT_DLY_SHIFT;
+- if (ent_delay <= val) {
+- /* put RNG4 into run mode */
+- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
+- return;
+- }
++ if (ent_delay <= val)
++ goto start_rng;
+
+ val = rd_reg32(&r4tst->rtsdctl);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+@@ -382,15 +384,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
+ wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+ /* read the control register */
+ val = rd_reg32(&r4tst->rtmctl);
++start_rng:
+ /*
+ * select raw sampling in both entropy shifter
+- * and statistical checker
++ * and statistical checker; put RNG4 into run mode
+ */
+- clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
+- /* put RNG4 into run mode */
+- clrsetbits_32(&val, RTMCTL_PRGM, 0);
+- /* write back the control register */
+- wr_reg32(&r4tst->rtmctl, val);
++ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
+ }
+
+ /**
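kick_trng() now folds the sample-mode selection and the PRGM-bit clear into a single clrsetbits_32() call on the register itself, instead of the earlier read/modify/modify/write sequence on a local copy. A minimal model of that helper's read-modify-write semantics (the bit values below are illustrative; see regs.h for the documented RTMCTL layout):

#include <assert.h>

typedef unsigned int u32;

/* Clear 'clr' bits and set 'set' bits in one register update. */
static void clrsetbits_32(u32 *reg, u32 clr, u32 set)
{
	*reg = (*reg & ~clr) | set;
}

#define RTMCTL_PRGM			0x00010000
#define RTMCTL_SAMP_MODE_RAW_ES_SC	0x00000003

int main(void)
{
	u32 rtmctl = RTMCTL_PRGM;	/* RNG4 starts in program mode */

	/* One call now selects raw sampling and drops program mode. */
	clrsetbits_32(&rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);

	assert(!(rtmctl & RTMCTL_PRGM));
	assert((rtmctl & RTMCTL_SAMP_MODE_RAW_ES_SC) ==
	       RTMCTL_SAMP_MODE_RAW_ES_SC);
	return 0;
}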
+@@ -411,28 +410,26 @@ int caam_get_era(void)
+ }
+ EXPORT_SYMBOL(caam_get_era);
+
+-#ifdef CONFIG_DEBUG_FS
+-static int caam_debugfs_u64_get(void *data, u64 *val)
+-{
+- *val = caam64_to_cpu(*(u64 *)data);
+- return 0;
+-}
+-
+-static int caam_debugfs_u32_get(void *data, u64 *val)
+-{
+- *val = caam32_to_cpu(*(u32 *)data);
+- return 0;
+-}
+-
+-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+-#endif
++static const struct of_device_id caam_match[] = {
++ {
++ .compatible = "fsl,sec-v4.0",
++ },
++ {
++ .compatible = "fsl,sec4.0",
++ },
++ {},
++};
++MODULE_DEVICE_TABLE(of, caam_match);
+
+ /* Probe routine for CAAM top (controller) level */
+ static int caam_probe(struct platform_device *pdev)
+ {
+- int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
++ int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ u64 caam_id;
++ static const struct soc_device_attribute imx_soc[] = {
++ {.family = "Freescale i.MX"},
++ {},
++ };
+ struct device *dev;
+ struct device_node *nprop, *np;
+ struct caam_ctrl __iomem *ctrl;
+@@ -452,9 +449,10 @@ static int caam_probe(struct platform_device *pdev)
+
+ dev = &pdev->dev;
+ dev_set_drvdata(dev, ctrlpriv);
+- ctrlpriv->pdev = pdev;
+ nprop = pdev->dev.of_node;
+
++ caam_imx = (bool)soc_device_match(imx_soc);
++
+ /* Enable clocking */
+ clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+ if (IS_ERR(clk)) {
+@@ -483,14 +481,16 @@ static int caam_probe(struct platform_device *pdev)
+ }
+ ctrlpriv->caam_aclk = clk;
+
+- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+- if (IS_ERR(clk)) {
+- ret = PTR_ERR(clk);
+- dev_err(&pdev->dev,
+- "can't identify CAAM emi_slow clk: %d\n", ret);
+- return ret;
++ if (!of_machine_is_compatible("fsl,imx6ul")) {
++ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
++ if (IS_ERR(clk)) {
++ ret = PTR_ERR(clk);
++ dev_err(&pdev->dev,
++ "can't identify CAAM emi_slow clk: %d\n", ret);
++ return ret;
++ }
++ ctrlpriv->caam_emi_slow = clk;
+ }
+- ctrlpriv->caam_emi_slow = clk;
+
+ ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+ if (ret < 0) {
+@@ -511,11 +511,13 @@ static int caam_probe(struct platform_device *pdev)
+ goto disable_caam_mem;
+ }
+
+- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+- if (ret < 0) {
+- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+- ret);
+- goto disable_caam_aclk;
++ if (ctrlpriv->caam_emi_slow) {
++ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
++ ret);
++ goto disable_caam_aclk;
++ }
+ }
+
+ /* Get configuration properties from device tree */
+@@ -542,13 +544,13 @@ static int caam_probe(struct platform_device *pdev)
+ else
+ BLOCK_OFFSET = PG_SIZE_64K;
+
+- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+- ctrlpriv->assure = (struct caam_assurance __force *)
+- ((uint8_t *)ctrl +
++ ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
++ ctrlpriv->assure = (struct caam_assurance __iomem __force *)
++ ((__force uint8_t *)ctrl +
+ BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
+ );
+- ctrlpriv->deco = (struct caam_deco __force *)
+- ((uint8_t *)ctrl +
++ ctrlpriv->deco = (struct caam_deco __iomem __force *)
++ ((__force uint8_t *)ctrl +
+ BLOCK_OFFSET * DECO_BLOCK_NUMBER
+ );
+
+@@ -557,12 +559,17 @@ static int caam_probe(struct platform_device *pdev)
+
+ /*
+ * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+- * long pointers in master configuration register
++ * long pointers in master configuration register.
++ * In case of DPAA 2.x, Management Complex firmware performs
++ * the configuration.
+ */
+- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+- MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+- (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
++ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
++ if (!caam_dpaa2)
++ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
++ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
++ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
++ (sizeof(dma_addr_t) == sizeof(u64) ?
++ MCFGR_LONG_PTR : 0));
+
+ /*
+ * Read the Compile Time paramters and SCFGR to determine
+@@ -590,64 +597,67 @@ static int caam_probe(struct platform_device *pdev)
+ JRSTART_JR1_START | JRSTART_JR2_START |
+ JRSTART_JR3_START);
+
+- if (sizeof(dma_addr_t) == sizeof(u64))
+- if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
++ if (sizeof(dma_addr_t) == sizeof(u64)) {
++ if (caam_dpaa2)
++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
++ else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ else
+- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+- else
+- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+-
+- /*
+- * Detect and enable JobRs
+- * First, find out how many ring spec'ed, allocate references
+- * for all, then go probe each one.
+- */
+- rspec = 0;
+- for_each_available_child_of_node(nprop, np)
+- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+- of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
+- rspec++;
++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
++ } else {
++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
++ }
++ if (ret) {
++ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
++ goto iounmap_ctrl;
++ }
+
+- ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
+- sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
+- if (ctrlpriv->jrpdev == NULL) {
+- ret = -ENOMEM;
++ ret = of_platform_populate(nprop, caam_match, NULL, dev);
++ if (ret) {
++ dev_err(dev, "JR platform devices creation error\n");
+ goto iounmap_ctrl;
+ }
+
++#ifdef CONFIG_DEBUG_FS
++ /*
++ * FIXME: needs better naming distinction, as some amalgamation of
++ * "caam" and nprop->full_name. The OF name isn't distinctive,
++ * but does separate instances
++ */
++ perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
++
++ ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
++ ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
++#endif
+ ring = 0;
+- ctrlpriv->total_jobrs = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+- ctrlpriv->jrpdev[ring] =
+- of_platform_device_create(np, NULL, dev);
+- if (!ctrlpriv->jrpdev[ring]) {
+- pr_warn("JR%d Platform device creation error\n",
+- ring);
+- continue;
+- }
+- ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
+- ((uint8_t *)ctrl +
++ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
++ ((__force uint8_t *)ctrl +
+ (ring + JR_BLOCK_NUMBER) *
+ BLOCK_OFFSET
+ );
+ ctrlpriv->total_jobrs++;
+ ring++;
+- }
++ }
+
+- /* Check to see if QI present. If so, enable */
+- ctrlpriv->qi_present =
+- !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
+- CTPR_MS_QI_MASK);
+- if (ctrlpriv->qi_present) {
+- ctrlpriv->qi = (struct caam_queue_if __force *)
+- ((uint8_t *)ctrl +
++ /* Check to see if (DPAA 1.x) QI present. If so, enable */
++ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
++ if (ctrlpriv->qi_present && !caam_dpaa2) {
++ ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
++ ((__force uint8_t *)ctrl +
+ BLOCK_OFFSET * QI_BLOCK_NUMBER
+ );
+ /* This is all that's required to physically enable QI */
+ wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
++
++ /* If QMAN driver is present, init CAAM-QI backend */
++#ifdef CONFIG_CAAM_QI
++ ret = caam_qi_init(pdev);
++ if (ret)
++ dev_err(dev, "caam qi i/f init failed: %d\n", ret);
++#endif
+ }
+
+ /* If no QI and no rings specified, quit and go home */
+@@ -662,8 +672,10 @@ static int caam_probe(struct platform_device *pdev)
+ /*
+ * If SEC has RNG version >= 4 and RNG state handle has not been
+ * already instantiated, do RNG instantiation
++ * In case of DPAA 2.x, RNG is managed by MC firmware.
+ */
+- if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
++ if (!caam_dpaa2 &&
++ (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&ctrl->r4tst[0].rdsta);
+ /*
+@@ -731,77 +743,46 @@ static int caam_probe(struct platform_device *pdev)
+ /* Report "alive" for developer to see */
+ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
+ caam_get_era());
+- dev_info(dev, "job rings = %d, qi = %d\n",
+- ctrlpriv->total_jobrs, ctrlpriv->qi_present);
++ dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
++ ctrlpriv->total_jobrs, ctrlpriv->qi_present,
++ caam_dpaa2 ? "yes" : "no");
+
+ #ifdef CONFIG_DEBUG_FS
+- /*
+- * FIXME: needs better naming distinction, as some amalgamation of
+- * "caam" and nprop->full_name. The OF name isn't distinctive,
+- * but does separate instances
+- */
+- perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
+-
+- ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+- ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+-
+- /* Controller-level - performance monitor counters */
+-
+- ctrlpriv->ctl_rq_dequeued =
+- debugfs_create_file("rq_dequeued",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->req_dequeued,
+- &caam_fops_u64_ro);
+- ctrlpriv->ctl_ob_enc_req =
+- debugfs_create_file("ob_rq_encrypted",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->ob_enc_req,
+- &caam_fops_u64_ro);
+- ctrlpriv->ctl_ib_dec_req =
+- debugfs_create_file("ib_rq_decrypted",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->ib_dec_req,
+- &caam_fops_u64_ro);
+- ctrlpriv->ctl_ob_enc_bytes =
+- debugfs_create_file("ob_bytes_encrypted",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->ob_enc_bytes,
+- &caam_fops_u64_ro);
+- ctrlpriv->ctl_ob_prot_bytes =
+- debugfs_create_file("ob_bytes_protected",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->ob_prot_bytes,
+- &caam_fops_u64_ro);
+- ctrlpriv->ctl_ib_dec_bytes =
+- debugfs_create_file("ib_bytes_decrypted",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->ib_dec_bytes,
+- &caam_fops_u64_ro);
+- ctrlpriv->ctl_ib_valid_bytes =
+- debugfs_create_file("ib_bytes_validated",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->ib_valid_bytes,
+- &caam_fops_u64_ro);
++ debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->req_dequeued,
++ &caam_fops_u64_ro);
++ debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->ob_enc_req,
++ &caam_fops_u64_ro);
++ debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->ib_dec_req,
++ &caam_fops_u64_ro);
++ debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->ob_enc_bytes,
++ &caam_fops_u64_ro);
++ debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->ob_prot_bytes,
++ &caam_fops_u64_ro);
++ debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->ib_dec_bytes,
++ &caam_fops_u64_ro);
++ debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->ib_valid_bytes,
++ &caam_fops_u64_ro);
+
+ /* Controller level - global status values */
+- ctrlpriv->ctl_faultaddr =
+- debugfs_create_file("fault_addr",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->faultaddr,
+- &caam_fops_u32_ro);
+- ctrlpriv->ctl_faultdetail =
+- debugfs_create_file("fault_detail",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->faultdetail,
+- &caam_fops_u32_ro);
+- ctrlpriv->ctl_faultstatus =
+- debugfs_create_file("fault_status",
+- S_IRUSR | S_IRGRP | S_IROTH,
+- ctrlpriv->ctl, &perfmon->status,
+- &caam_fops_u32_ro);
++ debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->faultaddr,
++ &caam_fops_u32_ro);
++ debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->faultdetail,
++ &caam_fops_u32_ro);
++ debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
++ ctrlpriv->ctl, &perfmon->status,
++ &caam_fops_u32_ro);
+
+ /* Internal covering keys (useful in non-secure mode only) */
+- ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
++ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ ctrlpriv->ctl_kek = debugfs_create_blob("kek",
+ S_IRUSR |
+@@ -809,7 +790,7 @@ static int caam_probe(struct platform_device *pdev)
+ ctrlpriv->ctl,
+ &ctrlpriv->ctl_kek_wrap);
+
+- ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
++ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
+ S_IRUSR |
+@@ -817,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
+ ctrlpriv->ctl,
+ &ctrlpriv->ctl_tkek_wrap);
+
+- ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
++ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
+ S_IRUSR |
+@@ -828,13 +809,17 @@ static int caam_probe(struct platform_device *pdev)
+ return 0;
+
+ caam_remove:
++#ifdef CONFIG_DEBUG_FS
++ debugfs_remove_recursive(ctrlpriv->dfs_root);
++#endif
+ caam_remove(pdev);
+ return ret;
+
+ iounmap_ctrl:
+ iounmap(ctrl);
+ disable_caam_emi_slow:
+- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
++ if (ctrlpriv->caam_emi_slow)
++ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+ disable_caam_aclk:
+ clk_disable_unprepare(ctrlpriv->caam_aclk);
+ disable_caam_mem:
+@@ -844,17 +829,6 @@ static int caam_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static struct of_device_id caam_match[] = {
+- {
+- .compatible = "fsl,sec-v4.0",
+- },
+- {
+- .compatible = "fsl,sec4.0",
+- },
+- {},
+-};
+-MODULE_DEVICE_TABLE(of, caam_match);
+-
+ static struct platform_driver caam_driver = {
+ .driver = {
+ .name = "caam",
+diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
+index cac5402a..7e7bf68c 100644
+--- a/drivers/crypto/caam/ctrl.h
++++ b/drivers/crypto/caam/ctrl.h
+@@ -10,4 +10,6 @@
+ /* Prototypes for backend-level services exposed to APIs */
+ int caam_get_era(void);
+
++extern bool caam_dpaa2;
++
+ #endif /* CTRL_H */
+diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
+index 513b6646..6ec6f8c3 100644
+--- a/drivers/crypto/caam/desc.h
++++ b/drivers/crypto/caam/desc.h
+@@ -22,12 +22,6 @@
+ #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
+ #define SEC4_SG_OFFSET_MASK 0x00001fff
+
+-struct sec4_sg_entry {
+- u64 ptr;
+- u32 len;
+- u32 bpid_offset;
+-};
+-
+ /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
+ #define MAX_CAAM_DESCSIZE 64
+
+@@ -47,6 +41,7 @@ struct sec4_sg_entry {
+ #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+ #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+ #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
++#define CMD_MOVEB (0x07 << CMD_SHIFT)
+ #define CMD_STORE (0x0a << CMD_SHIFT)
+ #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+ #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+@@ -90,8 +85,8 @@ struct sec4_sg_entry {
+ #define HDR_ZRO 0x00008000
+
+ /* Start Index or SharedDesc Length */
+-#define HDR_START_IDX_MASK 0x3f
+ #define HDR_START_IDX_SHIFT 16
++#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
+
+ /* If shared descriptor header, 6-bit length */
+ #define HDR_DESCLEN_SHR_MASK 0x3f
+@@ -121,10 +116,10 @@ struct sec4_sg_entry {
+ #define HDR_PROP_DNR 0x00000800
+
+ /* JobDesc/SharedDesc share property */
+-#define HDR_SD_SHARE_MASK 0x03
+ #define HDR_SD_SHARE_SHIFT 8
+-#define HDR_JD_SHARE_MASK 0x07
++#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
+ #define HDR_JD_SHARE_SHIFT 8
++#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
+
+ #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+ #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+@@ -235,7 +230,7 @@ struct sec4_sg_entry {
+ #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
++#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+@@ -400,7 +395,7 @@ struct sec4_sg_entry {
+ #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
++#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+@@ -1107,8 +1102,8 @@ struct sec4_sg_entry {
+ /* For non-protocol/alg-only op commands */
+ #define OP_ALG_TYPE_SHIFT 24
+ #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+-#define OP_ALG_TYPE_CLASS1 2
+-#define OP_ALG_TYPE_CLASS2 4
++#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
++#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
+
+ #define OP_ALG_ALGSEL_SHIFT 16
+ #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+@@ -1249,7 +1244,7 @@ struct sec4_sg_entry {
+ #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+
+ /* PKHA mode copy-memory functions */
+-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
++#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
+ #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+ #define OP_ALG_PKMODE_DST_REG_SHIFT 10
+ #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+@@ -1445,7 +1440,7 @@ struct sec4_sg_entry {
+ #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+ #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+ #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+-#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
++#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
+ #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+ #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+ #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+@@ -1629,4 +1624,31 @@ struct sec4_sg_entry {
+ /* Frame Descriptor Command for Replacement Job Descriptor */
+ #define FD_CMD_REPLACE_JOB_DESC 0x20000000
+
++/* CHA Control Register bits */
++#define CCTRL_RESET_CHA_ALL 0x1
++#define CCTRL_RESET_CHA_AESA 0x2
++#define CCTRL_RESET_CHA_DESA 0x4
++#define CCTRL_RESET_CHA_AFHA 0x8
++#define CCTRL_RESET_CHA_KFHA 0x10
++#define CCTRL_RESET_CHA_SF8A 0x20
++#define CCTRL_RESET_CHA_PKHA 0x40
++#define CCTRL_RESET_CHA_MDHA 0x80
++#define CCTRL_RESET_CHA_CRCA 0x100
++#define CCTRL_RESET_CHA_RNG 0x200
++#define CCTRL_RESET_CHA_SF9A 0x400
++#define CCTRL_RESET_CHA_ZUCE 0x800
++#define CCTRL_RESET_CHA_ZUCA 0x1000
++#define CCTRL_UNLOAD_PK_A0 0x10000
++#define CCTRL_UNLOAD_PK_A1 0x20000
++#define CCTRL_UNLOAD_PK_A2 0x40000
++#define CCTRL_UNLOAD_PK_A3 0x80000
++#define CCTRL_UNLOAD_PK_B0 0x100000
++#define CCTRL_UNLOAD_PK_B1 0x200000
++#define CCTRL_UNLOAD_PK_B2 0x400000
++#define CCTRL_UNLOAD_PK_B3 0x800000
++#define CCTRL_UNLOAD_PK_N 0x1000000
++#define CCTRL_UNLOAD_PK_A 0x4000000
++#define CCTRL_UNLOAD_PK_B 0x8000000
++#define CCTRL_UNLOAD_SBOX 0x10000000
++
+ #endif /* DESC_H */
+diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
+index a8cd8a78..2d9dbeca 100644
+--- a/drivers/crypto/caam/desc_constr.h
++++ b/drivers/crypto/caam/desc_constr.h
+@@ -4,6 +4,9 @@
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
++#ifndef DESC_CONSTR_H
++#define DESC_CONSTR_H
++
+ #include "desc.h"
+ #include "regs.h"
+
+@@ -33,38 +36,39 @@
+
+ extern bool caam_little_end;
+
+-static inline int desc_len(u32 *desc)
++static inline int desc_len(u32 * const desc)
+ {
+ return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
+ }
+
+-static inline int desc_bytes(void *desc)
++static inline int desc_bytes(void * const desc)
+ {
+ return desc_len(desc) * CAAM_CMD_SZ;
+ }
+
+-static inline u32 *desc_end(u32 *desc)
++static inline u32 *desc_end(u32 * const desc)
+ {
+ return desc + desc_len(desc);
+ }
+
+-static inline void *sh_desc_pdb(u32 *desc)
++static inline void *sh_desc_pdb(u32 * const desc)
+ {
+ return desc + 1;
+ }
+
+-static inline void init_desc(u32 *desc, u32 options)
++static inline void init_desc(u32 * const desc, u32 options)
+ {
+ *desc = cpu_to_caam32((options | HDR_ONE) + 1);
+ }
+
+-static inline void init_sh_desc(u32 *desc, u32 options)
++static inline void init_sh_desc(u32 * const desc, u32 options)
+ {
+ PRINT_POS;
+ init_desc(desc, CMD_SHARED_DESC_HDR | options);
+ }
+
+-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
++static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
++ size_t pdb_bytes)
+ {
+ u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+@@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+ options);
+ }
+
+-static inline void init_job_desc(u32 *desc, u32 options)
++static inline void init_job_desc(u32 * const desc, u32 options)
+ {
+ init_desc(desc, CMD_DESC_HDR | options);
+ }
+
+-static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
++static inline void init_job_desc_pdb(u32 * const desc, u32 options,
++ size_t pdb_bytes)
+ {
+ u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+ init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
+ }
+
+-static inline void append_ptr(u32 *desc, dma_addr_t ptr)
++static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
+ {
+ dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
+
+@@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+ CAAM_PTR_SZ / CAAM_CMD_SZ);
+ }
+
+-static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
+- u32 options)
++static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
++ int len, u32 options)
+ {
+ PRINT_POS;
+ init_job_desc(desc, HDR_SHARED | options |
+@@ -103,7 +108,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
+ append_ptr(desc, ptr);
+ }
+
+-static inline void append_data(u32 *desc, void *data, int len)
++static inline void append_data(u32 * const desc, void *data, int len)
+ {
+ u32 *offset = desc_end(desc);
+
+@@ -114,7 +119,7 @@ static inline void append_data(u32 *desc, void *data, int len)
+ (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
+ }
+
+-static inline void append_cmd(u32 *desc, u32 command)
++static inline void append_cmd(u32 * const desc, u32 command)
+ {
+ u32 *cmd = desc_end(desc);
+
+@@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc, u32 command)
+
+ #define append_u32 append_cmd
+
+-static inline void append_u64(u32 *desc, u64 data)
++static inline void append_u64(u32 * const desc, u64 data)
+ {
+ u32 *offset = desc_end(desc);
+
+@@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc, u64 data)
+ }
+
+ /* Write command without affecting header, and return pointer to next word */
+-static inline u32 *write_cmd(u32 *desc, u32 command)
++static inline u32 *write_cmd(u32 * const desc, u32 command)
+ {
+ *desc = cpu_to_caam32(command);
+
+ return desc + 1;
+ }
+
+-static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
++static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
+ u32 command)
+ {
+ append_cmd(desc, command | len);
+@@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+ }
+
+ /* Write length after pointer, rather than inside command */
+-static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
++static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
+ unsigned int len, u32 command)
+ {
+ append_cmd(desc, command);
+@@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+ append_cmd(desc, len);
+ }
+
+-static inline void append_cmd_data(u32 *desc, void *data, int len,
++static inline void append_cmd_data(u32 * const desc, void *data, int len,
+ u32 command)
+ {
+ append_cmd(desc, command | IMMEDIATE | len);
+@@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
+ }
+
+ #define APPEND_CMD_RET(cmd, op) \
+-static inline u32 *append_##cmd(u32 *desc, u32 options) \
++static inline u32 *append_##cmd(u32 * const desc, u32 options) \
+ { \
+ u32 *cmd = desc_end(desc); \
+ PRINT_POS; \
+@@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
+ }
+ APPEND_CMD_RET(jump, JUMP)
+ APPEND_CMD_RET(move, MOVE)
++APPEND_CMD_RET(moveb, MOVEB)
+
+-static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
++static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
+ {
+ *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
+ (desc_len(desc) - (jump_cmd - desc)));
+ }
+
+-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
++static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
+ {
+ u32 val = caam32_to_cpu(*move_cmd);
+
+@@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+ }
+
+ #define APPEND_CMD(cmd, op) \
+-static inline void append_##cmd(u32 *desc, u32 options) \
++static inline void append_##cmd(u32 * const desc, u32 options) \
+ { \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | options); \
+@@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
+ APPEND_CMD(operation, OPERATION)
+
+ #define APPEND_CMD_LEN(cmd, op) \
+-static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
++static inline void append_##cmd(u32 * const desc, unsigned int len, \
++ u32 options) \
+ { \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | len | options); \
+@@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
+ APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
+
+ #define APPEND_CMD_PTR(cmd, op) \
+-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
+- u32 options) \
++static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
++ unsigned int len, u32 options) \
+ { \
+ PRINT_POS; \
+ append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
+@@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
+ APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
+ APPEND_CMD_PTR(fifo_store, FIFO_STORE)
+
+-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
+- u32 options)
++static inline void append_store(u32 * const desc, dma_addr_t ptr,
++ unsigned int len, u32 options)
+ {
+ u32 cmd_src;
+
+@@ -249,7 +256,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
+ }
+
+ #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
+-static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
++static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
++ dma_addr_t ptr, \
+ unsigned int len, \
+ u32 options) \
+ { \
+@@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
+ APPEND_SEQ_PTR_INTLEN(out, OUT)
+
+ #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
+-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
++static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+ unsigned int len, u32 options) \
+ { \
+ PRINT_POS; \
+@@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
+ APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
+
+ #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
+-static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
++static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
+ { \
+ PRINT_POS; \
+@@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
+ * the size of its type
+ */
+ #define APPEND_CMD_PTR_LEN(cmd, op, type) \
+-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
++static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+ type len, u32 options) \
+ { \
+ PRINT_POS; \
+@@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
+ * from length of immediate data provided, e.g., split keys
+ */
+ #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
+-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
++static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+ unsigned int data_len, \
+ unsigned int len, u32 options) \
+ { \
+@@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+ APPEND_CMD_PTR_TO_IMM2(key, KEY);
+
+ #define APPEND_CMD_RAW_IMM(cmd, op, type) \
+-static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
++static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
+ u32 options) \
+ { \
+ PRINT_POS; \
+@@ -426,3 +434,70 @@ do { \
+ APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
+ #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
++
++/**
++ * struct alginfo - Container for algorithm details
++ * @algtype: algorithm selector; for valid values, see documentation of the
++ * functions where it is used.
++ * @keylen: length of the provided algorithm key, in bytes
++ * @keylen_pad: padded length of the provided algorithm key, in bytes
++ * @key: address where algorithm key resides; virtual address if key_inline
++ * is true, dma (bus) address if key_inline is false.
++ * @key_inline: true - key can be inlined in the descriptor; false - key is
++ * referenced by the descriptor
++ */
++struct alginfo {
++ u32 algtype;
++ unsigned int keylen;
++ unsigned int keylen_pad;
++ union {
++ dma_addr_t key_dma;
++ void *key_virt;
++ };
++ bool key_inline;
++};
++
++/**
++ * desc_inline_query() - Provide indications on which data items can be inlined
++ * and which shall be referenced in a shared descriptor.
++ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
++ * excluding the data items to be inlined (or corresponding
++ * pointer if an item is not inlined). Each cnstr_* function that
++ *                 generates descriptors should have a define mentioning
++ *                 the corresponding length.
++ * @jd_len: Maximum length of the job descriptor(s) that will be used
++ * together with the shared descriptor.
++ * @data_len: Array of lengths of the data items to be inlined
++ * @inl_mask: 32-bit mask with bit x = 1 if data item x can be inlined, 0
++ *	      otherwise.
++ * @count: Number of data items (size of @data_len array); must be <= 32
++ *
++ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
++ * check @inl_mask for details.
++ */
++static inline int desc_inline_query(unsigned int sd_base_len,
++ unsigned int jd_len, unsigned int *data_len,
++ u32 *inl_mask, unsigned int count)
++{
++ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
++ unsigned int i;
++
++ *inl_mask = 0;
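++	/*
++	 * Item i can be inlined only if, after inlining it, the remaining
++	 * descriptor space still fits a pointer for each later item.
++	 */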
++ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
++ if (rem_bytes - (int)(data_len[i] +
++ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
++ rem_bytes -= data_len[i];
++ *inl_mask |= (1 << i);
++ } else {
++ rem_bytes -= CAAM_PTR_SZ;
++ }
++ }
++
++ return (rem_bytes >= 0) ? 0 : -1;
++}
++
++#endif /* DESC_CONSTR_H */
+diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
+new file mode 100644
+index 00000000..410cd790
+--- /dev/null
++++ b/drivers/crypto/caam/dpseci.c
+@@ -0,0 +1,859 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
++#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
++#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
++#include "dpseci.h"
++#include "dpseci_cmd.h"
++
++/**
++ * dpseci_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpseci_id: DPSECI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an already created
++ * object; an object may have been declared in the DPL or by calling the
++ * dpseci_create() function.
++ * This function returns a unique authentication token, associated with the
++ * specific object ID and the specific MC portal; this token must be used in all
++ * subsequent commands for this specific object.
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
++ u16 *token)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_open *cmd_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ cmd_params = (struct dpseci_cmd_open *)cmd.params;
++ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
++
++/**
++ * dpseci_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ *
++ * After this function is called, no further operations are allowed on the
++ * object without opening a new control session.
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
++ cmd_flags,
++ token);
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_create() - Create the DPSECI object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @dprc_token: Parent container token; '0' for default container
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @obj_id: returned object id
++ *
++ * Create the DPSECI object, allocate required resources and perform required
++ * initialization.
++ *
++ * The object can be created either by declaring it in the DPL file, or by
++ * calling this function.
++ *
++ * The function accepts an authentication token of a parent container that this
++ * object should be assigned to. The token can be '0' so the object will be
++ * assigned to the default container.
++ * The newly created object can be opened with the returned object id and using
++ * the container's associated tokens and MC portals.
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
++ const struct dpseci_cfg *cfg, u32 *obj_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_create *cmd_params;
++ int i, err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
++ cmd_flags,
++ dprc_token);
++ cmd_params = (struct dpseci_cmd_create *)cmd.params;
++ for (i = 0; i < 8; i++)
++ cmd_params->priorities[i] = cfg->priorities[i];
++ cmd_params->num_tx_queues = cfg->num_tx_queues;
++ cmd_params->num_rx_queues = cfg->num_rx_queues;
++ cmd_params->options = cpu_to_le32(cfg->options);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ *obj_id = mc_cmd_read_object_id(&cmd);
++
++ return 0;
++}
++
++/**
++ * dpseci_destroy() - Destroy the DPSECI object and release all its resources
++ * @mc_io: Pointer to MC portal's I/O object
++ * @dprc_token: Parent container token; '0' for default container
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @object_id: The object id; it must be a valid id within the container that
++ * created this object
++ *
++ * The function accepts the authentication token of the parent container that
++ * created the object (not the one that currently owns the object). The object
++ * is searched within parent using the provided 'object_id'.
++ * All tokens to the object must be closed before calling destroy.
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
++ u32 object_id)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_destroy *cmd_params;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
++ cmd_flags,
++ dprc_token);
++ cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
++ cmd_params->object_id = cpu_to_le32(object_id);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
++ cmd_flags,
++ token);
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_is_enabled() - Check if the DPSECI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_rsp_is_enabled *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
++ *en = le32_to_cpu(rsp_params->is_enabled);
++
++ return 0;
++}
++
++/**
++ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned Interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u8 *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_irq_enable *cmd_params;
++ struct dpseci_rsp_get_irq_enable *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
++ *en = rsp_params->enable_state;
++
++ return 0;
++}
++
++/**
++ * dpseci_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state; if the interrupt is disabled, none of its causes
++ * will trigger an interrupt.
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u8 en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_irq_enable *cmd_params;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ cmd_params->enable_state = en;
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently.
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u32 *mask)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_irq_mask *cmd_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ *mask = le32_to_cpu(cmd_params->mask);
++
++ return 0;
++}
++
++/**
++ * dpseci_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u32 mask)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_irq_mask *cmd_params;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_get_irq_status() - Get the current status of any pending interrupts
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u32 *status)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_irq_status *cmd_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ *status = le32_to_cpu(cmd_params->status);
++
++ return 0;
++}
++
++/**
++ * dpseci_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u32 status)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_irq_status *cmd_params;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(status);
++ cmd_params->irq_index = irq_index;
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_get_attributes() - Retrieve DPSECI attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ struct dpseci_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_rsp_get_attributes *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
++ attr->id = le32_to_cpu(rsp_params->id);
++ attr->num_tx_queues = rsp_params->num_tx_queues;
++ attr->num_rx_queues = rsp_params->num_rx_queues;
++ attr->options = le32_to_cpu(rsp_params->options);
++
++ return 0;
++}
++
++/**
++ * dpseci_set_rx_queue() - Set Rx queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @queue: Select the queue relative to number of priorities configured at
++ * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
++ * Rx queues identically.
++ * @cfg: Rx queue configuration
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_queue *cmd_params;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
++ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
++ cmd_params->priority = cfg->dest_cfg.priority;
++ cmd_params->queue = queue;
++ cmd_params->dest_type = cfg->dest_cfg.dest_type;
++ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
++ cmd_params->options = cpu_to_le32(cfg->options);
++ cmd_params->order_preservation_en =
++ cpu_to_le32(cfg->order_preservation_en);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_get_rx_queue() - Retrieve Rx queue attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @queue: Select the queue relative to number of priorities configured at
++ * DPSECI creation
++ * @attr: Returned Rx queue attributes
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 queue, struct dpseci_rx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_queue *cmd_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
++ cmd_params->queue = queue;
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
++ attr->dest_cfg.priority = cmd_params->priority;
++ attr->dest_cfg.dest_type = cmd_params->dest_type;
++ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
++ attr->fqid = le32_to_cpu(cmd_params->fqid);
++ attr->order_preservation_en =
++ le32_to_cpu(cmd_params->order_preservation_en);
++
++ return 0;
++}
++
++/**
++ * dpseci_get_tx_queue() - Retrieve Tx queue attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @queue: Select the queue relative to number of priorities configured at
++ * DPSECI creation
++ * @attr: Returned Tx queue attributes
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 queue, struct dpseci_tx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_queue *cmd_params;
++ struct dpseci_rsp_get_tx_queue *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
++ cmd_params->queue = queue;
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
++ attr->fqid = le32_to_cpu(rsp_params->fqid);
++ attr->priority = rsp_params->priority;
++
++ return 0;
++}
++
++/**
++ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @attr: Returned SEC attributes
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ struct dpseci_sec_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_rsp_get_sec_attr *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
++ cmd_flags,
++ token);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
++ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
++ attr->major_rev = rsp_params->major_rev;
++ attr->minor_rev = rsp_params->minor_rev;
++ attr->era = rsp_params->era;
++ attr->deco_num = rsp_params->deco_num;
++ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
++ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
++ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
++ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
++ attr->crc_acc_num = rsp_params->crc_acc_num;
++ attr->pk_acc_num = rsp_params->pk_acc_num;
++ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
++ attr->rng_acc_num = rsp_params->rng_acc_num;
++ attr->md_acc_num = rsp_params->md_acc_num;
++ attr->arc4_acc_num = rsp_params->arc4_acc_num;
++ attr->des_acc_num = rsp_params->des_acc_num;
++ attr->aes_acc_num = rsp_params->aes_acc_num;
++
++ return 0;
++}
++
++/**
++ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @counters: Returned SEC counters
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ struct dpseci_sec_counters *counters)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_rsp_get_sec_counters *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
++ cmd_flags,
++ token);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
++ counters->dequeued_requests =
++ le64_to_cpu(rsp_params->dequeued_requests);
++ counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
++ counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
++ counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
++ counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
++ counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
++ counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
++
++ return 0;
++}
++
++/**
++ * dpseci_get_api_version() - Get Data Path SEC Interface API version
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of data path sec API
++ * @minor_ver: Minor version of data path sec API
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
++ u16 *major_ver, u16 *minor_ver)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_rsp_get_api_version *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
++ *major_ver = le16_to_cpu(rsp_params->major);
++ *minor_ver = le16_to_cpu(rsp_params->minor);
++
++ return 0;
++}
++
++/**
++ * dpseci_set_opr() - Set Order Restoration configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @index: The queue index
++ * @options: Configuration mode options; can be OPR_OPT_CREATE or
++ * OPR_OPT_RETIRE
++ * @cfg: Configuration options for the OPR
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
++ u8 options, struct opr_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_opr *cmd_params;
++
++ cmd.header = mc_encode_cmd_header(
++ DPSECI_CMDID_SET_OPR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_opr *)cmd.params;
++ cmd_params->index = index;
++ cmd_params->options = options;
++ cmd_params->oloe = cfg->oloe;
++ cmd_params->oeane = cfg->oeane;
++ cmd_params->olws = cfg->olws;
++ cmd_params->oa = cfg->oa;
++ cmd_params->oprrws = cfg->oprrws;
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_get_opr() - Retrieve Order Restoration config and query
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @index: The queue index
++ * @cfg: Returned OPR configuration
++ * @qry: Returned OPR query
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
++ struct opr_cfg *cfg, struct opr_qry *qry)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_opr *cmd_params;
++ struct dpseci_rsp_get_opr *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_opr *)cmd.params;
++ cmd_params->index = index;
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
++ qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
++ qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
++ cfg->oloe = rsp_params->oloe;
++ cfg->oeane = rsp_params->oeane;
++ cfg->olws = rsp_params->olws;
++ cfg->oa = rsp_params->oa;
++ cfg->oprrws = rsp_params->oprrws;
++ qry->nesn = le16_to_cpu(rsp_params->nesn);
++ qry->ndsn = le16_to_cpu(rsp_params->ndsn);
++ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
++ qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
++ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
++ qry->hseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_HSEQ_NLIS);
++ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
++ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
++ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
++ qry->opr_id = le16_to_cpu(rsp_params->opr_id);
++
++ return 0;
++}
++
++/**
++ * dpseci_set_congestion_notification() - Set congestion group
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
++ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_congestion_notification *cmd_params;
++
++ cmd.header = mc_encode_cmd_header(
++ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
++ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
++ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
++ cmd_params->priority = cfg->dest_cfg.priority;
++ dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
++ cfg->dest_cfg.dest_type);
++ dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
++ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
++ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
++ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
++ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpseci_get_congestion_notification() - Get congestion group notification
++ * configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on success, error code otherwise
++ */
++int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
++ u16 token, struct dpseci_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpseci_cmd_congestion_notification *rsp_params;
++ int err;
++
++ cmd.header = mc_encode_cmd_header(
++ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
++ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
++ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
++ cfg->dest_cfg.priority = rsp_params->priority;
++ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
++ CGN_DEST_TYPE);
++ cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
++ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
++ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
++ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
++ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
++
++ return 0;
++}
+diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
+new file mode 100644
+index 00000000..d37489c6
+--- /dev/null
++++ b/drivers/crypto/caam/dpseci.h
+@@ -0,0 +1,395 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _DPSECI_H_
++#define _DPSECI_H_
++
++/*
++ * Data Path SEC Interface API
++ * Contains initialization APIs and runtime control APIs for DPSECI
++ */
++
++struct fsl_mc_io;
++struct opr_cfg;
++struct opr_qry;
++
++/**
++ * General DPSECI macros
++ */
++
++/**
++ * Maximum number of Tx/Rx priorities per DPSECI object
++ */
++#define DPSECI_PRIO_NUM 8
++
++/**
++ * All queues considered; see dpseci_set_rx_queue()
++ */
++#define DPSECI_ALL_QUEUES (u8)(-1)
++
++int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
++ u16 *token);
++
++int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
++
++/**
++ * Enable the Congestion Group support
++ */
++#define DPSECI_OPT_HAS_CG 0x000020
++
++/**
++ * Enable the Order Restoration support
++ */
++#define DPSECI_OPT_HAS_OPR 0x000040
++
++/**
++ * Order Point Records are shared for the entire DPSECI
++ */
++#define DPSECI_OPT_OPR_SHARED 0x000080
++
++/**
++ * struct dpseci_cfg - Structure representing DPSECI configuration
++ * @options: Any combination of the following options:
++ * DPSECI_OPT_HAS_CG
++ * DPSECI_OPT_HAS_OPR
++ * DPSECI_OPT_OPR_SHARED
++ * @num_tx_queues: number of queues towards the SEC
++ * @num_rx_queues: number of queues back from the SEC
++ * @priorities: Priorities for the SEC hardware processing;
++ *		each place in the array is the priority of the tx queue
++ *		towards the SEC;
++ *		valid priorities are configured with values 1-8
++ */
++struct dpseci_cfg {
++ u32 options;
++ u8 num_tx_queues;
++ u8 num_rx_queues;
++ u8 priorities[DPSECI_PRIO_NUM];
++};
++
++int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
++ const struct dpseci_cfg *cfg, u32 *obj_id);
++
++int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
++ u32 object_id);
++
++int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
++
++int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
++
++int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ int *en);
++
++int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
++
++int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u8 *en);
++
++int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u8 en);
++
++int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u32 *mask);
++
++int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u32 mask);
++
++int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u32 *status);
++
++int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 irq_index, u32 status);
++
++/**
++ * struct dpseci_attr - Structure representing DPSECI attributes
++ * @id: DPSECI object ID
++ * @num_tx_queues: number of queues towards the SEC
++ * @num_rx_queues: number of queues back from the SEC
++ * @options: any combination of the following options:
++ * DPSECI_OPT_HAS_CG
++ * DPSECI_OPT_HAS_OPR
++ * DPSECI_OPT_OPR_SHARED
++ */
++struct dpseci_attr {
++ int id;
++ u8 num_tx_queues;
++ u8 num_rx_queues;
++ u32 options;
++};
++
++int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ struct dpseci_attr *attr);
++
++/**
++ * enum dpseci_dest - DPSECI destination types
++ * @DPSECI_DEST_NONE: Unassigned destination; the queue is set in parked mode
++ * and does not generate FQDAN notifications; user is expected to dequeue
++ * from the queue based on polling or other user-defined method
++ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to dequeue from
++ * the queue only after notification is received
++ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified DPCON object;
++ * user is expected to dequeue from the DPCON channel
++ */
++enum dpseci_dest {
++ DPSECI_DEST_NONE = 0,
++ DPSECI_DEST_DPIO,
++ DPSECI_DEST_DPCON
++};
++
++/**
++ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid values
++ * are 0-1 or 0-7, depending on the number of priorities in that channel;
++ * not relevant for 'DPSECI_DEST_NONE' option
++ */
++struct dpseci_dest_cfg {
++ enum dpseci_dest dest_type;
++ int dest_id;
++ u8 priority;
++};
++
++/**
++ * DPSECI queue modification options
++ */
++
++/**
++ * Select to modify the user's context associated with the queue
++ */
++#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
++
++/**
++ * Select to modify the queue's destination
++ */
++#define DPSECI_QUEUE_OPT_DEST 0x00000002
++
++/**
++ * Select to modify the queue's order preservation
++ */
++#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
++
++/**
++ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
++ * @options: Flags representing the suggested modifications to the queue;
++ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
++ * @order_preservation_en: order preservation configuration for the rx queue;
++ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
++ * in 'options'
++ * @dest_cfg: Queue destination parameters; valid only if
++ * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
++ */
++struct dpseci_rx_queue_cfg {
++ u32 options;
++ int order_preservation_en;
++ u64 user_ctx;
++ struct dpseci_dest_cfg dest_cfg;
++};
++
++int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 queue, const struct dpseci_rx_queue_cfg *cfg);
++
++/**
++ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame
++ * @order_preservation_en: Status of the order preservation configuration on the
++ * queue
++ * @dest_cfg: Queue destination configuration
++ * @fqid: Virtual FQID value to be used for dequeue operations
++ */
++struct dpseci_rx_queue_attr {
++ u64 user_ctx;
++ int order_preservation_en;
++ struct dpseci_dest_cfg dest_cfg;
++ u32 fqid;
++};
++
++int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 queue, struct dpseci_rx_queue_attr *attr);
++
++/**
++ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
++ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
++ * @priority: SEC hardware processing priority for the queue
++ */
++struct dpseci_tx_queue_attr {
++ u32 fqid;
++ u8 priority;
++};
++
++int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ u8 queue, struct dpseci_tx_queue_attr *attr);
++
++/**
++ * struct dpseci_sec_attr - Structure representing attributes of the SEC
++ * hardware accelerator
++ * @ip_id: ID for SEC
++ * @major_rev: Major revision number for SEC
++ * @minor_rev: Minor revision number for SEC
++ * @era: SEC Era
++ * @deco_num: The number of copies of the DECO that are implemented in this
++ * version of SEC
++ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
++ * version of SEC
++ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
++ * version of SEC
++ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
++ * implemented in this version of SEC
++ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
++ * implemented in this version of SEC
++ * @crc_acc_num: The number of copies of the CRC module that are implemented in
++ * this version of SEC
++ * @pk_acc_num: The number of copies of the Public Key module that are
++ * implemented in this version of SEC
++ * @kasumi_acc_num: The number of copies of the Kasumi module that are
++ * implemented in this version of SEC
++ * @rng_acc_num: The number of copies of the Random Number Generator that are
++ * implemented in this version of SEC
++ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
++ * implemented in this version of SEC
++ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
++ * in this version of SEC
++ * @des_acc_num: The number of copies of the DES module that are implemented in
++ * this version of SEC
++ * @aes_acc_num: The number of copies of the AES module that are implemented in
++ * this version of SEC
++ */
++struct dpseci_sec_attr {
++ u16 ip_id;
++ u8 major_rev;
++ u8 minor_rev;
++ u8 era;
++ u8 deco_num;
++ u8 zuc_auth_acc_num;
++ u8 zuc_enc_acc_num;
++ u8 snow_f8_acc_num;
++ u8 snow_f9_acc_num;
++ u8 crc_acc_num;
++ u8 pk_acc_num;
++ u8 kasumi_acc_num;
++ u8 rng_acc_num;
++ u8 md_acc_num;
++ u8 arc4_acc_num;
++ u8 des_acc_num;
++ u8 aes_acc_num;
++};
++
++int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ struct dpseci_sec_attr *attr);
++
++/**
++ * struct dpseci_sec_counters - Structure representing global SEC counters
++ * (not per-dpseci counters)
++ * @dequeued_requests: Number of Requests Dequeued
++ * @ob_enc_requests: Number of Outbound Encrypt Requests
++ * @ib_dec_requests: Number of Inbound Decrypt Requests
++ * @ob_enc_bytes: Number of Outbound Bytes Encrypted
++ * @ob_prot_bytes: Number of Outbound Bytes Protected
++ * @ib_dec_bytes: Number of Inbound Bytes Decrypted
++ * @ib_valid_bytes: Number of Inbound Bytes Validated
++ */
++struct dpseci_sec_counters {
++ u64 dequeued_requests;
++ u64 ob_enc_requests;
++ u64 ib_dec_requests;
++ u64 ob_enc_bytes;
++ u64 ob_prot_bytes;
++ u64 ib_dec_bytes;
++ u64 ib_valid_bytes;
++};
++
++int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
++ struct dpseci_sec_counters *counters);
++
++int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
++ u16 *major_ver, u16 *minor_ver);
++
++int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
++ u8 options, struct opr_cfg *cfg);
++
++int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
++ struct opr_cfg *cfg, struct opr_qry *qry);
++
++/**
++ * enum dpseci_congestion_unit - DPSECI congestion units
++ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
++ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
++ */
++enum dpseci_congestion_unit {
++ DPSECI_CONGESTION_UNIT_BYTES = 0,
++ DPSECI_CONGESTION_UNIT_FRAMES
++};
++
++#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
++#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
++#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
++#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
++#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
++#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
++
++/**
++ * struct dpseci_congestion_notification_cfg - congestion notification
++ * configuration
++ * @units: units type
++ * @threshold_entry: above this threshold we enter a congestion state;
++ * set it to '0' to disable it
++ * @threshold_exit: below this threshold we exit the congestion state.
++ * @message_ctx: The context that will be part of the CSCN message
++ * @message_iova: I/O virtual address (must be in DMA-able memory),
++ * must be 16B aligned;
++ * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel
++ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
++ * values
++ */
++struct dpseci_congestion_notification_cfg {
++ enum dpseci_congestion_unit units;
++ u32 threshold_entry;
++ u32 threshold_exit;
++ u64 message_ctx;
++ u64 message_iova;
++ struct dpseci_dest_cfg dest_cfg;
++ u16 notification_mode;
++};
++
++int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
++ u16 token, const struct dpseci_congestion_notification_cfg *cfg);
++
++int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
++ u16 token, struct dpseci_congestion_notification_cfg *cfg);
++
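++/*
++ * Example sketch (threshold values are arbitrary; 'cscn_dma' and 'priv' are
++ * hypothetical caller state, with 'cscn_dma' pointing to 16B-aligned,
++ * DMA-able memory): enter congestion above 1024 pending frames, exit below
++ * 512, with the CSCN written to memory on both transitions.
++ *
++ * struct dpseci_congestion_notification_cfg cg = {
++ * .units = DPSECI_CONGESTION_UNIT_FRAMES,
++ * .threshold_entry = 1024,
++ * .threshold_exit = 512,
++ * .message_iova = cscn_dma,
++ * .message_ctx = (u64)(uintptr_t)priv,
++ * .notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
++ * DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
++ * };
++ * err = dpseci_set_congestion_notification(mc_io, 0, token, &cg);
++ */
++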
++#endif /* _DPSECI_H_ */
+diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
+new file mode 100644
+index 00000000..7624315e
+--- /dev/null
++++ b/drivers/crypto/caam/dpseci_cmd.h
+@@ -0,0 +1,261 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _DPSECI_CMD_H_
++#define _DPSECI_CMD_H_
++
++/* DPSECI Version */
++#define DPSECI_VER_MAJOR 5
++#define DPSECI_VER_MINOR 1
++
++#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
++#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
++
++/* Command IDs */
++
++#define DPSECI_CMDID_CLOSE 0x8001
++#define DPSECI_CMDID_OPEN 0x8091
++#define DPSECI_CMDID_CREATE 0x9092
++#define DPSECI_CMDID_DESTROY 0x9891
++#define DPSECI_CMDID_GET_API_VERSION 0xa091
++
++#define DPSECI_CMDID_ENABLE 0x0021
++#define DPSECI_CMDID_DISABLE 0x0031
++#define DPSECI_CMDID_GET_ATTR 0x0041
++#define DPSECI_CMDID_RESET 0x0051
++#define DPSECI_CMDID_IS_ENABLED 0x0061
++
++#define DPSECI_CMDID_SET_IRQ_ENABLE 0x0121
++#define DPSECI_CMDID_GET_IRQ_ENABLE 0x0131
++#define DPSECI_CMDID_SET_IRQ_MASK 0x0141
++#define DPSECI_CMDID_GET_IRQ_MASK 0x0151
++#define DPSECI_CMDID_GET_IRQ_STATUS 0x0161
++#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x0171
++
++#define DPSECI_CMDID_SET_RX_QUEUE 0x1941
++#define DPSECI_CMDID_GET_RX_QUEUE 0x1961
++#define DPSECI_CMDID_GET_TX_QUEUE 0x1971
++#define DPSECI_CMDID_GET_SEC_ATTR 0x1981
++#define DPSECI_CMDID_GET_SEC_COUNTERS 0x1991
++#define DPSECI_CMDID_SET_OPR 0x19A1
++#define DPSECI_CMDID_GET_OPR 0x19B1
++
++#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION 0x1701
++#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION 0x1711
++
++/* Macros for accessing command fields smaller than 1 byte */
++#define DPSECI_MASK(field) \
++ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
++ DPSECI_##field##_SHIFT)
++
++#define dpseci_set_field(var, field, val) \
++ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
++
++#define dpseci_get_field(var, field) \
++ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
++
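++/*
++ * Example (illustrative): with DPSECI_CGN_UNITS_SHIFT = 4 and
++ * DPSECI_CGN_UNITS_SIZE = 2 (defined further down), packing and unpacking
++ * the congestion units sub-field of a command byte looks like:
++ *
++ * u8 type_units = 0;
++ *
++ * dpseci_set_field(type_units, CGN_UNITS, DPSECI_CONGESTION_UNIT_FRAMES);
++ * units = dpseci_get_field(type_units, CGN_UNITS);
++ */
++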
++struct dpseci_cmd_open {
++ __le32 dpseci_id;
++};
++
++struct dpseci_cmd_create {
++ u8 priorities[8];
++ u8 num_tx_queues;
++ u8 num_rx_queues;
++ __le16 pad;
++ __le32 options;
++};
++
++struct dpseci_cmd_destroy {
++ __le32 object_id;
++};
++
++struct dpseci_rsp_is_enabled {
++ __le32 is_enabled;
++};
++
++struct dpseci_cmd_irq_enable {
++ u8 enable_state;
++ u8 pad[3];
++ u8 irq_index;
++};
++
++struct dpseci_rsp_get_irq_enable {
++ u8 enable_state;
++};
++
++struct dpseci_cmd_irq_mask {
++ __le32 mask;
++ u8 irq_index;
++};
++
++struct dpseci_cmd_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
++
++struct dpseci_rsp_get_attributes {
++ __le32 id;
++ __le32 pad0;
++ u8 num_tx_queues;
++ u8 num_rx_queues;
++ u8 pad1[6];
++ __le32 options;
++};
++
++struct dpseci_cmd_queue {
++ __le32 dest_id;
++ u8 priority;
++ u8 queue;
++ u8 dest_type;
++ u8 pad;
++ __le64 user_ctx;
++ union {
++ __le32 options;
++ __le32 fqid;
++ };
++ __le32 order_preservation_en;
++};
++
++struct dpseci_rsp_get_tx_queue {
++ __le32 pad;
++ __le32 fqid;
++ u8 priority;
++};
++
++struct dpseci_rsp_get_sec_attr {
++ __le16 ip_id;
++ u8 major_rev;
++ u8 minor_rev;
++ u8 era;
++ u8 pad0[3];
++ u8 deco_num;
++ u8 zuc_auth_acc_num;
++ u8 zuc_enc_acc_num;
++ u8 pad1;
++ u8 snow_f8_acc_num;
++ u8 snow_f9_acc_num;
++ u8 crc_acc_num;
++ u8 pad2;
++ u8 pk_acc_num;
++ u8 kasumi_acc_num;
++ u8 rng_acc_num;
++ u8 pad3;
++ u8 md_acc_num;
++ u8 arc4_acc_num;
++ u8 des_acc_num;
++ u8 aes_acc_num;
++};
++
++struct dpseci_rsp_get_sec_counters {
++ __le64 dequeued_requests;
++ __le64 ob_enc_requests;
++ __le64 ib_dec_requests;
++ __le64 ob_enc_bytes;
++ __le64 ob_prot_bytes;
++ __le64 ib_dec_bytes;
++ __le64 ib_valid_bytes;
++};
++
++struct dpseci_rsp_get_api_version {
++ __le16 major;
++ __le16 minor;
++};
++
++struct dpseci_cmd_opr {
++ __le16 pad;
++ u8 index;
++ u8 options;
++ u8 pad1[7];
++ u8 oloe;
++ u8 oeane;
++ u8 olws;
++ u8 oa;
++ u8 oprrws;
++};
++
++#define DPSECI_OPR_RIP_SHIFT 0
++#define DPSECI_OPR_RIP_SIZE 1
++#define DPSECI_OPR_ENABLE_SHIFT 1
++#define DPSECI_OPR_ENABLE_SIZE 1
++#define DPSECI_OPR_TSEQ_NLIS_SHIFT 1
++#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
++#define DPSECI_OPR_HSEQ_NLIS_SHIFT 1
++#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
++
++struct dpseci_rsp_get_opr {
++ __le64 pad;
++ u8 rip_enable;
++ u8 pad0[2];
++ u8 oloe;
++ u8 oeane;
++ u8 olws;
++ u8 oa;
++ u8 oprrws;
++ __le16 nesn;
++ __le16 pad1;
++ __le16 ndsn;
++ __le16 pad2;
++ __le16 ea_tseq;
++ u8 tseq_nlis;
++ u8 pad3;
++ __le16 ea_hseq;
++ u8 hseq_nlis;
++ u8 pad4;
++ __le16 ea_hptr;
++ __le16 pad5;
++ __le16 ea_tptr;
++ __le16 pad6;
++ __le16 opr_vid;
++ __le16 pad7;
++ __le16 opr_id;
++};
++
++#define DPSECI_CGN_DEST_TYPE_SHIFT 0
++#define DPSECI_CGN_DEST_TYPE_SIZE 4
++#define DPSECI_CGN_UNITS_SHIFT 4
++#define DPSECI_CGN_UNITS_SIZE 2
++
++struct dpseci_cmd_congestion_notification {
++ __le32 dest_id;
++ __le16 notification_mode;
++ u8 priority;
++ u8 options;
++ __le64 message_iova;
++ __le64 message_ctx;
++ __le32 threshold_entry;
++ __le32 threshold_exit;
++};
++
++#endif /* _DPSECI_CMD_H_ */
+diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
+index 33e41ea8..31963397 100644
+--- a/drivers/crypto/caam/error.c
++++ b/drivers/crypto/caam/error.c
+@@ -6,11 +6,54 @@
+
+ #include "compat.h"
+ #include "regs.h"
+-#include "intern.h"
+ #include "desc.h"
+-#include "jr.h"
+ #include "error.h"
+
++#ifdef DEBUG
++
++#include <linux/highmem.h>
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++ int rowsize, int groupsize, struct scatterlist *sg,
++ size_t tlen, bool ascii)
++{
++ struct scatterlist *it;
++ void *it_page;
++ size_t len;
++ void *buf;
++
++ for (it = sg; it && tlen > 0; it = sg_next(it)) {
++ /*
++ * make sure the scatterlist's page
++ * has a valid virtual memory mapping
++ */
++ it_page = kmap_atomic(sg_page(it));
++ if (unlikely(!it_page)) {
++ pr_err("caam_dump_sg: kmap failed\n");
++ return;
++ }
++
++ buf = it_page + it->offset;
++ len = min_t(size_t, tlen, it->length);
++ print_hex_dump(level, prefix_str, prefix_type, rowsize,
++ groupsize, buf, len, ascii);
++ tlen -= len;
++
++ kunmap_atomic(it_page);
++ }
++}
++
++#else
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++ int rowsize, int groupsize, struct scatterlist *sg,
++ size_t tlen, bool ascii)
++{}
++
++#endif
++
++EXPORT_SYMBOL(caam_dump_sg);
++
+ static const struct {
+ u8 value;
+ const char *error_text;
+@@ -69,6 +112,54 @@ static const struct {
+ { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+ };
+
++static const struct {
++ u8 value;
++ const char *error_text;
++} qi_error_list[] = {
++ { 0x1F, "Job terminated by FQ or ICID flush" },
++ { 0x20, "FD format error"},
++ { 0x21, "FD command format error"},
++ { 0x23, "FL format error"},
++ { 0x25, "CRJD specified in FD, but not enabled in FLC"},
++ { 0x30, "Max. buffer size too small"},
++ { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
++ { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"},
++ { 0x33, "Size over/underflow (allocate mode)"},
++ { 0x34, "Size over/underflow (reuse mode)"},
++ { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"},
++ { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G/ format)"},
++ { 0x41, "SBC frame format not supported (allocate mode)"},
++ { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
++ { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
++ { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
++ { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
++ { 0x46, "Annotation length exceeds offset (reuse mode)"},
++ { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
++ { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
++ { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
++ { 0x51, "Unsupported IF reuse mode"},
++ { 0x52, "Unsupported FL use mode"},
++ { 0x53, "Unsupported RJD use mode"},
++ { 0x54, "Unsupported inline descriptor use mode"},
++ { 0xC0, "Table buffer pool 0 depletion"},
++ { 0xC1, "Table buffer pool 1 depletion"},
++ { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
++ { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
++ { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
++ { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
++ { 0xD0, "FLC read error"},
++ { 0xD1, "FL read error"},
++ { 0xD2, "FL write error"},
++ { 0xD3, "OF SGT write error"},
++ { 0xD4, "PTA read error"},
++ { 0xD5, "PTA write error"},
++ { 0xD6, "OF SGT F-bit write error"},
++ { 0xD7, "ASA write error"},
++ { 0xE1, "FLC[ICR]=0 ICID error"},
++ { 0xE2, "FLC[ICR]=1 ICID error"},
++ { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
++};
++
+ static const char * const cha_id_list[] = {
+ "",
+ "AES",
+@@ -146,10 +237,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
+ strlen(rng_err_id_list[err_id])) {
+ /* RNG-only error */
+ err_str = rng_err_id_list[err_id];
+- } else if (err_id < ARRAY_SIZE(err_id_list))
++ } else {
+ err_str = err_id_list[err_id];
+- else
+- snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
++ }
+
+ /*
+ * CCB ICV check failures are part of normal operation life;
+@@ -198,6 +288,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
+ status, error, idx_str, idx, err_str, err_err_code);
+ }
+
++static void report_qi_status(struct device *qidev, const u32 status,
++ const char *error)
++{
++ u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
++ const char *err_str = "unidentified error value 0x";
++ char err_err_code[3] = { 0 };
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
++ if (qi_error_list[i].value == err_id)
++ break;
++
++ if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
++ err_str = qi_error_list[i].error_text;
++ else
++ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
++
++ dev_err(qidev, "%08x: %s: %s%s\n",
++ status, error, err_str, err_err_code);
++}
++
+ static void report_jr_status(struct device *jrdev, const u32 status,
+ const char *error)
+ {
+@@ -212,7 +323,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
+ status, error, __func__);
+ }
+
+-void caam_jr_strstatus(struct device *jrdev, u32 status)
++void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
+ {
+ static const struct stat_src {
+ void (*report_ssed)(struct device *jrdev, const u32 status,
+@@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
+ { report_ccb_status, "CCB" },
+ { report_jump_status, "Jump" },
+ { report_deco_status, "DECO" },
+- { NULL, "Queue Manager Interface" },
++ { report_qi_status, "Queue Manager Interface" },
+ { report_jr_status, "Job Ring" },
+ { report_cond_code_status, "Condition Code" },
+ { NULL, NULL },
+@@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
+ else
+ dev_err(jrdev, "%d: unknown error source\n", ssrc);
+ }
+-EXPORT_SYMBOL(caam_jr_strstatus);
++EXPORT_SYMBOL(caam_strstatus);
+diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
+index b6350b0d..751ddcac 100644
+--- a/drivers/crypto/caam/error.h
++++ b/drivers/crypto/caam/error.h
+@@ -7,5 +7,13 @@
+ #ifndef CAAM_ERROR_H
+ #define CAAM_ERROR_H
+ #define CAAM_ERROR_STR_MAX 302
+-void caam_jr_strstatus(struct device *jrdev, u32 status);
++
++void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
++
++#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
++#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++ int rowsize, int groupsize, struct scatterlist *sg,
++ size_t tlen, bool ascii);
+ #endif /* CAAM_ERROR_H */
+diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
+index 5d4c0507..a5236125 100644
+--- a/drivers/crypto/caam/intern.h
++++ b/drivers/crypto/caam/intern.h
+@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
+ struct device *dev;
+ int ridx;
+ struct caam_job_ring __iomem *rregs; /* JobR's register space */
++ struct tasklet_struct irqtask;
+ int irq; /* One per queue */
+
+ /* Number of scatterlist crypt transforms active on the JobR */
+@@ -63,10 +64,9 @@ struct caam_drv_private_jr {
+ * Driver-private storage for a single CAAM block instance
+ */
+ struct caam_drv_private {
+-
+- struct device *dev;
+- struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
+- struct platform_device *pdev;
++#ifdef CONFIG_CAAM_QI
++ struct device *qidev;
++#endif
+
+ /* Physical-presence section */
+ struct caam_ctrl __iomem *ctrl; /* controller region */
+@@ -102,11 +102,6 @@ struct caam_drv_private {
+ #ifdef CONFIG_DEBUG_FS
+ struct dentry *dfs_root;
+ struct dentry *ctl; /* controller dir */
+- struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
+- struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
+- struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
+- struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
+-
+ struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
+ struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
+ #endif
+@@ -114,4 +109,22 @@ struct caam_drv_private {
+
+ void caam_jr_algapi_init(struct device *dev);
+ void caam_jr_algapi_remove(struct device *dev);
++
++#ifdef CONFIG_DEBUG_FS
++static int caam_debugfs_u64_get(void *data, u64 *val)
++{
++ *val = caam64_to_cpu(*(u64 *)data);
++ return 0;
++}
++
++static int caam_debugfs_u32_get(void *data, u64 *val)
++{
++ *val = caam32_to_cpu(*(u32 *)data);
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
++DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
++#endif
++
+ #endif /* INTERN_H */
+diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
+index 757c27f9..d258953f 100644
+--- a/drivers/crypto/caam/jr.c
++++ b/drivers/crypto/caam/jr.c
+@@ -9,6 +9,7 @@
+ #include <linux/of_address.h>
+
+ #include "compat.h"
++#include "ctrl.h"
+ #include "regs.h"
+ #include "jr.h"
+ #include "desc.h"
+@@ -73,6 +74,8 @@ static int caam_jr_shutdown(struct device *dev)
+
+ ret = caam_reset_hw_jr(dev);
+
++ tasklet_kill(&jrp->irqtask);
++
+ /* Release interrupt */
+ free_irq(jrp->irq, dev);
+
+@@ -128,7 +131,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+
+ /*
+ * Check the output ring for ready responses, kick
+- * the threaded irq if jobs done.
++ * tasklet if jobs done.
+ */
+ irqstate = rd_reg32(&jrp->rregs->jrintstatus);
+ if (!irqstate)
+@@ -150,13 +153,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+ /* Have valid interrupt at this point, just ACK and trigger */
+ wr_reg32(&jrp->rregs->jrintstatus, irqstate);
+
+- return IRQ_WAKE_THREAD;
++ preempt_disable();
++ tasklet_schedule(&jrp->irqtask);
++ preempt_enable();
++
++ return IRQ_HANDLED;
+ }
+
+-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
++/* Deferred service handler, run as interrupt-fired tasklet */
++static void caam_jr_dequeue(unsigned long devarg)
+ {
+ int hw_idx, sw_idx, i, head, tail;
+- struct device *dev = st_dev;
++ struct device *dev = (struct device *)devarg;
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+ u32 *userdesc, userstatus;
+@@ -230,8 +238,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+
+ /* reenable / unmask IRQs */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+-
+- return IRQ_HANDLED;
+ }
+
+ /**
+@@ -389,10 +395,11 @@ static int caam_jr_init(struct device *dev)
+
+ jrp = dev_get_drvdata(dev);
+
++ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
++
+ /* Connect job ring interrupt handler. */
+- error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
+- caam_jr_threadirq, IRQF_SHARED,
+- dev_name(dev), dev);
++ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
++ dev_name(dev), dev);
+ if (error) {
+ dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+ jrp->ridx, jrp->irq);
+@@ -454,6 +461,7 @@ static int caam_jr_init(struct device *dev)
+ out_free_irq:
+ free_irq(jrp->irq, dev);
+ out_kill_deq:
++ tasklet_kill(&jrp->irqtask);
+ return error;
+ }
+
+@@ -489,15 +497,28 @@ static int caam_jr_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ }
+
+- jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
++ jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
+
+- if (sizeof(dma_addr_t) == sizeof(u64))
+- if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
+- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
++ if (sizeof(dma_addr_t) == sizeof(u64)) {
++ if (caam_dpaa2)
++ error = dma_set_mask_and_coherent(jrdev,
++ DMA_BIT_MASK(49));
++ else if (of_device_is_compatible(nprop,
++ "fsl,sec-v5.0-job-ring"))
++ error = dma_set_mask_and_coherent(jrdev,
++ DMA_BIT_MASK(40));
+ else
+- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
+- else
+- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
++ error = dma_set_mask_and_coherent(jrdev,
++ DMA_BIT_MASK(36));
++ } else {
++ error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
++ }
++ if (error) {
++ dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
++ error);
++ iounmap(ctrl);
++ return error;
++ }
+
+ /* Identify the interrupt */
+ jrpriv->irq = irq_of_parse_and_map(nprop, 0);
+@@ -520,7 +541,7 @@ static int caam_jr_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct of_device_id caam_jr_match[] = {
++static const struct of_device_id caam_jr_match[] = {
+ {
+ .compatible = "fsl,sec-v4.0-job-ring",
+ },
+diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
+index 3ce1d5cd..a523ed77 100644
+--- a/drivers/crypto/caam/key_gen.c
++++ b/drivers/crypto/caam/key_gen.c
+@@ -41,15 +41,29 @@ Split key generation-----------------------------------------------
+ [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
+ @0xffe04000
+ */
+-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+- int split_key_pad_len, const u8 *key_in, u32 keylen,
+- u32 alg_op)
++int gen_split_key(struct device *jrdev, u8 *key_out,
++ struct alginfo * const adata, const u8 *key_in, u32 keylen,
++ int max_keylen)
+ {
+ u32 *desc;
+ struct split_key_result result;
+ dma_addr_t dma_addr_in, dma_addr_out;
+ int ret = -ENOMEM;
+
++ adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
++ adata->keylen_pad = split_key_pad_len(adata->algtype &
++ OP_ALG_ALGSEL_MASK);
++
++#ifdef DEBUG
++ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
++ adata->keylen, adata->keylen_pad);
++ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
++#endif
++
++ if (adata->keylen_pad > max_keylen)
++ return -EINVAL;
++
+ desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ dev_err(jrdev, "unable to allocate key input memory\n");
+@@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ goto out_free;
+ }
+
+- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
++ dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_out)) {
+ dev_err(jrdev, "unable to map key output memory\n");
+@@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /* Sets MDHA up into an HMAC-INIT */
+- append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
++ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
++ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
++ OP_ALG_AS_INIT);
+
+ /*
+ * do a FIFO_LOAD of zero, this will trigger the internal key expansion
+@@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ * FIFO_STORE with the explicit split-key content store
+ * (0x26 output type)
+ */
+- append_fifo_store(desc, dma_addr_out, split_key_len,
++ append_fifo_store(desc, dma_addr_out, adata->keylen,
+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+ #ifdef DEBUG
+@@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+- split_key_pad_len, 1);
++ adata->keylen_pad, 1);
+ #endif
+ }
+
+- dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
++ dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
+ DMA_FROM_DEVICE);
+ out_unmap_in:
+ dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
+index c5588f6d..851a7c86 100644
+--- a/drivers/crypto/caam/key_gen.h
++++ b/drivers/crypto/caam/key_gen.h
+@@ -5,6 +5,36 @@
+ *
+ */
+
++/**
++ * split_key_len - Compute MDHA split key length for a given algorithm
++ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
++ * SHA224, SHA256, SHA384, SHA512.
++ *
++ * Return: MDHA split key length
++ */
++static inline u32 split_key_len(u32 hash)
++{
++ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
++ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
++ u32 idx;
++
++ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
++
++ return (u32)(mdpadlen[idx] * 2);
++}
++
++/**
++ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
++ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
++ * SHA224, SHA256, SHA384, SHA512.
++ *
++ * Return: MDHA split key pad length
++ */
++static inline u32 split_key_pad_len(u32 hash)
++{
++ return ALIGN(split_key_len(hash), 16);
++}
++
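++/*
++ * Worked example (assuming the usual CAAM OP_ALG_ALGSEL encoding, where the
++ * SHA1 selector maps to idx 1 above): for HMAC-SHA1, split_key_len() returns
++ * mdpadlen[1] * 2 = 40 bytes, and split_key_pad_len() rounds this up to
++ * ALIGN(40, 16) = 48 bytes.
++ */
++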
+ struct split_key_result {
+ struct completion completion;
+ int err;
+@@ -12,6 +42,6 @@ struct split_key_result {
+
+ void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
+
+-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+- int split_key_pad_len, const u8 *key_in, u32 keylen,
+- u32 alg_op);
++int gen_split_key(struct device *jrdev, u8 *key_out,
++ struct alginfo * const adata, const u8 *key_in, u32 keylen,
++ int max_keylen);
+diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
+index aaa00dd1..31e59963 100644
+--- a/drivers/crypto/caam/pdb.h
++++ b/drivers/crypto/caam/pdb.h
+@@ -483,6 +483,8 @@ struct dsa_verify_pdb {
+ #define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
+ #define RSA_PDB_D_SHIFT 12
+ #define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
++#define RSA_PDB_Q_SHIFT 12
++#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)
+
+ #define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
+ #define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
+@@ -490,6 +492,8 @@ struct dsa_verify_pdb {
+ #define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
+
+ #define RSA_PRIV_KEY_FRM_1 0
++#define RSA_PRIV_KEY_FRM_2 1
++#define RSA_PRIV_KEY_FRM_3 2
+
+ /**
+ * RSA Encrypt Protocol Data Block
+@@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
+ dma_addr_t d_dma;
+ } __packed;
+
++/**
++ * RSA Decrypt PDB - Private Key Form #2
++ * @sgf : scatter-gather field
++ * @g_dma : dma address of encrypted input data
++ * @f_dma : dma address of output data
++ * @d_dma : dma address of RSA private exponent
++ * @p_dma : dma address of RSA prime factor p of RSA modulus n
++ * @q_dma : dma address of RSA prime factor q of RSA modulus n
++ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
++ * as internal state buffer. It is assumed to be as long as p.
++ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
++ * as internal state buffer. It is assumed to be as long as q.
++ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
++ */
++struct rsa_priv_f2_pdb {
++ u32 sgf;
++ dma_addr_t g_dma;
++ dma_addr_t f_dma;
++ dma_addr_t d_dma;
++ dma_addr_t p_dma;
++ dma_addr_t q_dma;
++ dma_addr_t tmp1_dma;
++ dma_addr_t tmp2_dma;
++ u32 p_q_len;
++} __packed;
++
++/**
++ * RSA Decrypt PDB - Private Key Form #3
++ * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
++ * the RSA modulus.
++ * @sgf : scatter-gather field
++ * @g_dma : dma address of encrypted input data
++ * @f_dma : dma address of output data
++ * @c_dma : dma address of RSA CRT coefficient
++ * @p_dma : dma address of RSA prime factor p of RSA modulus n
++ * @q_dma : dma address of RSA prime factor q of RSA modulus n
++ * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
++ * @dq_dma : dma address of RSA CRT exponent of RSA prime factor q
++ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
++ * as internal state buffer. It is assumed to be as long as p.
++ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
++ * as internal state buffer. It is assumed to be as long as q.
++ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
++ */
++struct rsa_priv_f3_pdb {
++ u32 sgf;
++ dma_addr_t g_dma;
++ dma_addr_t f_dma;
++ dma_addr_t c_dma;
++ dma_addr_t p_dma;
++ dma_addr_t q_dma;
++ dma_addr_t dp_dma;
++ dma_addr_t dq_dma;
++ dma_addr_t tmp1_dma;
++ dma_addr_t tmp2_dma;
++ u32 p_q_len;
++} __packed;
++
+ #endif
+diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
+index 4e4183e6..9e2ce6fe 100644
+--- a/drivers/crypto/caam/pkc_desc.c
++++ b/drivers/crypto/caam/pkc_desc.c
+@@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+ RSA_PRIV_KEY_FRM_1);
+ }
++
++/* Descriptor for RSA Private operation - Private Key Form #2 */
++void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
++{
++ init_job_desc_pdb(desc, 0, sizeof(*pdb));
++ append_cmd(desc, pdb->sgf);
++ append_ptr(desc, pdb->g_dma);
++ append_ptr(desc, pdb->f_dma);
++ append_ptr(desc, pdb->d_dma);
++ append_ptr(desc, pdb->p_dma);
++ append_ptr(desc, pdb->q_dma);
++ append_ptr(desc, pdb->tmp1_dma);
++ append_ptr(desc, pdb->tmp2_dma);
++ append_cmd(desc, pdb->p_q_len);
++ append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
++ RSA_PRIV_KEY_FRM_2);
++}
++
++/* Descriptor for RSA Private operation - Private Key Form #3 */
++void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
++{
++ init_job_desc_pdb(desc, 0, sizeof(*pdb));
++ append_cmd(desc, pdb->sgf);
++ append_ptr(desc, pdb->g_dma);
++ append_ptr(desc, pdb->f_dma);
++ append_ptr(desc, pdb->c_dma);
++ append_ptr(desc, pdb->p_dma);
++ append_ptr(desc, pdb->q_dma);
++ append_ptr(desc, pdb->dp_dma);
++ append_ptr(desc, pdb->dq_dma);
++ append_ptr(desc, pdb->tmp1_dma);
++ append_ptr(desc, pdb->tmp2_dma);
++ append_cmd(desc, pdb->p_q_len);
++ append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
++ RSA_PRIV_KEY_FRM_3);
++}
+diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
+new file mode 100644
+index 00000000..48185d55
+--- /dev/null
++++ b/drivers/crypto/caam/qi.c
+@@ -0,0 +1,797 @@
++/*
++ * CAAM/SEC 4.x QI transport/backend driver
++ * Queue Interface backend functionality
++ *
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016-2017 NXP
++ */
++
++#include <linux/cpumask.h>
++#include <linux/kthread.h>
++#include <linux/fsl_qman.h>
++
++#include "regs.h"
++#include "qi.h"
++#include "desc.h"
++#include "intern.h"
++#include "desc_constr.h"
++
++#define PREHDR_RSLS_SHIFT 31
++
++/*
++ * Use a reasonable backlog of frames (per CPU) as congestion threshold,
++ * so that resources used by the in-flight buffers do not become a memory hog.
++ */
++#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
++
++#define CAAM_QI_ENQUEUE_RETRIES 10000
++
++#define CAAM_NAPI_WEIGHT 63
++
++/*
++ * caam_napi - struct holding CAAM NAPI-related params
++ * @irqtask: IRQ task for QI backend
++ * @p: QMan portal
++ */
++struct caam_napi {
++ struct napi_struct irqtask;
++ struct qman_portal *p;
++};
++
++/*
++ * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
++ * pending responses expected on each cpu.
++ * @caam_napi: CAAM NAPI params
++ * @net_dev: netdev used by NAPI
++ * @rsp_fq: response FQ from CAAM
++ */
++struct caam_qi_pcpu_priv {
++ struct caam_napi caam_napi;
++ struct net_device net_dev;
++ struct qman_fq *rsp_fq;
++} ____cacheline_aligned;
++
++static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
++static DEFINE_PER_CPU(int, last_cpu);
++
++/*
++ * caam_qi_priv - CAAM QI backend private params
++ * @cgr: QMan congestion group
++ * @qi_pdev: platform device for QI backend
++ */
++struct caam_qi_priv {
++ struct qman_cgr cgr;
++ struct platform_device *qi_pdev;
++};
++
++static struct caam_qi_priv qipriv ____cacheline_aligned;
++
++/*
++ * This is written by only one core - the one that initialized the CGR - and
++ * read by multiple cores (all the others).
++ */
++bool caam_congested __read_mostly;
++EXPORT_SYMBOL(caam_congested);
++
++#ifdef CONFIG_DEBUG_FS
++/*
++ * This is a counter for the number of times the congestion group (where all
++ * the request and response queues are) reached congestion. Incremented
++ * each time the congestion callback is called with congested == true.
++ */
++static u64 times_congested;
++#endif
++
++/*
++ * CPU on which the module was initialised. This is required because the QMan
++ * driver requires CGRs to be removed on the same CPU where they were originally
++ * allocated.
++ */
++static int mod_init_cpu;
++
++/*
++ * This is a cache of buffers from which the users of the CAAM QI driver
++ * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
++ * doing malloc on the hotpath.
++ * NOTE: A more elegant solution would be to have some headroom in the frames
++ * being processed. This could be added by the dpaa-ethernet driver.
++ * This would pose a problem for userspace application processing which
++ * cannot know of this limitation. So for now, this will work.
++ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
++ */
++static struct kmem_cache *qi_cache;
++
++int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
++{
++ struct qm_fd fd;
++ int ret;
++ int num_retries = 0;
++
++ fd.cmd = 0;
++ fd.format = qm_fd_compound;
++ fd.cong_weight = req->fd_sgt[1].length;
++ fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
++ DMA_BIDIRECTIONAL);
++ if (dma_mapping_error(qidev, fd.addr)) {
++ dev_err(qidev, "DMA mapping error for QI enqueue request\n");
++ return -EIO;
++ }
++
++ do {
++ ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
++ if (likely(!ret))
++ return 0;
++
++ if (ret != -EBUSY)
++ break;
++ num_retries++;
++ } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
++
++ dev_err(qidev, "qman_enqueue failed: %d\n", ret);
++
++ return ret;
++}
++EXPORT_SYMBOL(caam_qi_enqueue);
++
++static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
++ const struct qm_mr_entry *msg)
++{
++ const struct qm_fd *fd;
++ struct caam_drv_req *drv_req;
++ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
++
++ fd = &msg->ern.fd;
++
++ if (fd->format != qm_fd_compound) {
++ dev_err(qidev, "Non-compound FD from CAAM\n");
++ return;
++ }
++
++ drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
++ if (!drv_req) {
++ dev_err(qidev,
++ "Can't find original request for CAAM response\n");
++ return;
++ }
++
++ dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
++ sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
++
++ drv_req->cbk(drv_req, -EIO);
++}
++
++static struct qman_fq *create_caam_req_fq(struct device *qidev,
++ struct qman_fq *rsp_fq,
++ dma_addr_t hwdesc,
++ int fq_sched_flag)
++{
++ int ret;
++ struct qman_fq *req_fq;
++ struct qm_mcc_initfq opts;
++
++ req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
++ if (!req_fq)
++ return ERR_PTR(-ENOMEM);
++
++ req_fq->cb.ern = caam_fq_ern_cb;
++ req_fq->cb.fqs = NULL;
++
++ ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
++ QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
++ req_fq);
++ if (ret) {
++ dev_err(qidev, "Failed to create session req FQ\n");
++ goto create_req_fq_fail;
++ }
++
++ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
++ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
++ QM_INITFQ_WE_CGID;
++ opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
++ opts.fqd.dest.channel = qm_channel_caam;
++ opts.fqd.dest.wq = 2;
++ opts.fqd.context_b = qman_fq_fqid(rsp_fq);
++ opts.fqd.context_a.hi = upper_32_bits(hwdesc);
++ opts.fqd.context_a.lo = lower_32_bits(hwdesc);
++ opts.fqd.cgid = qipriv.cgr.cgrid;
++
++ ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
++ if (ret) {
++ dev_err(qidev, "Failed to init session req FQ\n");
++ goto init_req_fq_fail;
++ }
++
++ dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
++ smp_processor_id());
++ return req_fq;
++
++init_req_fq_fail:
++ qman_destroy_fq(req_fq, 0);
++create_req_fq_fail:
++ kfree(req_fq);
++ return ERR_PTR(ret);
++}
++
++static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
++{
++ int ret;
++
++ ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
++ QMAN_VOLATILE_FLAG_FINISH,
++ QM_VDQCR_PRECEDENCE_VDQCR |
++ QM_VDQCR_NUMFRAMES_TILLEMPTY);
++ if (ret) {
++ dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
++ return ret;
++ }
++
++ do {
++ struct qman_portal *p;
++
++ p = qman_get_affine_portal(smp_processor_id());
++ qman_p_poll_dqrr(p, 16);
++ } while (fq->flags & QMAN_FQ_STATE_NE);
++
++ return 0;
++}
++
++static int kill_fq(struct device *qidev, struct qman_fq *fq)
++{
++ u32 flags;
++ int ret;
++
++ ret = qman_retire_fq(fq, &flags);
++ if (ret < 0) {
++ dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
++ return ret;
++ }
++
++ if (!ret)
++ goto empty_fq;
++
++ /* Async FQ retirement condition */
++ if (ret == 1) {
++ /* Retry till FQ gets in retired state */
++ do {
++ msleep(20);
++ } while (fq->state != qman_fq_state_retired);
++
++ WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
++ WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
++ }
++
++empty_fq:
++ if (fq->flags & QMAN_FQ_STATE_NE) {
++ ret = empty_retired_fq(qidev, fq);
++ if (ret) {
++ dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
++ fq->fqid);
++ return ret;
++ }
++ }
++
++ ret = qman_oos_fq(fq);
++ if (ret)
++ dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
++
++ qman_destroy_fq(fq, 0);
++ kfree(fq);
++
++ return ret;
++}
++
++static int empty_caam_fq(struct qman_fq *fq)
++{
++ int ret;
++ struct qm_mcr_queryfq_np np;
++
++ /* Wait till the older CAAM FQ gets empty */
++ do {
++ ret = qman_query_fq_np(fq, &np);
++ if (ret)
++ return ret;
++
++ if (!np.frm_cnt)
++ break;
++
++ msleep(20);
++ } while (1);
++
++ /*
++ * Give extra time for pending jobs from this FQ in holding tanks
++ * to get processed
++ */
++ msleep(20);
++ return 0;
++}
++
++int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
++{
++ int ret;
++ u32 num_words;
++ struct qman_fq *new_fq, *old_fq;
++ struct device *qidev = drv_ctx->qidev;
++
++ num_words = desc_len(sh_desc);
++ if (num_words > MAX_SDLEN) {
++ dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
++ return -EINVAL;
++ }
++
++ /* Note down older req FQ */
++ old_fq = drv_ctx->req_fq;
++
++ /* Create a new req FQ in parked state */
++ new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
++ drv_ctx->context_a, 0);
++ if (unlikely(IS_ERR_OR_NULL(new_fq))) {
++ dev_err(qidev, "FQ allocation for shdesc update failed\n");
++ return PTR_ERR(new_fq);
++ }
++
++ /* Hook up new FQ to context so that new requests keep queuing */
++ drv_ctx->req_fq = new_fq;
++
++ /* Empty and remove the older FQ */
++ ret = empty_caam_fq(old_fq);
++ if (ret) {
++ dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
++
++ /* We can revert to older FQ */
++ drv_ctx->req_fq = old_fq;
++
++ if (kill_fq(qidev, new_fq))
++ dev_warn(qidev, "New CAAM FQ kill failed\n");
++
++ return ret;
++ }
++
++ /*
++ * Re-initialise pre-header. Set RSLS and SDLEN.
++ * Update the shared descriptor for driver context.
++ */
++ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
++ num_words);
++ memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
++ dma_sync_single_for_device(qidev, drv_ctx->context_a,
++ sizeof(drv_ctx->sh_desc) +
++ sizeof(drv_ctx->prehdr),
++ DMA_BIDIRECTIONAL);
++
++ /* Put the new FQ in scheduled state */
++ ret = qman_schedule_fq(new_fq);
++ if (ret) {
++ dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
++
++ /*
++ * We can kill new FQ and revert to old FQ.
++ * Since the desc is already modified, treat this as a success case.
++ */
++
++ drv_ctx->req_fq = old_fq;
++
++ if (kill_fq(qidev, new_fq))
++ dev_warn(qidev, "New CAAM FQ kill failed\n");
++ } else if (kill_fq(qidev, old_fq)) {
++ dev_warn(qidev, "Old CAAM FQ kill failed\n");
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(caam_drv_ctx_update);
++
++struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
++ int *cpu,
++ u32 *sh_desc)
++{
++ size_t size;
++ u32 num_words;
++ dma_addr_t hwdesc;
++ struct caam_drv_ctx *drv_ctx;
++ const cpumask_t *cpus = qman_affine_cpus();
++
++ num_words = desc_len(sh_desc);
++ if (num_words > MAX_SDLEN) {
++ dev_err(qidev, "Invalid descriptor len: %d words\n",
++ num_words);
++ return ERR_PTR(-EINVAL);
++ }
++
++ drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
++ if (!drv_ctx)
++ return ERR_PTR(-ENOMEM);
++
++ /*
++ * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
++ * and dma-map them.
++ */
++ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
++ num_words);
++ memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
++ size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
++ hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
++ DMA_BIDIRECTIONAL);
++ if (dma_mapping_error(qidev, hwdesc)) {
++ dev_err(qidev, "DMA map error for preheader + shdesc\n");
++ kfree(drv_ctx);
++ return ERR_PTR(-ENOMEM);
++ }
++ drv_ctx->context_a = hwdesc;
++
++ /* If given CPU does not own the portal, choose another one that does */
++ if (!cpumask_test_cpu(*cpu, cpus)) {
++ int *pcpu = &get_cpu_var(last_cpu);
++
++ *pcpu = cpumask_next(*pcpu, cpus);
++ if (*pcpu >= nr_cpu_ids)
++ *pcpu = cpumask_first(cpus);
++ *cpu = *pcpu;
++
++ put_cpu_var(last_cpu);
++ }
++ drv_ctx->cpu = *cpu;
++
++ /* Find response FQ hooked with this CPU */
++ drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
++
++ /* Attach request FQ */
++ drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
++ QMAN_INITFQ_FLAG_SCHED);
++ if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
++ dev_err(qidev, "create_caam_req_fq failed\n");
++ dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
++ kfree(drv_ctx);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ drv_ctx->qidev = qidev;
++ return drv_ctx;
++}
++EXPORT_SYMBOL(caam_drv_ctx_init);
++
++void *qi_cache_alloc(gfp_t flags)
++{
++ return kmem_cache_alloc(qi_cache, flags);
++}
++EXPORT_SYMBOL(qi_cache_alloc);
++
++void qi_cache_free(void *obj)
++{
++ kmem_cache_free(qi_cache, obj);
++}
++EXPORT_SYMBOL(qi_cache_free);
++
++static int caam_qi_poll(struct napi_struct *napi, int budget)
++{
++ struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
++
++ int cleaned = qman_p_poll_dqrr(np->p, budget);
++
++ if (cleaned < budget) {
++ napi_complete(napi);
++ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
++ }
++
++ return cleaned;
++}
++
++void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
++{
++ if (IS_ERR_OR_NULL(drv_ctx))
++ return;
++
++ /* Remove request FQ */
++ if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
++ dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
++
++ dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
++ sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
++ DMA_BIDIRECTIONAL);
++ kfree(drv_ctx);
++}
++EXPORT_SYMBOL(caam_drv_ctx_rel);
++
++int caam_qi_shutdown(struct device *qidev)
++{
++ int i, ret;
++ struct caam_qi_priv *priv = dev_get_drvdata(qidev);
++ const cpumask_t *cpus = qman_affine_cpus();
++ struct cpumask old_cpumask = current->cpus_allowed;
++
++ for_each_cpu(i, cpus) {
++ struct napi_struct *irqtask;
++
++ irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
++ napi_disable(irqtask);
++ netif_napi_del(irqtask);
++
++ if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
++ dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
++ }
++
++ /*
++ * The QMan driver requires CGRs to be deleted on the same CPU where they
++ * were instantiated. Hence we make module removal execute on the same CPU
++ * where the module was originally inserted.
++ */
++ set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
++
++ ret = qman_delete_cgr(&priv->cgr);
++ if (ret)
++ dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
++ else
++ qman_release_cgrid(priv->cgr.cgrid);
++
++ kmem_cache_destroy(qi_cache);
++
++ /* Now that we're done with the CGRs, restore the cpus allowed mask */
++ set_cpus_allowed_ptr(current, &old_cpumask);
++
++ platform_device_unregister(priv->qi_pdev);
++ return ret;
++}
++
++static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
++{
++ caam_congested = congested;
++
++ if (congested) {
++#ifdef CONFIG_DEBUG_FS
++ times_congested++;
++#endif
++ pr_debug_ratelimited("CAAM entered congestion\n");
++
++ } else {
++ pr_debug_ratelimited("CAAM exited congestion\n");
++ }
++}
++
++static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
++{
++ /*
++ * In case of threaded ISR, for RT kernels in_irq() does not return an
++ * appropriate value, so use in_serving_softirq() to distinguish between
++ * softirq and irq contexts.
++ */
++ if (unlikely(in_irq() || !in_serving_softirq())) {
++ /* Disable QMan IRQ source and invoke NAPI */
++ qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
++ np->p = p;
++ napi_schedule(&np->irqtask);
++ return 1;
++ }
++ return 0;
++}
++
++static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
++ struct qman_fq *rsp_fq,
++ const struct qm_dqrr_entry *dqrr)
++{
++ struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
++ struct caam_drv_req *drv_req;
++ const struct qm_fd *fd;
++ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
++
++ if (caam_qi_napi_schedule(p, caam_napi))
++ return qman_cb_dqrr_stop;
++
++ fd = &dqrr->fd;
++ if (unlikely(fd->status))
++ dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
++
++ if (unlikely(fd->format != qm_fd_compound)) {
++ dev_err(qidev, "Non-compound FD from CAAM\n");
++ return qman_cb_dqrr_consume;
++ }
++
++ drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
++ if (unlikely(!drv_req)) {
++ dev_err(qidev,
++ "Can't find original request for caam response\n");
++ return qman_cb_dqrr_consume;
++ }
++
++ dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
++ sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
++
++ drv_req->cbk(drv_req, fd->status);
++ return qman_cb_dqrr_consume;
++}
++
++static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
++{
++ struct qm_mcc_initfq opts;
++ struct qman_fq *fq;
++ int ret;
++
++ fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
++ if (!fq)
++ return -ENOMEM;
++
++ fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
++
++ ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
++ QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
++ if (ret) {
++ dev_err(qidev, "Rsp FQ create failed\n");
++ kfree(fq);
++ return -ENODEV;
++ }
++
++ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
++ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
++ QM_INITFQ_WE_CGID;
++ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
++ QM_FQCTRL_CGE;
++ opts.fqd.dest.channel = qman_affine_channel(cpu);
++ opts.fqd.dest.wq = 3;
++ opts.fqd.cgid = qipriv.cgr.cgrid;
++ opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
++ QM_STASHING_EXCL_DATA;
++ opts.fqd.context_a.stashing.data_cl = 1;
++ opts.fqd.context_a.stashing.context_cl = 1;
++
++ ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
++ if (ret) {
++ dev_err(qidev, "Rsp FQ init failed\n");
++ kfree(fq);
++ return -ENODEV;
++ }
++
++ per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
++
++ dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
++ return 0;
++}
++
++static int init_cgr(struct device *qidev)
++{
++ int ret;
++ struct qm_mcc_initcgr opts;
++ const u64 cpus = *(u64 *)qman_affine_cpus();
++ const int num_cpus = hweight64(cpus);
++ const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
++
++ ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
++ if (ret) {
++ dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
++ return ret;
++ }
++
++ qipriv.cgr.cb = cgr_cb;
++ memset(&opts, 0, sizeof(opts));
++ opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
++ opts.cgr.cscn_en = QM_CGR_EN;
++ opts.cgr.mode = QMAN_CGR_MODE_FRAME;
++ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
++
++ ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
++ if (ret) {
++ dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
++ qipriv.cgr.cgrid);
++ return ret;
++ }
++
++ dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
++ return 0;
++}
++
++static int alloc_rsp_fqs(struct device *qidev)
++{
++ int ret, i;
++ const cpumask_t *cpus = qman_affine_cpus();
++
++ /* Now create response FQs */
++ for_each_cpu(i, cpus) {
++ ret = alloc_rsp_fq_cpu(qidev, i);
++ if (ret) {
++ dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
++static void free_rsp_fqs(void)
++{
++ int i;
++ const cpumask_t *cpus = qman_affine_cpus();
++
++ for_each_cpu(i, cpus)
++ kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
++}
++
++int caam_qi_init(struct platform_device *caam_pdev)
++{
++ int err, i;
++ struct platform_device *qi_pdev;
++ struct device *ctrldev = &caam_pdev->dev, *qidev;
++ struct caam_drv_private *ctrlpriv;
++ const cpumask_t *cpus = qman_affine_cpus();
++ struct cpumask old_cpumask = current->cpus_allowed;
++ static struct platform_device_info qi_pdev_info = {
++ .name = "caam_qi",
++ .id = PLATFORM_DEVID_NONE
++ };
++
++ /*
++ * QMan requires CGRs to be removed on the same CPU+portal where they were
++ * originally allocated. Hence we need to note down the initialisation CPU
++ * and use the same CPU for module exit.
++ * We select the first CPU from the list of portal-owning CPUs and pin
++ * module init to it.
++ */
++ mod_init_cpu = cpumask_first(cpus);
++ set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
++
++ qi_pdev_info.parent = ctrldev;
++ qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
++ qi_pdev = platform_device_register_full(&qi_pdev_info);
++ if (IS_ERR(qi_pdev))
++ return PTR_ERR(qi_pdev);
++ arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
++
++ ctrlpriv = dev_get_drvdata(ctrldev);
++ qidev = &qi_pdev->dev;
++
++ qipriv.qi_pdev = qi_pdev;
++ dev_set_drvdata(qidev, &qipriv);
++
++ /* Initialize the congestion detection */
++ err = init_cgr(qidev);
++ if (err) {
++ dev_err(qidev, "CGR initialization failed: %d\n", err);
++ platform_device_unregister(qi_pdev);
++ return err;
++ }
++
++ /* Initialise response FQs */
++ err = alloc_rsp_fqs(qidev);
++ if (err) {
++ dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
++ free_rsp_fqs();
++ platform_device_unregister(qi_pdev);
++ return err;
++ }
++
++ /*
++ * Enable the NAPI contexts on each core that has an affine
++ * portal.
++ */
++ for_each_cpu(i, cpus) {
++ struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
++ struct caam_napi *caam_napi = &priv->caam_napi;
++ struct napi_struct *irqtask = &caam_napi->irqtask;
++ struct net_device *net_dev = &priv->net_dev;
++
++ net_dev->dev = *qidev;
++ INIT_LIST_HEAD(&net_dev->napi_list);
++
++ netif_napi_add(net_dev, irqtask, caam_qi_poll,
++ CAAM_NAPI_WEIGHT);
++
++ napi_enable(irqtask);
++ }
++
++ /* Hook up QI device to parent controlling caam device */
++ ctrlpriv->qidev = qidev;
++
++ qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
++ SLAB_CACHE_DMA, NULL);
++ if (!qi_cache) {
++ dev_err(qidev, "Can't allocate CAAM cache\n");
++ free_rsp_fqs();
++ platform_device_unregister(qi_pdev);
++ return -ENOMEM;
++ }
++
++ /* Done with the CGRs; restore the cpus allowed mask */
++ set_cpus_allowed_ptr(current, &old_cpumask);
++#ifdef CONFIG_DEBUG_FS
++ debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
++ &times_congested, &caam_fops_u64_ro);
++#endif
++ dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
++ return 0;
++}
+diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
+new file mode 100644
+index 00000000..0c2e68b3
+--- /dev/null
++++ b/drivers/crypto/caam/qi.h
+@@ -0,0 +1,204 @@
++/*
++ * Public definitions for the CAAM/QI (Queue Interface) backend.
++ *
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016-2017 NXP
++ */
++
++#ifndef __QI_H__
++#define __QI_H__
++
++#include <linux/fsl_qman.h>
++#include "compat.h"
++#include "desc.h"
++#include "desc_constr.h"
++
++/*
++ * CAAM hardware constructs a job descriptor which points to a shared descriptor
++ * (as pointed to by context_a of the to-CAAM FQ).
++ * When the job descriptor is executed by DECO, the whole job descriptor
++ * together with shared descriptor gets loaded in DECO buffer, which is
++ * 64 words (each 32-bit) long.
++ *
++ * The job descriptor constructed by CAAM hardware has the following layout:
++ *
++ * HEADER (1 word)
++ * Shdesc ptr (1 or 2 words)
++ * SEQ_OUT_PTR (1 word)
++ * Out ptr (1 or 2 words)
++ * Out length (1 word)
++ * SEQ_IN_PTR (1 word)
++ * In ptr (1 or 2 words)
++ * In length (1 word)
++ *
++ * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
++ *
++ * Apart from shdesc contents, the total number of words that get loaded in DECO
++ * buffer are '8' or '11'. The remaining words in DECO buffer can be used for
++ * storing shared descriptor.
++ */
++#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
++
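++/*
++ * Worked example (assuming 64-bit DMA addresses, i.e. CAAM_PTR_SZ = 8, and
++ * CAAM_DESC_BYTES_MAX = 256): the 11-word layout above corresponds to
++ * DESC_JOB_IO_LEN = 5 * CAAM_CMD_SZ + 3 * CAAM_PTR_SZ = 20 + 24 = 44 bytes,
++ * so the shared descriptor may occupy up to
++ * MAX_SDLEN = (256 - 44) / 4 = 53 words of the DECO buffer.
++ */
++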
++/* Length of a single buffer in the QI driver memory cache */
++#define CAAM_QI_MEMCACHE_SIZE 768
++
++extern bool caam_congested __read_mostly;
++
++/*
++ * This is the request structure the driver application should fill while
++ * submitting a job to the driver.
++ */
++struct caam_drv_req;
++
++/*
++ * caam_qi_cbk - application's callback function invoked by the driver when the
++ * request has been successfully processed.
++ * @drv_req: original request that was submitted
++ * @status: completion status of request (0 - success, non-zero - error code)
++ */
++typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
++
++enum optype {
++ ENCRYPT,
++ DECRYPT,
++ GIVENCRYPT,
++ NUM_OP
++};
++
++/**
++ * caam_drv_ctx - CAAM/QI backend driver context
++ *
++ * The jobs are processed by the driver against a driver context.
++ * With every cryptographic context, a driver context is attached.
++ * The driver context contains data for private use by the driver.
++ * For the applications, this is an opaque structure.
++ *
++ * @prehdr: preheader placed before shrd desc
++ * @sh_desc: shared descriptor
++ * @context_a: shared descriptor dma address
++ * @req_fq: to-CAAM request frame queue
++ * @rsp_fq: from-CAAM response frame queue
++ * @cpu: cpu on which to receive CAAM response
++ * @op_type: operation type
++ * @qidev: device pointer for CAAM/QI backend
++ */
++struct caam_drv_ctx {
++ u32 prehdr[2];
++ u32 sh_desc[MAX_SDLEN];
++ dma_addr_t context_a;
++ struct qman_fq *req_fq;
++ struct qman_fq *rsp_fq;
++ int cpu;
++ enum optype op_type;
++ struct device *qidev;
++} ____cacheline_aligned;
++
++/**
++ * caam_drv_req - The request structure the driver application should fill while
++ * submitting a job to the driver.
++ * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
++ * buffers.
++ * @cbk: callback function to invoke when job is completed
++ * @app_ctx: arbitrary context attached with request by the application
++ *
++ * The fields mentioned below should not be used by the application.
++ * These are for private use by the driver.
++ *
++ * @drv_ctx: driver context against which this request will be processed
++ */
++struct caam_drv_req {
++ struct qm_sg_entry fd_sgt[2];
++ struct caam_drv_ctx *drv_ctx;
++ caam_qi_cbk cbk;
++ void *app_ctx;
++} ____cacheline_aligned;
++
++/**
++ * caam_drv_ctx_init - Initialise a CAAM/QI driver context
++ *
++ * A CAAM/QI driver context must be attached to each cryptographic context.
++ * This function allocates memory for the CAAM/QI context and returns a handle to
++ * the application. This handle must be submitted along with each enqueue
++ * request to the driver by the application.
++ *
++ * @cpu: CPU on which the application prefers the driver to receive CAAM
++ * responses. The request completion callback is issued from this
++ * CPU.
++ * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
++ * context.
++ *
++ * Returns a driver context on success or negative error code on failure.
++ */
++struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
++ u32 *sh_desc);
++
++/**
++ * caam_qi_enqueue - Submit a request to QI backend driver.
++ *
++ * The request structure must be properly filled as described above.
++ *
++ * @qidev: device pointer for QI backend
++ * @req: CAAM QI request structure
++ *
++ * Returns 0 on success or negative error code on failure.
++ */
++int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
++
++/**
++ * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
++ * or too many CAAM responses are pending to be processed.
++ * @drv_ctx: driver context for which job is to be submitted
++ *
++ * Returns caam congestion status 'true/false'
++ */
++bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
++
++/**
++ * caam_drv_ctx_update - Update QI driver context
++ *
++ * Invoked when the shared descriptor needs to be changed in the driver context.
++ *
++ * @drv_ctx: driver context to be updated
++ * @sh_desc: new shared descriptor pointer to be updated in QI driver context
++ *
++ * Returns 0 on success or negative error code on failure.
++ */
++int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
++
++/**
++ * caam_drv_ctx_rel - Release a QI driver context
++ * @drv_ctx: context to be released
++ */
++void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
++
++int caam_qi_init(struct platform_device *pdev);
++int caam_qi_shutdown(struct device *dev);
++
++/**
++ * qi_cache_alloc - Allocate buffers from CAAM-QI cache
++ *
++ * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
++ * to be allocated on the hotpath. Instead of using kmalloc, one can use the
++ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
++ * have a size of CAAM_QI_MEMCACHE_SIZE bytes, which is large enough to host
++ * the driver's per-request data, including its S/G tables.
++ *
++ * @flags: flags that would be used for the equivalent malloc(..) call
++ *
++ * Returns a pointer to a retrieved buffer on success or NULL on failure.
++ */
++void *qi_cache_alloc(gfp_t flags);
++
++/**
++ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
++ *
++ * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
++ * the buffer previously allocated by a qi_cache_alloc call.
++ * No checking is done; the call is passed straight through to
++ * kmem_cache_free(...)
++ *
++ * @obj: object previously allocated using qi_cache_alloc()
++ */
++void qi_cache_free(void *obj);
++
++#endif /* __QI_H__ */
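For orientation, here is a minimal sketch of how a backend user such as
caamalg-qi is expected to drive this interface. The driver context would be
created once per cryptographic context with caam_drv_ctx_init() and released
with caam_drv_ctx_rel(); my_done() and my_submit() are illustrative names and
the DMA mapping of fd_sgt[] is elided, so this is a sketch, not part of the
patch:

	static void my_done(struct caam_drv_req *drv_req, u32 status)
	{
		/* runs on the CPU that was passed to caam_drv_ctx_init() */
		if (status)
			pr_err("CAAM job failed: 0x%x\n", status);
	}

	static int my_submit(struct device *qidev, struct caam_drv_ctx *drv_ctx)
	{
		struct caam_drv_req *req;
		int ret;

		/* hot-path allocations go through the QI memory cache */
		req = qi_cache_alloc(GFP_ATOMIC);
		if (!req)
			return -ENOMEM;

		req->drv_ctx = drv_ctx;
		req->cbk = my_done;
		req->app_ctx = NULL;
		/*
		 * fd_sgt[0] (output) and fd_sgt[1] (input) must be filled
		 * with DMA-mapped S/G entries before enqueueing.
		 */

		if (caam_drv_ctx_busy(drv_ctx))	/* back off when congested */
			ret = -EBUSY;
		else
			ret = caam_qi_enqueue(qidev, req);
		if (ret)
			qi_cache_free(req);
		return ret;
	}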
+diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
+index 84d2f838..74eb8c6c 100644
+--- a/drivers/crypto/caam/regs.h
++++ b/drivers/crypto/caam/regs.h
+@@ -2,6 +2,7 @@
+ * CAAM hardware register-level view
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
+ */
+
+ #ifndef REGS_H
+@@ -67,6 +68,7 @@
+ */
+
+ extern bool caam_little_end;
++extern bool caam_imx;
+
+ #define caam_to_cpu(len) \
+ static inline u##len caam##len ## _to_cpu(u##len val) \
+@@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem *reg)
+ #else /* CONFIG_64BIT */
+ static inline void wr_reg64(void __iomem *reg, u64 data)
+ {
+-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+- if (caam_little_end) {
++ if (!caam_imx && caam_little_end) {
+ wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
+ wr_reg32((u32 __iomem *)(reg), data);
+- } else
+-#endif
+- {
++ } else {
+ wr_reg32((u32 __iomem *)(reg), data >> 32);
+ wr_reg32((u32 __iomem *)(reg) + 1, data);
+ }
+@@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data)
+
+ static inline u64 rd_reg64(void __iomem *reg)
+ {
+-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+- if (caam_little_end)
++ if (!caam_imx && caam_little_end)
+ return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
+ (u64)rd_reg32((u32 __iomem *)(reg)));
+- else
+-#endif
+- return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
+- (u64)rd_reg32((u32 __iomem *)(reg) + 1));
++
++ return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
++ (u64)rd_reg32((u32 __iomem *)(reg) + 1));
+ }
+ #endif /* CONFIG_64BIT */
+
++static inline u64 cpu_to_caam_dma64(dma_addr_t value)
++{
++ if (caam_imx)
++ return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
++ (u64)cpu_to_caam32(upper_32_bits(value)));
++
++ return cpu_to_caam64(value);
++}
++
++static inline u64 caam_dma64_to_cpu(u64 value)
++{
++ if (caam_imx)
++ return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
++ (u64)caam32_to_cpu(upper_32_bits(value)));
++
++ return caam64_to_cpu(value);
++}
++
+ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+-#ifdef CONFIG_SOC_IMX7D
+-#define cpu_to_caam_dma(value) \
+- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
+- (u64)cpu_to_caam32(upper_32_bits(value)))
+-#define caam_dma_to_cpu(value) \
+- (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
+- (u64)caam32_to_cpu(upper_32_bits(value)))
+-#else
+-#define cpu_to_caam_dma(value) cpu_to_caam64(value)
+-#define caam_dma_to_cpu(value) caam64_to_cpu(value)
+-#endif /* CONFIG_SOC_IMX7D */
++#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
++#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
+ #else
+ #define cpu_to_caam_dma(value) cpu_to_caam32(value)
+ #define caam_dma_to_cpu(value) caam32_to_cpu(value)
+-#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+-
+-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+-#define cpu_to_caam_dma64(value) \
+- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
+- (u64)cpu_to_caam32(upper_32_bits(value)))
+-#else
+-#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
+-#endif
++#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+
+ /*
+ * jr_outentry
+@@ -293,6 +291,7 @@ struct caam_perfmon {
+ u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
+ #define CTPR_MS_QI_SHIFT 25
+ #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
++#define CTPR_MS_DPAA2 BIT(13)
+ #define CTPR_MS_VIRT_EN_INCL 0x00000001
+ #define CTPR_MS_VIRT_EN_POR 0x00000002
+ #define CTPR_MS_PG_SZ_MASK 0x10
+@@ -628,6 +627,8 @@ struct caam_job_ring {
+ #define JRSTA_DECOERR_INVSIGN 0x86
+ #define JRSTA_DECOERR_DSASIGN 0x87
+
++#define JRSTA_QIERR_ERROR_MASK 0x00ff
++
+ #define JRSTA_CCBERR_JUMP 0x08000000
+ #define JRSTA_CCBERR_INDEX_MASK 0xff00
+ #define JRSTA_CCBERR_INDEX_SHIFT 8
+diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
+new file mode 100644
+index 00000000..3b3cabc4
+--- /dev/null
++++ b/drivers/crypto/caam/sg_sw_qm.h
+@@ -0,0 +1,126 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016-2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __SG_SW_QM_H
++#define __SG_SW_QM_H
++
++#include <linux/fsl_qman.h>
++#include "regs.h"
++
++static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
++{
++ dma_addr_t addr = qm_sg_ptr->opaque;
++
++ qm_sg_ptr->opaque = cpu_to_caam64(addr);
++ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
++}
++
++static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
++ u32 len, u16 offset)
++{
++ qm_sg_ptr->addr = dma;
++ qm_sg_ptr->length = len;
++ qm_sg_ptr->__reserved2 = 0;
++ qm_sg_ptr->bpid = 0;
++ qm_sg_ptr->__reserved3 = 0;
++ qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
++
++ cpu_to_hw_sg(qm_sg_ptr);
++}
++
++static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
++ dma_addr_t dma, u32 len, u16 offset)
++{
++ qm_sg_ptr->extension = 0;
++ qm_sg_ptr->final = 0;
++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
++}
++
++static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
++ dma_addr_t dma, u32 len, u16 offset)
++{
++ qm_sg_ptr->extension = 0;
++ qm_sg_ptr->final = 1;
++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
++}
++
++static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
++ dma_addr_t dma, u32 len, u16 offset)
++{
++ qm_sg_ptr->extension = 1;
++ qm_sg_ptr->final = 0;
++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
++}
++
++static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
++ dma_addr_t dma, u32 len,
++ u16 offset)
++{
++ qm_sg_ptr->extension = 1;
++ qm_sg_ptr->final = 1;
++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
++}
++
++/*
++ * convert scatterlist to h/w link table format
++ * but does not have final bit; instead, returns last entry
++ */
++static inline struct qm_sg_entry *
++sg_to_qm_sg(struct scatterlist *sg, int sg_count,
++ struct qm_sg_entry *qm_sg_ptr, u16 offset)
++{
++ while (sg_count && sg) {
++ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
++ sg_dma_len(sg), offset);
++ qm_sg_ptr++;
++ sg = sg_next(sg);
++ sg_count--;
++ }
++ return qm_sg_ptr - 1;
++}
++
++/*
++ * convert scatterlist to h/w link table format
++ * scatterlist must have been previously dma mapped
++ */
++static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
++ struct qm_sg_entry *qm_sg_ptr, u16 offset)
++{
++ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
++
++ qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
++ qm_sg_ptr->final = 1;
++ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
++}
++
++#endif /* __SG_SW_QM_H */
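A short usage sketch for these helpers, assuming the caller owns a table with
room for all mapped entries; my_make_qm_sg() is an illustrative name, not part
of the patch:

	static int my_make_qm_sg(struct device *dev, struct scatterlist *sg,
				 int nents, struct qm_sg_entry *tbl)
	{
		int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

		if (!mapped)
			return -ENOMEM;

		/* entries are converted to CAAM endianness via cpu_to_hw_sg() */
		sg_to_qm_sg_last(sg, mapped, tbl, 0);
		return mapped;
	}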
+diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
+new file mode 100644
+index 00000000..31b44075
+--- /dev/null
++++ b/drivers/crypto/caam/sg_sw_qm2.h
+@@ -0,0 +1,81 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _SG_SW_QM2_H_
++#define _SG_SW_QM2_H_
++
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
++
++static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
++ dma_addr_t dma, u32 len, u16 offset)
++{
++ dpaa2_sg_set_addr(qm_sg_ptr, dma);
++ dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
++ dpaa2_sg_set_final(qm_sg_ptr, false);
++ dpaa2_sg_set_len(qm_sg_ptr, len);
++ dpaa2_sg_set_bpid(qm_sg_ptr, 0);
++ dpaa2_sg_set_offset(qm_sg_ptr, offset);
++}
++
++/*
++ * convert scatterlist to h/w link table format
++ * but does not have final bit; instead, returns last entry
++ */
++static inline struct dpaa2_sg_entry *
++sg_to_qm_sg(struct scatterlist *sg, int sg_count,
++ struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
++{
++ while (sg_count && sg) {
++ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
++ sg_dma_len(sg), offset);
++ qm_sg_ptr++;
++ sg = sg_next(sg);
++ sg_count--;
++ }
++ return qm_sg_ptr - 1;
++}
++
++/*
++ * convert scatterlist to h/w link table format
++ * scatterlist must have been previously dma mapped
++ */
++static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
++ struct dpaa2_sg_entry *qm_sg_ptr,
++ u16 offset)
++{
++ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
++ dpaa2_sg_set_final(qm_sg_ptr, true);
++}
++
++#endif /* _SG_SW_QM2_H_ */
+diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
+index 41cd5a35..936b1b63 100644
+--- a/drivers/crypto/caam/sg_sw_sec4.h
++++ b/drivers/crypto/caam/sg_sw_sec4.h
+@@ -5,9 +5,19 @@
+ *
+ */
+
++#ifndef _SG_SW_SEC4_H_
++#define _SG_SW_SEC4_H_
++
++#include "ctrl.h"
+ #include "regs.h"
++#include "sg_sw_qm2.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+
+-struct sec4_sg_entry;
++struct sec4_sg_entry {
++ u64 ptr;
++ u32 len;
++ u32 bpid_offset;
++};
+
+ /*
+ * convert single dma address to h/w link table format
+@@ -15,9 +25,15 @@ struct sec4_sg_entry;
+ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+ {
+- sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
+- sec4_sg_ptr->len = cpu_to_caam32(len);
+- sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
++ if (caam_dpaa2) {
++ dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
++ offset);
++ } else {
++ sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
++ sec4_sg_ptr->len = cpu_to_caam32(len);
++ sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
++ SEC4_SG_OFFSET_MASK);
++ }
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
+@@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+ return sec4_sg_ptr - 1;
+ }
+
++static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
++{
++ if (caam_dpaa2)
++ dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
++ else
++ sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
++}
++
+ /*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+@@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+ u16 offset)
+ {
+ sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+- sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+-}
+-
+-static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
+- struct scatterlist *sg, unsigned int total,
+- struct sec4_sg_entry *sec4_sg_ptr)
+-{
+- do {
+- unsigned int len = min(sg_dma_len(sg), total);
+-
+- dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
+- sec4_sg_ptr++;
+- sg = sg_next(sg);
+- total -= len;
+- } while (total);
+- return sec4_sg_ptr - 1;
++ sg_to_sec4_set_last(sec4_sg_ptr);
+ }
+
+-/* derive number of elements in scatterlist, but return 0 for 1 */
+-static inline int sg_count(struct scatterlist *sg_list, int nbytes)
+-{
+- int sg_nents = sg_nents_for_len(sg_list, nbytes);
+-
+- if (likely(sg_nents == 1))
+- return 0;
+-
+- return sg_nents;
+-}
++#endif /* _SG_SW_SEC4_H_ */
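The same pattern in sec4 form: a one-entry table for a single DMA-mapped
buffer, with my_single_sec4_sg() as an illustrative name only:

	static void my_single_sec4_sg(struct sec4_sg_entry *tbl, dma_addr_t dma,
				      u32 len)
	{
		dma_to_sec4_sg_one(tbl, dma, len, 0);
		/* picks the DPAA2 or sec4 "final" encoding as appropriate */
		sg_to_sec4_set_last(tbl);
	}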
+diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
+index ef5d394f..cc8deece 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
+@@ -516,7 +516,7 @@ static int rsi_probe(struct usb_interface *pfunction,
+
+ /**
+ * rsi_disconnect() - This function performs the reverse of the probe function,
+- * it deintialize the driver structure.
++ * it deinitializes the driver structure.
+ * @pfunction: Pointer to the USB interface structure.
+ *
+ * Return: None.
+diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
+index defffa75..ec88ed9c 100644
+--- a/drivers/staging/wilc1000/linux_wlan.c
++++ b/drivers/staging/wilc1000/linux_wlan.c
+@@ -211,7 +211,7 @@ static void deinit_irq(struct net_device *dev)
+ vif = netdev_priv(dev);
+ wilc = vif->wilc;
+
+- /* Deintialize IRQ */
++ /* Deinitialize IRQ */
+ if (wilc->dev_irq_num) {
+ free_irq(wilc->dev_irq_num, wilc);
+ gpio_free(wilc->gpio);
+diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+index 60d8b055..02d3e721 100644
+--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
++++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+@@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_device *net)
+ del_timer_sync(&wilc_during_ip_timer);
+
+ if (s32Error)
+- netdev_err(net, "Error while deintializing host interface\n");
++ netdev_err(net, "Error while deinitializing host interface\n");
+
+ return s32Error;
+ }
+diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
+new file mode 100644
+index 00000000..e328b524
+--- /dev/null
++++ b/include/crypto/acompress.h
+@@ -0,0 +1,269 @@
++/*
++ * Asynchronous Compression operations
++ *
++ * Copyright (c) 2016, Intel Corporation
++ * Authors: Weigang Li <weigang.li@intel.com>
++ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++#ifndef _CRYPTO_ACOMP_H
++#define _CRYPTO_ACOMP_H
++#include <linux/crypto.h>
++
++#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
++
++/**
++ * struct acomp_req - asynchronous (de)compression request
++ *
++ * @base: Common attributes for asynchronous crypto requests
++ * @src: Source Data
++ * @dst: Destination data
++ * @slen: Size of the input buffer
++ * @dlen: Size of the output buffer and number of bytes produced
++ * @flags: Internal flags
++ * @__ctx: Start of private context data
++ */
++struct acomp_req {
++ struct crypto_async_request base;
++ struct scatterlist *src;
++ struct scatterlist *dst;
++ unsigned int slen;
++ unsigned int dlen;
++ u32 flags;
++ void *__ctx[] CRYPTO_MINALIGN_ATTR;
++};
++
++/**
++ * struct crypto_acomp - user-instantiated objects which encapsulate
++ * algorithms and core processing logic
++ *
++ * @compress: Function performs a compress operation
++ * @decompress: Function performs a de-compress operation
++ * @dst_free: Frees destination buffer if allocated inside the
++ * algorithm
++ * @reqsize: Context size for (de)compression requests
++ * @base: Common crypto API algorithm data structure
++ */
++struct crypto_acomp {
++ int (*compress)(struct acomp_req *req);
++ int (*decompress)(struct acomp_req *req);
++ void (*dst_free)(struct scatterlist *dst);
++ unsigned int reqsize;
++ struct crypto_tfm base;
++};
++
++/**
++ * struct acomp_alg - asynchronous compression algorithm
++ *
++ * @compress: Function performs a compress operation
++ * @decompress: Function performs a de-compress operation
++ * @dst_free: Frees destination buffer if allocated inside the algorithm
++ * @init: Initialize the cryptographic transformation object.
++ * This function is used to initialize the cryptographic
++ * transformation object. This function is called only once at
++ * the instantiation time, right after the transformation context
++ * was allocated. In case the cryptographic hardware has some
++ * special requirements which need to be handled by software, this
++ * function shall check for the precise requirement of the
++ * transformation and put any software fallbacks in place.
++ * @exit: Deinitialize the cryptographic transformation object. This is a
++ * counterpart to @init, used to remove various changes set in
++ * @init.
++ *
++ * @reqsize: Context size for (de)compression requests
++ * @base: Common crypto API algorithm data structure
++ */
++struct acomp_alg {
++ int (*compress)(struct acomp_req *req);
++ int (*decompress)(struct acomp_req *req);
++ void (*dst_free)(struct scatterlist *dst);
++ int (*init)(struct crypto_acomp *tfm);
++ void (*exit)(struct crypto_acomp *tfm);
++ unsigned int reqsize;
++ struct crypto_alg base;
++};
++
++/**
++ * DOC: Asynchronous Compression API
++ *
++ * The Asynchronous Compression API is used with the algorithms of type
++ * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
++ */
++
++/**
++ * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
++ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
++ * compression algorithm e.g. "deflate"
++ * @type: specifies the type of the algorithm
++ * @mask: specifies the mask for the algorithm
++ *
++ * Allocate a handle for a compression algorithm. The returned struct
++ * crypto_acomp is the handle that is required for any subsequent
++ * API invocation for the compression operations.
++ *
++ * Return: allocated handle in case of success; IS_ERR() is true in case
++ * of an error, PTR_ERR() returns the error code.
++ */
++struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
++ u32 mask);
++
++static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
++{
++ return &tfm->base;
++}
++
++static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
++{
++ return container_of(alg, struct acomp_alg, base);
++}
++
++static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
++{
++ return container_of(tfm, struct crypto_acomp, base);
++}
++
++static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
++{
++ return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
++}
++
++static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
++{
++ return tfm->reqsize;
++}
++
++static inline void acomp_request_set_tfm(struct acomp_req *req,
++ struct crypto_acomp *tfm)
++{
++ req->base.tfm = crypto_acomp_tfm(tfm);
++}
++
++static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
++{
++ return __crypto_acomp_tfm(req->base.tfm);
++}
++
++/**
++ * crypto_free_acomp() -- free ACOMPRESS tfm handle
++ *
++ * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
++ */
++static inline void crypto_free_acomp(struct crypto_acomp *tfm)
++{
++ crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
++}
++
++static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
++{
++ type &= ~CRYPTO_ALG_TYPE_MASK;
++ type |= CRYPTO_ALG_TYPE_ACOMPRESS;
++ mask |= CRYPTO_ALG_TYPE_MASK;
++
++ return crypto_has_alg(alg_name, type, mask);
++}
++
++/**
++ * acomp_request_alloc() -- allocates asynchronous (de)compression request
++ *
++ * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
++ *
++ * Return: allocated handle in case of success or NULL in case of an error
++ */
++struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
++
++/**
++ * acomp_request_free() -- zeroize and free asynchronous (de)compression
++ * request as well as the output buffer if allocated
++ * inside the algorithm
++ *
++ * @req: request to free
++ */
++void acomp_request_free(struct acomp_req *req);
++
++/**
++ * acomp_request_set_callback() -- Sets an asynchronous callback
++ *
++ * Callback will be called when an asynchronous operation on a given
++ * request is finished.
++ *
++ * @req: request that the callback will be set for
++ * @flgs: flags specifying, for instance, whether the operation may backlog
++ * @cmpl: callback which will be called
++ * @data: private data used by the caller
++ */
++static inline void acomp_request_set_callback(struct acomp_req *req,
++ u32 flgs,
++ crypto_completion_t cmpl,
++ void *data)
++{
++ req->base.complete = cmpl;
++ req->base.data = data;
++ req->base.flags = flgs;
++}
++
++/**
++ * acomp_request_set_params() -- Sets request parameters
++ *
++ * Sets parameters required by an acomp operation
++ *
++ * @req: asynchronous compress request
++ * @src: pointer to input buffer scatterlist
++ * @dst: pointer to output buffer scatterlist. If this is NULL, the
++ * acomp layer will allocate the output memory
++ * @slen: size of the input buffer
++ * @dlen: size of the output buffer. If dst is NULL, this can be used by
++ * the user to specify the maximum amount of memory to allocate
++ */
++static inline void acomp_request_set_params(struct acomp_req *req,
++ struct scatterlist *src,
++ struct scatterlist *dst,
++ unsigned int slen,
++ unsigned int dlen)
++{
++ req->src = src;
++ req->dst = dst;
++ req->slen = slen;
++ req->dlen = dlen;
++
++ if (!req->dst)
++ req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
++}
++
++/**
++ * crypto_acomp_compress() -- Invoke asynchronous compress operation
++ *
++ * Function invokes the asynchronous compress operation
++ *
++ * @req: asynchronous compress request
++ *
++ * Return: zero on success; error code in case of error
++ */
++static inline int crypto_acomp_compress(struct acomp_req *req)
++{
++ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
++
++ return tfm->compress(req);
++}
++
++/**
++ * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
++ *
++ * Function invokes the asynchronous decompress operation
++ *
++ * @req: asynchronous compress request
++ *
++ * Return: zero on success; error code in case of error
++ */
++static inline int crypto_acomp_decompress(struct acomp_req *req)
++{
++ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
++
++ return tfm->decompress(req);
++}
++
++#endif
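A hedged end-to-end sketch of the acomp calling convention introduced above:
allocate a transform, attach scatterlists, and wait for completion if the
implementation runs asynchronously. my_acomp_done()/my_deflate() and the
on-stack wait structure are illustrative, not part of this API:

	#include <crypto/acompress.h>
	#include <linux/completion.h>
	#include <linux/scatterlist.h>

	struct my_wait {
		struct completion done;
		int err;
	};

	static void my_acomp_done(struct crypto_async_request *base, int err)
	{
		struct my_wait *w = base->data;

		w->err = err;
		complete(&w->done);
	}

	static int my_deflate(struct scatterlist *src, unsigned int slen,
			      struct scatterlist *dst, unsigned int dlen)
	{
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		struct my_wait w;
		int ret;

		init_completion(&w.done);

		tfm = crypto_alloc_acomp("deflate", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			ret = -ENOMEM;
			goto out_tfm;
		}

		acomp_request_set_params(req, src, dst, slen, dlen);
		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   my_acomp_done, &w);

		ret = crypto_acomp_compress(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			/* -EBUSY with MAY_BACKLOG: backlogged, still completes */
			wait_for_completion(&w.done);
			ret = w.err;
		}

		acomp_request_free(req);
	out_tfm:
		crypto_free_acomp(tfm);
		return ret;
	}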
+diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
+new file mode 100644
+index 00000000..1de2b5af
+--- /dev/null
++++ b/include/crypto/internal/acompress.h
+@@ -0,0 +1,81 @@
++/*
++ * Asynchronous Compression operations
++ *
++ * Copyright (c) 2016, Intel Corporation
++ * Authors: Weigang Li <weigang.li@intel.com>
++ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++#ifndef _CRYPTO_ACOMP_INT_H
++#define _CRYPTO_ACOMP_INT_H
++#include <crypto/acompress.h>
++
++/*
++ * Transform internal helpers.
++ */
++static inline void *acomp_request_ctx(struct acomp_req *req)
++{
++ return req->__ctx;
++}
++
++static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
++{
++ return tfm->base.__crt_ctx;
++}
++
++static inline void acomp_request_complete(struct acomp_req *req,
++ int err)
++{
++ req->base.complete(&req->base, err);
++}
++
++static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
++{
++ return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
++}
++
++static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
++{
++ struct acomp_req *req;
++
++ req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
++ if (likely(req))
++ acomp_request_set_tfm(req, tfm);
++ return req;
++}
++
++static inline void __acomp_request_free(struct acomp_req *req)
++{
++ kzfree(req);
++}
++
++/**
++ * crypto_register_acomp() -- Register asynchronous compression algorithm
++ *
++ * Function registers an implementation of an asynchronous
++ * compression algorithm
++ *
++ * @alg: algorithm definition
++ *
++ * Return: zero on success; error code in case of error
++ */
++int crypto_register_acomp(struct acomp_alg *alg);
++
++/**
++ * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
++ *
++ * Function unregisters an implementation of an asynchronous
++ * compression algorithm
++ *
++ * @alg: algorithm definition
++ *
++ * Return: zero on success; error code in case of error
++ */
++int crypto_unregister_acomp(struct acomp_alg *alg);
++
++#endif
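A skeleton of what crypto_register_acomp() expects from an implementation; the
"deflate-mydrv" naming and the stubbed handlers are placeholders, not a real
driver:

	#include <crypto/internal/acompress.h>
	#include <linux/module.h>

	static int my_compress(struct acomp_req *req)
	{
		/* consume req->src/req->slen, produce req->dst/req->dlen */
		return -EOPNOTSUPP;
	}

	static int my_decompress(struct acomp_req *req)
	{
		return -EOPNOTSUPP;
	}

	static struct acomp_alg my_acomp = {
		.compress	= my_compress,
		.decompress	= my_decompress,
		.base		= {
			.cra_name	 = "deflate",
			.cra_driver_name = "deflate-mydrv",
			.cra_priority	 = 300,
			.cra_module	 = THIS_MODULE,
		},
	};

	static int __init my_acomp_mod_init(void)
	{
		/* cra_type/cra_flags are filled in by the registration helper */
		return crypto_register_acomp(&my_acomp);
	}

	static void __exit my_acomp_mod_exit(void)
	{
		crypto_unregister_acomp(&my_acomp);
	}

	module_init(my_acomp_mod_init);
	module_exit(my_acomp_mod_exit);
	MODULE_LICENSE("GPL");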
+diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
+new file mode 100644
+index 00000000..3fda3c56
+--- /dev/null
++++ b/include/crypto/internal/scompress.h
+@@ -0,0 +1,136 @@
++/*
++ * Synchronous Compression operations
++ *
++ * Copyright 2015 LG Electronics Inc.
++ * Copyright (c) 2016, Intel Corporation
++ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++#ifndef _CRYPTO_SCOMP_INT_H
++#define _CRYPTO_SCOMP_INT_H
++#include <linux/crypto.h>
++
++#define SCOMP_SCRATCH_SIZE 131072
++
++struct crypto_scomp {
++ struct crypto_tfm base;
++};
++
++/**
++ * struct scomp_alg - synchronous compression algorithm
++ *
++ * @alloc_ctx: Function allocates algorithm specific context
++ * @free_ctx: Function frees context allocated with alloc_ctx
++ * @compress: Function performs a compress operation
++ * @decompress: Function performs a de-compress operation
++ * @init: Initialize the cryptographic transformation object.
++ * This function is used to initialize the cryptographic
++ * transformation object. This function is called only once at
++ * the instantiation time, right after the transformation context
++ * was allocated. In case the cryptographic hardware has some
++ * special requirements which need to be handled by software, this
++ * function shall check for the precise requirement of the
++ * transformation and put any software fallbacks in place.
++ * @exit: Deinitialize the cryptographic transformation object. This is a
++ * counterpart to @init, used to remove various changes set in
++ * @init.
++ * @base: Common crypto API algorithm data structure
++ */
++struct scomp_alg {
++ void *(*alloc_ctx)(struct crypto_scomp *tfm);
++ void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
++ int (*compress)(struct crypto_scomp *tfm, const u8 *src,
++ unsigned int slen, u8 *dst, unsigned int *dlen,
++ void *ctx);
++ int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
++ unsigned int slen, u8 *dst, unsigned int *dlen,
++ void *ctx);
++ struct crypto_alg base;
++};
++
++static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
++{
++ return container_of(alg, struct scomp_alg, base);
++}
++
++static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
++{
++ return container_of(tfm, struct crypto_scomp, base);
++}
++
++static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
++{
++ return &tfm->base;
++}
++
++static inline void crypto_free_scomp(struct crypto_scomp *tfm)
++{
++ crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
++}
++
++static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
++{
++ return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
++}
++
++static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
++{
++ return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
++}
++
++static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
++ void *ctx)
++{
++ return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
++}
++
++static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
++ const u8 *src, unsigned int slen,
++ u8 *dst, unsigned int *dlen, void *ctx)
++{
++ return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
++}
++
++static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
++ const u8 *src, unsigned int slen,
++ u8 *dst, unsigned int *dlen,
++ void *ctx)
++{
++ return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
++ ctx);
++}
++
++int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
++struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
++void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
++
++/**
++ * crypto_register_scomp() -- Register synchronous compression algorithm
++ *
++ * Function registers an implementation of a synchronous
++ * compression algorithm
++ *
++ * @alg: algorithm definition
++ *
++ * Return: zero on success; error code in case of error
++ */
++int crypto_register_scomp(struct scomp_alg *alg);
++
++/**
++ * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
++ *
++ * Function unregisters an implementation of a synchronous
++ * compression algorithm
++ *
++ * @alg: algorithm definition
++ *
++ * Return: zero on success; error code in case of error
++ */
++int crypto_unregister_scomp(struct scomp_alg *alg);
++
++#endif
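A one-shot helper showing the scomp calling convention with flat buffers and a
per-call context; my_scomp_once() is illustrative only:

	static int my_scomp_once(struct crypto_scomp *tfm, const u8 *in,
				 unsigned int inlen, u8 *out,
				 unsigned int *outlen)
	{
		void *ctx = crypto_scomp_alloc_ctx(tfm);
		int ret;

		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		/* *outlen is both the available and the produced length */
		ret = crypto_scomp_compress(tfm, in, inlen, out, outlen, ctx);
		crypto_scomp_free_ctx(tfm, ctx);
		return ret;
	}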
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index 7cee5551..8348d83d 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -50,6 +50,8 @@
+ #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
+ #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
+ #define CRYPTO_ALG_TYPE_KPP 0x00000008
++#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
++#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
+ #define CRYPTO_ALG_TYPE_RNG 0x0000000c
+ #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
+ #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
+@@ -60,6 +62,7 @@
+ #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
+ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
+ #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
++#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
+
+ #define CRYPTO_ALG_LARVAL 0x00000010
+ #define CRYPTO_ALG_DEAD 0x00000020
+diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
+index 79b5ded2..11d21fce 100644
+--- a/include/uapi/linux/cryptouser.h
++++ b/include/uapi/linux/cryptouser.h
+@@ -46,6 +46,7 @@ enum crypto_attr_type_t {
+ CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
+ CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
+ CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
++ CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
+ __CRYPTOCFGA_MAX
+
+ #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
+@@ -112,5 +113,9 @@ struct crypto_report_kpp {
+ char type[CRYPTO_MAX_NAME];
+ };
+
++struct crypto_report_acomp {
++ char type[CRYPTO_MAX_NAME];
++};
++
+ #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
+ sizeof(struct crypto_report_blkcipher))
+diff --git a/scripts/spelling.txt b/scripts/spelling.txt
+index 163c720d..8392f89c 100644
+--- a/scripts/spelling.txt
++++ b/scripts/spelling.txt
+@@ -305,6 +305,9 @@ defintion||definition
+ defintions||definitions
+ defualt||default
+ defult||default
++deintializing||deinitializing
++deintialize||deinitialize
++deintialized||deinitialized
+ deivce||device
+ delared||declared
+ delare||declare
+diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
+index 504c7cd7..d8577374 100644
+--- a/sound/soc/amd/acp-pcm-dma.c
++++ b/sound/soc/amd/acp-pcm-dma.c
+@@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mmio)
+ return 0;
+ }
+
+-/* Deintialize ACP */
++/* Deinitialize ACP */
+ static int acp_deinit(void __iomem *acp_mmio)
+ {
+ u32 val;
+--
+2.14.1
+
diff --git a/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch b/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch
new file mode 100644
index 0000000000..29fc301abd
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch
@@ -0,0 +1,3781 @@
+From 659603c5f6cbc3d39922d4374df25ae4627d0e88 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:12:20 +0800
+Subject: [PATCH] dma: support layerscape
+
+This is an integrated patch for layerscape dma support.
+
+Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/dma/Kconfig | 14 +
+ drivers/dma/Makefile | 2 +
+ drivers/dma/dpaa2-qdma/Kconfig | 8 +
+ drivers/dma/dpaa2-qdma/Makefile | 8 +
+ drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 +++++++++++++++++++++++++
+ drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 262 +++++++
+ drivers/dma/dpaa2-qdma/dpdmai.c | 454 ++++++++++++
+ drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++++++
+ drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++++
+ drivers/dma/fsl-qdma.c | 1201 +++++++++++++++++++++++++++++++
+ 10 files changed, 3678 insertions(+)
+ create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
+ create mode 100644 drivers/dma/dpaa2-qdma/Makefile
+ create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+ create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
+ create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
+ create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
+ create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
+ create mode 100644 drivers/dma/fsl-qdma.c
+
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 141aefbe..e5b0fb0b 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -192,6 +192,20 @@ config FSL_EDMA
+ multiplexing capability for DMA request sources(slot).
+ This module can be found on Freescale Vybrid and LS-1 SoCs.
+
++config FSL_QDMA
++ tristate "Freescale qDMA engine support"
++ select DMA_ENGINE
++ select DMA_VIRTUAL_CHANNELS
++ select DMA_ENGINE_RAID
++ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
++ help
++ Support the Freescale qDMA engine with command queue and legacy mode.
++ Channel virtualization is supported through enqueuing of DMA jobs to,
++ or dequeuing DMA jobs from, different work queues.
++ This module can be found on Freescale LS SoCs.
++
++source drivers/dma/dpaa2-qdma/Kconfig
++
+ config FSL_RAID
+ tristate "Freescale RAID engine Support"
+ depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
+index e4dc9cac..1226cbb4 100644
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
+ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+ obj-$(CONFIG_FSL_DMA) += fsldma.o
+ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
++obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
++obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
+ obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+ obj-$(CONFIG_HSU_DMA) += hsu/
+ obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+diff --git a/drivers/dma/dpaa2-qdma/Kconfig b/drivers/dma/dpaa2-qdma/Kconfig
+new file mode 100644
+index 00000000..084e34bf
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/Kconfig
+@@ -0,0 +1,8 @@
++menuconfig FSL_DPAA2_QDMA
++ tristate "NXP DPAA2 QDMA"
++ depends on FSL_MC_BUS && FSL_MC_DPIO
++ select DMA_ENGINE
++ select DMA_VIRTUAL_CHANNELS
++ ---help---
++ NXP Data Path Acceleration Architecture 2 QDMA driver,
++ using the NXP MC bus driver.
+diff --git a/drivers/dma/dpaa2-qdma/Makefile b/drivers/dma/dpaa2-qdma/Makefile
+new file mode 100644
+index 00000000..ba599ac6
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/Makefile
+@@ -0,0 +1,8 @@
++#
++# Makefile for the NXP DPAA2 qDMA driver
++#
++ccflags-y += -DVERSION=\"\"
++
++obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
++
++fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
+diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+new file mode 100644
+index 00000000..ad6b03f7
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+@@ -0,0 +1,986 @@
++/*
++ * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
++ *
++ * Copyright 2015-2017 NXP Semiconductor, Inc.
++ * Author: Changming Huang <jerry.huang@nxp.com>
++ *
++ * Driver for the NXP QDMA engine with QMan mode.
++ * Channel virtualization is supported through enqueuing of DMA jobs to,
++ * or dequeuing DMA jobs from different work queues with QMan portal.
++ * This module can be found on NXP LS2 SoCs.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/clk.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of_dma.h>
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/iommu.h>
++
++#include "../virt-dma.h"
++
++#include "../../../drivers/staging/fsl-mc/include/mc.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
++#include "fsl_dpdmai_cmd.h"
++#include "fsl_dpdmai.h"
++#include "dpaa2-qdma.h"
++
++static bool smmu_disable = true;
++
++static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
++{
++ return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
++}
++
++static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
++{
++ return container_of(vd, struct dpaa2_qdma_comp, vdesc);
++}
++
++static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
++{
++ return 0;
++}
++
++static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
++{
++ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++ unsigned long flags;
++ LIST_HEAD(head);
++
++ spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
++ vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
++ spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
++
++ vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
++}
++
++/*
++ * Request a command descriptor for enqueue.
++ */
++static struct dpaa2_qdma_comp *
++dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
++{
++ struct dpaa2_qdma_comp *comp_temp = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
++ if (list_empty(&dpaa2_chan->comp_free)) {
++ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
++ if (!comp_temp)
++ goto err;
++ comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
++ GFP_NOWAIT, &comp_temp->fd_bus_addr);
++ if (!comp_temp->fd_virt_addr)
++ goto err;
++
++ comp_temp->fl_virt_addr =
++ (void *)((struct dpaa2_fd *)
++ comp_temp->fd_virt_addr + 1);
++ comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
++ sizeof(struct dpaa2_fd);
++ comp_temp->desc_virt_addr =
++ (void *)((struct dpaa2_frame_list *)
++ comp_temp->fl_virt_addr + 3);
++ comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
++ sizeof(struct dpaa2_frame_list) * 3;
++
++ comp_temp->qchan = dpaa2_chan;
++ comp_temp->sg_blk_num = 0;
++ INIT_LIST_HEAD(&comp_temp->sg_src_head);
++ INIT_LIST_HEAD(&comp_temp->sg_dst_head);
++ return comp_temp;
++ }
++ comp_temp = list_first_entry(&dpaa2_chan->comp_free,
++ struct dpaa2_qdma_comp, list);
++ list_del(&comp_temp->list);
++ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++
++ comp_temp->qchan = dpaa2_chan;
++err:
++ return comp_temp;
++}
++
++static void dpaa2_qdma_populate_fd(uint32_t format,
++ struct dpaa2_qdma_comp *dpaa2_comp)
++{
++ struct dpaa2_fd *fd;
++
++ fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
++ memset(fd, 0, sizeof(struct dpaa2_fd));
++
++ /* fd populated */
++ fd->simple.addr = dpaa2_comp->fl_bus_addr;
++ /* Bypass memory translation, Frame list format, short length disable */
++ /* we need to disable BMT if fsl-mc use iova addr */
++ if (smmu_disable)
++ fd->simple.bpid = QMAN_FD_BMT_ENABLE;
++ fd->simple.format_offset = QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE;
++
++ fd->simple.frc = format | QDMA_SER_CTX;
++}
++
++/* first frame list for descriptor buffer */
++static void dpaa2_qdma_populate_first_framel(
++ struct dpaa2_frame_list *f_list,
++ struct dpaa2_qdma_comp *dpaa2_comp)
++{
++ struct dpaa2_qdma_sd_d *sdd;
++
++ sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
++ memset(sdd, 0, 2 * (sizeof(*sdd)));
++ /* source and destination descriptor */
++ sdd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT; /* source descriptor CMD */
++ sdd++;
++ sdd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT; /* dest descriptor CMD */
++
++ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++ /* first frame list to source descriptor */
++ f_list->addr_lo = dpaa2_comp->desc_bus_addr;
++ f_list->addr_hi = (dpaa2_comp->desc_bus_addr >> 32);
++ f_list->data_len.data_len_sl0 = 0x20; /* source/destination desc len */
++ f_list->fmt = QDMA_FL_FMT_SBF; /* single buffer frame */
++ if (smmu_disable)
++ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
++ f_list->sl = QDMA_FL_SL_LONG; /* long length */
++ f_list->f = 0; /* not the last frame list */
++}
++
++/* source and destination frame list */
++static void dpaa2_qdma_populate_frames(struct dpaa2_frame_list *f_list,
++ dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
++{
++ /* source frame list to source buffer */
++ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++ f_list->addr_lo = src;
++ f_list->addr_hi = (src >> 32);
++ f_list->data_len.data_len_sl0 = len;
++ f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
++ if (smmu_disable)
++ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
++ f_list->sl = QDMA_FL_SL_LONG; /* long length */
++ f_list->f = 0; /* not the last frame list */
++
++ f_list++;
++ /* destination frame list to destination buffer */
++ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++ f_list->addr_lo = dst;
++ f_list->addr_hi = (dst >> 32);
++ f_list->data_len.data_len_sl0 = len;
++ f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
++ if (smmu_disable)
++ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
++ f_list->sl = QDMA_FL_SL_LONG; /* long length */
++ f_list->f = QDMA_FL_F; /* Final bit: 1, for last frame list */
++}
++
++static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
++ struct dma_chan *chan, dma_addr_t dst,
++ dma_addr_t src, size_t len, unsigned long flags)
++{
++ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++ struct dpaa2_qdma_comp *dpaa2_comp;
++ struct dpaa2_frame_list *f_list;
++ uint32_t format;
++
++ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
++
++#ifdef LONG_FORMAT
++ format = QDMA_FD_LONG_FORMAT;
++#else
++ format = QDMA_FD_SHORT_FORMAT;
++#endif
++ /* populate Frame descriptor */
++ dpaa2_qdma_populate_fd(format, dpaa2_comp);
++
++ f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
++
++#ifdef LONG_FORMAT
++ /* first frame list for descriptor buffer (long format) */
++ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
++
++ f_list++;
++#endif
++
++ dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
++
++ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
++}
++
++static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
++ struct dpaa2_qdma_comp *dpaa2_comp,
++ struct dpaa2_qdma_chan *dpaa2_chan)
++{
++ struct qdma_sg_blk *sg_blk = NULL;
++ dma_addr_t phy_sgb;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
++ if (list_empty(&dpaa2_chan->sgb_free)) {
++ sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
++ dpaa2_chan->sg_blk_pool,
++ GFP_NOWAIT, &phy_sgb);
++ if (!sg_blk) {
++ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++ return sg_blk;
++ }
++ sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
++ sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
++ } else {
++ sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
++ struct qdma_sg_blk, list);
++ list_del(&sg_blk->list);
++ }
++ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++
++ return sg_blk;
++}
++
++static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
++ struct dpaa2_qdma_chan *dpaa2_chan,
++ struct dpaa2_qdma_comp *dpaa2_comp,
++ struct scatterlist *dst_sg, u32 dst_nents,
++ struct scatterlist *src_sg, u32 src_nents)
++{
++ struct dpaa2_qdma_sg *src_sge;
++ struct dpaa2_qdma_sg *dst_sge;
++ struct qdma_sg_blk *sg_blk;
++ struct qdma_sg_blk *sg_blk_dst;
++ dma_addr_t src;
++ dma_addr_t dst;
++ uint32_t num;
++ uint32_t blocks;
++ uint32_t len = 0;
++ uint32_t total_len = 0;
++ int i, j = 0;
++
++ num = min(dst_nents, src_nents);
++ blocks = num / (NUM_SG_PER_BLK - 1);
++ if (num % (NUM_SG_PER_BLK - 1))
++ blocks += 1;
++ if (dpaa2_comp->sg_blk_num < blocks) {
++ len = blocks - dpaa2_comp->sg_blk_num;
++ for (i = 0; i < len; i++) {
++ /* source sg blocks */
++ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
++ if (!sg_blk)
++ return 0;
++ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
++ /* destination sg blocks */
++ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
++ if (!sg_blk)
++ return 0;
++ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
++ }
++ } else {
++ len = dpaa2_comp->sg_blk_num - blocks;
++ for (i = 0; i < len; i++) {
++ spin_lock(&dpaa2_chan->queue_lock);
++ /* handle source sg blocks */
++ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
++ struct qdma_sg_blk, list);
++ list_del(&sg_blk->list);
++ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
++ /* handle destination sg blocks */
++ sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
++ struct qdma_sg_blk, list);
++ list_del(&sg_blk->list);
++ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
++ spin_unlock(&dpaa2_chan->queue_lock);
++ }
++ }
++ dpaa2_comp->sg_blk_num = blocks;
++
++ /* get the first source sg phy address */
++ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
++ struct qdma_sg_blk, list);
++ dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
++ /* get the first destination sg phy address */
++ sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
++ struct qdma_sg_blk, list);
++ dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
++
++ for (i = 0; i < blocks; i++) {
++ src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
++ dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
++
++ for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
++ len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
++ if (0 == len)
++ goto fetch;
++ total_len += len;
++ src = sg_dma_address(src_sg);
++ dst = sg_dma_address(dst_sg);
++
++ /* source SG */
++ src_sge->addr_lo = src;
++ src_sge->addr_hi = (src >> 32);
++ src_sge->data_len.data_len_sl0 = len;
++ src_sge->ctrl.sl = QDMA_SG_SL_LONG;
++ src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
++ /* destination SG */
++ dst_sge->addr_lo = dst;
++ dst_sge->addr_hi = (dst >> 32);
++ dst_sge->data_len.data_len_sl0 = len;
++ dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
++ dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
++fetch:
++ num--;
++ if (0 == num) {
++ src_sge->ctrl.f = QDMA_SG_F;
++ dst_sge->ctrl.f = QDMA_SG_F;
++ goto end;
++ }
++ dst_sg = sg_next(dst_sg);
++ src_sg = sg_next(src_sg);
++ src_sge++;
++ dst_sge++;
++ if (j == (NUM_SG_PER_BLK - 2)) {
++ /* for next blocks, extension */
++ sg_blk = list_next_entry(sg_blk, list);
++ sg_blk_dst = list_next_entry(sg_blk_dst, list);
++ src_sge->addr_lo = sg_blk->blk_bus_addr;
++ src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
++ src_sge->ctrl.sl = QDMA_SG_SL_LONG;
++ src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
++ dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
++ dst_sge->addr_hi =
++ sg_blk_dst->blk_bus_addr >> 32;
++ dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
++ dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
++ }
++ }
++ }
++
++end:
++ return total_len;
++}
++
++static struct dma_async_tx_descriptor *dpaa2_qdma_prep_sg(
++ struct dma_chan *chan,
++ struct scatterlist *dst_sg, u32 dst_nents,
++ struct scatterlist *src_sg, u32 src_nents,
++ unsigned long flags)
++{
++ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++ struct dpaa2_qdma_comp *dpaa2_comp;
++ struct dpaa2_frame_list *f_list;
++ struct device *dev = dpaa2_chan->qdma->priv->dev;
++ uint32_t total_len = 0;
++
++ /* basic sanity checks */
++ if (dst_nents == 0 || src_nents == 0)
++ return NULL;
++
++ if (dst_sg == NULL || src_sg == NULL)
++ return NULL;
++
++ /* get the descriptors required */
++ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
++
++ /* populate Frame descriptor */
++ dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
++
++ /* prepare Scatter gather entry for source and destination */
++ total_len = dpaa2_qdma_populate_sg(dev, dpaa2_chan,
++ dpaa2_comp, dst_sg, dst_nents, src_sg, src_nents);
++
++ f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
++ /* first frame list for descriptor buffer */
++ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
++ f_list++;
++ /* prepare Scatter gather entry for source and destination */
++ /* populate source and destination frame list table */
++ dpaa2_qdma_populate_frames(f_list, dpaa2_comp->sge_dst_bus_addr,
++ dpaa2_comp->sge_src_bus_addr,
++ total_len, QDMA_FL_FMT_SGE);
++
++ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
++}
++
++static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
++ dma_cookie_t cookie, struct dma_tx_state *txstate)
++{
++ return dma_cookie_status(chan, cookie, txstate);
++}
++
++static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
++{
++}
++
++static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
++{
++ struct dpaa2_qdma_comp *dpaa2_comp;
++ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
++ struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
++ struct virt_dma_desc *vdesc;
++ struct dpaa2_fd *fd;
++ int err;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
++ spin_lock(&dpaa2_chan->vchan.lock);
++ if (vchan_issue_pending(&dpaa2_chan->vchan)) {
++ vdesc = vchan_next_desc(&dpaa2_chan->vchan);
++ if (!vdesc)
++ goto err_enqueue;
++ dpaa2_comp = to_fsl_qdma_comp(vdesc);
++
++ fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
++
++ list_del(&vdesc->node);
++ list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
++
++ /* TODO: priority hard-coded to zero */
++ err = dpaa2_io_service_enqueue_fq(NULL,
++ priv->tx_queue_attr[0].fqid, fd);
++ if (err) {
++ list_del(&dpaa2_comp->list);
++ list_add_tail(&dpaa2_comp->list,
++ &dpaa2_chan->comp_free);
++ }
++
++ }
++err_enqueue:
++ spin_unlock(&dpaa2_chan->vchan.lock);
++ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++}
++
++static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev = &ls_dev->dev;
++ struct dpaa2_qdma_priv *priv;
++ struct dpaa2_qdma_priv_per_prio *ppriv;
++ uint8_t prio_def = DPDMAI_PRIO_NUM;
++ int err;
++ int i;
++
++ priv = dev_get_drvdata(dev);
++
++ priv->dev = dev;
++ priv->dpqdma_id = ls_dev->obj_desc.id;
++
++	/* Get the handle for the DPDMAI this interface is associated with */
++ err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpdmai_open() failed\n");
++ return err;
++ }
++ dev_info(dev, "Opened dpdmai object successfully\n");
++
++ err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
++ &priv->dpdmai_attr);
++ if (err) {
++ dev_err(dev, "dpdmai_get_attributes() failed\n");
++ return err;
++ }
++
++ if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
++ dev_err(dev, "DPDMAI major version mismatch\n"
++ "Found %u.%u, supported version is %u.%u\n",
++ priv->dpdmai_attr.version.major,
++ priv->dpdmai_attr.version.minor,
++ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
++ }
++
++ if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
++ dev_err(dev, "DPDMAI minor version mismatch\n"
++ "Found %u.%u, supported version is %u.%u\n",
++ priv->dpdmai_attr.version.major,
++ priv->dpdmai_attr.version.minor,
++ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
++ }
++
++ priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
++ ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
++ if (!ppriv) {
++ dev_err(dev, "kzalloc for ppriv failed\n");
++ return -1;
++ }
++ priv->ppriv = ppriv;
++
++ for (i = 0; i < priv->num_pairs; i++) {
++ err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
++ i, &priv->rx_queue_attr[i]);
++ if (err) {
++ dev_err(dev, "dpdmai_get_rx_queue() failed\n");
++ return err;
++ }
++ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
++
++ err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
++ i, &priv->tx_queue_attr[i]);
++ if (err) {
++ dev_err(dev, "dpdmai_get_tx_queue() failed\n");
++ return err;
++ }
++ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
++ ppriv->prio = i;
++ ppriv->priv = priv;
++ ppriv++;
++ }
++
++ return 0;
++}
++
++static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
++{
++ struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
++ struct dpaa2_qdma_priv_per_prio, nctx);
++ struct dpaa2_qdma_priv *priv = ppriv->priv;
++ struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
++ struct dpaa2_qdma_chan *qchan;
++ const struct dpaa2_fd *fd;
++ const struct dpaa2_fd *fd_eq;
++ struct dpaa2_dq *dq;
++ int err;
++ int is_last = 0;
++ uint8_t status;
++ int i;
++ int found;
++ uint32_t n_chans = priv->dpaa2_qdma->n_chans;
++
++ do {
++ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
++ ppriv->store);
++ } while (err);
++
++ while (!is_last) {
++ do {
++ dq = dpaa2_io_store_next(ppriv->store, &is_last);
++ } while (!is_last && !dq);
++ if (!dq) {
++ dev_err(priv->dev, "FQID returned no valid frames!\n");
++ continue;
++ }
++
++ /* obtain FD and process the error */
++ fd = dpaa2_dq_fd(dq);
++ status = fd->simple.ctrl & 0xff;
++ if (status)
++ dev_err(priv->dev, "FD error occurred\n");
++ found = 0;
++ for (i = 0; i < n_chans; i++) {
++ qchan = &priv->dpaa2_qdma->chans[i];
++ spin_lock(&qchan->queue_lock);
++ if (list_empty(&qchan->comp_used)) {
++ spin_unlock(&qchan->queue_lock);
++ continue;
++ }
++ list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
++ &qchan->comp_used, list) {
++ fd_eq = (struct dpaa2_fd *)
++ dpaa2_comp->fd_virt_addr;
++
++ if (fd_eq->simple.addr ==
++ fd->simple.addr) {
++
++ list_del(&dpaa2_comp->list);
++ list_add_tail(&dpaa2_comp->list,
++ &qchan->comp_free);
++
++ spin_lock(&qchan->vchan.lock);
++ vchan_cookie_complete(
++ &dpaa2_comp->vdesc);
++ spin_unlock(&qchan->vchan.lock);
++ found = 1;
++ break;
++ }
++ }
++ spin_unlock(&qchan->queue_lock);
++ if (found)
++ break;
++ }
++ }
++
++ dpaa2_io_service_rearm(NULL, ctx);
++}
++
++static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
++{
++ int err, i, num;
++ struct device *dev = priv->dev;
++ struct dpaa2_qdma_priv_per_prio *ppriv;
++
++ num = priv->num_pairs;
++ ppriv = priv->ppriv;
++ for (i = 0; i < num; i++) {
++ ppriv->nctx.is_cdan = 0;
++ ppriv->nctx.desired_cpu = 1;
++ ppriv->nctx.id = ppriv->rsp_fqid;
++ ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
++ err = dpaa2_io_service_register(NULL, &ppriv->nctx);
++ if (err) {
++ dev_err(dev, "Notification register failed\n");
++ goto err_service;
++ }
++
++ ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
++ dev);
++ if (!ppriv->store) {
++ dev_err(dev, "dpaa2_io_store_create() failed\n");
++ goto err_store;
++ }
++
++ ppriv++;
++ }
++ return 0;
++
++err_store:
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++err_service:
++ ppriv--;
++ while (ppriv >= priv->ppriv) {
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_store_destroy(ppriv->store);
++ ppriv--;
++ }
++	return err ? err : -ENOMEM;
++}
++
++static void __cold dpaa2_dpdmai_store_free(struct dpaa2_qdma_priv *priv)
++{
++ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
++ int i;
++
++ for (i = 0; i < priv->num_pairs; i++) {
++ dpaa2_io_store_destroy(ppriv->store);
++ ppriv++;
++ }
++}
++
++static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
++{
++ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
++ int i;
++
++ for (i = 0; i < priv->num_pairs; i++) {
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ ppriv++;
++ }
++}
++
++static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
++{
++ int err;
++ struct dpdmai_rx_queue_cfg rx_queue_cfg;
++ struct device *dev = priv->dev;
++ struct dpaa2_qdma_priv_per_prio *ppriv;
++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
++ int i, num;
++
++ num = priv->num_pairs;
++ ppriv = priv->ppriv;
++ for (i = 0; i < num; i++) {
++ rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
++ DPDMAI_QUEUE_OPT_DEST;
++ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
++ rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
++ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
++ rx_queue_cfg.dest_cfg.priority = ppriv->prio;
++ err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
++ rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
++ if (err) {
++ dev_err(dev, "dpdmai_set_rx_queue() failed\n");
++ return err;
++ }
++
++ ppriv++;
++ }
++
++ return 0;
++}
++
++static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
++{
++ int err = 0;
++ struct device *dev = priv->dev;
++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
++ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
++ int i;
++
++ for (i = 0; i < priv->num_pairs; i++) {
++ ppriv->nctx.qman64 = 0;
++ ppriv->nctx.dpio_id = 0;
++ ppriv++;
++ }
++
++ err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
++ if (err)
++ dev_err(dev, "dpdmai_reset() failed\n");
++
++ return err;
++}
++
++static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
++ struct list_head *head)
++{
++ struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
++ /* free the QDMA SG pool block */
++ list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
++ sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
++ sgb_tmp->blk_virt_addr - 1);
++ sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
++ - sizeof(*sgb_tmp);
++ dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
++ sgb_tmp->blk_bus_addr);
++ }
++}
++
++static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
++ struct list_head *head)
++{
++ struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
++ /* free the QDMA comp resource */
++ list_for_each_entry_safe(comp_tmp, _comp_tmp,
++ head, list) {
++ dma_pool_free(qchan->fd_pool,
++ comp_tmp->fd_virt_addr,
++ comp_tmp->fd_bus_addr);
++ /* free the SG source block on comp */
++ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
++ /* free the SG destination block on comp */
++ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
++ list_del(&comp_tmp->list);
++ kfree(comp_tmp);
++ }
++}
++
++static void __cold dpaa2_dpdmai_free_channels(
++ struct dpaa2_qdma_engine *dpaa2_qdma)
++{
++ struct dpaa2_qdma_chan *qchan;
++ int num, i;
++
++ num = dpaa2_qdma->n_chans;
++ for (i = 0; i < num; i++) {
++ qchan = &dpaa2_qdma->chans[i];
++ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
++ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
++ dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
++ dma_pool_destroy(qchan->fd_pool);
++ dma_pool_destroy(qchan->sg_blk_pool);
++ }
++}
++
++static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
++{
++ struct dpaa2_qdma_chan *dpaa2_chan;
++ struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
++ int i;
++
++ INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
++ for (i = 0; i < dpaa2_qdma->n_chans; i++) {
++ dpaa2_chan = &dpaa2_qdma->chans[i];
++ dpaa2_chan->qdma = dpaa2_qdma;
++ dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
++ vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
++
++ dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
++ dev, FD_POOL_SIZE, 32, 0);
++ if (!dpaa2_chan->fd_pool)
++			return -ENOMEM;
++ dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
++ dev, SG_POOL_SIZE, 32, 0);
++ if (!dpaa2_chan->sg_blk_pool)
++			return -ENOMEM;
++
++ spin_lock_init(&dpaa2_chan->queue_lock);
++ INIT_LIST_HEAD(&dpaa2_chan->comp_used);
++ INIT_LIST_HEAD(&dpaa2_chan->comp_free);
++ INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
++ }
++ return 0;
++}
++
++static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
++{
++ struct dpaa2_qdma_priv *priv;
++ struct device *dev = &dpdmai_dev->dev;
++ struct dpaa2_qdma_engine *dpaa2_qdma;
++ int err;
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++ dev_set_drvdata(dev, priv);
++ priv->dpdmai_dev = dpdmai_dev;
++
++ priv->iommu_domain = iommu_get_domain_for_dev(dev);
++ if (priv->iommu_domain)
++ smmu_disable = false;
++
++ /* obtain a MC portal */
++ err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
++ if (err) {
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_mcportal;
++ }
++
++ /* DPDMAI initialization */
++ err = dpaa2_qdma_setup(dpdmai_dev);
++ if (err) {
++ dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
++ goto err_dpdmai_setup;
++ }
++
++ /* DPIO */
++ err = dpaa2_qdma_dpio_setup(priv);
++ if (err) {
++ dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
++ goto err_dpio_setup;
++ }
++
++ /* DPDMAI binding to DPIO */
++ err = dpaa2_dpdmai_bind(priv);
++ if (err) {
++ dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
++ goto err_bind;
++ }
++
++ /* DPDMAI enable */
++ err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpdmai_enable() faile\n");
++ goto err_enable;
++ }
++
++ dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
++ if (!dpaa2_qdma) {
++ err = -ENOMEM;
++ goto err_eng;
++ }
++
++ priv->dpaa2_qdma = dpaa2_qdma;
++ dpaa2_qdma->priv = priv;
++
++ dpaa2_qdma->n_chans = NUM_CH;
++
++ err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
++ if (err) {
++ dev_err(dev, "QDMA alloc channels faile\n");
++ goto err_reg;
++ }
++
++ dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
++ dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
++ dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
++ dma_cap_set(DMA_SG, dpaa2_qdma->dma_dev.cap_mask);
++
++ dpaa2_qdma->dma_dev.dev = dev;
++ dpaa2_qdma->dma_dev.device_alloc_chan_resources
++ = dpaa2_qdma_alloc_chan_resources;
++ dpaa2_qdma->dma_dev.device_free_chan_resources
++ = dpaa2_qdma_free_chan_resources;
++ dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
++ dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
++ dpaa2_qdma->dma_dev.device_prep_dma_sg = dpaa2_qdma_prep_sg;
++ dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
++
++ err = dma_async_device_register(&dpaa2_qdma->dma_dev);
++ if (err) {
++ dev_err(dev, "Can't register NXP QDMA engine.\n");
++ goto err_reg;
++ }
++
++ return 0;
++
++err_reg:
++ dpaa2_dpdmai_free_channels(dpaa2_qdma);
++ kfree(dpaa2_qdma);
++err_eng:
++ dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
++err_enable:
++ dpaa2_dpdmai_dpio_unbind(priv);
++err_bind:
++	dpaa2_dpdmai_store_free(priv);
++ dpaa2_dpdmai_dpio_free(priv);
++err_dpio_setup:
++ dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
++err_dpdmai_setup:
++ fsl_mc_portal_free(priv->mc_io);
++err_mcportal:
++ kfree(priv->ppriv);
++ kfree(priv);
++ dev_set_drvdata(dev, NULL);
++ return err;
++}
++
++static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev;
++ struct dpaa2_qdma_priv *priv;
++ struct dpaa2_qdma_engine *dpaa2_qdma;
++
++ dev = &ls_dev->dev;
++ priv = dev_get_drvdata(dev);
++ dpaa2_qdma = priv->dpaa2_qdma;
++
++ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
++ dpaa2_dpdmai_dpio_unbind(priv);
++	dpaa2_dpdmai_store_free(priv);
++ dpaa2_dpdmai_dpio_free(priv);
++ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
++ fsl_mc_portal_free(priv->mc_io);
++ dev_set_drvdata(dev, NULL);
++ dpaa2_dpdmai_free_channels(dpaa2_qdma);
++
++ dma_async_device_unregister(&dpaa2_qdma->dma_dev);
++ kfree(priv);
++ kfree(dpaa2_qdma);
++
++ return 0;
++}
++
++static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpdmai",
++ },
++ { .vendor = 0x0 }
++};
++
++static struct fsl_mc_driver dpaa2_qdma_driver = {
++ .driver = {
++ .name = "dpaa2-qdma",
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_qdma_probe,
++ .remove = dpaa2_qdma_remove,
++ .match_id_table = dpaa2_qdma_id_table
++};
++
++static int __init dpaa2_qdma_driver_init(void)
++{
++	return fsl_mc_driver_register(&dpaa2_qdma_driver);
++}
++late_initcall(dpaa2_qdma_driver_init);
++
++static void __exit fsl_qdma_exit(void)
++{
++	fsl_mc_driver_unregister(&dpaa2_qdma_driver);
++}
++module_exit(fsl_qdma_exit);
++
++MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
++MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
+new file mode 100644
+index 00000000..71a00db8
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
+@@ -0,0 +1,262 @@
++/* Copyright 2015 NXP Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of NXP Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPAA2_QDMA_H
++#define __DPAA2_QDMA_H
++
++#define LONG_FORMAT 1
++
++#define DPAA2_QDMA_STORE_SIZE 16
++#define NUM_CH 8
++#define NUM_SG_PER_BLK 16
++
++#define QDMA_DMR_OFFSET 0x0
++#define QDMA_DQ_EN (0 << 30)
++#define QDMA_DQ_DIS (1 << 30)
++
++#define QDMA_DSR_M_OFFSET 0x10004
++
++struct dpaa2_qdma_sd_d {
++ uint32_t rsv:32;
++ union {
++ struct {
++			uint32_t ssd:12; /* source stride distance */
++			uint32_t sss:12; /* source stride size */
++ uint32_t rsv1:8;
++ } sdf;
++ struct {
++ uint32_t dsd:12; /* Destination stride distance */
++ uint32_t dss:12; /* Destination stride size */
++ uint32_t rsv2:8;
++ } ddf;
++ } df;
++ uint32_t rbpcmd; /* Route-by-port command */
++ uint32_t cmd;
++} __attribute__((__packed__));
++/* Source descriptor command read transaction type for RBP=0:
++ * coherent copy of cacheable memory */
++#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
++/* Destination descriptor command write transaction type for RBP=0:
++ * coherent copy of cacheable memory */
++#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
++
++#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
++#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
++#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
++#define QDMA_SG_SL_SHORT 0x1 /* short length */
++#define QDMA_SG_SL_LONG	0x0 /* long length */
++#define QDMA_SG_F 0x1 /* last sg entry */
++struct dpaa2_qdma_sg {
++ uint32_t addr_lo; /* address 0:31 */
++ uint32_t addr_hi:17; /* address 32:48 */
++ uint32_t rsv:15;
++ union {
++ uint32_t data_len_sl0; /* SL=0, the long format */
++ struct {
++ uint32_t len:17; /* SL=1, the short format */
++ uint32_t reserve:3;
++ uint32_t sf:1;
++ uint32_t sr:1;
++ uint32_t size:10; /* buff size */
++ } data_len_sl1;
++ } data_len; /* AVAIL_LENGTH */
++ struct {
++ uint32_t bpid:14;
++ uint32_t ivp:1;
++ uint32_t mbt:1;
++ uint32_t offset:12;
++ uint32_t fmt:2;
++ uint32_t sl:1;
++ uint32_t f:1;
++ } ctrl;
++} __attribute__((__packed__));
++
++#define QMAN_FD_FMT_ENABLE (1 << 12) /* frame list table enable */
++#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
++#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
++#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
++#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
++
++#define QDMA_SB_FRAME (0 << 28) /* single frame */
++#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
++#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
++#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
++
++#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
++#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
++#define QDMA_SER_DISABLE (0 << 8) /* no notification */
++#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
++#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
++#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
++#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
++
++#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
++#define QMAN_FD_VA_DISABLE (0 << 14) /* Address used is a real address */
++#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
++#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
++#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
++
++#define QDMA_FL_FMT_SBF 0x0 /* Single buffer frame */
++#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
++#define QDMA_FL_BMT_ENABLE 0x1 /* enable bypass memory translation */
++#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
++#define QDMA_FL_SL_LONG 0x0 /* long length */
++#define QDMA_FL_SL_SHORT 0x1 /* short length */
++#define QDMA_FL_F 0x1 /* last frame list bit */
++/* Description of the frame list table structure */
++struct dpaa2_frame_list {
++ uint32_t addr_lo; /* lower 32 bits of address */
++ uint32_t addr_hi:17; /* upper 17 bits of address */
++ uint32_t resrvd:15;
++ union {
++		uint32_t data_len_sl0; /* If SL=0, the data length field is 32-bit */
++ struct {
++			uint32_t data_len:18; /* If SL=1, the length is 18-bit */
++ uint32_t resrvd:2;
++ uint32_t mem:12; /* Valid only when SL=1 */
++ } data_len_sl1;
++ } data_len;
++ /* word 4 */
++ uint32_t bpid:14; /* Frame buffer pool ID */
++ uint32_t ivp:1; /* Invalid Pool ID. */
++ uint32_t bmt:1; /* Bypass Memory Translation */
++ uint32_t offset:12; /* Frame offset */
++ uint32_t fmt:2; /* Frame Format */
++ uint32_t sl:1; /* Short Length */
++ uint32_t f:1; /* Final bit */
++
++ uint32_t frc; /* Frame Context */
++ /* word 6 */
++ uint32_t err:8; /* Frame errors */
++ uint32_t resrvd0:8;
++ uint32_t asal:4; /* accelerator-specific annotation length */
++ uint32_t resrvd1:1;
++ uint32_t ptv2:1;
++ uint32_t ptv1:1;
++ uint32_t pta:1; /* pass-through annotation */
++ uint32_t resrvd2:8;
++
++	uint32_t flc_lo; /* lower 32 bits of flow context */
++	uint32_t flc_hi; /* higher 32 bits of flow context */
++} __attribute__((__packed__));
++
++struct dpaa2_qdma_chan {
++ struct virt_dma_chan vchan;
++ struct virt_dma_desc vdesc;
++ enum dma_status status;
++ struct dpaa2_qdma_engine *qdma;
++
++ struct mutex dpaa2_queue_mutex;
++ spinlock_t queue_lock;
++ struct dma_pool *fd_pool;
++ struct dma_pool *sg_blk_pool;
++
++ struct list_head comp_used;
++ struct list_head comp_free;
++
++ struct list_head sgb_free;
++};
++
++struct qdma_sg_blk {
++ dma_addr_t blk_bus_addr;
++ void *blk_virt_addr;
++ struct list_head list;
++};
++
++struct dpaa2_qdma_comp {
++ dma_addr_t fd_bus_addr;
++ dma_addr_t fl_bus_addr;
++ dma_addr_t desc_bus_addr;
++ dma_addr_t sge_src_bus_addr;
++ dma_addr_t sge_dst_bus_addr;
++ void *fd_virt_addr;
++ void *fl_virt_addr;
++ void *desc_virt_addr;
++ void *sg_src_virt_addr;
++ void *sg_dst_virt_addr;
++ struct qdma_sg_blk *sg_blk;
++ uint32_t sg_blk_num;
++ struct list_head sg_src_head;
++ struct list_head sg_dst_head;
++ struct dpaa2_qdma_chan *qchan;
++ struct virt_dma_desc vdesc;
++ struct list_head list;
++};
++
++struct dpaa2_qdma_engine {
++ struct dma_device dma_dev;
++ u32 n_chans;
++ struct dpaa2_qdma_chan chans[NUM_CH];
++
++ struct dpaa2_qdma_priv *priv;
++};
++
++/*
++ * dpaa2_qdma_priv - driver private data
++ */
++struct dpaa2_qdma_priv {
++ int dpqdma_id;
++
++ struct iommu_domain *iommu_domain;
++ struct dpdmai_attr dpdmai_attr;
++ struct device *dev;
++ struct fsl_mc_io *mc_io;
++ struct fsl_mc_device *dpdmai_dev;
++
++ struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
++ struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
++
++ uint8_t num_pairs;
++
++ struct dpaa2_qdma_engine *dpaa2_qdma;
++ struct dpaa2_qdma_priv_per_prio *ppriv;
++};
++
++struct dpaa2_qdma_priv_per_prio {
++ int req_fqid;
++ int rsp_fqid;
++ int prio;
++
++ struct dpaa2_io_store *store;
++ struct dpaa2_io_notification_ctx nctx;
++
++ struct dpaa2_qdma_priv *priv;
++};
++
++/* FD pool size: one FD + 3 frame lists + 2 source/destination descriptors */
++#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
++ sizeof(struct dpaa2_frame_list) * 3 + \
++ sizeof(struct dpaa2_qdma_sd_d) * 2)
++
++/* qdma_sg_blk + 16 SGs */
++#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
++ sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
++#endif /* __DPAA2_QDMA_H */
+diff --git a/drivers/dma/dpaa2-qdma/dpdmai.c b/drivers/dma/dpaa2-qdma/dpdmai.c
+new file mode 100644
+index 00000000..ad13fc1e
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/dpdmai.c
+@@ -0,0 +1,454 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/types.h>
++#include <linux/io.h>
++#include "fsl_dpdmai.h"
++#include "fsl_dpdmai_cmd.h"
++#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
++#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
++
++int dpdmai_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdmai_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPDMAI_CMD_OPEN(cmd, dpdmai_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdmai_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdmai_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPDMAI_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdmai_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpdmai_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdmai_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpdmai_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdmai_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdmai_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ const struct dpdmai_rx_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority, struct dpdmai_rx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
++
++ return 0;
++}
++
++int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpdmai_tx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
++
++ return 0;
++}
+diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
+new file mode 100644
+index 00000000..e931ce16
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
+@@ -0,0 +1,521 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPDMAI_H
++#define __FSL_DPDMAI_H
++
++struct fsl_mc_io;
++
++/* Data Path DMA Interface API
++ * Contains initialization APIs and runtime control APIs for DPDMAI
++ */
++
++/* General DPDMAI macros */
++
++/**
++ * Maximum number of Tx/Rx priorities per DPDMAI object
++ */
++#define DPDMAI_PRIO_NUM 2
++
++/**
++ * All queues considered; see dpdmai_set_rx_queue()
++ */
++#define DPDMAI_ALL_QUEUES	((uint8_t)(-1))
++
++/**
++ * dpdmai_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpdmai_id: DPDMAI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpdmai_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdmai_id,
++ uint16_t *token);
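++
++/*
++ * Illustrative sketch only (not part of this patch's API): a minimal
++ * control-session lifecycle built from the calls declared in this header.
++ * 'mc_io' and 'dpdmai_id' are assumed to come from the caller, e.g. a
++ * probed fsl_mc_device as in the dpaa2-qdma driver above.
++ *
++ *	uint16_t token;
++ *	int err;
++ *
++ *	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
++ *	if (err)
++ *		return err;
++ *	err = dpdmai_enable(mc_io, 0, token);
++ *	...
++ *	dpdmai_disable(mc_io, 0, token);
++ *	dpdmai_close(mc_io, 0, token);
++ */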
++
++/**
++ * dpdmai_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpdmai_cfg - Structure representing DPDMAI configuration
++ * @priorities: Priorities for the DMA hardware processing; valid priorities are
++ * configured with values 1-8; the entry following last valid entry
++ * should be configured with 0
++ */
++struct dpdmai_cfg {
++ uint8_t priorities[DPDMAI_PRIO_NUM];
++};
++
++/**
++ * dpdmai_create() - Create the DPDMAI object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPDMAI object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpdmai_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdmai_cfg *cfg,
++ uint16_t *token);
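++
++/*
++ * Illustrative sketch only: creating a DPDMAI object with one valid
++ * priority. Per the dpdmai_cfg description above, the entry following
++ * the last valid priority is set to 0.
++ *
++ *	struct dpdmai_cfg cfg = { .priorities = { 1, 0 } };
++ *	uint16_t token;
++ *	int err = dpdmai_create(mc_io, 0, &cfg, &token);
++ */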
++
++/**
++ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpdmai_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpdmai_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpdmai_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdmai_irq_cfg *irq_cfg);
++
++/**
++ * dpdmai_get_irq() - Get IRQ information from the DPDMAI
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdmai_irq_cfg *irq_cfg);
++
++/**
++ * dpdmai_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable setting
++ * controls the overall interrupt state: if the interrupt is disabled, no
++ * cause can assert an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
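++
++/*
++ * Illustrative sketch only: enabling interrupt index 0 and then
++ * unmasking every cause via dpdmai_set_irq_mask() (declared below);
++ * 'mc_io' and 'token' are assumed from an earlier dpdmai_open().
++ *
++ *	err = dpdmai_set_irq_enable(mc_io, 0, token, 0, 1);
++ *	if (!err)
++ *		err = dpdmai_set_irq_mask(mc_io, 0, token, 0, ~0U);
++ */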
++
++/**
++ * dpdmai_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned Interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpdmai_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpdmai_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpdmai_get_irq_status() - Get the current status of any pending interrupts
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpdmai_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpdmai_attr - Structure representing DPDMAI attributes
++ * @id: DPDMAI object ID
++ * @version: DPDMAI version
++ * @num_of_priorities: number of priorities
++ */
++struct dpdmai_attr {
++ int id;
++ /**
++ * struct version - DPDMAI version
++ * @major: DPDMAI major version
++ * @minor: DPDMAI minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint8_t num_of_priorities;
++};
++
++/**
++ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdmai_attr *attr);
++
++/**
++ * enum dpdmai_dest - DPDMAI destination types
++ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
++ * and does not generate FQDAN notifications; user is expected to dequeue
++ * from the queue based on polling or other user-defined method
++ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to dequeue
++ * from the queue only after notification is received
++ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified DPCON object;
++ * user is expected to dequeue from the DPCON channel
++ */
++enum dpdmai_dest {
++ DPDMAI_DEST_NONE = 0,
++ DPDMAI_DEST_DPIO = 1,
++ DPDMAI_DEST_DPCON = 2
++};
++
++/**
++ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid values
++ * are 0-1 or 0-7, depending on the number of priorities in that
++ * channel; not relevant for 'DPDMAI_DEST_NONE' option
++ */
++struct dpdmai_dest_cfg {
++ enum dpdmai_dest dest_type;
++ int dest_id;
++ uint8_t priority;
++};
++
++/* DPDMAI queue modification options */
++
++/**
++ * Select to modify the user's context associated with the queue
++ */
++#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
++
++/**
++ * Select to modify the queue's destination
++ */
++#define DPDMAI_QUEUE_OPT_DEST 0x00000002
++
++/**
++ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
++ * @options: Flags representing the suggested modifications to the queue;
++ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame;
++ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
++ * @dest_cfg: Queue destination parameters;
++ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
++ */
++struct dpdmai_rx_queue_cfg {
++ uint32_t options;
++ uint64_t user_ctx;
++ struct dpdmai_dest_cfg dest_cfg;
++
++};
++
++/**
++ * dpdmai_set_rx_queue() - Set Rx queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @priority: Select the queue relative to number of
++ * priorities configured at DPDMAI creation; use
++ * DPDMAI_ALL_QUEUES to configure all Rx queues
++ * identically.
++ * @cfg: Rx queue configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ const struct dpdmai_rx_queue_cfg *cfg);
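++
++/*
++ * Illustrative sketch only: steering Rx frames of priority 0 to a DPIO
++ * with FQDAN notifications, mirroring what dpaa2_dpdmai_bind() does in
++ * the driver above; 'dpio_id' and 'ctx64' are assumed inputs.
++ *
++ *	struct dpdmai_rx_queue_cfg cfg = {
++ *		.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
++ *		.user_ctx = ctx64,
++ *		.dest_cfg = {
++ *			.dest_type = DPDMAI_DEST_DPIO,
++ *			.dest_id = dpio_id,
++ *			.priority = 0,
++ *		},
++ *	};
++ *
++ *	err = dpdmai_set_rx_queue(mc_io, 0, token, 0, &cfg);
++ */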
++
++/**
++ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame
++ * @dest_cfg: Queue destination configuration
++ * @fqid: Virtual FQID value to be used for dequeue operations
++ */
++struct dpdmai_rx_queue_attr {
++ uint64_t user_ctx;
++ struct dpdmai_dest_cfg dest_cfg;
++ uint32_t fqid;
++};
++
++/**
++ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @priority: Select the queue relative to number of
++ * priorities configured at DPDMAI creation
++ * @attr: Returned Rx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpdmai_rx_queue_attr *attr);
++
++/**
++ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
++ * @fqid: Virtual FQID to be used for sending frames to DMA hardware
++ */
++struct dpdmai_tx_queue_attr {
++ uint32_t fqid;
++};
++
++/**
++ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @priority: Select the queue relative to number of
++ * priorities configured at DPDMAI creation
++ * @attr: Returned Tx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpdmai_tx_queue_attr *attr);
++
++#endif /* __FSL_DPDMAI_H */
+diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
+new file mode 100644
+index 00000000..7d403c01
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
+@@ -0,0 +1,222 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPDMAI_CMD_H
++#define _FSL_DPDMAI_CMD_H
++
++/* DPDMAI Version */
++#define DPDMAI_VER_MAJOR 2
++#define DPDMAI_VER_MINOR 2
++
++#define DPDMAI_CMD_BASE_VERSION 0
++#define DPDMAI_CMD_ID_OFFSET 4
++
++/* Command IDs */
++#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++
++#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++
++#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++
++#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
++
++
++#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
++#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
++
++
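++/*
++ * Helpers for packing and unpacking bit fields within the 64-bit
++ * parameter words of an MC command: mc_enc() places a value of the
++ * given width at a bit offset, mc_dec() extracts it again.
++ */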
++#define MAKE_UMASK64(_width) \
++ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
++ (uint64_t)-1))
++
++static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
++{
++ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
++}
++
++static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
++{
++ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
++}
++
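++/*
++ * MC_CMD_OP() packs an argument into a command parameter word;
++ * MC_RSP_OP() extracts a field from a response parameter word.
++ */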
++#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
++ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
++
++#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
++ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
++
++#define MC_CMD_HDR_READ_TOKEN(_hdr) \
++ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
++ MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
++ MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
++
++#endif /* _FSL_DPDMAI_CMD_H */
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+new file mode 100644
+index 00000000..6c4c2813
+--- /dev/null
++++ b/drivers/dma/fsl-qdma.c
+@@ -0,0 +1,1201 @@
++/*
++ * drivers/dma/fsl-qdma.c
++ *
++ * Copyright 2014-2015 Freescale Semiconductor, Inc.
++ *
++ * Driver for the Freescale qDMA engine with software command queue mode.
++ * Channel virtualization is supported through enqueuing of DMA jobs to,
++ * or dequeuing DMA jobs from, different work queues.
++ * This module can be found on Freescale LS SoCs.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <asm/cacheflush.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/of_dma.h>
++#include <linux/of_irq.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++
++#include "virt-dma.h"
++
++#define FSL_QDMA_DMR 0x0
++#define FSL_QDMA_DSR 0x4
++#define FSL_QDMA_DEIER 0xe00
++#define FSL_QDMA_DEDR 0xe04
++#define FSL_QDMA_DECFDW0R 0xe10
++#define FSL_QDMA_DECFDW1R 0xe14
++#define FSL_QDMA_DECFDW2R 0xe18
++#define FSL_QDMA_DECFDW3R 0xe1c
++#define FSL_QDMA_DECFQIDR 0xe30
++#define FSL_QDMA_DECBR 0xe34
++
++#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
++#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
++#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
++#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
++#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
++#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
++#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
++#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
++
++#define FSL_QDMA_SQDPAR 0x80c
++#define FSL_QDMA_SQEPAR 0x814
++#define FSL_QDMA_BSQMR 0x800
++#define FSL_QDMA_BSQSR 0x804
++#define FSL_QDMA_BSQICR 0x828
++#define FSL_QDMA_CQMR 0xa00
++#define FSL_QDMA_CQDSCR1 0xa08
++#define FSL_QDMA_CQDSCR2 0xa0c
++#define FSL_QDMA_CQIER 0xa10
++#define FSL_QDMA_CQEDR 0xa14
++#define FSL_QDMA_SQCCMR 0xa20
++
++#define FSL_QDMA_SQICR_ICEN
++
++#define FSL_QDMA_CQIDR_CQT 0xff000000
++#define FSL_QDMA_CQIDR_SQPE 0x800000
++#define FSL_QDMA_CQIDR_SQT 0x8000
++
++#define FSL_QDMA_BCQIER_CQTIE 0x8000
++#define FSL_QDMA_BCQIER_CQPEIE 0x800000
++#define FSL_QDMA_BSQICR_ICEN 0x80000000
++#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
++#define FSL_QDMA_CQIER_MEIE 0x80000000
++#define FSL_QDMA_CQIER_TEIE 0x1
++#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
++
++#define FSL_QDMA_QUEUE_MAX 8
++
++#define FSL_QDMA_BCQMR_EN 0x80000000
++#define FSL_QDMA_BCQMR_EI 0x40000000
++#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
++#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
++
++#define FSL_QDMA_BCQSR_QF 0x10000
++#define FSL_QDMA_BCQSR_XOFF 0x1
++
++#define FSL_QDMA_BSQMR_EN 0x80000000
++#define FSL_QDMA_BSQMR_DI 0x40000000
++#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
++
++#define FSL_QDMA_BSQSR_QE 0x20000
++
++#define FSL_QDMA_DMR_DQD 0x40000000
++#define FSL_QDMA_DSR_DB 0x80000000
++
++#define FSL_QDMA_BASE_BUFFER_SIZE 96
++#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16
++#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
++#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
++#define FSL_QDMA_QUEUE_NUM_MAX 8
++
++#define FSL_QDMA_CMD_RWTTYPE 0x4
++#define FSL_QDMA_CMD_LWC 0x2
++
++#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
++#define FSL_QDMA_CMD_NS_OFFSET 27
++#define FSL_QDMA_CMD_DQOS_OFFSET 24
++#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
++#define FSL_QDMA_CMD_DSEN_OFFSET 19
++#define FSL_QDMA_CMD_LWC_OFFSET 16
++
++#define FSL_QDMA_E_SG_TABLE 1
++#define FSL_QDMA_E_DATA_BUFFER 0
++#define FSL_QDMA_F_LAST_ENTRY 1
++
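++/*
++ * Queue and address of the last status entry handled, used to detect
++ * duplicated completion notifications from the hardware.
++ */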
++u64 pre_addr, pre_queue;
++
++struct fsl_qdma_ccdf {
++ u8 status;
++ u32 rev1:22;
++ u32 ser:1;
++ u32 rev2:1;
++ u32 rev3:20;
++ u32 offset:9;
++ u32 format:3;
++ union {
++ struct {
++ u32 addr_lo; /* low 32-bits of 40-bit address */
++ u32 addr_hi:8; /* high 8-bits of 40-bit address */
++ u32 rev4:16;
++ u32 queue:3;
++ u32 rev5:3;
++ u32 dd:2; /* dynamic debug */
++ };
++ struct {
++ u64 addr:40;
++ /* More efficient address accessor */
++ u64 __notaddress:24;
++ };
++ };
++} __packed;
++
++struct fsl_qdma_csgf {
++ u32 offset:13;
++ u32 rev1:19;
++ u32 length:30;
++ u32 f:1;
++ u32 e:1;
++ union {
++ struct {
++ u32 addr_lo; /* low 32-bits of 40-bit address */
++ u32 addr_hi:8; /* high 8-bits of 40-bit address */
++ u32 rev2:24;
++ };
++ struct {
++ u64 addr:40;
++ /* More efficient address accessor */
++ u64 __notaddress:24;
++ };
++ };
++} __packed;
++
++struct fsl_qdma_sdf {
++ u32 rev3:32;
++	u32 ssd:12;	/* source stride distance */
++	u32 sss:12;	/* source stride size */
++ u32 rev4:8;
++ u32 rev5:32;
++ u32 cmd;
++} __packed;
++
++struct fsl_qdma_ddf {
++ u32 rev1:32;
++ u32 dsd:12; /* Destination stride distance */
++ u32 dss:12; /* Destination stride size */
++ u32 rev2:8;
++ u32 rev3:32;
++ u32 cmd;
++} __packed;
++
++struct fsl_qdma_chan {
++ struct virt_dma_chan vchan;
++ struct virt_dma_desc vdesc;
++ enum dma_status status;
++ u32 slave_id;
++ struct fsl_qdma_engine *qdma;
++ struct fsl_qdma_queue *queue;
++ struct list_head qcomp;
++};
++
++struct fsl_qdma_queue {
++ struct fsl_qdma_ccdf *virt_head;
++ struct fsl_qdma_ccdf *virt_tail;
++ struct list_head comp_used;
++ struct list_head comp_free;
++ struct dma_pool *comp_pool;
++ struct dma_pool *sg_pool;
++ spinlock_t queue_lock;
++ dma_addr_t bus_addr;
++ u32 n_cq;
++ u32 id;
++ struct fsl_qdma_ccdf *cq;
++};
++
++struct fsl_qdma_sg {
++ dma_addr_t bus_addr;
++ void *virt_addr;
++};
++
++struct fsl_qdma_comp {
++ dma_addr_t bus_addr;
++ void *virt_addr;
++ struct fsl_qdma_chan *qchan;
++ struct fsl_qdma_sg *sg_block;
++ struct virt_dma_desc vdesc;
++ struct list_head list;
++ u32 sg_block_src;
++ u32 sg_block_dst;
++};
++
++struct fsl_qdma_engine {
++ struct dma_device dma_dev;
++ void __iomem *ctrl_base;
++ void __iomem *status_base;
++ void __iomem *block_base;
++ u32 n_chans;
++ u32 n_queues;
++ struct mutex fsl_qdma_mutex;
++ int error_irq;
++ int queue_irq;
++ bool big_endian;
++ struct fsl_qdma_queue *queue;
++ struct fsl_qdma_queue *status;
++ struct fsl_qdma_chan chans[];
++
++};
++
++static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
++{
++ if (qdma->big_endian)
++ return ioread32be(addr);
++ else
++ return ioread32(addr);
++}
++
++static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
++ void __iomem *addr)
++{
++ if (qdma->big_endian)
++ iowrite32be(val, addr);
++ else
++ iowrite32(val, addr);
++}
++
++static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
++{
++ return container_of(chan, struct fsl_qdma_chan, vchan.chan);
++}
++
++static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
++{
++ return container_of(vd, struct fsl_qdma_comp, vdesc);
++}
++
++static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
++{
++ /*
++	 * In QDMA mode, we don't need to do anything.
++ */
++ return 0;
++}
++
++static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
++{
++ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
++ unsigned long flags;
++ LIST_HEAD(head);
++
++ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
++ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
++ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
++
++ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
++}
++
++static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
++ dma_addr_t dst, dma_addr_t src, u32 len)
++{
++ struct fsl_qdma_ccdf *ccdf;
++ struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
++ struct fsl_qdma_sdf *sdf;
++ struct fsl_qdma_ddf *ddf;
++
++ ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
++ csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
++ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
++ csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
++ sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
++ ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
++
++ memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
++ /* Head Command Descriptor(Frame Descriptor) */
++ ccdf->addr = fsl_comp->bus_addr + 16;
++ ccdf->format = 1; /* Compound S/G format */
++ /* Status notification is enqueued to status queue. */
++ ccdf->ser = 1;
++ /* Compound Command Descriptor(Frame List Table) */
++ csgf_desc->addr = fsl_comp->bus_addr + 64;
++	/* The length must be 32, as required for a compound S/G descriptor. */
++ csgf_desc->length = 32;
++ csgf_src->addr = src;
++ csgf_src->length = len;
++ csgf_dest->addr = dst;
++ csgf_dest->length = len;
++ /* This entry is the last entry. */
++ csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
++ /* Descriptor Buffer */
++ sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
++ ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
++ ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET;
++}
++
++static void fsl_qdma_comp_fill_sg(
++ struct fsl_qdma_comp *fsl_comp,
++ struct scatterlist *dst_sg, unsigned int dst_nents,
++ struct scatterlist *src_sg, unsigned int src_nents)
++{
++ struct fsl_qdma_ccdf *ccdf;
++ struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg;
++ struct fsl_qdma_sdf *sdf;
++ struct fsl_qdma_ddf *ddf;
++ struct fsl_qdma_sg *sg_block, *temp;
++ struct scatterlist *sg;
++ u64 total_src_len = 0;
++ u64 total_dst_len = 0;
++ u32 i;
++
++ ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
++ csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
++ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
++ csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
++ sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
++ ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
++
++ memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
++ /* Head Command Descriptor(Frame Descriptor) */
++ ccdf->addr = fsl_comp->bus_addr + 16;
++ ccdf->format = 1; /* Compound S/G format */
++ /* Status notification is enqueued to status queue. */
++ ccdf->ser = 1;
++
++ /* Compound Command Descriptor(Frame List Table) */
++ csgf_desc->addr = fsl_comp->bus_addr + 64;
++	/* The length must be 32, as required for a compound S/G descriptor. */
++ csgf_desc->length = 32;
++
++ sg_block = fsl_comp->sg_block;
++ csgf_src->addr = sg_block->bus_addr;
++	/* This entry links to the s/g table. */
++ csgf_src->e = FSL_QDMA_E_SG_TABLE;
++
++ temp = sg_block + fsl_comp->sg_block_src;
++ csgf_dest->addr = temp->bus_addr;
++ /* This entry is the last entry. */
++ csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
++	/* This entry links to the s/g table. */
++ csgf_dest->e = FSL_QDMA_E_SG_TABLE;
++
++ for_each_sg(src_sg, sg, src_nents, i) {
++ temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
++ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
++ i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
++ csgf_sg->addr = sg_dma_address(sg);
++ csgf_sg->length = sg_dma_len(sg);
++ total_src_len += sg_dma_len(sg);
++
++ if (i == src_nents - 1)
++ csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
++ if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
++ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
++ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
++ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
++ temp = sg_block +
++ i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
++ csgf_sg->addr = temp->bus_addr;
++ csgf_sg->e = FSL_QDMA_E_SG_TABLE;
++ }
++ }
++
++ sg_block += fsl_comp->sg_block_src;
++ for_each_sg(dst_sg, sg, dst_nents, i) {
++ temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
++ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
++ i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
++ csgf_sg->addr = sg_dma_address(sg);
++ csgf_sg->length = sg_dma_len(sg);
++ total_dst_len += sg_dma_len(sg);
++
++ if (i == dst_nents - 1)
++ csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
++ if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
++ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
++ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
++ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
++ temp = sg_block +
++ i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
++ csgf_sg->addr = temp->bus_addr;
++ csgf_sg->e = FSL_QDMA_E_SG_TABLE;
++ }
++ }
++
++ if (total_src_len != total_dst_len)
++ dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
++			"The data lengths for src and dst don't match.\n");
++
++ csgf_src->length = total_src_len;
++ csgf_dest->length = total_dst_len;
++
++ /* Descriptor Buffer */
++ sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
++ ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
++}
++
++/*
++ * Pre-request full command descriptors for enqueue.
++ */
++static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
++{
++ struct fsl_qdma_comp *comp_temp;
++ int i;
++
++ for (i = 0; i < queue->n_cq; i++) {
++ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
++ if (!comp_temp)
++			return -ENOMEM;
++ comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
++ GFP_NOWAIT,
++ &comp_temp->bus_addr);
++ if (!comp_temp->virt_addr)
++			return -ENOMEM;
++ list_add_tail(&comp_temp->list, &queue->comp_free);
++ }
++ return 0;
++}
++
++/*
++ * Request a command descriptor for enqueue.
++ */
++static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
++ struct fsl_qdma_chan *fsl_chan,
++ unsigned int dst_nents,
++ unsigned int src_nents)
++{
++ struct fsl_qdma_comp *comp_temp;
++ struct fsl_qdma_sg *sg_block;
++ struct fsl_qdma_queue *queue = fsl_chan->queue;
++ unsigned long flags;
++ unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
++
++ spin_lock_irqsave(&queue->queue_lock, flags);
++ if (list_empty(&queue->comp_free)) {
++ spin_unlock_irqrestore(&queue->queue_lock, flags);
++ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
++ if (!comp_temp)
++ return NULL;
++ comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
++ GFP_NOWAIT,
++ &comp_temp->bus_addr);
++ if (!comp_temp->virt_addr)
++ return NULL;
++ } else {
++ comp_temp = list_first_entry(&queue->comp_free,
++ struct fsl_qdma_comp,
++ list);
++ list_del(&comp_temp->list);
++ spin_unlock_irqrestore(&queue->queue_lock, flags);
++ }
++
++ if (dst_nents != 0)
++ dst_sg_entry_block = dst_nents /
++ (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
++ else
++ dst_sg_entry_block = 0;
++
++ if (src_nents != 0)
++ src_sg_entry_block = src_nents /
++ (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
++ else
++ src_sg_entry_block = 0;
++
++ sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
++ if (sg_entry_total) {
++ sg_block = kzalloc(sizeof(*sg_block) *
++ sg_entry_total,
++ GFP_KERNEL);
++ if (!sg_block)
++ return NULL;
++ comp_temp->sg_block = sg_block;
++ for (i = 0; i < sg_entry_total; i++) {
++ sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
++ GFP_NOWAIT,
++ &sg_block->bus_addr);
++ memset(sg_block->virt_addr, 0,
++ FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
++ sg_block++;
++ }
++ }
++
++ comp_temp->sg_block_src = src_sg_entry_block;
++ comp_temp->sg_block_dst = dst_sg_entry_block;
++ comp_temp->qchan = fsl_chan;
++
++ return comp_temp;
++}
++
++static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
++ struct platform_device *pdev,
++ unsigned int queue_num)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct fsl_qdma_queue *queue_head, *queue_temp;
++ int ret, len, i;
++ unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
++
++ if (queue_num > FSL_QDMA_QUEUE_MAX)
++ queue_num = FSL_QDMA_QUEUE_MAX;
++ len = sizeof(*queue_head) * queue_num;
++ queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
++ if (!queue_head)
++ return NULL;
++
++ ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
++ queue_num);
++ if (ret) {
++ dev_err(&pdev->dev, "Can't get queue-sizes.\n");
++ return NULL;
++ }
++
++ for (i = 0; i < queue_num; i++) {
++ if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
++ || queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
++			dev_err(&pdev->dev, "Invalid queue-sizes.\n");
++ return NULL;
++ }
++ queue_temp = queue_head + i;
++ queue_temp->cq = dma_alloc_coherent(&pdev->dev,
++ sizeof(struct fsl_qdma_ccdf) *
++ queue_size[i],
++ &queue_temp->bus_addr,
++ GFP_KERNEL);
++ if (!queue_temp->cq)
++ return NULL;
++ queue_temp->n_cq = queue_size[i];
++ queue_temp->id = i;
++ queue_temp->virt_head = queue_temp->cq;
++ queue_temp->virt_tail = queue_temp->cq;
++ /*
++ * The dma pool for queue command buffer
++ */
++ queue_temp->comp_pool = dma_pool_create("comp_pool",
++ &pdev->dev,
++ FSL_QDMA_BASE_BUFFER_SIZE,
++ 16, 0);
++ if (!queue_temp->comp_pool) {
++ dma_free_coherent(&pdev->dev,
++ sizeof(struct fsl_qdma_ccdf) *
++ queue_size[i],
++ queue_temp->cq,
++ queue_temp->bus_addr);
++ return NULL;
++ }
++ /*
++		 * The dma pool for the s/g entry buffers
++ */
++ queue_temp->sg_pool = dma_pool_create("sg_pool",
++ &pdev->dev,
++ FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
++ 64, 0);
++ if (!queue_temp->sg_pool) {
++ dma_free_coherent(&pdev->dev,
++ sizeof(struct fsl_qdma_ccdf) *
++ queue_size[i],
++ queue_temp->cq,
++ queue_temp->bus_addr);
++ dma_pool_destroy(queue_temp->comp_pool);
++ return NULL;
++ }
++ /*
++ * List for queue command buffer
++ */
++ INIT_LIST_HEAD(&queue_temp->comp_used);
++ INIT_LIST_HEAD(&queue_temp->comp_free);
++ spin_lock_init(&queue_temp->queue_lock);
++ }
++
++ return queue_head;
++}
++
++static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
++ struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct fsl_qdma_queue *status_head;
++ unsigned int status_size;
++ int ret;
++
++ ret = of_property_read_u32(np, "status-sizes", &status_size);
++ if (ret) {
++ dev_err(&pdev->dev, "Can't get status-sizes.\n");
++ return NULL;
++ }
++ if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
++ || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
++		dev_err(&pdev->dev, "Invalid status-sizes.\n");
++ return NULL;
++ }
++ status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
++ GFP_KERNEL);
++ if (!status_head)
++ return NULL;
++
++ /*
++ * Buffer for queue command
++ */
++ status_head->cq = dma_alloc_coherent(&pdev->dev,
++ sizeof(struct fsl_qdma_ccdf) *
++ status_size,
++ &status_head->bus_addr,
++ GFP_KERNEL);
++ if (!status_head->cq)
++ return NULL;
++ status_head->n_cq = status_size;
++ status_head->virt_head = status_head->cq;
++ status_head->virt_tail = status_head->cq;
++ status_head->comp_pool = NULL;
++
++ return status_head;
++}
++
++static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
++{
++ void __iomem *ctrl = fsl_qdma->ctrl_base;
++ void __iomem *block = fsl_qdma->block_base;
++ int i, count = 5;
++ u32 reg;
++
++ /* Disable the command queue and wait for idle state. */
++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
++ reg |= FSL_QDMA_DMR_DQD;
++ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
++ for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
++ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
++
++ while (1) {
++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
++ if (!(reg & FSL_QDMA_DSR_DB))
++ break;
++ if (count-- < 0)
++ return -EBUSY;
++ udelay(100);
++ }
++
++ /* Disable status queue. */
++ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
++
++ /*
++ * Clear the command queue interrupt detect register for all queues.
++ */
++ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
++
++ return 0;
++}
++
++static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
++{
++ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
++ struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
++ struct fsl_qdma_queue *temp_queue;
++ struct fsl_qdma_comp *fsl_comp;
++ struct fsl_qdma_ccdf *status_addr;
++ struct fsl_qdma_csgf *csgf_src;
++ void __iomem *block = fsl_qdma->block_base;
++ u32 reg, i;
++ bool duplicate, duplicate_handle;
++
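++	/*
++	 * Drain the status queue: each entry reports a completed command
++	 * descriptor. An entry carrying the same queue and address as the
++	 * previous one is treated as a duplicated notification and is
++	 * only popped from the status queue.
++	 */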
++ while (1) {
++ duplicate = 0;
++ duplicate_handle = 0;
++ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
++ if (reg & FSL_QDMA_BSQSR_QE)
++ return 0;
++ status_addr = fsl_status->virt_head;
++ if (status_addr->queue == pre_queue &&
++ status_addr->addr == pre_addr)
++ duplicate = 1;
++
++ i = status_addr->queue;
++ pre_queue = status_addr->queue;
++ pre_addr = status_addr->addr;
++ temp_queue = fsl_queue + i;
++ spin_lock(&temp_queue->queue_lock);
++ if (list_empty(&temp_queue->comp_used)) {
++ if (duplicate)
++ duplicate_handle = 1;
++ else {
++ spin_unlock(&temp_queue->queue_lock);
++ return -1;
++ }
++ } else {
++ fsl_comp = list_first_entry(&temp_queue->comp_used,
++ struct fsl_qdma_comp,
++ list);
++ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
++ + 2;
++ if (fsl_comp->bus_addr + 16 !=
++ (dma_addr_t)status_addr->addr) {
++ if (duplicate)
++ duplicate_handle = 1;
++ else {
++ spin_unlock(&temp_queue->queue_lock);
++ return -1;
++ }
++ }
++ }
++
++ if (duplicate_handle) {
++ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
++ reg |= FSL_QDMA_BSQMR_DI;
++ status_addr->addr = 0x0;
++ fsl_status->virt_head++;
++ if (fsl_status->virt_head == fsl_status->cq
++ + fsl_status->n_cq)
++ fsl_status->virt_head = fsl_status->cq;
++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
++ spin_unlock(&temp_queue->queue_lock);
++ continue;
++ }
++ list_del(&fsl_comp->list);
++
++ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
++ reg |= FSL_QDMA_BSQMR_DI;
++ status_addr->addr = 0x0;
++ fsl_status->virt_head++;
++ if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
++ fsl_status->virt_head = fsl_status->cq;
++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
++ spin_unlock(&temp_queue->queue_lock);
++
++ spin_lock(&fsl_comp->qchan->vchan.lock);
++ vchan_cookie_complete(&fsl_comp->vdesc);
++ fsl_comp->qchan->status = DMA_COMPLETE;
++ spin_unlock(&fsl_comp->qchan->vchan.lock);
++ }
++ return 0;
++}
++
++static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
++{
++ struct fsl_qdma_engine *fsl_qdma = dev_id;
++ unsigned int intr;
++ void __iomem *status = fsl_qdma->status_base;
++
++ intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
++
++ if (intr)
++ dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
++
++ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
++{
++ struct fsl_qdma_engine *fsl_qdma = dev_id;
++ unsigned int intr, reg;
++ void __iomem *block = fsl_qdma->block_base;
++ void __iomem *ctrl = fsl_qdma->ctrl_base;
++
++ intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
++
++ if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
++ intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
++
++ if (intr != 0) {
++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
++ reg |= FSL_QDMA_DMR_DQD;
++ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
++ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
++ dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
++ }
++
++ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
++
++ return IRQ_HANDLED;
++}
++
++static int
++fsl_qdma_irq_init(struct platform_device *pdev,
++ struct fsl_qdma_engine *fsl_qdma)
++{
++ int ret;
++
++ fsl_qdma->error_irq = platform_get_irq_byname(pdev,
++ "qdma-error");
++ if (fsl_qdma->error_irq < 0) {
++ dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
++ return fsl_qdma->error_irq;
++ }
++
++ fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
++ if (fsl_qdma->queue_irq < 0) {
++ dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
++ return fsl_qdma->queue_irq;
++ }
++
++ ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
++ fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
++ if (ret) {
++ dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
++ return ret;
++ }
++ ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
++ fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
++ if (ret) {
++ dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
++{
++ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
++ struct fsl_qdma_queue *temp;
++ void __iomem *ctrl = fsl_qdma->ctrl_base;
++ void __iomem *status = fsl_qdma->status_base;
++ void __iomem *block = fsl_qdma->block_base;
++ int i, ret;
++ u32 reg;
++
++ /* Try to halt the qDMA engine first. */
++ ret = fsl_qdma_halt(fsl_qdma);
++ if (ret) {
++ dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
++ return ret;
++ }
++
++ /*
++ * Clear the command queue interrupt detect register for all queues.
++ */
++ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
++
++ for (i = 0; i < fsl_qdma->n_queues; i++) {
++ temp = fsl_queue + i;
++ /*
++ * Initialize Command Queue registers to point to the first
++ * command descriptor in memory.
++ * Dequeue Pointer Address Registers
++ * Enqueue Pointer Address Registers
++ */
++ qdma_writel(fsl_qdma, temp->bus_addr,
++ block + FSL_QDMA_BCQDPA_SADDR(i));
++ qdma_writel(fsl_qdma, temp->bus_addr,
++ block + FSL_QDMA_BCQEPA_SADDR(i));
++
++ /* Initialize the queue mode. */
++ reg = FSL_QDMA_BCQMR_EN;
++ reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
++ reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);
++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
++ }
++
++ /*
++ * Workaround for erratum: ERR010812.
++	 * We must enable XOFF to avoid enqueue rejections.
++ * Setting SQCCMR ENTER_WM to 0x20.
++ */
++ qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
++ block + FSL_QDMA_SQCCMR);
++ /*
++ * Initialize status queue registers to point to the first
++ * command descriptor in memory.
++ * Dequeue Pointer Address Registers
++ * Enqueue Pointer Address Registers
++ */
++ qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
++ block + FSL_QDMA_SQEPAR);
++ qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
++ block + FSL_QDMA_SQDPAR);
++ /* Initialize status queue interrupt. */
++ qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
++ block + FSL_QDMA_BCQIER(0));
++ qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
++ | 0x8000,
++ block + FSL_QDMA_BSQICR);
++ qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
++ block + FSL_QDMA_CQIER);
++ /* Initialize controller interrupt register. */
++ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
++ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
++
++ /* Initialize the status queue mode. */
++ reg = FSL_QDMA_BSQMR_EN;
++ reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6);
++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
++
++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
++ reg &= ~FSL_QDMA_DMR_DQD;
++ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
++
++ return 0;
++}
++
++static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg(
++ struct dma_chan *chan,
++ struct scatterlist *dst_sg, unsigned int dst_nents,
++ struct scatterlist *src_sg, unsigned int src_nents,
++ unsigned long flags)
++{
++ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
++ struct fsl_qdma_comp *fsl_comp;
++
++ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan,
++ dst_nents,
++ src_nents);
++ fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents);
++
++ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
++}
++
++static struct dma_async_tx_descriptor *
++fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
++ dma_addr_t src, size_t len, unsigned long flags)
++{
++ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
++ struct fsl_qdma_comp *fsl_comp;
++
++ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
++ fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
++
++ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
++}
++
++static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
++{
++ void __iomem *block = fsl_chan->qdma->block_base;
++ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
++ struct fsl_qdma_comp *fsl_comp;
++ struct virt_dma_desc *vdesc;
++ u32 reg;
++
++ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
++ if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
++ return;
++ vdesc = vchan_next_desc(&fsl_chan->vchan);
++ if (!vdesc)
++ return;
++ list_del(&vdesc->node);
++ fsl_comp = to_fsl_qdma_comp(vdesc);
++
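++	/* Copy the 16-byte frame descriptor into the circular command queue. */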
++ memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
++ if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
++ fsl_queue->virt_head = fsl_queue->cq;
++
++ list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
++ barrier();
++ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
++ reg |= FSL_QDMA_BCQMR_EI;
++ qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
++ fsl_chan->status = DMA_IN_PROGRESS;
++}
++
++static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
++ dma_cookie_t cookie, struct dma_tx_state *txstate)
++{
++ return dma_cookie_status(chan, cookie, txstate);
++}
++
++static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
++{
++ struct fsl_qdma_comp *fsl_comp;
++ struct fsl_qdma_queue *fsl_queue;
++ struct fsl_qdma_sg *sg_block;
++ unsigned long flags;
++ unsigned int i;
++
++ fsl_comp = to_fsl_qdma_comp(vdesc);
++ fsl_queue = fsl_comp->qchan->queue;
++
++ if (fsl_comp->sg_block) {
++ for (i = 0; i < fsl_comp->sg_block_src +
++ fsl_comp->sg_block_dst; i++) {
++ sg_block = fsl_comp->sg_block + i;
++ dma_pool_free(fsl_queue->sg_pool,
++ sg_block->virt_addr,
++ sg_block->bus_addr);
++ }
++ kfree(fsl_comp->sg_block);
++ }
++
++ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
++ list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
++ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
++}
++
++static void fsl_qdma_issue_pending(struct dma_chan *chan)
++{
++ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
++ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
++ unsigned long flags;
++
++ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
++ spin_lock(&fsl_chan->vchan.lock);
++ if (vchan_issue_pending(&fsl_chan->vchan))
++ fsl_qdma_enqueue_desc(fsl_chan);
++ spin_unlock(&fsl_chan->vchan.lock);
++ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
++}
++
++static int fsl_qdma_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct fsl_qdma_engine *fsl_qdma;
++ struct fsl_qdma_chan *fsl_chan;
++ struct resource *res;
++ unsigned int len, chans, queues;
++ int ret, i;
++
++ ret = of_property_read_u32(np, "channels", &chans);
++ if (ret) {
++ dev_err(&pdev->dev, "Can't get channels.\n");
++ return ret;
++ }
++
++ len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
++ fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
++ if (!fsl_qdma)
++ return -ENOMEM;
++
++ ret = of_property_read_u32(np, "queues", &queues);
++ if (ret) {
++ dev_err(&pdev->dev, "Can't get queues.\n");
++ return ret;
++ }
++
++ fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
++ if (!fsl_qdma->queue)
++ return -ENOMEM;
++
++ fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
++ if (!fsl_qdma->status)
++ return -ENOMEM;
++
++ fsl_qdma->n_chans = chans;
++ fsl_qdma->n_queues = queues;
++ mutex_init(&fsl_qdma->fsl_qdma_mutex);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(fsl_qdma->ctrl_base))
++ return PTR_ERR(fsl_qdma->ctrl_base);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(fsl_qdma->status_base))
++ return PTR_ERR(fsl_qdma->status_base);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
++ fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(fsl_qdma->block_base))
++ return PTR_ERR(fsl_qdma->block_base);
++
++ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
++ if (ret)
++ return ret;
++
++ fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
++ INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
++ for (i = 0; i < fsl_qdma->n_chans; i++) {
++ struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
++
++ fsl_chan->qdma = fsl_qdma;
++ fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
++ fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
++ INIT_LIST_HEAD(&fsl_chan->qcomp);
++ vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
++ }
++ for (i = 0; i < fsl_qdma->n_queues; i++)
++ fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
++
++ dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
++ dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask);
++
++ fsl_qdma->dma_dev.dev = &pdev->dev;
++ fsl_qdma->dma_dev.device_alloc_chan_resources
++ = fsl_qdma_alloc_chan_resources;
++ fsl_qdma->dma_dev.device_free_chan_resources
++ = fsl_qdma_free_chan_resources;
++ fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
++ fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
++ fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg;
++ fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
++
++ dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
++
++ platform_set_drvdata(pdev, fsl_qdma);
++
++ ret = dma_async_device_register(&fsl_qdma->dma_dev);
++ if (ret) {
++ dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
++ return ret;
++ }
++
++ ret = fsl_qdma_reg_init(fsl_qdma);
++ if (ret) {
++		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
++ return ret;
++ }
++
++
++ return 0;
++}
++
++static int fsl_qdma_remove(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
++ struct fsl_qdma_queue *queue_temp;
++ struct fsl_qdma_queue *status = fsl_qdma->status;
++ struct fsl_qdma_comp *comp_temp, *_comp_temp;
++ int i;
++
++ of_dma_controller_free(np);
++ dma_async_device_unregister(&fsl_qdma->dma_dev);
++
++ /* Free descriptor areas */
++ for (i = 0; i < fsl_qdma->n_queues; i++) {
++ queue_temp = fsl_qdma->queue + i;
++ list_for_each_entry_safe(comp_temp, _comp_temp,
++ &queue_temp->comp_used, list) {
++ dma_pool_free(queue_temp->comp_pool,
++ comp_temp->virt_addr,
++ comp_temp->bus_addr);
++ list_del(&comp_temp->list);
++ kfree(comp_temp);
++ }
++ list_for_each_entry_safe(comp_temp, _comp_temp,
++ &queue_temp->comp_free, list) {
++ dma_pool_free(queue_temp->comp_pool,
++ comp_temp->virt_addr,
++ comp_temp->bus_addr);
++ list_del(&comp_temp->list);
++ kfree(comp_temp);
++ }
++ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
++ queue_temp->n_cq, queue_temp->cq,
++ queue_temp->bus_addr);
++ dma_pool_destroy(queue_temp->comp_pool);
++ }
++
++ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
++ status->n_cq, status->cq, status->bus_addr);
++ return 0;
++}
++
++static const struct of_device_id fsl_qdma_dt_ids[] = {
++ { .compatible = "fsl,ls1021a-qdma", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
++
++static struct platform_driver fsl_qdma_driver = {
++ .driver = {
++ .name = "fsl-qdma",
++ .owner = THIS_MODULE,
++ .of_match_table = fsl_qdma_dt_ids,
++ },
++ .probe = fsl_qdma_probe,
++ .remove = fsl_qdma_remove,
++};
++
++static int __init fsl_qdma_init(void)
++{
++ return platform_driver_register(&fsl_qdma_driver);
++}
++subsys_initcall(fsl_qdma_init);
++
++static void __exit fsl_qdma_exit(void)
++{
++ platform_driver_unregister(&fsl_qdma_driver);
++}
++module_exit(fsl_qdma_exit);
++
++MODULE_ALIAS("platform:fsl-qdma");
++MODULE_DESCRIPTION("Freescale qDMA engine driver");
++MODULE_LICENSE("GPL v2");
+--
+2.14.1
+
diff --git a/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch b/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch
new file mode 100644
index 0000000000..190e8d5307
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch
@@ -0,0 +1,331 @@
+From a5b3155b532289af793c26251cb087b4a24d5c15 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:13:12 +0800
+Subject: [PATCH] flextimer: support layerscape
+
+This is an integrated patch for layerscape flextimer support.
+
+Signed-off-by: Wang Dongsheng <dongsheng.wang@nxp.com>
+Signed-off-by: Meng Yi <meng.yi@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/clocksource/fsl_ftm_timer.c | 8 +-
+ drivers/soc/fsl/layerscape/ftm_alarm.c | 286 +++++++++++++++++++++++++++++++++
+ 2 files changed, 290 insertions(+), 4 deletions(-)
+ create mode 100644 drivers/soc/fsl/layerscape/ftm_alarm.c
+
+diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
+index 738515b8..770bbbca 100644
+--- a/drivers/clocksource/fsl_ftm_timer.c
++++ b/drivers/clocksource/fsl_ftm_timer.c
+@@ -83,11 +83,11 @@ static inline void ftm_counter_disable(void __iomem *base)
+
+ static inline void ftm_irq_acknowledge(void __iomem *base)
+ {
+- u32 val;
++ unsigned int timeout = 100;
+
+- val = ftm_readl(base + FTM_SC);
+- val &= ~FTM_SC_TOF;
+- ftm_writel(val, base + FTM_SC);
++ while ((FTM_SC_TOF & ftm_readl(base + FTM_SC)) && timeout--)
++ ftm_writel(ftm_readl(base + FTM_SC) & (~FTM_SC_TOF),
++ base + FTM_SC);
+ }
+
+ static inline void ftm_irq_enable(void __iomem *base)
+diff --git a/drivers/soc/fsl/layerscape/ftm_alarm.c b/drivers/soc/fsl/layerscape/ftm_alarm.c
+new file mode 100644
+index 00000000..6f9882ff
+--- /dev/null
++++ b/drivers/soc/fsl/layerscape/ftm_alarm.c
+@@ -0,0 +1,286 @@
++/*
++ * Freescale FlexTimer Module (FTM) Alarm driver.
++ *
++ * Copyright 2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ */
++
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/platform_device.h>
++
++#define FTM_SC 0x00
++#define FTM_SC_CLK_SHIFT 3
++#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT)
++#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT)
++#define FTM_SC_PS_MASK 0x7
++#define FTM_SC_TOIE BIT(6)
++#define FTM_SC_TOF BIT(7)
++
++#define FTM_SC_CLKS_FIXED_FREQ 0x02
++
++#define FTM_CNT 0x04
++#define FTM_MOD 0x08
++#define FTM_CNTIN 0x4C
++
++#define FIXED_FREQ_CLK 32000
++#define MAX_FREQ_DIV (1 << FTM_SC_PS_MASK)
++#define MAX_COUNT_VAL 0xffff
++
++static void __iomem *ftm1_base;
++static void __iomem *rcpm_ftm_addr;
++static u32 alarm_freq;
++static bool big_endian;
++
++static inline u32 ftm_readl(void __iomem *addr)
++{
++ if (big_endian)
++ return ioread32be(addr);
++
++ return ioread32(addr);
++}
++
++static inline void ftm_writel(u32 val, void __iomem *addr)
++{
++ if (big_endian)
++ iowrite32be(val, addr);
++ else
++ iowrite32(val, addr);
++}
++
++static inline void ftm_counter_enable(void __iomem *base)
++{
++ u32 val;
++
++ /* select and enable counter clock source */
++ val = ftm_readl(base + FTM_SC);
++ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
++ val |= (FTM_SC_PS_MASK | FTM_SC_CLK(FTM_SC_CLKS_FIXED_FREQ));
++ ftm_writel(val, base + FTM_SC);
++}
++
++static inline void ftm_counter_disable(void __iomem *base)
++{
++ u32 val;
++
++ /* disable counter clock source */
++ val = ftm_readl(base + FTM_SC);
++ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
++ ftm_writel(val, base + FTM_SC);
++}
++
++static inline void ftm_irq_acknowledge(void __iomem *base)
++{
++ unsigned int timeout = 100;
++
++ while ((FTM_SC_TOF & ftm_readl(base + FTM_SC)) && timeout--)
++ ftm_writel(ftm_readl(base + FTM_SC) & (~FTM_SC_TOF),
++ base + FTM_SC);
++}
++
++static inline void ftm_irq_enable(void __iomem *base)
++{
++ u32 val;
++
++ val = ftm_readl(base + FTM_SC);
++ val |= FTM_SC_TOIE;
++ ftm_writel(val, base + FTM_SC);
++}
++
++static inline void ftm_irq_disable(void __iomem *base)
++{
++ u32 val;
++
++ val = ftm_readl(base + FTM_SC);
++ val &= ~FTM_SC_TOIE;
++ ftm_writel(val, base + FTM_SC);
++}
++
++static inline void ftm_reset_counter(void __iomem *base)
++{
++ /*
++ * The CNT register contains the FTM counter value.
++	 * Reset clears the CNT register. Writing any value to CNT
++ * updates the counter with its initial value, CNTIN.
++ */
++ ftm_writel(0x00, base + FTM_CNT);
++}
++
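++/*
++ * Convert an alarm time in seconds to FTM counter cycles at alarm_freq;
++ * returns 0 if the result would overflow the 16-bit counter.
++ */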
++static u32 time_to_cycle(unsigned long time)
++{
++ u32 cycle;
++
++ cycle = time * alarm_freq;
++ if (cycle > MAX_COUNT_VAL) {
++ pr_err("Out of alarm range.\n");
++ cycle = 0;
++ }
++
++ return cycle;
++}
++
++static u32 cycle_to_time(u32 cycle)
++{
++ return cycle / alarm_freq + 1;
++}
++
++static void ftm_clean_alarm(void)
++{
++ ftm_counter_disable(ftm1_base);
++
++ ftm_writel(0x00, ftm1_base + FTM_CNTIN);
++ ftm_writel(~0U, ftm1_base + FTM_MOD);
++
++ ftm_reset_counter(ftm1_base);
++}
++
++static int ftm_set_alarm(u64 cycle)
++{
++ ftm_irq_disable(ftm1_base);
++
++ /*
++ * The counter increments until the value of MOD is reached,
++ * at which point the counter is reloaded with the value of CNTIN.
++ * The TOF (the overflow flag) bit is set when the FTM counter
++	 * changes from MOD to CNTIN, so we should use cycle - 1.
++ */
++ ftm_writel(cycle - 1, ftm1_base + FTM_MOD);
++
++ ftm_counter_enable(ftm1_base);
++
++ ftm_irq_enable(ftm1_base);
++
++ return 0;
++}
++
++static irqreturn_t ftm_alarm_interrupt(int irq, void *dev_id)
++{
++ ftm_irq_acknowledge(ftm1_base);
++ ftm_irq_disable(ftm1_base);
++ ftm_clean_alarm();
++
++ return IRQ_HANDLED;
++}
++
++static ssize_t ftm_alarm_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ u32 count, val;
++
++ count = ftm_readl(ftm1_base + FTM_MOD);
++ val = ftm_readl(ftm1_base + FTM_CNT);
++ val = (count & MAX_COUNT_VAL) - val;
++ val = cycle_to_time(val);
++
++ return sprintf(buf, "%u\n", val);
++}
++
++static ssize_t ftm_alarm_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ u32 cycle;
++ unsigned long time;
++
++ if (kstrtoul(buf, 0, &time))
++ return -EINVAL;
++
++ ftm_clean_alarm();
++
++ cycle = time_to_cycle(time);
++ if (!cycle)
++ return -EINVAL;
++
++ ftm_set_alarm(cycle);
++
++ return count;
++}
++
++static struct device_attribute ftm_alarm_attributes = __ATTR(ftm_alarm, 0644,
++ ftm_alarm_show, ftm_alarm_store);
++
++static int ftm_alarm_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct resource *r;
++ int irq;
++ int ret;
++ u32 ippdexpcr;
++
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!r)
++ return -ENODEV;
++
++ ftm1_base = devm_ioremap_resource(&pdev->dev, r);
++ if (IS_ERR(ftm1_base))
++ return PTR_ERR(ftm1_base);
++
++ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "FlexTimer1");
++ if (r) {
++ rcpm_ftm_addr = devm_ioremap_resource(&pdev->dev, r);
++ if (IS_ERR(rcpm_ftm_addr))
++ return PTR_ERR(rcpm_ftm_addr);
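++		/*
++		 * Set what appears to be the FlexTimer1 bit in the RCPM
++		 * IPPDEXPCR register so the timer stays powered across
++		 * low-power states and can act as a wakeup source.
++		 */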
++ ippdexpcr = ioread32be(rcpm_ftm_addr);
++ ippdexpcr |= 0x20000;
++ iowrite32be(ippdexpcr, rcpm_ftm_addr);
++ }
++
++ irq = irq_of_parse_and_map(np, 0);
++ if (irq <= 0) {
++ pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
++ return -EINVAL;
++ }
++
++ big_endian = of_property_read_bool(np, "big-endian");
++
++ ret = devm_request_irq(&pdev->dev, irq, ftm_alarm_interrupt,
++ IRQF_NO_SUSPEND, dev_name(&pdev->dev), NULL);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to request irq\n");
++ return ret;
++ }
++
++ ret = device_create_file(&pdev->dev, &ftm_alarm_attributes);
++ if (ret) {
++		dev_err(&pdev->dev, "failed to create sysfs file\n");
++ return ret;
++ }
++
++ alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV;
++
++ ftm_clean_alarm();
++
++ device_init_wakeup(&pdev->dev, true);
++
++ return ret;
++}
++
++static const struct of_device_id ftm_alarm_match[] = {
++ { .compatible = "fsl,ftm-alarm", },
++ { .compatible = "fsl,ftm-timer", },
++ { },
++};
++
++static struct platform_driver ftm_alarm_driver = {
++ .probe = ftm_alarm_probe,
++ .driver = {
++ .name = "ftm-alarm",
++ .owner = THIS_MODULE,
++ .of_match_table = ftm_alarm_match,
++ },
++};
++
++static int __init ftm_alarm_init(void)
++{
++ return platform_driver_register(&ftm_alarm_driver);
++}
++device_initcall(ftm_alarm_init);
+--
+2.14.1
+
diff --git a/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch b/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch
new file mode 100644
index 0000000000..8321d392aa
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch
@@ -0,0 +1,73 @@
+From 4278a546526094dd57bfa3cf7ae2bf34092246db Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:10:01 +0800
+Subject: [PATCH] gpu: support layerscape
+
+This is an integrated patch for layerscape dcu support.
+
+Signed-off-by: Alison Wang <b18965@freescale.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+index cc2fde2a..54f60ba1 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+@@ -225,7 +225,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
+ if (!fsl_dev)
+ return 0;
+
+- disable_irq(fsl_dev->irq);
+ drm_kms_helper_poll_disable(fsl_dev->drm);
+
+ console_lock();
+@@ -243,6 +242,8 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
+ return PTR_ERR(fsl_dev->state);
+ }
+
++ disable_irq(fsl_dev->irq);
++
+ clk_disable_unprepare(fsl_dev->pix_clk);
+ clk_disable_unprepare(fsl_dev->clk);
+
+@@ -263,6 +264,14 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
+ return ret;
+ }
+
++ ret = clk_prepare_enable(fsl_dev->pix_clk);
++ if (ret < 0) {
++ dev_err(dev, "failed to enable dcu pix clk\n");
++ return ret;
++ }
++
++ enable_irq(fsl_dev->irq);
++
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
+ fsl_dcu_drm_init_planes(fsl_dev->drm);
+@@ -273,7 +282,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
+ console_unlock();
+
+ drm_kms_helper_poll_enable(fsl_dev->drm);
+- enable_irq(fsl_dev->irq);
+
+ return 0;
+ }
+@@ -389,6 +397,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
+ goto disable_clk;
+ }
+
++ ret = clk_prepare_enable(fsl_dev->pix_clk);
++ if (ret < 0) {
++ dev_err(dev, "failed to enable dcu pix clk\n");
++		goto disable_clk;
++ }
++
+ fsl_dev->tcon = fsl_tcon_init(dev);
+
+ drm = drm_dev_alloc(driver, dev);
+--
+2.14.1
+
diff --git a/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch b/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch
new file mode 100644
index 0000000000..51476dac5f
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch
@@ -0,0 +1,462 @@
+From d51e307e4ecf51832c9e3bc30acb5dbd559d5f4d Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:19:34 +0800
+Subject: [PATCH] guts: support layerscape
+
+This is an integrated patch for layerscape guts support.
+
+Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/base/soc.c | 12 ++-
+ drivers/soc/fsl/guts.c | 238 +++++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/fsl/guts.h | 125 +++++++++++++++----------
+ 3 files changed, 323 insertions(+), 52 deletions(-)
+ create mode 100644 drivers/soc/fsl/guts.c
+
+diff --git a/drivers/base/soc.c b/drivers/base/soc.c
+index 0c5cf872..0e701e22 100644
+--- a/drivers/base/soc.c
++++ b/drivers/base/soc.c
+@@ -167,19 +167,23 @@ static int soc_device_match_one(struct device *dev, void *arg)
+ const struct soc_device_attribute *match = arg;
+
+ if (match->machine &&
+- !glob_match(match->machine, soc_dev->attr->machine))
++ (!soc_dev->attr->machine ||
++ !glob_match(match->machine, soc_dev->attr->machine)))
+ return 0;
+
+ if (match->family &&
+- !glob_match(match->family, soc_dev->attr->family))
++ (!soc_dev->attr->family ||
++ !glob_match(match->family, soc_dev->attr->family)))
+ return 0;
+
+ if (match->revision &&
+- !glob_match(match->revision, soc_dev->attr->revision))
++ (!soc_dev->attr->revision ||
++ !glob_match(match->revision, soc_dev->attr->revision)))
+ return 0;
+
+ if (match->soc_id &&
+- !glob_match(match->soc_id, soc_dev->attr->soc_id))
++ (!soc_dev->attr->soc_id ||
++ !glob_match(match->soc_id, soc_dev->attr->soc_id)))
+ return 0;
+
+ return 1;
+diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
+new file mode 100644
+index 00000000..7d28784a
+--- /dev/null
++++ b/drivers/soc/fsl/guts.c
+@@ -0,0 +1,238 @@
++/*
++ * Freescale QorIQ Platforms GUTS Driver
++ *
++ * Copyright (C) 2016 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/of_fdt.h>
++#include <linux/sys_soc.h>
++#include <linux/of_address.h>
++#include <linux/platform_device.h>
++#include <linux/fsl/guts.h>
++
++struct guts {
++ struct ccsr_guts __iomem *regs;
++ bool little_endian;
++};
++
++struct fsl_soc_die_attr {
++ char *die;
++ u32 svr;
++ u32 mask;
++};
++
++static struct guts *guts;
++static struct soc_device_attribute soc_dev_attr;
++static struct soc_device *soc_dev;
++
++
++/* SoC die attribute definition for QorIQ platform */
++static const struct fsl_soc_die_attr fsl_soc_die[] = {
++ /*
++ * Power Architecture-based SoCs T Series
++ */
++
++ /* Die: T4240, SoC: T4240/T4160/T4080 */
++ { .die = "T4240",
++ .svr = 0x82400000,
++ .mask = 0xfff00000,
++ },
++ /* Die: T1040, SoC: T1040/T1020/T1042/T1022 */
++ { .die = "T1040",
++ .svr = 0x85200000,
++ .mask = 0xfff00000,
++ },
++ /* Die: T2080, SoC: T2080/T2081 */
++ { .die = "T2080",
++ .svr = 0x85300000,
++ .mask = 0xfff00000,
++ },
++ /* Die: T1024, SoC: T1024/T1014/T1023/T1013 */
++ { .die = "T1024",
++ .svr = 0x85400000,
++ .mask = 0xfff00000,
++ },
++
++ /*
++ * ARM-based SoCs LS Series
++ */
++
++ /* Die: LS1043A, SoC: LS1043A/LS1023A */
++ { .die = "LS1043A",
++ .svr = 0x87920000,
++ .mask = 0xffff0000,
++ },
++ /* Die: LS2080A, SoC: LS2080A/LS2040A/LS2085A */
++ { .die = "LS2080A",
++ .svr = 0x87010000,
++ .mask = 0xff3f0000,
++ },
++ /* Die: LS1088A, SoC: LS1088A/LS1048A/LS1084A/LS1044A */
++ { .die = "LS1088A",
++ .svr = 0x87030000,
++ .mask = 0xff3f0000,
++ },
++ /* Die: LS1012A, SoC: LS1012A */
++ { .die = "LS1012A",
++ .svr = 0x87040000,
++ .mask = 0xffff0000,
++ },
++ /* Die: LS1046A, SoC: LS1046A/LS1026A */
++ { .die = "LS1046A",
++ .svr = 0x87070000,
++ .mask = 0xffff0000,
++ },
++ /* Die: LS2088A, SoC: LS2088A/LS2048A/LS2084A/LS2044A */
++ { .die = "LS2088A",
++ .svr = 0x87090000,
++ .mask = 0xff3f0000,
++ },
++ /* Die: LS1021A, SoC: LS1021A/LS1020A/LS1022A */
++ { .die = "LS1021A",
++ .svr = 0x87000000,
++ .mask = 0xfff70000,
++ },
++ { },
++};
++
++static const struct fsl_soc_die_attr *fsl_soc_die_match(
++ u32 svr, const struct fsl_soc_die_attr *matches)
++{
++ while (matches->svr) {
++ if (matches->svr == (svr & matches->mask))
++ return matches;
++ matches++;
++	}
++ return NULL;
++}
++
++u32 fsl_guts_get_svr(void)
++{
++ u32 svr = 0;
++
++ if (!guts || !guts->regs)
++ return svr;
++
++ if (guts->little_endian)
++ svr = ioread32(&guts->regs->svr);
++ else
++ svr = ioread32be(&guts->regs->svr);
++
++ return svr;
++}
++EXPORT_SYMBOL(fsl_guts_get_svr);
++
++static int fsl_guts_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct device *dev = &pdev->dev;
++ struct resource *res;
++ const struct fsl_soc_die_attr *soc_die;
++ const char *machine;
++ u32 svr;
++
++ /* Initialize guts */
++ guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL);
++ if (!guts)
++ return -ENOMEM;
++
++ guts->little_endian = of_property_read_bool(np, "little-endian");
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ guts->regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(guts->regs))
++ return PTR_ERR(guts->regs);
++
++ /* Register soc device */
++ machine = of_flat_dt_get_machine_name();
++ if (machine)
++ soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
++
++ svr = fsl_guts_get_svr();
++ soc_die = fsl_soc_die_match(svr, fsl_soc_die);
++ if (soc_die) {
++ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL,
++ "QorIQ %s", soc_die->die);
++ } else {
++ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ");
++ }
++ soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL,
++ "svr:0x%08x", svr);
++ soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
++ (svr >> 4) & 0xf, svr & 0xf);
++
++ soc_dev = soc_device_register(&soc_dev_attr);
++ if (IS_ERR(soc_dev))
++ return PTR_ERR(soc_dev);
++
++ pr_info("Machine: %s\n", soc_dev_attr.machine);
++ pr_info("SoC family: %s\n", soc_dev_attr.family);
++ pr_info("SoC ID: %s, Revision: %s\n",
++ soc_dev_attr.soc_id, soc_dev_attr.revision);
++ return 0;
++}
++
++static int fsl_guts_remove(struct platform_device *dev)
++{
++ soc_device_unregister(soc_dev);
++ return 0;
++}
++
++/*
++ * Table for matching compatible strings, for device tree
++ * guts node, for Freescale QorIQ SOCs.
++ */
++static const struct of_device_id fsl_guts_of_match[] = {
++ { .compatible = "fsl,qoriq-device-config-1.0", },
++ { .compatible = "fsl,qoriq-device-config-2.0", },
++ { .compatible = "fsl,p1010-guts", },
++ { .compatible = "fsl,p1020-guts", },
++ { .compatible = "fsl,p1021-guts", },
++ { .compatible = "fsl,p1022-guts", },
++ { .compatible = "fsl,p1023-guts", },
++ { .compatible = "fsl,p2020-guts", },
++ { .compatible = "fsl,bsc9131-guts", },
++ { .compatible = "fsl,bsc9132-guts", },
++ { .compatible = "fsl,mpc8536-guts", },
++ { .compatible = "fsl,mpc8544-guts", },
++ { .compatible = "fsl,mpc8548-guts", },
++ { .compatible = "fsl,mpc8568-guts", },
++ { .compatible = "fsl,mpc8569-guts", },
++ { .compatible = "fsl,mpc8572-guts", },
++ { .compatible = "fsl,ls1021a-dcfg", },
++ { .compatible = "fsl,ls1043a-dcfg", },
++ { .compatible = "fsl,ls1046a-dcfg", },
++ { .compatible = "fsl,ls2080a-dcfg", },
++ { .compatible = "fsl,ls1088a-dcfg", },
++ {}
++};
++MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
++
++static struct platform_driver fsl_guts_driver = {
++ .driver = {
++ .name = "fsl-guts",
++ .of_match_table = fsl_guts_of_match,
++ },
++ .probe = fsl_guts_probe,
++ .remove = fsl_guts_remove,
++};
++
++static int __init fsl_guts_init(void)
++{
++ return platform_driver_register(&fsl_guts_driver);
++}
++core_initcall(fsl_guts_init);
++
++static void __exit fsl_guts_exit(void)
++{
++ platform_driver_unregister(&fsl_guts_driver);
++}
++module_exit(fsl_guts_exit);
+diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
+index 649e9171..3efa3b86 100644
+--- a/include/linux/fsl/guts.h
++++ b/include/linux/fsl/guts.h
+@@ -29,83 +29,112 @@
+ * #ifdefs.
+ */
+ struct ccsr_guts {
+- __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
+- __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
+- __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
+- __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
+- __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
+- __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */
++ u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
++ u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
++ u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and
++ * Control Register
++ */
++ u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
++ u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
++ u32 pordevsr2; /* 0x.0014 - POR device status register 2 */
+ u8 res018[0x20 - 0x18];
+- __be32 porcir; /* 0x.0020 - POR Configuration Information Register */
++ u32 porcir; /* 0x.0020 - POR Configuration Information
++ * Register
++ */
+ u8 res024[0x30 - 0x24];
+- __be32 gpiocr; /* 0x.0030 - GPIO Control Register */
++ u32 gpiocr; /* 0x.0030 - GPIO Control Register */
+ u8 res034[0x40 - 0x34];
+- __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */
++ u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data
++ * Register
++ */
+ u8 res044[0x50 - 0x44];
+- __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */
++ u32 gpindr; /* 0x.0050 - General-Purpose Input Data
++ * Register
++ */
+ u8 res054[0x60 - 0x54];
+- __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */
+- __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */
+- __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
++ u32 pmuxcr; /* 0x.0060 - Alternate Function Signal
++ * Multiplex Control
++ */
++ u32 pmuxcr2; /* 0x.0064 - Alternate function signal
++ * multiplex control 2
++ */
++ u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
+ u8 res06c[0x70 - 0x6c];
+- __be32 devdisr; /* 0x.0070 - Device Disable Control */
++ u32 devdisr; /* 0x.0070 - Device Disable Control */
+ #define CCSR_GUTS_DEVDISR_TB1 0x00001000
+ #define CCSR_GUTS_DEVDISR_TB0 0x00004000
+- __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
++ u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
+ u8 res078[0x7c - 0x78];
+- __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */
+- __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */
+- __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */
+- __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */
+- __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */
+- __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
+- __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */
+- __be32 ectrstcr; /* 0x.0098 - Exception reset control register */
+- __be32 autorstsr; /* 0x.009c - Automatic reset status register */
+- __be32 pvr; /* 0x.00a0 - Processor Version Register */
+- __be32 svr; /* 0x.00a4 - System Version Register */
++	u32	pmjcr;		/* 0x.007c - Power Management Jog Control
++ * Register
++ */
++ u32 powmgtcsr; /* 0x.0080 - Power Management Status and
++ * Control Register
++ */
++ u32 pmrccr; /* 0x.0084 - Power Management Reset Counter
++ * Configuration Register
++ */
++ u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter
++ * Configuration Register
++ */
++	u32     pmcdr;		/* 0x.008c - Power management clock disable
++ * register
++ */
++ u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
++ u32 rstrscr; /* 0x.0094 - Reset Request Status and
++ * Control Register
++ */
++ u32 ectrstcr; /* 0x.0098 - Exception reset control register */
++ u32 autorstsr; /* 0x.009c - Automatic reset status register */
++ u32 pvr; /* 0x.00a0 - Processor Version Register */
++ u32 svr; /* 0x.00a4 - System Version Register */
+ u8 res0a8[0xb0 - 0xa8];
+- __be32 rstcr; /* 0x.00b0 - Reset Control Register */
++ u32 rstcr; /* 0x.00b0 - Reset Control Register */
+ u8 res0b4[0xc0 - 0xb4];
+- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register
++ u32 iovselsr; /* 0x.00c0 - I/O voltage select status register
+ Called 'elbcvselcr' on 86xx SOCs */
+ u8 res0c4[0x100 - 0xc4];
+- __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
++ u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
+ There are 16 registers */
+ u8 res140[0x224 - 0x140];
+- __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
+- __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
++ u32 iodelay1; /* 0x.0224 - IO delay control register 1 */
++ u32 iodelay2; /* 0x.0228 - IO delay control register 2 */
+ u8 res22c[0x604 - 0x22c];
+- __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
++ u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
+ u8 res608[0x800 - 0x608];
+- __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */
++ u32 clkdvdr; /* 0x.0800 - Clock Divide Register */
+ u8 res804[0x900 - 0x804];
+- __be32 ircr; /* 0x.0900 - Infrared Control Register */
++ u32 ircr; /* 0x.0900 - Infrared Control Register */
+ u8 res904[0x908 - 0x904];
+- __be32 dmacr; /* 0x.0908 - DMA Control Register */
++ u32 dmacr; /* 0x.0908 - DMA Control Register */
+ u8 res90c[0x914 - 0x90c];
+- __be32 elbccr; /* 0x.0914 - eLBC Control Register */
++ u32 elbccr; /* 0x.0914 - eLBC Control Register */
+ u8 res918[0xb20 - 0x918];
+- __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
+- __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
+- __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
++ u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
++ u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
++ u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
+ u8 resb2c[0xe00 - 0xb2c];
+- __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */
++ u32 clkocr; /* 0x.0e00 - Clock Out Select Register */
+ u8 rese04[0xe10 - 0xe04];
+- __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
++ u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
+ u8 rese14[0xe20 - 0xe14];
+- __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
+- __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */
++ u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
++ u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override
++ * register
++ */
+ u8 rese28[0xf04 - 0xe28];
+- __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
+- __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */
++ u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
++	u32	srds1cr1;	/* 0x.0f08 - SerDes1 Control Register 1 */
+ u8 resf0c[0xf2c - 0xf0c];
+- __be32 itcr; /* 0x.0f2c - Internal transaction control register */
++ u32 itcr; /* 0x.0f2c - Internal transaction control
++ * register
++ */
+ u8 resf30[0xf40 - 0xf30];
+- __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
+- __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */
++ u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
++	u32	srds2cr1;	/* 0x.0f44 - SerDes2 Control Register 1 */
+ } __attribute__ ((packed));
+
++u32 fsl_guts_get_svr(void);
+
+ /* Alternate function signal multiplex control */
+ #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
+--
+2.14.1
+
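The die lookup in the guts patch above is a plain mask-and-compare over the
SVR: an entry matches when (svr & mask) equals its svr value, and the low
byte of the SVR carries the silicon revision. A minimal standalone sketch of
that logic, reusing the LS1046A entry from the fsl_soc_die[] table (the
sample SVR value, and hence the revision nibbles, is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    struct die { const char *name; uint32_t svr, mask; };

    /* one entry lifted from the fsl_soc_die[] table in the patch */
    static const struct die dies[] = {
        { "LS1046A", 0x87070000, 0xffff0000 },
        { NULL, 0, 0 },
    };

    int main(void)
    {
        uint32_t svr = 0x87070011; /* hypothetical LS1046A, rev 1.1 */
        const struct die *d;

        for (d = dies; d->svr; d++)
            if (d->svr == (svr & d->mask))
                printf("die %s, rev %u.%u\n", d->name,
                       (unsigned)((svr >> 4) & 0xf), (unsigned)(svr & 0xf));
        return 0;
    }

This mirrors what fsl_guts_probe() does: the same two revision nibbles feed
the soc_dev_attr.revision string via "%d.%d", (svr >> 4) & 0xf, svr & 0xf.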
diff --git a/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch b/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch
new file mode 100644
index 0000000000..fd4371adb1
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch
@@ -0,0 +1,140 @@
+From 3c5032fe34f1af50e9e5fe58d40bf93c1717302f Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:19:53 +0800
+Subject: [PATCH] i2c: support layerscape
+
+This is an integrated patch for layerscape i2c support.
+
+Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
+Signed-off-by: Priyanka Jain <Priyanka.Jain@freescale.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/i2c/busses/i2c-imx.c | 10 ++++++++-
+ drivers/i2c/muxes/i2c-mux-pca954x.c | 43 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 52 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index 47fc1f1a..a35c366b 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -889,6 +889,14 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
+
+ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
+
++ /*
++	 * workaround for ERR010027: ensure that the I2C bus is idle
++ * before switching to master mode and attempting a Start cycle
++ */
++ result = i2c_imx_bus_busy(i2c_imx, 0);
++ if (result)
++ goto out;
++
+ result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
+ if (result < 0)
+ goto out;
+@@ -1100,7 +1108,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ }
+
+ /* Request IRQ */
+- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
++ ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
+ pdev->name, i2c_imx);
+ if (ret) {
+ dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
+index 9c4ac26c..3c27ab84 100644
+--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
++++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
+@@ -74,6 +74,7 @@ struct pca954x {
+ u8 last_chan; /* last register value */
+ u8 deselect;
+ struct i2c_client *client;
++	u8 disable_mux; /* if non-zero, never disable the mux */
+ };
+
+ /* Provide specs for the PCA954x types we know about */
+@@ -196,6 +197,13 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
+ if (!(data->deselect & (1 << chan)))
+ return 0;
+
++#ifdef CONFIG_ARCH_LAYERSCAPE
++ if (data->disable_mux != 0)
++ data->last_chan = data->chip->nchans;
++ else
++ data->last_chan = 0;
++ return pca954x_reg_write(muxc->parent, client, data->disable_mux);
++#endif
+ /* Deselect active channel */
+ data->last_chan = 0;
+ return pca954x_reg_write(muxc->parent, client, data->last_chan);
+@@ -228,6 +236,28 @@ static int pca954x_probe(struct i2c_client *client,
+ return -ENOMEM;
+ data = i2c_mux_priv(muxc);
+
++#ifdef CONFIG_ARCH_LAYERSCAPE
++	/* The point here is that you must not disable a mux if there
++	 * are no pull-ups on the input, or you corrupt the I2C bus.
++	 * This really belongs in the DTS, as the kernel cannot know
++	 * it otherwise.
++	 */
++ match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
++ if (match)
++ data->chip = of_device_get_match_data(&client->dev);
++ else
++ data->chip = &chips[id->driver_data];
++
++ data->disable_mux = of_node &&
++ of_property_read_bool(of_node, "i2c-mux-never-disable") &&
++ data->chip->muxtype == pca954x_ismux ?
++ data->chip->enable : 0;
++ /* force the first selection */
++ if (data->disable_mux != 0)
++ data->last_chan = data->chip->nchans;
++ else
++ data->last_chan = 0;
++#endif
+ i2c_set_clientdata(client, muxc);
+ data->client = client;
+
+@@ -240,11 +270,16 @@ static int pca954x_probe(struct i2c_client *client,
+ * that the mux is in fact present. This also
+ * initializes the mux to disconnected state.
+ */
++#ifdef CONFIG_ARCH_LAYERSCAPE
++ if (i2c_smbus_write_byte(client, data->disable_mux) < 0) {
++#else
+ if (i2c_smbus_write_byte(client, 0) < 0) {
++#endif
+ dev_warn(&client->dev, "probe failed\n");
+ return -ENODEV;
+ }
+
++#ifndef CONFIG_ARCH_LAYERSCAPE
+ match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
+ if (match)
+ data->chip = of_device_get_match_data(&client->dev);
+@@ -252,6 +287,7 @@ static int pca954x_probe(struct i2c_client *client,
+ data->chip = &chips[id->driver_data];
+
+ data->last_chan = 0; /* force the first selection */
++#endif
+
+ idle_disconnect_dt = of_node &&
+ of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
+@@ -312,6 +348,13 @@ static int pca954x_resume(struct device *dev)
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ struct pca954x *data = i2c_mux_priv(muxc);
+
++#ifdef CONFIG_ARCH_LAYERSCAPE
++ if (data->disable_mux != 0)
++ data->last_chan = data->chip->nchans;
++ else
++ data->last_chan = 0;
++ return i2c_smbus_write_byte(client, data->disable_mux);
++#endif
+ data->last_chan = 0;
+ return i2c_smbus_write_byte(client, 0);
+ }
+--
+2.14.1
+
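Two details in the i2c patch above are easy to miss. The ERR010027 workaround
simply refuses to start a transfer until the bus-busy flag clears, and the
pca954x change parks the mux control register on the chip's enable pattern
instead of 0 when "i2c-mux-never-disable" is set, so a downstream segment
without pull-ups is never left floating. A rough sketch of the busy-wait
ordering, assuming a memory-mapped status register; the bit value and retry
bound are illustrative, not the driver's actual definitions:

    #include <stdbool.h>

    #define I2SR_IBB 0x20 /* assumed "bus busy" status bit */

    /* Poll until the bus is idle before switching the controller to
     * master mode and issuing a Start, as the ERR010027 hunk does. */
    static bool wait_bus_idle(volatile unsigned char *i2sr, int retries)
    {
        while (retries--) {
            if (!(*i2sr & I2SR_IBB))
                return true; /* idle: safe to start the transfer */
        }
        return false; /* still busy: the caller aborts the transfer */
    }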
diff --git a/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch b/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
new file mode 100644
index 0000000000..dd536093e3
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
@@ -0,0 +1,1338 @@
+From f1874c71c855bd8ca8478a622053276f2c61eeca Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Wed, 27 Sep 2017 10:33:26 +0800
+Subject: [PATCH] iommu: support layerscape
+
+This is an integrated patch for layerscape smmu support.
+
+Signed-off-by: Eric Auger <eric.auger@redhat.com>
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
+Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/iommu/amd_iommu.c | 56 ++++++----
+ drivers/iommu/arm-smmu-v3.c | 35 ++++++-
+ drivers/iommu/arm-smmu.c | 74 ++++++++++---
+ drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++-------
+ drivers/iommu/intel-iommu.c | 92 ++++++++++++----
+ drivers/iommu/iommu.c | 191 ++++++++++++++++++++++++++++++++--
+ drivers/iommu/mtk_iommu.c | 2 +
+ drivers/iommu/mtk_iommu_v1.c | 2 +
+ include/linux/dma-iommu.h | 11 ++
+ include/linux/iommu.h | 55 +++++++---
+ 10 files changed, 645 insertions(+), 115 deletions(-)
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index c380b7e8..93199931 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
+
+ if (!entry->group)
+ entry->group = generic_device_group(dev);
++ else
++ iommu_group_ref_get(entry->group);
+
+ return entry->group;
+ }
+@@ -3159,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
+ return false;
+ }
+
+-static void amd_iommu_get_dm_regions(struct device *dev,
+- struct list_head *head)
++static void amd_iommu_get_resv_regions(struct device *dev,
++ struct list_head *head)
+ {
++ struct iommu_resv_region *region;
+ struct unity_map_entry *entry;
+ int devid;
+
+@@ -3170,41 +3173,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
+ return;
+
+ list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+- struct iommu_dm_region *region;
++ size_t length;
++ int prot = 0;
+
+ if (devid < entry->devid_start || devid > entry->devid_end)
+ continue;
+
+- region = kzalloc(sizeof(*region), GFP_KERNEL);
++ length = entry->address_end - entry->address_start;
++ if (entry->prot & IOMMU_PROT_IR)
++ prot |= IOMMU_READ;
++ if (entry->prot & IOMMU_PROT_IW)
++ prot |= IOMMU_WRITE;
++
++ region = iommu_alloc_resv_region(entry->address_start,
++ length, prot,
++ IOMMU_RESV_DIRECT);
+ if (!region) {
+ pr_err("Out of memory allocating dm-regions for %s\n",
+ dev_name(dev));
+ return;
+ }
+-
+- region->start = entry->address_start;
+- region->length = entry->address_end - entry->address_start;
+- if (entry->prot & IOMMU_PROT_IR)
+- region->prot |= IOMMU_READ;
+- if (entry->prot & IOMMU_PROT_IW)
+- region->prot |= IOMMU_WRITE;
+-
+ list_add_tail(&region->list, head);
+ }
++
++ region = iommu_alloc_resv_region(MSI_RANGE_START,
++ MSI_RANGE_END - MSI_RANGE_START + 1,
++ 0, IOMMU_RESV_MSI);
++ if (!region)
++ return;
++ list_add_tail(&region->list, head);
++
++ region = iommu_alloc_resv_region(HT_RANGE_START,
++ HT_RANGE_END - HT_RANGE_START + 1,
++ 0, IOMMU_RESV_RESERVED);
++ if (!region)
++ return;
++ list_add_tail(&region->list, head);
+ }
+
+-static void amd_iommu_put_dm_regions(struct device *dev,
++static void amd_iommu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+ {
+- struct iommu_dm_region *entry, *next;
++ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
+ }
+
+-static void amd_iommu_apply_dm_region(struct device *dev,
++static void amd_iommu_apply_resv_region(struct device *dev,
+ struct iommu_domain *domain,
+- struct iommu_dm_region *region)
++ struct iommu_resv_region *region)
+ {
+ struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
+ unsigned long start, end;
+@@ -3228,9 +3246,9 @@ static const struct iommu_ops amd_iommu_ops = {
+ .add_device = amd_iommu_add_device,
+ .remove_device = amd_iommu_remove_device,
+ .device_group = amd_iommu_device_group,
+- .get_dm_regions = amd_iommu_get_dm_regions,
+- .put_dm_regions = amd_iommu_put_dm_regions,
+- .apply_dm_region = amd_iommu_apply_dm_region,
++ .get_resv_regions = amd_iommu_get_resv_regions,
++ .put_resv_regions = amd_iommu_put_resv_regions,
++ .apply_resv_region = amd_iommu_apply_resv_region,
+ .pgsize_bitmap = AMD_IOMMU_PGSIZES,
+ };
+
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index e6f9b2d7..e3ed8dc5 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -410,6 +410,9 @@
+ /* High-level queue structures */
+ #define ARM_SMMU_POLL_TIMEOUT_US 100
+
++#define MSI_IOVA_BASE 0x8000000
++#define MSI_IOVA_LENGTH 0x100000
++
+ static bool disable_bypass;
+ module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
+ MODULE_PARM_DESC(disable_bypass,
+@@ -1370,8 +1373,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ return true;
+- case IOMMU_CAP_INTR_REMAP:
+- return true; /* MSIs are just memory writes */
+ case IOMMU_CAP_NOEXEC:
+ return true;
+ default:
+@@ -1709,6 +1710,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
++ if (domain->type == IOMMU_DOMAIN_IDENTITY)
++ return iova;
++
+ if (!ops)
+ return 0;
+
+@@ -1880,6 +1884,31 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+ return iommu_fwspec_add_ids(dev, args->args, 1);
+ }
+
++static void arm_smmu_get_resv_regions(struct device *dev,
++ struct list_head *head)
++{
++ struct iommu_resv_region *region;
++ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
++
++ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
++ prot, IOMMU_RESV_SW_MSI);
++ if (!region)
++ return;
++
++ list_add_tail(&region->list, head);
++
++ iommu_dma_get_resv_regions(dev, head);
++}
++
++static void arm_smmu_put_resv_regions(struct device *dev,
++ struct list_head *head)
++{
++ struct iommu_resv_region *entry, *next;
++
++ list_for_each_entry_safe(entry, next, head, list)
++ kfree(entry);
++}
++
+ static struct iommu_ops arm_smmu_ops = {
+ .capable = arm_smmu_capable,
+ .domain_alloc = arm_smmu_domain_alloc,
+@@ -1895,6 +1924,8 @@ static struct iommu_ops arm_smmu_ops = {
+ .domain_get_attr = arm_smmu_domain_get_attr,
+ .domain_set_attr = arm_smmu_domain_set_attr,
+ .of_xlate = arm_smmu_of_xlate,
++ .get_resv_regions = arm_smmu_get_resv_regions,
++ .put_resv_regions = arm_smmu_put_resv_regions,
+ .pgsize_bitmap = -1UL, /* Restricted during device attach */
+ };
+
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 8f728144..df18dac3 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -49,6 +49,7 @@
+ #include <linux/spinlock.h>
+
+ #include <linux/amba/bus.h>
++#include "../staging/fsl-mc/include/mc-bus.h"
+
+ #include "io-pgtable.h"
+
+@@ -247,6 +248,7 @@ enum arm_smmu_s2cr_privcfg {
+ #define ARM_MMU500_ACTLR_CPRE (1 << 1)
+
+ #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
++#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
+
+ #define CB_PAR_F (1 << 0)
+
+@@ -278,6 +280,9 @@ enum arm_smmu_s2cr_privcfg {
+
+ #define FSYNR0_WNR (1 << 4)
+
++#define MSI_IOVA_BASE 0x8000000
++#define MSI_IOVA_LENGTH 0x100000
++
+ static int force_stage;
+ module_param(force_stage, int, S_IRUGO);
+ MODULE_PARM_DESC(force_stage,
+@@ -1343,6 +1348,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
+
++ if (domain->type == IOMMU_DOMAIN_IDENTITY)
++ return iova;
++
+ if (!ops)
+ return 0;
+
+@@ -1368,8 +1376,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
+ * requests.
+ */
+ return true;
+- case IOMMU_CAP_INTR_REMAP:
+- return true; /* MSIs are just memory writes */
+ case IOMMU_CAP_NOEXEC:
+ return true;
+ default:
+@@ -1478,10 +1484,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
+ }
+
+ if (group)
+- return group;
++ return iommu_group_ref_get(group);
+
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
++ else if (dev_is_fsl_mc(dev))
++ group = fsl_mc_device_group(dev);
+ else
+ group = generic_device_group(dev);
+
+@@ -1534,17 +1542,44 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+
+ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+ {
+- u32 fwid = 0;
++ u32 mask, fwid = 0;
+
+ if (args->args_count > 0)
+ fwid |= (u16)args->args[0];
+
+ if (args->args_count > 1)
+ fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
++ else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
++ fwid |= (u16)mask << SMR_MASK_SHIFT;
+
+ return iommu_fwspec_add_ids(dev, &fwid, 1);
+ }
+
++static void arm_smmu_get_resv_regions(struct device *dev,
++ struct list_head *head)
++{
++ struct iommu_resv_region *region;
++ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
++
++ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
++ prot, IOMMU_RESV_SW_MSI);
++ if (!region)
++ return;
++
++ list_add_tail(&region->list, head);
++
++ iommu_dma_get_resv_regions(dev, head);
++}
++
++static void arm_smmu_put_resv_regions(struct device *dev,
++ struct list_head *head)
++{
++ struct iommu_resv_region *entry, *next;
++
++ list_for_each_entry_safe(entry, next, head, list)
++ kfree(entry);
++}
++
+ static struct iommu_ops arm_smmu_ops = {
+ .capable = arm_smmu_capable,
+ .domain_alloc = arm_smmu_domain_alloc,
+@@ -1560,6 +1595,8 @@ static struct iommu_ops arm_smmu_ops = {
+ .domain_get_attr = arm_smmu_domain_get_attr,
+ .domain_set_attr = arm_smmu_domain_set_attr,
+ .of_xlate = arm_smmu_of_xlate,
++ .get_resv_regions = arm_smmu_get_resv_regions,
++ .put_resv_regions = arm_smmu_put_resv_regions,
+ .pgsize_bitmap = -1UL, /* Restricted during device attach */
+ };
+
+@@ -1581,16 +1618,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+ for (i = 0; i < smmu->num_mapping_groups; ++i)
+ arm_smmu_write_sme(smmu, i);
+
+- /*
+- * Before clearing ARM_MMU500_ACTLR_CPRE, need to
+- * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
+- * bit is only present in MMU-500r2 onwards.
+- */
+- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
+- major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
+- if ((smmu->model == ARM_MMU500) && (major >= 2)) {
++ if (smmu->model == ARM_MMU500) {
++ /*
++ * Before clearing ARM_MMU500_ACTLR_CPRE, need to
++ * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
++ * bit is only present in MMU-500r2 onwards.
++ */
++ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
++ major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
+- reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
++ if (major >= 2)
++ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
++ /*
++ * Allow unmatched Stream IDs to allocate bypass
++ * TLB entries for reduced latency.
++ */
++ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
+ }
+
+@@ -2024,6 +2067,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+ bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ }
+ #endif
++#ifdef CONFIG_FSL_MC_BUS
++ if (!iommu_present(&fsl_mc_bus_type))
++ bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
++#endif
++
+ return 0;
+ }
+
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 1520e7f0..3ade4153 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
+ phys_addr_t phys;
+ };
+
++enum iommu_dma_cookie_type {
++ IOMMU_DMA_IOVA_COOKIE,
++ IOMMU_DMA_MSI_COOKIE,
++};
++
+ struct iommu_dma_cookie {
+- struct iova_domain iovad;
+- struct list_head msi_page_list;
+- spinlock_t msi_lock;
++ enum iommu_dma_cookie_type type;
++ union {
++ /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
++ struct iova_domain iovad;
++ /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
++ dma_addr_t msi_iova;
++ };
++ struct list_head msi_page_list;
++ spinlock_t msi_lock;
+ };
+
++static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
++{
++ if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
++ return cookie->iovad.granule;
++ return PAGE_SIZE;
++}
++
+ static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
+ {
+- return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
++ struct iommu_dma_cookie *cookie = domain->iova_cookie;
++
++ if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
++ return &cookie->iovad;
++ return NULL;
++}
++
++static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
++{
++ struct iommu_dma_cookie *cookie;
++
++ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
++ if (cookie) {
++ spin_lock_init(&cookie->msi_lock);
++ INIT_LIST_HEAD(&cookie->msi_page_list);
++ cookie->type = type;
++ }
++ return cookie;
+ }
+
+ int iommu_dma_init(void)
+@@ -61,26 +96,54 @@ int iommu_dma_init(void)
+ * callback when domain->type == IOMMU_DOMAIN_DMA.
+ */
+ int iommu_get_dma_cookie(struct iommu_domain *domain)
++{
++ if (domain->iova_cookie)
++ return -EEXIST;
++
++ domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
++ if (!domain->iova_cookie)
++ return -ENOMEM;
++
++ return 0;
++}
++EXPORT_SYMBOL(iommu_get_dma_cookie);
++
++/**
++ * iommu_get_msi_cookie - Acquire just MSI remapping resources
++ * @domain: IOMMU domain to prepare
++ * @base: Start address of IOVA region for MSI mappings
++ *
++ * Users who manage their own IOVA allocation and do not want DMA API support,
++ * but would still like to take advantage of automatic MSI remapping, can use
++ * this to initialise their own domain appropriately. Users should reserve a
++ * contiguous IOVA region, starting at @base, large enough to accommodate the
++ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
++ * used by the devices attached to @domain.
++ */
++int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+ {
+ struct iommu_dma_cookie *cookie;
+
++ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
++ return -EINVAL;
++
+ if (domain->iova_cookie)
+ return -EEXIST;
+
+- cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
++ cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
+ if (!cookie)
+ return -ENOMEM;
+
+- spin_lock_init(&cookie->msi_lock);
+- INIT_LIST_HEAD(&cookie->msi_page_list);
++ cookie->msi_iova = base;
+ domain->iova_cookie = cookie;
+ return 0;
+ }
+-EXPORT_SYMBOL(iommu_get_dma_cookie);
++EXPORT_SYMBOL(iommu_get_msi_cookie);
+
+ /**
+ * iommu_put_dma_cookie - Release a domain's DMA mapping resources
+- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
++ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
++ * iommu_get_msi_cookie()
+ *
+ * IOMMU drivers should normally call this from their domain_free callback.
+ */
+@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
+ if (!cookie)
+ return;
+
+- if (cookie->iovad.granule)
++ if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
+ put_iova_domain(&cookie->iovad);
+
+ list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
+@@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
+ }
+ EXPORT_SYMBOL(iommu_put_dma_cookie);
+
+-static void iova_reserve_pci_windows(struct pci_dev *dev,
+- struct iova_domain *iovad)
++/**
++ * iommu_dma_get_resv_regions - Reserved region driver helper
++ * @dev: Device from iommu_get_resv_regions()
++ * @list: Reserved region list from iommu_get_resv_regions()
++ *
++ * IOMMU drivers can use this to implement their .get_resv_regions callback
++ * for general non-IOMMU-specific reservations. Currently, this covers host
++ * bridge windows for PCI devices.
++ */
++void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+ {
+- struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
++ struct pci_host_bridge *bridge;
+ struct resource_entry *window;
+- unsigned long lo, hi;
+
++ if (!dev_is_pci(dev))
++ return;
++
++ bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
+ resource_list_for_each_entry(window, &bridge->windows) {
++ struct iommu_resv_region *region;
++ phys_addr_t start;
++ size_t length;
++
+ if (resource_type(window->res) != IORESOURCE_MEM)
+ continue;
+
+- lo = iova_pfn(iovad, window->res->start - window->offset);
+- hi = iova_pfn(iovad, window->res->end - window->offset);
++ start = window->res->start - window->offset;
++ length = window->res->end - window->res->start + 1;
++ region = iommu_alloc_resv_region(start, length, 0,
++ IOMMU_RESV_RESERVED);
++ if (!region)
++ return;
++
++ list_add_tail(&region->list, list);
++ }
++}
++EXPORT_SYMBOL(iommu_dma_get_resv_regions);
++
++static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
++ phys_addr_t start, phys_addr_t end)
++{
++ struct iova_domain *iovad = &cookie->iovad;
++ struct iommu_dma_msi_page *msi_page;
++ int i, num_pages;
++
++ start -= iova_offset(iovad, start);
++ num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
++
++ msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
++ if (!msi_page)
++ return -ENOMEM;
++
++ for (i = 0; i < num_pages; i++) {
++ msi_page[i].phys = start;
++ msi_page[i].iova = start;
++ INIT_LIST_HEAD(&msi_page[i].list);
++ list_add(&msi_page[i].list, &cookie->msi_page_list);
++ start += iovad->granule;
++ }
++
++ return 0;
++}
++
++static int iova_reserve_iommu_regions(struct device *dev,
++ struct iommu_domain *domain)
++{
++ struct iommu_dma_cookie *cookie = domain->iova_cookie;
++ struct iova_domain *iovad = &cookie->iovad;
++ struct iommu_resv_region *region;
++ LIST_HEAD(resv_regions);
++ int ret = 0;
++
++ iommu_get_resv_regions(dev, &resv_regions);
++ list_for_each_entry(region, &resv_regions, list) {
++ unsigned long lo, hi;
++
++ /* We ARE the software that manages these! */
++ if (region->type == IOMMU_RESV_SW_MSI)
++ continue;
++
++ lo = iova_pfn(iovad, region->start);
++ hi = iova_pfn(iovad, region->start + region->length - 1);
+ reserve_iova(iovad, lo, hi);
++
++ if (region->type == IOMMU_RESV_MSI)
++ ret = cookie_init_hw_msi_region(cookie, region->start,
++ region->start + region->length);
++ if (ret)
++ break;
+ }
++ iommu_put_resv_regions(dev, &resv_regions);
++
++ return ret;
+ }
+
+ /**
+@@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
+ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+ u64 size, struct device *dev)
+ {
+- struct iova_domain *iovad = cookie_iovad(domain);
++ struct iommu_dma_cookie *cookie = domain->iova_cookie;
++ struct iova_domain *iovad = &cookie->iovad;
+ unsigned long order, base_pfn, end_pfn;
+
+- if (!iovad)
+- return -ENODEV;
++ if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
++ return -EINVAL;
+
+ /* Use the smallest supported page size for IOVA granularity */
+ order = __ffs(domain->pgsize_bitmap);
+@@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+ end_pfn = min_t(unsigned long, end_pfn,
+ domain->geometry.aperture_end >> order);
+ }
++ /*
++ * PCI devices may have larger DMA masks, but still prefer allocating
++ * within a 32-bit mask to avoid DAC addressing. Such limitations don't
++ * apply to the typical platform device, so for those we may as well
++ * leave the cache limit at the top of their range to save an rb_last()
++ * traversal on every allocation.
++ */
++ if (dev && dev_is_pci(dev))
++ end_pfn &= DMA_BIT_MASK(32) >> order;
+
+- /* All we can safely do with an existing domain is enlarge it */
++ /* start_pfn is always nonzero for an already-initialised domain */
+ if (iovad->start_pfn) {
+ if (1UL << order != iovad->granule ||
+- base_pfn != iovad->start_pfn ||
+- end_pfn < iovad->dma_32bit_pfn) {
++ base_pfn != iovad->start_pfn) {
+ pr_warn("Incompatible range for DMA domain\n");
+ return -EFAULT;
+ }
+- iovad->dma_32bit_pfn = end_pfn;
+- } else {
+- init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+- if (dev && dev_is_pci(dev))
+- iova_reserve_pci_windows(to_pci_dev(dev), iovad);
++ /*
++ * If we have devices with different DMA masks, move the free
++ * area cache limit down for the benefit of the smaller one.
++ */
++ iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
++
++ return 0;
+ }
+- return 0;
++
++ init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
++ if (!dev)
++ return 0;
++
++ return iova_reserve_iommu_regions(dev, domain);
+ }
+ EXPORT_SYMBOL(iommu_dma_init_domain);
+
+@@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
+ {
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iommu_dma_msi_page *msi_page;
+- struct iova_domain *iovad = &cookie->iovad;
++ struct iova_domain *iovad = cookie_iovad(domain);
+ struct iova *iova;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
++ size_t size = cookie_msi_granule(cookie);
+
+- msi_addr &= ~(phys_addr_t)iova_mask(iovad);
++ msi_addr &= ~(phys_addr_t)(size - 1);
+ list_for_each_entry(msi_page, &cookie->msi_page_list, list)
+ if (msi_page->phys == msi_addr)
+ return msi_page;
+@@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
+ if (!msi_page)
+ return NULL;
+
+- iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
+- if (!iova)
+- goto out_free_page;
+-
+ msi_page->phys = msi_addr;
+- msi_page->iova = iova_dma_addr(iovad, iova);
+- if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
++ if (iovad) {
++ iova = __alloc_iova(domain, size, dma_get_mask(dev));
++ if (!iova)
++ goto out_free_page;
++ msi_page->iova = iova_dma_addr(iovad, iova);
++ } else {
++ msi_page->iova = cookie->msi_iova;
++ cookie->msi_iova += size;
++ }
++
++ if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
+ goto out_free_iova;
+
+ INIT_LIST_HEAD(&msi_page->list);
+@@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
+ return msi_page;
+
+ out_free_iova:
+- __free_iova(iovad, iova);
++ if (iovad)
++ __free_iova(iovad, iova);
++ else
++ cookie->msi_iova -= size;
+ out_free_page:
+ kfree(msi_page);
+ return NULL;
+@@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+ msg->data = ~0U;
+ } else {
+ msg->address_hi = upper_32_bits(msi_page->iova);
+- msg->address_lo &= iova_mask(&cookie->iovad);
++ msg->address_lo &= cookie_msi_granule(cookie) - 1;
+ msg->address_lo += lower_32_bits(msi_page->iova);
+ }
+ }
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 002f8a42..befbfd30 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
+ u64 end_address; /* reserved end address */
+ struct dmar_dev_scope *devices; /* target devices */
+ int devices_cnt; /* target device count */
++ struct iommu_resv_region *resv; /* reserved region handle */
+ };
+
+ struct dmar_atsr_unit {
+@@ -4250,27 +4251,40 @@ static inline void init_iommu_pm_ops(void) {}
+ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
+ {
+ struct acpi_dmar_reserved_memory *rmrr;
++ int prot = DMA_PTE_READ|DMA_PTE_WRITE;
+ struct dmar_rmrr_unit *rmrru;
++ size_t length;
+
+ rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
+ if (!rmrru)
+- return -ENOMEM;
++ goto out;
+
+ rmrru->hdr = header;
+ rmrr = (struct acpi_dmar_reserved_memory *)header;
+ rmrru->base_address = rmrr->base_address;
+ rmrru->end_address = rmrr->end_address;
++
++ length = rmrr->end_address - rmrr->base_address + 1;
++ rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
++ IOMMU_RESV_DIRECT);
++ if (!rmrru->resv)
++ goto free_rmrru;
++
+ rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
+ ((void *)rmrr) + rmrr->header.length,
+ &rmrru->devices_cnt);
+- if (rmrru->devices_cnt && rmrru->devices == NULL) {
+- kfree(rmrru);
+- return -ENOMEM;
+- }
++ if (rmrru->devices_cnt && rmrru->devices == NULL)
++ goto free_all;
+
+ list_add(&rmrru->list, &dmar_rmrr_units);
+
+ return 0;
++free_all:
++ kfree(rmrru->resv);
++free_rmrru:
++ kfree(rmrru);
++out:
++ return -ENOMEM;
+ }
+
+ static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
+@@ -4484,6 +4498,7 @@ static void intel_iommu_free_dmars(void)
+ list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
+ list_del(&rmrru->list);
+ dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
++ kfree(rmrru->resv);
+ kfree(rmrru);
+ }
+
+@@ -5219,6 +5234,45 @@ static void intel_iommu_remove_device(struct device *dev)
+ iommu_device_unlink(iommu->iommu_dev, dev);
+ }
+
++static void intel_iommu_get_resv_regions(struct device *device,
++ struct list_head *head)
++{
++ struct iommu_resv_region *reg;
++ struct dmar_rmrr_unit *rmrr;
++ struct device *i_dev;
++ int i;
++
++ rcu_read_lock();
++ for_each_rmrr_units(rmrr) {
++ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
++ i, i_dev) {
++ if (i_dev != device)
++ continue;
++
++ list_add_tail(&rmrr->resv->list, head);
++ }
++ }
++ rcu_read_unlock();
++
++ reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
++ IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
++ 0, IOMMU_RESV_MSI);
++ if (!reg)
++ return;
++ list_add_tail(&reg->list, head);
++}
++
++static void intel_iommu_put_resv_regions(struct device *dev,
++ struct list_head *head)
++{
++ struct iommu_resv_region *entry, *next;
++
++ list_for_each_entry_safe(entry, next, head, list) {
++ if (entry->type == IOMMU_RESV_RESERVED)
++ kfree(entry);
++ }
++}
++
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ #define MAX_NR_PASID_BITS (20)
+ static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
+@@ -5349,19 +5403,21 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
+ #endif /* CONFIG_INTEL_IOMMU_SVM */
+
+ static const struct iommu_ops intel_iommu_ops = {
+- .capable = intel_iommu_capable,
+- .domain_alloc = intel_iommu_domain_alloc,
+- .domain_free = intel_iommu_domain_free,
+- .attach_dev = intel_iommu_attach_device,
+- .detach_dev = intel_iommu_detach_device,
+- .map = intel_iommu_map,
+- .unmap = intel_iommu_unmap,
+- .map_sg = default_iommu_map_sg,
+- .iova_to_phys = intel_iommu_iova_to_phys,
+- .add_device = intel_iommu_add_device,
+- .remove_device = intel_iommu_remove_device,
+- .device_group = pci_device_group,
+- .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
++ .capable = intel_iommu_capable,
++ .domain_alloc = intel_iommu_domain_alloc,
++ .domain_free = intel_iommu_domain_free,
++ .attach_dev = intel_iommu_attach_device,
++ .detach_dev = intel_iommu_detach_device,
++ .map = intel_iommu_map,
++ .unmap = intel_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
++ .iova_to_phys = intel_iommu_iova_to_phys,
++ .add_device = intel_iommu_add_device,
++ .remove_device = intel_iommu_remove_device,
++ .get_resv_regions = intel_iommu_get_resv_regions,
++ .put_resv_regions = intel_iommu_put_resv_regions,
++ .device_group = pci_device_group,
++ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+ };
+
+ static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 87d3060f..372fc463 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -68,6 +68,13 @@ struct iommu_group_attribute {
+ const char *buf, size_t count);
+ };
+
++static const char * const iommu_group_resv_type_string[] = {
++ [IOMMU_RESV_DIRECT] = "direct",
++ [IOMMU_RESV_RESERVED] = "reserved",
++ [IOMMU_RESV_MSI] = "msi",
++ [IOMMU_RESV_SW_MSI] = "msi",
++};
++
+ #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
+ struct iommu_group_attribute iommu_group_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+@@ -133,8 +140,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
+ return sprintf(buf, "%s\n", group->name);
+ }
+
++/**
++ * iommu_insert_resv_region - Insert a new region in the
++ * list of reserved regions.
++ * @new: new region to insert
++ * @regions: list of regions
++ *
++ * The new element is sorted by address with respect to the other
++ * regions of the same type. In case it overlaps with another
++ * region of the same type, regions are merged. In case it
++ * overlaps with another region of different type, regions are
++ * not merged.
++ */
++static int iommu_insert_resv_region(struct iommu_resv_region *new,
++ struct list_head *regions)
++{
++ struct iommu_resv_region *region;
++ phys_addr_t start = new->start;
++ phys_addr_t end = new->start + new->length - 1;
++ struct list_head *pos = regions->next;
++
++ while (pos != regions) {
++ struct iommu_resv_region *entry =
++ list_entry(pos, struct iommu_resv_region, list);
++ phys_addr_t a = entry->start;
++ phys_addr_t b = entry->start + entry->length - 1;
++ int type = entry->type;
++
++ if (end < a) {
++ goto insert;
++ } else if (start > b) {
++ pos = pos->next;
++ } else if ((start >= a) && (end <= b)) {
++ if (new->type == type)
++ goto done;
++ else
++ pos = pos->next;
++ } else {
++ if (new->type == type) {
++ phys_addr_t new_start = min(a, start);
++ phys_addr_t new_end = max(b, end);
++
++ list_del(&entry->list);
++ entry->start = new_start;
++ entry->length = new_end - new_start + 1;
++ iommu_insert_resv_region(entry, regions);
++ } else {
++ pos = pos->next;
++ }
++ }
++ }
++insert:
++ region = iommu_alloc_resv_region(new->start, new->length,
++ new->prot, new->type);
++ if (!region)
++ return -ENOMEM;
++
++ list_add_tail(&region->list, pos);
++done:
++ return 0;
++}
++
++static int
++iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
++ struct list_head *group_resv_regions)
++{
++ struct iommu_resv_region *entry;
++ int ret;
++
++ list_for_each_entry(entry, dev_resv_regions, list) {
++ ret = iommu_insert_resv_region(entry, group_resv_regions);
++ if (ret)
++ break;
++ }
++ return ret;
++}
++
++int iommu_get_group_resv_regions(struct iommu_group *group,
++ struct list_head *head)
++{
++ struct iommu_device *device;
++ int ret = 0;
++
++ mutex_lock(&group->mutex);
++ list_for_each_entry(device, &group->devices, list) {
++ struct list_head dev_resv_regions;
++
++ INIT_LIST_HEAD(&dev_resv_regions);
++ iommu_get_resv_regions(device->dev, &dev_resv_regions);
++ ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
++ iommu_put_resv_regions(device->dev, &dev_resv_regions);
++ if (ret)
++ break;
++ }
++ mutex_unlock(&group->mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
++
++static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
++ char *buf)
++{
++ struct iommu_resv_region *region, *next;
++ struct list_head group_resv_regions;
++ char *str = buf;
++
++ INIT_LIST_HEAD(&group_resv_regions);
++ iommu_get_group_resv_regions(group, &group_resv_regions);
++
++ list_for_each_entry_safe(region, next, &group_resv_regions, list) {
++ str += sprintf(str, "0x%016llx 0x%016llx %s\n",
++ (long long int)region->start,
++ (long long int)(region->start +
++ region->length - 1),
++ iommu_group_resv_type_string[region->type]);
++ kfree(region);
++ }
++
++ return (str - buf);
++}
++
+ static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+
++static IOMMU_GROUP_ATTR(reserved_regions, 0444,
++ iommu_group_show_resv_regions, NULL);
++
+ static void iommu_group_release(struct kobject *kobj)
+ {
+ struct iommu_group *group = to_iommu_group(kobj);
+@@ -212,6 +342,11 @@ struct iommu_group *iommu_group_alloc(void)
+ */
+ kobject_put(&group->kobj);
+
++ ret = iommu_group_create_file(group,
++ &iommu_group_attr_reserved_regions);
++ if (ret)
++ return ERR_PTR(ret);
++
+ pr_debug("Allocated group %d\n", group->id);
+
+ return group;
+@@ -318,7 +453,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
+ struct device *dev)
+ {
+ struct iommu_domain *domain = group->default_domain;
+- struct iommu_dm_region *entry;
++ struct iommu_resv_region *entry;
+ struct list_head mappings;
+ unsigned long pg_size;
+ int ret = 0;
+@@ -331,18 +466,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
+ pg_size = 1UL << __ffs(domain->pgsize_bitmap);
+ INIT_LIST_HEAD(&mappings);
+
+- iommu_get_dm_regions(dev, &mappings);
++ iommu_get_resv_regions(dev, &mappings);
+
+ /* We need to consider overlapping regions for different devices */
+ list_for_each_entry(entry, &mappings, list) {
+ dma_addr_t start, end, addr;
+
+- if (domain->ops->apply_dm_region)
+- domain->ops->apply_dm_region(dev, domain, entry);
++ if (domain->ops->apply_resv_region)
++ domain->ops->apply_resv_region(dev, domain, entry);
+
+ start = ALIGN(entry->start, pg_size);
+ end = ALIGN(entry->start + entry->length, pg_size);
+
++ if (entry->type != IOMMU_RESV_DIRECT)
++ continue;
++
+ for (addr = start; addr < end; addr += pg_size) {
+ phys_addr_t phys_addr;
+
+@@ -358,7 +496,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
+ }
+
+ out:
+- iommu_put_dm_regions(dev, &mappings);
++ iommu_put_resv_regions(dev, &mappings);
+
+ return ret;
+ }
+@@ -562,6 +700,19 @@ struct iommu_group *iommu_group_get(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(iommu_group_get);
+
++/**
++ * iommu_group_ref_get - Increment reference on a group
++ * @group: the group to use, must not be NULL
++ *
++ * This function is called by iommu drivers to take additional references on an
++ * existing group. Returns the given group for convenience.
++ */
++struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
++{
++ kobject_get(group->devices_kobj);
++ return group;
++}
++
+ /**
+ * iommu_group_put - Decrement group reference
+ * @group: the group to use
+@@ -1557,20 +1708,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
+ }
+ EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
+
+-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
++void iommu_get_resv_regions(struct device *dev, struct list_head *list)
+ {
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+- if (ops && ops->get_dm_regions)
+- ops->get_dm_regions(dev, list);
++ if (ops && ops->get_resv_regions)
++ ops->get_resv_regions(dev, list);
+ }
+
+-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
++void iommu_put_resv_regions(struct device *dev, struct list_head *list)
+ {
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+- if (ops && ops->put_dm_regions)
+- ops->put_dm_regions(dev, list);
++ if (ops && ops->put_resv_regions)
++ ops->put_resv_regions(dev, list);
++}
++
++struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
++ size_t length, int prot,
++ enum iommu_resv_type type)
++{
++ struct iommu_resv_region *region;
++
++ region = kzalloc(sizeof(*region), GFP_KERNEL);
++ if (!region)
++ return NULL;
++
++ INIT_LIST_HEAD(&region->list);
++ region->start = start;
++ region->length = length;
++ region->prot = prot;
++ region->type = type;
++ return region;
+ }
+
+ /* Request that a device is direct mapped by the IOMMU */
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index b12c12d7..9799daea 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
+ data->m4u_group = iommu_group_alloc();
+ if (IS_ERR(data->m4u_group))
+ dev_err(dev, "Failed to allocate M4U IOMMU group\n");
++ } else {
++ iommu_group_ref_get(data->m4u_group);
+ }
+ return data->m4u_group;
+ }
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index b8aeb076..c7063e9d 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
+ data->m4u_group = iommu_group_alloc();
+ if (IS_ERR(data->m4u_group))
+ dev_err(dev, "Failed to allocate M4U IOMMU group\n");
++ } else {
++ iommu_group_ref_get(data->m4u_group);
+ }
+ return data->m4u_group;
+ }
+diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
+index 32c58906..36d3206d 100644
+--- a/include/linux/dma-iommu.h
++++ b/include/linux/dma-iommu.h
+@@ -27,6 +27,7 @@ int iommu_dma_init(void);
+
+ /* Domain management interface for IOMMU drivers */
+ int iommu_get_dma_cookie(struct iommu_domain *domain);
++int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
+ void iommu_put_dma_cookie(struct iommu_domain *domain);
+
+ /* Setup call for arch DMA mapping code */
+@@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+ /* The DMA API isn't _quite_ the whole story, though... */
+ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
++void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+
+ #else
+
+@@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
+ return -ENODEV;
+ }
+
++static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
++{
++ return -ENODEV;
++}
++
+ static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
+ {
+ }
+@@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+ {
+ }
+
++static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
++{
++}
++
+ #endif /* CONFIG_IOMMU_DMA */
+ #endif /* __KERNEL__ */
+ #endif /* __DMA_IOMMU_H */
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 436dc213..188599f5 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -117,18 +117,32 @@ enum iommu_attr {
+ DOMAIN_ATTR_MAX,
+ };
+
++/* These are the possible reserved region types */
++enum iommu_resv_type {
++ /* Memory regions which must be mapped 1:1 at all times */
++ IOMMU_RESV_DIRECT,
++ /* Arbitrary "never map this or give it to a device" address ranges */
++ IOMMU_RESV_RESERVED,
++ /* Hardware MSI region (untranslated) */
++ IOMMU_RESV_MSI,
++ /* Software-managed MSI translation window */
++ IOMMU_RESV_SW_MSI,
++};
++
+ /**
+- * struct iommu_dm_region - descriptor for a direct mapped memory region
++ * struct iommu_resv_region - descriptor for a reserved memory region
+ * @list: Linked list pointers
+ * @start: System physical start address of the region
+ * @length: Length of the region in bytes
+ * @prot: IOMMU Protection flags (READ/WRITE/...)
++ * @type: Type of the reserved region
+ */
+-struct iommu_dm_region {
++struct iommu_resv_region {
+ struct list_head list;
+ phys_addr_t start;
+ size_t length;
+ int prot;
++ enum iommu_resv_type type;
+ };
+
+ #ifdef CONFIG_IOMMU_API
+@@ -150,9 +164,9 @@ struct iommu_dm_region {
+ * @device_group: find iommu group for a particular device
+ * @domain_get_attr: Query domain attributes
+ * @domain_set_attr: Change domain attributes
+- * @get_dm_regions: Request list of direct mapping requirements for a device
+- * @put_dm_regions: Free list of direct mapping requirements for a device
+- * @apply_dm_region: Temporary helper call-back for iova reserved ranges
++ * @get_resv_regions: Request list of reserved regions for a device
++ * @put_resv_regions: Free list of reserved regions for a device
++ * @apply_resv_region: Temporary helper call-back for iova reserved ranges
+ * @domain_window_enable: Configure and enable a particular window for a domain
+ * @domain_window_disable: Disable a particular window for a domain
+ * @domain_set_windows: Set the number of windows for a domain
+@@ -184,11 +198,12 @@ struct iommu_ops {
+ int (*domain_set_attr)(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data);
+
+- /* Request/Free a list of direct mapping requirements for a device */
+- void (*get_dm_regions)(struct device *dev, struct list_head *list);
+- void (*put_dm_regions)(struct device *dev, struct list_head *list);
+- void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
+- struct iommu_dm_region *region);
++ /* Request/Free a list of reserved regions for a device */
++ void (*get_resv_regions)(struct device *dev, struct list_head *list);
++ void (*put_resv_regions)(struct device *dev, struct list_head *list);
++ void (*apply_resv_region)(struct device *dev,
++ struct iommu_domain *domain,
++ struct iommu_resv_region *region);
+
+ /* Window handling functions */
+ int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
+@@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
+ extern void iommu_set_fault_handler(struct iommu_domain *domain,
+ iommu_fault_handler_t handler, void *token);
+
+-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
+-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
++extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
++extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
+ extern int iommu_request_dm_for_dev(struct device *dev);
++extern struct iommu_resv_region *
++iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
++ enum iommu_resv_type type);
++extern int iommu_get_group_resv_regions(struct iommu_group *group,
++ struct list_head *head);
+
+ extern int iommu_attach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+@@ -253,6 +273,7 @@ extern void iommu_group_remove_device(struct device *dev);
+ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+ int (*fn)(struct device *, void *));
+ extern struct iommu_group *iommu_group_get(struct device *dev);
++extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
+ extern void iommu_group_put(struct iommu_group *group);
+ extern int iommu_group_register_notifier(struct iommu_group *group,
+ struct notifier_block *nb);
+@@ -439,16 +460,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
+ {
+ }
+
+-static inline void iommu_get_dm_regions(struct device *dev,
++static inline void iommu_get_resv_regions(struct device *dev,
+ struct list_head *list)
+ {
+ }
+
+-static inline void iommu_put_dm_regions(struct device *dev,
++static inline void iommu_put_resv_regions(struct device *dev,
+ struct list_head *list)
+ {
+ }
+
++static inline int iommu_get_group_resv_regions(struct iommu_group *group,
++ struct list_head *head)
++{
++ return -ENODEV;
++}
++
+ static inline int iommu_request_dm_for_dev(struct device *dev)
+ {
+ return -ENODEV;
+--
+2.14.1
+
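The hunks above replace the old direct-mapping (dm) hooks with the reserved-region API and add the IOMMU_RESV_SW_MSI type for software-managed MSI windows. As a minimal consumer-side sketch, assuming a kernel with this patch applied (dump_sw_msi_window() is a hypothetical helper, not part of the patch):

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical: log any software-managed MSI window reported for a device. */
static void dump_sw_msi_window(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		if (region->type == IOMMU_RESV_SW_MSI)
			dev_info(dev, "SW MSI window: %pa + %zu\n",
				 &region->start, region->length);
	}
	/* hands the list back to the driver, which frees the entries */
	iommu_put_resv_regions(dev, &resv_regions);
}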
diff --git a/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch b/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch
new file mode 100644
index 0000000000..c10abfbf87
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch
@@ -0,0 +1,182 @@
+From 1d596855b596db88f10b12a1be6fd19e249be170 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:13:29 +0800
+Subject: [PATCH] irqchip: support layerscape
+
+This is an integrated patch for layerscape gic support.
+
+Signed-off-by: Eric Auger <eric.auger@redhat.com>
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/irqchip/Makefile | 1 +
+ drivers/irqchip/irq-gic-v3-its.c | 1 +
+ include/linux/irqdomain.h | 36 ++++++++++++++++++++++++++++++++++++
+ kernel/irq/irqdomain.c | 39 +++++++++++++++++++++++++++++++++++++++
+ kernel/irq/msi.c | 4 ++--
+ 5 files changed, 79 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
+index e4dbfc85..53d2cd54 100644
+--- a/drivers/irqchip/Makefile
++++ b/drivers/irqchip/Makefile
+@@ -74,3 +74,4 @@ obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
+ obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
+ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o
+ obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
++obj-$(CONFIG_QUICC_ENGINE) += irq-qeic.o
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index acb9d250..2f1c8826 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1659,6 +1659,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
+
+ inner_domain->parent = its_parent;
+ inner_domain->bus_token = DOMAIN_BUS_NEXUS;
++ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
+ info->ops = &its_msi_domain_ops;
+ info->data = its;
+ inner_domain->host_data = info;
+diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
+index ffb84604..188eced6 100644
+--- a/include/linux/irqdomain.h
++++ b/include/linux/irqdomain.h
+@@ -183,6 +183,12 @@ enum {
+ /* Irq domain is an IPI domain with single virq */
+ IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
+
++ /* Irq domain implements MSIs */
++ IRQ_DOMAIN_FLAG_MSI = (1 << 4),
++
++ /* Irq domain implements MSI remapping */
++ IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
++
+ /*
+ * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
+ * for implementation specific purposes and ignored by the
+@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+ void *host_data);
+ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
++extern bool irq_domain_check_msi_remap(void);
+ extern void irq_set_default_host(struct irq_domain *host);
+ extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
+ irq_hw_number_t hwirq, int node,
+@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
+ {
+ return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
+ }
++
++static inline bool irq_domain_is_msi(struct irq_domain *domain)
++{
++ return domain->flags & IRQ_DOMAIN_FLAG_MSI;
++}
++
++static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
++{
++ return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
++}
++
++extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
++
+ #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+ static inline void irq_domain_activate_irq(struct irq_data *data) { }
+ static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
+@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
+ {
+ return false;
+ }
++
++static inline bool irq_domain_is_msi(struct irq_domain *domain)
++{
++ return false;
++}
++
++static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
++{
++ return false;
++}
++
++static inline bool
++irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
++{
++ return false;
++}
+ #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+ #else /* CONFIG_IRQ_DOMAIN */
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index b59e6768..31805f23 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -277,6 +277,31 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
+ }
+ EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
+
++/**
++ * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
++ * IRQ remapping
++ *
++ * Return: false if any MSI irq domain does not support IRQ remapping,
++ * true otherwise (including if there is no MSI irq domain)
++ */
++bool irq_domain_check_msi_remap(void)
++{
++ struct irq_domain *h;
++ bool ret = true;
++
++ mutex_lock(&irq_domain_mutex);
++ list_for_each_entry(h, &irq_domain_list, link) {
++ if (irq_domain_is_msi(h) &&
++ !irq_domain_hierarchical_is_msi_remap(h)) {
++ ret = false;
++ break;
++ }
++ }
++ mutex_unlock(&irq_domain_mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
++
+ /**
+ * irq_set_default_host() - Set a "default" irq domain
+ * @domain: default domain pointer
+@@ -1408,6 +1433,20 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain)
+ if (domain->ops->alloc)
+ domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
+ }
++
++/**
++ * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
++ * parent has MSI remapping support
++ * @domain: domain pointer
++ */
++bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
++{
++ for (; domain; domain = domain->parent) {
++ if (irq_domain_is_msi_remap(domain))
++ return true;
++ }
++ return false;
++}
+ #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+ /**
+ * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index 8a3e8727..2e2b2c45 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -272,8 +272,8 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ msi_domain_update_chip_ops(info);
+
+- return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
+- &msi_domain_ops, info);
++ return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
++ fwnode, &msi_domain_ops, info);
+ }
+
+ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
+--
+2.14.1
+
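irq_domain_check_msi_remap() above answers a system-wide question: is every MSI irq domain backed by remapping hardware (here the GICv3 ITS, which now sets IRQ_DOMAIN_FLAG_MSI_REMAP)? A hedged sketch of the kind of gate a consumer such as VFIO can build on it, assuming this patch is applied (check_msi_isolation() and its argument are illustrative names, not from the patch):

/* Illustrative only: refuse device assignment when MSIs are not remapped. */
static int check_msi_isolation(bool allow_unsafe_interrupts)
{
	if (!irq_domain_check_msi_remap() && !allow_unsafe_interrupts)
		return -EPERM;	/* unisolated MSIs; caller must opt in */
	return 0;
}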
diff --git a/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch b/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch
new file mode 100644
index 0000000000..ff19a6da03
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch
@@ -0,0 +1,611 @@
+From b31046c51c72232363711f0c623df08bf28c37e4 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:21:30 +0800
+Subject: [PATCH] mmc: layerscape support
+
+This is an integrated patch for layerscape mmc support.
+
+Adrian Hunter <adrian.hunter@intel.com>
+Jaehoon Chung <jh80.chung@samsung.com>
+Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/mmc/host/Kconfig | 1 +
+ drivers/mmc/host/sdhci-esdhc.h | 52 +++++---
+ drivers/mmc/host/sdhci-of-esdhc.c | 251 ++++++++++++++++++++++++++++++++++++--
+ drivers/mmc/host/sdhci.c | 45 ++++---
+ drivers/mmc/host/sdhci.h | 3 +
+ 5 files changed, 306 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 5274f503..a1135a92 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -144,6 +144,7 @@ config MMC_SDHCI_OF_ESDHC
+ depends on MMC_SDHCI_PLTFM
+ depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
+ select MMC_SDHCI_IO_ACCESSORS
++ select FSL_GUTS
+ help
+ This selects the Freescale eSDHC controller support.
+
+diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
+index de132e28..98898a30 100644
+--- a/drivers/mmc/host/sdhci-esdhc.h
++++ b/drivers/mmc/host/sdhci-esdhc.h
+@@ -24,30 +24,46 @@
+ SDHCI_QUIRK_PIO_NEEDS_DELAY | \
+ SDHCI_QUIRK_NO_HISPD_BIT)
+
+-#define ESDHC_PROCTL 0x28
+-
+-#define ESDHC_SYSTEM_CONTROL 0x2c
+-#define ESDHC_CLOCK_MASK 0x0000fff0
+-#define ESDHC_PREDIV_SHIFT 8
+-#define ESDHC_DIVIDER_SHIFT 4
+-#define ESDHC_CLOCK_PEREN 0x00000004
+-#define ESDHC_CLOCK_HCKEN 0x00000002
+-#define ESDHC_CLOCK_IPGEN 0x00000001
+-
+ /* pltfm-specific */
+ #define ESDHC_HOST_CONTROL_LE 0x20
+
+ /*
+- * P2020 interpretation of the SDHCI_HOST_CONTROL register
++ * eSDHC register definition
+ */
+-#define ESDHC_CTRL_4BITBUS (0x1 << 1)
+-#define ESDHC_CTRL_8BITBUS (0x2 << 1)
+-#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
+
+-/* OF-specific */
+-#define ESDHC_DMA_SYSCTL 0x40c
+-#define ESDHC_DMA_SNOOP 0x00000040
++/* Present State Register */
++#define ESDHC_PRSSTAT 0x24
++#define ESDHC_CLOCK_STABLE 0x00000008
++
++/* Protocol Control Register */
++#define ESDHC_PROCTL 0x28
++#define ESDHC_VOLT_SEL 0x00000400
++#define ESDHC_CTRL_4BITBUS (0x1 << 1)
++#define ESDHC_CTRL_8BITBUS (0x2 << 1)
++#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
++#define ESDHC_HOST_CONTROL_RES 0x01
++
++/* System Control Register */
++#define ESDHC_SYSTEM_CONTROL 0x2c
++#define ESDHC_CLOCK_MASK 0x0000fff0
++#define ESDHC_PREDIV_SHIFT 8
++#define ESDHC_DIVIDER_SHIFT 4
++#define ESDHC_CLOCK_SDCLKEN 0x00000008
++#define ESDHC_CLOCK_PEREN 0x00000004
++#define ESDHC_CLOCK_HCKEN 0x00000002
++#define ESDHC_CLOCK_IPGEN 0x00000001
++
++/* Host Controller Capabilities Register 2 */
++#define ESDHC_CAPABILITIES_1 0x114
++
++/* Tuning Block Control Register */
++#define ESDHC_TBCTL 0x120
++#define ESDHC_TB_EN 0x00000004
+
+-#define ESDHC_HOST_CONTROL_RES 0x01
++/* Control Register for DMA transfer */
++#define ESDHC_DMA_SYSCTL 0x40c
++#define ESDHC_PERIPHERAL_CLK_SEL 0x00080000
++#define ESDHC_FLUSH_ASYNC_FIFO 0x00040000
++#define ESDHC_DMA_SNOOP 0x00000040
+
+ #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 3c27401c..4b0f375b 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -16,8 +16,12 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
++#include <linux/of_address.h>
+ #include <linux/delay.h>
+ #include <linux/module.h>
++#include <linux/sys_soc.h>
++#include <linux/clk.h>
++#include <linux/ktime.h>
+ #include <linux/mmc/host.h>
+ #include "sdhci-pltfm.h"
+ #include "sdhci-esdhc.h"
+@@ -28,8 +32,12 @@
+ struct sdhci_esdhc {
+ u8 vendor_ver;
+ u8 spec_ver;
++ bool quirk_incorrect_hostver;
++ unsigned int peripheral_clock;
+ };
+
++static void esdhc_clock_enable(struct sdhci_host *host, bool enable);
++
+ /**
+ * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
+ * to make it compatible with SD spec.
+@@ -80,6 +88,17 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
+ return ret;
+ }
+
++ /*
++	 * The mmc host's DTS properties are used to enable each speed
++	 * mode according to SoC and board capability, so clear the
++	 * SDR50/SDR104/DDR50 support bits here.
++ */
++ if (spec_reg == SDHCI_CAPABILITIES_1) {
++ ret = value & (~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
++ SDHCI_SUPPORT_DDR50));
++ return ret;
++ }
++
+ ret = value;
+ return ret;
+ }
+@@ -87,6 +106,8 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
+ static u16 esdhc_readw_fixup(struct sdhci_host *host,
+ int spec_reg, u32 value)
+ {
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u16 ret;
+ int shift = (spec_reg & 0x2) * 8;
+
+@@ -94,6 +115,12 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host,
+ ret = value & 0xffff;
+ else
+ ret = (value >> shift) & 0xffff;
++ /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
++ * vendor version and spec version information.
++ */
++ if ((spec_reg == SDHCI_HOST_VERSION) &&
++ (esdhc->quirk_incorrect_hostver))
++ ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
+ return ret;
+ }
+
+@@ -235,7 +262,11 @@ static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
+ u32 ret;
+ u32 value;
+
+- value = ioread32be(host->ioaddr + reg);
++ if (reg == SDHCI_CAPABILITIES_1)
++ value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
++ else
++ value = ioread32be(host->ioaddr + reg);
++
+ ret = esdhc_readl_fixup(host, reg, value);
+
+ return ret;
+@@ -246,7 +277,11 @@ static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
+ u32 ret;
+ u32 value;
+
+- value = ioread32(host->ioaddr + reg);
++ if (reg == SDHCI_CAPABILITIES_1)
++ value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
++ else
++ value = ioread32(host->ioaddr + reg);
++
+ ret = esdhc_readl_fixup(host, reg, value);
+
+ return ret;
+@@ -404,15 +439,25 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
+ static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+
+- return pltfm_host->clock;
++ if (esdhc->peripheral_clock)
++ return esdhc->peripheral_clock;
++ else
++ return pltfm_host->clock;
+ }
+
+ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
++ unsigned int clock;
+
+- return pltfm_host->clock / 256 / 16;
++ if (esdhc->peripheral_clock)
++ clock = esdhc->peripheral_clock;
++ else
++ clock = pltfm_host->clock;
++ return clock / 256 / 16;
+ }
+
+ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+@@ -421,17 +466,34 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ int pre_div = 1;
+ int div = 1;
++ ktime_t timeout;
+ u32 temp;
+
+ host->mmc->actual_clock = 0;
+
+- if (clock == 0)
++ if (clock == 0) {
++ esdhc_clock_enable(host, false);
+ return;
++ }
+
+ /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+ if (esdhc->vendor_ver < VENDOR_V_23)
+ pre_div = 2;
+
++ /*
++ * Limit SD clock to 167MHz for ls1046a according to its datasheet
++ */
++ if (clock > 167000000 &&
++ of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc"))
++ clock = 167000000;
++
++ /*
++ * Limit SD clock to 125MHz for ls1012a according to its datasheet
++ */
++ if (clock > 125000000 &&
++ of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc"))
++ clock = 125000000;
++
+ /* Workaround to reduce the clock frequency for p1010 esdhc */
+ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
+ if (clock > 20000000)
+@@ -441,8 +503,8 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ }
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+- temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+- | ESDHC_CLOCK_MASK);
++ temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
++ ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+@@ -462,7 +524,20 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+- mdelay(1);
++
++ /* Wait max 20 ms */
++ timeout = ktime_add_ms(ktime_get(), 20);
++ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
++ if (ktime_after(ktime_get(), timeout)) {
++ pr_err("%s: Internal clock never stabilised.\n",
++ mmc_hostname(host->mmc));
++ return;
++ }
++ udelay(10);
++ }
++
++ temp |= ESDHC_CLOCK_SDCLKEN;
++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ }
+
+ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+@@ -487,6 +562,33 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+ sdhci_writel(host, ctrl, ESDHC_PROCTL);
+ }
+
++static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
++{
++ u32 val;
++ ktime_t timeout;
++
++ val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
++
++ if (enable)
++ val |= ESDHC_CLOCK_SDCLKEN;
++ else
++ val &= ~ESDHC_CLOCK_SDCLKEN;
++
++ sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
++
++ /* Wait max 20 ms */
++ timeout = ktime_add_ms(ktime_get(), 20);
++ val = ESDHC_CLOCK_STABLE;
++ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
++ if (ktime_after(ktime_get(), timeout)) {
++ pr_err("%s: Internal clock never stabilised.\n",
++ mmc_hostname(host->mmc));
++ break;
++ }
++ udelay(10);
++ }
++}
++
+ static void esdhc_reset(struct sdhci_host *host, u8 mask)
+ {
+ sdhci_reset(host, mask);
+@@ -495,6 +597,95 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
++/* The SCFG, Supplemental Configuration Unit, provides SoC specific
++ * configuration and status registers for the device. There is an
++ * SDHC IO VSEL control register on SCFG for some platforms. It's
++ * used to support SDHC IO voltage switching.
++ */
++static const struct of_device_id scfg_device_ids[] = {
++ { .compatible = "fsl,t1040-scfg", },
++ { .compatible = "fsl,ls1012a-scfg", },
++ { .compatible = "fsl,ls1046a-scfg", },
++ {}
++};
++
++/* SDHC IO VSEL control register definition */
++#define SCFG_SDHCIOVSELCR 0x408
++#define SDHCIOVSELCR_TGLEN 0x80000000
++#define SDHCIOVSELCR_VSELVAL 0x60000000
++#define SDHCIOVSELCR_SDHC_VS 0x00000001
++
++static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
++ struct mmc_ios *ios)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ struct device_node *scfg_node;
++ void __iomem *scfg_base = NULL;
++ u32 sdhciovselcr;
++ u32 val;
++
++ /*
++ * Signal Voltage Switching is only applicable for Host Controllers
++ * v3.00 and above.
++ */
++ if (host->version < SDHCI_SPEC_300)
++ return 0;
++
++ val = sdhci_readl(host, ESDHC_PROCTL);
++
++ switch (ios->signal_voltage) {
++ case MMC_SIGNAL_VOLTAGE_330:
++ val &= ~ESDHC_VOLT_SEL;
++ sdhci_writel(host, val, ESDHC_PROCTL);
++ return 0;
++ case MMC_SIGNAL_VOLTAGE_180:
++ scfg_node = of_find_matching_node(NULL, scfg_device_ids);
++ if (scfg_node)
++ scfg_base = of_iomap(scfg_node, 0);
++ if (scfg_base) {
++ sdhciovselcr = SDHCIOVSELCR_TGLEN |
++ SDHCIOVSELCR_VSELVAL;
++ iowrite32be(sdhciovselcr,
++ scfg_base + SCFG_SDHCIOVSELCR);
++
++ val |= ESDHC_VOLT_SEL;
++ sdhci_writel(host, val, ESDHC_PROCTL);
++ mdelay(5);
++
++ sdhciovselcr = SDHCIOVSELCR_TGLEN |
++ SDHCIOVSELCR_SDHC_VS;
++ iowrite32be(sdhciovselcr,
++ scfg_base + SCFG_SDHCIOVSELCR);
++ iounmap(scfg_base);
++ } else {
++ val |= ESDHC_VOLT_SEL;
++ sdhci_writel(host, val, ESDHC_PROCTL);
++ }
++ return 0;
++ default:
++ return 0;
++ }
++}
++
++static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ u32 val;
++
++ /* Use tuning block for tuning procedure */
++ esdhc_clock_enable(host, false);
++ val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
++ val |= ESDHC_FLUSH_ASYNC_FIFO;
++ sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
++
++ val = sdhci_readl(host, ESDHC_TBCTL);
++ val |= ESDHC_TB_EN;
++ sdhci_writel(host, val, ESDHC_TBCTL);
++ esdhc_clock_enable(host, true);
++
++ return sdhci_execute_tuning(mmc, opcode);
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static u32 esdhc_proctl;
+ static int esdhc_of_suspend(struct device *dev)
+@@ -575,10 +766,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
+ .ops = &sdhci_esdhc_le_ops,
+ };
+
++static struct soc_device_attribute soc_incorrect_hostver[] = {
++ { .family = "QorIQ T4240", .revision = "1.0", },
++ { .family = "QorIQ T4240", .revision = "2.0", },
++ { },
++};
++
+ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
+ {
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_esdhc *esdhc;
++ struct device_node *np;
++ struct clk *clk;
++ u32 val;
+ u16 host_ver;
+
+ pltfm_host = sdhci_priv(host);
+@@ -588,6 +788,36 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
+ esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT;
+ esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
++ if (soc_device_match(soc_incorrect_hostver))
++ esdhc->quirk_incorrect_hostver = true;
++ else
++ esdhc->quirk_incorrect_hostver = false;
++
++ np = pdev->dev.of_node;
++ clk = of_clk_get(np, 0);
++ if (!IS_ERR(clk)) {
++ /*
++ * esdhc->peripheral_clock would be assigned with a value
++		 * esdhc->peripheral_clock is assigned the eSDHC base clock
++		 * when the peripheral clock is used. For ls1046a, the clock
++		 * value obtained via the common clk API is the peripheral
++		 * clock, while the eSDHC base clock is 1/2 of the
++		 * peripheral clock.
++ if (of_device_is_compatible(np, "fsl,ls1046a-esdhc"))
++ esdhc->peripheral_clock = clk_get_rate(clk) / 2;
++ else
++ esdhc->peripheral_clock = clk_get_rate(clk);
++
++ clk_put(clk);
++ }
++
++ if (esdhc->peripheral_clock) {
++ esdhc_clock_enable(host, false);
++ val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
++ val |= ESDHC_PERIPHERAL_CLK_SEL;
++ sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
++ esdhc_clock_enable(host, true);
++ }
+ }
+
+ static int sdhci_esdhc_probe(struct platform_device *pdev)
+@@ -610,6 +840,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
++ host->mmc_host_ops.start_signal_voltage_switch =
++ esdhc_signal_voltage_switch;
++ host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
++ host->tuning_delay = 1;
++
+ esdhc_init(pdev, host);
+
+ sdhci_get_of_property(pdev);
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 7d275e72..099c3bf5 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1624,26 +1624,24 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+- if ((ios->timing == MMC_TIMING_SD_HS ||
+- ios->timing == MMC_TIMING_MMC_HS)
+- && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
+- ctrl |= SDHCI_CTRL_HISPD;
+- else
+- ctrl &= ~SDHCI_CTRL_HISPD;
++ if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
++ if ((ios->timing == MMC_TIMING_SD_HS ||
++ ios->timing == MMC_TIMING_MMC_HS ||
++ ios->timing == MMC_TIMING_MMC_HS400 ||
++ ios->timing == MMC_TIMING_MMC_HS200 ||
++ ios->timing == MMC_TIMING_MMC_DDR52 ||
++ ios->timing == MMC_TIMING_UHS_SDR50 ||
++ ios->timing == MMC_TIMING_UHS_SDR104 ||
++ ios->timing == MMC_TIMING_UHS_DDR50 ||
++ ios->timing == MMC_TIMING_UHS_SDR25))
++ ctrl |= SDHCI_CTRL_HISPD;
++ else
++ ctrl &= ~SDHCI_CTRL_HISPD;
++ }
+
+ if (host->version >= SDHCI_SPEC_300) {
+ u16 clk, ctrl_2;
+
+- /* In case of UHS-I modes, set High Speed Enable */
+- if ((ios->timing == MMC_TIMING_MMC_HS400) ||
+- (ios->timing == MMC_TIMING_MMC_HS200) ||
+- (ios->timing == MMC_TIMING_MMC_DDR52) ||
+- (ios->timing == MMC_TIMING_UHS_SDR50) ||
+- (ios->timing == MMC_TIMING_UHS_SDR104) ||
+- (ios->timing == MMC_TIMING_UHS_DDR50) ||
+- (ios->timing == MMC_TIMING_UHS_SDR25))
+- ctrl |= SDHCI_CTRL_HISPD;
+-
+ if (!host->preset_enabled) {
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ /*
+@@ -1956,7 +1954,7 @@ static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+ return 0;
+ }
+
+-static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
++int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ {
+ struct sdhci_host *host = mmc_priv(mmc);
+ u16 ctrl;
+@@ -2015,6 +2013,9 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ return err;
+ }
+
++ if (host->tuning_delay < 0)
++ host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
++
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
+@@ -2127,9 +2128,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+- /* eMMC spec does not require a delay between tuning cycles */
+- if (opcode == MMC_SEND_TUNING_BLOCK)
+- mdelay(1);
++ /* Spec does not require a delay between tuning cycles */
++ if (host->tuning_delay > 0)
++ mdelay(host->tuning_delay);
++
+ } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
+
+ /*
+@@ -2165,6 +2167,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ spin_unlock_irqrestore(&host->lock, flags);
+ return err;
+ }
++EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
+
+ static int sdhci_select_drive_strength(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+@@ -2997,6 +3000,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
+
+ host->flags = SDHCI_SIGNALING_330;
+
++ host->tuning_delay = -1;
++
+ return host;
+ }
+
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 2570455b..088bed43 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -524,6 +524,8 @@ struct sdhci_host {
+ #define SDHCI_TUNING_MODE_1 0
+ #define SDHCI_TUNING_MODE_2 1
+ #define SDHCI_TUNING_MODE_3 2
++ /* Delay (ms) between tuning commands */
++ int tuning_delay;
+
+ unsigned long private[0] ____cacheline_aligned;
+ };
+@@ -689,6 +691,7 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ void sdhci_set_bus_width(struct sdhci_host *host, int width);
+ void sdhci_reset(struct sdhci_host *host, u8 mask);
+ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
++int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+
+ #ifdef CONFIG_PM
+ extern int sdhci_suspend_host(struct sdhci_host *host);
+--
+2.14.1
+
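esdhc_of_set_clock() above derives the SD clock from the base clock with a power-of-two pre-divider (starting at 2 for controllers older than vendor version 2.3) and a linear divider, then polls ESDHC_PRSSTAT for clock stability. A standalone userspace sketch of the divider selection, mirroring the driver's two loops (the second loop sits in unmodified context not shown in the hunk; the example frequencies are illustrative, not from a datasheet):

#include <stdio.h>

static unsigned int esdhc_pick_clock(unsigned int base, unsigned int target)
{
	unsigned int pre_div = 1, div = 1;	/* pre_div would start at 2 on pre-2.3 IP */

	while (base / pre_div / 16 > target && pre_div < 256)
		pre_div *= 2;
	while (base / pre_div / div > target && div < 16)
		div++;
	return base / pre_div / div;	/* resulting SD clock in Hz */
}

int main(void)
{
	/* e.g. a 400 MHz base clock aimed at 50 MHz yields exactly 50 MHz */
	printf("%u Hz\n", esdhc_pick_clock(400000000, 50000000));
	return 0;
}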
diff --git a/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch b/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch
new file mode 100644
index 0000000000..ad5dd28d63
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch
@@ -0,0 +1,1378 @@
+From adb377019768396f339010ebb9e80fa8384992f7 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:20:30 +0800
+Subject: [PATCH] qe: support layerscape
+
+This is an integrated patch for layerscape qe support.
+
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} | 389 +++++++++++++--------
+ drivers/net/wan/fsl_ucc_hdlc.c | 4 +-
+ drivers/soc/fsl/qe/Kconfig | 2 +-
+ drivers/soc/fsl/qe/Makefile | 2 +-
+ drivers/soc/fsl/qe/qe.c | 80 +++--
+ drivers/soc/fsl/qe/qe_ic.h | 103 ------
+ drivers/soc/fsl/qe/qe_io.c | 42 +--
+ drivers/soc/fsl/qe/qe_tdm.c | 8 +-
+ drivers/soc/fsl/qe/ucc.c | 10 +-
+ drivers/soc/fsl/qe/ucc_fast.c | 74 ++--
+ drivers/tty/serial/ucc_uart.c | 1 +
+ include/soc/fsl/qe/qe.h | 1 -
+ include/soc/fsl/qe/qe_ic.h | 139 --------
+ 13 files changed, 359 insertions(+), 496 deletions(-)
+ rename drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} (54%)
+ delete mode 100644 drivers/soc/fsl/qe/qe_ic.h
+ delete mode 100644 include/soc/fsl/qe/qe_ic.h
+
+diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/irqchip/irq-qeic.c
+similarity index 54%
+rename from drivers/soc/fsl/qe/qe_ic.c
+rename to drivers/irqchip/irq-qeic.c
+index ec2ca864..21e3b43c 100644
+--- a/drivers/soc/fsl/qe/qe_ic.c
++++ b/drivers/irqchip/irq-qeic.c
+@@ -1,7 +1,7 @@
+ /*
+- * arch/powerpc/sysdev/qe_lib/qe_ic.c
++ * drivers/irqchip/irq-qeic.c
+ *
+- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
++ * Copyright (C) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+@@ -18,7 +18,11 @@
+ #include <linux/of_address.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <linux/irqdomain.h>
++#include <linux/irqchip.h>
+ #include <linux/errno.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
+ #include <linux/reboot.h>
+ #include <linux/slab.h>
+ #include <linux/stddef.h>
+@@ -26,11 +30,136 @@
+ #include <linux/signal.h>
+ #include <linux/device.h>
+ #include <linux/spinlock.h>
+-#include <asm/irq.h>
++#include <linux/irq.h>
+ #include <asm/io.h>
+-#include <soc/fsl/qe/qe_ic.h>
+
+-#include "qe_ic.h"
++#define NR_QE_IC_INTS 64
++
++/* QE IC registers offset */
++#define QEIC_CICR 0x00
++#define QEIC_CIVEC 0x04
++#define QEIC_CRIPNR 0x08
++#define QEIC_CIPNR 0x0c
++#define QEIC_CIPXCC 0x10
++#define QEIC_CIPYCC 0x14
++#define QEIC_CIPWCC 0x18
++#define QEIC_CIPZCC 0x1c
++#define QEIC_CIMR 0x20
++#define QEIC_CRIMR 0x24
++#define QEIC_CICNR 0x28
++#define QEIC_CIPRTA 0x30
++#define QEIC_CIPRTB 0x34
++#define QEIC_CRICR 0x3c
++#define QEIC_CHIVEC 0x60
++
++/* Interrupt priority registers */
++#define CIPCC_SHIFT_PRI0 29
++#define CIPCC_SHIFT_PRI1 26
++#define CIPCC_SHIFT_PRI2 23
++#define CIPCC_SHIFT_PRI3 20
++#define CIPCC_SHIFT_PRI4 13
++#define CIPCC_SHIFT_PRI5 10
++#define CIPCC_SHIFT_PRI6 7
++#define CIPCC_SHIFT_PRI7 4
++
++/* CICR priority modes */
++#define CICR_GWCC 0x00040000
++#define CICR_GXCC 0x00020000
++#define CICR_GYCC 0x00010000
++#define CICR_GZCC 0x00080000
++#define CICR_GRTA 0x00200000
++#define CICR_GRTB 0x00400000
++#define CICR_HPIT_SHIFT 8
++#define CICR_HPIT_MASK 0x00000300
++#define CICR_HP_SHIFT 24
++#define CICR_HP_MASK 0x3f000000
++
++/* CICNR */
++#define CICNR_WCC1T_SHIFT 20
++#define CICNR_ZCC1T_SHIFT 28
++#define CICNR_YCC1T_SHIFT 12
++#define CICNR_XCC1T_SHIFT 4
++
++/* CRICR */
++#define CRICR_RTA1T_SHIFT 20
++#define CRICR_RTB1T_SHIFT 28
++
++/* Signal indicator */
++#define SIGNAL_MASK 3
++#define SIGNAL_HIGH 2
++#define SIGNAL_LOW 0
++
++#define NUM_OF_QE_IC_GROUPS 6
++
++/* Flags when we init the QE IC */
++#define QE_IC_SPREADMODE_GRP_W 0x00000001
++#define QE_IC_SPREADMODE_GRP_X 0x00000002
++#define QE_IC_SPREADMODE_GRP_Y 0x00000004
++#define QE_IC_SPREADMODE_GRP_Z 0x00000008
++#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
++#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
++
++#define QE_IC_LOW_SIGNAL 0x00000100
++#define QE_IC_HIGH_SIGNAL 0x00000200
++
++#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
++#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
++#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
++#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
++#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
++#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
++#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
++#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
++#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
++#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
++#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
++#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
++#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
++
++/* QE interrupt sources groups */
++enum qe_ic_grp_id {
++ QE_IC_GRP_W = 0, /* QE interrupt controller group W */
++ QE_IC_GRP_X, /* QE interrupt controller group X */
++ QE_IC_GRP_Y, /* QE interrupt controller group Y */
++ QE_IC_GRP_Z, /* QE interrupt controller group Z */
++ QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
++ QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
++};
++
++struct qe_ic {
++ /* Control registers offset */
++ u32 __iomem *regs;
++
++ /* The remapper for this QEIC */
++ struct irq_domain *irqhost;
++
++ /* The "linux" controller struct */
++ struct irq_chip hc_irq;
++
++ /* VIRQ numbers of QE high/low irqs */
++ unsigned int virq_high;
++ unsigned int virq_low;
++};
++
++/*
++ * QE interrupt controller internal structure
++ */
++struct qe_ic_info {
++ /* location of this source at the QIMR register. */
++ u32 mask;
++
++ /* Mask register offset */
++ u32 mask_reg;
++
++ /*
++ * for grouped interrupts sources - the interrupt
++ * code as appears at the group priority register
++ */
++ u8 pri_code;
++
++ /* Group priority register offset */
++ u32 pri_reg;
++};
+
+ static DEFINE_RAW_SPINLOCK(qe_ic_lock);
+
+@@ -175,15 +304,15 @@ static struct qe_ic_info qe_ic_info[] = {
+ },
+ };
+
+-static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
++static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
+ {
+- return in_be32(base + (reg >> 2));
++ return ioread32be(base + (reg >> 2));
+ }
+
+-static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
++static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
+ u32 value)
+ {
+- out_be32(base + (reg >> 2), value);
++ iowrite32be(value, base + (reg >> 2));
+ }
+
+ static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
+@@ -285,8 +414,8 @@ static const struct irq_domain_ops qe_ic_host_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ };
+
+-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
++/* Return an interrupt vector or 0 if no interrupt is pending. */
++static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+ {
+ int irq;
+
+@@ -296,13 +425,13 @@ unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+ irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
+
+ if (irq == 0)
+- return NO_IRQ;
++ return 0;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+ }
+
+-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
++/* Return an interrupt vector or 0 if no interrupt is pending. */
++static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+ {
+ int irq;
+
+@@ -312,32 +441,96 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+ irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
+
+ if (irq == 0)
+- return NO_IRQ;
++ return 0;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+ }
+
+-void __init qe_ic_init(struct device_node *node, unsigned int flags,
+- void (*low_handler)(struct irq_desc *desc),
+- void (*high_handler)(struct irq_desc *desc))
++static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
++{
++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
++ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
++
++ if (cascade_irq != 0)
++ generic_handle_irq(cascade_irq);
++}
++
++static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
++{
++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
++ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
++
++ if (cascade_irq != 0)
++ generic_handle_irq(cascade_irq);
++}
++
++static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
++{
++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
++ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
++ struct irq_chip *chip = irq_desc_get_chip(desc);
++
++ if (cascade_irq != 0)
++ generic_handle_irq(cascade_irq);
++
++ chip->irq_eoi(&desc->irq_data);
++}
++
++static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
++{
++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
++ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
++ struct irq_chip *chip = irq_desc_get_chip(desc);
++
++ if (cascade_irq != 0)
++ generic_handle_irq(cascade_irq);
++
++ chip->irq_eoi(&desc->irq_data);
++}
++
++static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
++{
++ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
++ unsigned int cascade_irq;
++ struct irq_chip *chip = irq_desc_get_chip(desc);
++
++ cascade_irq = qe_ic_get_high_irq(qe_ic);
++ if (cascade_irq == 0)
++ cascade_irq = qe_ic_get_low_irq(qe_ic);
++
++ if (cascade_irq != 0)
++ generic_handle_irq(cascade_irq);
++
++ chip->irq_eoi(&desc->irq_data);
++}
++
++static int __init qe_ic_init(struct device_node *node, unsigned int flags)
+ {
+ struct qe_ic *qe_ic;
+ struct resource res;
+- u32 temp = 0, ret, high_active = 0;
++ u32 temp = 0, high_active = 0;
++ int ret = 0;
++
++ if (!node)
++ return -ENODEV;
+
+ ret = of_address_to_resource(node, 0, &res);
+- if (ret)
+- return;
++ if (ret) {
++ ret = -ENODEV;
++ goto err_put_node;
++ }
+
+ qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+- if (qe_ic == NULL)
+- return;
++ if (qe_ic == NULL) {
++ ret = -ENOMEM;
++ goto err_put_node;
++ }
+
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
+ if (qe_ic->irqhost == NULL) {
+- kfree(qe_ic);
+- return;
++ ret = -ENOMEM;
++ goto err_free_qe_ic;
+ }
+
+ qe_ic->regs = ioremap(res.start, resource_size(&res));
+@@ -347,10 +540,10 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
+ qe_ic->virq_high = irq_of_parse_and_map(node, 0);
+ qe_ic->virq_low = irq_of_parse_and_map(node, 1);
+
+- if (qe_ic->virq_low == NO_IRQ) {
+- printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
+- kfree(qe_ic);
+- return;
++ if (qe_ic->virq_low == 0) {
++ pr_err("Failed to map QE_IC low IRQ\n");
++ ret = -ENOMEM;
++ goto err_domain_remove;
+ }
+
+ /* default priority scheme is grouped. If spread mode is */
+@@ -377,136 +570,36 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
+ qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+
+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
+- irq_set_chained_handler(qe_ic->virq_low, low_handler);
++ irq_set_chained_handler(qe_ic->virq_low, qe_ic_cascade_low_mpic);
+
+- if (qe_ic->virq_high != NO_IRQ &&
++ if (qe_ic->virq_high != 0 &&
+ qe_ic->virq_high != qe_ic->virq_low) {
+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
+- irq_set_chained_handler(qe_ic->virq_high, high_handler);
+- }
+-}
+-
+-void qe_ic_set_highest_priority(unsigned int virq, int high)
+-{
+- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+- unsigned int src = virq_to_hw(virq);
+- u32 temp = 0;
+-
+- temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
+-
+- temp &= ~CICR_HP_MASK;
+- temp |= src << CICR_HP_SHIFT;
+-
+- temp &= ~CICR_HPIT_MASK;
+- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
+-
+- qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+-}
+-
+-/* Set Priority level within its group, from 1 to 8 */
+-int qe_ic_set_priority(unsigned int virq, unsigned int priority)
+-{
+- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+- unsigned int src = virq_to_hw(virq);
+- u32 temp;
+-
+- if (priority > 8 || priority == 0)
+- return -EINVAL;
+- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
+- "%s: Invalid hw irq number for QEIC\n", __func__))
+- return -EINVAL;
+- if (qe_ic_info[src].pri_reg == 0)
+- return -EINVAL;
+-
+- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
+-
+- if (priority < 4) {
+- temp &= ~(0x7 << (32 - priority * 3));
+- temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
+- } else {
+- temp &= ~(0x7 << (24 - priority * 3));
+- temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
++ irq_set_chained_handler(qe_ic->virq_high,
++ qe_ic_cascade_high_mpic);
+ }
+-
+- qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
+-
++ of_node_put(node);
+ return 0;
+-}
+-
+-/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
+-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
+-{
+- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+- unsigned int src = virq_to_hw(virq);
+- u32 temp, control_reg = QEIC_CICNR, shift = 0;
+-
+- if (priority > 2 || priority == 0)
+- return -EINVAL;
+- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
+- "%s: Invalid hw irq number for QEIC\n", __func__))
+- return -EINVAL;
+-
+- switch (qe_ic_info[src].pri_reg) {
+- case QEIC_CIPZCC:
+- shift = CICNR_ZCC1T_SHIFT;
+- break;
+- case QEIC_CIPWCC:
+- shift = CICNR_WCC1T_SHIFT;
+- break;
+- case QEIC_CIPYCC:
+- shift = CICNR_YCC1T_SHIFT;
+- break;
+- case QEIC_CIPXCC:
+- shift = CICNR_XCC1T_SHIFT;
+- break;
+- case QEIC_CIPRTA:
+- shift = CRICR_RTA1T_SHIFT;
+- control_reg = QEIC_CRICR;
+- break;
+- case QEIC_CIPRTB:
+- shift = CRICR_RTB1T_SHIFT;
+- control_reg = QEIC_CRICR;
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- shift += (2 - priority) * 2;
+- temp = qe_ic_read(qe_ic->regs, control_reg);
+- temp &= ~(SIGNAL_MASK << shift);
+- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
+- qe_ic_write(qe_ic->regs, control_reg, temp);
+
+- return 0;
++err_domain_remove:
++ irq_domain_remove(qe_ic->irqhost);
++err_free_qe_ic:
++ kfree(qe_ic);
++err_put_node:
++ of_node_put(node);
++ return ret;
+ }
+
+-static struct bus_type qe_ic_subsys = {
+- .name = "qe_ic",
+- .dev_name = "qe_ic",
+-};
+-
+-static struct device device_qe_ic = {
+- .id = 0,
+- .bus = &qe_ic_subsys,
+-};
+-
+-static int __init init_qe_ic_sysfs(void)
++static int __init init_qe_ic(struct device_node *node,
++ struct device_node *parent)
+ {
+- int rc;
++ int ret;
+
+- printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
++ ret = qe_ic_init(node, 0);
++ if (ret)
++ return ret;
+
+- rc = subsys_system_register(&qe_ic_subsys, NULL);
+- if (rc) {
+- printk(KERN_ERR "Failed registering qe_ic sys class\n");
+- return -ENODEV;
+- }
+- rc = device_register(&device_qe_ic);
+- if (rc) {
+- printk(KERN_ERR "Failed registering qe_ic sys device\n");
+- return -ENODEV;
+- }
+ return 0;
+ }
+
+-subsys_initcall(init_qe_ic_sysfs);
++IRQCHIP_DECLARE(qeic, "fsl,qe-ic", init_qe_ic);
+diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
+index 65647533..27e11404 100644
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
+ /* set bd status and length */
+ bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
+
+- iowrite16be(bd_status, &bd->status);
+ iowrite16be(skb->len, &bd->length);
++ iowrite16be(bd_status, &bd->status);
+
+ /* Move to next BD in the ring */
+ if (!(bd_status & T_W_S))
+@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
+ struct sk_buff *skb;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct qe_bd *bd;
+- u32 bd_status;
++ u16 bd_status;
+ u16 length, howmany = 0;
+ u8 *bdbuffer;
+ int i;
+diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
+index 73a2e08b..b26b6431 100644
+--- a/drivers/soc/fsl/qe/Kconfig
++++ b/drivers/soc/fsl/qe/Kconfig
+@@ -4,7 +4,7 @@
+
+ config QUICC_ENGINE
+ bool "Freescale QUICC Engine (QE) Support"
+- depends on FSL_SOC && PPC32
++ depends on OF && HAS_IOMEM
+ select GENERIC_ALLOCATOR
+ select CRC32
+ help
+diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
+index 2031d385..51e47264 100644
+--- a/drivers/soc/fsl/qe/Makefile
++++ b/drivers/soc/fsl/qe/Makefile
+@@ -1,7 +1,7 @@
+ #
+ # Makefile for the linux ppc-specific parts of QE
+ #
+-obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
++obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_io.o
+ obj-$(CONFIG_CPM) += qe_common.o
+ obj-$(CONFIG_UCC) += ucc.o
+ obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
+diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
+index 2707a827..2b53e852 100644
+--- a/drivers/soc/fsl/qe/qe.c
++++ b/drivers/soc/fsl/qe/qe.c
+@@ -33,8 +33,6 @@
+ #include <asm/pgtable.h>
+ #include <soc/fsl/qe/immap_qe.h>
+ #include <soc/fsl/qe/qe.h>
+-#include <asm/prom.h>
+-#include <asm/rheap.h>
+
+ static void qe_snums_init(void);
+ static int qe_sdma_init(void);
+@@ -109,15 +107,27 @@ void qe_reset(void)
+ panic("sdma init failed!");
+ }
+
++/* Issue a command to the QE; returns 0 on success and -EIO on error.
++ *
++ * @cmd: the command code, e.g. QE_INIT_TX_RX, QE_STOP_TX and so on
++ * @device: which sub-block will run the command, e.g.
++ * QE_CR_SUBBLOCK_UCCFAST1 - 8, QE_CR_SUBBLOCK_UCCSLOW1 - 8,
++ * QE_CR_SUBBLOCK_MCC1 - 3, QE_CR_SUBBLOCK_IDMA1 - 4 and so on
++ * @mcn_protocol: specifies the mode for the command for non-MCC, e.g.
++ * QE_CR_PROTOCOL_HDLC_TRANSPARENT, QE_CR_PROTOCOL_QMC, QE_CR_PROTOCOL_UART
++ * and so on
++ * @cmd_input: command-related data
++ */
+ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
+ {
+ unsigned long flags;
+ u8 mcn_shift = 0, dev_shift = 0;
+- u32 ret;
++ int ret;
++ int i;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ if (cmd == QE_RESET) {
+- out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
++ iowrite32be((cmd | QE_CR_FLG), &qe_immr->cp.cecr);
+ } else {
+ if (cmd == QE_ASSIGN_PAGE) {
+ /* Here device is the SNUM, not sub-block */
+@@ -134,20 +144,26 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
+ mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
+ }
+
+- out_be32(&qe_immr->cp.cecdr, cmd_input);
+- out_be32(&qe_immr->cp.cecr,
+- (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
+- mcn_protocol << mcn_shift));
++ iowrite32be(cmd_input, &qe_immr->cp.cecdr);
++ iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) |
++ (u32)mcn_protocol << mcn_shift), &qe_immr->cp.cecr);
+ }
+
+ /* wait for the QE_CR_FLG to clear */
+- ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+- 100, 0);
++ ret = -EIO;
++ for (i = 0; i < 100; i++) {
++ if ((ioread32be(&qe_immr->cp.cecr) & QE_CR_FLG) == 0) {
++ ret = 0;
++ break;
++ }
++ udelay(1);
++ }
++
+	/* On timeout (i.e. failure) ret is still -EIO,
+	   otherwise it is 0 (success). */
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+- return ret == 1;
++ return ret;
+ }
+ EXPORT_SYMBOL(qe_issue_cmd);
+
+@@ -166,8 +182,8 @@ static unsigned int brg_clk = 0;
+ unsigned int qe_get_brg_clk(void)
+ {
+ struct device_node *qe;
+- int size;
+- const u32 *prop;
++ u32 val;
++ int ret;
+
+ if (brg_clk)
+ return brg_clk;
+@@ -179,9 +195,9 @@ unsigned int qe_get_brg_clk(void)
+ return brg_clk;
+ }
+
+- prop = of_get_property(qe, "brg-frequency", &size);
+- if (prop && size == sizeof(*prop))
+- brg_clk = *prop;
++ ret = of_property_read_u32(qe, "brg-frequency", &val);
++ if (!ret)
++ brg_clk = val;
+
+ of_node_put(qe);
+
+@@ -221,7 +237,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
+ tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ QE_BRGC_ENABLE | div16;
+
+- out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
++ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
+
+ return 0;
+ }
+@@ -355,9 +371,9 @@ static int qe_sdma_init(void)
+ return -ENOMEM;
+ }
+
+- out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
+- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
+- (0x1 << QE_SDMR_CEN_SHIFT)));
++ iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, &sdma->sdebcr);
++ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
++ &sdma->sdmr);
+
+ return 0;
+ }
+@@ -395,14 +411,14 @@ static void qe_upload_microcode(const void *base,
+ "uploading microcode '%s'\n", ucode->id);
+
+ /* Use auto-increment */
+- out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
+- QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
++ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE |
++ QE_IRAM_IADD_BADDR, &qe_immr->iram.iadd);
+
+ for (i = 0; i < be32_to_cpu(ucode->count); i++)
+- out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
++ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
+
+ /* Set I-RAM Ready Register */
+- out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
++ iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
+ }
+
+ /*
+@@ -487,7 +503,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
+ * If the microcode calls for it, split the I-RAM.
+ */
+ if (!firmware->split)
+- setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
++ qe_setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+
+ if (firmware->soc.model)
+ printk(KERN_INFO
+@@ -521,11 +537,11 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
+ u32 trap = be32_to_cpu(ucode->traps[j]);
+
+ if (trap)
+- out_be32(&qe_immr->rsp[i].tibcr[j], trap);
++ iowrite32be(trap, &qe_immr->rsp[i].tibcr[j]);
+ }
+
+ /* Enable traps */
+- out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
++ iowrite32be(be32_to_cpu(ucode->eccr), &qe_immr->rsp[i].eccr);
+ }
+
+ qe_firmware_uploaded = 1;
+@@ -644,9 +660,9 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
+ unsigned int qe_get_num_of_snums(void)
+ {
+ struct device_node *qe;
+- int size;
+ unsigned int num_of_snums;
+- const u32 *prop;
++ u32 val;
++ int ret;
+
+ num_of_snums = 28; /* The default number of snum for threads is 28 */
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+@@ -660,9 +676,9 @@ unsigned int qe_get_num_of_snums(void)
+ return num_of_snums;
+ }
+
+- prop = of_get_property(qe, "fsl,qe-num-snums", &size);
+- if (prop && size == sizeof(*prop)) {
+- num_of_snums = *prop;
++ ret = of_property_read_u32(qe, "fsl,qe-num-snums", &val);
++ if (!ret) {
++ num_of_snums = val;
+ if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
+ /* No QE ever has fewer than 28 SNUMs */
+ pr_err("QE: number of snum is invalid\n");
+diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h
+deleted file mode 100644
+index 926a2ed4..00000000
+--- a/drivers/soc/fsl/qe/qe_ic.h
++++ /dev/null
+@@ -1,103 +0,0 @@
+-/*
+- * drivers/soc/fsl/qe/qe_ic.h
+- *
+- * QUICC ENGINE Interrupt Controller Header
+- *
+- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+- *
+- * Author: Li Yang <leoli@freescale.com>
+- * Based on code from Shlomi Gridish <gridish@freescale.com>
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the
+- * Free Software Foundation; either version 2 of the License, or (at your
+- * option) any later version.
+- */
+-#ifndef _POWERPC_SYSDEV_QE_IC_H
+-#define _POWERPC_SYSDEV_QE_IC_H
+-
+-#include <soc/fsl/qe/qe_ic.h>
+-
+-#define NR_QE_IC_INTS 64
+-
+-/* QE IC registers offset */
+-#define QEIC_CICR 0x00
+-#define QEIC_CIVEC 0x04
+-#define QEIC_CRIPNR 0x08
+-#define QEIC_CIPNR 0x0c
+-#define QEIC_CIPXCC 0x10
+-#define QEIC_CIPYCC 0x14
+-#define QEIC_CIPWCC 0x18
+-#define QEIC_CIPZCC 0x1c
+-#define QEIC_CIMR 0x20
+-#define QEIC_CRIMR 0x24
+-#define QEIC_CICNR 0x28
+-#define QEIC_CIPRTA 0x30
+-#define QEIC_CIPRTB 0x34
+-#define QEIC_CRICR 0x3c
+-#define QEIC_CHIVEC 0x60
+-
+-/* Interrupt priority registers */
+-#define CIPCC_SHIFT_PRI0 29
+-#define CIPCC_SHIFT_PRI1 26
+-#define CIPCC_SHIFT_PRI2 23
+-#define CIPCC_SHIFT_PRI3 20
+-#define CIPCC_SHIFT_PRI4 13
+-#define CIPCC_SHIFT_PRI5 10
+-#define CIPCC_SHIFT_PRI6 7
+-#define CIPCC_SHIFT_PRI7 4
+-
+-/* CICR priority modes */
+-#define CICR_GWCC 0x00040000
+-#define CICR_GXCC 0x00020000
+-#define CICR_GYCC 0x00010000
+-#define CICR_GZCC 0x00080000
+-#define CICR_GRTA 0x00200000
+-#define CICR_GRTB 0x00400000
+-#define CICR_HPIT_SHIFT 8
+-#define CICR_HPIT_MASK 0x00000300
+-#define CICR_HP_SHIFT 24
+-#define CICR_HP_MASK 0x3f000000
+-
+-/* CICNR */
+-#define CICNR_WCC1T_SHIFT 20
+-#define CICNR_ZCC1T_SHIFT 28
+-#define CICNR_YCC1T_SHIFT 12
+-#define CICNR_XCC1T_SHIFT 4
+-
+-/* CRICR */
+-#define CRICR_RTA1T_SHIFT 20
+-#define CRICR_RTB1T_SHIFT 28
+-
+-/* Signal indicator */
+-#define SIGNAL_MASK 3
+-#define SIGNAL_HIGH 2
+-#define SIGNAL_LOW 0
+-
+-struct qe_ic {
+- /* Control registers offset */
+- volatile u32 __iomem *regs;
+-
+- /* The remapper for this QEIC */
+- struct irq_domain *irqhost;
+-
+- /* The "linux" controller struct */
+- struct irq_chip hc_irq;
+-
+- /* VIRQ numbers of QE high/low irqs */
+- unsigned int virq_high;
+- unsigned int virq_low;
+-};
+-
+-/*
+- * QE interrupt controller internal structure
+- */
+-struct qe_ic_info {
+- u32 mask; /* location of this source at the QIMR register. */
+- u32 mask_reg; /* Mask register offset */
+- u8 pri_code; /* for grouped interrupts sources - the interrupt
+- code as appears at the group priority register */
+- u32 pri_reg; /* Group priority register offset */
+-};
+-
+-#endif /* _POWERPC_SYSDEV_QE_IC_H */
+diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
+index 7ae59abc..8966e8b6 100644
+--- a/drivers/soc/fsl/qe/qe_io.c
++++ b/drivers/soc/fsl/qe/qe_io.c
+@@ -22,8 +22,6 @@
+
+ #include <asm/io.h>
+ #include <soc/fsl/qe/qe.h>
+-#include <asm/prom.h>
+-#include <sysdev/fsl_soc.h>
+
+ #undef DEBUG
+
+@@ -61,16 +59,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
+ pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
+
+ /* Set open drain, if required */
+- tmp_val = in_be32(&par_io->cpodr);
++ tmp_val = ioread32be(&par_io->cpodr);
+ if (open_drain)
+- out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
++ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
+ else
+- out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
++ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
+
+ /* define direction */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+- in_be32(&par_io->cpdir2) :
+- in_be32(&par_io->cpdir1);
++ ioread32be(&par_io->cpdir2) :
++ ioread32be(&par_io->cpdir1);
+
+ /* get all bits mask for 2 bit per port */
+ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
+@@ -82,34 +80,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
+
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+- out_be32(&par_io->cpdir2,
+- ~pin_mask2bits & tmp_val);
++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
+ tmp_val &= ~pin_mask2bits;
+- out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
++ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
+ } else {
+- out_be32(&par_io->cpdir1,
+- ~pin_mask2bits & tmp_val);
++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
+ tmp_val &= ~pin_mask2bits;
+- out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
++ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
+ }
+ /* define pin assignment */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+- in_be32(&par_io->cppar2) :
+- in_be32(&par_io->cppar1);
++ ioread32be(&par_io->cppar2) :
++ ioread32be(&par_io->cppar1);
+
+ new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+- out_be32(&par_io->cppar2,
+- ~pin_mask2bits & tmp_val);
++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
+ tmp_val &= ~pin_mask2bits;
+- out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
++ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
+ } else {
+- out_be32(&par_io->cppar1,
+- ~pin_mask2bits & tmp_val);
++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
+ tmp_val &= ~pin_mask2bits;
+- out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
++ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
+ }
+ }
+ EXPORT_SYMBOL(__par_io_config_pin);
+@@ -137,12 +131,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
+ /* calculate pin location */
+ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
+
+- tmp_val = in_be32(&par_io[port].cpdata);
++ tmp_val = ioread32be(&par_io[port].cpdata);
+
+ if (val == 0) /* clear */
+- out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
++ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
+ else /* set */
+- out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
++ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
+
+ return 0;
+ }
+diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
+index a1048b44..818e6798 100644
+--- a/drivers/soc/fsl/qe/qe_tdm.c
++++ b/drivers/soc/fsl/qe/qe_tdm.c
+@@ -227,10 +227,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ }
+
+- setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+- SIR_LAST);
+- setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+- SIR_LAST);
++ qe_setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
++ SIR_LAST);
++ qe_setbits16(&siram[(siram_entry_id * 32) + 0x200 +
++ (utdm->num_of_ts - 1)], SIR_LAST);
+
+ /* Set SIxMR register */
+ sixmr = SIMR_SAD(siram_entry_id);
+diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
+index c646d871..bc64b834 100644
+--- a/drivers/soc/fsl/qe/ucc.c
++++ b/drivers/soc/fsl/qe/ucc.c
+@@ -39,7 +39,7 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+- clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
++ qe_clrsetbits32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+@@ -84,7 +84,7 @@ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
+ return -EINVAL;
+ }
+
+- clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
++ qe_clrsetbits8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
+
+ return 0;
+@@ -113,9 +113,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ if (set)
+- setbits32(cmxucr, mask << shift);
++ qe_setbits32(cmxucr, mask << shift);
+ else
+- clrbits32(cmxucr, mask << shift);
++ qe_clrbits32(cmxucr, mask << shift);
+
+ return 0;
+ }
+@@ -211,7 +211,7 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+ if (mode == COMM_DIR_RX)
+ shift += 4;
+
+- clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
++ qe_clrsetbits32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
+index 83d8d16e..5115e935 100644
+--- a/drivers/soc/fsl/qe/ucc_fast.c
++++ b/drivers/soc/fsl/qe/ucc_fast.c
+@@ -33,41 +33,41 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
+ printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
+
+ printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
++ &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
+ printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
++ &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
+ printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
++ &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
+ printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
++ &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
+ printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
++ &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
+ printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
++ &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
+ printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
+- &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
++ &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
+ printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
++ &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
+ printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
++ &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
+ printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
++ &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
+ printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
++ &uccf->uf_regs->urfset, ioread16be(&uccf->uf_regs->urfset));
+ printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
++ &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
+ printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
++ &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
+ printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
++ &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
+ printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
++ &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
+ printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
++ &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
+ printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
++ &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
+ printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+- &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
++ &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
+ }
+ EXPORT_SYMBOL(ucc_fast_dump_regs);
+
+@@ -89,7 +89,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
+
+ void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
+ {
+- out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
++ iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
+ }
+ EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
+
+@@ -101,7 +101,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
+ uf_regs = uccf->uf_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+- gumr = in_be32(&uf_regs->gumr);
++ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr |= UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 1;
+@@ -110,7 +110,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
+ gumr |= UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 1;
+ }
+- out_be32(&uf_regs->gumr, gumr);
++ iowrite32be(gumr, &uf_regs->gumr);
+ }
+ EXPORT_SYMBOL(ucc_fast_enable);
+
+@@ -122,7 +122,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
+ uf_regs = uccf->uf_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+- gumr = in_be32(&uf_regs->gumr);
++ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr &= ~UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 0;
+@@ -131,7 +131,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
+ gumr &= ~UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 0;
+ }
+- out_be32(&uf_regs->gumr, gumr);
++ iowrite32be(gumr, &uf_regs->gumr);
+ }
+ EXPORT_SYMBOL(ucc_fast_disable);
+
+@@ -263,12 +263,13 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
+ gumr |= uf_info->tenc;
+ gumr |= uf_info->tcrc;
+ gumr |= uf_info->mode;
+- out_be32(&uf_regs->gumr, gumr);
++ iowrite32be(gumr, &uf_regs->gumr);
+
+ /* Allocate memory for Tx Virtual Fifo */
+ uccf->ucc_fast_tx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+- if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
++ if (IS_ERR_VALUE((unsigned long)uccf->
++ ucc_fast_tx_virtual_fifo_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+ __func__);
+ uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
+@@ -281,7 +282,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
+ qe_muram_alloc(uf_info->urfs +
+ UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+- if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
++ if (IS_ERR_VALUE((unsigned long)uccf->
++ ucc_fast_rx_virtual_fifo_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+ __func__);
+ uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
+@@ -290,15 +292,15 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
+ }
+
+ /* Set Virtual Fifo registers */
+- out_be16(&uf_regs->urfs, uf_info->urfs);
+- out_be16(&uf_regs->urfet, uf_info->urfet);
+- out_be16(&uf_regs->urfset, uf_info->urfset);
+- out_be16(&uf_regs->utfs, uf_info->utfs);
+- out_be16(&uf_regs->utfet, uf_info->utfet);
+- out_be16(&uf_regs->utftt, uf_info->utftt);
++ iowrite16be(uf_info->urfs, &uf_regs->urfs);
++ iowrite16be(uf_info->urfet, &uf_regs->urfet);
++ iowrite16be(uf_info->urfset, &uf_regs->urfset);
++ iowrite16be(uf_info->utfs, &uf_regs->utfs);
++ iowrite16be(uf_info->utfet, &uf_regs->utfet);
++ iowrite16be(uf_info->utftt, &uf_regs->utftt);
+ /* utfb, urfb are offsets from MURAM base */
+- out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
+- out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
++ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
++ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
+
+ /* Mux clocking */
+ /* Grant Support */
+@@ -366,14 +368,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
+ }
+
+ /* Set interrupt mask register at UCC level. */
+- out_be32(&uf_regs->uccm, uf_info->uccm_mask);
++ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+
+ /* First, clear anything pending at UCC level,
+ * otherwise, old garbage may come through
+ * as soon as the dam is opened. */
+
+ /* Writing '1' clears */
+- out_be32(&uf_regs->ucce, 0xffffffff);
++ iowrite32be(0xffffffff, &uf_regs->ucce);
+
+ *uccf_ret = uccf;
+ return 0;
+diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
+index 481eb298..ee409fdf 100644
+--- a/drivers/tty/serial/ucc_uart.c
++++ b/drivers/tty/serial/ucc_uart.c
+@@ -34,6 +34,7 @@
+ #include <soc/fsl/qe/ucc_slow.h>
+
+ #include <linux/firmware.h>
++#include <asm/cpm.h>
+ #include <asm/reg.h>
+
+ /*
+diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
+index 70339d79..f7a14f2d 100644
+--- a/include/soc/fsl/qe/qe.h
++++ b/include/soc/fsl/qe/qe.h
+@@ -21,7 +21,6 @@
+ #include <linux/spinlock.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+-#include <asm/cpm.h>
+ #include <soc/fsl/qe/immap_qe.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+diff --git a/include/soc/fsl/qe/qe_ic.h b/include/soc/fsl/qe/qe_ic.h
+deleted file mode 100644
+index 1e155ca6..00000000
+--- a/include/soc/fsl/qe/qe_ic.h
++++ /dev/null
+@@ -1,139 +0,0 @@
+-/*
+- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+- *
+- * Authors: Shlomi Gridish <gridish@freescale.com>
+- * Li Yang <leoli@freescale.com>
+- *
+- * Description:
+- * QE IC external definitions and structure.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the
+- * Free Software Foundation; either version 2 of the License, or (at your
+- * option) any later version.
+- */
+-#ifndef _ASM_POWERPC_QE_IC_H
+-#define _ASM_POWERPC_QE_IC_H
+-
+-#include <linux/irq.h>
+-
+-struct device_node;
+-struct qe_ic;
+-
+-#define NUM_OF_QE_IC_GROUPS 6
+-
+-/* Flags when we init the QE IC */
+-#define QE_IC_SPREADMODE_GRP_W 0x00000001
+-#define QE_IC_SPREADMODE_GRP_X 0x00000002
+-#define QE_IC_SPREADMODE_GRP_Y 0x00000004
+-#define QE_IC_SPREADMODE_GRP_Z 0x00000008
+-#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
+-#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
+-
+-#define QE_IC_LOW_SIGNAL 0x00000100
+-#define QE_IC_HIGH_SIGNAL 0x00000200
+-
+-#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
+-#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
+-#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
+-#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
+-#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
+-#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
+-#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
+-#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
+-#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
+-#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
+-#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
+-#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
+-#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
+-
+-/* QE interrupt sources groups */
+-enum qe_ic_grp_id {
+- QE_IC_GRP_W = 0, /* QE interrupt controller group W */
+- QE_IC_GRP_X, /* QE interrupt controller group X */
+- QE_IC_GRP_Y, /* QE interrupt controller group Y */
+- QE_IC_GRP_Z, /* QE interrupt controller group Z */
+- QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
+- QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
+-};
+-
+-#ifdef CONFIG_QUICC_ENGINE
+-void qe_ic_init(struct device_node *node, unsigned int flags,
+- void (*low_handler)(struct irq_desc *desc),
+- void (*high_handler)(struct irq_desc *desc));
+-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
+-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
+-#else
+-static inline void qe_ic_init(struct device_node *node, unsigned int flags,
+- void (*low_handler)(struct irq_desc *desc),
+- void (*high_handler)(struct irq_desc *desc))
+-{}
+-static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+-{ return 0; }
+-static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+-{ return 0; }
+-#endif /* CONFIG_QUICC_ENGINE */
+-
+-void qe_ic_set_highest_priority(unsigned int virq, int high);
+-int qe_ic_set_priority(unsigned int virq, unsigned int priority);
+-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
+-
+-static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
+-{
+- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+- unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+-
+- if (cascade_irq != NO_IRQ)
+- generic_handle_irq(cascade_irq);
+-}
+-
+-static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
+-{
+- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+- unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+-
+- if (cascade_irq != NO_IRQ)
+- generic_handle_irq(cascade_irq);
+-}
+-
+-static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
+-{
+- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+- unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+- struct irq_chip *chip = irq_desc_get_chip(desc);
+-
+- if (cascade_irq != NO_IRQ)
+- generic_handle_irq(cascade_irq);
+-
+- chip->irq_eoi(&desc->irq_data);
+-}
+-
+-static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
+-{
+- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+- unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+- struct irq_chip *chip = irq_desc_get_chip(desc);
+-
+- if (cascade_irq != NO_IRQ)
+- generic_handle_irq(cascade_irq);
+-
+- chip->irq_eoi(&desc->irq_data);
+-}
+-
+-static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
+-{
+- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+- unsigned int cascade_irq;
+- struct irq_chip *chip = irq_desc_get_chip(desc);
+-
+- cascade_irq = qe_ic_get_high_irq(qe_ic);
+- if (cascade_irq == NO_IRQ)
+- cascade_irq = qe_ic_get_low_irq(qe_ic);
+-
+- if (cascade_irq != NO_IRQ)
+- generic_handle_irq(cascade_irq);
+-
+- chip->irq_eoi(&desc->irq_data);
+-}
+-
+-#endif /* _ASM_POWERPC_QE_IC_H */
+--
+2.14.1
+
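[The QE hunks above mechanically replace the PowerPC-only register accessors
(in_be32()/out_be32(), setbits32(), clrsetbits_be32()) with generic
big-endian MMIO helpers so the QE code can also build on ARM-based
Layerscape parts. A minimal before/after sketch of the conversion
(illustrative only, not part of the patch):

    u32 gumr;

    /* PowerPC-only accessors: read(addr), write(addr, value) */
    gumr = in_be32(&uf_regs->gumr);
    out_be32(&uf_regs->gumr, gumr | UCC_FAST_GUMR_ENT);

    /* Portable big-endian MMIO: read(addr), write(value, addr) */
    gumr = ioread32be(&uf_regs->gumr);
    iowrite32be(gumr | UCC_FAST_GUMR_ENT, &uf_regs->gumr);

Note that the argument order flips for writes: out_be32(addr, val) becomes
iowrite32be(val, addr), which is why every converted write in the hunks
above swaps its operands.]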
diff --git a/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch b/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch
new file mode 100644
index 0000000000..b186fb31ee
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch
@@ -0,0 +1,688 @@
+From 7e7944c484954ff7b5d53047194e59bfffd1540a Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:20:55 +0800
+Subject: [PATCH] rtc: support layerscape
+
+This is an integrated patch for layerscape RTC support.
+
+Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/rtc/rtc-pcf85263.c | 665 +++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 665 insertions(+)
+ create mode 100644 drivers/rtc/rtc-pcf85263.c
+
+diff --git a/drivers/rtc/rtc-pcf85263.c b/drivers/rtc/rtc-pcf85263.c
+new file mode 100644
+index 00000000..629c2840
+--- /dev/null
++++ b/drivers/rtc/rtc-pcf85263.c
+@@ -0,0 +1,665 @@
++/*
++ * rtc-pcf85263 Driver for the NXP PCF85263 RTC
++ * Copyright 2016 Parkeon
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/rtc.h>
++#include <linux/i2c.h>
++#include <linux/bcd.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/regmap.h>
++
++
++#define DRV_NAME "rtc-pcf85263"
++
++/* Quartz capacitance */
++#define PCF85263_QUARTZCAP_7pF 0
++#define PCF85263_QUARTZCAP_6pF 1
++#define PCF85263_QUARTZCAP_12p5pF 2
++
++/* Quartz drive strength */
++#define PCF85263_QUARTZDRIVE_NORMAL 0
++#define PCF85263_QUARTZDRIVE_LOW 1
++#define PCF85263_QUARTZDRIVE_HIGH 2
++
++
++#define PCF85263_REG_RTC_SC 0x01 /* Seconds */
++#define PCF85263_REG_RTC_SC_OS BIT(7) /* Oscillator stopped flag */
++
++#define PCF85263_REG_RTC_MN 0x02 /* Minutes */
++#define PCF85263_REG_RTC_HR 0x03 /* Hours */
++#define PCF85263_REG_RTC_DT 0x04 /* Day of month 1-31 */
++#define PCF85263_REG_RTC_DW 0x05 /* Day of week 0-6 */
++#define PCF85263_REG_RTC_MO 0x06 /* Month 1-12 */
++#define PCF85263_REG_RTC_YR 0x07 /* Year 0-99 */
++
++#define PCF85263_REG_ALM1_SC 0x08 /* Seconds */
++#define PCF85263_REG_ALM1_MN 0x09 /* Minutes */
++#define PCF85263_REG_ALM1_HR 0x0a /* Hours */
++#define PCF85263_REG_ALM1_DT 0x0b /* Day of month 1-31 */
++#define PCF85263_REG_ALM1_MO 0x0c /* Month 1-12 */
++
++#define PCF85263_REG_ALM_CTL 0x10
++#define PCF85263_REG_ALM_CTL_ALL_A1E 0x1f /* sec,min,hr,day,mon alarm 1 */
++
++#define PCF85263_REG_OSC 0x25
++#define PCF85263_REG_OSC_CL_MASK (BIT(0) | BIT(1))
++#define PCF85263_REG_OSC_CL_SHIFT 0
++#define PCF85263_REG_OSC_OSCD_MASK (BIT(2) | BIT(3))
++#define PCF85263_REG_OSC_OSCD_SHIFT 2
++#define PCF85263_REG_OSC_LOWJ BIT(4)
++#define PCF85263_REG_OSC_12H BIT(5)
++
++#define PCF85263_REG_PINIO 0x27
++#define PCF85263_REG_PINIO_INTAPM_MASK (BIT(0) | BIT(1))
++#define PCF85263_REG_PINIO_INTAPM_SHIFT 0
++#define PCF85263_INTAPM_INTA (0x2 << PCF85263_REG_PINIO_INTAPM_SHIFT)
++#define PCF85263_INTAPM_HIGHZ (0x3 << PCF85263_REG_PINIO_INTAPM_SHIFT)
++#define PCF85263_REG_PINIO_TSPM_MASK (BIT(2) | BIT(3))
++#define PCF85263_REG_PINIO_TSPM_SHIFT 2
++#define PCF85263_TSPM_DISABLED (0x0 << PCF85263_REG_PINIO_TSPM_SHIFT)
++#define PCF85263_TSPM_INTB (0x1 << PCF85263_REG_PINIO_TSPM_SHIFT)
++#define PCF85263_REG_PINIO_CLKDISABLE BIT(7)
++
++#define PCF85263_REG_FUNCTION 0x28
++#define PCF85263_REG_FUNCTION_COF_MASK 0x7
++#define PCF85263_REG_FUNCTION_COF_OFF 0x7 /* No clock output */
++
++#define PCF85263_REG_INTA_CTL 0x29
++#define PCF85263_REG_INTB_CTL 0x2A
++#define PCF85263_REG_INTx_CTL_A1E BIT(4) /* Alarm 1 */
++#define PCF85263_REG_INTx_CTL_ILP BIT(7) /* 0=pulse, 1=level */
++
++#define PCF85263_REG_FLAGS 0x2B
++#define PCF85263_REG_FLAGS_A1F BIT(5)
++
++#define PCF85263_REG_RAM_BYTE 0x2c
++
++#define PCF85263_REG_STOPENABLE 0x2e
++#define PCF85263_REG_STOPENABLE_STOP BIT(0)
++
++#define PCF85263_REG_RESET 0x2f /* Reset command */
++#define PCF85263_REG_RESET_CMD_CPR 0xa4 /* Clear prescaler */
++
++#define PCF85263_MAX_REG 0x2f
++
++#define PCF85263_HR_PM BIT(5)
++
++enum pcf85263_irqpin {
++ PCF85263_IRQPIN_NONE,
++ PCF85263_IRQPIN_INTA,
++ PCF85263_IRQPIN_INTB
++};
++
++static const char *const pcf85263_irqpin_names[] = {
++ [PCF85263_IRQPIN_NONE] = "None",
++ [PCF85263_IRQPIN_INTA] = "INTA",
++ [PCF85263_IRQPIN_INTB] = "INTB"
++};
++
++struct pcf85263 {
++ struct device *dev;
++ struct rtc_device *rtc;
++ struct regmap *regmap;
++ enum pcf85263_irqpin irq_pin;
++ int irq;
++ bool mode_12h;
++};
++
++/*
++ * Helpers to convert 12h to 24h and vice versa.
++ * Values in register are stored in BCD with a PM flag in bit 5
++ *
++ * 23:00 <=> 11PM <=> 0x31
++ * 00:00 <=> 12AM <=> 0x12
++ * 01:00 <=> 1AM <=> 0x01
++ * 12:00 <=> 12PM <=> 0x32
++ * 13:00 <=> 1PM <=> 0x21
++ */
++static int pcf85263_bcd12h_to_bin24h(int regval)
++{
++ int hr = bcd2bin(regval & 0x1f);
++ bool pm = regval & PCF85263_HR_PM;
++
++ if (hr == 12)
++ return pm ? 12 : 0;
++
++ return pm ? hr + 12 : hr;
++}
++
++static int pcf85263_bin24h_to_bcd12h(int hr24)
++{
++ bool pm = hr24 >= 12;
++ int hr12 = hr24 % 12;
++
++ if (!hr12)
++ hr12 = 12;
++
++ return bin2bcd(hr12) | (pm ? PCF85263_HR_PM : 0);
++}
++
++static int pcf85263_read_time(struct device *dev, struct rtc_time *tm)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ const int first = PCF85263_REG_RTC_SC;
++ const int last = PCF85263_REG_RTC_YR;
++ const int len = last - first + 1;
++ u8 regs[len];
++ u8 hr_reg;
++ int ret;
++
++ ret = regmap_bulk_read(pcf85263->regmap, first, regs, len);
++ if (ret)
++ return ret;
++
++ if (regs[PCF85263_REG_RTC_SC - first] & PCF85263_REG_RTC_SC_OS) {
++ dev_warn(dev, "Oscillator stop detected, date/time is not reliable.\n");
++ return -EINVAL;
++ }
++
++ tm->tm_sec = bcd2bin(regs[PCF85263_REG_RTC_SC - first] & 0x7f);
++ tm->tm_min = bcd2bin(regs[PCF85263_REG_RTC_MN - first] & 0x7f);
++
++ hr_reg = regs[PCF85263_REG_RTC_HR - first];
++ if (pcf85263->mode_12h)
++ tm->tm_hour = pcf85263_bcd12h_to_bin24h(hr_reg);
++ else
++ tm->tm_hour = bcd2bin(hr_reg & 0x3f);
++
++ tm->tm_mday = bcd2bin(regs[PCF85263_REG_RTC_DT - first]);
++ tm->tm_wday = bcd2bin(regs[PCF85263_REG_RTC_DW - first]);
++ tm->tm_mon = bcd2bin(regs[PCF85263_REG_RTC_MO - first]) - 1;
++ tm->tm_year = bcd2bin(regs[PCF85263_REG_RTC_YR - first]);
++
++ tm->tm_year += 100; /* Assume 21st century */
++
++ return 0;
++}
++
++static int pcf85263_set_time(struct device *dev, struct rtc_time *tm)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++
++ /*
++ * Before setting time need to stop RTC and disable prescaler
++ * Do this all in a single I2C transaction exploiting wraparound
++ * as described in data sheet.
++ * This means that the array below must be in register order
++ */
++ u8 regs[] = {
++ PCF85263_REG_STOPENABLE_STOP, /* STOP */
++ PCF85263_REG_RESET_CMD_CPR, /* Disable prescaler */
++ /* Wrap around to register 0 (1/100s) */
++ 0, /* 1/100s always zero. */
++ bin2bcd(tm->tm_sec),
++ bin2bcd(tm->tm_min),
++ bin2bcd(tm->tm_hour), /* 24-hour */
++ bin2bcd(tm->tm_mday),
++ bin2bcd(tm->tm_wday + 1),
++ bin2bcd(tm->tm_mon + 1),
++ bin2bcd(tm->tm_year % 100)
++ };
++ int ret;
++
++ ret = regmap_bulk_write(pcf85263->regmap, PCF85263_REG_STOPENABLE,
++ regs, sizeof(regs));
++ if (ret)
++ return ret;
++
++ /* As we have set the time in 24H update the hardware for that */
++ if (pcf85263->mode_12h) {
++ pcf85263->mode_12h = false;
++ ret = regmap_update_bits(pcf85263->regmap, PCF85263_REG_OSC,
++ PCF85263_REG_OSC_12H, 0);
++ if (ret)
++ return ret;
++ }
++
++ /* Start it again */
++ return regmap_write(pcf85263->regmap, PCF85263_REG_STOPENABLE, 0);
++}
++
++static int pcf85263_enable_alarm(struct pcf85263 *pcf85263, bool enable)
++{
++ int reg;
++ int ret;
++
++ ret = regmap_update_bits(pcf85263->regmap, PCF85263_REG_ALM_CTL,
++ PCF85263_REG_ALM_CTL_ALL_A1E,
++ enable ? PCF85263_REG_ALM_CTL_ALL_A1E : 0);
++ if (ret)
++ return ret;
++
++ switch (pcf85263->irq_pin) {
++ case PCF85263_IRQPIN_NONE:
++ return 0;
++
++ case PCF85263_IRQPIN_INTA:
++ reg = PCF85263_REG_INTA_CTL;
++ break;
++
++ case PCF85263_IRQPIN_INTB:
++ reg = PCF85263_REG_INTB_CTL;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return regmap_update_bits(pcf85263->regmap, reg,
++ PCF85263_REG_INTx_CTL_A1E,
++ enable ? PCF85263_REG_INTx_CTL_A1E : 0);
++}
++
++static int pcf85263_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ struct rtc_time *tm = &alarm->time;
++ const int first = PCF85263_REG_ALM1_SC;
++ const int last = PCF85263_REG_ALM1_MO;
++ const int len = last - first + 1;
++ u8 regs[len];
++ u8 hr_reg;
++ unsigned int regval;
++ int ret;
++
++ ret = regmap_bulk_read(pcf85263->regmap, first, regs, len);
++ if (ret)
++ return ret;
++
++ tm->tm_sec = bcd2bin(regs[PCF85263_REG_ALM1_SC - first] & 0x7f);
++ tm->tm_min = bcd2bin(regs[PCF85263_REG_ALM1_MN - first] & 0x7f);
++
++ hr_reg = regs[PCF85263_REG_ALM1_HR - first];
++ if (pcf85263->mode_12h)
++ tm->tm_hour = pcf85263_bcd12h_to_bin24h(hr_reg);
++ else
++ tm->tm_hour = bcd2bin(hr_reg & 0x3f);
++
++ tm->tm_mday = bcd2bin(regs[PCF85263_REG_ALM1_DT - first]);
++ tm->tm_mon = bcd2bin(regs[PCF85263_REG_ALM1_MO - first]) - 1;
++ tm->tm_year = -1;
++ tm->tm_wday = -1;
++
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_ALM_CTL, &regval);
++ if (ret)
++ return ret;
++ alarm->enabled = !!(regval & PCF85263_REG_ALM_CTL_ALL_A1E);
++
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_FLAGS, &regval);
++ if (ret)
++ return ret;
++ alarm->pending = !!(regval & PCF85263_REG_FLAGS_A1F);
++
++ return 0;
++}
++
++static int pcf85263_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ struct rtc_time *tm = &alarm->time;
++ const int first = PCF85263_REG_ALM1_SC;
++ const int last = PCF85263_REG_ALM1_MO;
++ const int len = last - first + 1;
++ u8 regs[len];
++ int ret;
++
++ /* Disable alarm comparison during update */
++ ret = pcf85263_enable_alarm(pcf85263, false);
++ if (ret)
++ return ret;
++
++ /* Clear any pending alarm (write 0=>clr, 1=>no change) */
++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_FLAGS,
++ (unsigned int)(~PCF85263_REG_FLAGS_A1F));
++ if (ret)
++ return ret;
++
++ /* Set the alarm time registers */
++ regs[PCF85263_REG_ALM1_SC - first] = bin2bcd(tm->tm_sec);
++ regs[PCF85263_REG_ALM1_MN - first] = bin2bcd(tm->tm_min);
++ regs[PCF85263_REG_ALM1_HR - first] = pcf85263->mode_12h ?
++ pcf85263_bin24h_to_bcd12h(tm->tm_hour) :
++ bin2bcd(tm->tm_hour);
++ regs[PCF85263_REG_ALM1_DT - first] = bin2bcd(tm->tm_mday);
++ regs[PCF85263_REG_ALM1_MO - first] = bin2bcd(tm->tm_mon + 1);
++
++ ret = regmap_bulk_write(pcf85263->regmap, first, regs, sizeof(regs));
++ if (ret)
++ return ret;
++
++ if (alarm->enabled)
++ ret = pcf85263_enable_alarm(pcf85263, true);
++
++ return ret;
++}
++
++static int pcf85263_alarm_irq_enable(struct device *dev, unsigned int enable)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++
++ return pcf85263_enable_alarm(pcf85263, !!enable);
++}
++
++static irqreturn_t pcf85263_irq(int irq, void *data)
++{
++ struct pcf85263 *pcf85263 = data;
++ unsigned int regval;
++ int ret;
++
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_FLAGS, &regval);
++ if (ret)
++ return IRQ_NONE;
++
++ if (regval & PCF85263_REG_FLAGS_A1F) {
++ regmap_write(pcf85263->regmap, PCF85263_REG_FLAGS,
++ (unsigned int)(~PCF85263_REG_FLAGS_A1F));
++
++ rtc_update_irq(pcf85263->rtc, 1, RTC_IRQF | RTC_AF);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int pcf85263_check_osc_stopped(struct pcf85263 *pcf85263)
++{
++ unsigned int regval;
++ int ret;
++
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_RTC_SC, &regval);
++ if (ret)
++ return ret;
++
++ ret = regval & PCF85263_REG_RTC_SC_OS ? 1 : 0;
++ if (ret)
++ dev_warn(pcf85263->dev, "Oscillator stop detected, date/time is not reliable.\n");
++
++ return ret;
++}
++
++#ifdef CONFIG_RTC_INTF_DEV
++static int pcf85263_ioctl(struct device *dev,
++ unsigned int cmd, unsigned long arg)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ int ret;
++
++ switch (cmd) {
++ case RTC_VL_READ:
++ ret = pcf85263_check_osc_stopped(pcf85263);
++ if (ret < 0)
++ return ret;
++
++ if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
++ return -EFAULT;
++ return 0;
++
++ case RTC_VL_CLR:
++ return regmap_update_bits(pcf85263->regmap,
++ PCF85263_REG_RTC_SC,
++ PCF85263_REG_RTC_SC_OS, 0);
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++#else
++#define pcf85263_ioctl NULL
++#endif
++
++static int pcf85263_init_hw(struct pcf85263 *pcf85263)
++{
++ struct device_node *np = pcf85263->dev->of_node;
++ unsigned int regval;
++ u32 propval;
++ int ret;
++
++ /* Determine if oscillator has been stopped (probably low power) */
++ ret = pcf85263_check_osc_stopped(pcf85263);
++ if (ret < 0) {
++ /* Log here since this is the first hw access on probe */
++ dev_err(pcf85263->dev, "Unable to read register\n");
++
++ return ret;
++ }
++
++ /* Determine 12/24H mode */
++ ret = regmap_read(pcf85263->regmap, PCF85263_REG_OSC, &regval);
++ if (ret)
++ return ret;
++ pcf85263->mode_12h = !!(regval & PCF85263_REG_OSC_12H);
++
++ /* Set oscillator register */
++ regval &= ~PCF85263_REG_OSC_12H; /* keep current 12/24 h setting */
++
++ propval = PCF85263_QUARTZCAP_12p5pF;
++ of_property_read_u32(np, "quartz-load-capacitance", &propval);
++ regval |= ((propval << PCF85263_REG_OSC_CL_SHIFT)
++ & PCF85263_REG_OSC_CL_MASK);
++
++ propval = PCF85263_QUARTZDRIVE_NORMAL;
++ of_property_read_u32(np, "quartz-drive-strength", &propval);
++ regval |= ((propval << PCF85263_REG_OSC_OSCD_SHIFT)
++ & PCF85263_REG_OSC_OSCD_MASK);
++
++ if (of_property_read_bool(np, "quartz-low-jitter"))
++ regval |= PCF85263_REG_OSC_LOWJ;
++
++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_OSC, regval);
++ if (ret)
++ return ret;
++
++ /* Set function register (RTC mode, 1s tick, clock output static) */
++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_FUNCTION,
++ PCF85263_REG_FUNCTION_COF_OFF);
++ if (ret)
++ return ret;
++
++ /* Set all interrupts to disabled, level mode */
++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_INTA_CTL,
++ PCF85263_REG_INTx_CTL_ILP);
++ if (ret)
++ return ret;
++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_INTB_CTL,
++ PCF85263_REG_INTx_CTL_ILP);
++ if (ret)
++ return ret;
++
++ /* Setup IO pin config register */
++ regval = PCF85263_REG_PINIO_CLKDISABLE;
++ switch (pcf85263->irq_pin) {
++ case PCF85263_IRQPIN_INTA:
++ regval |= (PCF85263_INTAPM_INTA | PCF85263_TSPM_DISABLED);
++ break;
++ case PCF85263_IRQPIN_INTB:
++ regval |= (PCF85263_INTAPM_HIGHZ | PCF85263_TSPM_INTB);
++ break;
++ case PCF85263_IRQPIN_NONE:
++ regval |= (PCF85263_INTAPM_HIGHZ | PCF85263_TSPM_DISABLED);
++ break;
++ }
++ ret = regmap_write(pcf85263->regmap, PCF85263_REG_PINIO, regval);
++
++ return ret;
++}
++
++static const struct rtc_class_ops rtc_ops = {
++ .ioctl = pcf85263_ioctl,
++ .read_time = pcf85263_read_time,
++ .set_time = pcf85263_set_time,
++ .read_alarm = pcf85263_read_alarm,
++ .set_alarm = pcf85263_set_alarm,
++ .alarm_irq_enable = pcf85263_alarm_irq_enable,
++};
++
++static const struct regmap_config pcf85263_regmap_cfg = {
++ .reg_bits = 8,
++ .val_bits = 8,
++ .max_register = PCF85263_MAX_REG,
++};
++
++/*
++ * On some boards the interrupt line may not be wired to the CPU but only to
++ * a power supply circuit.
++ * In that case no interrupt will be specified in the device tree but the
++ * wakeup-source DT property may be used to enable wakeup programming in
++ * sysfs
++ */
++static bool pcf85263_can_wakeup_machine(struct pcf85263 *pcf85263)
++{
++ return pcf85263->irq ||
++ of_property_read_bool(pcf85263->dev->of_node, "wakeup-source");
++}
++
++static int pcf85263_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct device *dev = &client->dev;
++ struct pcf85263 *pcf85263;
++ int ret;
++
++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
++ I2C_FUNC_SMBUS_BYTE_DATA |
++ I2C_FUNC_SMBUS_I2C_BLOCK))
++ return -ENODEV;
++
++ pcf85263 = devm_kzalloc(dev, sizeof(*pcf85263), GFP_KERNEL);
++ if (!pcf85263)
++ return -ENOMEM;
++
++ pcf85263->dev = dev;
++ pcf85263->irq = client->irq;
++ dev_set_drvdata(dev, pcf85263);
++
++ pcf85263->regmap = devm_regmap_init_i2c(client, &pcf85263_regmap_cfg);
++ if (IS_ERR(pcf85263->regmap)) {
++ ret = PTR_ERR(pcf85263->regmap);
++ dev_err(dev, "regmap allocation failed (%d)\n", ret);
++
++ return ret;
++ }
++
++ /* Determine which interrupt pin the board uses */
++ if (pcf85263_can_wakeup_machine(pcf85263)) {
++ if (of_property_match_string(dev->of_node,
++ "interrupt-names", "INTB") >= 0)
++ pcf85263->irq_pin = PCF85263_IRQPIN_INTB;
++ else
++ pcf85263->irq_pin = PCF85263_IRQPIN_INTA;
++ } else {
++ pcf85263->irq_pin = PCF85263_IRQPIN_NONE;
++ }
++
++ ret = pcf85263_init_hw(pcf85263);
++ if (ret)
++ return ret;
++
++ if (pcf85263->irq) {
++ ret = devm_request_threaded_irq(dev, pcf85263->irq, NULL,
++ pcf85263_irq,
++ IRQF_ONESHOT,
++ dev->driver->name, pcf85263);
++ if (ret) {
++ dev_err(dev, "irq %d unavailable (%d)\n",
++ pcf85263->irq, ret);
++ pcf85263->irq = 0;
++ }
++ }
++
++ if (pcf85263_can_wakeup_machine(pcf85263))
++ device_init_wakeup(dev, true);
++
++ pcf85263->rtc = devm_rtc_device_register(dev, dev->driver->name,
++ &rtc_ops, THIS_MODULE);
++ ret = PTR_ERR_OR_ZERO(pcf85263->rtc);
++ if (ret)
++ return ret;
++
++ /* We cannot support UIE mode if we do not have an IRQ line */
++ if (!pcf85263->irq)
++ pcf85263->rtc->uie_unsupported = 1;
++
++ dev_info(pcf85263->dev,
++ "PCF85263 RTC (irqpin=%s irq=%d)\n",
++ pcf85263_irqpin_names[pcf85263->irq_pin],
++ pcf85263->irq);
++
++ return 0;
++}
++
++static int pcf85263_remove(struct i2c_client *client)
++{
++ struct pcf85263 *pcf85263 = i2c_get_clientdata(client);
++
++ if (pcf85263_can_wakeup_machine(pcf85263))
++ device_init_wakeup(pcf85263->dev, false);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int pcf85263_suspend(struct device *dev)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ int ret = 0;
++
++ if (device_may_wakeup(dev))
++ ret = enable_irq_wake(pcf85263->irq);
++
++ return ret;
++}
++
++static int pcf85263_resume(struct device *dev)
++{
++ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
++ int ret = 0;
++
++ if (device_may_wakeup(dev))
++ ret = disable_irq_wake(pcf85263->irq);
++
++ return ret;
++}
++
++#endif
++
++static const struct i2c_device_id pcf85263_id[] = {
++ { "pcf85263", 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, pcf85263_id);
++
++#ifdef CONFIG_OF
++static const struct of_device_id pcf85263_of_match[] = {
++ { .compatible = "nxp,pcf85263" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, pcf85263_of_match);
++#endif
++
++static SIMPLE_DEV_PM_OPS(pcf85263_pm_ops, pcf85263_suspend, pcf85263_resume);
++
++static struct i2c_driver pcf85263_driver = {
++ .driver = {
++ .name = "rtc-pcf85263",
++ .of_match_table = of_match_ptr(pcf85263_of_match),
++ .pm = &pcf85263_pm_ops,
++ },
++ .probe = pcf85263_probe,
++ .remove = pcf85263_remove,
++ .id_table = pcf85263_id,
++};
++
++module_i2c_driver(pcf85263_driver);
++
++MODULE_AUTHOR("Martin Fuzzey <mfuzzey@parkeon.com>");
++MODULE_DESCRIPTION("PCF85263 RTC Driver");
++MODULE_LICENSE("GPL");
++
+--
+2.14.1
+
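[The 12-hour helpers in the RTC patch above encode hours as BCD plus an
AM/PM flag in bit 5 (PCF85263_HR_PM). A hypothetical self-test of the two
helpers against the worked values from the driver's own comment block (a
sketch, not part of the patch):

    /* Check the 12h helpers against the driver comment's table:
     * 0x12=12AM, 0x01=1AM, 0x32=12PM, 0x21=1PM, 0x31=11PM. */
    static void pcf85263_check_12h(void)
    {
            static const struct { int hr24; int bcd12; } cases[] = {
                    { 0, 0x12 }, { 1, 0x01 }, { 12, 0x32 },
                    { 13, 0x21 }, { 23, 0x31 },
            };
            int i;

            for (i = 0; i < ARRAY_SIZE(cases); i++) {
                    WARN_ON(pcf85263_bin24h_to_bcd12h(cases[i].hr24) !=
                            cases[i].bcd12);
                    WARN_ON(pcf85263_bcd12h_to_bin24h(cases[i].bcd12) !=
                            cases[i].hr24);
            }
    }

All five cases round-trip through both helpers.]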
diff --git a/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch b/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch
new file mode 100644
index 0000000000..eab230ef27
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch
@@ -0,0 +1,445 @@
+From a12f522b48a8cb637c1c026b46a76b2ef7983f8d Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:12:41 +0800
+Subject: [PATCH] spi: support layerscape
+
+This is an integrated patch for layerscape DSPI support.
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/spi/Kconfig | 1 +
+ drivers/spi/spi-fsl-dspi.c | 309 ++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 305 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index b7995474..8e281e47 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -365,6 +365,7 @@ config SPI_FSL_SPI
+ config SPI_FSL_DSPI
+ tristate "Freescale DSPI controller"
+ select REGMAP_MMIO
++ depends on HAS_DMA
+ depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ help
+ This enables support for the Freescale DSPI controller in master
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index a67b0ff6..15201645 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -15,6 +15,8 @@
+
+ #include <linux/clk.h>
+ #include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
+ #include <linux/err.h>
+ #include <linux/errno.h>
+ #include <linux/interrupt.h>
+@@ -40,6 +42,7 @@
+ #define TRAN_STATE_WORD_ODD_NUM 0x04
+
+ #define DSPI_FIFO_SIZE 4
++#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
+
+ #define SPI_MCR 0x00
+ #define SPI_MCR_MASTER (1 << 31)
+@@ -72,6 +75,11 @@
+ #define SPI_SR_TCFQF 0x80000000
+ #define SPI_SR_CLEAR 0xdaad0000
+
++#define SPI_RSER_TFFFE BIT(25)
++#define SPI_RSER_TFFFD BIT(24)
++#define SPI_RSER_RFDFE BIT(17)
++#define SPI_RSER_RFDFD BIT(16)
++
+ #define SPI_RSER 0x30
+ #define SPI_RSER_EOQFE 0x10000000
+ #define SPI_RSER_TCFQE 0x80000000
+@@ -109,6 +117,8 @@
+
+ #define SPI_TCR_TCNT_MAX 0x10000
+
++#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
++
+ struct chip_data {
+ u32 mcr_val;
+ u32 ctar_val;
+@@ -118,6 +128,7 @@ struct chip_data {
+ enum dspi_trans_mode {
+ DSPI_EOQ_MODE = 0,
+ DSPI_TCFQ_MODE,
++ DSPI_DMA_MODE,
+ };
+
+ struct fsl_dspi_devtype_data {
+@@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data {
+ };
+
+ static const struct fsl_dspi_devtype_data vf610_data = {
+- .trans_mode = DSPI_EOQ_MODE,
++ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 2,
+ };
+
+@@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
+ .max_clock_factor = 8,
+ };
+
++struct fsl_dspi_dma {
++ /* Length of transfer in words of DSPI_FIFO_SIZE */
++ u32 curr_xfer_len;
++
++ u32 *tx_dma_buf;
++ struct dma_chan *chan_tx;
++ dma_addr_t tx_dma_phys;
++ struct completion cmd_tx_complete;
++ struct dma_async_tx_descriptor *tx_desc;
++
++ u32 *rx_dma_buf;
++ struct dma_chan *chan_rx;
++ dma_addr_t rx_dma_phys;
++ struct completion cmd_rx_complete;
++ struct dma_async_tx_descriptor *rx_desc;
++};
++
+ struct fsl_dspi {
+ struct spi_master *master;
+ struct platform_device *pdev;
+@@ -166,8 +194,11 @@ struct fsl_dspi {
+ u32 waitflags;
+
+ u32 spi_tcnt;
++ struct fsl_dspi_dma *dma;
+ };
+
++static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
++
+ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
+ {
+ unsigned int val;
+@@ -177,6 +208,255 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
+ return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
+ }
+
++static void dspi_tx_dma_callback(void *arg)
++{
++ struct fsl_dspi *dspi = arg;
++ struct fsl_dspi_dma *dma = dspi->dma;
++
++ complete(&dma->cmd_tx_complete);
++}
++
++static void dspi_rx_dma_callback(void *arg)
++{
++ struct fsl_dspi *dspi = arg;
++ struct fsl_dspi_dma *dma = dspi->dma;
++ int rx_word;
++ int i;
++ u16 d;
++
++ rx_word = is_double_byte_mode(dspi);
++
++ if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
++ for (i = 0; i < dma->curr_xfer_len; i++) {
++ d = dspi->dma->rx_dma_buf[i];
++ rx_word ? (*(u16 *)dspi->rx = d) :
++ (*(u8 *)dspi->rx = d);
++ dspi->rx += rx_word + 1;
++ }
++ }
++
++ complete(&dma->cmd_rx_complete);
++}
++
++static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
++{
++ struct fsl_dspi_dma *dma = dspi->dma;
++ struct device *dev = &dspi->pdev->dev;
++ int time_left;
++ int tx_word;
++ int i;
++
++ tx_word = is_double_byte_mode(dspi);
++
++ for (i = 0; i < dma->curr_xfer_len; i++) {
++ dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
++ if ((dspi->cs_change) && (!dspi->len))
++ dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
++ }
++
++ dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
++ dma->tx_dma_phys,
++ dma->curr_xfer_len *
++ DMA_SLAVE_BUSWIDTH_4_BYTES,
++ DMA_MEM_TO_DEV,
++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++ if (!dma->tx_desc) {
++ dev_err(dev, "Not able to get desc for DMA xfer\n");
++ return -EIO;
++ }
++
++ dma->tx_desc->callback = dspi_tx_dma_callback;
++ dma->tx_desc->callback_param = dspi;
++ if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
++ dev_err(dev, "DMA submit failed\n");
++ return -EINVAL;
++ }
++
++ dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
++ dma->rx_dma_phys,
++ dma->curr_xfer_len *
++ DMA_SLAVE_BUSWIDTH_4_BYTES,
++ DMA_DEV_TO_MEM,
++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++ if (!dma->rx_desc) {
++ dev_err(dev, "Not able to get desc for DMA xfer\n");
++ return -EIO;
++ }
++
++ dma->rx_desc->callback = dspi_rx_dma_callback;
++ dma->rx_desc->callback_param = dspi;
++ if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
++ dev_err(dev, "DMA submit failed\n");
++ return -EINVAL;
++ }
++
++ reinit_completion(&dspi->dma->cmd_rx_complete);
++ reinit_completion(&dspi->dma->cmd_tx_complete);
++
++ dma_async_issue_pending(dma->chan_rx);
++ dma_async_issue_pending(dma->chan_tx);
++
++ time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
++ DMA_COMPLETION_TIMEOUT);
++ if (time_left == 0) {
++ dev_err(dev, "DMA tx timeout\n");
++ dmaengine_terminate_all(dma->chan_tx);
++ dmaengine_terminate_all(dma->chan_rx);
++ return -ETIMEDOUT;
++ }
++
++ time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
++ DMA_COMPLETION_TIMEOUT);
++ if (time_left == 0) {
++ dev_err(dev, "DMA rx timeout\n");
++ dmaengine_terminate_all(dma->chan_tx);
++ dmaengine_terminate_all(dma->chan_rx);
++ return -ETIMEDOUT;
++ }
++
++ return 0;
++}
++
++static int dspi_dma_xfer(struct fsl_dspi *dspi)
++{
++ struct fsl_dspi_dma *dma = dspi->dma;
++ struct device *dev = &dspi->pdev->dev;
++ int curr_remaining_bytes;
++ int bytes_per_buffer;
++ int word = 1;
++ int ret = 0;
++
++ if (is_double_byte_mode(dspi))
++ word = 2;
++ curr_remaining_bytes = dspi->len;
++ bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
++ while (curr_remaining_bytes) {
++ /* Check if current transfer fits the DMA buffer */
++ dma->curr_xfer_len = curr_remaining_bytes / word;
++ if (dma->curr_xfer_len > bytes_per_buffer)
++ dma->curr_xfer_len = bytes_per_buffer;
++
++ ret = dspi_next_xfer_dma_submit(dspi);
++ if (ret) {
++ dev_err(dev, "DMA transfer failed\n");
++ goto exit;
++
++ } else {
++ curr_remaining_bytes -= dma->curr_xfer_len * word;
++ if (curr_remaining_bytes < 0)
++ curr_remaining_bytes = 0;
++ }
++ }
++
++exit:
++ return ret;
++}
++
++static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
++{
++ struct fsl_dspi_dma *dma;
++ struct dma_slave_config cfg;
++ struct device *dev = &dspi->pdev->dev;
++ int ret;
++
++ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
++ if (!dma)
++ return -ENOMEM;
++
++ dma->chan_rx = dma_request_slave_channel(dev, "rx");
++ if (!dma->chan_rx) {
++ dev_err(dev, "rx dma channel not available\n");
++ ret = -ENODEV;
++ return ret;
++ }
++
++ dma->chan_tx = dma_request_slave_channel(dev, "tx");
++ if (!dma->chan_tx) {
++ dev_err(dev, "tx dma channel not available\n");
++ ret = -ENODEV;
++ goto err_tx_channel;
++ }
++
++ dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
++ &dma->tx_dma_phys, GFP_KERNEL);
++ if (!dma->tx_dma_buf) {
++ ret = -ENOMEM;
++ goto err_tx_dma_buf;
++ }
++
++ dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
++ &dma->rx_dma_phys, GFP_KERNEL);
++ if (!dma->rx_dma_buf) {
++ ret = -ENOMEM;
++ goto err_rx_dma_buf;
++ }
++
++ cfg.src_addr = phy_addr + SPI_POPR;
++ cfg.dst_addr = phy_addr + SPI_PUSHR;
++ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ cfg.src_maxburst = 1;
++ cfg.dst_maxburst = 1;
++
++ cfg.direction = DMA_DEV_TO_MEM;
++ ret = dmaengine_slave_config(dma->chan_rx, &cfg);
++ if (ret) {
++ dev_err(dev, "can't configure rx dma channel\n");
++ ret = -EINVAL;
++ goto err_slave_config;
++ }
++
++ cfg.direction = DMA_MEM_TO_DEV;
++ ret = dmaengine_slave_config(dma->chan_tx, &cfg);
++ if (ret) {
++ dev_err(dev, "can't configure tx dma channel\n");
++ ret = -EINVAL;
++ goto err_slave_config;
++ }
++
++ dspi->dma = dma;
++ init_completion(&dma->cmd_tx_complete);
++ init_completion(&dma->cmd_rx_complete);
++
++ return 0;
++
++err_slave_config:
++ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
++ dma->rx_dma_buf, dma->rx_dma_phys);
++err_rx_dma_buf:
++ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
++ dma->tx_dma_buf, dma->tx_dma_phys);
++err_tx_dma_buf:
++ dma_release_channel(dma->chan_tx);
++err_tx_channel:
++ dma_release_channel(dma->chan_rx);
++
++ devm_kfree(dev, dma);
++ dspi->dma = NULL;
++
++ return ret;
++}
++
++static void dspi_release_dma(struct fsl_dspi *dspi)
++{
++ struct fsl_dspi_dma *dma = dspi->dma;
++ struct device *dev = &dspi->pdev->dev;
++
++ if (dma) {
++ if (dma->chan_tx) {
++ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
++ dma->tx_dma_buf, dma->tx_dma_phys);
++ dma_release_channel(dma->chan_tx);
++ }
++
++ if (dma->chan_rx) {
++ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
++ dma->rx_dma_buf, dma->rx_dma_phys);
++ dma_release_channel(dma->chan_rx);
++ }
++ }
++}
++
+ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
+ unsigned long clkrate)
+ {
+@@ -425,6 +705,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
+ dspi_tcfq_write(dspi);
+ break;
++ case DSPI_DMA_MODE:
++ regmap_write(dspi->regmap, SPI_RSER,
++ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
++ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
++ status = dspi_dma_xfer(dspi);
++ break;
+ default:
+ dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
+ trans_mode);
+@@ -432,9 +718,13 @@ static int dspi_transfer_one_message(struct spi_master *master,
+ goto out;
+ }
+
+- if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
+- dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
+- dspi->waitflags = 0;
++ if (trans_mode != DSPI_DMA_MODE) {
++ if (wait_event_interruptible(dspi->waitq,
++ dspi->waitflags))
++ dev_err(&dspi->pdev->dev,
++ "wait transfer complete fail!\n");
++ dspi->waitflags = 0;
++ }
+
+ if (transfer->delay_usecs)
+ udelay(transfer->delay_usecs);
+@@ -712,7 +1002,8 @@ static int dspi_probe(struct platform_device *pdev)
+ if (IS_ERR(dspi->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ PTR_ERR(dspi->regmap));
+- return PTR_ERR(dspi->regmap);
++ ret = PTR_ERR(dspi->regmap);
++ goto out_master_put;
+ }
+
+ dspi_init(dspi);
+@@ -740,6 +1031,13 @@ static int dspi_probe(struct platform_device *pdev)
+ if (ret)
+ goto out_master_put;
+
++ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
++ if (dspi_request_dma(dspi, res->start)) {
++ dev_err(&pdev->dev, "can't get dma channels\n");
++ goto out_clk_put;
++ }
++ }
++
+ master->max_speed_hz =
+ clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
+
+@@ -768,6 +1066,7 @@ static int dspi_remove(struct platform_device *pdev)
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ /* Disconnect from the SPI framework */
++ dspi_release_dma(dspi);
+ clk_disable_unprepare(dspi->clk);
+ spi_unregister_master(dspi->master);
+
+--
+2.14.1
+
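[In the DSPI DMA path above, each SPI frame is queued as one 32-bit PUSHR
word, so the coherent buffer (DSPI_DMA_BUFSIZE = DSPI_FIFO_SIZE * 1024 =
4096 bytes) holds at most 1024 frames per DMA round; despite its name,
bytes_per_buffer in dspi_dma_xfer() therefore counts frames. A standalone
sketch of the chunking arithmetic (hypothetical transfer length):

    #include <stdio.h>

    #define DSPI_FIFO_SIZE   4
    #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)

    int main(void)
    {
            int len = 5000;  /* bytes remaining, as in dspi->len */
            int word = 2;    /* 16-bit frames: 2 bytes per frame */
            int per_round = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE; /* 1024 */

            while (len > 0) {
                    int frames = len / word;

                    if (frames > per_round)
                            frames = per_round;
                    printf("round: %d frames (%d bytes)\n",
                           frames, frames * word);
                    len -= frames * word;
            }
            return 0;
    }

A 5000-byte transfer of 16-bit frames thus takes three DMA rounds of 1024,
1024 and 452 frames.]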
diff --git a/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch b/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch
new file mode 100644
index 0000000000..3e0d30f61d
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch
@@ -0,0 +1,163 @@
+From 469daac0faff06209bc1d1390571b860d153a82b Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Wed, 27 Sep 2017 10:33:47 +0800
+Subject: [PATCH] tty: serial: support layerscape
+
+This is an integrated patch for layerscape UART support.
+
+Signed-off-by: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
+Signed-off-by: Stefan Agner <stefan@agner.ch>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/tty/serial/fsl_lpuart.c | 66 ++++++++++++++++++++++++++++-------------
+ 1 file changed, 46 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 76103f2c..61453820 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -231,6 +231,8 @@
+ #define DEV_NAME "ttyLP"
+ #define UART_NR 6
+
++static DECLARE_BITMAP(linemap, UART_NR);
++
+ struct lpuart_port {
+ struct uart_port port;
+ struct clk *clk;
+@@ -1348,6 +1350,18 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ /* ask the core to calculate the divisor */
+ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+
++ /*
++ * Need to update the Ring buffer length according to the selected
++ * baud rate and restart Rx DMA path.
++ *
++ * Since the timer function acquires sport->port.lock, stop the timer
++ * before taking the same lock, otherwise del_timer_sync() can deadlock.
++ */
++ if (old && sport->lpuart_dma_rx_use) {
++ del_timer_sync(&sport->lpuart_timer);
++ lpuart_dma_rx_free(&sport->port);
++ }
++
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ sport->port.read_status_mask = 0;
+@@ -1397,22 +1411,11 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ /* restore control register */
+ writeb(old_cr2, sport->port.membase + UARTCR2);
+
+- /*
+- * If new baud rate is set, we will also need to update the Ring buffer
+- * length according to the selected baud rate and restart Rx DMA path.
+- */
+- if (old) {
+- if (sport->lpuart_dma_rx_use) {
+- del_timer_sync(&sport->lpuart_timer);
+- lpuart_dma_rx_free(&sport->port);
+- }
+-
+- if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
+- sport->lpuart_dma_rx_use = true;
++ if (old && sport->lpuart_dma_rx_use) {
++ if (!lpuart_start_rx_dma(sport))
+ rx_dma_timer_init(sport);
+- } else {
++ else
+ sport->lpuart_dma_rx_use = false;
+- }
+ }
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+@@ -1640,6 +1643,13 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
+ {
+ struct lpuart_port *sport = lpuart_ports[co->index];
+ unsigned char old_cr2, cr2;
++ unsigned long flags;
++ int locked = 1;
++
++ if (sport->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&sport->port.lock, flags);
++ else
++ spin_lock_irqsave(&sport->port.lock, flags);
+
+ /* first save CR2 and then disable interrupts */
+ cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
+@@ -1654,6 +1664,9 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
+ barrier();
+
+ writeb(old_cr2, sport->port.membase + UARTCR2);
++
++ if (locked)
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+
+ static void
+@@ -1661,6 +1674,13 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
+ {
+ struct lpuart_port *sport = lpuart_ports[co->index];
+ unsigned long old_cr, cr;
++ unsigned long flags;
++ int locked = 1;
++
++ if (sport->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&sport->port.lock, flags);
++ else
++ spin_lock_irqsave(&sport->port.lock, flags);
+
+ /* first save CR2 and then disable interrupts */
+ cr = old_cr = lpuart32_read(sport->port.membase + UARTCTRL);
+@@ -1675,6 +1695,9 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
+ barrier();
+
+ lpuart32_write(old_cr, sport->port.membase + UARTCTRL);
++
++ if (locked)
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+
+ /*
+@@ -1899,9 +1922,13 @@ static int lpuart_probe(struct platform_device *pdev)
+
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+- dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+- return ret;
++ ret = find_first_zero_bit(linemap, UART_NR);
++ if (ret >= UART_NR) {
++ dev_err(&pdev->dev, "port line is full, add device failed\n");
++ return ret;
++ }
+ }
++ set_bit(ret, linemap);
+ sport->port.line = ret;
+ sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
+
+@@ -1983,6 +2010,7 @@ static int lpuart_remove(struct platform_device *pdev)
+ struct lpuart_port *sport = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&lpuart_reg, &sport->port);
++ clear_bit(sport->port.line, linemap);
+
+ clk_disable_unprepare(sport->clk);
+
+@@ -2067,12 +2095,10 @@ static int lpuart_resume(struct device *dev)
+
+ if (sport->lpuart_dma_rx_use) {
+ if (sport->port.irq_wake) {
+- if (!lpuart_start_rx_dma(sport)) {
+- sport->lpuart_dma_rx_use = true;
++ if (!lpuart_start_rx_dma(sport))
+ rx_dma_timer_init(sport);
+- } else {
++ else
+ sport->lpuart_dma_rx_use = false;
+- }
+ }
+ }
+
+--
+2.14.1
+
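[The console-write hunks above add the standard serial-console locking
pattern: take the port lock normally, but only try-lock when sysrq handling
or an oops is in progress, so a CPU that crashed while already holding the
lock can still get its message out. The shape of the pattern, condensed
from the patch:

    unsigned long flags;
    int locked = 1;

    if (sport->port.sysrq || oops_in_progress)
            locked = spin_trylock_irqsave(&sport->port.lock, flags);
    else
            spin_lock_irqsave(&sport->port.lock, flags);

    /* ... poll the characters out ... */

    if (locked)
            spin_unlock_irqrestore(&sport->port.lock, flags);

If the trylock fails the console writes anyway, unlocked: losing mutual
exclusion there is preferable to losing an oops message.]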
diff --git a/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch b/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch
new file mode 100644
index 0000000000..ce29f5b17a
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch
@@ -0,0 +1,1471 @@
+From f8daa8e984213554008e73cd155530dceec5a109 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Wed, 27 Sep 2017 10:34:07 +0800
+Subject: [PATCH] usb: support layerscape
+
+This is an integrated patch for layerscape USB support.
+
+Signed-off-by: yinbo.zhu <yinbo.zhu@nxp.com>
+Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
+Signed-off-by: Nikhil Badola <nikhil.badola@freescale.com>
+Signed-off-by: Changming Huang <jerry.huang@nxp.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Rajesh Bhagat <rajesh.bhagat@freescale.com>
+Signed-off-by: Suresh Gupta <suresh.gupta@freescale.com>
+Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/usb/common/common.c | 50 ++++++
+ drivers/usb/core/hub.c | 8 +
+ drivers/usb/dwc3/core.c | 235 ++++++++++++++++++++++++++-
+ drivers/usb/dwc3/core.h | 46 +++++-
+ drivers/usb/dwc3/host.c | 15 +-
+ drivers/usb/gadget/udc/fsl_udc_core.c | 46 +++---
+ drivers/usb/gadget/udc/fsl_usb2_udc.h | 16 +-
+ drivers/usb/host/Kconfig | 2 +-
+ drivers/usb/host/ehci-fsl.c | 289 +++++++++++++++++++++++++++++++---
+ drivers/usb/host/ehci-fsl.h | 3 +
+ drivers/usb/host/ehci-hub.c | 2 +
+ drivers/usb/host/ehci.h | 5 +
+ drivers/usb/host/fsl-mph-dr-of.c | 12 ++
+ drivers/usb/phy/phy-fsl-usb.c | 59 +++++--
+ drivers/usb/phy/phy-fsl-usb.h | 8 +
+ include/linux/usb.h | 1 +
+ include/linux/usb/of.h | 2 +
+ 17 files changed, 726 insertions(+), 73 deletions(-)
+
+diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
+index 5ef8da6e..176dee01 100644
+--- a/drivers/usb/common/common.c
++++ b/drivers/usb/common/common.c
+@@ -105,6 +105,56 @@ static const char *const usb_dr_modes[] = {
+ [USB_DR_MODE_OTG] = "otg",
+ };
+
++/**
++ * of_usb_get_dr_mode - Get dual role mode for given device_node
++ * @np: Pointer to the given device_node
++ *
++ * The function gets the dual-role mode string from the 'dr_mode' property
++ * and returns the corresponding enum usb_dr_mode.
++ */
++enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
++{
++ const char *dr_mode;
++ int err, i;
++
++ err = of_property_read_string(np, "dr_mode", &dr_mode);
++ if (err < 0)
++ return USB_DR_MODE_UNKNOWN;
++
++ for (i = 0; i < ARRAY_SIZE(usb_dr_modes); i++)
++ if (!strcmp(dr_mode, usb_dr_modes[i]))
++ return i;
++
++ return USB_DR_MODE_UNKNOWN;
++}
++EXPORT_SYMBOL_GPL(of_usb_get_dr_mode);
++
++/**
++ * of_usb_get_maximum_speed - Get maximum requested speed for a given USB
++ * controller.
++ * @np: Pointer to the given device_node
++ *
++ * The function gets the maximum speed string from property "maximum-speed",
++ * and returns the corresponding enum usb_device_speed.
++ */
++enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np)
++{
++ const char *maximum_speed;
++ int err;
++ int i;
++
++ err = of_property_read_string(np, "maximum-speed", &maximum_speed);
++ if (err < 0)
++ return USB_SPEED_UNKNOWN;
++
++ for (i = 0; i < ARRAY_SIZE(speed_names); i++)
++ if (strcmp(maximum_speed, speed_names[i]) == 0)
++ return i;
++
++ return USB_SPEED_UNKNOWN;
++}
++EXPORT_SYMBOL_GPL(of_usb_get_maximum_speed);
++
+ static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str)
+ {
+ int ret;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 80d4ef31..e23acf03 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4412,6 +4412,14 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ else
+ speed = usb_speed_string(udev->speed);
+
++#if !defined(CONFIG_FSL_USB2_OTG) && !defined(CONFIG_FSL_USB2_OTG_MODULE)
++	if (udev->speed != USB_SPEED_SUPER)
++		dev_info(&udev->dev,
++			 "%s %s USB device number %d using %s\n",
++			 (udev->config) ? "reset" : "new", speed,
++			 devnum, udev->bus->controller->driver->name);
++#endif
++
+ if (udev->speed < USB_SPEED_SUPER)
+ dev_info(&udev->dev,
+ "%s %s USB device number %d using %s\n",
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index fea44690..e34ef90a 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -58,6 +58,7 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
+ enum usb_dr_mode mode;
+ struct device *dev = dwc->dev;
+ unsigned int hw_mode;
++ struct device_node *node = dev->of_node;
+
+ if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
+ dwc->dr_mode = USB_DR_MODE_OTG;
+@@ -83,6 +84,24 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
+ mode = USB_DR_MODE_HOST;
+ break;
+ default:
++ /* Adjust Frame Length */
++ if (dwc->configure_gfladj)
++ dwc3_writel(dwc->regs, DWC3_GFLADJ, GFLADJ_30MHZ_REG_SEL |
++ GFLADJ_30MHZ(GFLADJ_30MHZ_DEFAULT));
++
++ /* Change burst beat and outstanding pipelined transfers requests */
++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0,
++ (dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) & ~0xff) | 0xf);
++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG1,
++ dwc3_readl(dwc->regs, DWC3_GSBUSCFG1) | 0xf00);
++
++ /* Enable Snooping */
++ if (node && of_dma_is_coherent(node)) {
++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0,
++ dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) | 0x22220000);
++ dev_dbg(dev, "enabled snooping for usb\n");
++ }
++
+ if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
+ mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
+@@ -213,8 +232,9 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
+
+ reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
+ dft = reg & DWC3_GFLADJ_30MHZ_MASK;
+- if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj,
+- "request value same as default, ignoring\n")) {
++ if (dft == dwc->fladj) {
++ dev_warn(dwc->dev, "request value same as default, ignoring\n");
++ } else {
+ reg &= ~DWC3_GFLADJ_30MHZ_MASK;
+ reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
+ dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
+@@ -579,6 +599,99 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
+ return 0;
+ }
+
++/* set global soc bus configuration registers */
++static void dwc3_set_soc_bus_cfg(struct dwc3 *dwc)
++{
++ struct device *dev = dwc->dev;
++ u32 *vals;
++ u32 cfg;
++ int ntype;
++ int ret;
++ int i;
++
++ cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
++
++ /*
++ * Handle property "snps,incr-burst-type-adjustment".
++ * Get the number of values from this property:
++ * result <= 0, means this property is not supported.
++ * result = 1, means INCRx burst mode supported.
++ * result > 1, means undefined length burst mode supported.
++ */
++ ntype = device_property_read_u32_array(dev,
++ "snps,incr-burst-type-adjustment", NULL, 0);
++ if (ntype > 0) {
++ vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
++ if (!vals) {
++ dev_err(dev, "Error to get memory\n");
++ return;
++ }
++ /* Get INCR burst type, and parse it */
++ ret = device_property_read_u32_array(dev,
++ "snps,incr-burst-type-adjustment", vals, ntype);
++ if (ret) {
++ dev_err(dev, "Error to get property\n");
++ return;
++ }
++ *(dwc->incrx_type + 1) = vals[0];
++ if (ntype > 1) {
++ *dwc->incrx_type = 1;
++ for (i = 1; i < ntype; i++) {
++ if (vals[i] > *(dwc->incrx_type + 1))
++ *(dwc->incrx_type + 1) = vals[i];
++ }
++ } else
++ *dwc->incrx_type = 0;
++
++ /* Enable Undefined Length INCR Burst and Enable INCRx Burst */
++ cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
++ if (*dwc->incrx_type)
++ cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
++ switch (*(dwc->incrx_type + 1)) {
++ case 256:
++ cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
++ break;
++ case 128:
++ cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
++ break;
++ case 64:
++ cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
++ break;
++ case 32:
++ cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
++ break;
++ case 16:
++ cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
++ break;
++ case 8:
++ cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
++ break;
++ case 4:
++ cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
++ break;
++ case 1:
++ break;
++ default:
++ dev_err(dev, "Invalid property\n");
++ break;
++ }
++ /* values were copied into dwc->incrx_type; release the buffer */
++ kfree(vals);
++ }
++
++ /* Handle usb snooping */
++ if (dwc->dma_snooping_quirk) {
++ cfg &= ~DWC3_GSBUSCFG0_SNP_MASK;
++ cfg |= (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DATARD_SHIFT) |
++ (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DESCRD_SHIFT) |
++ (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DATAWR_SHIFT) |
++ (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DESCWR_SHIFT);
++ }
++
++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
++}
++
+ static void dwc3_core_exit(struct dwc3 *dwc)
+ {
+ dwc3_event_buffers_cleanup(dwc);
+@@ -721,6 +834,8 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ if (ret)
+ goto err1;
+
++ dwc3_set_soc_bus_cfg(dwc);
++
+ /* Adjust Frame Length */
+ dwc3_frame_length_adjustment(dwc);
+
+@@ -919,11 +1034,109 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
+ }
+ }
+
++static void dwc3_get_properties(struct dwc3 *dwc)
++{
++ struct device *dev = dwc->dev;
++ struct device_node *node = dev->of_node;
++ u8 lpm_nyet_threshold;
++ u8 tx_de_emphasis;
++ u8 hird_threshold;
++
++ /* default to highest possible threshold */
++ lpm_nyet_threshold = 0xff;
++
++ /* default to -3.5dB de-emphasis */
++ tx_de_emphasis = 1;
++
++ /*
++ * default to assert utmi_sleep_n and use maximum allowed HIRD
++ * threshold value of 0b1100
++ */
++ hird_threshold = 12;
++
++ dwc->maximum_speed = usb_get_maximum_speed(dev);
++ dwc->dr_mode = usb_get_dr_mode(dev);
++ dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
++
++ dwc->sysdev_is_parent = device_property_read_bool(dev,
++ "linux,sysdev_is_parent");
++ if (dwc->sysdev_is_parent)
++ dwc->sysdev = dwc->dev->parent;
++ else
++ dwc->sysdev = dwc->dev;
++
++ dwc->has_lpm_erratum = device_property_read_bool(dev,
++ "snps,has-lpm-erratum");
++ device_property_read_u8(dev, "snps,lpm-nyet-threshold",
++ &lpm_nyet_threshold);
++ dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
++ "snps,is-utmi-l1-suspend");
++ device_property_read_u8(dev, "snps,hird-threshold",
++ &hird_threshold);
++ dwc->usb3_lpm_capable = device_property_read_bool(dev,
++ "snps,usb3_lpm_capable");
++
++ dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
++
++ dwc->configure_gfladj =
++ of_property_read_bool(node, "configure-gfladj");
++
++ dwc->disable_scramble_quirk = device_property_read_bool(dev,
++ "snps,disable_scramble_quirk");
++ dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
++ "snps,u2exit_lfps_quirk");
++ dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
++ "snps,u2ss_inp3_quirk");
++ dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
++ "snps,req_p1p2p3_quirk");
++ dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
++ "snps,del_p1p2p3_quirk");
++ dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
++ "snps,del_phy_power_chg_quirk");
++ dwc->lfps_filter_quirk = device_property_read_bool(dev,
++ "snps,lfps_filter_quirk");
++ dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
++ "snps,rx_detect_poll_quirk");
++ dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
++ "snps,dis_u3_susphy_quirk");
++ dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
++ "snps,dis_u2_susphy_quirk");
++ dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
++ "snps,dis_enblslpm_quirk");
++ dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
++ "snps,dis_rxdet_inp3_quirk");
++ dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
++ "snps,dis-u2-freeclk-exists-quirk");
++ dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
++ "snps,dis-del-phy-power-chg-quirk");
++ dwc->dma_snooping_quirk = device_property_read_bool(dev,
++ "snps,dma-snooping");
++
++ dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
++ "snps,tx_de_emphasis_quirk");
++ device_property_read_u8(dev, "snps,tx_de_emphasis",
++ &tx_de_emphasis);
++ device_property_read_string(dev, "snps,hsphy_interface",
++ &dwc->hsphy_interface);
++ device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
++ &dwc->fladj);
++
++ dwc->lpm_nyet_threshold = lpm_nyet_threshold;
++ dwc->tx_de_emphasis = tx_de_emphasis;
++
++ dwc->hird_threshold = hird_threshold
++ | (dwc->is_utmi_l1_suspend << 4);
++
++ dwc->imod_interval = 0;
++}
++
+ #define DWC3_ALIGN_MASK (16 - 1)
+
+ static int dwc3_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
++ struct device_node *node = dev->of_node;
+ struct resource *res;
+ struct dwc3 *dwc;
+ u8 lpm_nyet_threshold;
+@@ -955,6 +1168,11 @@ static int dwc3_probe(struct platform_device *pdev)
+ dwc->xhci_resources[0].flags = res->flags;
+ dwc->xhci_resources[0].name = res->name;
+
++ if (node) {
++ dwc->configure_gfladj =
++ of_property_read_bool(node, "configure-gfladj");
++ }
++
+ res->start += DWC3_GLOBALS_REGS_START;
+
+ /*
+@@ -997,6 +1215,12 @@ static int dwc3_probe(struct platform_device *pdev)
+ dwc->usb3_lpm_capable = device_property_read_bool(dev,
+ "snps,usb3_lpm_capable");
+
++ dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
++
++ dwc->configure_gfladj =
++ of_property_read_bool(node, "configure-gfladj");
++ dwc->dr_mode = of_usb_get_dr_mode(node);
++
+ dwc->disable_scramble_quirk = device_property_read_bool(dev,
+ "snps,disable_scramble_quirk");
+ dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
+@@ -1041,6 +1265,8 @@ static int dwc3_probe(struct platform_device *pdev)
+ dwc->hird_threshold = hird_threshold
+ | (dwc->is_utmi_l1_suspend << 4);
+
++ dwc3_get_properties(dwc);
++
+ platform_set_drvdata(pdev, dwc);
+ dwc3_cache_hwparams(dwc);
+
+@@ -1064,6 +1290,11 @@ static int dwc3_probe(struct platform_device *pdev)
+ if (ret < 0)
+ goto err1;
+
++ /* Adjust Frame Length */
++ if (dwc->configure_gfladj)
++ dwc3_writel(dwc->regs, DWC3_GFLADJ, GFLADJ_30MHZ_REG_SEL |
++ GFLADJ_30MHZ(GFLADJ_30MHZ_DEFAULT));
++
+ pm_runtime_forbid(dev);
+
+ ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 884c4371..9151eef4 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -26,6 +26,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/mm.h>
+ #include <linux/debugfs.h>
++#include <linux/of_address.h>
+
+ #include <linux/usb/ch9.h>
+ #include <linux/usb/gadget.h>
+@@ -154,6 +155,32 @@
+
+ /* Bit fields */
+
++/* Global SoC Bus Configuration Register 0 */
++#define AXI3_CACHE_TYPE_AW 0x8 /* write allocate */
++#define AXI3_CACHE_TYPE_AR 0x4 /* read allocate */
++#define AXI3_CACHE_TYPE_SNP 0x2 /* cacheable */
++#define AXI3_CACHE_TYPE_BUF 0x1 /* bufferable */
++#define DWC3_GSBUSCFG0_DATARD_SHIFT 28
++#define DWC3_GSBUSCFG0_DESCRD_SHIFT 24
++#define DWC3_GSBUSCFG0_DATAWR_SHIFT 20
++#define DWC3_GSBUSCFG0_DESCWR_SHIFT 16
++#define DWC3_GSBUSCFG0_SNP_MASK 0xffff0000
++#define DWC3_GSBUSCFG0_DATABIGEND (1 << 11)
++#define DWC3_GSBUSCFG0_DESCBIGEND (1 << 10)
++#define DWC3_GSBUSCFG0_INCR256BRSTENA (1 << 7) /* INCR256 burst */
++#define DWC3_GSBUSCFG0_INCR128BRSTENA (1 << 6) /* INCR128 burst */
++#define DWC3_GSBUSCFG0_INCR64BRSTENA (1 << 5) /* INCR64 burst */
++#define DWC3_GSBUSCFG0_INCR32BRSTENA (1 << 4) /* INCR32 burst */
++#define DWC3_GSBUSCFG0_INCR16BRSTENA (1 << 3) /* INCR16 burst */
++#define DWC3_GSBUSCFG0_INCR8BRSTENA (1 << 2) /* INCR8 burst */
++#define DWC3_GSBUSCFG0_INCR4BRSTENA (1 << 1) /* INCR4 burst */
++#define DWC3_GSBUSCFG0_INCRBRSTENA (1 << 0) /* undefined length enable */
++#define DWC3_GSBUSCFG0_INCRBRST_MASK 0xff
++
++/* Global SoC Bus Configuration Register 1 */
++#define DWC3_GSBUSCFG1_1KPAGEENA (1 << 12) /* 1K page boundary enable */
++#define DWC3_GSBUSCFG1_PTRANSLIMIT_MASK 0xf00
++
+ /* Global Debug Queue/FIFO Space Available Register */
+ #define DWC3_GDBGFIFOSPACE_NUM(n) ((n) & 0x1f)
+ #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
+@@ -180,7 +207,6 @@
+ #define DWC3_GCTL_CLK_PIPE (1)
+ #define DWC3_GCTL_CLK_PIPEHALF (2)
+ #define DWC3_GCTL_CLK_MASK (3)
+-
+ #define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12)
+ #define DWC3_GCTL_PRTCAPDIR(n) ((n) << 12)
+ #define DWC3_GCTL_PRTCAP_HOST 1
+@@ -289,6 +315,10 @@
+ /* Global Frame Length Adjustment Register */
+ #define DWC3_GFLADJ_30MHZ_SDBND_SEL (1 << 7)
+ #define DWC3_GFLADJ_30MHZ_MASK 0x3f
++#define GFLADJ_30MHZ_REG_SEL (1 << 7)
++#define GFLADJ_30MHZ(n) ((n) & 0x3f)
++#define GFLADJ_30MHZ_DEFAULT 0x20
++
+
+ /* Global User Control Register 2 */
+ #define DWC3_GUCTL2_RST_ACTBITLATER (1 << 14)
+@@ -753,6 +783,7 @@ struct dwc3_scratchpad_array {
+ * @regs: base address for our registers
+ * @regs_size: address space size
+ * @fladj: frame length adjustment
++ * @incrx_type: INCR burst type adjustment
+ * @irq_gadget: peripheral controller's IRQ number
+ * @nr_scratch: number of scratch buffers
+ * @u1u2: only used on revisions <1.83a for workaround
+@@ -847,6 +878,7 @@ struct dwc3 {
+ spinlock_t lock;
+
+ struct device *dev;
++ struct device *sysdev;
+
+ struct platform_device *xhci;
+ struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM];
+@@ -872,6 +904,12 @@ struct dwc3 {
+ enum usb_phy_interface hsphy_mode;
+
+ u32 fladj;
++ /*
++ * For INCR burst type.
++ * First field: for undefined length INCR burst type enable.
++ * Second field: for INCRx burst type enable
++ */
++ u32 incrx_type[2];
+ u32 irq_gadget;
+ u32 nr_scratch;
+ u32 u1u2;
+@@ -948,9 +986,12 @@ struct dwc3 {
+ unsigned ep0_bounced:1;
+ unsigned ep0_expect_in:1;
+ unsigned has_hibernation:1;
++ unsigned sysdev_is_parent:1;
+ unsigned has_lpm_erratum:1;
+ unsigned is_utmi_l1_suspend:1;
+ unsigned is_fpga:1;
++ unsigned needs_fifo_resize:1;
++ unsigned configure_gfladj:1;
+ unsigned pending_events:1;
+ unsigned pullups_connected:1;
+ unsigned setup_packet_pending:1;
+@@ -971,9 +1012,12 @@ struct dwc3 {
+ unsigned dis_rxdet_inp3_quirk:1;
+ unsigned dis_u2_freeclk_exists_quirk:1;
+ unsigned dis_del_phy_power_chg_quirk:1;
++ unsigned dma_snooping_quirk:1;
+
+ unsigned tx_de_emphasis_quirk:1;
+ unsigned tx_de_emphasis:2;
++
++ u16 imod_interval;
+ };
+
+ /* -------------------------------------------------------------------------- */
+diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
+index 626d87d5..f1b98273 100644
+--- a/drivers/usb/dwc3/host.c
++++ b/drivers/usb/dwc3/host.c
+@@ -17,6 +17,8 @@
+
+ #include <linux/platform_device.h>
+
++#include <linux/of_device.h>
++
+ #include "core.h"
+
+ int dwc3_host_init(struct dwc3 *dwc)
+@@ -73,12 +75,21 @@ int dwc3_host_init(struct dwc3 *dwc)
+ return -ENOMEM;
+ }
+
+- dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
++ if (IS_ENABLED(CONFIG_OF) && dwc->dev->of_node)
++ of_dma_configure(&xhci->dev, dwc->dev->of_node);
++ else
++ dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
+
+- xhci->dev.parent = dwc->dev;
++ xhci->dev.parent = dwc->dev;
+ xhci->dev.dma_mask = dwc->dev->dma_mask;
+ xhci->dev.dma_parms = dwc->dev->dma_parms;
+
++ /* set DMA operations */
++ if (dwc->dev->of_node && of_dma_is_coherent(dwc->dev->of_node)) {
++ xhci->dev.archdata.dma_ops = dwc->dev->archdata.dma_ops;
++ dev_dbg(dwc->dev, "set dma_ops for usb\n");
++ }
++
+ dwc->xhci = xhci;
+
+ ret = platform_device_add_resources(xhci, dwc->xhci_resources,
+diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
+index aac0ce8a..fe49e758 100644
+--- a/drivers/usb/gadget/udc/fsl_udc_core.c
++++ b/drivers/usb/gadget/udc/fsl_udc_core.c
+@@ -198,7 +198,11 @@ __acquires(ep->udc->lock)
+
+ spin_unlock(&ep->udc->lock);
+
+- usb_gadget_giveback_request(&ep->ep, &req->req);
++ /* this complete() should be a function implemented by the gadget
++ * layer, e.g. fsg->bulk_in_complete()
++ */
++ if (req->req.complete)
++ usb_gadget_giveback_request(&ep->ep, &req->req);
+
+ spin_lock(&ep->udc->lock);
+ ep->stopped = stopped;
+@@ -245,10 +249,10 @@ static int dr_controller_setup(struct fsl_udc *udc)
+ if (udc->pdata->have_sysif_regs) {
+ if (udc->pdata->controller_ver) {
+ /* controller version 1.6 or above */
+- ctrl = __raw_readl(&usb_sys_regs->control);
++ ctrl = ioread32be(&usb_sys_regs->control);
+ ctrl &= ~USB_CTRL_UTMI_PHY_EN;
+ ctrl |= USB_CTRL_USB_EN;
+- __raw_writel(ctrl, &usb_sys_regs->control);
++ iowrite32be(ctrl, &usb_sys_regs->control);
+ }
+ }
+ portctrl |= PORTSCX_PTS_ULPI;
+@@ -257,13 +261,14 @@ static int dr_controller_setup(struct fsl_udc *udc)
+ portctrl |= PORTSCX_PTW_16BIT;
+ /* fall through */
+ case FSL_USB2_PHY_UTMI:
++ case FSL_USB2_PHY_UTMI_DUAL:
+ if (udc->pdata->have_sysif_regs) {
+ if (udc->pdata->controller_ver) {
+ /* controller version 1.6 or above */
+- ctrl = __raw_readl(&usb_sys_regs->control);
++ ctrl = ioread32be(&usb_sys_regs->control);
+ ctrl |= (USB_CTRL_UTMI_PHY_EN |
+ USB_CTRL_USB_EN);
+- __raw_writel(ctrl, &usb_sys_regs->control);
++ iowrite32be(ctrl, &usb_sys_regs->control);
+ mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI
+ PHY CLK to become stable - 10ms*/
+ }
+@@ -329,22 +334,22 @@ static int dr_controller_setup(struct fsl_udc *udc)
+ /* Config control enable i/o output, cpu endian register */
+ #ifndef CONFIG_ARCH_MXC
+ if (udc->pdata->have_sysif_regs) {
+- ctrl = __raw_readl(&usb_sys_regs->control);
++ ctrl = ioread32be(&usb_sys_regs->control);
+ ctrl |= USB_CTRL_IOENB;
+- __raw_writel(ctrl, &usb_sys_regs->control);
++ iowrite32be(ctrl, &usb_sys_regs->control);
+ }
+ #endif
+
+-#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
++#if !defined(CONFIG_NOT_COHERENT_CACHE)
+ /* Turn on cache snooping hardware, since some PowerPC platforms
+ * wholly rely on hardware to deal with cache coherent. */
+
+ if (udc->pdata->have_sysif_regs) {
+ /* Setup Snooping for all the 4GB space */
+ tmp = SNOOP_SIZE_2GB; /* starts from 0x0, size 2G */
+- __raw_writel(tmp, &usb_sys_regs->snoop1);
++ iowrite32be(tmp, &usb_sys_regs->snoop1);
+ tmp |= 0x80000000; /* starts from 0x8000000, size 2G */
+- __raw_writel(tmp, &usb_sys_regs->snoop2);
++ iowrite32be(tmp, &usb_sys_regs->snoop2);
+ }
+ #endif
+
+@@ -1057,7 +1062,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
+ struct ep_queue_head *qh;
+
+ ep = container_of(_ep, struct fsl_ep, ep);
+- if (!_ep || (!ep->ep.desc && ep_index(ep) != 0))
++ if (!_ep || !ep->ep.desc || (ep_index(ep) == 0))
+ return -ENODEV;
+
+ udc = (struct fsl_udc *)ep->udc;
+@@ -1599,14 +1604,13 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
+ struct fsl_req *curr_req)
+ {
+ struct ep_td_struct *curr_td;
+- int td_complete, actual, remaining_length, j, tmp;
++ int actual, remaining_length, j, tmp;
+ int status = 0;
+ int errors = 0;
+ struct ep_queue_head *curr_qh = &udc->ep_qh[pipe];
+ int direction = pipe % 2;
+
+ curr_td = curr_req->head;
+- td_complete = 0;
+ actual = curr_req->req.length;
+
+ for (j = 0; j < curr_req->dtd_count; j++) {
+@@ -1651,11 +1655,9 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
+ status = -EPROTO;
+ break;
+ } else {
+- td_complete++;
+ break;
+ }
+ } else {
+- td_complete++;
+ VDBG("dTD transmitted successful");
+ }
+
+@@ -1698,7 +1700,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
+ curr_ep = get_ep_by_pipe(udc, i);
+
+ /* If the ep is configured */
+- if (curr_ep->name == NULL) {
++ if (strncmp(curr_ep->name, "ep", 2)) {
+ WARNING("Invalid EP?");
+ continue;
+ }
+@@ -2420,10 +2422,12 @@ static int fsl_udc_probe(struct platform_device *pdev)
+ usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
+ #endif
+
++#ifdef CONFIG_ARCH_MXC
+ /* Initialize USB clocks */
+ ret = fsl_udc_clk_init(pdev);
+ if (ret < 0)
+ goto err_iounmap_noclk;
++#endif
+
+ /* Read Device Controller Capability Parameters register */
+ dccparams = fsl_readl(&dr_regs->dccparams);
+@@ -2463,9 +2467,11 @@ static int fsl_udc_probe(struct platform_device *pdev)
+ dr_controller_setup(udc_controller);
+ }
+
++#ifdef CONFIG_ARCH_MXC
+ ret = fsl_udc_clk_finalize(pdev);
+ if (ret)
+ goto err_free_irq;
++#endif
+
+ /* Setup gadget structure */
+ udc_controller->gadget.ops = &fsl_gadget_ops;
+@@ -2478,6 +2484,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
+ /* Setup gadget.dev and register with kernel */
+ dev_set_name(&udc_controller->gadget.dev, "gadget");
+ udc_controller->gadget.dev.of_node = pdev->dev.of_node;
++ set_dma_ops(&udc_controller->gadget.dev, pdev->dev.archdata.dma_ops);
+
+ if (!IS_ERR_OR_NULL(udc_controller->transceiver))
+ udc_controller->gadget.is_otg = 1;
+@@ -2529,7 +2536,9 @@ static int fsl_udc_probe(struct platform_device *pdev)
+ err_iounmap:
+ if (pdata->exit)
+ pdata->exit(pdev);
++#ifdef CONFIG_ARCH_MXC
+ fsl_udc_clk_release();
++#endif
+ err_iounmap_noclk:
+ iounmap(dr_regs);
+ err_release_mem_region:
+@@ -2557,8 +2566,9 @@ static int fsl_udc_remove(struct platform_device *pdev)
+ udc_controller->done = &done;
+ usb_del_gadget_udc(&udc_controller->gadget);
+
++#ifdef CONFIG_ARCH_MXC
+ fsl_udc_clk_release();
+-
++#endif
+ /* DR has been stopped in usb_gadget_unregister_driver() */
+ remove_proc_file();
+
+@@ -2570,7 +2580,7 @@ static int fsl_udc_remove(struct platform_device *pdev)
+ dma_pool_destroy(udc_controller->td_pool);
+ free_irq(udc_controller->irq, udc_controller);
+ iounmap(dr_regs);
+- if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
++ if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
+ release_mem_region(res->start, resource_size(res));
+
+ /* free udc --wait for the release() finished */
+diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
+index 84715625..f76c4ddd 100644
+--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
++++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
+@@ -20,6 +20,10 @@
+ #define USB_MAX_CTRL_PAYLOAD 64
+ #define USB_DR_SYS_OFFSET 0x400
+
++#ifdef CONFIG_SOC_LS1021A
++#undef CONFIG_ARCH_MXC
++#endif
++
+ /* USB DR device mode registers (Little Endian) */
+ struct usb_dr_device {
+ /* Capability register */
+@@ -597,18 +601,6 @@ struct platform_device;
+ int fsl_udc_clk_init(struct platform_device *pdev);
+ int fsl_udc_clk_finalize(struct platform_device *pdev);
+ void fsl_udc_clk_release(void);
+-#else
+-static inline int fsl_udc_clk_init(struct platform_device *pdev)
+-{
+- return 0;
+-}
+-static inline int fsl_udc_clk_finalize(struct platform_device *pdev)
+-{
+- return 0;
+-}
+-static inline void fsl_udc_clk_release(void)
+-{
+-}
+ #endif
+
+ #endif
+diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
+index 0b80cee3..a57d95c3 100644
+--- a/drivers/usb/host/Kconfig
++++ b/drivers/usb/host/Kconfig
+@@ -165,7 +165,7 @@ config XPS_USB_HCD_XILINX
+
+ config USB_EHCI_FSL
+ tristate "Support for Freescale PPC on-chip EHCI USB controller"
+- depends on FSL_SOC
++ depends on USB_EHCI_HCD
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Variation of ARC USB block used in some Freescale chips.
+diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
+index 9f5ffb62..cd16860c 100644
+--- a/drivers/usb/host/ehci-fsl.c
++++ b/drivers/usb/host/ehci-fsl.c
+@@ -37,13 +37,141 @@
+ #include <linux/fsl_devices.h>
+ #include <linux/of_platform.h>
+
++#ifdef CONFIG_PPC
++#include <asm/fsl_pm.h>
++#include <linux/suspend.h>
++#endif
++
+ #include "ehci.h"
+ #include "ehci-fsl.h"
+
++#define FSL_USB_PHY_ADDR 0xffe214000
++
++struct ccsr_usb_port_ctrl {
++ u32 ctrl;
++ u32 drvvbuscfg;
++ u32 pwrfltcfg;
++ u32 sts;
++ u8 res_14[0xc];
++ u32 bistcfg;
++ u32 biststs;
++ u32 abistcfg;
++ u32 abiststs;
++ u8 res_30[0x10];
++ u32 xcvrprg;
++ u32 anaprg;
++ u32 anadrv;
++ u32 anasts;
++};
++
++struct ccsr_usb_phy {
++ u32 id;
++ struct ccsr_usb_port_ctrl port1;
++ u8 res_50[0xc];
++ u32 tvr;
++ u32 pllprg[4];
++ u8 res_70[0x4];
++ u32 anaccfg;
++ u32 dbg;
++ u8 res_7c[0x4];
++ struct ccsr_usb_port_ctrl port2;
++ u8 res_dc[0x334];
++};
++
+ #define DRIVER_DESC "Freescale EHCI Host controller driver"
+ #define DRV_NAME "ehci-fsl"
+
+ static struct hc_driver __read_mostly fsl_ehci_hc_driver;
++struct ehci_fsl {
++	struct ehci_hcd ehci;
++
++#ifdef CONFIG_PM
++	struct ehci_regs saved_regs;
++	struct ccsr_usb_phy saved_phy_regs;
++	/* Saved USB PHY settings, need to restore after deep sleep. */
++	u32 usb_ctrl;
++#endif
++	/*
++	 * Store the current hcd state for otg:
++	 * have_hcd is true when the host driver is already part of the
++	 * otg framework, otherwise false;
++	 * hcd_add is true when the otg framework wants to add the host
++	 * driver as part of otg, false when it wants to remove it.
++	 */
++	unsigned have_hcd:1;
++	unsigned hcd_add:1;
++};
++
++static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
++{
++	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++
++	return container_of(ehci, struct ehci_fsl, ehci);
++}
++
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++static void do_change_hcd(struct work_struct *work)
++{
++	struct ehci_hcd *ehci = container_of(work, struct ehci_hcd,
++					     change_hcd_work);
++	struct usb_hcd *hcd = ehci_to_hcd(ehci);
++	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
++	void __iomem *non_ehci = hcd->regs;
++	int retval;
++
++	if (ehci_fsl->hcd_add && !ehci_fsl->have_hcd) {
++		writel(USBMODE_CM_HOST, non_ehci + FSL_SOC_USB_USBMODE);
++		/* host, gadget and otg share the same int line */
++		retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
++		if (retval == 0)
++			ehci_fsl->have_hcd = 1;
++	} else if (!ehci_fsl->hcd_add && ehci_fsl->have_hcd) {
++		usb_remove_hcd(hcd);
++		ehci_fsl->have_hcd = 0;
++	}
++}
++#endif
+
+ /* configure so an HC device and id are always provided */
+ /* always called with process context; sleeping is OK */
+@@ -131,6 +259,12 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
+ clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
+ CONTROL_REGISTER_W1C_MASK, 0x4);
+
++ /* Set USB_EN bit to select ULPI phy for USB controller version 2.5 */
++ if (pdata->controller_ver == FSL_USB_VER_2_5 &&
++ pdata->phy_mode == FSL_USB2_PHY_ULPI)
++ iowrite32be(USB_CTRL_USB_EN, hcd->regs + FSL_SOC_USB_CTRL);
++
+ /*
+ * Enable UTMI phy and program PTS field in UTMI mode before asserting
+ * controller reset for USB Controller version 2.5
+@@ -143,16 +277,20 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
+
+ /* Don't need to set host mode here. It will be done by tdi_reset() */
+
+- retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
++ retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_NO_SUSPEND);
+ if (retval != 0)
+ goto err2;
+ device_wakeup_enable(hcd->self.controller);
+
+-#ifdef CONFIG_USB_OTG
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
+ if (pdata->operating_mode == FSL_USB2_DR_OTG) {
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+
+ hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
++
++ INIT_WORK(&ehci->change_hcd_work, do_change_hcd);
++
+ dev_dbg(&pdev->dev, "hcd=0x%p ehci=0x%p, phy=0x%p\n",
+ hcd, ehci, hcd->usb_phy);
+
+@@ -168,6 +306,11 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
+ retval = -ENODEV;
+ goto err2;
+ }
++
++ ehci_fsl->have_hcd = 1;
++ } else {
++ dev_err(&pdev->dev, "wrong operating mode\n");
++ return -ENODEV;
+ }
+ #endif
+ return retval;
+@@ -181,6 +324,18 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
+ return retval;
+ }
+
++static bool usb_phy_clk_valid(struct usb_hcd *hcd)
++{
++ void __iomem *non_ehci = hcd->regs;
++ bool ret = true;
++
++ if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID))
++ ret = false;
++
++ return ret;
++}
++
+ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
+ enum fsl_usb2_phy_modes phy_mode,
+ unsigned int port_offset)
+@@ -219,6 +374,21 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
+ /* fall through */
+ case FSL_USB2_PHY_UTMI:
+ case FSL_USB2_PHY_UTMI_DUAL:
++ if (pdata->has_fsl_erratum_a006918) {
++ pr_warn("fsl-ehci: USB PHY clock invalid\n");
++ return -EINVAL;
++ }
++
++ /* PHY_CLK_VALID bit is de-featured from all controller
++ * versions below 2.4 and is to be checked only for
++ * internal UTMI phy
++ */
++ if (pdata->controller_ver > FSL_USB_VER_2_4 &&
++ pdata->have_sysif_regs && !usb_phy_clk_valid(hcd)) {
++ pr_err("fsl-ehci: USB PHY clock invalid\n");
++ return -EINVAL;
++ }
++
+ if (pdata->have_sysif_regs && pdata->controller_ver) {
+ /* controller version 1.6 or above */
+ clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+@@ -292,14 +462,9 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
+ return -EINVAL;
+
+ if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
+- unsigned int chip, rev, svr;
+-
+- svr = mfspr(SPRN_SVR);
+- chip = svr >> 16;
+- rev = (svr >> 4) & 0xf;
+
+ /* Deal with USB Erratum #14 on MPC834x Rev 1.0 & 1.1 chips */
+- if ((rev == 1) && (chip >= 0x8050) && (chip <= 0x8055))
++ if (pdata->has_fsl_erratum_14 == 1)
+ ehci->has_fsl_port_bug = 1;
+
+ if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
+@@ -379,16 +544,57 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
+ return retval;
+ }
+
+-struct ehci_fsl {
+- struct ehci_hcd ehci;
+
+ #ifdef CONFIG_PM
+- /* Saved USB PHY settings, need to restore after deep sleep. */
+- u32 usb_ctrl;
+-#endif
+-};
++void __iomem *phy_reg;
+
+-#ifdef CONFIG_PM
++#ifdef CONFIG_PPC
++/* save usb registers */
++static int ehci_fsl_save_context(struct usb_hcd *hcd)
++{
++ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ void __iomem *non_ehci = hcd->regs;
++ struct device *dev = hcd->self.controller;
++ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
++
++ if (pdata->phy_mode == FSL_USB2_PHY_UTMI_DUAL) {
++ phy_reg = ioremap(FSL_USB_PHY_ADDR,
++ sizeof(struct ccsr_usb_phy));
++ _memcpy_fromio((void *)&ehci_fsl->saved_phy_regs, phy_reg,
++ sizeof(struct ccsr_usb_phy));
++ }
++
++ _memcpy_fromio((void *)&ehci_fsl->saved_regs, ehci->regs,
++ sizeof(struct ehci_regs));
++ ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
++
++ return 0;
++}
++
++/*Restore usb registers */
++static int ehci_fsl_restore_context(struct usb_hcd *hcd)
++{
++ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ void __iomem *non_ehci = hcd->regs;
++ struct device *dev = hcd->self.controller;
++ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
++
++ if (pdata->phy_mode == FSL_USB2_PHY_UTMI_DUAL) {
++ if (phy_reg)
++ _memcpy_toio(phy_reg,
++ (void *)&ehci_fsl->saved_phy_regs,
++ sizeof(struct ccsr_usb_phy));
++ }
++
++ _memcpy_toio(ehci->regs, (void *)&ehci_fsl->saved_regs,
++ sizeof(struct ehci_regs));
++ iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
++
++ return 0;
++}
++#endif
+
+ #ifdef CONFIG_PPC_MPC512x
+ static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
+@@ -535,26 +741,43 @@ static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev)
+ }
+ #endif /* CONFIG_PPC_MPC512x */
+
+-static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
+-{
+- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+-
+- return container_of(ehci, struct ehci_fsl, ehci);
+-}
+-
+ static int ehci_fsl_drv_suspend(struct device *dev)
+ {
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ void __iomem *non_ehci = hcd->regs;
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++ struct usb_bus *host = &hcd->self;
++#endif
++
++#ifdef CONFIG_PPC
++	suspend_state_t pm_state;
++
++	pm_state = pm_suspend_state();
++	if (pm_state == PM_SUSPEND_MEM)
++		ehci_fsl_save_context(hcd);
++#endif
+
+ if (of_device_is_compatible(dev->parent->of_node,
+ "fsl,mpc5121-usb2-dr")) {
+ return ehci_fsl_mpc512x_drv_suspend(dev);
+ }
+
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++ if (host->is_otg) {
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++
++ /* remove hcd */
++ ehci_fsl->hcd_add = 0;
++ schedule_work(&ehci->change_hcd_work);
++ host->is_otg = 0;
++ return 0;
++ }
++#endif
++
+ ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
+ device_may_wakeup(dev));
++
+ if (!fsl_deep_sleep())
+ return 0;
+
+@@ -568,12 +791,34 @@ static int ehci_fsl_drv_resume(struct device *dev)
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ void __iomem *non_ehci = hcd->regs;
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++ struct usb_bus *host = &hcd->self;
++#endif
++
++#ifdef CONFIG_PPC
++	suspend_state_t pm_state;
++
++	pm_state = pm_suspend_state();
++	if (pm_state == PM_SUSPEND_MEM)
++		ehci_fsl_restore_context(hcd);
++#endif
+
+ if (of_device_is_compatible(dev->parent->of_node,
+ "fsl,mpc5121-usb2-dr")) {
+ return ehci_fsl_mpc512x_drv_resume(dev);
+ }
+
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++ if (host->is_otg) {
++ /* add hcd */
++ ehci_fsl->hcd_add = 1;
++ schedule_work(&ehci->change_hcd_work);
++ usb_hcd_resume_root_hub(hcd);
++ host->is_otg = 0;
++ return 0;
++ }
++#endif
++
+ ehci_prepare_ports_for_controller_resume(ehci);
+ if (!fsl_deep_sleep())
+ return 0;
+diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
+index 1a8a60a5..42ea2976 100644
+--- a/drivers/usb/host/ehci-fsl.h
++++ b/drivers/usb/host/ehci-fsl.h
+@@ -63,4 +63,7 @@
+ #define UTMI_PHY_EN (1<<9)
+ #define ULPI_PHY_CLK_SEL (1<<10)
+ #define PHY_CLK_VALID (1<<17)
++
++/* Retry count for checking UTMI PHY CLK validity */
++#define UTMI_PHY_CLK_VALID_CHK_RETRY 5
+ #endif /* _EHCI_FSL_H */
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 255acca8..c8838c33 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -305,6 +305,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ USB_PORT_STAT_HIGH_SPEED)
+ fs_idle_delay = true;
+ ehci_writel(ehci, t2, reg);
++ if (ehci_has_fsl_susp_errata(ehci))
++ usleep_range(10000, 20000);
+ changed = 1;
+ }
+ }
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index 3b06bb77..f296d1fb 100644
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -180,6 +180,9 @@ struct ehci_hcd { /* one per controller */
+ unsigned periodic_count; /* periodic activity count */
+ unsigned uframe_periodic_max; /* max periodic time per uframe */
+
++#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
++ struct work_struct change_hcd_work;
++#endif
+
+ /* list of itds & sitds completed while now_frame was still active */
+ struct list_head cached_itd_list;
+@@ -706,8 +709,10 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
+ * incoming packets get corrupted in HS mode
+ */
+ #define ehci_has_fsl_hs_errata(e) ((e)->has_fsl_hs_errata)
++#define ehci_has_fsl_susp_errata(e) ((e)->has_fsl_susp_errata)
+ #else
+ #define ehci_has_fsl_hs_errata(e) (0)
++#define ehci_has_fsl_susp_errata(e) (0)
+ #endif
+
+ /*
+diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
+index f07ccb25..1e59ea9f 100644
+--- a/drivers/usb/host/fsl-mph-dr-of.c
++++ b/drivers/usb/host/fsl-mph-dr-of.c
+@@ -226,6 +226,18 @@ static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
+ of_property_read_bool(np, "fsl,usb-erratum-a007792");
+ pdata->has_fsl_erratum_a005275 =
+ of_property_read_bool(np, "fsl,usb-erratum-a005275");
++ pdata->has_fsl_erratum_a005697 =
++ of_property_read_bool(np, "fsl,usb_erratum-a005697");
++ if (of_get_property(np, "fsl,erratum_a006918", NULL))
++ pdata->has_fsl_erratum_a006918 = 1;
++ else
++ pdata->has_fsl_erratum_a006918 = 0;
++
++ if (of_get_property(np, "fsl,usb_erratum_14", NULL))
++ pdata->has_fsl_erratum_14 = 1;
++ else
++ pdata->has_fsl_erratum_14 = 0;
++
+
+ /*
+ * Determine whether phy_clk_valid needs to be checked
+diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
+index 94eb2923..836355fa 100644
+--- a/drivers/usb/phy/phy-fsl-usb.c
++++ b/drivers/usb/phy/phy-fsl-usb.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2007,2008 Freescale semiconductor, Inc.
++ * Copyright 2007,2008 Freescale Semiconductor, Inc.
+ *
+ * Author: Li Yang <LeoLi@freescale.com>
+ * Jerry Huang <Chang-Ming.Huang@freescale.com>
+@@ -463,6 +463,7 @@ void otg_reset_controller(void)
+ int fsl_otg_start_host(struct otg_fsm *fsm, int on)
+ {
+ struct usb_otg *otg = fsm->otg;
++ struct usb_bus *host = otg->host;
+ struct device *dev;
+ struct fsl_otg *otg_dev =
+ container_of(otg->usb_phy, struct fsl_otg, phy);
+@@ -486,6 +487,7 @@ int fsl_otg_start_host(struct otg_fsm *fsm, int on)
+ otg_reset_controller();
+ VDBG("host on......\n");
+ if (dev->driver->pm && dev->driver->pm->resume) {
++ host->is_otg = 1;
+ retval = dev->driver->pm->resume(dev);
+ if (fsm->id) {
+ /* default-b */
+@@ -510,8 +512,11 @@ int fsl_otg_start_host(struct otg_fsm *fsm, int on)
+ else {
+ VDBG("host off......\n");
+ if (dev && dev->driver) {
+- if (dev->driver->pm && dev->driver->pm->suspend)
++ if (dev->driver->pm &&
++ dev->driver->pm->suspend) {
++ host->is_otg = 1;
+ retval = dev->driver->pm->suspend(dev);
++ }
+ if (fsm->id)
+ /* default-b */
+ fsl_otg_drv_vbus(fsm, 0);
+@@ -539,8 +544,17 @@ int fsl_otg_start_gadget(struct otg_fsm *fsm, int on)
+ dev = otg->gadget->dev.parent;
+
+ if (on) {
+- if (dev->driver->resume)
++		/* Delay gadget resume to synchronize between the host and
++		 * gadget drivers. Upon role reversal the host driver is shut
++		 * down by a kernel worker thread. By the time the host driver
++		 * shuts down, the controller has been programmed for the
++		 * gadget role; shutting the host driver down after that point
++		 * resets the controller and it stops responding to otg events.
++		 */
++ if (dev->driver->resume) {
++ msleep(1000);
+ dev->driver->resume(dev);
++ }
+ } else {
+ if (dev->driver->suspend)
+ dev->driver->suspend(dev, otg_suspend_state);
+@@ -672,6 +686,10 @@ static void fsl_otg_event(struct work_struct *work)
+ fsl_otg_start_host(fsm, 0);
+ otg_drv_vbus(fsm, 0);
+ fsl_otg_start_gadget(fsm, 1);
++ } else {
++ fsl_otg_start_gadget(fsm, 0);
++ otg_drv_vbus(fsm, 1);
++ fsl_otg_start_host(fsm, 1);
+ }
+ }
+
+@@ -724,6 +742,7 @@ irqreturn_t fsl_otg_isr(int irq, void *dev_id)
+ {
+ struct otg_fsm *fsm = &((struct fsl_otg *)dev_id)->fsm;
+ struct usb_otg *otg = ((struct fsl_otg *)dev_id)->phy.otg;
++ struct fsl_otg *otg_dev = dev_id;
+ u32 otg_int_src, otg_sc;
+
+ otg_sc = fsl_readl(&usb_dr_regs->otgsc);
+@@ -753,18 +772,8 @@ irqreturn_t fsl_otg_isr(int irq, void *dev_id)
+ otg->gadget->is_a_peripheral = !fsm->id;
+ VDBG("ID int (ID is %d)\n", fsm->id);
+
+- if (fsm->id) { /* switch to gadget */
+- schedule_delayed_work(
+- &((struct fsl_otg *)dev_id)->otg_event,
+- 100);
+- } else { /* switch to host */
+- cancel_delayed_work(&
+- ((struct fsl_otg *)dev_id)->
+- otg_event);
+- fsl_otg_start_gadget(fsm, 0);
+- otg_drv_vbus(fsm, 1);
+- fsl_otg_start_host(fsm, 1);
+- }
++ schedule_delayed_work(&otg_dev->otg_event, 100);
++
+ return IRQ_HANDLED;
+ }
+ }
+@@ -923,12 +932,32 @@ int usb_otg_start(struct platform_device *pdev)
+ temp &= ~(PORTSC_PHY_TYPE_SEL | PORTSC_PTW);
+ switch (pdata->phy_mode) {
+ case FSL_USB2_PHY_ULPI:
++ if (pdata->controller_ver) {
++ /* controller version 1.6 or above */
++ setbits32(&p_otg->dr_mem_map->control,
++ USB_CTRL_ULPI_PHY_CLK_SEL);
++ /*
++ * Due to controller issue of PHY_CLK_VALID in ULPI
++ * mode, we set USB_CTRL_USB_EN before checking
++ * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
++ */
++ clrsetbits_be32(&p_otg->dr_mem_map->control,
++ USB_CTRL_UTMI_PHY_EN, USB_CTRL_IOENB);
++ }
+ temp |= PORTSC_PTS_ULPI;
+ break;
+ case FSL_USB2_PHY_UTMI_WIDE:
+ temp |= PORTSC_PTW_16BIT;
+ /* fall through */
+ case FSL_USB2_PHY_UTMI:
++ if (pdata->controller_ver) {
++ /* controller version 1.6 or above */
++ setbits32(&p_otg->dr_mem_map->control,
++ USB_CTRL_UTMI_PHY_EN);
++ /* Delay for UTMI PHY CLK to become stable - 10ms */
++ mdelay(FSL_UTMI_PHY_DLY);
++ }
++ setbits32(&p_otg->dr_mem_map->control, USB_CTRL_UTMI_PHY_EN);
+ temp |= PORTSC_PTS_UTMI;
+ /* fall through */
+ default:
+diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
+index 23149954..c4c08730 100644
+--- a/drivers/usb/phy/phy-fsl-usb.h
++++ b/drivers/usb/phy/phy-fsl-usb.h
+@@ -199,6 +199,14 @@
+ /* control Register Bit Masks */
+ #define USB_CTRL_IOENB (0x1<<2)
+ #define USB_CTRL_ULPI_INT0EN (0x1<<0)
++#define USB_CTRL_WU_INT_EN (0x1<<1)
++#define USB_CTRL_LINE_STATE_FILTER__EN (0x1<<3)
++#define USB_CTRL_KEEP_OTG_ON (0x1<<4)
++#define USB_CTRL_OTG_PORT (0x1<<5)
++#define USB_CTRL_PLL_RESET (0x1<<8)
++#define USB_CTRL_UTMI_PHY_EN (0x1<<9)
++#define USB_CTRL_ULPI_PHY_CLK_SEL (0x1<<10)
++#define USB_CTRL_PHY_CLK_VALID (0x1<<17)
+
+ /* BCSR5 */
+ #define BCSR5_INT_USB (0x02)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index eba1f10e..c334e281 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -362,6 +362,7 @@ struct usb_bus {
+ * for control transfers?
+ */
+ u8 otg_port; /* 0, or number of OTG/HNP port */
++ unsigned is_otg:1; /* true when host is also otg */
+ unsigned is_b_host:1; /* true during some HNP roleswitches */
+ unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */
+ unsigned no_stop_on_short:1; /*
+diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
+index 5ff9032e..2a57e0d2 100644
+--- a/include/linux/usb/of.h
++++ b/include/linux/usb/of.h
+@@ -11,6 +11,8 @@
+ #include <linux/usb/otg.h>
+ #include <linux/usb/phy.h>
+
++enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
++
+ #if IS_ENABLED(CONFIG_OF)
+ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
+ bool of_usb_host_tpl_support(struct device_node *np);
+--
+2.14.1
+
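The dwc3_set_soc_bus_cfg() function added in the usb patch above picks one
GSBUSCFG0 enable bit from the largest "snps,incr-burst-type-adjustment"
value. That mapping, extracted into a stand-alone sketch (the bit values
mirror the DWC3_GSBUSCFG0_INCR*BRSTENA defines added to core.h above; the
helper name is illustrative, not part of the patch):

	#include <stdint.h>

	/* GSBUSCFG0 INCRx burst enable bits, as in the core.h hunk above. */
	#define INCR256BRSTENA (1u << 7)
	#define INCR128BRSTENA (1u << 6)
	#define INCR64BRSTENA  (1u << 5)
	#define INCR32BRSTENA  (1u << 4)
	#define INCR16BRSTENA  (1u << 3)
	#define INCR8BRSTENA   (1u << 2)
	#define INCR4BRSTENA   (1u << 1)

	/* Largest supported INCRx burst -> its enable bit; 1 means none. */
	static uint32_t incrx_enable_bit(unsigned int burst)
	{
		switch (burst) {
		case 256: return INCR256BRSTENA;
		case 128: return INCR128BRSTENA;
		case 64:  return INCR64BRSTENA;
		case 32:  return INCR32BRSTENA;
		case 16:  return INCR16BRSTENA;
		case 8:   return INCR8BRSTENA;
		case 4:   return INCR4BRSTENA;
		default:  return 0;
		}
	}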
diff --git a/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch b/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch
new file mode 100644
index 0000000000..1dba7b3158
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch
@@ -0,0 +1,1192 @@
+From 8d82d92ea697145c32bb36d9f39afd5bb0927bc2 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Wed, 27 Sep 2017 10:34:46 +0800
+Subject: [PATCH] vfio: support layerscape
+
+This is an integrated patch for layerscape VFIO support.
+
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
+Signed-off-by: Eric Auger <eric.auger@redhat.com>
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/vfio/Kconfig | 1 +
+ drivers/vfio/Makefile | 1 +
+ drivers/vfio/fsl-mc/Kconfig | 9 +
+ drivers/vfio/fsl-mc/Makefile | 2 +
+ drivers/vfio/fsl-mc/vfio_fsl_mc.c | 753 ++++++++++++++++++++++++++++++
+ drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 199 ++++++++
+ drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 55 +++
+ drivers/vfio/vfio_iommu_type1.c | 39 +-
+ include/uapi/linux/vfio.h | 1 +
+ 9 files changed, 1058 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/vfio/fsl-mc/Kconfig
+ create mode 100644 drivers/vfio/fsl-mc/Makefile
+ create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
+ create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+ create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+
+diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
+index da6e2ce7..8a8a33e0 100644
+--- a/drivers/vfio/Kconfig
++++ b/drivers/vfio/Kconfig
+@@ -48,4 +48,5 @@ menuconfig VFIO_NOIOMMU
+
+ source "drivers/vfio/pci/Kconfig"
+ source "drivers/vfio/platform/Kconfig"
++source "drivers/vfio/fsl-mc/Kconfig"
+ source "virt/lib/Kconfig"
+diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
+index 7b8a31f6..560f0c67 100644
+--- a/drivers/vfio/Makefile
++++ b/drivers/vfio/Makefile
+@@ -7,3 +7,4 @@ obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
+ obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o
+ obj-$(CONFIG_VFIO_PCI) += pci/
+ obj-$(CONFIG_VFIO_PLATFORM) += platform/
++obj-$(CONFIG_VFIO_FSL_MC) += fsl-mc/
+diff --git a/drivers/vfio/fsl-mc/Kconfig b/drivers/vfio/fsl-mc/Kconfig
+new file mode 100644
+index 00000000..b1a527d6
+--- /dev/null
++++ b/drivers/vfio/fsl-mc/Kconfig
+@@ -0,0 +1,9 @@
++config VFIO_FSL_MC
++ tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
++ depends on VFIO && FSL_MC_BUS && EVENTFD
++ help
++ Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc
++ (Management Complex) devices. This is required to passthrough
++ fsl-mc bus devices using the VFIO framework.
++
++ If you don't know what to do here, say N.
+diff --git a/drivers/vfio/fsl-mc/Makefile b/drivers/vfio/fsl-mc/Makefile
+new file mode 100644
+index 00000000..2aca75af
+--- /dev/null
++++ b/drivers/vfio/fsl-mc/Makefile
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_VFIO_FSL_MC) += vfio_fsl_mc.o vfio_fsl_mc_intr.o
+diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+new file mode 100644
+index 00000000..9dc32d27
+--- /dev/null
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+@@ -0,0 +1,753 @@
++/*
++ * Freescale Management Complex (MC) device passthrough using VFIO
++ *
++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016-2017 NXP
++ * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/device.h>
++#include <linux/iommu.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/vfio.h>
++#include <linux/delay.h>
++
++#include "../../staging/fsl-mc/include/mc.h"
++#include "../../staging/fsl-mc/include/mc-bus.h"
++#include "../../staging/fsl-mc/include/mc-sys.h"
++#include "../../staging/fsl-mc/bus/dprc-cmd.h"
++
++#include "vfio_fsl_mc_private.h"
++
++#define DRIVER_VERSION "0.10"
++#define DRIVER_AUTHOR "Bharat Bhushan <bharat.bhushan@nxp.com>"
++#define DRIVER_DESC "VFIO for FSL-MC devices - User Level meta-driver"
++
++static DEFINE_MUTEX(driver_lock);
++
++/* FSL-MC device regions (address and size) are aligned to 64K, while the MC
++ * firmware reports a size smaller than 64K for some objects (it reports the
++ * size without the reserved space beyond the valid bytes).
++ * Align the size to PAGE_SIZE for userspace to mmap.
++ */
++static size_t aligned_region_size(struct fsl_mc_device *mc_dev, int index)
++{
++ size_t size;
++
++ size = resource_size(&mc_dev->regions[index]);
++ return PAGE_ALIGN(size);
++}
++
++static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
++{
++ struct fsl_mc_device *mc_dev = vdev->mc_dev;
++ int count = mc_dev->obj_desc.region_count;
++ int i;
++
++ vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
++ GFP_KERNEL);
++ if (!vdev->regions)
++ return -ENOMEM;
++
++ for (i = 0; i < mc_dev->obj_desc.region_count; i++) {
++ vdev->regions[i].addr = mc_dev->regions[i].start;
++ vdev->regions[i].size = aligned_region_size(mc_dev, i);
++ vdev->regions[i].type = VFIO_FSL_MC_REGION_TYPE_MMIO;
++ if (mc_dev->regions[i].flags & IORESOURCE_CACHEABLE)
++ vdev->regions[i].type |=
++ VFIO_FSL_MC_REGION_TYPE_CACHEABLE;
++ vdev->regions[i].flags = VFIO_REGION_INFO_FLAG_MMAP;
++ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
++ if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
++ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
++ }
++
++ vdev->num_regions = mc_dev->obj_desc.region_count;
++ return 0;
++}
++
++static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
++{
++ int i;
++
++ for (i = 0; i < vdev->num_regions; i++)
++ iounmap(vdev->regions[i].ioaddr);
++
++ vdev->num_regions = 0;
++ kfree(vdev->regions);
++}
++
++static int vfio_fsl_mc_open(void *device_data)
++{
++ struct vfio_fsl_mc_device *vdev = device_data;
++ int ret;
++
++ if (!try_module_get(THIS_MODULE))
++ return -ENODEV;
++
++ mutex_lock(&driver_lock);
++ if (!vdev->refcnt) {
++ ret = vfio_fsl_mc_regions_init(vdev);
++ if (ret)
++ goto error_region_init;
++
++ ret = vfio_fsl_mc_irqs_init(vdev);
++ if (ret)
++ goto error_irq_init;
++ }
++
++ vdev->refcnt++;
++ mutex_unlock(&driver_lock);
++ return 0;
++
++error_irq_init:
++ vfio_fsl_mc_regions_cleanup(vdev);
++error_region_init:
++ mutex_unlock(&driver_lock);
++ if (ret)
++ module_put(THIS_MODULE);
++
++ return ret;
++}
++
++static void vfio_fsl_mc_release(void *device_data)
++{
++ struct vfio_fsl_mc_device *vdev = device_data;
++ struct fsl_mc_device *mc_dev = vdev->mc_dev;
++
++ mutex_lock(&driver_lock);
++
++ if (!(--vdev->refcnt)) {
++ vfio_fsl_mc_regions_cleanup(vdev);
++ vfio_fsl_mc_irqs_cleanup(vdev);
++ }
++
++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
++ dprc_reset_container(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ mc_dev->obj_desc.id);
++
++ mutex_unlock(&driver_lock);
++
++ module_put(THIS_MODULE);
++}
++
++static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
++ unsigned long arg)
++{
++ struct vfio_fsl_mc_device *vdev = device_data;
++ struct fsl_mc_device *mc_dev = vdev->mc_dev;
++ unsigned long minsz;
++
++ if (WARN_ON(!mc_dev))
++ return -ENODEV;
++
++ switch (cmd) {
++ case VFIO_DEVICE_GET_INFO:
++ {
++ struct vfio_device_info info;
++
++ minsz = offsetofend(struct vfio_device_info, num_irqs);
++
++ if (copy_from_user(&info, (void __user *)arg, minsz))
++ return -EFAULT;
++
++ if (info.argsz < minsz)
++ return -EINVAL;
++
++ info.flags = VFIO_DEVICE_FLAGS_FSL_MC;
++ info.num_regions = mc_dev->obj_desc.region_count;
++ info.num_irqs = mc_dev->obj_desc.irq_count;
++
++ return copy_to_user((void __user *)arg, &info, minsz) ?
++ -EFAULT : 0;
++ }
++ case VFIO_DEVICE_GET_REGION_INFO:
++ {
++ struct vfio_region_info info;
++
++ minsz = offsetofend(struct vfio_region_info, offset);
++
++ if (copy_from_user(&info, (void __user *)arg, minsz))
++ return -EFAULT;
++
++ if (info.argsz < minsz)
++ return -EINVAL;
++
++ if (info.index >= vdev->num_regions)
++ return -EINVAL;
++
++ /* map offset to the physical address */
++ info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
++ info.size = vdev->regions[info.index].size;
++ info.flags = vdev->regions[info.index].flags;
++
++ return copy_to_user((void __user *)arg, &info, minsz) ?
++ -EFAULT : 0;
++ }
++ case VFIO_DEVICE_GET_IRQ_INFO:
++ {
++ struct vfio_irq_info info;
++
++ minsz = offsetofend(struct vfio_irq_info, count);
++ if (copy_from_user(&info, (void __user *)arg, minsz))
++ return -EFAULT;
++
++ if (info.argsz < minsz)
++ return -EINVAL;
++
++ if (info.index >= mc_dev->obj_desc.irq_count)
++ return -EINVAL;
++
++ if (vdev->mc_irqs != NULL) {
++ info.flags = vdev->mc_irqs[info.index].flags;
++ info.count = vdev->mc_irqs[info.index].count;
++ } else {
++ /*
++ * If IRQs are not initialized then they cannot
++ * be configured and used by user-space.
++ */
++ info.flags = 0;
++ info.count = 0;
++ }
++
++ return copy_to_user((void __user *)arg, &info, minsz) ?
++ -EFAULT : 0;
++ }
++ case VFIO_DEVICE_SET_IRQS:
++ {
++ struct vfio_irq_set hdr;
++ u8 *data = NULL;
++ int ret = 0;
++
++ minsz = offsetofend(struct vfio_irq_set, count);
++
++ if (copy_from_user(&hdr, (void __user *)arg, minsz))
++ return -EFAULT;
++
++ if (hdr.argsz < minsz)
++ return -EINVAL;
++
++ if (hdr.index >= mc_dev->obj_desc.irq_count)
++ return -EINVAL;
++
++ if (hdr.start != 0 || hdr.count > 1)
++ return -EINVAL;
++
++ if (hdr.count == 0 &&
++ (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE) ||
++ !(hdr.flags & VFIO_IRQ_SET_ACTION_TRIGGER)))
++ return -EINVAL;
++
++ if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
++ VFIO_IRQ_SET_ACTION_TYPE_MASK))
++ return -EINVAL;
++
++ if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
++ size_t size;
++
++ if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
++ size = sizeof(uint8_t);
++ else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
++ size = sizeof(int32_t);
++ else
++ return -EINVAL;
++
++ if (hdr.argsz - minsz < hdr.count * size)
++ return -EINVAL;
++
++ data = memdup_user((void __user *)(arg + minsz),
++ hdr.count * size);
++ if (IS_ERR(data))
++ return PTR_ERR(data);
++ }
++
++ ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
++ hdr.index, hdr.start,
++ hdr.count, data);
++ return ret;
++ }
++ case VFIO_DEVICE_RESET:
++ {
++ return -EINVAL;
++ }
++ default:
++ return -EINVAL;
++ }
++}
++
++static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct vfio_fsl_mc_device *vdev = device_data;
++ unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
++ loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
++ struct vfio_fsl_mc_region *region;
++ uint64_t data[8];
++ int i;
++
++ /* Reads are supported only for the DPRC device */
++ if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
++ return -EINVAL;
++
++ if (index >= vdev->num_regions)
++ return -EINVAL;
++
++ region = &vdev->regions[index];
++
++ if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
++ return -EINVAL;
++
++ if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
++ return -EINVAL;
++
++ if (!region->ioaddr) {
++ region->ioaddr = ioremap_nocache(region->addr, region->size);
++ if (!region->ioaddr)
++ return -ENOMEM;
++ }
++
++ if (count != 64 || off != 0)
++ return -EINVAL;
++
++ for (i = 7; i >= 0; i--)
++ data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
++
++ if (copy_to_user(buf, data, 64))
++ return -EFAULT;
++
++ return count;
++}
++
++#define MC_CMD_COMPLETION_TIMEOUT_MS 5000
++#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
++
++static int vfio_fsl_mc_dprc_wait_for_response(void __iomem *ioaddr)
++{
++ enum mc_cmd_status status;
++ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
++
++ for (;;) {
++ u64 header;
++ struct mc_cmd_header *resp_hdr;
++
++ __iormb();
++ header = readq(ioaddr);
++ __iormb();
++
++ resp_hdr = (struct mc_cmd_header *)&header;
++ status = (enum mc_cmd_status)resp_hdr->status;
++ if (status != MC_CMD_STATUS_READY)
++ break;
++
++ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
++ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
++ if (timeout_usecs == 0)
++ return -ETIMEDOUT;
++ }
++
++ return 0;
++}
++
++static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
++{
++ int i;
++
++ /* Write at command header in the end */
++ for (i = 7; i >= 0; i--)
++ writeq(cmd_data[i], ioaddr + i * sizeof(uint64_t));
++
++ /* Wait for response before returning to user-space
++ * This can be optimized in future to even prepare response
++ * before returning to user-space and avoid read ioctl.
++ */
++ return vfio_fsl_mc_dprc_wait_for_response(ioaddr);
++}
++
++static int vfio_handle_dprc_commands(void __iomem *ioaddr, uint64_t *cmd_data)
++{
++ uint64_t cmd_hdr = cmd_data[0];
++ int cmd = (cmd_hdr >> 52) & 0xfff;
++
++ switch (cmd) {
++ case DPRC_CMDID_OPEN:
++ default:
++ return vfio_fsl_mc_send_command(ioaddr, cmd_data);
++ }
++
++ return 0;
++}
++
++static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct vfio_fsl_mc_device *vdev = device_data;
++ unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
++ loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
++ struct vfio_fsl_mc_region *region;
++ uint64_t data[8];
++ int ret;
++
++ /* Writes are supported only for the DPRC device */
++ if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
++ return -EINVAL;
++
++ if (index >= vdev->num_regions)
++ return -EINVAL;
++
++ region = &vdev->regions[index];
++
++ if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
++ return -EINVAL;
++
++ if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
++ return -EINVAL;
++
++ if (!region->ioaddr) {
++ region->ioaddr = ioremap_nocache(region->addr, region->size);
++ if (!region->ioaddr)
++ return -ENOMEM;
++ }
++
++ if (count != 64 || off != 0)
++ return -EINVAL;
++
++ if (copy_from_user(data, buf, 64))
++ return -EFAULT;
++
++ ret = vfio_handle_dprc_commands(region->ioaddr, data);
++ if (ret)
++ return ret;
++
++ return count;
++}
++
++static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
++ struct vm_area_struct *vma)
++{
++ u64 size = vma->vm_end - vma->vm_start;
++ u64 pgoff, base;
++
++ pgoff = vma->vm_pgoff &
++ ((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
++ base = pgoff << PAGE_SHIFT;
++
++ if (region.size < PAGE_SIZE || base + size > region.size)
++ return -EINVAL;
++ /*
++ * Map REGION_TYPE_CACHEABLE regions (the QBman CENA area) as
++ * cacheable but non-shareable so that a user migrating to
++ * another core does not hit coherency problems; all other
++ * regions are mapped non-cacheable.
++ */
++ if (region.type & VFIO_FSL_MC_REGION_TYPE_CACHEABLE)
++ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
++ else
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
++
++ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ size, vma->vm_page_prot);
++}
++
++/* Allow mmap of fsl-mc device regions in the assigned DPRC */
++static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
++{
++ struct vfio_fsl_mc_device *vdev = device_data;
++ struct fsl_mc_device *mc_dev = vdev->mc_dev;
++ unsigned long size, addr;
++ int index;
++
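++ /* Recover the region index from the upper bits of vm_pgoff */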
++ index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
++
++ if (vma->vm_end < vma->vm_start)
++ return -EINVAL;
++ if (vma->vm_start & ~PAGE_MASK)
++ return -EINVAL;
++ if (vma->vm_end & ~PAGE_MASK)
++ return -EINVAL;
++ if (!(vma->vm_flags & VM_SHARED))
++ return -EINVAL;
++ if (index >= vdev->num_regions)
++ return -EINVAL;
++
++ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
++ return -EINVAL;
++
++ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
++ && (vma->vm_flags & VM_READ))
++ return -EINVAL;
++
++ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
++ && (vma->vm_flags & VM_WRITE))
++ return -EINVAL;
++
++ addr = vdev->regions[index].addr;
++ size = vdev->regions[index].size;
++
++ vma->vm_private_data = mc_dev;
++
++ if (vdev->regions[index].type & VFIO_FSL_MC_REGION_TYPE_MMIO)
++ return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
++
++ return -EFAULT;
++}
++
++static const struct vfio_device_ops vfio_fsl_mc_ops = {
++ .name = "vfio-fsl-mc",
++ .open = vfio_fsl_mc_open,
++ .release = vfio_fsl_mc_release,
++ .ioctl = vfio_fsl_mc_ioctl,
++ .read = vfio_fsl_mc_read,
++ .write = vfio_fsl_mc_write,
++ .mmap = vfio_fsl_mc_mmap,
++};
++
++static int vfio_fsl_mc_initialize_dprc(struct vfio_fsl_mc_device *vdev)
++{
++ struct device *root_dprc_dev;
++ struct fsl_mc_device *mc_dev = vdev->mc_dev;
++ struct device *dev = &mc_dev->dev;
++ struct fsl_mc_bus *mc_bus;
++ struct irq_domain *mc_msi_domain;
++ unsigned int irq_count;
++ int ret;
++
++ /* device must be DPRC */
++ if (strcmp(mc_dev->obj_desc.type, "dprc"))
++ return -EINVAL;
++
++ /* mc_io must be un-initialized */
++ WARN_ON(mc_dev->mc_io);
++
++ /* allocate a portal from the root DPRC for vfio use */
++ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
++ if (WARN_ON(!root_dprc_dev))
++ return -EINVAL;
++
++ ret = fsl_mc_portal_allocate(to_fsl_mc_device(root_dprc_dev),
++ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &mc_dev->mc_io);
++ if (ret < 0)
++ goto clean_msi_domain;
++
++ /* Reset the MC portal before moving on */
++ ret = fsl_mc_portal_reset(mc_dev->mc_io);
++ if (ret < 0) {
++ dev_err(dev, "dprc portal reset failed: error = %d\n", ret);
++ goto free_mc_portal;
++ }
++
++ /* MSI domain set up */
++ ret = fsl_mc_find_msi_domain(root_dprc_dev->parent, &mc_msi_domain);
++ if (ret < 0)
++ goto free_mc_portal;
++
++ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
++
++ ret = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
++ &mc_dev->mc_handle);
++ if (ret) {
++ dev_err(dev, "dprc_open() failed: error = %d\n", ret);
++ goto free_mc_portal;
++ }
++
++ /* Initialize resource pool */
++ fsl_mc_init_all_resource_pools(mc_dev);
++
++ mc_bus = to_fsl_mc_bus(mc_dev);
++
++ if (!mc_bus->irq_resources) {
++ irq_count = FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS;
++ ret = fsl_mc_populate_irq_pool(mc_bus, irq_count);
++ if (ret < 0) {
++ dev_err(dev, "%s: Failed to init irq-pool\n", __func__);
++ goto clean_resource_pool;
++ }
++ }
++
++ mutex_init(&mc_bus->scan_mutex);
++
++ mutex_lock(&mc_bus->scan_mutex);
++ ret = dprc_scan_objects(mc_dev, mc_dev->driver_override,
++ &irq_count);
++ mutex_unlock(&mc_bus->scan_mutex);
++ if (ret) {
++ dev_err(dev, "dprc_scan_objects() fails (%d)\n", ret);
++ goto clean_irq_pool;
++ }
++
++ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
++ dev_warn(&mc_dev->dev,
++ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
++ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
++ }
++
++ return 0;
++
++clean_irq_pool:
++ fsl_mc_cleanup_irq_pool(mc_bus);
++
++clean_resource_pool:
++ fsl_mc_cleanup_all_resource_pools(mc_dev);
++ dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++
++free_mc_portal:
++ fsl_mc_portal_free(mc_dev->mc_io);
++
++clean_msi_domain:
++ dev_set_msi_domain(&mc_dev->dev, NULL);
++
++ return ret;
++}
++
++static int vfio_fsl_mc_device_remove(struct device *dev, void *data)
++{
++ struct fsl_mc_device *mc_dev;
++
++ if (WARN_ON(!dev))
++ return -ENODEV;
++
++ mc_dev = to_fsl_mc_device(dev);
++
++ fsl_mc_device_remove(mc_dev);
++ return 0;
++}
++
++static void vfio_fsl_mc_cleanup_dprc(struct vfio_fsl_mc_device *vdev)
++{
++ struct fsl_mc_device *mc_dev = vdev->mc_dev;
++ struct fsl_mc_bus *mc_bus;
++
++ /* device must be DPRC */
++ if (strcmp(mc_dev->obj_desc.type, "dprc"))
++ return;
++
++ device_for_each_child(&mc_dev->dev, NULL, vfio_fsl_mc_device_remove);
++
++ mc_bus = to_fsl_mc_bus(mc_dev);
++ if (dev_get_msi_domain(&mc_dev->dev))
++ fsl_mc_cleanup_irq_pool(mc_bus);
++
++ dev_set_msi_domain(&mc_dev->dev, NULL);
++
++ fsl_mc_cleanup_all_resource_pools(mc_dev);
++ dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++ fsl_mc_portal_free(mc_dev->mc_io);
++}
++
++static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
++{
++ struct iommu_group *group;
++ struct vfio_fsl_mc_device *vdev;
++ struct device *dev = &mc_dev->dev;
++ int ret;
++
++ group = vfio_iommu_group_get(dev);
++ if (!group) {
++ dev_err(dev, "%s: VFIO: No IOMMU group\n", __func__);
++ return -EINVAL;
++ }
++
++ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
++ if (!vdev) {
++ vfio_iommu_group_put(group, dev);
++ return -ENOMEM;
++ }
++
++ vdev->mc_dev = mc_dev;
++
++ ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
++ if (ret) {
++ dev_err(dev, "%s: Failed to add to vfio group\n", __func__);
++ goto free_vfio_device;
++ }
++
++ /* For a DPRC, scan the container and bind its children to the vfio driver */
++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
++ ret = vfio_fsl_mc_initialize_dprc(vdev);
++ if (ret) {
++ vfio_del_group_dev(dev);
++ goto free_vfio_device;
++ }
++ } else {
++ struct fsl_mc_device *mc_bus_dev;
++
++ /* Non-dprc devices share mc_io from the parent dprc */
++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
++ if (mc_bus_dev == NULL) {
++ ret = -ENODEV;
++ vfio_del_group_dev(dev);
++ goto free_vfio_device;
++ }
++
++ mc_dev->mc_io = mc_bus_dev->mc_io;
++
++ /* Inherit parent MSI domain */
++ dev_set_msi_domain(&mc_dev->dev,
++ dev_get_msi_domain(mc_dev->dev.parent));
++ }
++ return 0;
++
++free_vfio_device:
++ kfree(vdev);
++ vfio_iommu_group_put(group, dev);
++ return ret;
++}
++
++static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
++{
++ struct vfio_fsl_mc_device *vdev;
++ struct device *dev = &mc_dev->dev;
++
++ vdev = vfio_del_group_dev(dev);
++ if (!vdev)
++ return -EINVAL;
++
++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
++ vfio_fsl_mc_cleanup_dprc(vdev);
++ else
++ dev_set_msi_domain(&mc_dev->dev, NULL);
++
++ mc_dev->mc_io = NULL;
++
++ vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
++ kfree(vdev);
++
++ return 0;
++}
++
++/*
++ * vfio-fsl-mc is a meta-driver: match_id_table is NULL, so devices are
++ * bound to it explicitly through the driver_override interface.
++ */
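++/*
++ * For example, userspace hands a DPRC container to this driver via
++ * sysfs (the dprc.2 name is illustrative):
++ *   echo vfio-fsl-mc > /sys/bus/fsl-mc/devices/dprc.2/driver_override
++ *   echo dprc.2 > /sys/bus/fsl-mc/drivers/vfio-fsl-mc/bind
++ */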
++static struct fsl_mc_driver vfio_fsl_mc_driver = {
++ .probe = vfio_fsl_mc_probe,
++ .remove = vfio_fsl_mc_remove,
++ .match_id_table = NULL,
++ .driver = {
++ .name = "vfio-fsl-mc",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init vfio_fsl_mc_driver_init(void)
++{
++ return fsl_mc_driver_register(&vfio_fsl_mc_driver);
++}
++
++static void __exit vfio_fsl_mc_driver_exit(void)
++{
++ fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
++}
++
++module_init(vfio_fsl_mc_driver_init);
++module_exit(vfio_fsl_mc_driver_exit);
++
++MODULE_VERSION(DRIVER_VERSION);
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
+diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+new file mode 100644
+index 00000000..eb244bb0
+--- /dev/null
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+@@ -0,0 +1,199 @@
++/*
++ * Freescale Management Complex (MC) device passthrough using VFIO
++ *
++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
++ * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/vfio.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/eventfd.h>
++#include <linux/msi.h>
++
++#include "../../staging/fsl-mc/include/mc.h"
++#include "vfio_fsl_mc_private.h"
++
++static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
++{
++ struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
++
++ eventfd_signal(mc_irq->trigger, 1);
++ return IRQ_HANDLED;
++}
++
++static int vfio_fsl_mc_irq_mask(struct vfio_fsl_mc_device *vdev,
++ unsigned int index, unsigned int start,
++ unsigned int count, uint32_t flags,
++ void *data)
++{
++ return -EINVAL;
++}
++
++static int vfio_fsl_mc_irq_unmask(struct vfio_fsl_mc_device *vdev,
++ unsigned int index, unsigned int start,
++ unsigned int count, uint32_t flags,
++ void *data)
++{
++ return -EINVAL;
++}
++
++static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
++ int index, int fd)
++{
++ struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
++ struct eventfd_ctx *trigger;
++ int hwirq;
++ int ret;
++
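++ /* MC interrupts are MSI-backed; the Linux IRQ number comes from
++ * the MSI descriptor allocated for this index.
++ */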
++ hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
++ if (irq->trigger) {
++ free_irq(hwirq, irq);
++ kfree(irq->name);
++ eventfd_ctx_put(irq->trigger);
++ irq->trigger = NULL;
++ }
++
++ if (fd < 0) /* Disable only */
++ return 0;
++
++ irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
++ hwirq, dev_name(&vdev->mc_dev->dev));
++ if (!irq->name)
++ return -ENOMEM;
++
++ trigger = eventfd_ctx_fdget(fd);
++ if (IS_ERR(trigger)) {
++ kfree(irq->name);
++ return PTR_ERR(trigger);
++ }
++
++ irq->trigger = trigger;
++
++ ret = request_irq(hwirq, vfio_fsl_mc_irq_handler, 0,
++ irq->name, irq);
++ if (ret) {
++ kfree(irq->name);
++ eventfd_ctx_put(trigger);
++ irq->trigger = NULL;
++ return ret;
++ }
++
++ return 0;
++}
++
++int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev)
++{
++ struct fsl_mc_device *mc_dev = vdev->mc_dev;
++ struct vfio_fsl_mc_irq *mc_irq;
++ int irq_count;
++ int ret, i;
++
++ /* Device does not support any interrupts */
++ if (mc_dev->obj_desc.irq_count == 0)
++ return 0;
++
++ irq_count = mc_dev->obj_desc.irq_count;
++
++ mc_irq = kcalloc(irq_count, sizeof(*mc_irq), GFP_KERNEL);
++ if (mc_irq == NULL)
++ return -ENOMEM;
++
++ /* Allocate IRQs */
++ ret = fsl_mc_allocate_irqs(mc_dev);
++ if (ret) {
++ kfree(mc_irq);
++ return ret;
++ }
++
++ for (i = 0; i < irq_count; i++) {
++ mc_irq[i].count = 1;
++ mc_irq[i].flags = VFIO_IRQ_INFO_EVENTFD;
++ }
++
++ vdev->mc_irqs = mc_irq;
++
++ return 0;
++}
++
++/* Free All IRQs for the given MC object */
++void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev)
++{
++ struct fsl_mc_device *mc_dev = vdev->mc_dev;
++ int irq_count = mc_dev->obj_desc.irq_count;
++ int i;
++
++ /* Device does not support any interrupts */
++ if (mc_dev->obj_desc.irq_count == 0)
++ return;
++
++ for (i = 0; i < irq_count; i++)
++ vfio_set_trigger(vdev, i, -1);
++
++ fsl_mc_free_irqs(mc_dev);
++ kfree(vdev->mc_irqs);
++}
++
++static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
++ unsigned int index, unsigned int start,
++ unsigned int count, uint32_t flags,
++ void *data)
++{
++ struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
++ int hwirq;
++
++ if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
++ return vfio_set_trigger(vdev, index, -1);
++
++ if (start != 0 || count != 1)
++ return -EINVAL;
++
++ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
++ int32_t fd = *(int32_t *)data;
++
++ return vfio_set_trigger(vdev, index, fd);
++ }
++
++ hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
++
++ if (flags & VFIO_IRQ_SET_DATA_NONE) {
++ vfio_fsl_mc_irq_handler(hwirq, irq);
++
++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
++ uint8_t trigger = *(uint8_t *)data;
++
++ if (trigger)
++ vfio_fsl_mc_irq_handler(hwirq, irq);
++ }
++
++ return 0;
++}
++
++int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
++ uint32_t flags, unsigned int index,
++ unsigned int start, unsigned int count,
++ void *data)
++{
++ int ret = -ENOTTY;
++
++ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
++ case VFIO_IRQ_SET_ACTION_MASK:
++ ret = vfio_fsl_mc_irq_mask(vdev, index, start, count,
++ flags, data);
++ break;
++ case VFIO_IRQ_SET_ACTION_UNMASK:
++ ret = vfio_fsl_mc_irq_unmask(vdev, index, start, count,
++ flags, data);
++ break;
++ case VFIO_IRQ_SET_ACTION_TRIGGER:
++ ret = vfio_fsl_mc_set_irq_trigger(vdev, index, start,
++ count, flags, data);
++ break;
++ }
++
++ return ret;
++}
+diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+new file mode 100644
+index 00000000..34e75754
+--- /dev/null
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+@@ -0,0 +1,55 @@
++/*
++ * Freescale Management Complex VFIO private declarations
++ *
++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016 NXP
++ * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#ifndef VFIO_FSL_MC_PRIVATE_H
++#define VFIO_FSL_MC_PRIVATE_H
++
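++/*
++ * A file offset encodes both the target region and the offset within
++ * it: bits [63:40] hold the region index, bits [39:0] the offset
++ * inside that region.
++ */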
++#define VFIO_FSL_MC_OFFSET_SHIFT 40
++#define VFIO_FSL_MC_OFFSET_MASK (((u64)(1) << VFIO_FSL_MC_OFFSET_SHIFT) - 1)
++
++#define VFIO_FSL_MC_OFFSET_TO_INDEX(off) ((off) >> VFIO_FSL_MC_OFFSET_SHIFT)
++
++#define VFIO_FSL_MC_INDEX_TO_OFFSET(index) \
++ ((u64)(index) << VFIO_FSL_MC_OFFSET_SHIFT)
++
++struct vfio_fsl_mc_irq {
++ u32 flags;
++ u32 count;
++ struct eventfd_ctx *trigger;
++ char *name;
++};
++
++struct vfio_fsl_mc_region {
++ u32 flags;
++#define VFIO_FSL_MC_REGION_TYPE_MMIO 1
++#define VFIO_FSL_MC_REGION_TYPE_CACHEABLE 2
++ u32 type;
++ u64 addr;
++ resource_size_t size;
++ void __iomem *ioaddr;
++};
++
++struct vfio_fsl_mc_device {
++ struct fsl_mc_device *mc_dev;
++ int refcnt;
++ u32 num_regions;
++ struct vfio_fsl_mc_region *regions;
++ struct vfio_fsl_mc_irq *mc_irqs;
++};
++
++int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev);
++void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev);
++int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
++ uint32_t flags, unsigned int index,
++ unsigned int start, unsigned int count,
++ void *data);
++#endif /* VFIO_FSL_MC_PRIVATE_H */
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 1d48e62f..f0a39331 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -36,6 +36,8 @@
+ #include <linux/uaccess.h>
+ #include <linux/vfio.h>
+ #include <linux/workqueue.h>
++#include <linux/dma-iommu.h>
++#include <linux/irqdomain.h>
+
+ #define DRIVER_VERSION "0.2"
+ #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
+@@ -720,6 +722,27 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+ __free_pages(pages, order);
+ }
+
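++/*
++ * Look for a software-managed MSI window (IOMMU_RESV_SW_MSI) among the
++ * group's reserved regions; if found, return its base through @base.
++ */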
++static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
++{
++ struct list_head group_resv_regions;
++ struct iommu_resv_region *region, *next;
++ bool ret = false;
++
++ INIT_LIST_HEAD(&group_resv_regions);
++ iommu_get_group_resv_regions(group, &group_resv_regions);
++ list_for_each_entry(region, &group_resv_regions, list) {
++ if (region->type == IOMMU_RESV_SW_MSI) {
++ *base = region->start;
++ ret = true;
++ goto out;
++ }
++ }
++out:
++ list_for_each_entry_safe(region, next, &group_resv_regions, list)
++ kfree(region);
++ return ret;
++}
++
+ static int vfio_iommu_type1_attach_group(void *iommu_data,
+ struct iommu_group *iommu_group)
+ {
+@@ -728,6 +751,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
+ struct vfio_domain *domain, *d;
+ struct bus_type *bus = NULL;
+ int ret;
++ bool resv_msi, msi_remap;
++ phys_addr_t resv_msi_base;
+
+ mutex_lock(&iommu->lock);
+
+@@ -774,11 +799,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
+ if (ret)
+ goto out_domain;
+
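++ /* Note the base of any software-managed MSI window so an MSI
++ * cookie can be installed on the domain further down.
++ */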
++ resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
++
+ INIT_LIST_HEAD(&domain->group_list);
+ list_add(&group->next, &domain->group_list);
+
+- if (!allow_unsafe_interrupts &&
+- !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
++ msi_remap = resv_msi ? irq_domain_check_msi_remap() :
++ iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
++
++ if (!allow_unsafe_interrupts && !msi_remap) {
+ pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
+ __func__);
+ ret = -EPERM;
+@@ -820,6 +849,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
+ if (ret)
+ goto out_detach;
+
++ if (resv_msi) {
++ ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
++ if (ret)
++ goto out_detach;
++ }
++
+ list_add(&domain->next, &iommu->domain_list);
+
+ mutex_unlock(&iommu->lock);
+diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
+index 255a2113..1bbaa13d 100644
+--- a/include/uapi/linux/vfio.h
++++ b/include/uapi/linux/vfio.h
+@@ -198,6 +198,7 @@ struct vfio_device_info {
+ #define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
+ #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
+ #define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
++#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 5) /* vfio-fsl-mc device */
+ __u32 num_regions; /* Max region index + 1 */
+ __u32 num_irqs; /* Max IRQ index + 1 */
+ };
+--
+2.14.1
+