Diffstat (limited to 'target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch')
-rw-r--r--  target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch  11518
1 file changed, 11518 insertions, 0 deletions
diff --git a/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch b/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch
new file mode 100644
index 0000000000..eab1e656d2
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch
@@ -0,0 +1,11518 @@
+From 464b4d9b8282e0f1e5040e4914505f91ce4d3750 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:06:25 +0800
+Subject: [PATCH] fsl-mc: layerscape support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is an integrated patch for layerscape mc-bus support.
+
+Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
+Signed-off-by: Shiva Kerdel <shiva@exdev.nl>
+Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/staging/fsl-mc/bus/Kconfig | 41 +-
+ drivers/staging/fsl-mc/bus/Makefile | 10 +-
+ drivers/staging/fsl-mc/bus/dpbp-cmd.h | 80 ++
+ drivers/staging/fsl-mc/bus/dpbp.c | 450 +--------
+ drivers/staging/fsl-mc/bus/dpcon-cmd.h | 85 ++
+ drivers/staging/fsl-mc/bus/dpcon.c | 317 ++++++
+ drivers/staging/fsl-mc/bus/dpio/Makefile | 11 +
+ .../{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} | 73 +-
+ drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 296 ++++++
+ drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt | 135 +++
+ drivers/staging/fsl-mc/bus/dpio/dpio-service.c | 689 +++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/dpio.c | 224 +++++
+ drivers/staging/fsl-mc/bus/dpio/dpio.h | 109 ++
+ drivers/staging/fsl-mc/bus/dpio/qbman-portal.c | 1049 ++++++++++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman-portal.h | 662 ++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 853 ++++++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 +++
+ drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 171 ++++
+ drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 112 +--
+ drivers/staging/fsl-mc/bus/dpmcp.c | 374 +------
+ drivers/staging/fsl-mc/bus/dpmcp.h | 127 +--
+ drivers/staging/fsl-mc/bus/dpmng-cmd.h | 14 +-
+ drivers/staging/fsl-mc/bus/dpmng.c | 37 +-
+ drivers/staging/fsl-mc/bus/dprc-cmd.h | 82 +-
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 38 +-
+ drivers/staging/fsl-mc/bus/dprc.c | 629 +-----------
+ drivers/staging/fsl-mc/bus/fsl-mc-allocator.c | 78 +-
+ drivers/staging/fsl-mc/bus/fsl-mc-bus.c | 318 +++---
+ drivers/staging/fsl-mc/bus/fsl-mc-iommu.c | 104 ++
+ drivers/staging/fsl-mc/bus/fsl-mc-msi.c | 3 +-
+ drivers/staging/fsl-mc/bus/fsl-mc-private.h | 6 +-
+ .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 11 +-
+ drivers/staging/fsl-mc/bus/mc-io.c | 4 +-
+ drivers/staging/fsl-mc/bus/mc-ioctl.h | 22 +
+ drivers/staging/fsl-mc/bus/mc-restool.c | 405 ++++++++
+ drivers/staging/fsl-mc/bus/mc-sys.c | 14 +-
+ drivers/staging/fsl-mc/include/dpaa2-fd.h | 706 +++++++++++++
+ drivers/staging/fsl-mc/include/dpaa2-global.h | 202 ++++
+ drivers/staging/fsl-mc/include/dpaa2-io.h | 190 ++++
+ drivers/staging/fsl-mc/include/dpbp-cmd.h | 185 ----
+ drivers/staging/fsl-mc/include/dpbp.h | 158 +--
+ drivers/staging/fsl-mc/include/dpcon.h | 115 +++
+ drivers/staging/fsl-mc/include/dpmng.h | 16 +-
+ drivers/staging/fsl-mc/include/dpopr.h | 110 ++
+ drivers/staging/fsl-mc/include/dprc.h | 470 +++------
+ drivers/staging/fsl-mc/include/mc-bus.h | 7 +-
+ drivers/staging/fsl-mc/include/mc-cmd.h | 44 +-
+ drivers/staging/fsl-mc/include/mc-sys.h | 3 +-
+ drivers/staging/fsl-mc/include/mc.h | 17 +-
+ 49 files changed, 7380 insertions(+), 2612 deletions(-)
+ create mode 100644 drivers/staging/fsl-mc/bus/dpbp-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpcon-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile
+ rename drivers/staging/fsl-mc/{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} (64%)
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h
+ create mode 100644 drivers/staging/fsl-mc/bus/fsl-mc-iommu.c
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c
+ create mode 100644 drivers/staging/fsl-mc/include/dpaa2-fd.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpaa2-global.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpaa2-io.h
+ delete mode 100644 drivers/staging/fsl-mc/include/dpbp-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpcon.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpopr.h
+
+diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
+index 1f959339..67847c0e 100644
+--- a/drivers/staging/fsl-mc/bus/Kconfig
++++ b/drivers/staging/fsl-mc/bus/Kconfig
+@@ -1,25 +1,40 @@
+ #
+-# Freescale Management Complex (MC) bus drivers
++# DPAA2 fsl-mc bus
+ #
+-# Copyright (C) 2014 Freescale Semiconductor, Inc.
++# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ #
+ # This file is released under the GPLv2
+ #
+
+ config FSL_MC_BUS
+- bool "Freescale Management Complex (MC) bus driver"
+- depends on OF && ARM64
++ bool "QorIQ DPAA2 fsl-mc bus driver"
++ depends on OF && ARCH_LAYERSCAPE
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
+- Driver to enable the bus infrastructure for the Freescale
+- QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
+- module of the QorIQ LS2 SoCs, that does resource management
+- for hardware building-blocks in the SoC that can be used
+- to dynamically create networking hardware objects such as
+- network interfaces (NICs), crypto accelerator instances,
+- or L2 switches.
++ Driver to enable the bus infrastructure for the QorIQ DPAA2
++ architecture. The fsl-mc bus driver handles discovery of
++ DPAA2 objects (which are represented as Linux devices) and
++ binding objects to drivers.
+
+- Only enable this option when building the kernel for
+- Freescale QorQIQ LS2xxxx SoCs.
++config FSL_MC_DPIO
++ tristate "QorIQ DPAA2 DPIO driver"
++ depends on FSL_MC_BUS
++ help
++ Driver for the DPAA2 DPIO object. A DPIO provides queue and
++ buffer management facilities for software to interact with
++ other DPAA2 objects. This driver does not expose the DPIO
++ objects individually, but groups them under a service layer
++ API.
+
++config FSL_QBMAN_DEBUG
++ tristate "Freescale QBMAN Debug APIs"
++ depends on FSL_MC_DPIO
++ help
++ QBMan debug assistant APIs.
+
++config FSL_MC_RESTOOL
++ tristate "Freescale Management Complex (MC) restool driver"
++ depends on FSL_MC_BUS
++ help
++ Driver that provides kernel support for the Freescale Management
++ Complex resource manager user-space tool.
+diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
+index 38716fd5..e7e2239c 100644
+--- a/drivers/staging/fsl-mc/bus/Makefile
++++ b/drivers/staging/fsl-mc/bus/Makefile
+@@ -17,4 +17,12 @@ mc-bus-driver-objs := fsl-mc-bus.o \
+ fsl-mc-msi.o \
+ irq-gic-v3-its-fsl-mc-msi.o \
+ dpmcp.o \
+- dpbp.o
++ dpbp.o \
++ dpcon.o \
++ fsl-mc-iommu.o
++
++# MC DPIO driver
++obj-$(CONFIG_FSL_MC_DPIO) += dpio/
++
++# MC restool kernel support
++obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o
+diff --git a/drivers/staging/fsl-mc/bus/dpbp-cmd.h b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
+new file mode 100644
+index 00000000..8aa65452
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
+@@ -0,0 +1,80 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPBP_CMD_H
++#define _FSL_DPBP_CMD_H
++
++/* DPBP Version */
++#define DPBP_VER_MAJOR 3
++#define DPBP_VER_MINOR 2
++
++/* Command versioning */
++#define DPBP_CMD_BASE_VERSION 1
++#define DPBP_CMD_ID_OFFSET 4
++
++#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
++
++/* Command IDs */
++#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
++#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
++#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04)
++
++#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
++#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
++#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
++#define DPBP_CMDID_RESET DPBP_CMD(0x005)
++#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006)
++
++struct dpbp_cmd_open {
++ __le32 dpbp_id;
++};
++
++struct dpbp_cmd_destroy {
++ __le32 object_id;
++};
++
++#define DPBP_ENABLE 0x1
++
++struct dpbp_rsp_is_enabled {
++ u8 enabled;
++};
++
++struct dpbp_rsp_get_attributes {
++ /* response word 0 */
++ __le16 pad;
++ __le16 bpid;
++ __le32 id;
++ /* response word 1 */
++ __le16 version_major;
++ __le16 version_minor;
++};
++
++#endif /* _FSL_DPBP_CMD_H */
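Everything above is new: each command ID now carries a command-format version in its low nibble via DPBP_CMD(). A quick worked example of the encoding (a standalone sketch, not part of the patch; values are taken from the defines above):

    #include <stdio.h>

    #define DPBP_CMD_BASE_VERSION  1
    #define DPBP_CMD_ID_OFFSET     4
    #define DPBP_CMD(id)  (((id) << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)

    int main(void)
    {
            /* 0x804 << 4 | 1 -> 0x8041: command 0x804, command-format version 1 */
            printf("DPBP_CMDID_OPEN            = 0x%x\n", DPBP_CMD(0x804));
            printf("DPBP_CMDID_CLOSE           = 0x%x\n", DPBP_CMD(0x800));
            printf("DPBP_CMDID_GET_API_VERSION = 0x%x\n", DPBP_CMD(0xa04));
            return 0;
    }

So bumping DPBP_CMD_BASE_VERSION changes every command word without touching the ID table.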
+diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
+index 5d4cd812..d9e450a6 100644
+--- a/drivers/staging/fsl-mc/bus/dpbp.c
++++ b/drivers/staging/fsl-mc/bus/dpbp.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -32,7 +32,8 @@
+ #include "../include/mc-sys.h"
+ #include "../include/mc-cmd.h"
+ #include "../include/dpbp.h"
+-#include "../include/dpbp-cmd.h"
++
++#include "dpbp-cmd.h"
+
+ /**
+ * dpbp_open() - Open a control session for the specified object.
+@@ -104,74 +105,6 @@ int dpbp_close(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(dpbp_close);
+
+-/**
+- * dpbp_create() - Create the DPBP object.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @cfg: Configuration structure
+- * @token: Returned token; use in subsequent API calls
+- *
+- * Create the DPBP object, allocate required resources and
+- * perform required initialization.
+- *
+- * The object can be created either by declaring it in the
+- * DPL file, or by calling this function.
+- * This function returns a unique authentication token,
+- * associated with the specific object ID and the specific MC
+- * portal; this token must be used in all subsequent calls to
+- * this specific object. For objects that are created using the
+- * DPL file, call dpbp_open function to get an authentication
+- * token first.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_create(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- const struct dpbp_cfg *cfg,
+- u16 *token)
+-{
+- struct mc_command cmd = { 0 };
+- int err;
+-
+- (void)(cfg); /* unused */
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
+- cmd_flags, 0);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- *token = mc_cmd_hdr_read_token(&cmd);
+-
+- return 0;
+-}
+-
+-/**
+- * dpbp_destroy() - Destroy the DPBP object and release all its resources.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- *
+- * Return: '0' on Success; error code otherwise.
+- */
+-int dpbp_destroy(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+ /**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -250,6 +183,7 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+
+ return 0;
+ }
++EXPORT_SYMBOL(dpbp_is_enabled);
+
+ /**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+@@ -272,310 +206,7 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+ }
+-
+-/**
+- * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: Identifies the interrupt index to configure
+- * @irq_cfg: IRQ configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dpbp_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_set_irq *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_set_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+- cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
+- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_get_irq() - Get IRQ information from the DPBP.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @type: Interrupt type: 0 represents message interrupt
+- * type (both irq_addr and irq_val are valid)
+- * @irq_cfg: IRQ attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dpbp_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_get_irq *cmd_params;
+- struct dpbp_rsp_get_irq *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq *)cmd.params;
+- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+- irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
+- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+- *type = le32_to_cpu(rsp_params->type);
+-
+- return 0;
+-}
+-
+-/**
+- * dpbp_set_irq_enable() - Set overall interrupt state.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @en: Interrupt state - enable = 1, disable = 0
+- *
+- * Allows GPP software to control when interrupts are generated.
+- * Each interrupt can have up to 32 causes. The enable/disable control's the
+- * overall interrupt state. if the interrupt is disabled no causes will cause
+- * an interrupt.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_set_irq_enable *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_set_irq_enable *)cmd.params;
+- cmd_params->enable = en & DPBP_ENABLE;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_get_irq_enable() - Get overall interrupt state
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @en: Returned interrupt state - enable = 1, disable = 0
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_get_irq_enable *cmd_params;
+- struct dpbp_rsp_get_irq_enable *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq_enable *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq_enable *)cmd.params;
+- *en = rsp_params->enabled & DPBP_ENABLE;
+- return 0;
+-}
+-
+-/**
+- * dpbp_set_irq_mask() - Set interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Event mask to trigger interrupt;
+- * each bit:
+- * 0 = ignore event
+- * 1 = consider event for asserting IRQ
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_set_irq_mask *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_set_irq_mask *)cmd.params;
+- cmd_params->mask = cpu_to_le32(mask);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_get_irq_mask() - Get interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Returned event mask to trigger interrupt
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_get_irq_mask *cmd_params;
+- struct dpbp_rsp_get_irq_mask *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq_mask *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq_mask *)cmd.params;
+- *mask = le32_to_cpu(rsp_params->mask);
+-
+- return 0;
+-}
+-
+-/**
+- * dpbp_get_irq_status() - Get the current status of any pending interrupts.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @status: Returned interrupts status - one bit per cause:
+- * 0 = no interrupt pending
+- * 1 = interrupt pending
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_get_irq_status *cmd_params;
+- struct dpbp_rsp_get_irq_status *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(*status);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq_status *)cmd.params;
+- *status = le32_to_cpu(rsp_params->status);
+-
+- return 0;
+-}
+-
+-/**
+- * dpbp_clear_irq_status() - Clear a pending interrupt's status
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @status: Bits to clear (W1C) - one bit per cause:
+- * 0 = don't change
+- * 1 = clear status bit
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_clear_irq_status *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_clear_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(status);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
++EXPORT_SYMBOL(dpbp_reset);
+
+ /**
+ * dpbp_get_attributes - Retrieve DPBP attributes.
+@@ -609,83 +240,40 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
+- attr->version.major = le16_to_cpu(rsp_params->version_major);
+- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
+ return 0;
+ }
+ EXPORT_SYMBOL(dpbp_get_attributes);
+
+ /**
+- * dpbp_set_notifications() - Set notifications towards software
+- * @mc_io: Pointer to MC portal's I/O object
++ * dpbp_get_api_version - Get Data Path Buffer Pool API version
++ * @mc_io: Pointer to Mc portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @cfg: notifications configuration
++ * @major_ver: Major version of Buffer Pool API
++ * @minor_ver: Minor version of Buffer Pool API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+-int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg)
++int dpbp_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
+ {
+ struct mc_command cmd = { 0 };
+- struct dpbp_cmd_set_notifications *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_set_notifications *)cmd.params;
+- cmd_params->depletion_entry = cpu_to_le32(cfg->depletion_entry);
+- cmd_params->depletion_exit = cpu_to_le32(cfg->depletion_exit);
+- cmd_params->surplus_entry = cpu_to_le32(cfg->surplus_entry);
+- cmd_params->surplus_exit = cpu_to_le32(cfg->surplus_exit);
+- cmd_params->options = cpu_to_le16(cfg->options);
+- cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+- cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_get_notifications() - Get the notifications configuration
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @cfg: notifications configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_rsp_get_notifications *rsp_params;
+ int err;
+
+ /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
+- cmd_flags,
+- token);
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
+
+- /* send command to mc*/
++ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_notifications *)cmd.params;
+- cfg->depletion_entry = le32_to_cpu(rsp_params->depletion_entry);
+- cfg->depletion_exit = le32_to_cpu(rsp_params->depletion_exit);
+- cfg->surplus_entry = le32_to_cpu(rsp_params->surplus_entry);
+- cfg->surplus_exit = le32_to_cpu(rsp_params->surplus_exit);
+- cfg->options = le16_to_cpu(rsp_params->options);
+- cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+- cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+ }
++EXPORT_SYMBOL(dpbp_get_api_version);
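With the IRQ and notification setters dropped, the surviving DPBP surface is open/close, enable/disable, reset, get_attributes and the new dpbp_get_api_version(). A minimal sketch of a version check a consumer could do against the constants in dpbp-cmd.h (check_dpbp_api() is a hypothetical helper, not part of this patch; it assumes the dpbp.h/dpbp-cmd.h headers above are in scope):

    static int check_dpbp_api(struct fsl_mc_io *mc_io)
    {
            u16 major, minor;
            int err;

            /* queries the MC firmware directly; no object token is needed */
            err = dpbp_get_api_version(mc_io, 0, &major, &minor);
            if (err)
                    return err;

            /* DPBP_VER_MAJOR/MINOR (3.2) come from dpbp-cmd.h above */
            if (major != DPBP_VER_MAJOR || minor < DPBP_VER_MINOR)
                    return -ENOTSUPP;

            return 0;
    }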
+diff --git a/drivers/staging/fsl-mc/bus/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
+new file mode 100644
+index 00000000..2bb66988
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
+@@ -0,0 +1,85 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPCON_CMD_H
++#define _FSL_DPCON_CMD_H
++
++/* DPCON Version */
++#define DPCON_VER_MAJOR 3
++#define DPCON_VER_MINOR 2
++
++/* Command versioning */
++#define DPCON_CMD_BASE_VERSION 1
++#define DPCON_CMD_ID_OFFSET 4
++
++#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
++
++/* Command IDs */
++#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
++#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
++#define DPCON_CMDID_GET_API_VERSION DPCON_CMD(0xa08)
++
++#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
++#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
++#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
++#define DPCON_CMDID_RESET DPCON_CMD(0x005)
++#define DPCON_CMDID_IS_ENABLED DPCON_CMD(0x006)
++
++#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
++
++struct dpcon_cmd_open {
++ __le32 dpcon_id;
++};
++
++#define DPCON_ENABLE 1
++
++struct dpcon_rsp_is_enabled {
++ u8 enabled;
++};
++
++struct dpcon_rsp_get_attr {
++ /* response word 0 */
++ __le32 id;
++ __le16 qbman_ch_id;
++ u8 num_priorities;
++ u8 pad;
++};
++
++struct dpcon_cmd_set_notification {
++ /* cmd word 0 */
++ __le32 dpio_id;
++ u8 priority;
++ u8 pad[3];
++ /* cmd word 1 */
++ __le64 user_ctx;
++};
++
++#endif /* _FSL_DPCON_CMD_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpcon.c b/drivers/staging/fsl-mc/bus/dpcon.c
+new file mode 100644
+index 00000000..eb713578
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpcon.c
+@@ -0,0 +1,317 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../include/mc-sys.h"
++#include "../include/mc-cmd.h"
++#include "../include/dpcon.h"
++
++#include "dpcon-cmd.h"
++
++/**
++ * dpcon_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpcon_id: DPCON unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpcon_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpcon_id,
++ u16 *token)
++{
++ struct mc_command cmd = { 0 };
++ struct dpcon_cmd_open *dpcon_cmd;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
++ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_open);
++
++/**
++ * dpcon_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_close);
++
++/**
++ * dpcon_enable() - Enable the DPCON
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_enable);
++
++/**
++ * dpcon_disable() - Disable the DPCON
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_disable);
++
++/**
++ * dpcon_is_enabled() - Check if the DPCON is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ struct dpcon_rsp_is_enabled *dpcon_rsp;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
++ *en = dpcon_rsp->enabled & DPCON_ENABLE;
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_is_enabled);
++
++/**
++ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_reset);
++
++/**
++ * dpcon_get_attributes() - Retrieve DPCON attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @attr: Object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpcon_rsp_get_attr *dpcon_rsp;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
++ attr->id = le32_to_cpu(dpcon_rsp->id);
++ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
++ attr->num_priorities = dpcon_rsp->num_priorities;
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_get_attributes);
++
++/**
++ * dpcon_set_notification() - Set DPCON notification destination
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @cfg: Notification parameters
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_set_notification(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ struct dpcon_cmd_set_notification *dpcon_cmd;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
++ cmd_flags,
++ token);
++ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
++ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
++ dpcon_cmd->priority = cfg->priority;
++ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_set_notification);
++
++/**
++ * dpcon_get_api_version - Get Data Path Concentrator API version
++ * @mc_io: Pointer to MC portal's DPCON object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of DPCON API
++ * @minor_ver: Minor version of DPCON API
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_get_api_version);
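dpcon.c only exposes object control; pointing a DPCON at a DPIO and enabling it is left to the consumer (for instance an Ethernet driver). A minimal sketch using the calls exported above (attach_dpcon() is a hypothetical helper; the cfg field names follow their use in dpcon_set_notification(), the struct itself is declared in include/dpcon.h, which this hunk does not show):

    static int attach_dpcon(struct fsl_mc_io *mc_io, int dpcon_id, int dpio_id)
    {
            struct dpcon_notification_cfg cfg;
            u16 token;
            int err;

            err = dpcon_open(mc_io, 0, dpcon_id, &token);
            if (err)
                    return err;

            cfg.dpio_id = dpio_id;  /* DPIO that receives the channel notifications */
            cfg.priority = 0;
            cfg.user_ctx = 0;       /* opaque value handed back with each notification */
            err = dpcon_set_notification(mc_io, 0, token, &cfg);
            if (!err)
                    err = dpcon_enable(mc_io, 0, token);

            /* closing the control session does not disable the object */
            dpcon_close(mc_io, 0, token);
            return err;
    }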
+diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/staging/fsl-mc/bus/dpio/Makefile
+new file mode 100644
+index 00000000..1c28794e
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/Makefile
+@@ -0,0 +1,11 @@
++#
++# QorIQ DPAA2 DPIO driver
++#
++
++subdir-ccflags-y := -Werror
++
++obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
++
++fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
++
++obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o
+diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
+similarity index 64%
+rename from drivers/staging/fsl-mc/include/dpcon-cmd.h
+rename to drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
+index 536b2ef1..b2dc6e76 100644
+--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
+@@ -1,4 +1,6 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +13,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -29,34 +30,46 @@
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+-#ifndef _FSL_DPCON_CMD_H
+-#define _FSL_DPCON_CMD_H
++#ifndef _FSL_DPIO_CMD_H
++#define _FSL_DPIO_CMD_H
++
++/* DPIO Version */
++#define DPIO_VER_MAJOR 4
++#define DPIO_VER_MINOR 2
++
++/* Command Versioning */
++
++#define DPIO_CMD_ID_OFFSET 4
++#define DPIO_CMD_BASE_VERSION 1
+
+-/* DPCON Version */
+-#define DPCON_VER_MAJOR 2
+-#define DPCON_VER_MINOR 1
++#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
+
+ /* Command IDs */
+-#define DPCON_CMDID_CLOSE 0x800
+-#define DPCON_CMDID_OPEN 0x808
+-#define DPCON_CMDID_CREATE 0x908
+-#define DPCON_CMDID_DESTROY 0x900
+-
+-#define DPCON_CMDID_ENABLE 0x002
+-#define DPCON_CMDID_DISABLE 0x003
+-#define DPCON_CMDID_GET_ATTR 0x004
+-#define DPCON_CMDID_RESET 0x005
+-#define DPCON_CMDID_IS_ENABLED 0x006
+-
+-#define DPCON_CMDID_SET_IRQ 0x010
+-#define DPCON_CMDID_GET_IRQ 0x011
+-#define DPCON_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPCON_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPCON_CMDID_SET_IRQ_MASK 0x014
+-#define DPCON_CMDID_GET_IRQ_MASK 0x015
+-#define DPCON_CMDID_GET_IRQ_STATUS 0x016
+-#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017
+-
+-#define DPCON_CMDID_SET_NOTIFICATION 0x100
+-
+-#endif /* _FSL_DPCON_CMD_H */
++#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
++#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
++#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
++#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
++#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
++#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
++
++struct dpio_cmd_open {
++ __le32 dpio_id;
++};
++
++#define DPIO_CHANNEL_MODE_MASK 0x3
++
++struct dpio_rsp_get_attr {
++ /* cmd word 0 */
++ __le32 id;
++ __le16 qbman_portal_id;
++ u8 num_priorities;
++ u8 channel_mode;
++ /* cmd word 1 */
++ __le64 qbman_portal_ce_addr;
++ /* cmd word 2 */
++ __le64 qbman_portal_ci_addr;
++ /* cmd word 3 */
++ __le32 qbman_version;
++};
++
++#endif /* _FSL_DPIO_CMD_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+new file mode 100644
+index 00000000..8c8244a1
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+@@ -0,0 +1,296 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++
++#include "../../include/mc.h"
++#include "../../include/dpaa2-io.h"
++
++#include "qbman-portal.h"
++#include "dpio.h"
++#include "dpio-cmd.h"
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Freescale Semiconductor, Inc");
++MODULE_DESCRIPTION("DPIO Driver");
++
++struct dpio_priv {
++ struct dpaa2_io *io;
++};
++
++static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct dpio_priv *priv = dev_get_drvdata(dev);
++
++ return dpaa2_io_irq(priv->io);
++}
++
++static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
++{
++ struct fsl_mc_device_irq *irq;
++
++ irq = dpio_dev->irqs[0];
++
++ /* clear the affinity hint */
++ irq_set_affinity_hint(irq->msi_desc->irq, NULL);
++}
++
++static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
++{
++ struct dpio_priv *priv;
++ int error;
++ struct fsl_mc_device_irq *irq;
++ cpumask_t mask;
++
++ priv = dev_get_drvdata(&dpio_dev->dev);
++
++ irq = dpio_dev->irqs[0];
++ error = devm_request_irq(&dpio_dev->dev,
++ irq->msi_desc->irq,
++ dpio_irq_handler,
++ 0,
++ dev_name(&dpio_dev->dev),
++ &dpio_dev->dev);
++ if (error < 0) {
++ dev_err(&dpio_dev->dev,
++ "devm_request_irq() failed: %d\n",
++ error);
++ return error;
++ }
++
++ /* set the affinity hint */
++ cpumask_clear(&mask);
++ cpumask_set_cpu(cpu, &mask);
++ if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
++ dev_err(&dpio_dev->dev,
++ "irq_set_affinity failed irq %d cpu %d\n",
++ irq->msi_desc->irq, cpu);
++
++ return 0;
++}
++
++static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
++{
++ struct dpio_attr dpio_attrs;
++ struct dpaa2_io_desc desc;
++ struct dpio_priv *priv;
++ int err = -ENOMEM;
++ struct device *dev = &dpio_dev->dev;
++ static int next_cpu = -1;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ goto err_priv_alloc;
++
++ dev_set_drvdata(dev, priv);
++
++ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
++ if (err) {
++ dev_dbg(dev, "MC portal allocation failed\n");
++ err = -EPROBE_DEFER;
++ goto err_mcportal;
++ }
++
++ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
++ &dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_open() failed\n");
++ goto err_open;
++ }
++
++ err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
++ &dpio_attrs);
++ if (err) {
++ dev_err(dev, "dpio_get_attributes() failed %d\n", err);
++ goto err_get_attr;
++ }
++ desc.qman_version = dpio_attrs.qbman_version;
++
++ err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_enable() failed %d\n", err);
++ goto err_get_attr;
++ }
++
++ /* initialize DPIO descriptor */
++ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
++ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
++ desc.dpio_id = dpio_dev->obj_desc.id;
++
++ /* get the cpu to use for the affinity hint */
++ if (next_cpu == -1)
++ next_cpu = cpumask_first(cpu_online_mask);
++ else
++ next_cpu = cpumask_next(next_cpu, cpu_online_mask);
++
++ if (!cpu_possible(next_cpu)) {
++ dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
++ err = -ERANGE;
++ goto err_allocate_irqs;
++ }
++ desc.cpu = next_cpu;
++
++ /*
++ * Set the CENA regs to be the cache enabled area of the portal to
++ * achieve the best performance.
++ */
++ desc.regs_cena = ioremap_cache_ns(dpio_dev->regions[0].start,
++ resource_size(&dpio_dev->regions[0]));
++ desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
++ resource_size(&dpio_dev->regions[1]));
++
++ err = fsl_mc_allocate_irqs(dpio_dev);
++ if (err) {
++ dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
++ goto err_allocate_irqs;
++ }
++
++ err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
++ if (err)
++ goto err_register_dpio_irq;
++
++ priv->io = dpaa2_io_create(&desc);
++ if (!priv->io) {
++ dev_err(dev, "dpaa2_io_create failed\n");
++ goto err_dpaa2_io_create;
++ }
++
++ dev_info(dev, "probed\n");
++ dev_dbg(dev, " receives_notifications = %d\n",
++ desc.receives_notifications);
++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++ fsl_mc_portal_free(dpio_dev->mc_io);
++
++ return 0;
++
++err_dpaa2_io_create:
++ unregister_dpio_irq_handlers(dpio_dev);
++err_register_dpio_irq:
++ fsl_mc_free_irqs(dpio_dev);
++err_allocate_irqs:
++ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++err_get_attr:
++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++err_open:
++ fsl_mc_portal_free(dpio_dev->mc_io);
++err_mcportal:
++ dev_set_drvdata(dev, NULL);
++err_priv_alloc:
++ return err;
++}
++
++/* Tear down interrupts for a given DPIO object */
++static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
++{
++ unregister_dpio_irq_handlers(dpio_dev);
++ fsl_mc_free_irqs(dpio_dev);
++}
++
++static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
++{
++ struct device *dev;
++ struct dpio_priv *priv;
++ int err;
++
++ dev = &dpio_dev->dev;
++ priv = dev_get_drvdata(dev);
++
++ dpaa2_io_down(priv->io);
++
++ dpio_teardown_irqs(dpio_dev);
++
++ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
++ if (err) {
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_mcportal;
++ }
++
++ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
++ &dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_open() failed\n");
++ goto err_open;
++ }
++
++ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++
++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++
++ fsl_mc_portal_free(dpio_dev->mc_io);
++
++ dev_set_drvdata(dev, NULL);
++
++ return 0;
++
++err_open:
++ fsl_mc_portal_free(dpio_dev->mc_io);
++err_mcportal:
++ return err;
++}
++
++static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpio",
++ },
++ { .vendor = 0x0 }
++};
++
++static struct fsl_mc_driver dpaa2_dpio_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_dpio_probe,
++ .remove = dpaa2_dpio_remove,
++ .match_id_table = dpaa2_dpio_match_id_table
++};
++
++static int dpio_driver_init(void)
++{
++ return fsl_mc_driver_register(&dpaa2_dpio_driver);
++}
++
++static void dpio_driver_exit(void)
++{
++ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
++}
++module_init(dpio_driver_init);
++module_exit(dpio_driver_exit);
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
+new file mode 100644
+index 00000000..0ba67716
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
+@@ -0,0 +1,135 @@
++Copyright 2016 NXP
++
++Introduction
++------------
++
++A DPAA2 DPIO (Data Path I/O) is a hardware object that provides
++interfaces to enqueue and dequeue frames to/from network interfaces
++and other accelerators. A DPIO also provides hardware buffer
++pool management for network interfaces.
++
++This document provides an overview of the Linux DPIO driver, its
++subcomponents, and its APIs.
++
++See Documentation/dpaa2/overview.txt for a general overview of DPAA2
++and the general DPAA2 driver architecture in Linux.
++
++Driver Overview
++---------------
++
++The DPIO driver is bound to DPIO objects discovered on the fsl-mc bus and
++provides services that:
++ A) allow other drivers, such as the Ethernet driver, to enqueue and dequeue
++ frames for their respective objects
++ B) allow drivers to register callbacks for data availability notifications
++ when data becomes available on a queue or channel
++ C) allow drivers to manage hardware buffer pools
++
++The Linux DPIO driver consists of 3 primary components--
++ DPIO object driver-- fsl-mc driver that manages the DPIO object
++ DPIO service-- provides APIs to other Linux drivers for services
++ QBman portal interface-- sends portal commands, gets responses
++
++ fsl-mc other
++ bus drivers
++ | |
++ +---+----+ +------+-----+
++ |DPIO obj| |DPIO service|
++ | driver |---| (DPIO) |
++ +--------+ +------+-----+
++ |
++ +------+-----+
++ | QBman |
++ | portal i/f |
++ +------------+
++ |
++ hardware
++
++The diagram below shows how the DPIO driver components fit with the other
++DPAA2 Linux driver components:
++ +------------+
++ | OS Network |
++ | Stack |
++ +------------+ +------------+
++ | Allocator |. . . . . . . | Ethernet |
++ |(DPMCP,DPBP)| | (DPNI) |
++ +-.----------+ +---+---+----+
++ . . ^ |
++ . . <data avail, | |<enqueue,
++ . . tx confirm> | | dequeue>
++ +-------------+ . | |
++ | DPRC driver | . +--------+ +------------+
++ | (DPRC) | . . |DPIO obj| |DPIO service|
++ +----------+--+ | driver |-| (DPIO) |
++ | +--------+ +------+-----+
++ |<dev add/remove> +------|-----+
++ | | QBman |
++ +----+--------------+ | portal i/f |
++ | MC-bus driver | +------------+
++ | | |
++ | /soc/fsl-mc | |
++ +-------------------+ |
++ |
++ =========================================|=========|========================
++ +-+--DPIO---|-----------+
++ | | |
++ | QBman Portal |
++ +-----------------------+
++
++ ============================================================================
++
++
++DPIO Object Driver (dpio-driver.c)
++----------------------------------
++
++ The dpio-driver component registers with the fsl-mc bus to handle objects of
++ type "dpio". The implementation of probe() handles basic initialization
++ of the DPIO including mapping of the DPIO regions (the QBman SW portal)
++ and initializing interrupts and registering irq handlers. The dpio-driver
++ registers the probed DPIO with dpio-service.
++
++DPIO service (dpio-service.c, dpaa2-io.h)
++------------------------------------------
++
++ The dpio service component provides queuing, notification, and buffer
++ management services to DPAA2 drivers, such as the Ethernet driver. A system
++ will typically allocate 1 DPIO object per CPU to allow queuing operations
++ to happen simultaneously across all CPUs.
++
++ Notification handling
++ dpaa2_io_service_register()
++ dpaa2_io_service_deregister()
++ dpaa2_io_service_rearm()
++
++ Queuing
++ dpaa2_io_service_pull_fq()
++ dpaa2_io_service_pull_channel()
++ dpaa2_io_service_enqueue_fq()
++ dpaa2_io_service_enqueue_qd()
++ dpaa2_io_store_create()
++ dpaa2_io_store_destroy()
++ dpaa2_io_store_next()
++
++ Buffer pool management
++ dpaa2_io_service_release()
++ dpaa2_io_service_acquire()
++
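++   The sketch below shows, in rough terms, how an object driver might use
++   these services. It is illustrative only: 'dev', the fq/bpid values, the
++   frame descriptor 'fd', the buffer array and the consume_result() helper
++   are assumed to come from the calling driver, error handling is omitted,
++   and passing NULL as the service argument selects any available DPIO.
++
++      struct dpaa2_io_store *store;
++      struct dpaa2_dq *dq;
++      int is_last = 0;
++
++      /* storage for up to 8 dequeue results, DMA-mapped against 'dev' */
++      store = dpaa2_io_store_create(8, dev);
++
++      /* issue a pull dequeue on one of the driver's frame queues */
++      dpaa2_io_service_pull_fq(NULL, fqid, store);
++
++      /* walk the results; a real driver would bound or defer this loop */
++      do {
++              dq = dpaa2_io_store_next(store, &is_last);
++              if (dq)
++                      consume_result(dq); /* parse via dpaa2-global.h APIs */
++      } while (!is_last);
++
++      /* enqueue a frame descriptor for transmission */
++      dpaa2_io_service_enqueue_fq(NULL, tx_fqid, &fd);
++
++      /* seed the buffer pool, and later reclaim buffers from it */
++      dpaa2_io_service_release(NULL, bpid, bufs, num);
++      dpaa2_io_service_acquire(NULL, bpid, bufs, num); /* may return < num */
++
++      dpaa2_io_store_destroy(store);
++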
++QBman portal interface (qbman-portal.c)
++---------------------------------------
++
++ The qbman-portal component provides APIs to do the low level hardware
++ bit twiddling for operations such as:
++      -initializing QBman software portals
++ -building and sending portal commands
++ -portal interrupt configuration and processing
++
++ The qbman-portal APIs are not public to other drivers, and are
++ only used by dpio-service.
++
++Other (dpaa2-fd.h, dpaa2-global.h)
++----------------------------------
++
++ Frame descriptor and scatter-gather definitions and the APIs used to
++ manipulate them are defined in dpaa2-fd.h.
++
++ Dequeue result struct and parsing APIs are defined in dpaa2-global.h.
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+new file mode 100644
+index 00000000..46c32a67
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+@@ -0,0 +1,689 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/types.h>
++#include "../../include/mc.h"
++#include "../../include/dpaa2-io.h"
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++
++#include "dpio.h"
++#include "qbman-portal.h"
++#include "qbman_debug.h"
++
++struct dpaa2_io {
++ atomic_t refs;
++ struct dpaa2_io_desc dpio_desc;
++ struct qbman_swp_desc swp_desc;
++ struct qbman_swp *swp;
++ struct list_head node;
++ /* protect against multiple management commands */
++ spinlock_t lock_mgmt_cmd;
++ /* protect notifications list */
++ spinlock_t lock_notifications;
++ struct list_head notifications;
++};
++
++struct dpaa2_io_store {
++ unsigned int max;
++ dma_addr_t paddr;
++ struct dpaa2_dq *vaddr;
++ void *alloced_addr; /* unaligned value from kmalloc() */
++ unsigned int idx; /* position of the next-to-be-returned entry */
++ struct qbman_swp *swp; /* portal used to issue VDQCR */
++ struct device *dev; /* device used for DMA mapping */
++};
++
++/* keep a per cpu array of DPIOs for fast access */
++static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
++static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
++static DEFINE_SPINLOCK(dpio_list_lock);
++
++static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
++ int cpu)
++{
++ if (d)
++ return d;
++
++ if (unlikely(cpu >= num_possible_cpus()))
++ return NULL;
++
++ /*
++ * If cpu == -1, choose the current cpu, with no guarantees about
++ * potentially being migrated away.
++ */
++ if (unlikely(cpu < 0))
++ cpu = smp_processor_id();
++
++ /* If a specific cpu was requested, pick it up immediately */
++ return dpio_by_cpu[cpu];
++}
++
++static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
++{
++ if (d)
++ return d;
++
++ spin_lock(&dpio_list_lock);
++ d = list_entry(dpio_list.next, struct dpaa2_io, node);
++ list_del(&d->node);
++ list_add_tail(&d->node, &dpio_list);
++ spin_unlock(&dpio_list_lock);
++
++ return d;
++}
++
++/**
++ * dpaa2_io_create() - create a dpaa2_io object.
++ * @desc: the dpaa2_io descriptor
++ *
++ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
++ * DPIO object.
++ *
++ * Return a valid dpaa2_io object for success, or NULL for failure.
++ */
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
++{
++ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
++
++ if (!obj)
++ return NULL;
++
++ /* check if CPU is out of range (-1 means any cpu) */
++ if (desc->cpu >= num_possible_cpus()) {
++ kfree(obj);
++ return NULL;
++ }
++
++ atomic_set(&obj->refs, 1);
++ obj->dpio_desc = *desc;
++ obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
++ obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
++ obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
++ obj->swp = qbman_swp_init(&obj->swp_desc);
++
++ if (!obj->swp) {
++ kfree(obj);
++ return NULL;
++ }
++
++ INIT_LIST_HEAD(&obj->node);
++ spin_lock_init(&obj->lock_mgmt_cmd);
++ spin_lock_init(&obj->lock_notifications);
++ INIT_LIST_HEAD(&obj->notifications);
++
++ /* For now only enable DQRR interrupts */
++ qbman_swp_interrupt_set_trigger(obj->swp,
++ QBMAN_SWP_INTERRUPT_DQRI);
++ qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
++ if (obj->dpio_desc.receives_notifications)
++ qbman_swp_push_set(obj->swp, 0, 1);
++
++ spin_lock(&dpio_list_lock);
++ list_add_tail(&obj->node, &dpio_list);
++ if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
++ dpio_by_cpu[desc->cpu] = obj;
++ spin_unlock(&dpio_list_lock);
++
++ return obj;
++}
++EXPORT_SYMBOL(dpaa2_io_create);
++
++/**
++ * dpaa2_io_down() - release the dpaa2_io object.
++ * @d: the dpaa2_io object to be released.
++ *
++ * The "struct dpaa2_io" type can represent an individual DPIO object (as
++ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
++ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
++ * each handle obtained should be released using this function.
++ */
++void dpaa2_io_down(struct dpaa2_io *d)
++{
++ if (!atomic_dec_and_test(&d->refs))
++ return;
++ kfree(d);
++}
++EXPORT_SYMBOL(dpaa2_io_down);
++
++#define DPAA_POLL_MAX 32
++
++/**
++ * dpaa2_io_irq() - ISR for DPIO interrupts
++ *
++ * @obj: the given DPIO object.
++ *
++ * Return IRQ_HANDLED for success or IRQ_NONE if there
++ * were no pending interrupts.
++ */
++irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
++{
++ const struct dpaa2_dq *dq;
++ int max = 0;
++ struct qbman_swp *swp;
++ u32 status;
++
++ swp = obj->swp;
++ status = qbman_swp_interrupt_read_status(swp);
++ if (!status)
++ return IRQ_NONE;
++
++ dq = qbman_swp_dqrr_next(swp);
++ while (dq) {
++ if (qbman_result_is_SCN(dq)) {
++ struct dpaa2_io_notification_ctx *ctx;
++ u64 q64;
++
++ q64 = qbman_result_SCN_ctx(dq);
++ ctx = (void *)q64;
++ ctx->cb(ctx);
++ } else {
++ pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
++ }
++ qbman_swp_dqrr_consume(swp, dq);
++ ++max;
++ if (max > DPAA_POLL_MAX)
++ goto done;
++ dq = qbman_swp_dqrr_next(swp);
++ }
++done:
++ qbman_swp_interrupt_clear_status(swp, status);
++ qbman_swp_interrupt_set_inhibit(swp, 0);
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(dpaa2_io_irq);
++
++/**
++ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
++ * notifications on the given DPIO service.
++ * @d: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * The caller should make the MC command to attach a DPAA2 object to
++ * a DPIO after this function completes successfully. In that way:
++ * (a) The DPIO service is "ready" to handle a notification arrival
++ * (which might happen before the "attach" command to MC has
++ * returned control of execution back to the caller)
++ * (b) The DPIO service can provide back to the caller the 'dpio_id' and
++ * 'qman64' parameters that it should pass along in the MC command
++ * in order for the object to be configured to produce the right
++ * notification fields to the DPIO service.
++ *
++ * Return 0 for success, or -ENODEV for failure.
++ */
++int dpaa2_io_service_register(struct dpaa2_io *d,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ unsigned long irqflags;
++
++ d = service_select_by_cpu(d, ctx->desired_cpu);
++ if (!d)
++ return -ENODEV;
++
++ ctx->dpio_id = d->dpio_desc.dpio_id;
++ ctx->qman64 = (u64)ctx;
++ ctx->dpio_private = d;
++ spin_lock_irqsave(&d->lock_notifications, irqflags);
++ list_add(&ctx->node, &d->notifications);
++ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
++
++ /* Enable the generation of CDAN notifications */
++ if (ctx->is_cdan)
++ qbman_swp_CDAN_set_context_enable(d->swp,
++ (u16)ctx->id,
++ ctx->qman64);
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_service_register);
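++
++/*
++ * Example flow (a sketch only, error handling omitted): an object driver
++ * fills in a notification context, registers it, and only then issues the
++ * MC command that attaches its queue or channel, passing along the dpio_id
++ * and qman64 values filled in by this function. The callback name and fqid
++ * below are hypothetical.
++ *
++ *	nctx->is_cdan = 0;
++ *	nctx->id = fqid;
++ *	nctx->desired_cpu = cpu;
++ *	nctx->cb = my_fqdan_cb;
++ *	err = dpaa2_io_service_register(NULL, nctx);
++ *	(then pass nctx->dpio_id and nctx->qman64 in the MC attach command)
++ */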
++
++/**
++ * dpaa2_io_service_deregister() - The opposite of 'register'.
++ * @service: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * This function should be called only after sending the MC command to
++ * detach the notification-producing device from the DPIO.
++ */
++void dpaa2_io_service_deregister(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ struct dpaa2_io *d = ctx->dpio_private;
++ unsigned long irqflags;
++
++ if (ctx->is_cdan)
++ qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
++
++ spin_lock_irqsave(&d->lock_notifications, irqflags);
++ list_del(&ctx->node);
++ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
++}
++EXPORT_SYMBOL(dpaa2_io_service_deregister);
++
++/**
++ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
++ * @d: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
++ * considered "disarmed". That is, the user can issue pull dequeue operations
++ * on that traffic source for as long as it likes. Eventually it may wish to
++ * "rearm" that source to allow it to produce another FQDAN/CDAN; that is what
++ * this function achieves.
++ *
++ * Return 0 for success.
++ */
++int dpaa2_io_service_rearm(struct dpaa2_io *d,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ unsigned long irqflags;
++ int err;
++
++ d = service_select_by_cpu(d, ctx->desired_cpu);
++	if (unlikely(!d))
++ return -ENODEV;
++
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ if (ctx->is_cdan)
++ err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
++ else
++ err = qbman_swp_fq_schedule(d->swp, ctx->id);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_rearm);
++
++/**
++ * dpaa2_io_service_pull_fq() - pull dequeue frames from a fq.
++ * @d: the given DPIO service.
++ * @fqid: the given frame queue id.
++ * @s: the dpaa2_io_store object for the result.
++ *
++ * Return 0 for success, or error code for failure.
++ */
++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
++ struct dpaa2_io_store *s)
++{
++ struct qbman_pull_desc pd;
++ int err;
++
++ qbman_pull_desc_clear(&pd);
++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
++ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
++ qbman_pull_desc_set_fq(&pd, fqid);
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++ s->swp = d->swp;
++ err = qbman_swp_pull(d->swp, &pd);
++ if (err)
++ s->swp = NULL;
++
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
++
++/**
++ * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
++ * @d: the given DPIO service.
++ * @channelid: the given channel id.
++ * @s: the dpaa2_io_store object for the result.
++ *
++ * Return 0 for success, or error code for failure.
++ */
++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
++ struct dpaa2_io_store *s)
++{
++ struct qbman_pull_desc pd;
++ int err;
++
++ qbman_pull_desc_clear(&pd);
++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
++ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
++ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ s->swp = d->swp;
++ err = qbman_swp_pull(d->swp, &pd);
++ if (err)
++ s->swp = NULL;
++
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_pull_channel);
++
++/**
++ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
++ * @d: the given DPIO service.
++ * @fqid: the given frame queue id.
++ * @fd: the frame descriptor which is enqueued.
++ *
++ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
++ u32 fqid,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc ed;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_no_orp(&ed, 0);
++ qbman_eq_desc_set_fq(&ed, fqid);
++
++ return qbman_swp_enqueue(d->swp, &ed, fd);
++}
++EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
++
++/**
++ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
++ * @d: the given DPIO service.
++ * @qdid: the given queuing destination id.
++ * @prio: the given queuing priority.
++ * @qdbin: the given queuing destination bin.
++ * @fd: the frame descriptor which is enqueued.
++ *
++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
++ u32 qdid, u8 prio, u16 qdbin,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc ed;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_no_orp(&ed, 0);
++ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
++
++ return qbman_swp_enqueue(d->swp, &ed, fd);
++}
++EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd);
++
++/**
++ * dpaa2_io_service_release() - Release buffers to a buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the buffer pool id.
++ * @buffers: the buffers to be released.
++ * @num_buffers: the number of the buffers to be released.
++ *
++ * Return 0 for success, and negative error code for failure.
++ */
++int dpaa2_io_service_release(struct dpaa2_io *d,
++ u32 bpid,
++ const u64 *buffers,
++ unsigned int num_buffers)
++{
++ struct qbman_release_desc rd;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ qbman_release_desc_clear(&rd);
++ qbman_release_desc_set_bpid(&rd, bpid);
++
++ return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
++}
++EXPORT_SYMBOL(dpaa2_io_service_release);
++
++/**
++ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the buffer pool id.
++ * @buffers: the buffer addresses for acquired buffers.
++ * @num_buffers: the expected number of the buffers to acquire.
++ *
++ * Return a negative error code if the command failed, otherwise it returns
++ * the number of buffers acquired, which may be less than the number requested.
++ * E.g. if the buffer pool is empty, this will return zero.
++ */
++int dpaa2_io_service_acquire(struct dpaa2_io *d,
++ u32 bpid,
++ u64 *buffers,
++ unsigned int num_buffers)
++{
++ unsigned long irqflags;
++ int err;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_acquire);
++
++/*
++ * 'Stores' are reusable memory blocks for holding dequeue results and for
++ * assisting with parsing those results.
++ */
++
++/**
++ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
++ * @max_frames: the maximum number of dequeue results, must be <= 16.
++ * @dev: the device to allow mapping/unmapping the DMAable region.
++ *
++ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
++ * The 'dpaa2_io_store' returned is a DPIO service managed object.
++ *
++ * Return pointer to dpaa2_io_store struct for successfully created storage
++ * memory, or NULL on error.
++ */
++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
++ struct device *dev)
++{
++ struct dpaa2_io_store *ret;
++ size_t size;
++
++ if (!max_frames || (max_frames > 16))
++ return NULL;
++
++ ret = kmalloc(sizeof(*ret), GFP_KERNEL);
++ if (!ret)
++ return NULL;
++
++ ret->max = max_frames;
++ size = max_frames * sizeof(struct dpaa2_dq) + 64;
++ ret->alloced_addr = kzalloc(size, GFP_KERNEL);
++ if (!ret->alloced_addr) {
++ kfree(ret);
++ return NULL;
++ }
++
++ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
++ ret->paddr = dma_map_single(dev, ret->vaddr,
++ sizeof(struct dpaa2_dq) * max_frames,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, ret->paddr)) {
++ kfree(ret->alloced_addr);
++ kfree(ret);
++ return NULL;
++ }
++
++ ret->idx = 0;
++ ret->dev = dev;
++
++ return ret;
++}
++EXPORT_SYMBOL(dpaa2_io_store_create);
++
++/**
++ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
++ * result.
++ * @s: the storage memory to be destroyed.
++ */
++void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
++{
++ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
++ DMA_FROM_DEVICE);
++ kfree(s->alloced_addr);
++ kfree(s);
++}
++EXPORT_SYMBOL(dpaa2_io_store_destroy);
++
++/**
++ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
++ * @s: the dpaa2_io_store object.
++ * @is_last: indicate whether this is the last frame in the pull command.
++ *
++ * When an object driver performs dequeues to a dpaa2_io_store, this function
++ * can be used to determine when the next frame result is available. Once
++ * this function returns non-NULL, a subsequent call to it will try to find
++ * the next dequeue result.
++ *
++ * Note that if a pull-dequeue has a NULL result because the target FQ/channel
++ * was empty, then this function will also return NULL (rather than expecting
++ * the caller to always check for this). As such, "is_last" can be used to
++ * differentiate between "end-of-empty-dequeue" and "still-waiting".
++ *
++ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
++ */
++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
++{
++ int match;
++ struct dpaa2_dq *ret = &s->vaddr[s->idx];
++
++ match = qbman_result_has_new_result(s->swp, ret);
++ if (!match) {
++ *is_last = 0;
++ return NULL;
++ }
++
++ s->idx++;
++
++ if (dpaa2_dq_is_pull_complete(ret)) {
++ *is_last = 1;
++ s->idx = 0;
++ /*
++ * If we get an empty dequeue result to terminate a zero-results
++ * vdqcr, return NULL to the caller rather than expecting him to
++ * check non-NULL results every time.
++ */
++ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
++ ret = NULL;
++ } else {
++ *is_last = 0;
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(dpaa2_io_store_next);
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++/**
++ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
++ * @d: the given DPIO object.
++ * @fqid: the id of frame queue to be queried.
++ * @fcnt: the queried frame count.
++ * @bcnt: the queried byte count.
++ *
++ * Knowing the FQ count at run-time can be useful in debugging situations.
++ * The instantaneous frame and byte counts are returned.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid,
++ u32 *fcnt, u32 *bcnt)
++{
++ struct qbman_attr state;
++ struct qbman_swp *swp;
++ unsigned long irqflags;
++ int ret;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ swp = d->swp;
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ ret = qbman_fq_query_state(swp, fqid, &state);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++ if (ret)
++ return ret;
++ *fcnt = qbman_fq_state_frame_count(&state);
++ *bcnt = qbman_fq_state_byte_count(&state);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_query_fq_count);
++
++/**
++ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
++ * buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the index of buffer pool to be queried.
++ * @num: the queried number of buffers in the buffer pool.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, u32 *num)
++{
++ struct qbman_attr state;
++ struct qbman_swp *swp;
++ unsigned long irqflags;
++ int ret;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ swp = d->swp;
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ ret = qbman_bp_query(swp, bpid, &state);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++ if (ret)
++ return ret;
++ *num = qbman_bp_info_num_free_bufs(&state);
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_query_bp_count);
++#endif
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/staging/fsl-mc/bus/dpio/dpio.c
+new file mode 100644
+index 00000000..d81e0232
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
+@@ -0,0 +1,224 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../include/mc-sys.h"
++#include "../../include/mc-cmd.h"
++
++#include "dpio.h"
++#include "dpio-cmd.h"
++
++/*
++ * Data Path I/O Portal API
++ * Contains initialization APIs and runtime control APIs for DPIO
++ */
++
++/**
++ * dpio_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpio_id: DPIO unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpio_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpio_id,
++ u16 *token)
++{
++ struct mc_command cmd = { 0 };
++ struct dpio_cmd_open *dpio_cmd;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ dpio_cmd = (struct dpio_cmd_open *)cmd.params;
++ dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
++
++/**
++ * dpio_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpio_get_attributes() - Retrieve DPIO attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpio_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ struct dpio_rsp_get_attr *dpio_rsp;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
++ attr->id = le32_to_cpu(dpio_rsp->id);
++ attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
++ attr->num_priorities = dpio_rsp->num_priorities;
++ attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
++ attr->qbman_portal_ce_offset =
++ le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
++ attr->qbman_portal_ci_offset =
++ le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
++ attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
++
++ return 0;
++}
++
++/**
++ * dpio_get_api_version() - Get Data Path I/O API version
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of DPIO API
++ * @minor_ver: Minor version of DPIO API
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
++
++ return 0;
++}
+diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.h b/drivers/staging/fsl-mc/bus/dpio/dpio.h
+new file mode 100644
+index 00000000..ced1103d
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.h
+@@ -0,0 +1,109 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPIO_H
++#define __FSL_DPIO_H
++
++struct fsl_mc_io;
++
++int dpio_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpio_id,
++ u16 *token);
++
++int dpio_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * enum dpio_channel_mode - DPIO notification channel mode
++ * @DPIO_NO_CHANNEL: No support for notification channel
++ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
++ * dedicated channel in the DPIO; user should point the queue's
++ * destination in the relevant interface to this DPIO
++ */
++enum dpio_channel_mode {
++ DPIO_NO_CHANNEL = 0,
++ DPIO_LOCAL_CHANNEL = 1,
++};
++
++/**
++ * struct dpio_cfg - Structure representing DPIO configuration
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ */
++struct dpio_cfg {
++ enum dpio_channel_mode channel_mode;
++ u8 num_priorities;
++};
++
++int dpio_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpio_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * struct dpio_attr - Structure representing DPIO attributes
++ * @id: DPIO object ID
++ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
++ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
++ * @qbman_portal_id: Software portal ID
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ * @qbman_version: QBMAN version
++ */
++struct dpio_attr {
++ int id;
++ u64 qbman_portal_ce_offset;
++ u64 qbman_portal_ci_offset;
++ u16 qbman_portal_id;
++ enum dpio_channel_mode channel_mode;
++ u8 num_priorities;
++ u32 qbman_version;
++};
++
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpio_attr *attr);
++
++int dpio_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
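++
++/*
++ * Typical call sequence (a sketch only; error handling omitted): the DPIO
++ * object driver opens a command session, queries and enables the object,
++ * and tears the session down in the reverse order when the object goes away:
++ *
++ *	dpio_open(mc_io, 0, dpio_id, &token);
++ *	dpio_get_attributes(mc_io, 0, token, &attr);
++ *	dpio_enable(mc_io, 0, token);
++ *	...
++ *	dpio_disable(mc_io, 0, token);
++ *	dpio_close(mc_io, 0, token);
++ */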
++
++#endif /* __FSL_DPIO_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
+new file mode 100644
+index 00000000..e14fb65b
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
+@@ -0,0 +1,1049 @@
++/*
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <asm/cacheflush.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include "../../include/dpaa2-global.h"
++
++#include "qbman-portal.h"
++
++struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
++struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
++
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++#define QMAN_REV_MASK 0xffff0000
++
++/* All QBMan command and result structures use this "valid bit" encoding */
++#define QB_VALID_BIT ((u32)0x80)
++
++/* QBMan portal management command codes */
++#define QBMAN_MC_ACQUIRE 0x30
++#define QBMAN_WQCHAN_CONFIGURE 0x46
++
++/* CINH register offsets */
++#define QBMAN_CINH_SWP_EQAR 0x8c0
++#define QBMAN_CINH_SWP_DQPI 0xa00
++#define QBMAN_CINH_SWP_DCAP 0xac0
++#define QBMAN_CINH_SWP_SDQCR 0xb00
++#define QBMAN_CINH_SWP_RAR 0xcc0
++#define QBMAN_CINH_SWP_ISR 0xe00
++#define QBMAN_CINH_SWP_IER 0xe40
++#define QBMAN_CINH_SWP_ISDR 0xe80
++#define QBMAN_CINH_SWP_IIR 0xec0
++
++/* CENA register offsets */
++#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_CR 0x600
++#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
++#define QBMAN_CENA_SWP_VDQCR 0x780
++
++/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
++#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
++
++/* Define token used to determine if response written to memory is valid */
++#define QMAN_DQ_TOKEN_VALID 1
++
++/* SDQCR attribute codes */
++#define QB_SDQCR_FC_SHIFT 29
++#define QB_SDQCR_FC_MASK 0x1
++#define QB_SDQCR_DCT_SHIFT 24
++#define QB_SDQCR_DCT_MASK 0x3
++#define QB_SDQCR_TOK_SHIFT 16
++#define QB_SDQCR_TOK_MASK 0xff
++#define QB_SDQCR_SRC_SHIFT 0
++#define QB_SDQCR_SRC_MASK 0xffff
++
++/* opaque token for static dequeues */
++#define QMAN_SDQCR_TOKEN 0xbb
++
++enum qbman_sdqcr_dct {
++ qbman_sdqcr_dct_null = 0,
++ qbman_sdqcr_dct_prio_ics,
++ qbman_sdqcr_dct_active_ics,
++ qbman_sdqcr_dct_active
++};
++
++enum qbman_sdqcr_fc {
++ qbman_sdqcr_fc_one = 0,
++ qbman_sdqcr_fc_up_to_3 = 1
++};
++
++#define dccvac(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
++#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
++static inline void qbman_inval_prefetch(struct qbman_swp *p, uint32_t offset)
++{
++ dcivac(p->addr_cena + offset);
++ prefetch(p->addr_cena + offset);
++}
++
++/* Portal Access */
++
++static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
++{
++ return readl_relaxed(p->addr_cinh + offset);
++}
++
++static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
++ u32 value)
++{
++ writel_relaxed(value, p->addr_cinh + offset);
++}
++
++static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
++{
++ return p->addr_cena + offset;
++}
++
++#define QBMAN_CINH_SWP_CFG 0xd00
++
++#define SWP_CFG_DQRR_MF_SHIFT 20
++#define SWP_CFG_EST_SHIFT 16
++#define SWP_CFG_WN_SHIFT 14
++#define SWP_CFG_RPM_SHIFT 12
++#define SWP_CFG_DCM_SHIFT 10
++#define SWP_CFG_EPM_SHIFT 8
++#define SWP_CFG_SD_SHIFT 5
++#define SWP_CFG_SP_SHIFT 4
++#define SWP_CFG_SE_SHIFT 3
++#define SWP_CFG_DP_SHIFT 2
++#define SWP_CFG_DE_SHIFT 1
++#define SWP_CFG_EP_SHIFT 0
++
++static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
++ u8 epm, int sd, int sp, int se,
++ int dp, int de, int ep)
++{
++ return cpu_to_le32 (max_fill << SWP_CFG_DQRR_MF_SHIFT |
++ est << SWP_CFG_EST_SHIFT |
++ wn << SWP_CFG_WN_SHIFT |
++ rpm << SWP_CFG_RPM_SHIFT |
++ dcm << SWP_CFG_DCM_SHIFT |
++ epm << SWP_CFG_EPM_SHIFT |
++ sd << SWP_CFG_SD_SHIFT |
++ sp << SWP_CFG_SP_SHIFT |
++ se << SWP_CFG_SE_SHIFT |
++ dp << SWP_CFG_DP_SHIFT |
++ de << SWP_CFG_DE_SHIFT |
++ ep << SWP_CFG_EP_SHIFT);
++}
++
++/**
++ * qbman_swp_init() - Create a functional object representing the given
++ * QBMan portal descriptor.
++ * @d: the given qbman swp descriptor
++ *
++ * Return qbman_swp portal for success, NULL if the object cannot
++ * be created.
++ */
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
++{
++ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
++ u32 reg;
++
++ if (!p)
++ return NULL;
++ p->desc = d;
++ p->mc.valid_bit = QB_VALID_BIT;
++ p->sdq = 0;
++ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
++ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
++ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
++
++ atomic_set(&p->vdq.available, 1);
++ p->vdq.valid_bit = QB_VALID_BIT;
++ p->dqrr.next_idx = 0;
++ p->dqrr.valid_bit = QB_VALID_BIT;
++
++ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
++ p->dqrr.dqrr_size = 4;
++ p->dqrr.reset_bug = 1;
++ } else {
++ p->dqrr.dqrr_size = 8;
++ p->dqrr.reset_bug = 0;
++ }
++
++ p->addr_cena = d->cena_bar;
++ p->addr_cinh = d->cinh_bar;
++
++ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
++ 0, /* Writes cacheable */
++ 0, /* EQCR_CI stashing threshold */
++ 3, /* RPM: Valid bit mode, RCR in array mode */
++ 2, /* DCM: Discrete consumption ack mode */
++ 3, /* EPM: Valid bit mode, EQCR in array mode */
++ 0, /* mem stashing drop enable == FALSE */
++ 1, /* mem stashing priority == TRUE */
++ 0, /* mem stashing enable == FALSE */
++ 1, /* dequeue stashing priority == TRUE */
++ 0, /* dequeue stashing enable == FALSE */
++ 0); /* EQCR_CI stashing priority == FALSE */
++
++ qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
++ reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
++	if (!reg) {
++		pr_err("qbman: the portal is not enabled!\n");
++		kfree(p);
++		return NULL;
++	}
++
++ /*
++ * SDQCR needs to be initialized to 0 when no channels are
++ * being dequeued from or else the QMan HW will indicate an
++ * error. The values that were calculated above will be
++ * applied when dequeues from a specific channel are enabled.
++ */
++ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
++ return p;
++}
++
++/**
++ * qbman_swp_finish() - Create and destroy a functional object representing
++ * the given QBMan portal descriptor.
++ * @p: the qbman_swp object to be destroyed
++ */
++void qbman_swp_finish(struct qbman_swp *p)
++{
++ kfree(p);
++}
++
++/**
++ * qbman_swp_interrupt_read_status()
++ * @p: the given software portal
++ *
++ * Return the value in the SWP_ISR register.
++ */
++u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
++{
++ return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
++}
++
++/**
++ * qbman_swp_interrupt_clear_status()
++ * @p: the given software portal
++ * @mask: The mask to clear in SWP_ISR register
++ */
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
++{
++ qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
++}
++
++/**
++ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
++ * @p: the given software portal
++ *
++ * Return the value in the SWP_IER register.
++ */
++u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
++{
++ return qbman_read_register(p, QBMAN_CINH_SWP_IER);
++}
++
++/**
++ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
++ * @p: the given software portal
++ * @mask: The mask of bits to enable in SWP_IER
++ */
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
++{
++ qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
++}
++
++/**
++ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
++ * @p: the given software portal object
++ *
++ * Return the value in the SWP_IIR register.
++ */
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
++{
++ return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
++}
++
++/**
++ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
++ * @p: the given software portal object
++ * @inhibit: non-zero to inhibit all interrupts, zero to un-inhibit them
++ */
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
++{
++ qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
++}
++
++/*
++ * Different management commands all use this common base layer of code to issue
++ * commands and poll for results.
++ */
++
++/*
++ * Returns a pointer to where the caller should fill in their management command
++ * (caller should ignore the verb byte)
++ */
++void *qbman_swp_mc_start(struct qbman_swp *p)
++{
++ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
++}
++
++/*
++ * Commits the command: merges in the caller-supplied command verb (which
++ * should not include the valid-bit) and submits the command to hardware.
++ */
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
++{
++ u8 *v = cmd;
++
++ dma_wmb();
++ *v = cmd_verb | p->mc.valid_bit;
++ dccvac(cmd);
++}
++
++/*
++ * Checks for a completed response (returns non-NULL only if the response
++ * is complete).
++ */
++void *qbman_swp_mc_result(struct qbman_swp *p)
++{
++ u32 *ret, verb;
++
++ qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++
++ /* Remove the valid-bit - command completed if the rest is non-zero */
++ verb = ret[0] & ~QB_VALID_BIT;
++ if (!verb)
++ return NULL;
++ p->mc.valid_bit ^= QB_VALID_BIT;
++ return ret;
++}
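++
++/*
++ * Callers typically combine the three helpers above along these lines
++ * (a sketch only; a real caller would bound the polling loop):
++ *
++ *	cmd = qbman_swp_mc_start(p);
++ *	(fill in the command body, leaving the verb byte for mc_submit)
++ *	qbman_swp_mc_submit(p, cmd, verb);
++ *	do {
++ *		rsp = qbman_swp_mc_result(p);
++ *	} while (!rsp);
++ */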
++
++#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
++enum qb_enqueue_commands {
++ enqueue_empty = 0,
++ enqueue_response_always = 1,
++ enqueue_rejects_to_fq = 2
++};
++
++#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
++#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
++#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
++
++/**
++ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_eq_desc_clear(struct qbman_eq_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++/**
++ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
++ */
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
++{
++ d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
++ if (respond_success)
++ d->verb |= enqueue_response_always;
++ else
++ d->verb |= enqueue_rejects_to_fq;
++}
++
++/*
++ * Exactly one of the following descriptor "targets" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * -enqueue to a frame queue
++ * -enqueue to a queuing destination
++ */
++
++/**
++ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
++ * @d: the enqueue descriptor
++ * @fqid: the id of the frame queue to be enqueued
++ */
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
++{
++ d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
++ d->tgtid = cpu_to_le32(fqid);
++}
++
++/**
++ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
++ * @d: the enqueue descriptor
++ * @qdid: the id of the queuing destination to be enqueued
++ * @qd_bin: the queuing destination bin
++ * @qd_prio: the queuing destination priority
++ */
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
++ u32 qd_bin, u32 qd_prio)
++{
++ d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
++ d->tgtid = cpu_to_le32(qdid);
++ d->qdbin = cpu_to_le16(qd_bin);
++ d->qpri = qd_prio;
++}
++
++#define EQAR_IDX(eqar) ((eqar) & 0x7)
++#define EQAR_VB(eqar) ((eqar) & 0x80)
++#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
++
++/**
++ * qbman_swp_enqueue() - Issue an enqueue command
++ * @s: the software portal used for enqueue
++ * @d: the enqueue descriptor
++ * @fd: the frame descriptor to be enqueued
++ *
++ * Please note that 'fd' should only be NULL if the "action" of the
++ * descriptor is "orp_hole" or "orp_nesn".
++ *
++ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
++ */
++int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc *p;
++ u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
++
++ if (!EQAR_SUCCESS(eqar))
++ return -EBUSY;
++
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
++ memcpy(&p->dca, &d->dca, 31);
++ memcpy(&p->fd, fd, sizeof(*fd));
++
++ /* Set the verb byte, have to substitute in the valid-bit */
++ dma_wmb();
++ p->verb = d->verb | EQAR_VB(eqar);
++ dccvac(p);
++
++ return 0;
++}
++
++/* Static (push) dequeue */
++
++/**
++ * qbman_swp_push_get() - Get the push dequeue setup
++ * @p: the software portal object
++ * @channel_idx: the channel index to query
++ * @enabled: returned boolean to show whether the push dequeue is enabled
++ * for the given channel
++ */
++void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
++{
++ u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
++
++ WARN_ON(channel_idx > 15);
++	*enabled = !!(src & (1 << channel_idx));
++}
++
++/**
++ * qbman_swp_push_set() - Enable or disable push dequeue
++ * @p: the software portal object
++ * @channel_idx: the channel index (0 to 15)
++ * @enable: enable or disable push dequeue
++ */
++void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
++{
++ u16 dqsrc;
++
++ WARN_ON(channel_idx > 15);
++ if (enable)
++ s->sdq |= 1 << channel_idx;
++ else
++ s->sdq &= ~(1 << channel_idx);
++
++	/* Read back the complete src map. If no channels are enabled
++ * the SDQCR must be 0 or else QMan will assert errors
++ */
++ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
++ if (dqsrc != 0)
++ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
++ else
++ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
++}
++
++#define QB_VDQCR_VERB_DCT_SHIFT 0
++#define QB_VDQCR_VERB_DT_SHIFT 2
++#define QB_VDQCR_VERB_RLS_SHIFT 4
++#define QB_VDQCR_VERB_WAE_SHIFT 5
++
++enum qb_pull_dt_e {
++ qb_pull_dt_channel,
++ qb_pull_dt_workqueue,
++ qb_pull_dt_framequeue
++};
++
++/**
++ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state
++ * @d: the pull dequeue descriptor to be cleared
++ */
++void qbman_pull_desc_clear(struct qbman_pull_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++/**
++ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
++ * @d: the pull dequeue descriptor to be set
++ * @storage: the pointer of the memory to store the dequeue result
++ * @storage_phys: the physical address of the storage memory
++ * @stash: to indicate whether write allocate is enabled
++ *
++ * If not called, or if called with 'storage' as NULL, the result pull dequeues
++ * will produce results to DQRR. If 'storage' is non-NULL, then results are
++ * produced to the given memory location (using the DMA address which
++ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
++ * those writes to main-memory express a cache-warming attribute.
++ */
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++ struct dpaa2_dq *storage,
++ dma_addr_t storage_phys,
++ int stash)
++{
++ /* save the virtual address */
++ d->rsp_addr_virt = (u64)storage;
++
++ if (!storage) {
++ d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
++ return;
++ }
++ d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
++ if (stash)
++ d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
++ else
++ d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
++
++ d->rsp_addr = cpu_to_le64(storage_phys);
++}
++
++/**
++ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
++ * @d: the pull dequeue descriptor to be set
++ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
++ */
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
++{
++ d->numf = numframes - 1;
++}
++
++void qbman_pull_desc_set_token(struct qbman_pull_desc *d, u8 token)
++{
++ d->tok = token;
++}
++
++/*
++ * Exactly one of the following descriptor "actions" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - pull dequeue from the given frame queue (FQ)
++ * - pull dequeue from any FQ in the given work queue (WQ)
++ * - pull dequeue from any FQ in any WQ in the given channel
++ */
++
++/**
++ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
++ * @fqid: the frame queue index of the given FQ
++ */
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
++{
++ d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
++ d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
++ d->dq_src = cpu_to_le32(fqid);
++}
++
++/**
++ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
++ * @wqid: composed of channel id and wqid within the channel
++ * @dct: the dequeue command type
++ */
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
++ enum qbman_pull_type_e dct)
++{
++ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
++ d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
++ d->dq_src = cpu_to_le32(wqid);
++}
++
++/**
++ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
++ * dequeues
++ * @chid: the channel id to be dequeued
++ * @dct: the dequeue command type
++ */
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
++ enum qbman_pull_type_e dct)
++{
++ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
++ d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
++ d->dq_src = cpu_to_le32(chid);
++}
++
++/**
++ * qbman_swp_pull() - Issue the pull dequeue command
++ * @s: the software portal object
++ * @d: the software portal descriptor which has been configured with
++ * the set of qbman_pull_desc_set_*() calls
++ *
++ * Return 0 for success, and -EBUSY if the software portal is not ready
++ * to do pull dequeue.
++ */
++int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
++{
++ struct qbman_pull_desc *p;
++
++ if (!atomic_dec_and_test(&s->vdq.available)) {
++ atomic_inc(&s->vdq.available);
++ return -EBUSY;
++ }
++ s->vdq.storage = (void *)d->rsp_addr_virt;
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
++ p->numf = d->numf;
++ p->tok = QMAN_DQ_TOKEN_VALID;
++ p->dq_src = d->dq_src;
++ p->rsp_addr = d->rsp_addr;
++ p->rsp_addr_virt = d->rsp_addr_virt;
++ dma_wmb();
++
++ /* Set the verb byte, have to substitute in the valid-bit */
++ p->verb = d->verb | s->vdq.valid_bit;
++ s->vdq.valid_bit ^= QB_VALID_BIT;
++ dccvac(p);
++
++ return 0;
++}
++
++#define QMAN_DQRR_PI_MASK 0xf
++
++/**
++ * qbman_swp_dqrr_next() - Get a valid DQRR entry
++ * @s: the software portal object
++ *
++ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
++ * only once, so repeated calls can return a sequence of DQRR entries, without
++ * requiring they be consumed immediately or in any particular order.
++ */
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
++{
++ u32 verb;
++ u32 response_verb;
++ u32 flags;
++ struct dpaa2_dq *p;
++
++ /* Before using valid-bit to detect if something is there, we have to
++ * handle the case of the DQRR reset bug...
++ */
++ if (unlikely(s->dqrr.reset_bug)) {
++ /*
++ * We pick up new entries by cache-inhibited producer index,
++ * which means that a non-coherent mapping would require us to
++ * invalidate and read *only* once that PI has indicated that
++ * there's an entry here. The first trip around the DQRR ring
++ * will be much less efficient than all subsequent trips around
++ * it...
++ */
++ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
++ QMAN_DQRR_PI_MASK;
++
++ /* there are new entries if pi != next_idx */
++ if (pi == s->dqrr.next_idx)
++ return NULL;
++
++ /*
++ * if next_idx is/was the last ring index, and 'pi' is
++ * different, we can disable the workaround as all the ring
++ * entries have now been DMA'd to so valid-bit checking is
++ * repaired. Note: this logic needs to be based on next_idx
++ * (which increments one at a time), rather than on pi (which
++ * can burst and wrap-around between our snapshots of it).
++ */
++ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
++ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
++ s->dqrr.next_idx, pi);
++ s->dqrr.reset_bug = 0;
++ }
++ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ }
++
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ verb = p->dq.verb;
++
++ /*
++ * If the valid-bit isn't of the expected polarity, nothing there. Note,
++ * in the DQRR reset bug workaround, we shouldn't need to skip these
++	 * checks, because we've already determined that a new entry is available
++ * and we've invalidated the cacheline before reading it, so the
++ * valid-bit behaviour is repaired and should tell us what we already
++ * knew from reading PI.
++ */
++ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
++ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ return NULL;
++ }
++ /*
++ * There's something there. Move "next_idx" attention to the next ring
++ * entry (and prefetch it) before returning what we found.
++ */
++ s->dqrr.next_idx++;
++ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
++ if (!s->dqrr.next_idx)
++ s->dqrr.valid_bit ^= QB_VALID_BIT;
++
++ /*
++ * If this is the final response to a volatile dequeue command
++ * indicate that the vdq is available
++ */
++ flags = p->dq.stat;
++ response_verb = verb & QBMAN_RESULT_MASK;
++ if ((response_verb == QBMAN_RESULT_DQ) &&
++ (flags & DPAA2_DQ_STAT_VOLATILE) &&
++ (flags & DPAA2_DQ_STAT_EXPIRED))
++ atomic_inc(&s->vdq.available);
++
++ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++
++ return p;
++}
++
++/**
++ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
++ * qbman_swp_dqrr_next().
++ * @s: the software portal object
++ * @dq: the DQRR entry to be consumed
++ */
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
++{
++ qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
++}
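As a usage illustration (not part of the patch), a sketch of the polling loop these two functions are designed for, assuming an initialised portal and using the qbman_result_is_DQ() helper from qbman-portal.h:

static void example_drain_dqrr(struct qbman_swp *swp)
{
	const struct dpaa2_dq *dq;

	/* qbman_swp_dqrr_next() returns NULL once the ring is drained */
	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
		if (qbman_result_is_DQ(dq)) {
			/* a real consumer would process the frame here */
		}
		/* tell hardware the DQRR entry can be reused */
		qbman_swp_dqrr_consume(swp, dq);
	}
}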
++
++/**
++ * qbman_result_has_new_result() - Check and get the dequeue response from the
++ * dq storage memory set in the pull dequeue command
++ * @s: the software portal object
++ * @dq: the dequeue result read from the memory
++ *
++ * Return 1 if a valid dequeue result was obtained, or 0 if not.
++ *
++ * Only used for user-provided storage of dequeue results, not DQRR. For
++ * efficiency purposes, the driver will perform any required endianness
++ * conversion to ensure that the user's dequeue result storage is in host-endian
++ * format. As such, once the user has called qbman_result_has_new_result() and
++ * been returned a valid dequeue result, they should not call it again on
++ * the same memory location (except of course if another dequeue command has
++ * been executed to produce a new result to that location).
++ */
++int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
++{
++ if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
++ return 0;
++
++ /*
++	 * Set the token to 0 so that a change back to 1 is detected the
++	 * next time this location is polled. Const is cast away here
++ * as we want users to treat the dequeue responses as read only.
++ */
++ ((struct dpaa2_dq *)dq)->dq.tok = 0;
++
++ /*
++ * Determine whether VDQCR is available based on whether the
++ * current result is sitting in the first storage location of
++ * the busy command.
++ */
++ if (s->vdq.storage == dq) {
++ s->vdq.storage = NULL;
++ atomic_inc(&s->vdq.available);
++ }
++
++ return 1;
++}
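A hedged sketch (assumed helper name, busy-wait chosen only for brevity) of how a caller might wait on the storage written by a volatile dequeue:

static const struct dpaa2_dq *example_wait_vdq_result(struct qbman_swp *swp,
						      const struct dpaa2_dq *store)
{
	/* spin until the hardware writes a valid token into the storage */
	while (!qbman_result_has_new_result(swp, store))
		cpu_relax();

	return store;
}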
++
++/**
++ * qbman_release_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_release_desc_clear(struct qbman_release_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++ d->verb = 1 << 5; /* Release Command Valid */
++}
++
++/**
++ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
++ */
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
++{
++ d->bpid = cpu_to_le16(bpid);
++}
++
++/**
++ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
++ * interrupt source should be asserted after the release command is completed.
++ */
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
++{
++ if (enable)
++ d->verb |= 1 << 6;
++ else
++ d->verb &= ~(1 << 6);
++}
++
++#define RAR_IDX(rar) ((rar) & 0x7)
++#define RAR_VB(rar) ((rar) & 0x80)
++#define RAR_SUCCESS(rar) ((rar) & 0x100)
++
++/**
++ * qbman_swp_release() - Issue a buffer release command
++ * @s: the software portal object
++ * @d: the release descriptor
++ * @buffers: a pointer to the buffer addresses to be released
++ * @num_buffers: number of buffers to be released, must be less than 8
++ *
++ * Return 0 for success, -EBUSY if the release command ring is not ready.
++ */
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const u64 *buffers, unsigned int num_buffers)
++{
++ int i;
++ struct qbman_release_desc *p;
++ u32 rar;
++
++ if (!num_buffers || (num_buffers > 7))
++ return -EINVAL;
++
++ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
++ if (!RAR_SUCCESS(rar))
++ return -EBUSY;
++
++ /* Start the release command */
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
++ /* Copy the caller's buffer pointers to the command */
++ for (i = 0; i < num_buffers; i++)
++ p->buf[i] = cpu_to_le64(buffers[i]);
++ p->bpid = d->bpid;
++
++ /*
++ * Set the verb byte, have to substitute in the valid-bit and the number
++ * of buffers.
++ */
++ dma_wmb();
++ p->verb = d->verb | RAR_VB(rar) | num_buffers;
++ dccvac(p);
++
++ return 0;
++}
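An illustrative seeding helper (assumed name, not from the patch) built on the release descriptor API:

static int example_seed_pool(struct qbman_swp *swp, u16 bpid,
			     const u64 *bufs, unsigned int n)
{
	struct qbman_release_desc rd;

	if (!n || n > 7)
		return -EINVAL;	/* one command releases at most 7 buffers */

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	/* -EBUSY here means the release command ring is temporarily full */
	return qbman_swp_release(swp, &rd, bufs, n);
}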
++
++struct qbman_acquire_desc {
++ u8 verb;
++ u8 reserved;
++ u16 bpid;
++ u8 num;
++ u8 reserved2[59];
++};
++
++struct qbman_acquire_rslt {
++ u8 verb;
++ u8 rslt;
++ u16 reserved;
++ u8 num;
++ u8 reserved2[3];
++ u64 buf[7];
++};
++
++/**
++ * qbman_swp_acquire() - Issue a buffer acquire command
++ * @s: the software portal object
++ * @bpid: the buffer pool index
++ * @buffers: a pointer to the array that receives the acquired buffer addresses
++ * @num_buffers: number of buffers to be acquired, must be less than 8
++ *
++ * Return the number of buffers acquired, or a negative error code if
++ * the acquire command fails.
++ */
++int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
++ unsigned int num_buffers)
++{
++ struct qbman_acquire_desc *p;
++ struct qbman_acquire_rslt *r;
++ int i;
++
++ if (!num_buffers || (num_buffers > 7))
++ return -EINVAL;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ p->bpid = cpu_to_le16(bpid);
++ p->num = num_buffers;
++
++ /* Complete the management command */
++ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
++ if (unlikely(!r)) {
++ pr_err("qbman: acquire from BPID %d failed, no response\n",
++ bpid);
++ return -EIO;
++ }
++
++ /* Decode the outcome */
++ WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
++
++ /* Determine success or failure */
++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
++ bpid, r->rslt);
++ return -EIO;
++ }
++
++ WARN_ON(r->num > num_buffers);
++
++ /* Copy the acquired buffers to the caller's array */
++ for (i = 0; i < r->num; i++)
++ buffers[i] = le64_to_cpu(r->buf[i]);
++
++ return (int)r->num;
++}
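A caller-side sketch (names assumed), highlighting that the return value is a buffer count rather than 0 on success:

static int example_drain_pool(struct qbman_swp *swp, u16 bpid)
{
	u64 bufs[7];
	int n;

	n = qbman_swp_acquire(swp, bpid, bufs, ARRAY_SIZE(bufs));
	if (n < 0)
		return n;		/* command failed */

	/* only bufs[0..n-1] contain valid buffer addresses */
	return n;
}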
++
++struct qbman_alt_fq_state_desc {
++ u8 verb;
++ u8 reserved[3];
++ u32 fqid;
++ u8 reserved2[56];
++};
++
++struct qbman_alt_fq_state_rslt {
++ u8 verb;
++ u8 rslt;
++ u8 reserved[62];
++};
++
++#define ALT_FQ_FQID_MASK 0x00FFFFFF
++
++int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
++ u8 alt_fq_verb)
++{
++ struct qbman_alt_fq_state_desc *p;
++ struct qbman_alt_fq_state_rslt *r;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ p->fqid = cpu_to_le32(fqid) & ALT_FQ_FQID_MASK;
++
++ /* Complete the management command */
++ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
++ if (unlikely(!r)) {
++ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
++ alt_fq_verb);
++ return -EIO;
++ }
++
++ /* Decode the outcome */
++ WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
++
++ /* Determine success or failure */
++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
++ fqid, r->verb, r->rslt);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++struct qbman_cdan_ctrl_desc {
++ u8 verb;
++ u8 reserved;
++ u16 ch;
++ u8 we;
++ u8 ctrl;
++ u16 reserved2;
++ u64 cdan_ctx;
++ u8 reserved3[48];
++
++};
++
++struct qbman_cdan_ctrl_rslt {
++ u8 verb;
++ u8 rslt;
++ u16 ch;
++ u8 reserved[60];
++};
++
++int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
++ u8 we_mask, u8 cdan_en,
++ u64 ctx)
++{
++ struct qbman_cdan_ctrl_desc *p = NULL;
++ struct qbman_cdan_ctrl_rslt *r = NULL;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ p->ch = cpu_to_le16(channelid);
++ p->we = we_mask;
++ if (cdan_en)
++ p->ctrl = 1;
++ else
++ p->ctrl = 0;
++ p->cdan_ctx = cpu_to_le64(ctx);
++
++ /* Complete the management command */
++ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
++ if (unlikely(!r)) {
++ pr_err("qbman: wqchan config failed, no response\n");
++ return -EIO;
++ }
++
++ WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
++
++ /* Determine success or failure */
++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
++ channelid, r->rslt);
++ return -EIO;
++ }
++
++ return 0;
++}
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+new file mode 100644
+index 00000000..4254034c
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+@@ -0,0 +1,662 @@
++/*
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_QBMAN_PORTAL_H
++#define __FSL_QBMAN_PORTAL_H
++
++#include "qbman_private.h"
++#include "../../include/dpaa2-fd.h"
++
++struct dpaa2_dq;
++struct qbman_swp;
++
++/* qbman software portal descriptor structure */
++struct qbman_swp_desc {
++ void *cena_bar; /* Cache-enabled portal base address */
++ void *cinh_bar; /* Cache-inhibited portal base address */
++ u32 qman_version;
++};
++
++#define QBMAN_SWP_INTERRUPT_EQRI 0x01
++#define QBMAN_SWP_INTERRUPT_EQDI 0x02
++#define QBMAN_SWP_INTERRUPT_DQRI 0x04
++#define QBMAN_SWP_INTERRUPT_RCRI 0x08
++#define QBMAN_SWP_INTERRUPT_RCDI 0x10
++#define QBMAN_SWP_INTERRUPT_VDCI 0x20
++
++/* the structure for pull dequeue descriptor */
++struct qbman_pull_desc {
++ u8 verb;
++ u8 numf;
++ u8 tok;
++ u8 reserved;
++ u32 dq_src;
++ u64 rsp_addr;
++ u64 rsp_addr_virt;
++ u8 padding[40];
++};
++
++enum qbman_pull_type_e {
++ /* dequeue with priority precedence, respect intra-class scheduling */
++ qbman_pull_type_prio = 1,
++ /* dequeue with active FQ precedence, respect ICS */
++ qbman_pull_type_active,
++ /* dequeue with active FQ precedence, no ICS */
++ qbman_pull_type_active_noics
++};
++
++/* Definitions for parsing dequeue entries */
++#define QBMAN_RESULT_MASK 0x7f
++#define QBMAN_RESULT_DQ 0x60
++#define QBMAN_RESULT_FQRN 0x21
++#define QBMAN_RESULT_FQRNI 0x22
++#define QBMAN_RESULT_FQPN 0x24
++#define QBMAN_RESULT_FQDAN 0x25
++#define QBMAN_RESULT_CDAN 0x26
++#define QBMAN_RESULT_CSCN_MEM 0x27
++#define QBMAN_RESULT_CGCU 0x28
++#define QBMAN_RESULT_BPSCN 0x29
++#define QBMAN_RESULT_CSCN_WQ 0x2a
++
++/* QBMan FQ management command codes */
++#define QBMAN_FQ_SCHEDULE 0x48
++#define QBMAN_FQ_FORCE 0x49
++#define QBMAN_FQ_XON 0x4d
++#define QBMAN_FQ_XOFF 0x4e
++
++/* structure of enqueue descriptor */
++struct qbman_eq_desc {
++ u8 verb;
++ u8 dca;
++ u16 seqnum;
++ u16 orpid;
++ u16 reserved1;
++ u32 tgtid;
++ u32 tag;
++ u16 qdbin;
++ u8 qpri;
++ u8 reserved[3];
++ u8 wae;
++ u8 rspid;
++ u64 rsp_addr;
++ u8 fd[32];
++};
++
++/* buffer release descriptor */
++struct qbman_release_desc {
++ u8 verb;
++ u8 reserved;
++ u16 bpid;
++ u32 reserved2;
++ u64 buf[7];
++};
++
++/* Management command result codes */
++#define QBMAN_MC_RSLT_OK 0xf0
++
++#define CODE_CDAN_WE_EN 0x1
++#define CODE_CDAN_WE_CTX 0x4
++
++/* portal data structure */
++struct qbman_swp {
++ const struct qbman_swp_desc *desc;
++ void __iomem *addr_cena;
++ void __iomem *addr_cinh;
++
++ /* Management commands */
++ struct {
++ u32 valid_bit; /* 0x00 or 0x80 */
++ } mc;
++
++ /* Push dequeues */
++ u32 sdq;
++
++ /* Volatile dequeues */
++ struct {
++ atomic_t available; /* indicates if a command can be sent */
++ u32 valid_bit; /* 0x00 or 0x80 */
++ struct dpaa2_dq *storage; /* NULL if DQRR */
++ } vdq;
++
++ /* DQRR */
++ struct {
++ u32 next_idx;
++ u32 valid_bit;
++ u8 dqrr_size;
++ int reset_bug; /* indicates dqrr reset workaround is needed */
++ } dqrr;
++};
++
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
++void qbman_swp_finish(struct qbman_swp *p);
++u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
++u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
++
++void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
++void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
++
++void qbman_pull_desc_clear(struct qbman_pull_desc *d);
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++ struct dpaa2_dq *storage,
++ dma_addr_t storage_phys,
++ int stash);
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
++ enum qbman_pull_type_e dct);
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
++ enum qbman_pull_type_e dct);
++
++int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
++
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
++
++int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
++
++void qbman_eq_desc_clear(struct qbman_eq_desc *d);
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
++ u32 qd_bin, u32 qd_prio);
++
++int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
++ const struct dpaa2_fd *fd);
++
++void qbman_release_desc_clear(struct qbman_release_desc *d);
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
++
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const u64 *buffers, unsigned int num_buffers);
++int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
++ unsigned int num_buffers);
++int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
++ u8 alt_fq_verb);
++int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
++ u8 we_mask, u8 cdan_en,
++ u64 ctx);
++
++void *qbman_swp_mc_start(struct qbman_swp *p);
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
++void *qbman_swp_mc_result(struct qbman_swp *p);
++
++/**
++ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
++ * @dq: the dequeue result to be checked
++ *
++ * DQRR entries may contain non-dequeue results, i.e. notifications
++ */
++static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
++}
++
++/**
++ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
++ * @dq: the dequeue result to be checked
++ *
++ */
++static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
++{
++ return !qbman_result_is_DQ(dq);
++}
++
++/* FQ Data Availability */
++static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
++}
++
++/* Channel Data Availability */
++static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
++}
++
++/* Congestion State Change */
++static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
++}
++
++/* Buffer Pool State Change */
++static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
++}
++
++/* Congestion Group Count Update */
++static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
++}
++
++/* Retirement */
++static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
++}
++
++/* Retirement Immediate */
++static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
++}
++
++/* Park */
++static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
++}
++
++/**
++ * qbman_result_SCN_state() - Get the state field in State-change notification
++ */
++static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
++{
++ return scn->scn.state;
++}
++
++#define SCN_RID_MASK 0x00FFFFFF
++
++/**
++ * qbman_result_SCN_rid() - Get the resource id in State-change notification
++ */
++static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
++{
++ return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
++}
++
++/**
++ * qbman_result_SCN_ctx() - Get the context data in State-change notification
++ */
++static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
++{
++ return le64_to_cpu(scn->scn.ctx);
++}
++
++/**
++ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
++ * @s: the software portal object
++ * @fqid: the index of the frame queue to be scheduled
++ *
++ * There are a couple of different ways that a FQ can end up in the parked
++ * state; this schedules it.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
++}
++
++/**
++ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
++ * @s: the software portal object
++ * @fqid: the index of frame queue to be forced
++ *
++ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
++ * and thus be available for selection by any channel-dequeuing behaviour (push
++ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
++ * empty at the time this happens, the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
++}
++
++/**
++ * qbman_swp_fq_xon() - sets FQ flow-control to XON
++ * @s: the software portal object
++ * @fqid: the index of frame queue
++ *
++ * This setting doesn't affect enqueues to the FQ, just dequeues.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
++}
++
++/**
++ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
++ * @s: the software portal object
++ * @fqid: the index of frame queue
++ *
++ * This setting doesn't affect enqueues to the FQ, just dequeues.
++ * XOFF FQs will remain in the tentatively-scheduled state, even when
++ * non-empty, meaning they won't be selected for scheduled dequeuing.
++ * If a FQ is changed to XOFF after it had already become truly-scheduled
++ * to a channel, and a pull dequeue of that channel occurs that selects
++ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
++}
++
++/* If the user has been allocated a channel object that is going to generate
++ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
++ * necessary.
++ *
++ * CDAN-enabled channels only generate a single CDAN notification, after which
++ * they need to be reenabled before they'll generate another. The idea is
++ * that pull dequeuing will occur in reaction to the CDAN, followed by a
++ * reenable step. Each function generates a distinct command to hardware, so a
++ * combination function is provided if the user wishes to modify the "context"
++ * (which shows up in each CDAN message) each time they reenable, as a single
++ * command to hardware.
++ */
++
++/**
++ * qbman_swp_CDAN_set_context() - Set CDAN context
++ * @s: the software portal object
++ * @channelid: the channel index
++ * @ctx: the context to be set in CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
++ u64 ctx)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_CTX,
++ 0, ctx);
++}
++
++/**
++ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 1, 0);
++}
++
++/**
++ * qbman_swp_CDAN_disable() - disable CDAN for the channel
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 0, 0);
++}
++
++/**
++ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ * @ctx: the context to be set in CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
++ u16 channelid,
++ u64 ctx)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
++ 1, ctx);
++}
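To make the re-arm workflow described in the comment above concrete, a hedged sketch of a CDAN handler. It assumes the caller stored the channel id in the CDAN context when arming, which is a convention of this sketch rather than of the API:

static void example_handle_cdan(struct qbman_swp *swp,
				const struct dpaa2_dq *cdan)
{
	/* this sketch assumes the context was set to the channel id */
	u16 ch = (u16)qbman_result_SCN_ctx(cdan);

	/* ... issue pull dequeues against the channel here ... */

	/* one CDAN per arming: re-enable so the next one can fire */
	qbman_swp_CDAN_enable(swp, ch);
}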
++
++/* Wraps up submit + poll-for-result */
++static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
++ u8 cmd_verb)
++{
++ int loopvar = 1000;
++
++ qbman_swp_mc_submit(swp, cmd, cmd_verb);
++
++ do {
++ cmd = qbman_swp_mc_result(swp);
++ } while (!cmd && loopvar--);
++
++ WARN_ON(!loopvar);
++
++ return cmd;
++}
++
++/* ------------ */
++/* qb_attr_code */
++/* ------------ */
++
++/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
++ * is either serving as a configuration command or a query result. The
++ * representation is inherently little-endian, as the indexing of the words is
++ * itself little-endian in nature and Layerscape is little-endian for anything
++ * that crosses a word boundary (64-bit fields being the obvious example).
++ */
++struct qb_attr_code {
++ unsigned int word; /* which u32[] array member encodes the field */
++ unsigned int lsoffset; /* encoding offset from ls-bit */
++ unsigned int width; /* encoding width. (bool must be 1.) */
++};
++
++/* Some pre-defined codes */
++extern struct qb_attr_code code_generic_verb;
++extern struct qb_attr_code code_generic_rslt;
++
++/* Macros to define codes */
++#define QB_CODE(a, b, c) { a, b, c}
++#define QB_CODE_NULL \
++ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)
++
++/* Rotate a code "ms", meaning that it moves from less-significant bytes to
++ * more-significant, from less-significant words to more-significant, etc. The
++ * "ls" version does the inverse, from more-significant towards
++ * less-significant.
++ */
++static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,
++ unsigned int bits)
++{
++ code->lsoffset += bits;
++ while (code->lsoffset > 31) {
++ code->word++;
++ code->lsoffset -= 32;
++ }
++}
++
++static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,
++ unsigned int bits)
++{
++ /* Don't be fooled, this trick should work because the types are
++ * unsigned. So the case that interests the while loop (the rotate has
++ * gone too far and the word count needs to compensate for it), is
++ * manifested when lsoffset is negative. But that equates to a really
++ * large unsigned value, starting with lots of "F"s. As such, we can
++ * continue adding 32 back to it until it wraps back round above zero,
++ * to a value of 31 or less...
++ */
++ code->lsoffset -= bits;
++ while (code->lsoffset > 31) {
++ code->word--;
++ code->lsoffset += 32;
++ }
++}
++
++/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */
++#define qb_attr_code_for_ms(code, bits, expr) \
++ for (; expr; qb_attr_code_rotate_ms(code, bits))
++#define qb_attr_code_for_ls(code, bits, expr) \
++ for (; expr; qb_attr_code_rotate_ls(code, bits))
++
++static inline void word_copy(void *d, const void *s, unsigned int cnt)
++{
++ u32 *dd = d;
++ const u32 *ss = s;
++
++ while (cnt--)
++ *(dd++) = *(ss++);
++}
++
++/*
++ * Currently, the CENA support code expects each 32-bit word to be written in
++ * host order, and these are converted to hardware (little-endian) order on
++ * command submission. However, 64-bit quantities must be written (and read)
++ * as two 32-bit words with the least-significant word first, irrespective of
++ * host endianness.
++ */
++static inline void u64_to_le32_copy(void *d, const u64 *s,
++ unsigned int cnt)
++{
++ u32 *dd = d;
++ const u32 *ss = (const u32 *)s;
++
++ while (cnt--) {
++ /*
++ * TBD: the toolchain was choking on the use of 64-bit types up
++ * until recently so this works entirely with 32-bit variables.
++ * When 64-bit types become usable again, investigate better
++ * ways of doing this.
++ */
++#if defined(__BIG_ENDIAN)
++ *(dd++) = ss[1];
++ *(dd++) = ss[0];
++ ss += 2;
++#else
++ *(dd++) = *(ss++);
++ *(dd++) = *(ss++);
++#endif
++ }
++}
++
++static inline void u64_from_le32_copy(u64 *d, const void *s,
++ unsigned int cnt)
++{
++ const u32 *ss = s;
++ u32 *dd = (u32 *)d;
++
++ while (cnt--) {
++#if defined(__BIG_ENDIAN)
++ dd[1] = *(ss++);
++ dd[0] = *(ss++);
++ dd += 2;
++#else
++ *(dd++) = *(ss++);
++ *(dd++) = *(ss++);
++#endif
++ }
++}
++
++/* decode a field from a cacheline */
++static inline u32 qb_attr_code_decode(const struct qb_attr_code *code,
++ const u32 *cacheline)
++{
++ return d32_u32(code->lsoffset, code->width, cacheline[code->word]);
++}
++
++static inline u64 qb_attr_code_decode_64(const struct qb_attr_code *code,
++ const u64 *cacheline)
++{
++ u64 res;
++
++ u64_from_le32_copy(&res, &cacheline[code->word / 2], 1);
++ return res;
++}
++
++/* encode a field to a cacheline */
++static inline void qb_attr_code_encode(const struct qb_attr_code *code,
++ u32 *cacheline, u32 val)
++{
++ cacheline[code->word] =
++ r32_u32(code->lsoffset, code->width, cacheline[code->word])
++ | e32_u32(code->lsoffset, code->width, val);
++}
++
++static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
++ u64 *cacheline, u64 val)
++{
++ u64_to_le32_copy(&cacheline[code->word / 2], &val, 1);
++}
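A small, illustrative round-trip with the helpers above. The field chosen (the 16-bit BPID in bits 16..31 of word 0) mirrors code_bp_bpid in qbman_debug.c, but the function itself is only a sketch:

static inline void example_attr_code_roundtrip(u32 *cacheline, u32 bpid)
{
	struct qb_attr_code code_example_bpid = QB_CODE(0, 16, 16);

	qb_attr_code_encode(&code_example_bpid, cacheline, bpid);
	WARN_ON(qb_attr_code_decode(&code_example_bpid, cacheline) != bpid);
}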
++
++/* Small-width signed values (two's-complement) will decode into medium-width
++ * positives. (E.g. for an 8-bit signed field, which stores values from -128 to
++ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value
++ * 249. Likewise -120 would decode as 136.) This function allows the caller to
++ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit
++ * encoding, will become 0xfffffff9 if you cast the return value to u32).
++ */
++static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,
++ u32 val)
++{
++ WARN_ON(val >= (1 << code->width));
++ /* If the high bit was set, it was encoding a negative */
++ if (val >= (1 << (code->width - 1)))
++ return (int32_t)0 - (int32_t)(((u32)1 << code->width) -
++ val);
++ /* Otherwise, it was encoding a positive */
++ return (int32_t)val;
++}
++
++/* ---------------------- */
++/* Descriptors/cachelines */
++/* ---------------------- */
++
++/* To avoid needless dynamic allocation, the driver API often gives the caller
++ * a "descriptor" type that the caller can instantiate however they like.
++ * Ultimately though, it is just a cacheline of binary storage (or something
++ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
++ * holding pre-formatted pieces of hardware commands. The performance-critical
++ * code can then copy these descriptors directly into hardware command
++ * registers more efficiently than trying to construct/format commands
++ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
++ * order for the compiler to know its size, but the internal details are not
++ * exposed. The following macro is used within the driver for converting *any*
++ * descriptor pointer to a usable array pointer. The use of a macro (instead of
++ * an inline) is necessary to work with different descriptor types and to work
++ * correctly with const and non-const inputs (and similarly-qualified outputs).
++ */
++#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
++
++#endif /* __FSL_QBMAN_PORTAL_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
+new file mode 100644
+index 00000000..1c77fa6a
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
+@@ -0,0 +1,853 @@
++/* Copyright (C) 2015 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/errno.h>
++
++#include "../../include/dpaa2-global.h"
++#include "qbman-portal.h"
++#include "qbman_debug.h"
++
++/* QBMan portal management command code */
++#define QBMAN_BP_QUERY 0x32
++#define QBMAN_FQ_QUERY 0x44
++#define QBMAN_FQ_QUERY_NP 0x45
++#define QBMAN_CGR_QUERY 0x51
++#define QBMAN_WRED_QUERY 0x54
++#define QBMAN_CGR_STAT_QUERY 0x55
++#define QBMAN_CGR_STAT_QUERY_CLR 0x56
++
++enum qbman_attr_usage_e {
++ qbman_attr_usage_fq,
++ qbman_attr_usage_bpool,
++ qbman_attr_usage_cgr,
++};
++
++struct int_qbman_attr {
++ u32 words[32];
++ enum qbman_attr_usage_e usage;
++};
++
++#define attr_type_set(a, e) \
++{ \
++ struct qbman_attr *__attr = a; \
++ enum qbman_attr_usage_e __usage = e; \
++ ((struct int_qbman_attr *)__attr)->usage = __usage; \
++}
++
++#define ATTR32(d) (&(d)->dont_manipulate_directly[0])
++#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16])
++
++static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1);
++static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1);
++static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1);
++static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16);
++static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16);
++static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16);
++static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16);
++static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16);
++static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16);
++static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14);
++static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15);
++static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1);
++static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32);
++static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32);
++static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32);
++static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32);
++static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16);
++static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3);
++static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32);
++static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32);
++static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8);
++static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8);
++static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8);
++
++void qbman_bp_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_bpool);
++}
++
++int qbman_bp_query(struct qbman_swp *s, u32 bpid,
++ struct qbman_attr *a)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 *attr = ATTR32(a);
++
++ qbman_bp_attr_clear(a);
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ qb_attr_code_encode(&code_bp_bpid, p, bpid);
++
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != QBMAN_BP_QUERY);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt);
++ return -EIO;
++ }
++
++ /* For the query, word[0] of the result contains only the
++ * verb/rslt fields, so skip word[0].
++ */
++ word_copy(&attr[1], &p[1], 15);
++ return 0;
++}
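For illustration only (assumed function name): querying a pool and reading back its fill level with the getters below:

static int example_bp_fill_level(struct qbman_swp *swp, u32 bpid, u32 *fill)
{
	struct qbman_attr attr;
	int ret;

	ret = qbman_bp_query(swp, bpid, &attr);
	if (ret)
		return ret;

	/* number of buffers currently sitting in the pool */
	*fill = qbman_bp_info_num_free_bufs(&attr);
	return 0;
}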
++
++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae)
++{
++ u32 *p = ATTR32(a);
++
++ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p);
++ *va = !!qb_attr_code_decode(&code_bp_va, p);
++ *wae = !!qb_attr_code_decode(&code_bp_wae, p);
++}
++
++static u32 qbman_bp_thresh_to_value(u32 val)
++{
++ return (val & 0xff) << ((val & 0xf00) >> 8);
++}
++
++void qbman_bp_attr_get_swdet(struct qbman_attr *a, u32 *swdet)
++{
++ u32 *p = ATTR32(a);
++
++ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet,
++ p));
++}
++
++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, u32 *swdxt)
++{
++ u32 *p = ATTR32(a);
++
++ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt,
++ p));
++}
++
++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, u32 *hwdet)
++{
++ u32 *p = ATTR32(a);
++
++ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet,
++ p));
++}
++
++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, u32 *hwdxt)
++{
++ u32 *p = ATTR32(a);
++
++ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt,
++ p));
++}
++
++void qbman_bp_attr_get_swset(struct qbman_attr *a, u32 *swset)
++{
++ u32 *p = ATTR32(a);
++
++ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset,
++ p));
++}
++
++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, u32 *swsxt)
++{
++ u32 *p = ATTR32(a);
++
++ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt,
++ p));
++}
++
++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, u32 *vbpid)
++{
++ u32 *p = ATTR32(a);
++
++ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p);
++}
++
++void qbman_bp_attr_get_icid(struct qbman_attr *a, u32 *icid, int *pl)
++{
++ u32 *p = ATTR32(a);
++
++ *icid = qb_attr_code_decode(&code_bp_icid, p);
++ *pl = !!qb_attr_code_decode(&code_bp_pl, p);
++}
++
++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, u64 *bpscn_addr)
++{
++ u32 *p = ATTR32(a);
++
++ *bpscn_addr = ((u64)qb_attr_code_decode(&code_bp_bpscn_addr_hi,
++ p) << 32) |
++ (u64)qb_attr_code_decode(&code_bp_bpscn_addr_lo,
++ p);
++}
++
++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, u64 *bpscn_ctx)
++{
++ u32 *p = ATTR32(a);
++
++ *bpscn_ctx = ((u64)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p)
++ << 32) |
++ (u64)qb_attr_code_decode(&code_bp_bpscn_ctx_lo,
++ p);
++}
++
++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, u32 *hw_targ)
++{
++ u32 *p = ATTR32(a);
++
++ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p);
++}
++
++int qbman_bp_info_has_free_bufs(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1);
++}
++
++int qbman_bp_info_is_depleted(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2);
++}
++
++int qbman_bp_info_is_surplus(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4);
++}
++
++u32 qbman_bp_info_num_free_bufs(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_fill, p);
++}
++
++u32 qbman_bp_info_hdptr(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_hdptr, p);
++}
++
++u32 qbman_bp_info_sdcnt(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_sdcnt, p);
++}
++
++u32 qbman_bp_info_hdcnt(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_hdcnt, p);
++}
++
++u32 qbman_bp_info_sscnt(struct qbman_attr *a)
++{
++ u32 *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_sscnt, p);
++}
++
++static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24);
++static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16);
++static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15);
++static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8);
++static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15);
++static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13);
++static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12);
++static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1);
++static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1);
++static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1);
++static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1);
++static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1);
++static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1);
++static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32);
++static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32);
++static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15);
++static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1);
++static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24);
++static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24);
++
++void qbman_fq_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_fq);
++}
++
++/* FQ query function for programmable fields */
++int qbman_fq_query(struct qbman_swp *s, u32 fqid, struct qbman_attr *desc)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 *d = ATTR32(desc);
++
++ qbman_fq_attr_clear(desc);
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ qb_attr_code_encode(&code_fq_fqid, p, fqid);
++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != QBMAN_FQ_QUERY);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query of FQID 0x%x failed, code=0x%02x\n",
++ fqid, rslt);
++ return -EIO;
++ }
++ /*
++ * For the configure, word[0] of the command contains only the WE-mask.
++ * For the query, word[0] of the result contains only the verb/rslt
++ * fields. Skip word[0] in the latter case.
++ */
++ word_copy(&d[1], &p[1], 15);
++ return 0;
++}
++
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, u32 *fqctrl)
++{
++ u32 *p = ATTR32(d);
++
++ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p);
++}
++
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, u32 *cgrid)
++{
++ u32 *p = ATTR32(d);
++
++ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p);
++}
++
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, u32 *destwq)
++{
++ u32 *p = ATTR32(d);
++
++ *destwq = qb_attr_code_decode(&code_fq_destwq, p);
++}
++
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, u32 *icscred)
++{
++ u32 *p = ATTR32(d);
++
++ *icscred = qb_attr_code_decode(&code_fq_icscred, p);
++}
++
++static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5);
++static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8);
++static u32 qbman_thresh_to_value(u32 val)
++{
++ u32 m, e;
++
++ m = qb_attr_code_decode(&code_tdthresh_mant, &val);
++ e = qb_attr_code_decode(&code_tdthresh_exp, &val);
++ return m << e;
++}
++
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, u32 *tdthresh)
++{
++ u32 *p = ATTR32(d);
++
++ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh,
++ p));
++}
++
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++ int *oa_ics, int *oa_cgr, int32_t *oa_len)
++{
++ u32 *p = ATTR32(d);
++
++ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p);
++ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p);
++ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len,
++ qb_attr_code_decode(&code_fq_oa_len, p));
++}
++
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++ int *bdi, int *ff, int *va, int *ps)
++{
++ u32 *p = ATTR32(d);
++
++ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p);
++ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p);
++ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p);
++ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p);
++}
++
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, u32 *hi, u32 *lo)
++{
++ u32 *p = ATTR32(d);
++
++ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p);
++ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p);
++}
++
++void qbman_fq_attr_get_icid(struct qbman_attr *d, u32 *icid, int *pl)
++{
++ u32 *p = ATTR32(d);
++
++ *icid = qb_attr_code_decode(&code_fq_icid, p);
++ *pl = !!qb_attr_code_decode(&code_fq_pl, p);
++}
++
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, u32 *vfqid)
++{
++ u32 *p = ATTR32(d);
++
++ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p);
++}
++
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, u32 *erfqid)
++{
++ u32 *p = ATTR32(d);
++
++ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p);
++}
++
++/* Query FQ Non-Programmable Fields */
++static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3);
++static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1);
++static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1);
++static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1);
++static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1);
++static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24);
++static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32);
++
++int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
++ struct qbman_attr *state)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 *d = ATTR32(state);
++
++ qbman_fq_attr_clear(state);
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ qb_attr_code_encode(&code_fq_fqid, p, fqid);
++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != QBMAN_FQ_QUERY_NP);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
++ fqid, rslt);
++ return -EIO;
++ }
++ word_copy(&d[0], &p[0], 16);
++ return 0;
++}
++
++u32 qbman_fq_state_schedstate(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_state, p);
++}
++
++int qbman_fq_state_force_eligible(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_fe, p);
++}
++
++int qbman_fq_state_xoff(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_x, p);
++}
++
++int qbman_fq_state_retirement_pending(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_r, p);
++}
++
++int qbman_fq_state_overflow_error(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_oe, p);
++}
++
++u32 qbman_fq_state_frame_count(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_frm_cnt, p);
++}
++
++u32 qbman_fq_state_byte_count(const struct qbman_attr *state)
++{
++ const u32 *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_byte_cnt, p);
++}
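A hedged example (names assumed) combining the non-programmable-field query with the count accessors above:

static int example_fq_occupancy(struct qbman_swp *swp, u32 fqid,
				u32 *frames, u32 *bytes)
{
	struct qbman_attr state;
	int ret;

	ret = qbman_fq_query_state(swp, fqid, &state);
	if (ret)
		return ret;

	*frames = qbman_fq_state_frame_count(&state);
	*bytes = qbman_fq_state_byte_count(&state);
	return 0;
}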
++
++/* Query CGR */
++static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1);
++static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1);
++static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1);
++static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2);
++static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1);
++static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1);
++static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1);
++static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1);
++static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1);
++static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1);
++static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1);
++static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1);
++static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5);
++static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1);
++static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13);
++static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13);
++static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13);
++static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16);
++static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16);
++static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16);
++static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15);
++static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1);
++static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32);
++static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32);
++static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32);
++static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32);
++
++void qbman_cgr_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_cgr);
++}
++
++int qbman_cgr_query(struct qbman_swp *s, u32 cgid, struct qbman_attr *attr)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 *d[2];
++ int i;
++ u32 query_verb;
++
++ d[0] = ATTR32(attr);
++ d[1] = ATTR32_1(attr);
++
++ qbman_cgr_attr_clear(attr);
++
++ for (i = 0; i < 2; i++) {
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY;
++
++ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != query_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query CGID 0x%x failed,", cgid);
++ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt);
++ return -EIO;
++ }
++ /* For the configure, word[0] of the command contains only the
++ * verb/cgid. For the query, word[0] of the result contains
++ * only the verb/rslt fields. Skip word[0] in the latter case.
++ */
++ word_copy(&d[i][1], &p[1], 15);
++ }
++ return 0;
++}
++
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++ int *cscn_wq_en_exit, int *cscn_wq_icd)
++ {
++ u32 *p = ATTR32(d);
++ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter,
++ p);
++ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p);
++ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p);
++}
++
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, u32 *mode,
++ int *rej_cnt_mode, int *cscn_bdi)
++{
++ u32 *p = ATTR32(d);
++ *mode = qb_attr_code_decode(&code_cgr_mode, p);
++ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p);
++ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p);
++}
++
++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
++ int *cscn_wr_en_exit, int *cg_wr_ae,
++ int *cscn_dcp_en, int *cg_wr_va)
++{
++ u32 *p = ATTR32(d);
++ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter,
++ p);
++ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p);
++ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p);
++ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p);
++ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p);
++}
++
++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
++ u32 *i_cnt_wr_bnd)
++{
++ u32 *p = ATTR32(d);
++ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p);
++ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p);
++}
++
++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en)
++{
++ u32 *p = ATTR32(d);
++ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p);
++}
++
++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, u32 *cs_thres)
++{
++ u32 *p = ATTR32(d);
++ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_cs_thres, p));
++}
++
++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
++ u32 *cs_thres_x)
++{
++ u32 *p = ATTR32(d);
++ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_cs_thres_x, p));
++}
++
++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, u32 *td_thres)
++{
++ u32 *p = ATTR32(d);
++ *td_thres = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_td_thres, p));
++}
++
++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, u32 *cscn_tdcp)
++{
++ u32 *p = ATTR32(d);
++ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p);
++}
++
++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, u32 *cscn_wqid)
++{
++ u32 *p = ATTR32(d);
++ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p);
++}
++
++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
++ u32 *cscn_vcgid)
++{
++ u32 *p = ATTR32(d);
++ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p);
++}
++
++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, u32 *icid,
++ int *pl)
++{
++ u32 *p = ATTR32(d);
++ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p);
++ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p);
++}
++
++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
++ u64 *cg_wr_addr)
++{
++ u32 *p = ATTR32(d);
++ *cg_wr_addr = ((u64)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi,
++ p) << 32) |
++ (u64)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo,
++ p);
++}
++
++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, u64 *cscn_ctx)
++{
++ u32 *p = ATTR32(d);
++ *cscn_ctx = ((u64)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p)
++ << 32) |
++ (u64)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p);
++}
++
++#define WRED_EDP_WORD(n) (18 + (n) / 4)
++#define WRED_EDP_OFFSET(n) (8 * ((n) % 4))
++#define WRED_PARM_DP_WORD(n) ((n) + 20)
++#define WRED_WE_EDP(n) (16 + (n) * 2)
++#define WRED_WE_PARM_DP(n) (17 + (n) * 2)
++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, u32 idx,
++ int *edp)
++{
++ u32 *p = ATTR32(d);
++ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx),
++ WRED_EDP_OFFSET(idx), 8);
++ *edp = (int)qb_attr_code_decode(&code_wred_edp, p);
++}
++
++void qbman_cgr_attr_wred_dp_decompose(u32 dp, u64 *minth,
++ u64 *maxth, u8 *maxp)
++{
++ u8 ma, mn, step_i, step_s, pn;
++
++ ma = (u8)(dp >> 24);
++ mn = (u8)(dp >> 19) & 0x1f;
++ step_i = (u8)(dp >> 11);
++ step_s = (u8)(dp >> 6) & 0x1f;
++ pn = (u8)dp & 0x3f;
++
++ *maxp = ((pn << 2) * 100) / 256;
++
++ if (mn == 0)
++ *maxth = ma;
++ else
++ *maxth = ((ma + 256) * (1 << (mn - 1)));
++
++ if (step_s == 0)
++ *minth = *maxth - step_i;
++ else
++ *minth = *maxth - (256 + step_i) * (1 << (step_s - 1));
++}
++
++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, u32 idx,
++ u32 *dp)
++{
++ u32 *p = ATTR32(d);
++ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx),
++ 0, 8);
++ *dp = qb_attr_code_decode(&code_wred_parm_dp, p);
++}
++
++/* Query CGR/CCGR/CQ statistics */
++static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8);
++static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32);
++static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16);
++static int qbman_cgr_statistics_query(struct qbman_swp *s, u32 cgid,
++ int clear, u32 command_type,
++ u64 *frame_cnt, u64 *byte_cnt)
++{
++ u32 *p;
++ u32 verb, rslt;
++ u32 query_verb;
++ u32 hi, lo;
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
++ if (command_type < 2)
++ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type);
++ query_verb = clear ?
++ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY;
++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ WARN_ON(verb != query_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query statistics of CGID 0x%x failed,", cgid);
++ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt);
++ return -EIO;
++ }
++
++	if (frame_cnt) {
++ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p);
++ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p);
++ *frame_cnt = ((u64)hi << 32) | (u64)lo;
++ }
++ if (*byte_cnt) {
++ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p);
++ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p);
++ *byte_cnt = ((u64)hi << 32) | (u64)lo;
++ }
++
++ return 0;
++}
++
++int qbman_cgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 0xff,
++ frame_cnt, byte_cnt);
++}
++
++int qbman_ccgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 1,
++ frame_cnt, byte_cnt);
++}
++
++int qbman_cq_dequeue_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 0,
++ frame_cnt, byte_cnt);
++}
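A minimal calling sketch for the three wrappers above. Note that, as written, the query helper only fills a counter whose current value tests non-zero, so the outputs are pre-seeded here; swp and cgid are assumed to be supplied by the caller:

    u64 frames = ~0ULL, bytes = ~0ULL;   /* pre-seed; see the *frame_cnt test above */
    int ret;

    ret = qbman_cgr_reject_statistics(swp, cgid, 0 /* don't clear */, &frames, &bytes);
    if (!ret)
        pr_info("CGR %u: %llu rejected frames, %llu bytes\n",
                cgid, frames, bytes);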
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+new file mode 100644
+index 00000000..0a247a49
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+@@ -0,0 +1,136 @@
++/* Copyright (C) 2015 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++struct qbman_attr {
++ u32 dont_manipulate_directly[40];
++};
++
++/* Buffer pool query commands */
++int qbman_bp_query(struct qbman_swp *s, u32 bpid,
++ struct qbman_attr *a);
++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae);
++void qbman_bp_attr_get_swdet(struct qbman_attr *a, u32 *swdet);
++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, u32 *swdxt);
++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, u32 *hwdet);
++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, u32 *hwdxt);
++void qbman_bp_attr_get_swset(struct qbman_attr *a, u32 *swset);
++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, u32 *swsxt);
++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, u32 *vbpid);
++void qbman_bp_attr_get_icid(struct qbman_attr *a, u32 *icid, int *pl);
++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, u64 *bpscn_addr);
++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, u64 *bpscn_ctx);
++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, u32 *hw_targ);
++int qbman_bp_info_has_free_bufs(struct qbman_attr *a);
++int qbman_bp_info_is_depleted(struct qbman_attr *a);
++int qbman_bp_info_is_surplus(struct qbman_attr *a);
++u32 qbman_bp_info_num_free_bufs(struct qbman_attr *a);
++u32 qbman_bp_info_hdptr(struct qbman_attr *a);
++u32 qbman_bp_info_sdcnt(struct qbman_attr *a);
++u32 qbman_bp_info_hdcnt(struct qbman_attr *a);
++u32 qbman_bp_info_sscnt(struct qbman_attr *a);
++
++/* FQ query function for programmable fields */
++int qbman_fq_query(struct qbman_swp *s, u32 fqid,
++ struct qbman_attr *desc);
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, u32 *fqctrl);
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, u32 *cgrid);
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, u32 *destwq);
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, u32 *icscred);
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, u32 *tdthresh);
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++ int *oa_ics, int *oa_cgr, int32_t *oa_len);
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++ int *bdi, int *ff, int *va, int *ps);
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, u32 *hi, u32 *lo);
++void qbman_fq_attr_get_icid(struct qbman_attr *d, u32 *icid, int *pl);
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, u32 *vfqid);
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, u32 *erfqid);
++
++/* FQ query command for non-programmable fields */
++enum qbman_fq_schedstate_e {
++ qbman_fq_schedstate_oos = 0,
++ qbman_fq_schedstate_retired,
++ qbman_fq_schedstate_tentatively_scheduled,
++ qbman_fq_schedstate_truly_scheduled,
++ qbman_fq_schedstate_parked,
++ qbman_fq_schedstate_held_active,
++};
++
++int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
++ struct qbman_attr *state);
++u32 qbman_fq_state_schedstate(const struct qbman_attr *state);
++int qbman_fq_state_force_eligible(const struct qbman_attr *state);
++int qbman_fq_state_xoff(const struct qbman_attr *state);
++int qbman_fq_state_retirement_pending(const struct qbman_attr *state);
++int qbman_fq_state_overflow_error(const struct qbman_attr *state);
++u32 qbman_fq_state_frame_count(const struct qbman_attr *state);
++u32 qbman_fq_state_byte_count(const struct qbman_attr *state);
++
++/* CGR query */
++int qbman_cgr_query(struct qbman_swp *s, u32 cgid,
++ struct qbman_attr *attr);
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++ int *cscn_wq_en_exit, int *cscn_wq_icd);
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, u32 *mode,
++ int *rej_cnt_mode, int *cscn_bdi);
++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
++ int *cscn_wr_en_exit, int *cg_wr_ae,
++ int *cscn_dcp_en, int *cg_wr_va);
++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
++ u32 *i_cnt_wr_bnd);
++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en);
++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, u32 *cs_thres);
++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
++ u32 *cs_thres_x);
++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, u32 *td_thres);
++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, u32 *cscn_tdcp);
++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, u32 *cscn_wqid);
++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
++ u32 *cscn_vcgid);
++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, u32 *icid,
++ int *pl);
++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
++ u64 *cg_wr_addr);
++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, u64 *cscn_ctx);
++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, u32 idx,
++ int *edp);
++void qbman_cgr_attr_wred_dp_decompose(u32 dp, u64 *minth,
++ u64 *maxth, u8 *maxp);
++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, u32 idx,
++ u32 *dp);
++
++/* CGR/CCGR/CQ statistics query */
++int qbman_cgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt);
++int qbman_ccgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt);
++int qbman_cq_dequeue_statistics(struct qbman_swp *s, u32 cgid, int clear,
++ u64 *frame_cnt, u64 *byte_cnt);
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_private.h b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h
+new file mode 100644
+index 00000000..98a64be2
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h
+@@ -0,0 +1,171 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/* Perform extra checking */
++#define QBMAN_CHECKING
++
++/* To maximise the amount of logic that is common between the Linux driver and
++ * other targets (such as the embedded MC firmware), we pivot here between the
++ * inclusion of two platform-specific headers.
++ *
++ * The first, qbman_sys_decl.h, includes any and all required system headers as
++ * well as providing any definitions for the purposes of compatibility. The
++ * second, qbman_sys.h, is where platform-specific routines go.
++ *
++ * The point of the split is that the platform-independent code (including this
++ * header) may depend on platform-specific declarations, yet other
++ * platform-specific routines may depend on platform-independent definitions.
++ */
++
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++
++/* When things go wrong, it is a convenient trick to insert a few FOO()
++ * statements in the code to trace progress. TODO: remove this once we are
++ * hacking the code less actively.
++ */
++#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__)
++
++/* Any time there is a register interface which we poll on, this provides a
++ * "break after x iterations" scheme for it. It's handy for debugging, eg.
++ * where you don't want millions of lines of log output from a polling loop
++ * that won't terminate, because such things tend to drown out the earlier
++ * log output that might explain what caused the problem. (NB: put ";" after
++ * each macro!)
++ * TODO: we should probably remove this once we're done sanitising the
++ * simulator...
++ */
++#define DBG_POLL_START(loopvar) (loopvar = 1000)
++#define DBG_POLL_CHECK(loopvar) \
++ do {if (!((loopvar)--)) WARN_ON(1); } while (0)
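A sketch of how these two macros are meant to be used; the register read and the completion bit are placeholders, not real accessors from this driver:

    unsigned int loopvar;
    u32 status;

    DBG_POLL_START(loopvar);
    do {
        DBG_POLL_CHECK(loopvar);                   /* WARNs once the 1000-iteration budget is used up */
        status = readl(portal_addr + STATUS_REG);  /* placeholder register read */
    } while (!(status & DONE_BIT));                /* placeholder completion bit */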
++
++/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
++ * and widths, these macro-generated encode/decode/isolate/remove inlines can
++ * be used.
++ *
++ * Eg. to "d"ecode a 14-bit field out of a register (into a "u16" type),
++ * where the field is located 3 bits "up" from the least-significant bit of the
++ * register (ie. the field location within the 32-bit register corresponds to a
++ * mask of 0x0001fff8), you would do;
++ * u16 field = d32_u16(3, 14, reg_value);
++ *
++ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
++ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
++ * operator) into a register at bit location 0x00080000 (19 bits "in" from the
++ * LS bit), do;
++ * reg_value |= e32_int(19, 1, !!field);
++ *
++ * If you wish to read-modify-write a register, such that you leave the 14-bit
++ * field as-is but have all other fields set to zero, then "i"solate the 14-bit
++ * value using;
++ * reg_value = i32_u16(3, 14, reg_value);
++ *
++ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
++ * zero) but leaving all other fields as-is;
++ * reg_val = r32_int(19, 1, reg_value);
++ *
++ */
++#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \
++ (u32)((1 << width) - 1))
++#define DECLARE_CODEC32(t) \
++static inline u32 e32_##t(u32 lsoffset, u32 width, t val) \
++{ \
++ WARN_ON(width > (sizeof(t) * 8)); \
++ return ((u32)val & MAKE_MASK32(width)) << lsoffset; \
++} \
++static inline t d32_##t(u32 lsoffset, u32 width, u32 val) \
++{ \
++ WARN_ON(width > (sizeof(t) * 8)); \
++ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \
++} \
++static inline u32 i32_##t(u32 lsoffset, u32 width, \
++ u32 val) \
++{ \
++ WARN_ON(width > (sizeof(t) * 8)); \
++ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \
++} \
++static inline u32 r32_##t(u32 lsoffset, u32 width, \
++ u32 val) \
++{ \
++ WARN_ON(width > (sizeof(t) * 8)); \
++ return ~(MAKE_MASK32(width) << lsoffset) & val; \
++}
++DECLARE_CODEC32(u32)
++DECLARE_CODEC32(u16)
++DECLARE_CODEC32(u8)
++DECLARE_CODEC32(int)
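Restating the examples from the comment above as a compilable fragment (the register value is illustrative only):

    u32 reg_value = 0x0001fff8;   /* 14-bit field at bit offset 3, all ones */
    u16 field;
    u32 isolated;

    /* "d"ecode the 14-bit field located 3 bits up from the LS bit */
    field = d32_u16(3, 14, reg_value);

    /* "e"ncode a boolean into the single bit at offset 19 (mask 0x00080000) */
    reg_value |= e32_int(19, 1, !!1);

    /* "i"solate the 14-bit field, zeroing every other field */
    isolated = i32_u16(3, 14, reg_value);

    /* "r"emove (clear) the boolean bit, leaving the other fields as-is */
    reg_value = r32_int(19, 1, reg_value);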
++
++ /*********************/
++ /* Debugging assists */
++ /*********************/
++
++static inline void __hexdump(unsigned long start, unsigned long end,
++ unsigned long p, size_t sz,
++ const unsigned char *c)
++{
++ while (start < end) {
++ unsigned int pos = 0;
++ char buf[64];
++ int nl = 0;
++
++ pos += sprintf(buf + pos, "%08lx: ", start);
++ do {
++ if ((start < p) || (start >= (p + sz)))
++ pos += sprintf(buf + pos, "..");
++ else
++ pos += sprintf(buf + pos, "%02x", *(c++));
++ if (!(++start & 15)) {
++ buf[pos++] = '\n';
++ nl = 1;
++ } else {
++ nl = 0;
++ if (!(start & 1))
++ buf[pos++] = ' ';
++ if (!(start & 3))
++ buf[pos++] = ' ';
++ }
++ } while (start & 15);
++ if (!nl)
++ buf[pos++] = '\n';
++ buf[pos] = '\0';
++ pr_info("%s", buf);
++ }
++}
++
++static inline void hexdump(const void *ptr, size_t sz)
++{
++ unsigned long p = (unsigned long)ptr;
++ unsigned long start = p & ~15ul;
++ unsigned long end = (p + sz + 15) & ~15ul;
++ const unsigned char *c = ptr;
++
++ __hexdump(start, end, p, sz, c);
++}
+diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+index d098a6d8..384a13d0 100644
+--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -33,108 +33,24 @@
+ #define _FSL_DPMCP_CMD_H
+
+ /* Minimal supported DPMCP Version */
+-#define DPMCP_MIN_VER_MAJOR 3
+-#define DPMCP_MIN_VER_MINOR 0
++#define DPMCP_MIN_VER_MAJOR 3
++#define DPMCP_MIN_VER_MINOR 0
+
+-/* Command IDs */
+-#define DPMCP_CMDID_CLOSE 0x800
+-#define DPMCP_CMDID_OPEN 0x80b
+-#define DPMCP_CMDID_CREATE 0x90b
+-#define DPMCP_CMDID_DESTROY 0x900
++/* Command versioning */
++#define DPMCP_CMD_BASE_VERSION 1
++#define DPMCP_CMD_ID_OFFSET 4
+
+-#define DPMCP_CMDID_GET_ATTR 0x004
+-#define DPMCP_CMDID_RESET 0x005
++#define DPMCP_CMD(id) ((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
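The net effect, worked through with the macro argument used just below:

    /* e.g. DPMCP_CMDID_OPEN: DPMCP_CMD(0x80b) = (0x80b << 4) | 1 = 0x80b1 */

so the low nibble of every command ID now carries the command version.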
++
++/* Command IDs */
++#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
++#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
++#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b)
+
+-#define DPMCP_CMDID_SET_IRQ 0x010
+-#define DPMCP_CMDID_GET_IRQ 0x011
+-#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPMCP_CMDID_SET_IRQ_MASK 0x014
+-#define DPMCP_CMDID_GET_IRQ_MASK 0x015
+-#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
++#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
+ struct dpmcp_cmd_open {
+ __le32 dpmcp_id;
+ };
+
+-struct dpmcp_cmd_create {
+- __le32 portal_id;
+-};
+-
+-struct dpmcp_cmd_set_irq {
+- /* cmd word 0 */
+- u8 irq_index;
+- u8 pad[3];
+- __le32 irq_val;
+- /* cmd word 1 */
+- __le64 irq_addr;
+- /* cmd word 2 */
+- __le32 irq_num;
+-};
+-
+-struct dpmcp_cmd_get_irq {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq {
+- /* cmd word 0 */
+- __le32 irq_val;
+- __le32 pad;
+- /* cmd word 1 */
+- __le64 irq_paddr;
+- /* cmd word 2 */
+- __le32 irq_num;
+- __le32 type;
+-};
+-
+-#define DPMCP_ENABLE 0x1
+-
+-struct dpmcp_cmd_set_irq_enable {
+- u8 enable;
+- u8 pad[3];
+- u8 irq_index;
+-};
+-
+-struct dpmcp_cmd_get_irq_enable {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq_enable {
+- u8 enabled;
+-};
+-
+-struct dpmcp_cmd_set_irq_mask {
+- __le32 mask;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_cmd_get_irq_mask {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq_mask {
+- __le32 mask;
+-};
+-
+-struct dpmcp_cmd_get_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq_status {
+- __le32 status;
+-};
+-
+-struct dpmcp_rsp_get_attributes {
+- /* response word 0 */
+- __le32 pad;
+- __le32 id;
+- /* response word 1 */
+- __le16 version_major;
+- __le16 version_minor;
+-};
+-
+ #endif /* _FSL_DPMCP_CMD_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
+index 55766f78..ad4c8b43 100644
+--- a/drivers/staging/fsl-mc/bus/dpmcp.c
++++ b/drivers/staging/fsl-mc/bus/dpmcp.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -103,76 +103,6 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
+ return mc_send_command(mc_io, &cmd);
+ }
+
+-/**
+- * dpmcp_create() - Create the DPMCP object.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @cfg: Configuration structure
+- * @token: Returned token; use in subsequent API calls
+- *
+- * Create the DPMCP object, allocate required resources and
+- * perform required initialization.
+- *
+- * The object can be created either by declaring it in the
+- * DPL file, or by calling this function.
+- * This function returns a unique authentication token,
+- * associated with the specific object ID and the specific MC
+- * portal; this token must be used in all subsequent calls to
+- * this specific object. For objects that are created using the
+- * DPL file, call dpmcp_open function to get an authentication
+- * token first.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_create(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- const struct dpmcp_cfg *cfg,
+- u16 *token)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_create *cmd_params;
+-
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
+- cmd_flags, 0);
+- cmd_params = (struct dpmcp_cmd_create *)cmd.params;
+- cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- *token = mc_cmd_hdr_read_token(&cmd);
+-
+- return 0;
+-}
+-
+-/**
+- * dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- *
+- * Return: '0' on Success; error code otherwise.
+- */
+-int dpmcp_destroy(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+ /**
+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -196,309 +126,33 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
+ }
+
+ /**
+- * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: Identifies the interrupt index to configure
+- * @irq_cfg: IRQ configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dpmcp_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_set_irq *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_set_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+- cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq() - Get IRQ information from the DPMCP.
+- * @mc_io: Pointer to MC portal's I/O object
++ * dpmcp_get_api_version - Get Data Path Management Command Portal API version
++ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @type: Interrupt type: 0 represents message interrupt
+- * type (both irq_addr and irq_val are valid)
+- * @irq_cfg: IRQ attributes
++ * @major_ver: Major version of Data Path Management Command Portal API
++ * @minor_ver: Minor version of Data Path Management Command Portal API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dpmcp_irq_cfg *irq_cfg)
++int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
+ {
+ struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq *cmd_params;
+- struct dpmcp_rsp_get_irq *rsp_params;
+ int err;
+
+ /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq *)cmd.params;
+- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_paddr);
+- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+- *type = le32_to_cpu(rsp_params->type);
+- return 0;
+-}
+-
+-/**
+- * dpmcp_set_irq_enable() - Set overall interrupt state.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @en: Interrupt state - enable = 1, disable = 0
+- *
+- * Allows GPP software to control when interrupts are generated.
+- * Each interrupt can have up to 32 causes. The enable/disable control's the
+- * overall interrupt state. if the interrupt is disabled no causes will cause
+- * an interrupt.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_set_irq_enable *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_set_irq_enable *)cmd.params;
+- cmd_params->enable = en & DPMCP_ENABLE;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq_enable() - Get overall interrupt state
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @en: Returned interrupt state - enable = 1, disable = 0
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq_enable *cmd_params;
+- struct dpmcp_rsp_get_irq_enable *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq_enable *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq_enable *)cmd.params;
+- *en = rsp_params->enabled & DPMCP_ENABLE;
+- return 0;
+-}
+-
+-/**
+- * dpmcp_set_irq_mask() - Set interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Event mask to trigger interrupt;
+- * each bit:
+- * 0 = ignore event
+- * 1 = consider event for asserting IRQ
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_set_irq_mask *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_set_irq_mask *)cmd.params;
+- cmd_params->mask = cpu_to_le32(mask);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq_mask() - Get interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Returned event mask to trigger interrupt
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq_mask *cmd_params;
+- struct dpmcp_rsp_get_irq_mask *rsp_params;
+-
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq_mask *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq_mask *)cmd.params;
+- *mask = le32_to_cpu(rsp_params->mask);
+-
+- return 0;
+-}
+-
+-/**
+- * dpmcp_get_irq_status() - Get the current status of any pending interrupts.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @status: Returned interrupts status - one bit per cause:
+- * 0 = no interrupt pending
+- * 1 = interrupt pending
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq_status *cmd_params;
+- struct dpmcp_rsp_get_irq_status *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(*status);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq_status *)cmd.params;
+- *status = le32_to_cpu(rsp_params->status);
+-
+- return 0;
+-}
+-
+-/**
+- * dpmcp_get_attributes - Retrieve DPMCP attributes.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @attr: Returned object's attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpmcp_attr *attr)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_rsp_get_attributes *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR,
+- cmd_flags, token);
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
+
+- /* send command to mc*/
++ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
+- attr->id = le32_to_cpu(rsp_params->id);
+- attr->version.major = le16_to_cpu(rsp_params->version_major);
+- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+ }
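A hedged sketch of how a caller might consume this new query; mc_io is assumed to be an already-opened MC portal, and the minimum-version constants are the ones from dpmcp-cmd.h (the same pattern dprc_probe() applies to the DPRC further down in this patch):

    u16 major, minor;
    int err;

    err = dpmcp_get_api_version(mc_io, 0, &major, &minor);
    if (err)
        return err;

    if (major < DPMCP_MIN_VER_MAJOR ||
        (major == DPMCP_MIN_VER_MAJOR && minor < DPMCP_MIN_VER_MINOR))
        return -ENOTSUPP;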
+diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
+index fe79d4d9..f616031e 100644
+--- a/drivers/staging/fsl-mc/bus/dpmcp.h
++++ b/drivers/staging/fsl-mc/bus/dpmcp.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -32,128 +32,29 @@
+ #ifndef __FSL_DPMCP_H
+ #define __FSL_DPMCP_H
+
+-/* Data Path Management Command Portal API
++/*
++ * Data Path Management Command Portal API
+ * Contains initialization APIs and runtime control APIs for DPMCP
+ */
+
+ struct fsl_mc_io;
+
+ int dpmcp_open(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
++ u32 cmd_flags,
+ int dpmcp_id,
+- uint16_t *token);
+-
+-/* Get portal ID from pool */
+-#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
++ u16 *token);
+
+ int dpmcp_close(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token);
+-
+-/**
+- * struct dpmcp_cfg - Structure representing DPMCP configuration
+- * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID
+- * from pool
+- */
+-struct dpmcp_cfg {
+- int portal_id;
+-};
+-
+-int dpmcp_create(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- const struct dpmcp_cfg *cfg,
+- uint16_t *token);
++ u32 cmd_flags,
++ u16 token);
+
+-int dpmcp_destroy(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token);
++int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
+
+ int dpmcp_reset(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token);
+-
+-/* IRQ */
+-/* IRQ Index */
+-#define DPMCP_IRQ_INDEX 0
+-/* irq event - Indicates that the link state changed */
+-#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001
+-
+-/**
+- * struct dpmcp_irq_cfg - IRQ configuration
+- * @paddr: Address that must be written to signal a message-based interrupt
+- * @val: Value to write into irq_addr address
+- * @irq_num: A user defined number associated with this IRQ
+- */
+-struct dpmcp_irq_cfg {
+- uint64_t paddr;
+- uint32_t val;
+- int irq_num;
+-};
+-
+-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- struct dpmcp_irq_cfg *irq_cfg);
+-
+-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- int *type,
+- struct dpmcp_irq_cfg *irq_cfg);
+-
+-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint8_t en);
+-
+-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint8_t *en);
+-
+-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t mask);
+-
+-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t *mask);
+-
+-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t *status);
+-
+-/**
+- * struct dpmcp_attr - Structure representing DPMCP attributes
+- * @id: DPMCP object ID
+- * @version: DPMCP version
+- */
+-struct dpmcp_attr {
+- int id;
+- /**
+- * struct version - Structure representing DPMCP version
+- * @major: DPMCP major version
+- * @minor: DPMCP minor version
+- */
+- struct {
+- uint16_t major;
+- uint16_t minor;
+- } version;
+-};
+-
+-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- struct dpmcp_attr *attr);
++ u32 cmd_flags,
++ u16 token);
+
+ #endif /* __FSL_DPMCP_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+index a7b77d58..cdddfb80 100644
+--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+@@ -12,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -41,13 +40,14 @@
+ #ifndef __FSL_DPMNG_CMD_H
+ #define __FSL_DPMNG_CMD_H
+
+-/* Command IDs */
+-#define DPMNG_CMDID_GET_CONT_ID 0x830
+-#define DPMNG_CMDID_GET_VERSION 0x831
++/* Command versioning */
++#define DPMNG_CMD_BASE_VERSION 1
++#define DPMNG_CMD_ID_OFFSET 4
+
+-struct dpmng_rsp_get_container_id {
+- __le32 container_id;
+-};
++#define DPMNG_CMD(id) ((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
++
++/* Command IDs */
++#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
+
+ struct dpmng_rsp_get_version {
+ __le32 revision;
+diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
+index 96b1d677..ad5d5bbe 100644
+--- a/drivers/staging/fsl-mc/bus/dpmng.c
++++ b/drivers/staging/fsl-mc/bus/dpmng.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -72,36 +72,3 @@ int mc_get_version(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(mc_get_version);
+
+-/**
+- * dpmng_get_container_id() - Get container ID associated with a given portal.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @container_id: Requested container ID
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int *container_id)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmng_rsp_get_container_id *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID,
+- cmd_flags,
+- 0);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
+- *container_id = le32_to_cpu(rsp_params->container_id);
+-
+- return 0;
+-}
+-
+diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
+index 009d6567..b7d8c345 100644
+--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
+@@ -12,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -42,48 +41,39 @@
+ #define _FSL_DPRC_CMD_H
+
+ /* Minimal supported DPRC Version */
+-#define DPRC_MIN_VER_MAJOR 5
++#define DPRC_MIN_VER_MAJOR 6
+ #define DPRC_MIN_VER_MINOR 0
+
++/* Command versioning */
++#define DPRC_CMD_BASE_VERSION 1
++#define DPRC_CMD_ID_OFFSET 4
++
++#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
++
+ /* Command IDs */
+-#define DPRC_CMDID_CLOSE 0x800
+-#define DPRC_CMDID_OPEN 0x805
+-#define DPRC_CMDID_CREATE 0x905
+-
+-#define DPRC_CMDID_GET_ATTR 0x004
+-#define DPRC_CMDID_RESET_CONT 0x005
+-
+-#define DPRC_CMDID_SET_IRQ 0x010
+-#define DPRC_CMDID_GET_IRQ 0x011
+-#define DPRC_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPRC_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPRC_CMDID_SET_IRQ_MASK 0x014
+-#define DPRC_CMDID_GET_IRQ_MASK 0x015
+-#define DPRC_CMDID_GET_IRQ_STATUS 0x016
+-#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017
+-
+-#define DPRC_CMDID_CREATE_CONT 0x151
+-#define DPRC_CMDID_DESTROY_CONT 0x152
+-#define DPRC_CMDID_SET_RES_QUOTA 0x155
+-#define DPRC_CMDID_GET_RES_QUOTA 0x156
+-#define DPRC_CMDID_ASSIGN 0x157
+-#define DPRC_CMDID_UNASSIGN 0x158
+-#define DPRC_CMDID_GET_OBJ_COUNT 0x159
+-#define DPRC_CMDID_GET_OBJ 0x15A
+-#define DPRC_CMDID_GET_RES_COUNT 0x15B
+-#define DPRC_CMDID_GET_RES_IDS 0x15C
+-#define DPRC_CMDID_GET_OBJ_REG 0x15E
+-#define DPRC_CMDID_SET_OBJ_IRQ 0x15F
+-#define DPRC_CMDID_GET_OBJ_IRQ 0x160
+-#define DPRC_CMDID_SET_OBJ_LABEL 0x161
+-#define DPRC_CMDID_GET_OBJ_DESC 0x162
+-
+-#define DPRC_CMDID_CONNECT 0x167
+-#define DPRC_CMDID_DISCONNECT 0x168
+-#define DPRC_CMDID_GET_POOL 0x169
+-#define DPRC_CMDID_GET_POOL_COUNT 0x16A
+-
+-#define DPRC_CMDID_GET_CONNECTION 0x16C
++#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
++#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
++#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
++
++#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
++#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
++
++#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
++#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011)
++#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
++#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013)
++#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
++#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015)
++#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
++#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
++
++#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
++#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
++#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
++#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B)
++#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
++#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
++#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160)
+
+ struct dprc_cmd_open {
+ __le32 container_id;
+@@ -199,9 +189,6 @@ struct dprc_rsp_get_attributes {
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+- /* response word 2 */
+- __le16 version_major;
+- __le16 version_minor;
+ };
+
+ struct dprc_cmd_set_res_quota {
+@@ -367,11 +354,16 @@ struct dprc_cmd_get_obj_region {
+
+ struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+- __le64 pad;
++ __le64 pad0;
+ /* response word 1 */
+- __le64 base_addr;
++ __le32 base_addr;
++ __le32 pad1;
+ /* response word 2 */
+ __le32 size;
++ u8 type;
++ u8 pad2[3];
++ /* response word 3 */
++ __le32 flags;
+ };
+
+ struct dprc_cmd_set_obj_label {
+diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
+index c5ee4639..f6e6211b 100644
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale data path resource container (DPRC) driver
+ *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -160,6 +160,8 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @driver_override: driver override to apply to new objects found in the
++ * DPRC, or NULL, if none.
+ * @obj_desc_array: array of device descriptors for child devices currently
+ * present in the physical DPRC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+@@ -169,6 +171,7 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
+ * in the physical DPRC.
+ */
+ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
+ struct dprc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+ {
+@@ -188,11 +191,12 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
++ put_device(&child_dev->dev);
+ continue;
+ }
+
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+- &child_dev);
++ driver_override, &child_dev);
+ if (error < 0)
+ continue;
+ }
+@@ -202,6 +206,8 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ * dprc_scan_objects - Discover objects in a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @driver_override: driver override to apply to new objects found in the
++ * DPRC, or NULL, if none.
+ * @total_irq_count: total number of IRQs needed by objects in the DPRC.
+ *
+ * Detects objects added and removed from a DPRC and synchronizes the
+@@ -217,6 +223,7 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ * of the device drivers for the non-allocatable devices.
+ */
+ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
+ unsigned int *total_irq_count)
+ {
+ int num_child_objects;
+@@ -297,7 +304,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+- dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
++ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array,
+ num_child_objects);
+
+ if (child_obj_desc_array)
+@@ -328,7 +335,7 @@ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
+ * Discover objects in the DPRC:
+ */
+ mutex_lock(&mc_bus->scan_mutex);
+- error = dprc_scan_objects(mc_bus_dev, &irq_count);
++ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count);
+ mutex_unlock(&mc_bus->scan_mutex);
+ if (error < 0)
+ goto error;
+@@ -415,7 +422,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
+ DPRC_IRQ_EVENT_OBJ_CREATED)) {
+ unsigned int irq_count;
+
+- error = dprc_scan_objects(mc_dev, &irq_count);
++ error = dprc_scan_objects(mc_dev, NULL, &irq_count);
+ if (error < 0) {
+ /*
+ * If the error is -ENXIO, we ignore it, as it indicates
+@@ -505,7 +512,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
+ dprc_irq0_handler,
+ dprc_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+- "FSL MC DPRC irq0",
++ dev_name(&mc_dev->dev),
+ &mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+@@ -597,6 +604,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ bool mc_io_created = false;
+ bool msi_domain_set = false;
++ u16 major_ver, minor_ver;
+
+ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
+ return -EINVAL;
+@@ -669,13 +677,21 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
+ goto error_cleanup_open;
+ }
+
+- if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
+- (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
+- mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
++ error = dprc_get_api_version(mc_dev->mc_io, 0,
++ &major_ver,
++ &minor_ver);
++ if (error < 0) {
++ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
++ error);
++ goto error_cleanup_open;
++ }
++
++ if (major_ver < DPRC_MIN_VER_MAJOR ||
++ (major_ver == DPRC_MIN_VER_MAJOR &&
++ minor_ver < DPRC_MIN_VER_MINOR)) {
+ dev_err(&mc_dev->dev,
+ "ERROR: DPRC version %d.%d not supported\n",
+- mc_bus->dprc_attr.version.major,
+- mc_bus->dprc_attr.version.minor);
++ major_ver, minor_ver);
+ error = -ENOTSUPP;
+ goto error_cleanup_open;
+ }
+diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
+index 9fea3def..764cd3fb 100644
+--- a/drivers/staging/fsl-mc/bus/dprc.c
++++ b/drivers/staging/fsl-mc/bus/dprc.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -99,93 +99,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(dprc_close);
+
+-/**
+- * dprc_create_container() - Create child container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @cfg: Child container configuration
+- * @child_container_id: Returned child container ID
+- * @child_portal_offset: Returned child portal offset from MC portal base
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_create_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_cfg *cfg,
+- int *child_container_id,
+- u64 *child_portal_offset)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_create_container *cmd_params;
+- struct dprc_rsp_create_container *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd_params = (struct dprc_cmd_create_container *)cmd.params;
+- cmd_params->options = cpu_to_le32(cfg->options);
+- cmd_params->icid = cpu_to_le16(cfg->icid);
+- cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+- strncpy(cmd_params->label, cfg->label, 16);
+- cmd_params->label[15] = '\0';
+-
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_create_container *)cmd.params;
+- *child_container_id = le32_to_cpu(rsp_params->child_container_id);
+- *child_portal_offset = le64_to_cpu(rsp_params->child_portal_addr);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_destroy_container() - Destroy child container.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the container to destroy
+- *
+- * This function terminates the child container, so following this call the
+- * child container ID becomes invalid.
+- *
+- * Notes:
+- * - All resources and objects of the destroyed container are returned to the
+- * parent container or destroyed if were created be the destroyed container.
+- * - This function destroy all the child containers of the specified
+- * container prior to destroying the container itself.
+- *
+- * warning: Only the parent container is allowed to destroy a child policy
+- * Container 0 can't be destroyed
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- */
+-int dprc_destroy_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_destroy_container *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_destroy_container *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+ /**
+ * dprc_reset_container - Reset child container.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -565,279 +478,6 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ attr->icid = le16_to_cpu(rsp_params->icid);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+- attr->version.major = le16_to_cpu(rsp_params->version_major);
+- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_set_res_quota() - Set allocation policy for a specific resource/object
+- * type in a child container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the child container
+- * @type: Resource/object type
+- * @quota: Sets the maximum number of resources of the selected type
+- * that the child container is allowed to allocate from its parent;
+- * when quota is set to -1, the policy is the same as container's
+- * general policy.
+- *
+- * Allocation policy determines whether or not a container may allocate
+- * resources from its parent. Each container has a 'global' allocation policy
+- * that is set when the container is created.
+- *
+- * This function sets allocation policy for a specific resource type.
+- * The default policy for all resource types matches the container's 'global'
+- * allocation policy.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- * @warning Only the parent container is allowed to change a child policy.
+- */
+-int dprc_set_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 quota)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_res_quota *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_set_res_quota *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- cmd_params->quota = cpu_to_le16(quota);
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_res_quota() - Gets the allocation policy of a specific
+- * resource/object type in a child container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id; ID of the child container
+- * @type: resource/object type
+- * @quota: Returnes the maximum number of resources of the selected type
+- * that the child container is allowed to allocate from the parent;
+- * when quota is set to -1, the policy is the same as container's
+- * general policy.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 *quota)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_res_quota *cmd_params;
+- struct dprc_rsp_get_res_quota *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_res_quota *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_res_quota *)cmd.params;
+- *quota = le16_to_cpu(rsp_params->quota);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_assign() - Assigns objects or resource to a child container.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @container_id: ID of the child container
+- * @res_req: Describes the type and amount of resources to
+- * assign to the given container
+- *
+- * Assignment is usually done by a parent (this DPRC) to one of its child
+- * containers.
+- *
+- * According to the DPRC allocation policy, the assigned resources may be taken
+- * (allocated) from the container's ancestors, if not enough resources are
+- * available in the container itself.
+- *
+- * The type of assignment depends on the dprc_res_req options, as follows:
+- * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have
+- * the explicit base ID specified at the id_base_align field of res_req.
+- * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be
+- * aligned to the value given at id_base_align field of res_req.
+- * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment,
+- * and indicates that the object must be set to the plugged state.
+- *
+- * A container may use this function with its own ID in order to change a
+- * object state to plugged or unplugged.
+- *
+- * If IRQ information has been set in the child DPRC, it will signal an
+- * interrupt following every change in its object assignment.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_assign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int container_id,
+- struct dprc_res_req *res_req)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_assign *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_assign *)cmd.params;
+- cmd_params->container_id = cpu_to_le32(container_id);
+- cmd_params->options = cpu_to_le32(res_req->options);
+- cmd_params->num = cpu_to_le32(res_req->num);
+- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+- strncpy(cmd_params->type, res_req->type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_unassign() - Un-assigns objects or resources from a child container
+- * and moves them into this (parent) DPRC.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the child container
+- * @res_req: Describes the type and amount of resources to un-assign from
+- * the child container
+- *
+- * Un-assignment of objects can succeed only if the object is not in the
+- * plugged or opened state.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_unassign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- struct dprc_res_req *res_req)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_unassign *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_unassign *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- cmd_params->options = cpu_to_le32(res_req->options);
+- cmd_params->num = cpu_to_le32(res_req->num);
+- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+- strncpy(cmd_params->type, res_req->type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_pool_count() - Get the number of dprc's pools
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @mc_io: Pointer to MC portal's I/O object
+- * @token: Token of DPRC object
+- * @pool_count: Returned number of resource pools in the dprc
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *pool_count)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_rsp_get_pool_count *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_pool_count *)cmd.params;
+- *pool_count = le32_to_cpu(rsp_params->pool_count);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_get_pool() - Get the type (string) of a certain dprc's pool
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @pool_index; Index of the pool to be queried (< pool_count)
+- * @type: The type of the pool
+- *
+- * The pool types retrieved one by one by incrementing
+- * pool_index up to (not including) the value of pool_count returned
+- * from dprc_get_pool_count(). dprc_get_pool_count() must
+- * be called prior to dprc_get_pool().
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_pool(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int pool_index,
+- char *type)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_pool *cmd_params;
+- struct dprc_rsp_get_pool *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_pool *)cmd.params;
+- cmd_params->pool_index = cpu_to_le32(pool_index);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_pool *)cmd.params;
+- strncpy(type, rsp_params->type, 16);
+- type[15] = '\0';
+
+ return 0;
+ }
+@@ -933,64 +573,6 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(dprc_get_obj);
+
+-/**
+- * dprc_get_obj_desc() - Get object descriptor.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_type: The type of the object to get its descriptor.
+- * @obj_id: The id of the object to get its descriptor
+- * @obj_desc: The returned descriptor to fill and return to the user
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- */
+-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- struct dprc_obj_desc *obj_desc)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_obj_desc *cmd_params;
+- struct dprc_rsp_get_obj_desc *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_obj_desc *)cmd.params;
+- cmd_params->obj_id = cpu_to_le32(obj_id);
+- strncpy(cmd_params->type, obj_type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_obj_desc *)cmd.params;
+- obj_desc->id = le32_to_cpu(rsp_params->id);
+- obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+- obj_desc->irq_count = rsp_params->irq_count;
+- obj_desc->region_count = rsp_params->region_count;
+- obj_desc->state = le32_to_cpu(rsp_params->state);
+- obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+- obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+- obj_desc->flags = le16_to_cpu(rsp_params->flags);
+- strncpy(obj_desc->type, rsp_params->type, 16);
+- obj_desc->type[15] = '\0';
+- strncpy(obj_desc->label, rsp_params->label, 16);
+- obj_desc->label[15] = '\0';
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_obj_desc);
+-
+ /**
+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -1129,52 +711,6 @@ int dprc_get_res_count(struct fsl_mc_io *mc_io,
+ }
+ EXPORT_SYMBOL(dprc_get_res_count);
+
+-/**
+- * dprc_get_res_ids() - Obtains IDs of free resources in the container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @type: pool type
+- * @range_desc: range descriptor
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- struct dprc_res_ids_range_desc *range_desc)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_res_ids *cmd_params;
+- struct dprc_rsp_get_res_ids *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_res_ids *)cmd.params;
+- cmd_params->iter_status = range_desc->iter_status;
+- cmd_params->base_id = cpu_to_le32(range_desc->base_id);
+- cmd_params->last_id = cpu_to_le32(range_desc->last_id);
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_res_ids *)cmd.params;
+- range_desc->iter_status = rsp_params->iter_status;
+- range_desc->base_id = le32_to_cpu(rsp_params->base_id);
+- range_desc->last_id = le32_to_cpu(rsp_params->last_id);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_res_ids);
+-
+ /**
+ * dprc_get_obj_region() - Get region information for a specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+@@ -1216,160 +752,66 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+- region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
++ region_desc->base_offset = le32_to_cpu(rsp_params->base_addr);
+ region_desc->size = le32_to_cpu(rsp_params->size);
++ region_desc->type = rsp_params->type;
++ region_desc->flags = le32_to_cpu(rsp_params->flags);
+
+ return 0;
+ }
+ EXPORT_SYMBOL(dprc_get_obj_region);
+
+ /**
+- * dprc_set_obj_label() - Set object label.
+- * @mc_io: Pointer to MC portal's I/O object
++ * dprc_get_api_version - Get Data Path Resource Container API version
++ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_type: Object's type
+- * @obj_id: Object's ID
+- * @label: The required label. The maximum length is 16 chars.
++ * @major_ver: Major version of Data Path Resource Container API
++ * @minor_ver: Minor version of Data Path Resource Container API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- char *label)
++int dprc_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
+ {
+ struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_obj_label *cmd_params;
++ int err;
+
+ /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_set_obj_label *)cmd.params;
+- cmd_params->obj_id = cpu_to_le32(obj_id);
+- strncpy(cmd_params->label, label, 16);
+- cmd_params->label[15] = '\0';
+- strncpy(cmd_params->obj_type, obj_type, 16);
+- cmd_params->obj_type[15] = '\0';
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
+
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-EXPORT_SYMBOL(dprc_set_obj_label);
+-
+-/**
+- * dprc_connect() - Connect two endpoints to create a network link between them
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @endpoint1: Endpoint 1 configuration parameters
+- * @endpoint2: Endpoint 2 configuration parameters
+- * @cfg: Connection configuration. The connection configuration is ignored for
+- * connections made to DPMAC objects, where rate is retrieved from the
+- * MAC configuration.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_connect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- const struct dprc_endpoint *endpoint2,
+- const struct dprc_connection_cfg *cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_connect *cmd_params;
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_connect *)cmd.params;
+- cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+- cmd_params->ep2_id = cpu_to_le32(endpoint2->id);
+- cmd_params->ep2_interface_id = cpu_to_le32(endpoint2->if_id);
+- strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+- cmd_params->ep1_type[15] = '\0';
+- cmd_params->max_rate = cpu_to_le32(cfg->max_rate);
+- cmd_params->committed_rate = cpu_to_le32(cfg->committed_rate);
+- strncpy(cmd_params->ep2_type, endpoint2->type, 16);
+- cmd_params->ep2_type[15] = '\0';
++ /* retrieve response parameters */
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
++ return 0;
+ }
+
+ /**
+- * dprc_disconnect() - Disconnect one endpoint to remove its network connection
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @endpoint: Endpoint configuration parameters
++ * dprc_get_container_id - Get container ID associated with a given portal.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @container_id: Requested container id
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+-int dprc_disconnect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_disconnect *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_disconnect *)cmd.params;
+- cmd_params->id = cpu_to_le32(endpoint->id);
+- cmd_params->interface_id = cpu_to_le32(endpoint->if_id);
+- strncpy(cmd_params->type, endpoint->type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_connection() - Get connected endpoint and link status if connection
+- * exists.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @endpoint1: Endpoint 1 configuration parameters
+- * @endpoint2: Returned endpoint 2 configuration parameters
+- * @state: Returned link state:
+- * 1 - link is up;
+- * 0 - link is down;
+- * -1 - no connection (endpoint2 information is irrelevant)
+- *
+- * Return: '0' on Success; -ENAVAIL if connection does not exist.
+- */
+-int dprc_get_connection(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- struct dprc_endpoint *endpoint2,
+- int *state)
++int dprc_get_container_id(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int *container_id)
+ {
+ struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_connection *cmd_params;
+- struct dprc_rsp_get_connection *rsp_params;
+ int err;
+
+ /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+ cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+- cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+- strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+- cmd_params->ep1_type[15] = '\0';
++ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+@@ -1377,12 +819,7 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
+ return err;
+
+ /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+- endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+- endpoint2->if_id = le32_to_cpu(rsp_params->ep2_interface_id);
+- strncpy(endpoint2->type, rsp_params->ep2_type, 16);
+- endpoint2->type[15] = '\0';
+- *state = le32_to_cpu(rsp_params->state);
++ *container_id = (int)mc_cmd_read_object_id(&cmd);
+
+ return 0;
+ }
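
Together these two helpers replace the dpmng_get_container_id()/get_dprc_version() pairing the bus driver used at probe time (the probe-side switch appears further down in the fsl-mc-bus.c hunks). A minimal sketch of the query sequence they enable; only the two dprc_* calls come from this patch, the wrapper name is illustrative:

static int query_root_dprc_info(struct fsl_mc_io *mc_io, int *container_id,
				u16 *major_ver, u16 *minor_ver)
{
	int err;

	/* The root container id is now a DPRC command, not a DPMNG one */
	err = dprc_get_container_id(mc_io, 0, container_id);
	if (err)
		return err;

	/* Per-object version fields are gone; the API version is queried
	 * on a raw (token-less) portal instead.
	 */
	return dprc_get_api_version(mc_io, 0, major_ver, minor_ver);
}
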
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+index e93ab53b..ce07096c 100644
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+@@ -1,7 +1,7 @@
+ /*
+- * Freescale MC object device allocator driver
++ * fsl-mc object allocator driver
+ *
+- * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+@@ -12,9 +12,9 @@
+ #include <linux/msi.h>
+ #include "../include/mc-bus.h"
+ #include "../include/mc-sys.h"
+-#include "../include/dpbp-cmd.h"
+-#include "../include/dpcon-cmd.h"
+
++#include "dpbp-cmd.h"
++#include "dpcon-cmd.h"
+ #include "fsl-mc-private.h"
+
+ #define FSL_MC_IS_ALLOCATABLE(_obj_type) \
+@@ -23,15 +23,12 @@
+ strcmp(_obj_type, "dpcon") == 0)
+
+ /**
+- * fsl_mc_resource_pool_add_device - add allocatable device to a resource
+- * pool of a given MC bus
++ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
++ * pool of a given fsl-mc bus
+ *
+- * @mc_bus: pointer to the MC bus
+- * @pool_type: MC bus pool type
+- * @mc_dev: Pointer to allocatable MC object device
+- *
+- * It adds an allocatable MC object device to a container's resource pool of
+- * the given resource type
++ * @mc_bus: pointer to the fsl-mc bus
++ * @pool_type: pool type
++ * @mc_dev: pointer to allocatable fsl-mc device
+ */
+ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
+ *mc_bus,
+@@ -95,10 +92,10 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
+ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
+ * resource pool
+ *
+- * @mc_dev: Pointer to allocatable MC object device
++ * @mc_dev: pointer to allocatable fsl-mc device
+ *
+- * It permanently removes an allocatable MC object device from the resource
+- * pool, the device is currently in, as long as it is in the pool's free list.
++ * It permanently removes an allocatable fsl-mc device from the resource
++ * pool. It's an error if the device is in use.
+ */
+ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
+ *mc_dev)
+@@ -255,17 +252,18 @@ void fsl_mc_resource_free(struct fsl_mc_resource *resource)
+ EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
+
+ /**
+- * fsl_mc_object_allocate - Allocates a MC object device of the given
+- * pool type from a given MC bus
++ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
++ * pool type from a given fsl-mc bus instance
+ *
+- * @mc_dev: MC device for which the MC object device is to be allocated
+- * @pool_type: MC bus resource pool type
+- * @new_mc_dev: Pointer to area where the pointer to the allocated
+- * MC object device is to be returned
++ * @mc_dev: fsl-mc device which is used in conjunction with the
++ * allocated object
++ * @pool_type: pool type
++ * @new_mc_dev: pointer to area where the pointer to the allocated device
++ * is to be returned
+ *
+- * This function allocates a MC object device from the device's parent DPRC,
+- * from the corresponding MC bus' pool of allocatable MC object devices of
+- * the given resource type. mc_dev cannot be a DPRC itself.
++ * Allocatable objects are always used in conjunction with some functional
++ * device. This function allocates an object of the specified type from
++ * the DPRC containing the functional device.
+ *
+ * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
+ * portals are allocated using fsl_mc_portal_allocate(), instead of
+@@ -312,10 +310,9 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+ EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
+
+ /**
+- * fsl_mc_object_free - Returns an allocatable MC object device to the
+- * corresponding resource pool of a given MC bus.
+- *
+- * @mc_adev: Pointer to the MC object device
++ * fsl_mc_object_free - Returns an fsl-mc object to the resource
++ * pool where it came from.
++ * @mc_adev: Pointer to the fsl-mc device
+ */
+ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
+ {
+@@ -332,8 +329,14 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
+ EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+
+ /*
+- * Initialize the interrupt pool associated with a MC bus.
+- * It allocates a block of IRQs from the GIC-ITS
++ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
++ * ID. A block of IRQs is pre-allocated and maintained in a pool
++ * from which devices can allocate them when needed.
++ */
++
++/*
++ * Initialize the interrupt pool associated with an fsl-mc bus.
++ * It allocates a block of IRQs from the GIC-ITS.
+ */
+ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ unsigned int irq_count)
+@@ -395,7 +398,7 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
+
+ /**
+- * Teardown the interrupt pool associated with an MC bus.
++ * Teardown the interrupt pool associated with an fsl-mc bus.
+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
+ */
+ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+@@ -422,11 +425,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+ EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+
+ /**
+- * It allocates the IRQs required by a given MC object device. The
+- * IRQs are allocated from the interrupt pool associated with the
+- * MC bus that contains the device, if the device is not a DPRC device.
+- * Otherwise, the IRQs are allocated from the interrupt pool associated
+- * with the MC bus that represents the DPRC device itself.
++ * Allocate the IRQs required by a given fsl-mc device.
+ */
+ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+ {
+@@ -495,8 +494,7 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+ EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
+
+ /*
+- * It frees the IRQs that were allocated for a MC object device, by
+- * returning them to the corresponding interrupt pool.
++ * Frees the IRQs that were allocated for an fsl-mc device.
+ */
+ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+ {
+@@ -605,7 +603,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
+ return error;
+
+ dev_dbg(&mc_dev->dev,
+- "Allocatable MC object device bound to fsl_mc_allocator driver");
++ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
+ return 0;
+ }
+
+@@ -627,7 +625,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
+ }
+
+ dev_dbg(&mc_dev->dev,
+- "Allocatable MC object device unbound from fsl_mc_allocator driver");
++ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
+ return 0;
+ }
+
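
Functional drivers lean on this allocator for dpbp/dpmcp/dpcon objects and for MSIs drawn from the per-DPRC GIC-ITS block. A hedged sketch of the consumer-side calls; the wrapper and the choice of FSL_MC_POOL_DPCON are illustrative, while the fsl_mc_* APIs are the ones exported above:

static int sketch_grab_dpcon(struct fsl_mc_device *consumer_dev,
			     struct fsl_mc_device **dpcon_dev)
{
	int err;

	/* Pull an unused dpcon out of the parent DPRC's resource pool */
	err = fsl_mc_object_allocate(consumer_dev, FSL_MC_POOL_DPCON,
				     dpcon_dev);
	if (err)
		return err;

	/* IRQs come from the interrupt pool pre-allocated from the GIC-ITS */
	err = fsl_mc_allocate_irqs(consumer_dev);
	if (err) {
		fsl_mc_object_free(*dpcon_dev);
		return err;
	}

	return 0;
}
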
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+index 44f64b6f..30a48df3 100644
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus driver
+ *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -9,6 +9,8 @@
+ * warranty of any kind, whether express or implied.
+ */
+
++#define pr_fmt(fmt) "fsl-mc: " fmt
++
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+ #include <linux/of_address.h>
+@@ -25,8 +27,6 @@
+ #include "fsl-mc-private.h"
+ #include "dprc-cmd.h"
+
+-static struct kmem_cache *mc_dev_cache;
+-
+ /**
+ * Default DMA mask for devices on a fsl-mc bus
+ */
+@@ -34,7 +34,7 @@ static struct kmem_cache *mc_dev_cache;
+
+ /**
+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+- * @root_mc_bus_dev: MC object device representing the root DPRC
++ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
+ * @num_translation_ranges: number of entries in addr_translation_ranges
+ * @translation_ranges: array of bus to system address translation ranges
+ */
+@@ -62,8 +62,8 @@ struct fsl_mc_addr_translation_range {
+
+ /**
+ * fsl_mc_bus_match - device to driver matching callback
+- * @dev: the MC object device structure to match against
+- * @drv: the device driver to search for matching MC object device id
++ * @dev: the fsl-mc device to match against
++ * @drv: the device driver to search for matching fsl-mc object type
+ * structures
+ *
+ * Returns 1 on success, 0 otherwise.
+@@ -75,8 +75,11 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ bool found = false;
+
+- if (WARN_ON(!fsl_mc_bus_exists()))
++ /* When driver_override is set, only bind to the matching driver */
++ if (mc_dev->driver_override) {
++ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
+ goto out;
++ }
+
+ if (!mc_drv->match_id_table)
+ goto out;
+@@ -91,7 +94,7 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+
+ /*
+ * Traverse the match_id table of the given driver, trying to find
+- * a matching for the given MC object device.
++ * a matching for the given device.
+ */
+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
+ if (id->vendor == mc_dev->obj_desc.vendor &&
+@@ -132,23 +135,141 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(modalias);
+
++static ssize_t rescan_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long val;
++ unsigned int irq_count;
++ struct fsl_mc_device *root_mc_dev;
++ struct fsl_mc_bus *root_mc_bus;
++
++ if (!fsl_mc_is_root_dprc(dev))
++ return -EINVAL;
++
++ root_mc_dev = to_fsl_mc_device(dev);
++ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
++
++ if (kstrtoul(buf, 0, &val) < 0)
++ return -EINVAL;
++
++ if (val) {
++ mutex_lock(&root_mc_bus->scan_mutex);
++ dprc_scan_objects(root_mc_dev, NULL, &irq_count);
++ mutex_unlock(&root_mc_bus->scan_mutex);
++ }
++
++ return count;
++}
++static DEVICE_ATTR_WO(rescan);
++
++static ssize_t driver_override_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ const char *driver_override, *old = mc_dev->driver_override;
++ char *cp;
++
++ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
++ return -EINVAL;
++
++ if (count >= (PAGE_SIZE - 1))
++ return -EINVAL;
++
++ driver_override = kstrndup(buf, count, GFP_KERNEL);
++ if (!driver_override)
++ return -ENOMEM;
++
++ cp = strchr(driver_override, '\n');
++ if (cp)
++ *cp = '\0';
++
++ if (strlen(driver_override)) {
++ mc_dev->driver_override = driver_override;
++ } else {
++ kfree(driver_override);
++ mc_dev->driver_override = NULL;
++ }
++
++ kfree(old);
++
++ return count;
++}
++
++static ssize_t driver_override_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++
++ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
++}
++static DEVICE_ATTR_RW(driver_override);
++
+ static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
++ &dev_attr_rescan.attr,
++ &dev_attr_driver_override.attr,
+ NULL,
+ };
+
+ ATTRIBUTE_GROUPS(fsl_mc_dev);
+
++static int scan_fsl_mc_bus(struct device *dev, void *data)
++{
++ unsigned int irq_count;
++ struct fsl_mc_device *root_mc_dev;
++ struct fsl_mc_bus *root_mc_bus;
++
++ if (fsl_mc_is_root_dprc(dev)) {
++ root_mc_dev = to_fsl_mc_device(dev);
++ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
++ mutex_lock(&root_mc_bus->scan_mutex);
++ dprc_scan_objects(root_mc_dev, NULL, &irq_count);
++ mutex_unlock(&root_mc_bus->scan_mutex);
++ }
++
++ return 0;
++}
++
++static ssize_t bus_rescan_store(struct bus_type *bus,
++ const char *buf, size_t count)
++{
++ unsigned long val;
++
++ if (kstrtoul(buf, 0, &val) < 0)
++ return -EINVAL;
++
++ if (val)
++ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
++
++ return count;
++}
++static BUS_ATTR(rescan, (S_IWUSR | S_IWGRP), NULL, bus_rescan_store);
++
++static struct attribute *fsl_mc_bus_attrs[] = {
++ &bus_attr_rescan.attr,
++ NULL,
++};
++
++static const struct attribute_group fsl_mc_bus_group = {
++ .attrs = fsl_mc_bus_attrs,
++};
++
++static const struct attribute_group *fsl_mc_bus_groups[] = {
++ &fsl_mc_bus_group,
++ NULL,
++};
++
+ struct bus_type fsl_mc_bus_type = {
+ .name = "fsl-mc",
+ .match = fsl_mc_bus_match,
+ .uevent = fsl_mc_bus_uevent,
+ .dev_groups = fsl_mc_dev_groups,
++ .bus_groups = fsl_mc_bus_groups,
+ };
+ EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+
+-static atomic_t root_dprc_count = ATOMIC_INIT(0);
+-
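
Both new attributes are plain sysfs triggers: a non-zero write to the bus-level rescan file rescans every root DPRC, and driver_override pins a device to one driver name before binding. A userspace sketch, assuming a dpni.0 child and the driver name vfio-fsl-mc purely for illustration:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd;

	/* Force a rescan of every root DPRC on the fsl-mc bus */
	fd = open("/sys/bus/fsl-mc/rescan", O_WRONLY);
	if (fd >= 0) {
		(void)write(fd, "1", 1);
		close(fd);
	}

	/* Pin a child object (hypothetical dpni.0) to a specific driver */
	fd = open("/sys/bus/fsl-mc/devices/dpni.0/driver_override", O_WRONLY);
	if (fd >= 0) {
		(void)write(fd, "vfio-fsl-mc\n", 12);
		close(fd);
	}

	return 0;
}
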
+ static int fsl_mc_driver_probe(struct device *dev)
+ {
+ struct fsl_mc_driver *mc_drv;
+@@ -164,8 +285,7 @@ static int fsl_mc_driver_probe(struct device *dev)
+
+ error = mc_drv->probe(mc_dev);
+ if (error < 0) {
+- dev_err(dev, "MC object device probe callback failed: %d\n",
+- error);
++ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+@@ -183,9 +303,7 @@ static int fsl_mc_driver_remove(struct device *dev)
+
+ error = mc_drv->remove(mc_dev);
+ if (error < 0) {
+- dev_err(dev,
+- "MC object device remove callback failed: %d\n",
+- error);
++ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+@@ -232,8 +350,6 @@ int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
+ return error;
+ }
+
+- pr_info("MC object device driver %s registered\n",
+- mc_driver->driver.name);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
+@@ -248,15 +364,6 @@ void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
+ }
+ EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
+
+-/**
+- * fsl_mc_bus_exists - check if a root dprc exists
+- */
+-bool fsl_mc_bus_exists(void)
+-{
+- return atomic_read(&root_dprc_count) > 0;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_bus_exists);
+-
+ /**
+ * fsl_mc_get_root_dprc - function to traverse to the root dprc
+ */
+@@ -315,21 +422,6 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
+ return error;
+ }
+
+-static int get_dprc_version(struct fsl_mc_io *mc_io,
+- int container_id, u16 *major, u16 *minor)
+-{
+- struct dprc_attributes attr;
+- int error;
+-
+- error = get_dprc_attr(mc_io, container_id, &attr);
+- if (error == 0) {
+- *major = attr.version.major;
+- *minor = attr.version.minor;
+- }
+-
+- return error;
+-}
+-
+ static int translate_mc_addr(struct fsl_mc_device *mc_dev,
+ enum dprc_region_type mc_region_type,
+ u64 mc_offset, phys_addr_t *phys_addr)
+@@ -451,18 +543,37 @@ bool fsl_mc_is_root_dprc(struct device *dev)
+ return dev == root_dprc_dev;
+ }
+
++static void fsl_mc_device_release(struct device *dev)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ struct fsl_mc_bus *mc_bus = NULL;
++
++ kfree(mc_dev->regions);
++
++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
++ mc_bus = to_fsl_mc_bus(mc_dev);
++
++ if (mc_bus)
++ kfree(mc_bus);
++ else
++ kfree(mc_dev);
++}
++
+ /**
+- * Add a newly discovered MC object device to be visible in Linux
++ * Add a newly discovered fsl-mc device to be visible in Linux
+ */
+ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
++ const char *driver_override,
+ struct fsl_mc_device **new_mc_dev)
+ {
+ int error;
+ struct fsl_mc_device *mc_dev = NULL;
+ struct fsl_mc_bus *mc_bus = NULL;
+ struct fsl_mc_device *parent_mc_dev;
++ struct device *fsl_mc_platform_dev;
++ struct device_node *fsl_mc_platform_node;
+
+ if (dev_is_fsl_mc(parent_dev))
+ parent_mc_dev = to_fsl_mc_device(parent_dev);
+@@ -473,7 +584,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ /*
+ * Allocate an MC bus device object:
+ */
+- mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL);
++ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
+ if (!mc_bus)
+ return -ENOMEM;
+
+@@ -482,16 +593,30 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ /*
+ * Allocate a regular fsl_mc_device object:
+ */
+- mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL);
++ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
+ if (!mc_dev)
+ return -ENOMEM;
+ }
+
+ mc_dev->obj_desc = *obj_desc;
+ mc_dev->mc_io = mc_io;
++
++ if (driver_override) {
++ /*
++ * We trust driver_override, so we don't need to use
++ * kstrndup() here
++ */
++ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL);
++ if (!mc_dev->driver_override) {
++ error = -ENOMEM;
++ goto error_cleanup_dev;
++ }
++ }
++
+ device_initialize(&mc_dev->dev);
+ mc_dev->dev.parent = parent_dev;
+ mc_dev->dev.bus = &fsl_mc_bus_type;
++ mc_dev->dev.release = fsl_mc_device_release;
+ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+@@ -524,8 +649,6 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ }
+
+ mc_io2 = mc_io;
+-
+- atomic_inc(&root_dprc_count);
+ }
+
+ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
+@@ -533,8 +656,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ goto error_cleanup_dev;
+ } else {
+ /*
+- * A non-DPRC MC object device has to be a child of another
+- * MC object (specifically a DPRC object)
++ * A non-DPRC object has to be a child of a DPRC, use the
++ * parent's ICID and interrupt domain.
+ */
+ mc_dev->icid = parent_mc_dev->icid;
+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
+@@ -556,9 +679,14 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ goto error_cleanup_dev;
+ }
+
+- /* Objects are coherent, unless 'no shareability' flag set. */
+- if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+- arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
++ fsl_mc_platform_dev = &mc_dev->dev;
++ while (dev_is_fsl_mc(fsl_mc_platform_dev))
++ fsl_mc_platform_dev = fsl_mc_platform_dev->parent;
++ fsl_mc_platform_node = fsl_mc_platform_dev->of_node;
++
++ /* Set up the iommu configuration for the devices. */
++ fsl_mc_dma_configure(mc_dev, fsl_mc_platform_node,
++ !(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY));
+
+ /*
+ * The device-specific probe callback will get invoked by device_add()
+@@ -571,9 +699,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ goto error_cleanup_dev;
+ }
+
+- (void)get_device(&mc_dev->dev);
+- dev_dbg(parent_dev, "Added MC object device %s\n",
+- dev_name(&mc_dev->dev));
++ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
+
+ *new_mc_dev = mc_dev;
+ return 0;
+@@ -581,47 +707,34 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ error_cleanup_dev:
+ kfree(mc_dev->regions);
+ if (mc_bus)
+- devm_kfree(parent_dev, mc_bus);
++ kfree(mc_bus);
+ else
+- kmem_cache_free(mc_dev_cache, mc_dev);
++ kfree(mc_dev);
+
+ return error;
+ }
+ EXPORT_SYMBOL_GPL(fsl_mc_device_add);
+
+ /**
+- * fsl_mc_device_remove - Remove a MC object device from being visible to
++ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
+ * Linux
+ *
+- * @mc_dev: Pointer to a MC object device object
++ * @mc_dev: Pointer to an fsl-mc device
+ */
+ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
+ {
+- struct fsl_mc_bus *mc_bus = NULL;
+-
+- kfree(mc_dev->regions);
++ kfree(mc_dev->driver_override);
++ mc_dev->driver_override = NULL;
+
+ /*
+ * The device-specific remove callback will get invoked by device_del()
+ */
+ device_del(&mc_dev->dev);
+- put_device(&mc_dev->dev);
+
+- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
+- mc_bus = to_fsl_mc_bus(mc_dev);
++ if (strcmp(mc_dev->obj_desc.type, "dprc") != 0)
++ mc_dev->dev.iommu_fwspec = NULL;
+
+- if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
+- if (atomic_read(&root_dprc_count) > 0)
+- atomic_dec(&root_dprc_count);
+- else
+- WARN_ON(1);
+- }
+- }
+-
+- if (mc_bus)
+- devm_kfree(mc_dev->dev.parent, mc_bus);
+- else
+- kmem_cache_free(mc_dev_cache, mc_dev);
++ put_device(&mc_dev->dev);
+ }
+ EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+
+@@ -629,8 +742,7 @@ static int parse_mc_ranges(struct device *dev,
+ int *paddr_cells,
+ int *mc_addr_cells,
+ int *mc_size_cells,
+- const __be32 **ranges_start,
+- u8 *num_ranges)
++ const __be32 **ranges_start)
+ {
+ const __be32 *prop;
+ int range_tuple_cell_count;
+@@ -643,8 +755,6 @@ static int parse_mc_ranges(struct device *dev,
+ dev_warn(dev,
+ "missing or empty ranges property for device tree node '%s'\n",
+ mc_node->name);
+-
+- *num_ranges = 0;
+ return 0;
+ }
+
+@@ -671,8 +781,7 @@ static int parse_mc_ranges(struct device *dev,
+ return -EINVAL;
+ }
+
+- *num_ranges = ranges_len / tuple_len;
+- return 0;
++ return ranges_len / tuple_len;
+ }
+
+ static int get_mc_addr_translation_ranges(struct device *dev,
+@@ -680,7 +789,7 @@ static int get_mc_addr_translation_ranges(struct device *dev,
+ **ranges,
+ u8 *num_ranges)
+ {
+- int error;
++ int ret;
+ int paddr_cells;
+ int mc_addr_cells;
+ int mc_size_cells;
+@@ -688,16 +797,16 @@ static int get_mc_addr_translation_ranges(struct device *dev,
+ const __be32 *ranges_start;
+ const __be32 *cell;
+
+- error = parse_mc_ranges(dev,
++ ret = parse_mc_ranges(dev,
+ &paddr_cells,
+ &mc_addr_cells,
+ &mc_size_cells,
+- &ranges_start,
+- num_ranges);
+- if (error < 0)
+- return error;
++ &ranges_start);
++ if (ret < 0)
++ return ret;
+
+- if (!(*num_ranges)) {
++ *num_ranges = ret;
++ if (!ret) {
+ /*
+ * Missing or empty ranges property ("ranges;") for the
+ * 'fsl,qoriq-mc' node. In this case, identity mapping
+@@ -749,8 +858,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
+ struct mc_version mc_version;
+ struct resource res;
+
+- dev_info(&pdev->dev, "Root MC bus device probed");
+-
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+@@ -783,8 +890,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
+ goto error_cleanup_mc_io;
+ }
+
+- dev_info(&pdev->dev,
+- "Freescale Management Complex Firmware version: %u.%u.%u\n",
++ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
+ mc_version.major, mc_version.minor, mc_version.revision);
+
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+@@ -793,16 +899,17 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+- error = dpmng_get_container_id(mc_io, 0, &container_id);
++ error = dprc_get_container_id(mc_io, 0, &container_id);
+ if (error < 0) {
+ dev_err(&pdev->dev,
+- "dpmng_get_container_id() failed: %d\n", error);
++ "dprc_get_container_id() failed: %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
+- error = get_dprc_version(mc_io, container_id,
+- &obj_desc.ver_major, &obj_desc.ver_minor);
++ error = dprc_get_api_version(mc_io, 0,
++ &obj_desc.ver_major,
++ &obj_desc.ver_minor);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+@@ -812,7 +919,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
+ obj_desc.irq_count = 1;
+ obj_desc.region_count = 0;
+
+- error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
++ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL,
++ &mc_bus_dev);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+@@ -840,7 +948,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
+ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
+ mc->root_mc_bus_dev->mc_io = NULL;
+
+- dev_info(&pdev->dev, "Root MC bus device removed");
+ return 0;
+ }
+
+@@ -865,22 +972,12 @@ static int __init fsl_mc_bus_driver_init(void)
+ {
+ int error;
+
+- mc_dev_cache = kmem_cache_create("fsl_mc_device",
+- sizeof(struct fsl_mc_device), 0, 0,
+- NULL);
+- if (!mc_dev_cache) {
+- pr_err("Could not create fsl_mc_device cache\n");
+- return -ENOMEM;
+- }
+-
+ error = bus_register(&fsl_mc_bus_type);
+ if (error < 0) {
+- pr_err("fsl-mc bus type registration failed: %d\n", error);
++ pr_err("bus type registration failed: %d\n", error);
+ goto error_cleanup_cache;
+ }
+
+- pr_info("fsl-mc bus type registered\n");
+-
+ error = platform_driver_register(&fsl_mc_bus_driver);
+ if (error < 0) {
+ pr_err("platform_driver_register() failed: %d\n", error);
+@@ -914,7 +1011,6 @@ static int __init fsl_mc_bus_driver_init(void)
+ bus_unregister(&fsl_mc_bus_type);
+
+ error_cleanup_cache:
+- kmem_cache_destroy(mc_dev_cache);
+ return error;
+ }
+ postcore_initcall(fsl_mc_bus_driver_init);
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-iommu.c b/drivers/staging/fsl-mc/bus/fsl-mc-iommu.c
+new file mode 100644
+index 00000000..86b2cd84
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-iommu.c
+@@ -0,0 +1,104 @@
++/*
++ * Copyright 2016-17 NXP
++ * Author: Nipun Gupta <nipun.gupta@nxp.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/iommu.h>
++#include <linux/of.h>
++#include <linux/of_iommu.h>
++#include "../include/mc.h"
++
++/* Setup the IOMMU for the DPRC container */
++static const struct iommu_ops
++*fsl_mc_iommu_configure(struct fsl_mc_device *mc_dev,
++ struct device_node *fsl_mc_platform_node)
++{
++ struct of_phandle_args iommu_spec;
++ const struct iommu_ops *ops;
++ u32 iommu_phandle;
++ struct device_node *iommu_node;
++ const __be32 *map = NULL;
++ int iommu_cells, map_len, ret;
++
++ map = of_get_property(fsl_mc_platform_node, "iommu-map", &map_len);
++ if (!map)
++ return NULL;
++
++ ops = mc_dev->dev.bus->iommu_ops;
++ if (!ops || !ops->of_xlate)
++ return NULL;
++
++ iommu_phandle = be32_to_cpup(map + 1);
++ iommu_node = of_find_node_by_phandle(iommu_phandle);
++
++ if (of_property_read_u32(iommu_node, "#iommu-cells", &iommu_cells)) {
++ pr_err("%s: missing #iommu-cells property\n", iommu_node->name);
++ return NULL;
++ }
++
++ /* Initialize the fwspec */
++ ret = iommu_fwspec_init(&mc_dev->dev, &iommu_node->fwnode, ops);
++ if (ret)
++ return NULL;
++
++ /*
++ * Fill in the required stream-id before calling the iommu's
++ * ops->xlate callback.
++ */
++ iommu_spec.np = iommu_node;
++ iommu_spec.args[0] = mc_dev->icid;
++ iommu_spec.args_count = 1;
++
++ ret = ops->of_xlate(&mc_dev->dev, &iommu_spec);
++ if (ret)
++ return NULL;
++
++ of_node_put(iommu_spec.np);
++
++ return ops;
++}
++
++/* Set up DMA configuration for fsl-mc devices */
++void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev,
++ struct device_node *fsl_mc_platform_node, int coherent)
++{
++ const struct iommu_ops *ops;
++
++ ops = fsl_mc_iommu_configure(mc_dev, fsl_mc_platform_node);
++
++ mc_dev->dev.coherent_dma_mask = DMA_BIT_MASK(48);
++ mc_dev->dev.dma_mask = &mc_dev->dev.coherent_dma_mask;
++ arch_setup_dma_ops(&mc_dev->dev, 0,
++ mc_dev->dev.coherent_dma_mask + 1, ops, coherent);
++}
++
++/* Macro to get the container device of a MC device */
++#define fsl_mc_cont_dev(_dev) ((to_fsl_mc_device(_dev)->flags & \
++ FSL_MC_IS_DPRC) ? (_dev) : ((_dev)->parent))
++
++/* Macro to check if a device is a container device */
++#define is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & FSL_MC_IS_DPRC)
++
++/* Get the IOMMU group for device on fsl-mc bus */
++struct iommu_group *fsl_mc_device_group(struct device *dev)
++{
++ struct device *cont_dev = fsl_mc_cont_dev(dev);
++ struct iommu_group *group;
++
++ /* Container device is responsible for creating the iommu group */
++ if (is_cont_dev(dev)) {
++ group = iommu_group_alloc();
++ if (IS_ERR(group))
++ return NULL;
++ } else {
++ get_device(cont_dev);
++ group = iommu_group_get(cont_dev);
++ put_device(cont_dev);
++ }
++
++ return group;
++}
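
Every object inside a DPRC shares the container's ICID, hence a single SMMU stream ID, so fsl_mc_device_group() places them all in one IOMMU group. How an IOMMU driver opts into that policy is outside this patch; the sketch below only shows the assumed wiring, with generic_device_group() taken from the IOMMU core as the non-fsl-mc fallback:

#include <linux/iommu.h>

/* Illustrative .device_group callback for an IOMMU driver that handles
 * fsl-mc masters; dev_is_fsl_mc() and fsl_mc_device_group() come from
 * the fsl-mc bus headers, the example_* name is a placeholder.
 */
static struct iommu_group *example_device_group(struct device *dev)
{
	if (dev_is_fsl_mc(dev))
		return fsl_mc_device_group(dev);

	return generic_device_group(dev);
}
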
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+index 3d46b1b1..b8b2c86e 100644
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+- * Copyright (C) 2015 Freescale Semiconductor, Inc.
++ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -17,6 +17,7 @@
+ #include <linux/irqdomain.h>
+ #include <linux/msi.h>
+ #include "../include/mc-bus.h"
++#include "fsl-mc-private.h"
+
+ /*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+index d459c267..e08b8843 100644
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+@@ -10,13 +10,15 @@
+ #ifndef _FSL_MC_PRIVATE_H_
+ #define _FSL_MC_PRIVATE_H_
+
++#include "../include/mc.h"
++#include "../include/mc-bus.h"
++
+ int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
++ const char *driver_override,
+ struct fsl_mc_device **new_mc_dev);
+
+-void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+-
+ int __init dprc_driver_init(void);
+
+ void dprc_driver_exit(void);
+diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+index 7a6ac640..49127acb 100644
+--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+- * Copyright (C) 2015 Freescale Semiconductor, Inc.
++ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -17,9 +17,10 @@
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include "../include/mc-bus.h"
++#include "fsl-mc-private.h"
+
+ static struct irq_chip its_msi_irq_chip = {
+- .name = "fsl-mc-bus-msi",
++ .name = "ITS-fMSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+@@ -51,7 +52,7 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
+ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+ }
+
+-static struct msi_domain_ops its_fsl_mc_msi_ops = {
++static struct msi_domain_ops its_fsl_mc_msi_ops __ro_after_init = {
+ .msi_prepare = its_fsl_mc_msi_prepare,
+ };
+
+@@ -94,8 +95,8 @@ int __init its_fsl_mc_msi_init(void)
+ continue;
+ }
+
+- WARN_ON(mc_msi_domain->
+- host_data != &its_fsl_mc_msi_domain_info);
++ WARN_ON(mc_msi_domain->host_data !=
++ &its_fsl_mc_msi_domain_info);
+
+ pr_info("fsl-mc MSI: %s domain created\n", np->full_name);
+ }
+diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c
+index 798c965f..d66b87f0 100644
+--- a/drivers/staging/fsl-mc/bus/mc-io.c
++++ b/drivers/staging/fsl-mc/bus/mc-io.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -11,7 +12,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+diff --git a/drivers/staging/fsl-mc/bus/mc-ioctl.h b/drivers/staging/fsl-mc/bus/mc-ioctl.h
+new file mode 100644
+index 00000000..8ac502a1
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h
+@@ -0,0 +1,22 @@
++/*
++ * Freescale Management Complex (MC) ioctl interface
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Author: Lijun Pan <Lijun.Pan@freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++#ifndef _FSL_MC_IOCTL_H_
++#define _FSL_MC_IOCTL_H_
++
++#include <linux/ioctl.h>
++#include "../include/mc-sys.h"
++
++#define RESTOOL_IOCTL_TYPE 'R'
++
++#define RESTOOL_SEND_MC_COMMAND \
++ _IOWR(RESTOOL_IOCTL_TYPE, 0xE0, struct mc_command)
++
++#endif /* _FSL_MC_IOCTL_H_ */
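
RESTOOL_SEND_MC_COMMAND ferries a raw struct mc_command (one 64-bit header plus seven 64-bit parameters, per mc-cmd.h) between user space and an MC portal. A hedged userspace sketch of driving it; /dev/dprc.1 matches how mc-restool.c names the miscdevice for a root DPRC, and encoding a real command header is left to the caller (normally the restool utility):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Userspace mirror of struct mc_command from mc-cmd.h */
struct mc_command {
	uint64_t header;
	uint64_t params[7];
};

#define RESTOOL_SEND_MC_COMMAND _IOWR('R', 0xE0, struct mc_command)

int main(void)
{
	struct mc_command cmd;
	int fd, ret;

	fd = open("/dev/dprc.1", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	/* cmd.header/cmd.params must hold a fully encoded MC command here */
	ret = ioctl(fd, RESTOOL_SEND_MC_COMMAND, &cmd);

	close(fd);
	return ret < 0 ? 1 : 0;
}
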
+diff --git a/drivers/staging/fsl-mc/bus/mc-restool.c b/drivers/staging/fsl-mc/bus/mc-restool.c
+new file mode 100644
+index 00000000..d5330b68
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/mc-restool.c
+@@ -0,0 +1,405 @@
++/*
++ * Freescale Management Complex (MC) restool driver
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Author: Lijun Pan <Lijun.Pan@freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include "../include/mc.h"
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/mutex.h>
++#include <linux/platform_device.h>
++#include "mc-ioctl.h"
++#include "../include/mc-sys.h"
++#include "../include/mc-bus.h"
++#include "../include/mc-cmd.h"
++#include "../include/dpmng.h"
++
++/**
++ * Maximum number of DPRCs that can be opened at the same time
++ */
++#define MAX_DPRC_HANDLES 64
++
++/**
++ * restool_misc - information associated with the newly added miscdevice
++ * @misc: newly created miscdevice associated with root dprc
++ * @miscdevt: device id of this miscdevice
++ * @list: a linked list node representing this miscdevice
++ * @static_mc_io: pointer to the static MC I/O object used by the restool
++ * @dynamic_instance_count: number of dynamically created instances
++ * @static_instance_in_use: static instance is in use or not
++ * @mutex: mutex lock to serialize the open/release operations
++ * @dev: root dprc associated with this miscdevice
++ */
++struct restool_misc {
++ struct miscdevice misc;
++ dev_t miscdevt;
++ struct list_head list;
++ struct fsl_mc_io *static_mc_io;
++ u32 dynamic_instance_count;
++ bool static_instance_in_use;
++	struct mutex mutex; /* serialize the open/release operations */
++ struct device *dev;
++};
++
++/**
++ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
++ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
++ * @num_translation_ranges: number of entries in addr_translation_ranges
++ * @translation_ranges: array of bus to system address translation ranges
++ */
++struct fsl_mc {
++ struct fsl_mc_device *root_mc_bus_dev;
++ u8 num_translation_ranges;
++ struct fsl_mc_addr_translation_range *translation_ranges;
++};
++
++/*
++ * initialize a global list to link all
++ * the miscdevice nodes (struct restool_misc)
++ */
++static LIST_HEAD(misc_list);
++static DEFINE_MUTEX(misc_list_mutex);
++
++static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep)
++{
++ struct fsl_mc_device *root_mc_dev;
++ int error;
++ struct fsl_mc_io *dynamic_mc_io = NULL;
++ struct restool_misc *restool_misc = NULL;
++ struct restool_misc *restool_misc_cursor;
++
++ mutex_lock(&misc_list_mutex);
++
++ list_for_each_entry(restool_misc_cursor, &misc_list, list) {
++ if (restool_misc_cursor->miscdevt == inode->i_rdev) {
++ restool_misc = restool_misc_cursor;
++ break;
++ }
++ }
++
++ mutex_unlock(&misc_list_mutex);
++
++ if (!restool_misc)
++ return -EINVAL;
++
++ if (WARN_ON(!restool_misc->dev))
++ return -EINVAL;
++
++ mutex_lock(&restool_misc->mutex);
++
++ if (!restool_misc->static_instance_in_use) {
++ restool_misc->static_instance_in_use = true;
++ filep->private_data = restool_misc->static_mc_io;
++ } else {
++ dynamic_mc_io = kzalloc(sizeof(*dynamic_mc_io), GFP_KERNEL);
++ if (!dynamic_mc_io) {
++ error = -ENOMEM;
++ goto err_unlock;
++ }
++
++ root_mc_dev = to_fsl_mc_device(restool_misc->dev);
++ error = fsl_mc_portal_allocate(root_mc_dev, 0, &dynamic_mc_io);
++ if (error < 0) {
++ pr_err("Not able to allocate MC portal\n");
++ goto free_dynamic_mc_io;
++ }
++ ++restool_misc->dynamic_instance_count;
++ filep->private_data = dynamic_mc_io;
++ }
++
++ mutex_unlock(&restool_misc->mutex);
++
++ return 0;
++
++free_dynamic_mc_io:
++ kfree(dynamic_mc_io);
++err_unlock:
++ mutex_unlock(&restool_misc->mutex);
++
++ return error;
++}
++
++static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep)
++{
++ struct fsl_mc_io *local_mc_io = filep->private_data;
++ struct restool_misc *restool_misc = NULL;
++ struct restool_misc *restool_misc_cursor;
++
++ if (WARN_ON(!filep->private_data))
++ return -EINVAL;
++
++ mutex_lock(&misc_list_mutex);
++
++ list_for_each_entry(restool_misc_cursor, &misc_list, list) {
++ if (restool_misc_cursor->miscdevt == inode->i_rdev) {
++ restool_misc = restool_misc_cursor;
++ break;
++ }
++ }
++
++ mutex_unlock(&misc_list_mutex);
++
++ if (!restool_misc)
++ return -EINVAL;
++
++ mutex_lock(&restool_misc->mutex);
++
++ if (WARN_ON(restool_misc->dynamic_instance_count == 0 &&
++ !restool_misc->static_instance_in_use)) {
++ mutex_unlock(&restool_misc->mutex);
++ return -EINVAL;
++ }
++
++ /* Globally clean up opened/untracked handles */
++ fsl_mc_portal_reset(local_mc_io);
++
++	/*
++	 * Check whether local_mc_io is the static or a dynamic instance.
++	 * Getting this wrong would either free the reserved (static)
++	 * portal by accident, or leak a dynamically allocated portal
++	 * when two or more instances run concurrently.
++	 */
++ if (local_mc_io == restool_misc->static_mc_io) {
++ restool_misc->static_instance_in_use = false;
++ } else {
++ fsl_mc_portal_free(local_mc_io);
++ kfree(filep->private_data);
++ --restool_misc->dynamic_instance_count;
++ }
++
++ filep->private_data = NULL;
++ mutex_unlock(&restool_misc->mutex);
++
++ return 0;
++}
++
++static int restool_send_mc_command(unsigned long arg,
++ struct fsl_mc_io *local_mc_io)
++{
++ int error;
++ struct mc_command mc_cmd;
++
++ if (copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd)))
++ return -EFAULT;
++
++ /*
++ * Send MC command to the MC:
++ */
++ error = mc_send_command(local_mc_io, &mc_cmd);
++ if (error < 0)
++ return error;
++
++ if (copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static long
++fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ int error;
++
++ switch (cmd) {
++ case RESTOOL_SEND_MC_COMMAND:
++ error = restool_send_mc_command(arg, file->private_data);
++ break;
++ default:
++ pr_err("%s: unexpected ioctl call number\n", __func__);
++ error = -EINVAL;
++ }
++
++ return error;
++}
++
++static const struct file_operations fsl_mc_restool_dev_fops = {
++ .owner = THIS_MODULE,
++ .open = fsl_mc_restool_dev_open,
++ .release = fsl_mc_restool_dev_release,
++ .unlocked_ioctl = fsl_mc_restool_dev_ioctl,
++};
++
++static int restool_add_device_file(struct device *dev)
++{
++ u32 name1 = 0;
++ char name2[20] = {0};
++ int error;
++ struct fsl_mc_device *root_mc_dev;
++ struct restool_misc *restool_misc;
++
++ if (dev->bus == &platform_bus_type && dev->driver_data) {
++ if (sscanf(dev_name(dev), "%x.%s", &name1, name2) != 2)
++ return -EINVAL;
++
++ if (strcmp(name2, "fsl-mc") == 0)
++ pr_debug("platform's root dprc name is: %s\n",
++ dev_name(&(((struct fsl_mc *)
++ (dev->driver_data))->root_mc_bus_dev->dev)));
++ }
++
++ if (!fsl_mc_is_root_dprc(dev))
++ return 0;
++
++ restool_misc = kzalloc(sizeof(*restool_misc), GFP_KERNEL);
++ if (!restool_misc)
++ return -ENOMEM;
++
++ restool_misc->dev = dev;
++ root_mc_dev = to_fsl_mc_device(dev);
++ error = fsl_mc_portal_allocate(root_mc_dev, 0,
++ &restool_misc->static_mc_io);
++ if (error < 0) {
++ pr_err("Not able to allocate MC portal\n");
++ goto free_restool_misc;
++ }
++
++ restool_misc->misc.minor = MISC_DYNAMIC_MINOR;
++ restool_misc->misc.name = dev_name(dev);
++ restool_misc->misc.fops = &fsl_mc_restool_dev_fops;
++
++ error = misc_register(&restool_misc->misc);
++ if (error < 0) {
++ pr_err("misc_register() failed: %d\n", error);
++ goto free_portal;
++ }
++
++ restool_misc->miscdevt = restool_misc->misc.this_device->devt;
++ mutex_init(&restool_misc->mutex);
++ mutex_lock(&misc_list_mutex);
++ list_add(&restool_misc->list, &misc_list);
++ mutex_unlock(&misc_list_mutex);
++
++ pr_info("/dev/%s driver registered\n", dev_name(dev));
++
++ return 0;
++
++free_portal:
++ fsl_mc_portal_free(restool_misc->static_mc_io);
++free_restool_misc:
++ kfree(restool_misc);
++
++ return error;
++}
++
++static int restool_bus_notifier(struct notifier_block *nb,
++ unsigned long action, void *data)
++{
++ int error;
++ struct device *dev = data;
++
++ switch (action) {
++ case BUS_NOTIFY_ADD_DEVICE:
++ error = restool_add_device_file(dev);
++ if (error)
++ return error;
++ break;
++ case BUS_NOTIFY_DEL_DEVICE:
++ case BUS_NOTIFY_REMOVED_DEVICE:
++ case BUS_NOTIFY_BIND_DRIVER:
++ case BUS_NOTIFY_BOUND_DRIVER:
++ case BUS_NOTIFY_UNBIND_DRIVER:
++ case BUS_NOTIFY_UNBOUND_DRIVER:
++ break;
++ default:
++ pr_err("%s: unrecognized device action from %s\n", __func__,
++ dev_name(dev));
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int add_to_restool(struct device *dev, void *data)
++{
++ return restool_add_device_file(dev);
++}
++
++static int __init fsl_mc_restool_driver_init(void)
++{
++ int error;
++ struct notifier_block *nb;
++
++ nb = kzalloc(sizeof(*nb), GFP_KERNEL);
++ if (!nb)
++ return -ENOMEM;
++
++ nb->notifier_call = restool_bus_notifier;
++ error = bus_register_notifier(&fsl_mc_bus_type, nb);
++ if (error)
++ goto free_nb;
++
++ /*
++	 * This driver runs after the fsl-mc bus driver, so most root DPRCs
++	 * are already attached to the fsl-mc bus by the time we get here.
++	 * Scan fsl_mc_bus_type to make sure every root DPRC gets a device
++	 * file.
++ */
++ error = bus_for_each_dev(&fsl_mc_bus_type, NULL, NULL, add_to_restool);
++ if (error) {
++ bus_unregister_notifier(&fsl_mc_bus_type, nb);
++ kfree(nb);
++ pr_err("restool driver registration failure\n");
++ return error;
++ }
++
++ return 0;
++
++free_nb:
++ kfree(nb);
++ return error;
++}
++
++module_init(fsl_mc_restool_driver_init);
++
++static void __exit fsl_mc_restool_driver_exit(void)
++{
++ struct restool_misc *restool_misc;
++ struct restool_misc *restool_misc_tmp;
++ char name1[20] = {0};
++ u32 name2 = 0;
++
++ list_for_each_entry_safe(restool_misc, restool_misc_tmp,
++ &misc_list, list) {
++ if (sscanf(restool_misc->misc.name, "%4s.%u", name1, &name2)
++ != 2)
++ continue;
++
++ pr_debug("name1=%s,name2=%u\n", name1, name2);
++ pr_debug("misc-device: %s\n", restool_misc->misc.name);
++ if (strcmp(name1, "dprc") != 0)
++ continue;
++
++ if (WARN_ON(!restool_misc->static_mc_io))
++ return;
++
++ if (WARN_ON(restool_misc->dynamic_instance_count != 0))
++ return;
++
++ if (WARN_ON(restool_misc->static_instance_in_use))
++ return;
++
++ misc_deregister(&restool_misc->misc);
++ pr_info("/dev/%s driver unregistered\n",
++ restool_misc->misc.name);
++ fsl_mc_portal_free(restool_misc->static_mc_io);
++ list_del(&restool_misc->list);
++ kfree(restool_misc);
++ }
++}
++
++module_exit(fsl_mc_restool_driver_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor Inc.");
++MODULE_DESCRIPTION("Freescale's MC restool driver");
++MODULE_LICENSE("GPL");
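[Editor's note] The restool misc device above exposes a single ioctl, RESTOOL_SEND_MC_COMMAND, which copies a struct mc_command from user space, forwards it to the MC firmware with mc_send_command(), and copies the response words back in place. Below is a minimal user-space sketch of that flow. It assumes the struct mc_command layout (one 64-bit header plus seven 64-bit parameter words) and redeclares the ioctl request code locally because the kernel's mc-ioctl.h is not a uapi header; the request encoding, the helper name send_mc_command() and the "/dev/dprc.1" node used in the comment are illustrative assumptions, not definitions from this patch.

/* Illustrative sketch only: send one raw MC command through the restool
 * character device (e.g. /dev/dprc.1) and read the response back in place.
 */
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

struct mc_command {
	uint64_t header;
	uint64_t params[7];
};

/* Assumed to match the kernel's mc-ioctl.h definition. */
#define RESTOOL_IOCTL_TYPE	'R'
#define RESTOOL_SEND_MC_COMMAND	_IOWR(RESTOOL_IOCTL_TYPE, 0xE0, struct mc_command)

int send_mc_command(const char *devnode, struct mc_command *cmd)
{
	int fd = open(devnode, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;

	/* The kernel handler copies cmd in, runs mc_send_command(), and
	 * copies the (possibly updated) command words back out.
	 */
	ret = ioctl(fd, RESTOOL_SEND_MC_COMMAND, cmd);
	close(fd);
	return ret;
}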
+diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
+index 285917c7..cf63c7b6 100644
+--- a/drivers/staging/fsl-mc/bus/mc-sys.c
++++ b/drivers/staging/fsl-mc/bus/mc-sys.c
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2014 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * I/O services to send MC commands to the MC hardware
+ *
+@@ -13,7 +14,6 @@
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+- *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+@@ -46,7 +46,7 @@
+ /**
+ * Timeout in milliseconds to wait for the completion of an MC command
+ */
+-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
++#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
+
+ /*
+ * usleep_range() min and max values used to throttle down polling
+@@ -67,7 +67,7 @@ static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
+
+- return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
++ return cmd_id;
+ }
+
+ static int mc_status_to_error(enum mc_cmd_status status)
+@@ -200,7 +200,7 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
+
+ if (time_after_eq(jiffies, jiffies_until_timeout)) {
+ dev_dbg(mc_io->dev,
+- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
++ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+@@ -240,7 +240,7 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+ if (timeout_usecs == 0) {
+ dev_dbg(mc_io->dev,
+- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
++ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+@@ -294,7 +294,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
+
+ if (status != MC_CMD_STATUS_OK) {
+ dev_dbg(mc_io->dev,
+- "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
++ "MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
+diff --git a/drivers/staging/fsl-mc/include/dpaa2-fd.h b/drivers/staging/fsl-mc/include/dpaa2-fd.h
+new file mode 100644
+index 00000000..72328415
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpaa2-fd.h
+@@ -0,0 +1,706 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAA2_FD_H
++#define __FSL_DPAA2_FD_H
++
++#include <linux/kernel.h>
++
++/**
++ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
++ *
++ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2.
++ * Frames can be enqueued to and dequeued from Frame Queues (FQs), which are
++ * consumed by the various DPAA accelerators (WRIOP, SEC, PME, DCE).
++ *
++ * There are three types of frames: single, scatter gather, and frame lists.
++ *
++ * The set of APIs in this file must be used to create, manipulate and
++ * query Frame Descriptors.
++ */
++
++/**
++ * struct dpaa2_fd - Struct describing FDs
++ * @words: for easier/faster copying the whole FD structure
++ * @addr: address in the FD
++ * @len: length in the FD
++ * @bpid: buffer pool ID
++ * @format_offset: format, offset, and short-length fields
++ * @frc: frame context
++ * @ctrl: control bits...including dd, sc, va, err, etc
++ * @flc: flow context address
++ *
++ * This structure represents the basic Frame Descriptor used in the system.
++ */
++struct dpaa2_fd {
++ union {
++ u32 words[8];
++ struct dpaa2_fd_simple {
++ __le64 addr;
++ __le32 len;
++ __le16 bpid;
++ __le16 format_offset;
++ __le32 frc;
++ __le32 ctrl;
++ __le64 flc;
++ } simple;
++ };
++};
++
++#define FD_SHORT_LEN_FLAG_MASK 0x1
++#define FD_SHORT_LEN_FLAG_SHIFT 14
++#define FD_SHORT_LEN_MASK 0x3FFFF
++#define FD_OFFSET_MASK 0x0FFF
++#define FD_FORMAT_MASK 0x3
++#define FD_FORMAT_SHIFT 12
++#define FD_BPID_MASK 0x3FFF
++#define SG_SHORT_LEN_FLAG_MASK 0x1
++#define SG_SHORT_LEN_FLAG_SHIFT 14
++#define SG_SHORT_LEN_MASK 0x1FFFF
++#define SG_OFFSET_MASK 0x0FFF
++#define SG_FORMAT_MASK 0x3
++#define SG_FORMAT_SHIFT 12
++#define SG_BPID_MASK 0x3FFF
++#define SG_FINAL_FLAG_MASK 0x1
++#define SG_FINAL_FLAG_SHIFT 15
++#define FL_SHORT_LEN_FLAG_MASK 0x1
++#define FL_SHORT_LEN_FLAG_SHIFT 14
++#define FL_SHORT_LEN_MASK 0x3FFFF
++#define FL_OFFSET_MASK 0x0FFF
++#define FL_FORMAT_MASK 0x3
++#define FL_FORMAT_SHIFT 12
++#define FL_BPID_MASK 0x3FFF
++#define FL_FINAL_FLAG_MASK 0x1
++#define FL_FINAL_FLAG_SHIFT 15
++
++/* Error bits in FD CTRL */
++#define FD_CTRL_ERR_MASK 0x000000FF
++#define FD_CTRL_UFD 0x00000004
++#define FD_CTRL_SBE 0x00000008
++#define FD_CTRL_FLC 0x00000010
++#define FD_CTRL_FSE 0x00000020
++#define FD_CTRL_FAERR 0x00000040
++
++/* Annotation bits in FD CTRL */
++#define FD_CTRL_PTA 0x00800000
++#define FD_CTRL_PTV1 0x00400000
++
++enum dpaa2_fd_format {
++ dpaa2_fd_single = 0,
++ dpaa2_fd_list,
++ dpaa2_fd_sg
++};
++
++/**
++ * dpaa2_fd_get_addr() - get the addr field of frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the address in the frame descriptor.
++ */
++static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd)
++{
++ return (dma_addr_t)le64_to_cpu(fd->simple.addr);
++}
++
++/**
++ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor
++ * @fd: the given frame descriptor
++ * @addr: the address needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr)
++{
++ fd->simple.addr = cpu_to_le64(addr);
++}
++
++/**
++ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the frame context field in the frame descriptor.
++ */
++static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd)
++{
++ return le32_to_cpu(fd->simple.frc);
++}
++
++/**
++ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor
++ * @fd: the given frame descriptor
++ * @frc: the frame context needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc)
++{
++ fd->simple.frc = cpu_to_le32(frc);
++}
++
++/**
++ * dpaa2_fd_get_ctrl() - Get the control bits in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the control bits field in the frame descriptor.
++ */
++static inline u32 dpaa2_fd_get_ctrl(const struct dpaa2_fd *fd)
++{
++ return le32_to_cpu(fd->simple.ctrl);
++}
++
++/**
++ * dpaa2_fd_set_ctrl() - Set the control bits in the frame descriptor
++ * @fd: the given frame descriptor
++ * @ctrl: the control bits to be set in the frame descriptor
++ */
++static inline void dpaa2_fd_set_ctrl(struct dpaa2_fd *fd, u32 ctrl)
++{
++ fd->simple.ctrl = cpu_to_le32(ctrl);
++}
++
++/**
++ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the flow context in the frame descriptor.
++ */
++static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd)
++{
++ return (dma_addr_t)le64_to_cpu(fd->simple.flc);
++}
++
++/**
++ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor
++ * @fd: the given frame descriptor
++ * @flc_addr: the flow context needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr)
++{
++ fd->simple.flc = cpu_to_le64(flc_addr);
++}
++
++static inline bool dpaa2_fd_short_len(const struct dpaa2_fd *fd)
++{
++ return !!((le16_to_cpu(fd->simple.format_offset) >>
++ FD_SHORT_LEN_FLAG_SHIFT) & FD_SHORT_LEN_FLAG_MASK);
++}
++
++/**
++ * dpaa2_fd_get_len() - Get the length in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the length field in the frame descriptor.
++ */
++static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd)
++{
++ if (dpaa2_fd_short_len(fd))
++ return le32_to_cpu(fd->simple.len) & FD_SHORT_LEN_MASK;
++
++ return le32_to_cpu(fd->simple.len);
++}
++
++/**
++ * dpaa2_fd_set_len() - Set the length field of frame descriptor
++ * @fd: the given frame descriptor
++ * @len: the length needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len)
++{
++ fd->simple.len = cpu_to_le32(len);
++}
++
++/**
++ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the offset.
++ */
++static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd)
++{
++ return le16_to_cpu(fd->simple.format_offset) & FD_OFFSET_MASK;
++}
++
++/**
++ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor
++ * @fd: the given frame descriptor
++ * @offset: the offset needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset)
++{
++ fd->simple.format_offset &= cpu_to_le16(~FD_OFFSET_MASK);
++ fd->simple.format_offset |= cpu_to_le16(offset);
++}
++
++/**
++ * dpaa2_fd_get_format() - Get the format field in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the format.
++ */
++static inline enum dpaa2_fd_format dpaa2_fd_get_format(
++ const struct dpaa2_fd *fd)
++{
++ return (enum dpaa2_fd_format)((le16_to_cpu(fd->simple.format_offset)
++ >> FD_FORMAT_SHIFT) & FD_FORMAT_MASK);
++}
++
++/**
++ * dpaa2_fd_set_format() - Set the format field of frame descriptor
++ * @fd: the given frame descriptor
++ * @format: the format needs to be set in frame descriptor
++ */
++static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd,
++ enum dpaa2_fd_format format)
++{
++ fd->simple.format_offset &=
++ cpu_to_le16(~(FD_FORMAT_MASK << FD_FORMAT_SHIFT));
++ fd->simple.format_offset |= cpu_to_le16(format << FD_FORMAT_SHIFT);
++}
++
++/**
++ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor
++ * @fd: the given frame descriptor
++ *
++ * Return the buffer pool id.
++ */
++static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd)
++{
++ return le16_to_cpu(fd->simple.bpid) & FD_BPID_MASK;
++}
++
++/**
++ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor
++ * @fd: the given frame descriptor
++ * @bpid: buffer pool id to be set
++ */
++static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid)
++{
++ fd->simple.bpid &= cpu_to_le16(~(FD_BPID_MASK));
++ fd->simple.bpid |= cpu_to_le16(bpid);
++}
++
++/**
++ * struct dpaa2_sg_entry - the scatter-gathering structure
++ * @addr: address of the sg entry
++ * @len: length in this sg entry
++ * @bpid: buffer pool id
++ * @format_offset: format and offset fields
++ */
++struct dpaa2_sg_entry {
++ __le64 addr;
++ __le32 len;
++ __le16 bpid;
++ __le16 format_offset;
++};
++
++enum dpaa2_sg_format {
++ dpaa2_sg_single = 0,
++ dpaa2_sg_frame_data,
++ dpaa2_sg_sgt_ext
++};
++
++/* Accessors for SG entry fields */
++
++/**
++ * dpaa2_sg_get_addr() - Get the address from SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the address.
++ */
++static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg)
++{
++	return (dma_addr_t)le64_to_cpu(sg->addr);
++}
++
++/**
++ * dpaa2_sg_set_addr() - Set the address in SG entry
++ * @sg: the given scatter-gathering object
++ * @addr: the address to be set
++ */
++static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr)
++{
++ sg->addr = cpu_to_le64(addr);
++}
++
++static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg)
++{
++ return !!((le16_to_cpu(sg->format_offset) >> SG_SHORT_LEN_FLAG_SHIFT)
++ & SG_SHORT_LEN_FLAG_MASK);
++}
++
++/**
++ * dpaa2_sg_get_len() - Get the length in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the length.
++ */
++static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg)
++{
++ if (dpaa2_sg_short_len(sg))
++ return le32_to_cpu(sg->len) & SG_SHORT_LEN_MASK;
++
++ return le32_to_cpu(sg->len);
++}
++
++/**
++ * dpaa2_sg_set_len() - Set the length in SG entry
++ * @sg: the given scatter-gathering object
++ * @len: the length to be set
++ */
++static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len)
++{
++ sg->len = cpu_to_le32(len);
++}
++
++/**
++ * dpaa2_sg_get_offset() - Get the offset in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the offset.
++ */
++static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg)
++{
++ return le16_to_cpu(sg->format_offset) & SG_OFFSET_MASK;
++}
++
++/**
++ * dpaa2_sg_set_offset() - Set the offset in SG entry
++ * @sg: the given scatter-gathering object
++ * @offset: the offset to be set
++ */
++static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg,
++ u16 offset)
++{
++ sg->format_offset &= cpu_to_le16(~SG_OFFSET_MASK);
++ sg->format_offset |= cpu_to_le16(offset);
++}
++
++/**
++ * dpaa2_sg_get_format() - Get the SG format in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the format.
++ */
++static inline enum dpaa2_sg_format
++ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg)
++{
++ return (enum dpaa2_sg_format)((le16_to_cpu(sg->format_offset)
++ >> SG_FORMAT_SHIFT) & SG_FORMAT_MASK);
++}
++
++/**
++ * dpaa2_sg_set_format() - Set the SG format in SG entry
++ * @sg: the given scatter-gathering object
++ * @format: the format to be set
++ */
++static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg,
++ enum dpaa2_sg_format format)
++{
++ sg->format_offset &= cpu_to_le16(~(SG_FORMAT_MASK << SG_FORMAT_SHIFT));
++ sg->format_offset |= cpu_to_le16(format << SG_FORMAT_SHIFT);
++}
++
++/**
++ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return the bpid.
++ */
++static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg)
++{
++ return le16_to_cpu(sg->bpid) & SG_BPID_MASK;
++}
++
++/**
++ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry
++ * @sg: the given scatter-gathering object
++ * @bpid: the bpid to be set
++ */
++static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid)
++{
++ sg->bpid &= cpu_to_le16(~(SG_BPID_MASK));
++ sg->bpid |= cpu_to_le16(bpid);
++}
++
++/**
++ * dpaa2_sg_is_final() - Check final bit in SG entry
++ * @sg: the given scatter-gathering object
++ *
++ * Return bool.
++ */
++static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg)
++{
++ return !!(le16_to_cpu(sg->format_offset) >> SG_FINAL_FLAG_SHIFT);
++}
++
++/**
++ * dpaa2_sg_set_final() - Set the final bit in SG entry
++ * @sg: the given scatter-gathering object
++ * @final: the final boolean to be set
++ */
++static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
++{
++ sg->format_offset &= cpu_to_le16(~(SG_FINAL_FLAG_MASK
++ << SG_FINAL_FLAG_SHIFT));
++ sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
++}
++
++/**
++ * struct dpaa2_fl_entry - structure for frame list entry.
++ * @addr: address in the FLE
++ * @len: length in the FLE
++ * @bpid: buffer pool ID
++ * @format_offset: format, offset, and short-length fields
++ * @frc: frame context
++ * @ctrl: control bits...including pta, pvt1, pvt2, err, etc
++ * @flc: flow context address
++ */
++struct dpaa2_fl_entry {
++ __le64 addr;
++ __le32 len;
++ __le16 bpid;
++ __le16 format_offset;
++ __le32 frc;
++ __le32 ctrl;
++ __le64 flc;
++};
++
++enum dpaa2_fl_format {
++ dpaa2_fl_single = 0,
++ dpaa2_fl_res,
++ dpaa2_fl_sg
++};
++
++/**
++ * dpaa2_fl_get_addr() - get the addr field of FLE
++ * @fle: the given frame list entry
++ *
++ * Return the address in the frame list entry.
++ */
++static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle)
++{
++ return (dma_addr_t)le64_to_cpu(fle->addr);
++}
++
++/**
++ * dpaa2_fl_set_addr() - Set the addr field of FLE
++ * @fle: the given frame list entry
++ * @addr: the address needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle,
++ dma_addr_t addr)
++{
++ fle->addr = cpu_to_le64(addr);
++}
++
++/**
++ * dpaa2_fl_get_frc() - Get the frame context in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the frame context field in the frame list entry.
++ */
++static inline u32 dpaa2_fl_get_frc(const struct dpaa2_fl_entry *fle)
++{
++ return le32_to_cpu(fle->frc);
++}
++
++/**
++ * dpaa2_fl_set_frc() - Set the frame context in the FLE
++ * @fle: the given frame list entry
++ * @frc: the frame context needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_frc(struct dpaa2_fl_entry *fle, u32 frc)
++{
++ fle->frc = cpu_to_le32(frc);
++}
++
++/**
++ * dpaa2_fl_get_ctrl() - Get the control bits in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the control bits field in the frame list entry.
++ */
++static inline u32 dpaa2_fl_get_ctrl(const struct dpaa2_fl_entry *fle)
++{
++ return le32_to_cpu(fle->ctrl);
++}
++
++/**
++ * dpaa2_fl_set_ctrl() - Set the control bits in the FLE
++ * @fle: the given frame list entry
++ * @ctrl: the control bits to be set in the frame list entry
++ */
++static inline void dpaa2_fl_set_ctrl(struct dpaa2_fl_entry *fle, u32 ctrl)
++{
++ fle->ctrl = cpu_to_le32(ctrl);
++}
++
++/**
++ * dpaa2_fl_get_flc() - Get the flow context in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the flow context in the frame list entry.
++ */
++static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle)
++{
++ return (dma_addr_t)le64_to_cpu(fle->flc);
++}
++
++/**
++ * dpaa2_fl_set_flc() - Set the flow context field of FLE
++ * @fle: the given frame list entry
++ * @flc_addr: the flow context needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle,
++ dma_addr_t flc_addr)
++{
++ fle->flc = cpu_to_le64(flc_addr);
++}
++
++static inline bool dpaa2_fl_short_len(const struct dpaa2_fl_entry *fle)
++{
++ return !!((le16_to_cpu(fle->format_offset) >>
++ FL_SHORT_LEN_FLAG_SHIFT) & FL_SHORT_LEN_FLAG_MASK);
++}
++
++/**
++ * dpaa2_fl_get_len() - Get the length in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the length field in the frame list entry.
++ */
++static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle)
++{
++ if (dpaa2_fl_short_len(fle))
++ return le32_to_cpu(fle->len) & FL_SHORT_LEN_MASK;
++
++ return le32_to_cpu(fle->len);
++}
++
++/**
++ * dpaa2_fl_set_len() - Set the length field of FLE
++ * @fle: the given frame list entry
++ * @len: the length needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len)
++{
++ fle->len = cpu_to_le32(len);
++}
++
++/**
++ * dpaa2_fl_get_offset() - Get the offset field in the frame list entry
++ * @fle: the given frame list entry
++ *
++ * Return the offset.
++ */
++static inline u16 dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle)
++{
++ return le16_to_cpu(fle->format_offset) & FL_OFFSET_MASK;
++}
++
++/**
++ * dpaa2_fl_set_offset() - Set the offset field of FLE
++ * @fle: the given frame list entry
++ * @offset: the offset needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, u16 offset)
++{
++ fle->format_offset &= cpu_to_le16(~FL_OFFSET_MASK);
++ fle->format_offset |= cpu_to_le16(offset);
++}
++
++/**
++ * dpaa2_fl_get_format() - Get the format field in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the format.
++ */
++static inline enum dpaa2_fl_format dpaa2_fl_get_format(
++ const struct dpaa2_fl_entry *fle)
++{
++ return (enum dpaa2_fl_format)((le16_to_cpu(fle->format_offset) >>
++ FL_FORMAT_SHIFT) & FL_FORMAT_MASK);
++}
++
++/**
++ * dpaa2_fl_set_format() - Set the format field of FLE
++ * @fle: the given frame list entry
++ * @format: the format needs to be set in frame list entry
++ */
++static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle,
++ enum dpaa2_fl_format format)
++{
++ fle->format_offset &= cpu_to_le16(~(FL_FORMAT_MASK << FL_FORMAT_SHIFT));
++ fle->format_offset |= cpu_to_le16(format << FL_FORMAT_SHIFT);
++}
++
++/**
++ * dpaa2_fl_get_bpid() - Get the bpid field in the FLE
++ * @fle: the given frame list entry
++ *
++ * Return the buffer pool id.
++ */
++static inline u16 dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle)
++{
++ return le16_to_cpu(fle->bpid) & FL_BPID_MASK;
++}
++
++/**
++ * dpaa2_fl_set_bpid() - Set the bpid field of FLE
++ * @fle: the given frame list entry
++ * @bpid: buffer pool id to be set
++ */
++static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, u16 bpid)
++{
++ fle->bpid &= cpu_to_le16(~(FL_BPID_MASK));
++ fle->bpid |= cpu_to_le16(bpid);
++}
++
++/**
++ * dpaa2_fl_is_final() - Check final bit in FLE
++ * @fle: the given frame list entry
++ *
++ * Return bool.
++ */
++static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle)
++{
++ return !!(le16_to_cpu(fle->format_offset) >> FL_FINAL_FLAG_SHIFT);
++}
++
++/**
++ * dpaa2_fl_set_final() - Set the final bit in FLE
++ * @fle: the given frame list entry
++ * @final: the final boolean to be set
++ */
++static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final)
++{
++ fle->format_offset &= cpu_to_le16(~(FL_FINAL_FLAG_MASK <<
++ FL_FINAL_FLAG_SHIFT));
++ fle->format_offset |= cpu_to_le16(final << FL_FINAL_FLAG_SHIFT);
++}
++
++#endif /* __FSL_DPAA2_FD_H */
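[Editor's note] The accessors above read and write the packed, little-endian FD fields in place, so a driver never touches the raw words directly. A minimal sketch of building a single-buffer frame descriptor with those accessors follows; it assumes the buffer has already been DMA-mapped by the caller, and the helper name example_build_single_fd() is illustrative only.

/* Illustrative sketch: describe one contiguous buffer with a frame
 * descriptor, using only the accessors defined in dpaa2-fd.h.
 */
#include <linux/string.h>
#include "dpaa2-fd.h"

static void example_build_single_fd(struct dpaa2_fd *fd, dma_addr_t buf,
				    u16 bpid, u16 offset, u32 len)
{
	memset(fd, 0, sizeof(*fd));

	dpaa2_fd_set_addr(fd, buf);		/* start of the buffer */
	dpaa2_fd_set_offset(fd, offset);	/* data begins at buf + offset */
	dpaa2_fd_set_len(fd, len);		/* frame length in bytes */
	dpaa2_fd_set_bpid(fd, bpid);		/* pool the buffer came from */
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
}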
+diff --git a/drivers/staging/fsl-mc/include/dpaa2-global.h b/drivers/staging/fsl-mc/include/dpaa2-global.h
+new file mode 100644
+index 00000000..0326447f
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpaa2-global.h
+@@ -0,0 +1,202 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAA2_GLOBAL_H
++#define __FSL_DPAA2_GLOBAL_H
++
++#include <linux/types.h>
++#include <linux/cpumask.h>
++#include "dpaa2-fd.h"
++
++struct dpaa2_dq {
++ union {
++ struct common {
++ u8 verb;
++ u8 reserved[63];
++ } common;
++ struct dq {
++ u8 verb;
++ u8 stat;
++ __le16 seqnum;
++ __le16 oprid;
++ u8 reserved;
++ u8 tok;
++ __le32 fqid;
++ u32 reserved2;
++ __le32 fq_byte_cnt;
++ __le32 fq_frm_cnt;
++ __le64 fqd_ctx;
++ u8 fd[32];
++ } dq;
++ struct scn {
++ u8 verb;
++ u8 stat;
++ u8 state;
++ u8 reserved;
++ __le32 rid_tok;
++ __le64 ctx;
++ } scn;
++ };
++};
++
++/* Parsing frame dequeue results */
++/* FQ empty */
++#define DPAA2_DQ_STAT_FQEMPTY 0x80
++/* FQ held active */
++#define DPAA2_DQ_STAT_HELDACTIVE 0x40
++/* FQ force eligible */
++#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20
++/* valid frame */
++#define DPAA2_DQ_STAT_VALIDFRAME 0x10
++/* FQ ODP enable */
++#define DPAA2_DQ_STAT_ODPVALID 0x04
++/* volatile dequeue */
++#define DPAA2_DQ_STAT_VOLATILE 0x02
++/* volatile dequeue command is expired */
++#define DPAA2_DQ_STAT_EXPIRED 0x01
++
++#define DQ_FQID_MASK 0x00FFFFFF
++#define DQ_FRAME_COUNT_MASK 0x00FFFFFF
++
++/**
++ * dpaa2_dq_flags() - Get the stat field of dequeue response
++ * @dq: the dequeue result.
++ */
++static inline u32 dpaa2_dq_flags(const struct dpaa2_dq *dq)
++{
++ return dq->dq.stat;
++}
++
++/**
++ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull
++ * command.
++ * @dq: the dequeue result
++ *
++ * Return 1 for volatile (pull) dequeue, 0 for static dequeue.
++ */
++static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq)
++{
++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE);
++}
++
++/**
++ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed.
++ * @dq: the dequeue result
++ *
++ * Return boolean.
++ */
++static inline bool dpaa2_dq_is_pull_complete(const struct dpaa2_dq *dq)
++{
++ return !!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
++}
++
++/**
++ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
++ * @dq: the dequeue result
++ *
++ * seqnum is valid only if VALIDFRAME flag is TRUE
++ *
++ * Return seqnum.
++ */
++static inline u16 dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
++{
++ return le16_to_cpu(dq->dq.seqnum);
++}
++
++/**
++ * dpaa2_dq_odpid() - Get the odpid field in dequeue response
++ * @dq: the dequeue result
++ *
++ * odpid is valid only if ODPVALID flag is TRUE.
++ *
++ * Return odpid.
++ */
++static inline u16 dpaa2_dq_odpid(const struct dpaa2_dq *dq)
++{
++ return le16_to_cpu(dq->dq.oprid);
++}
++
++/**
++ * dpaa2_dq_fqid() - Get the fqid in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return fqid.
++ */
++static inline u32 dpaa2_dq_fqid(const struct dpaa2_dq *dq)
++{
++ return le32_to_cpu(dq->dq.fqid) & DQ_FQID_MASK;
++}
++
++/**
++ * dpaa2_dq_byte_count() - Get the byte count in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the byte count remaining in the FQ.
++ */
++static inline u32 dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
++{
++ return le32_to_cpu(dq->dq.fq_byte_cnt);
++}
++
++/**
++ * dpaa2_dq_frame_count() - Get the frame count in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame count remaining in the FQ.
++ */
++static inline u32 dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
++{
++ return le32_to_cpu(dq->dq.fq_frm_cnt) & DQ_FRAME_COUNT_MASK;
++}
++
++/**
++ * dpaa2_dq_fqd_ctx() - Get the frame queue context in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame queue context.
++ */
++static inline u64 dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
++{
++ return le64_to_cpu(dq->dq.fqd_ctx);
++}
++
++/**
++ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame descriptor.
++ */
++static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
++{
++ return (const struct dpaa2_fd *)&dq->dq.fd[0];
++}
++
++#endif /* __FSL_DPAA2_GLOBAL_H */
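[Editor's note] A dequeue response only carries a frame descriptor when the VALIDFRAME status bit is set, so consumers are expected to check the stat flags before touching the FD. The sketch below shows that pattern with the accessors declared above; the dpaa2_dq entries would typically come from a dpaa2_io_store filled by a pull command (see dpaa2-io.h), and the helper and callback names are hypothetical.

/* Illustrative sketch: inspect one dequeue response and hand a valid
 * frame descriptor to a (hypothetical) consumer callback.
 */
#include <linux/printk.h>
#include "dpaa2-global.h"

static void example_handle_dq(const struct dpaa2_dq *dq,
			      void (*consume)(const struct dpaa2_fd *fd))
{
	/* Only responses carrying a frame have a valid FD. */
	if (!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME))
		return;

	pr_debug("fqid %u, %u frames / %u bytes left on the FQ\n",
		 dpaa2_dq_fqid(dq),
		 dpaa2_dq_frame_count(dq),
		 dpaa2_dq_byte_count(dq));

	consume(dpaa2_dq_fd(dq));
}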
+diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h
+new file mode 100644
+index 00000000..c7d1d997
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
+@@ -0,0 +1,190 @@
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAA2_IO_H
++#define __FSL_DPAA2_IO_H
++
++#include <linux/types.h>
++#include <linux/cpumask.h>
++
++#include "dpaa2-fd.h"
++#include "dpaa2-global.h"
++
++struct dpaa2_io;
++struct dpaa2_io_store;
++struct device;
++
++/**
++ * DOC: DPIO Service
++ *
++ * The DPIO service provides APIs for users to interact with the datapath
++ * by enqueueing and dequeuing frame descriptors.
++ *
++ * The following set of APIs can be used to enqueue and dequeue frames
++ * as well as to produce notification callbacks when data is available
++ * for dequeue.
++ */
++
++/**
++ * struct dpaa2_io_desc - The DPIO descriptor
++ * @receives_notifications: Use notification mode. Non-zero if the DPIO
++ *			has a channel.
++ * @has_8prio:		Set to non-zero for channel with 8 priority WQs. Ignored
++ *			unless receives_notifications is TRUE.
++ * @cpu:		The cpu index on which, at a minimum, the interrupt
++ *			handlers will execute.
++ * @regs_cena:		The cache enabled regs.
++ * @regs_cinh:		The cache inhibited regs.
++ * @dpio_id: The dpio index
++ * @qman_version: The qman version
++ *
++ * Describes the attributes and features of the DPIO object.
++ */
++struct dpaa2_io_desc {
++ int receives_notifications;
++ int has_8prio;
++ int cpu;
++ void *regs_cena;
++ void *regs_cinh;
++ int dpio_id;
++ u32 qman_version;
++};
++
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
++
++void dpaa2_io_down(struct dpaa2_io *d);
++
++irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
++
++/**
++ * struct dpaa2_io_notification_ctx - The DPIO notification context structure
++ * @cb: The callback to be invoked when the notification arrives
++ * @is_cdan: Zero for FQDAN, non-zero for CDAN
++ * @id: FQID or channel ID, needed for rearm
++ * @desired_cpu: The cpu on which the notifications will show up. -1 means
++ * any CPU.
++ * @dpio_id: The dpio index
++ * @qman64:    The 64-bit context value that shows up in the FQDAN/CDAN.
++ * @node: The list node
++ * @dpio_private: The dpio object internal to dpio_service
++ *
++ * Used when a FQDAN/CDAN registration is made by drivers.
++ */
++struct dpaa2_io_notification_ctx {
++ void (*cb)(struct dpaa2_io_notification_ctx *);
++ int is_cdan;
++ u32 id;
++ int desired_cpu;
++ int dpio_id;
++ u64 qman64;
++ struct list_head node;
++ void *dpio_private;
++};
++
++int dpaa2_io_service_register(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++void dpaa2_io_service_deregister(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++int dpaa2_io_service_rearm(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++
++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
++ struct dpaa2_io_store *s);
++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
++ struct dpaa2_io_store *s);
++
++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
++ const struct dpaa2_fd *fd);
++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
++ u16 qdbin, const struct dpaa2_fd *fd);
++int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
++ const u64 *buffers, unsigned int num_buffers);
++int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
++ u64 *buffers, unsigned int num_buffers);
++
++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
++ struct device *dev);
++void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid,
++ uint32_t *fcnt, uint32_t *bcnt);
++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid,
++ uint32_t *num);
++#endif
++
++
++/***************/
++/* CSCN */
++/***************/
++
++/**
++ * struct dpaa2_cscn - The CSCN message format
++ * @verb: identifies the type of message (should be 0x27).
++ * @stat: status bits related to dequeuing response (not used)
++ * @state: bit 0 = 0/1 if CG is not/is congested
++ * @reserved: reserved byte
++ * @cgid: congestion group ID - the first 16 bits
++ * @ctx: context data
++ *
++ * Congestion management can be implemented in software through
++ * the use of Congestion State Change Notifications (CSCN). These
++ * are messages written by DPAA2 hardware to memory whenever the
++ * instantaneous count (I_CNT field in the CG) exceeds the
++ * Congestion State (CS) entrance threshold, signifying congestion
++ * entrance, or when the instantaneous count returns below exit
++ * threshold, signifying congestion exit. The format of the message
++ * is given by the dpaa2_cscn structure. Bit 0 of the state field
++ * represents congestion state written by the hardware.
++ */
++struct dpaa2_cscn {
++ u8 verb;
++ u8 stat;
++ u8 state;
++ u8 reserved;
++ __le32 cgid;
++ __le64 ctx;
++};
++
++#define DPAA2_CSCN_SIZE 64
++#define DPAA2_CSCN_ALIGN 16
++
++#define DPAA2_CSCN_STATE_MASK 0x1
++#define DPAA2_CSCN_CONGESTED 1
++
++static inline bool dpaa2_cscn_state_congested(struct dpaa2_cscn *cscn)
++{
++ return ((cscn->state & DPAA2_CSCN_STATE_MASK) == DPAA2_CSCN_CONGESTED);
++}
++
++#endif /* __FSL_DPAA2_IO_H */
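[Editor's note] The store/pull declarations above combine into a simple volatile-dequeue loop: create a store, issue a pull on an FQ, then walk the results with dpaa2_io_store_next() until the last entry is flagged. The sketch below shows that flow under the assumption that the caller already holds a struct dpaa2_io service pointer, a struct device for DMA mapping of the store, and a valid FQID; the store size of 16 and the helper name example_pull_fq() are illustrative, and error handling is abbreviated.

/* Illustrative sketch: volatile (pull) dequeue of up to 16 frames. */
#include <linux/errno.h>
#include "dpaa2-io.h"

static int example_pull_fq(struct dpaa2_io *io, struct device *dev, u32 fqid)
{
	struct dpaa2_io_store *store;
	struct dpaa2_dq *dq;
	int is_last = 0;
	int err;

	store = dpaa2_io_store_create(16, dev);
	if (!store)
		return -ENOMEM;

	err = dpaa2_io_service_pull_fq(io, fqid, store);
	if (err)
		goto out;

	/* dpaa2_io_store_next() returns NULL while the hardware is still
	 * writing entries; keep polling until the last entry is seen.
	 */
	while (!is_last) {
		dq = dpaa2_io_store_next(store, &is_last);
		if (!dq)
			continue;
		/* process dpaa2_dq_fd(dq) here */
	}
out:
	dpaa2_io_store_destroy(store);
	return err;
}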
+diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
+deleted file mode 100644
+index 2860411d..00000000
+--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
++++ /dev/null
+@@ -1,185 +0,0 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#ifndef _FSL_DPBP_CMD_H
+-#define _FSL_DPBP_CMD_H
+-
+-/* DPBP Version */
+-#define DPBP_VER_MAJOR 2
+-#define DPBP_VER_MINOR 2
+-
+-/* Command IDs */
+-#define DPBP_CMDID_CLOSE 0x800
+-#define DPBP_CMDID_OPEN 0x804
+-#define DPBP_CMDID_CREATE 0x904
+-#define DPBP_CMDID_DESTROY 0x900
+-
+-#define DPBP_CMDID_ENABLE 0x002
+-#define DPBP_CMDID_DISABLE 0x003
+-#define DPBP_CMDID_GET_ATTR 0x004
+-#define DPBP_CMDID_RESET 0x005
+-#define DPBP_CMDID_IS_ENABLED 0x006
+-
+-#define DPBP_CMDID_SET_IRQ 0x010
+-#define DPBP_CMDID_GET_IRQ 0x011
+-#define DPBP_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPBP_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPBP_CMDID_SET_IRQ_MASK 0x014
+-#define DPBP_CMDID_GET_IRQ_MASK 0x015
+-#define DPBP_CMDID_GET_IRQ_STATUS 0x016
+-#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
+-
+-#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
+-#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
+-
+-struct dpbp_cmd_open {
+- __le32 dpbp_id;
+-};
+-
+-#define DPBP_ENABLE 0x1
+-
+-struct dpbp_rsp_is_enabled {
+- u8 enabled;
+-};
+-
+-struct dpbp_cmd_set_irq {
+- /* cmd word 0 */
+- u8 irq_index;
+- u8 pad[3];
+- __le32 irq_val;
+- /* cmd word 1 */
+- __le64 irq_addr;
+- /* cmd word 2 */
+- __le32 irq_num;
+-};
+-
+-struct dpbp_cmd_get_irq {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq {
+- /* response word 0 */
+- __le32 irq_val;
+- __le32 pad;
+- /* response word 1 */
+- __le64 irq_addr;
+- /* response word 2 */
+- __le32 irq_num;
+- __le32 type;
+-};
+-
+-struct dpbp_cmd_set_irq_enable {
+- u8 enable;
+- u8 pad[3];
+- u8 irq_index;
+-};
+-
+-struct dpbp_cmd_get_irq_enable {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq_enable {
+- u8 enabled;
+-};
+-
+-struct dpbp_cmd_set_irq_mask {
+- __le32 mask;
+- u8 irq_index;
+-};
+-
+-struct dpbp_cmd_get_irq_mask {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq_mask {
+- __le32 mask;
+-};
+-
+-struct dpbp_cmd_get_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq_status {
+- __le32 status;
+-};
+-
+-struct dpbp_cmd_clear_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_attributes {
+- /* response word 0 */
+- __le16 pad;
+- __le16 bpid;
+- __le32 id;
+- /* response word 1 */
+- __le16 version_major;
+- __le16 version_minor;
+-};
+-
+-struct dpbp_cmd_set_notifications {
+- /* cmd word 0 */
+- __le32 depletion_entry;
+- __le32 depletion_exit;
+- /* cmd word 1 */
+- __le32 surplus_entry;
+- __le32 surplus_exit;
+- /* cmd word 2 */
+- __le16 options;
+- __le16 pad[3];
+- /* cmd word 3 */
+- __le64 message_ctx;
+- /* cmd word 4 */
+- __le64 message_iova;
+-};
+-
+-struct dpbp_rsp_get_notifications {
+- /* response word 0 */
+- __le32 depletion_entry;
+- __le32 depletion_exit;
+- /* response word 1 */
+- __le32 surplus_entry;
+- __le32 surplus_exit;
+- /* response word 2 */
+- __le16 options;
+- __le16 pad[3];
+- /* response word 3 */
+- __le64 message_ctx;
+- /* response word 4 */
+- __le64 message_iova;
+-};
+-
+-#endif /* _FSL_DPBP_CMD_H */
+diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
+index e14e85a5..e9e04cce 100644
+--- a/drivers/staging/fsl-mc/include/dpbp.h
++++ b/drivers/staging/fsl-mc/include/dpbp.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -32,7 +33,8 @@
+ #ifndef __FSL_DPBP_H
+ #define __FSL_DPBP_H
+
+-/* Data Path Buffer Pool API
++/*
++ * Data Path Buffer Pool API
+ * Contains initialization APIs and runtime control APIs for DPBP
+ */
+
+@@ -44,25 +46,8 @@ int dpbp_open(struct fsl_mc_io *mc_io,
+ u16 *token);
+
+ int dpbp_close(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
+-
+-/**
+- * struct dpbp_cfg - Structure representing DPBP configuration
+- * @options: place holder
+- */
+-struct dpbp_cfg {
+- u32 options;
+-};
+-
+-int dpbp_create(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- const struct dpbp_cfg *cfg,
+- u16 *token);
+-
+-int dpbp_destroy(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
++ u32 cmd_flags,
++ u16 token);
+
+ int dpbp_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+@@ -81,140 +66,25 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+-/**
+- * struct dpbp_irq_cfg - IRQ configuration
+- * @addr: Address that must be written to signal a message-based interrupt
+- * @val: Value to write into irq_addr address
+- * @irq_num: A user defined number associated with this IRQ
+- */
+-struct dpbp_irq_cfg {
+- u64 addr;
+- u32 val;
+- int irq_num;
+-};
+-
+-int dpbp_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dpbp_irq_cfg *irq_cfg);
+-
+-int dpbp_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dpbp_irq_cfg *irq_cfg);
+-
+-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en);
+-
+-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en);
+-
+-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask);
+-
+-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask);
+-
+-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status);
+-
+-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status);
+-
+ /**
+ * struct dpbp_attr - Structure representing DPBP attributes
+ * @id: DPBP object ID
+- * @version: DPBP version
+ * @bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers
+ */
+ struct dpbp_attr {
+ int id;
+- /**
+- * struct version - Structure representing DPBP version
+- * @major: DPBP major version
+- * @minor: DPBP minor version
+- */
+- struct {
+- u16 major;
+- u16 minor;
+- } version;
+ u16 bpid;
+ };
+
+-int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_attr *attr);
+-
+-/**
+- * DPBP notifications options
+- */
+-
+-/**
+- * BPSCN write will attempt to allocate into a cache (coherent write)
+- */
+-#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
+-
+-/**
+- * struct dpbp_notification_cfg - Structure representing DPBP notifications
+- * towards software
+- * @depletion_entry: below this threshold the pool is "depleted";
+- * set it to '0' to disable it
+- * @depletion_exit: greater than or equal to this threshold the pool exit its
+- * "depleted" state
+- * @surplus_entry: above this threshold the pool is in "surplus" state;
+- * set it to '0' to disable it
+- * @surplus_exit: less than or equal to this threshold the pool exit its
+- * "surplus" state
+- * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
+- * is not '0' (enable); I/O virtual address (must be in DMA-able memory),
+- * must be 16B aligned.
+- * @message_ctx: The context that will be part of the BPSCN message and will
+- * be written to 'message_iova'
+- * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
+- */
+-struct dpbp_notification_cfg {
+- u32 depletion_entry;
+- u32 depletion_exit;
+- u32 surplus_entry;
+- u32 surplus_exit;
+- u64 message_iova;
+- u64 message_ctx;
+- u16 options;
+-};
+-
+-int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg);
+-
+-int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg);
++int dpbp_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpbp_attr *attr);
+
+-/** @} */
++int dpbp_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
+
+ #endif /* __FSL_DPBP_H */
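[Editor's note] After this patch the DPBP API is reduced to open/close, enable/disable, reset, attribute query and API-version query; the bpid returned by dpbp_get_attributes() is what a driver later passes to dpaa2_io_service_release()/acquire(). A bring-up sketch follows, assuming an already-allocated MC portal (mc_io) and a DPBP object id discovered via the DPRC; the cmd_flags value of 0 and the helper name example_dpbp_setup() are illustrative.

/* Illustrative sketch: open and enable a DPBP, then fetch its bpid. */
static int example_dpbp_setup(struct fsl_mc_io *mc_io, int dpbp_id, u16 *bpid)
{
	struct dpbp_attr attr;
	u16 token;
	int err;

	err = dpbp_open(mc_io, 0, dpbp_id, &token);
	if (err)
		return err;

	err = dpbp_enable(mc_io, 0, token);
	if (err)
		goto close;

	err = dpbp_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto close;

	*bpid = attr.bpid;	/* used with dpaa2_io_service_release/acquire */
close:
	dpbp_close(mc_io, 0, token);	/* the pool itself stays enabled */
	return err;
}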
+diff --git a/drivers/staging/fsl-mc/include/dpcon.h b/drivers/staging/fsl-mc/include/dpcon.h
+new file mode 100644
+index 00000000..efa23906
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpcon.h
+@@ -0,0 +1,115 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPCON_H
++#define __FSL_DPCON_H
++
++/* Data Path Concentrator API
++ * Contains initialization APIs and runtime control APIs for DPCON
++ */
++
++struct fsl_mc_io;
++
++/** General DPCON macros */
++
++/**
++ * Use it to disable notifications; see dpcon_set_notification()
++ */
++#define DPCON_INVALID_DPIO_ID (int)(-1)
++
++int dpcon_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpcon_id,
++ u16 *token);
++
++int dpcon_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpcon_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpcon_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpcon_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
++
++int dpcon_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * struct dpcon_attr - Structure representing DPCON attributes
++ * @id: DPCON object ID
++ * @qbman_ch_id: Channel ID to be used by dequeue operation
++ * @num_priorities: Number of priorities for the DPCON channel (1-8)
++ */
++struct dpcon_attr {
++ int id;
++ u16 qbman_ch_id;
++ u8 num_priorities;
++};
++
++int dpcon_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_attr *attr);
++
++/**
++ * struct dpcon_notification_cfg - Structure representing notification params
++ * @dpio_id: DPIO object ID; must be configured with a notification channel;
++ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
++ * @priority: Priority selection within the DPIO channel; valid values
++ * are 0-7, depending on the number of priorities in that channel
++ * @user_ctx: User context value provided with each CDAN message
++ */
++struct dpcon_notification_cfg {
++ int dpio_id;
++ u8 priority;
++ u64 user_ctx;
++};
++
++int dpcon_set_notification(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_notification_cfg *cfg);
++
++int dpcon_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
++
++#endif /* __FSL_DPCON_H */
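[Editor's note] A DPCON is typically wired to a DPIO so that channel data availability notifications (CDANs) are delivered through that DPIO, and the qbman_ch_id from its attributes is then used as the channel id for dequeue operations. The sketch below strings the calls declared above together under the assumption that the caller supplies the DPIO id and a user context value; the priority of 0, cmd_flags of 0 and the helper name example_dpcon_setup() are illustrative.

/* Illustrative sketch: point a DPCON's notifications at a given DPIO. */
static int example_dpcon_setup(struct fsl_mc_io *mc_io, int dpcon_id,
			       int dpio_id, u64 user_ctx, u16 *qbman_ch_id)
{
	struct dpcon_notification_cfg cfg = {
		.dpio_id  = dpio_id,	/* DPCON_INVALID_DPIO_ID disables */
		.priority = 0,		/* WQ 0..7 within the channel */
		.user_ctx = user_ctx,	/* echoed back in each CDAN */
	};
	struct dpcon_attr attr;
	u16 token;
	int err;

	err = dpcon_open(mc_io, 0, dpcon_id, &token);
	if (err)
		return err;

	err = dpcon_set_notification(mc_io, 0, token, &cfg);
	if (err)
		goto close;

	err = dpcon_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto close;

	*qbman_ch_id = attr.qbman_ch_id;	/* channel to dequeue from */

	err = dpcon_enable(mc_io, 0, token);
close:
	dpcon_close(mc_io, 0, token);
	return err;
}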
+diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h
+index e5cfd017..170c07dd 100644
+--- a/drivers/staging/fsl-mc/include/dpmng.h
++++ b/drivers/staging/fsl-mc/include/dpmng.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -32,7 +33,8 @@
+ #ifndef __FSL_DPMNG_H
+ #define __FSL_DPMNG_H
+
+-/* Management Complex General API
++/*
++ * Management Complex General API
+ * Contains general API for the Management Complex firmware
+ */
+
+@@ -58,12 +60,8 @@ struct mc_version {
+ u32 revision;
+ };
+
+-int mc_get_version(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- struct mc_version *mc_ver_info);
+-
+-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int *container_id);
++int mc_get_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ struct mc_version *mc_ver_info);
+
+ #endif /* __FSL_DPMNG_H */
+diff --git a/drivers/staging/fsl-mc/include/dpopr.h b/drivers/staging/fsl-mc/include/dpopr.h
+new file mode 100644
+index 00000000..e1110af2
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpopr.h
+@@ -0,0 +1,110 @@
++/*
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPOPR_H_
++#define __FSL_DPOPR_H_
++
++/* Data Path Order Restoration API
++ * Contains initialization APIs and runtime APIs for Order Restoration
++ */
++
++/** Order Restoration properties */
++
++/**
++ * Create a new Order Point Record option
++ */
++#define OPR_OPT_CREATE 0x1
++/**
++ * Retire an existing Order Point Record option
++ */
++#define OPR_OPT_RETIRE 0x2
++
++/**
++ * struct opr_cfg - Structure representing OPR configuration
++ * @oprrws: Order point record (OPR) restoration window size (0 to 5)
++ * 0 - Window size is 32 frames.
++ * 1 - Window size is 64 frames.
++ * 2 - Window size is 128 frames.
++ * 3 - Window size is 256 frames.
++ * 4 - Window size is 512 frames.
++ * 5 - Window size is 1024 frames.
++ * @oa: OPR auto advance NESN window size (0 disabled, 1 enabled)
++ * @olws: OPR acceptable late arrival window size (0 to 3)
++ * 0 - Disabled. Late arrivals are always rejected.
++ * 1 - Window size is 32 frames.
++ * 2 - Window size is the same as the OPR restoration
++ * window size configured in the OPRRWS field.
++ * 3 - Window size is 8192 frames. Late arrivals are
++ * always accepted.
++ * @oeane: Order restoration list (ORL) resource exhaustion
++ * advance NESN enable (0 disabled, 1 enabled)
++ * @oloe: OPR loose ordering enable (0 disabled, 1 enabled)
++ */
++struct opr_cfg {
++ u8 oprrws;
++ u8 oa;
++ u8 olws;
++ u8 oeane;
++ u8 oloe;
++};
++
++/**
++ * struct opr_qry - Structure representing OPR query results
++ * @enable: Enabled state
++ * @rip: Retirement In Progress
++ * @ndsn: Next dispensed sequence number
++ * @nesn: Next expected sequence number
++ * @ea_hseq: Early arrival head sequence number
++ * @hseq_nlis: HSEQ not last in sequence
++ * @ea_tseq: Early arrival tail sequence number
++ * @tseq_nlis: TSEQ not last in sequence
++ * @ea_tptr: Early arrival tail pointer
++ * @ea_hptr: Early arrival head pointer
++ * @opr_id: Order Point Record ID
++ * @opr_vid: Order Point Record Virtual ID
++ */
++struct opr_qry {
++ char enable;
++ char rip;
++ u16 ndsn;
++ u16 nesn;
++ u16 ea_hseq;
++ char hseq_nlis;
++ u16 ea_tseq;
++ char tseq_nlis;
++ u16 ea_tptr;
++ u16 ea_hptr;
++ u16 opr_id;
++ u16 opr_vid;
++};
++
++#endif /* __FSL_DPOPR_H_ */
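The new dpopr.h only defines the configuration and query records plus the OPR_OPT_CREATE/OPR_OPT_RETIRE flags; a hedged sketch of filling struct opr_cfg before an OPR create request follows. The command that would consume the record (e.g. a dpni_set_opr()-style call) is not part of this patch and is assumed here.

/* Sketch: describe an order point record with a 128-frame window. */
static void fill_example_opr_cfg(struct opr_cfg *cfg)
{
        cfg->oprrws = 2;        /* restoration window of 128 frames */
        cfg->oa     = 1;        /* auto advance NESN window enabled */
        cfg->olws   = 0;        /* late arrivals always rejected */
        cfg->oeane  = 0;        /* no NESN advance on ORL exhaustion */
        cfg->oloe   = 0;        /* strict (non-loose) ordering */
}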
+diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
+index 593b2bbe..8dc411ec 100644
+--- a/drivers/staging/fsl-mc/include/dprc.h
++++ b/drivers/staging/fsl-mc/include/dprc.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -34,26 +35,13 @@
+
+ #include "mc-cmd.h"
+
+-/* Data Path Resource Container API
++/*
++ * Data Path Resource Container API
+ * Contains DPRC API for managing and querying DPAA resources
+ */
+
+ struct fsl_mc_io;
+
+-/**
+- * Set this value as the icid value in dprc_cfg structure when creating a
+- * container, in case the ICID is not selected by the user and should be
+- * allocated by the DPRC from the pool of ICIDs.
+- */
+-#define DPRC_GET_ICID_FROM_POOL (u16)(~(0))
+-
+-/**
+- * Set this value as the portal_id value in dprc_cfg structure when creating a
+- * container, in case the portal ID is not specifically selected by the
+- * user and should be allocated by the DPRC from the pool of portal ids.
+- */
+-#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0))
+-
+ int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+@@ -63,75 +51,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+-/**
+- * Container general options
+- *
+- * These options may be selected at container creation by the container creator
+- * and can be retrieved using dprc_get_attributes()
+- */
+-
+-/* Spawn Policy Option allowed - Indicates that the new container is allowed
+- * to spawn and have its own child containers.
+- */
+-#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
+-
+-/* General Container allocation policy - Indicates that the new container is
+- * allowed to allocate requested resources from its parent container; if not
+- * set, the container is only allowed to use resources in its own pools; Note
+- * that this is a container's global policy, but the parent container may
+- * override it and set specific quota per resource type.
+- */
+-#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
+-
+-/* Object initialization allowed - software context associated with this
+- * container is allowed to invoke object initialization operations.
+- */
+-#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
+-
+-/* Topology change allowed - software context associated with this
+- * container is allowed to invoke topology operations, such as attach/detach
+- * of network objects.
+- */
+-#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
+-
+-/* AIOP - Indicates that container belongs to AIOP. */
+-#define DPRC_CFG_OPT_AIOP 0x00000020
+-
+-/* IRQ Config - Indicates that the container allowed to configure its IRQs. */
+-#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040
+-
+-/**
+- * struct dprc_cfg - Container configuration options
+- * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free
+- * ICID value is allocated by the DPRC
+- * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free
+- * portal ID is allocated by the DPRC
+- * @options: Combination of 'DPRC_CFG_OPT_<X>' options
+- * @label: Object's label
+- */
+-struct dprc_cfg {
+- u16 icid;
+- int portal_id;
+- u64 options;
+- char label[16];
+-};
+-
+-int dprc_create_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_cfg *cfg,
+- int *child_container_id,
+- u64 *child_portal_offset);
+-
+-int dprc_destroy_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id);
+-
+-int dprc_reset_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id);
+
+ /* IRQ */
+
+@@ -139,7 +58,7 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
+ #define DPRC_IRQ_INDEX 0
+
+ /* Number of dprc's IRQs */
+-#define DPRC_NUM_OF_IRQS 1
++#define DPRC_NUM_OF_IRQS 1
+
+ /* DPRC IRQ events */
+
+@@ -151,12 +70,14 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
+ #define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
+ /* IRQ event - Indicates that resources removed from the container */
+ #define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
+-/* IRQ event - Indicates that one of the descendant containers that opened by
++/*
++ * IRQ event - Indicates that one of the descendant containers opened by
+ * this container is destroyed
+ */
+ #define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
+
+-/* IRQ event - Indicates that on one of the container's opened object is
++/*
++ * IRQ event - Indicates that one of the container's opened objects is
+ * destroyed
+ */
+ #define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
+@@ -171,59 +92,59 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
+ * @irq_num: A user defined number associated with this IRQ
+ */
+ struct dprc_irq_cfg {
+- phys_addr_t paddr;
+- u32 val;
+- int irq_num;
++ phys_addr_t paddr;
++ u32 val;
++ int irq_num;
+ };
+
+-int dprc_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en);
+-
+-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en);
+-
+-int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask);
+-
+-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask);
+-
+-int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status);
+-
+-int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status);
++int dprc_set_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ struct dprc_irq_cfg *irq_cfg);
++
++int dprc_get_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ int *type,
++ struct dprc_irq_cfg *irq_cfg);
++
++int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en);
++
++int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en);
++
++int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask);
++
++int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask);
++
++int dprc_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status);
++
++int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status);
+
+ /**
+ * struct dprc_attributes - Container attributes
+@@ -231,114 +152,23 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ * @icid: Container's ICID
+ * @portal_id: Container's portal ID
+ * @options: Container's options as set at container's creation
+- * @version: DPRC version
+ */
+ struct dprc_attributes {
+ int container_id;
+ u16 icid;
+ int portal_id;
+ u64 options;
+- /**
+- * struct version - DPRC version
+- * @major: DPRC major version
+- * @minor: DPRC minor version
+- */
+- struct {
+- u16 major;
+- u16 minor;
+- } version;
+-};
+-
+-int dprc_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_attributes *attributes);
+-
+-int dprc_set_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 quota);
+-
+-int dprc_get_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 *quota);
+-
+-/* Resource request options */
+-
+-/* Explicit resource ID request - The requested objects/resources
+- * are explicit and sequential (in case of resources).
+- * The base ID is given at res_req at base_align field
+- */
+-#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
+-
+-/* Aligned resources request - Relevant only for resources
+- * request (and not objects). Indicates that resources base ID should be
+- * sequential and aligned to the value given at dprc_res_req base_align field
+- */
+-#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
+-
+-/* Plugged Flag - Relevant only for object assignment request.
+- * Indicates that after all objects assigned. An interrupt will be invoked at
+- * the relevant GPP. The assigned object will be marked as plugged.
+- * plugged objects can't be assigned from their container
+- */
+-#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
+-
+-/**
+- * struct dprc_res_req - Resource request descriptor, to be used in assignment
+- * or un-assignment of resources and objects.
+- * @type: Resource/object type: Represent as a NULL terminated string.
+- * This string may received by using dprc_get_pool() to get resource
+- * type and dprc_get_obj() to get object type;
+- * Note: it is not possible to assign/un-assign DPRC objects
+- * @num: Number of resources
+- * @options: Request options: combination of DPRC_RES_REQ_OPT_ options
+- * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT
+- * is set at option), this field represents the required base ID
+- * for resource allocation; In case of aligned assignment
+- * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field
+- * indicates the required alignment for the resource ID(s) -
+- * use 0 if there is no alignment or explicit ID requirements
+- */
+-struct dprc_res_req {
+- char type[16];
+- u32 num;
+- u32 options;
+- int id_base_align;
+ };
+
+-int dprc_assign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int container_id,
+- struct dprc_res_req *res_req);
+-
+-int dprc_unassign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- struct dprc_res_req *res_req);
+-
+-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *pool_count);
+-
+-int dprc_get_pool(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int pool_index,
+- char *type);
++int dprc_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dprc_attributes *attributes);
+
+ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *obj_count);
++ u32 cmd_flags,
++ u16 token,
++ int *obj_count);
+
+ /* Objects Attributes Flags */
+
+@@ -353,7 +183,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ * masters;
+ * user is responsible for proper memory handling through IOMMU configuration.
+ */
+-#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
++#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+
+ /**
+ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
+@@ -381,41 +211,41 @@ struct dprc_obj_desc {
+ u16 flags;
+ };
+
+-int dprc_get_obj(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int obj_index,
+- struct dprc_obj_desc *obj_desc);
+-
+-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- struct dprc_obj_desc *obj_desc);
+-
+-int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 irq_index,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 irq_index,
+- int *type,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_get_res_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- int *res_count);
++int dprc_get_obj(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int obj_index,
++ struct dprc_obj_desc *obj_desc);
++
++int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ struct dprc_obj_desc *obj_desc);
++
++int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ u8 irq_index,
++ struct dprc_irq_cfg *irq_cfg);
++
++int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ u8 irq_index,
++ int *type,
++ struct dprc_irq_cfg *irq_cfg);
++
++int dprc_get_res_count(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *type,
++ int *res_count);
+
+ /**
+ * enum dprc_iter_status - Iteration status
+@@ -429,27 +259,6 @@ enum dprc_iter_status {
+ DPRC_ITER_STATUS_LAST = 2
+ };
+
+-/**
+- * struct dprc_res_ids_range_desc - Resource ID range descriptor
+- * @base_id: Base resource ID of this range
+- * @last_id: Last resource ID of this range
+- * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at
+- * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE,
+- * additional iterations are needed, until the returned marker is
+- * DPRC_ITER_STATUS_LAST
+- */
+-struct dprc_res_ids_range_desc {
+- int base_id;
+- int last_id;
+- enum dprc_iter_status iter_status;
+-};
+-
+-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- struct dprc_res_ids_range_desc *range_desc);
+-
+ /* Region flags */
+ /* Cacheable - Indicates that region should be mapped as cacheable */
+ #define DPRC_REGION_CACHEABLE 0x00000001
+@@ -481,64 +290,27 @@ struct dprc_region_desc {
+ enum dprc_region_type type;
+ };
+
+-int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 region_index,
+- struct dprc_region_desc *region_desc);
+-
+-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- char *label);
++int dprc_get_obj_region(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ u8 region_index,
++ struct dprc_region_desc *region_desc);
+
+-/**
+- * struct dprc_endpoint - Endpoint description for link connect/disconnect
+- * operations
+- * @type: Endpoint object type: NULL terminated string
+- * @id: Endpoint object ID
+- * @if_id: Interface ID; should be set for endpoints with multiple
+- * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+- */
+-struct dprc_endpoint {
+- char type[16];
+- int id;
+- int if_id;
+-};
++int dprc_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
+
+-/**
+- * struct dprc_connection_cfg - Connection configuration.
+- * Used for virtual connections only
+- * @committed_rate: Committed rate (Mbits/s)
+- * @max_rate: Maximum rate (Mbits/s)
+- */
+-struct dprc_connection_cfg {
+- u32 committed_rate;
+- u32 max_rate;
+-};
++int dprc_get_container_id(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int *container_id);
+
+-int dprc_connect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- const struct dprc_endpoint *endpoint2,
+- const struct dprc_connection_cfg *cfg);
+-
+-int dprc_disconnect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint);
+-
+-int dprc_get_connection(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- struct dprc_endpoint *endpoint2,
+- int *state);
++int dprc_reset_container(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int child_container_id);
+
+ #endif /* _FSL_DPRC_H */
+
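Taken together, the reduced dprc.h keeps just the open/close, query and IRQ calls the bus driver needs. A hedged sketch of walking a container with them is shown below; the type/id fields of struct dprc_obj_desc come from the unchanged part of the header, the helper name is illustrative, and error handling is trimmed for brevity.

/* Sketch: enumerate the objects of one container via the kept DPRC API. */
static int list_container_objects(struct fsl_mc_io *mc_io, int container_id)
{
        struct dprc_obj_desc obj_desc;
        int obj_count, i, error;
        u16 token;

        error = dprc_open(mc_io, 0, container_id, &token);
        if (error < 0)
                return error;

        error = dprc_get_obj_count(mc_io, 0, token, &obj_count);
        for (i = 0; !error && i < obj_count; i++) {
                error = dprc_get_obj(mc_io, 0, token, i, &obj_desc);
                if (!error)
                        pr_info("object %s.%d\n", obj_desc.type, obj_desc.id);
        }

        dprc_close(mc_io, 0, token);
        return error;
}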
+diff --git a/drivers/staging/fsl-mc/include/mc-bus.h b/drivers/staging/fsl-mc/include/mc-bus.h
+index 170684a5..4d1f2d3e 100644
+--- a/drivers/staging/fsl-mc/include/mc-bus.h
++++ b/drivers/staging/fsl-mc/include/mc-bus.h
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus declarations
+ *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -42,8 +42,8 @@ struct msi_domain_info;
+ */
+ struct fsl_mc_resource_pool {
+ enum fsl_mc_pool_type type;
+- int16_t max_count;
+- int16_t free_count;
++ int max_count;
++ int free_count;
+ struct mutex mutex; /* serializes access to free_list */
+ struct list_head free_list;
+ struct fsl_mc_bus *mc_bus;
+@@ -73,6 +73,7 @@ struct fsl_mc_bus {
+ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
+
+ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
+ unsigned int *total_irq_count);
+
+ int __init dprc_driver_init(void);
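The new driver_override argument to dprc_scan_objects() lets a user-space path force a specific driver onto newly discovered objects; in-kernel callers simply pass NULL, roughly as in the sketch below (the wrapper name is illustrative).

/* Sketch: rescan a DPRC with normal driver matching, i.e. no override. */
static int rescan_container(struct fsl_mc_device *mc_bus_dev)
{
        unsigned int irq_count = 0;

        return dprc_scan_objects(mc_bus_dev, NULL, &irq_count);
}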
+diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
+index 5decb989..2e08aa31 100644
+--- a/drivers/staging/fsl-mc/include/mc-cmd.h
++++ b/drivers/staging/fsl-mc/include/mc-cmd.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+@@ -48,6 +49,15 @@ struct mc_command {
+ u64 params[MC_CMD_NUM_OF_PARAMS];
+ };
+
++struct mc_rsp_create {
++ __le32 object_id;
++};
++
++struct mc_rsp_api_ver {
++ __le16 major_ver;
++ __le16 minor_ver;
++};
++
+ enum mc_cmd_status {
+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+@@ -72,11 +82,6 @@ enum mc_cmd_status {
+ /* Command completion flag */
+ #define MC_CMD_FLAG_INTR_DIS 0x01
+
+-#define MC_CMD_HDR_CMDID_MASK 0xFFF0
+-#define MC_CMD_HDR_CMDID_SHIFT 4
+-#define MC_CMD_HDR_TOKEN_MASK 0xFFC0
+-#define MC_CMD_HDR_TOKEN_SHIFT 6
+-
+ static inline u64 mc_encode_cmd_header(u16 cmd_id,
+ u32 cmd_flags,
+ u16 token)
+@@ -84,10 +89,8 @@ static inline u64 mc_encode_cmd_header(u16 cmd_id,
+ u64 header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+- hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
+- MC_CMD_HDR_CMDID_MASK);
+- hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
+- MC_CMD_HDR_TOKEN_MASK);
++ hdr->cmd_id = cpu_to_le16(cmd_id);
++ hdr->token = cpu_to_le16(token);
+ hdr->status = MC_CMD_STATUS_READY;
+ if (cmd_flags & MC_CMD_FLAG_PRI)
+ hdr->flags_hw = MC_CMD_FLAG_PRI;
+@@ -102,7 +105,26 @@ static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 token = le16_to_cpu(hdr->token);
+
+- return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
++ return token;
++}
++
++static inline u32 mc_cmd_read_object_id(struct mc_command *cmd)
++{
++ struct mc_rsp_create *rsp_params;
++
++ rsp_params = (struct mc_rsp_create *)cmd->params;
++ return le32_to_cpu(rsp_params->object_id);
++}
++
++static inline void mc_cmd_read_api_version(struct mc_command *cmd,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct mc_rsp_api_ver *rsp_params;
++
++ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
++ *major_ver = le16_to_cpu(rsp_params->major_ver);
++ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
+ }
+
+ #endif /* __FSL_MC_CMD_H */
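With the CMDID/token shift-and-mask macros removed, mc_encode_cmd_header() now writes the 16-bit command ID and token verbatim, and the new mc_cmd_read_api_version() decodes the shared GET_API_VERSION response layout. A hedged sketch of the pattern the per-object *_get_api_version() calls follow is below; mc_send_command() is declared in mc-sys.h, and the concrete command ID would come from the object's *-cmd.h header.

/* Sketch: issue a GET_API_VERSION-style command and decode its response. */
static int get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
                           u16 cmdid, u16 *major_ver, u16 *minor_ver)
{
        struct mc_command cmd = { 0 };
        int error;

        /* version queries are issued against token 0 */
        cmd.header = mc_encode_cmd_header(cmdid, cmd_flags, 0);

        error = mc_send_command(mc_io, &cmd);
        if (error)
                return error;

        mc_cmd_read_api_version(&cmd, major_ver, minor_ver);

        return 0;
}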
+diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h
+index 89ad0cf5..dca7f908 100644
+--- a/drivers/staging/fsl-mc/include/mc-sys.h
++++ b/drivers/staging/fsl-mc/include/mc-sys.h
+@@ -1,4 +1,5 @@
+-/* Copyright 2013-2014 Freescale Semiconductor Inc.
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Interface of the I/O services to send MC commands to the MC hardware
+ *
+diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
+index f6e720e8..c23b78a4 100644
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale Management Complex (MC) bus public interface
+ *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+@@ -81,7 +81,7 @@ enum fsl_mc_pool_type {
+ */
+ struct fsl_mc_resource {
+ enum fsl_mc_pool_type type;
+- int32_t id;
++ s32 id;
+ void *data;
+ struct fsl_mc_resource_pool *parent_pool;
+ struct list_head node;
+@@ -122,6 +122,7 @@ struct fsl_mc_device_irq {
+ * @regions: pointer to array of MMIO region entries
+ * @irqs: pointer to array of pointers to interrupts allocated to this device
+ * @resource: generic resource associated with this MC object device, if any.
++ * @driver_override: Driver name to force a match
+ *
+ * Generic device object for MC object devices that are "attached" to a
+ * MC bus.
+@@ -154,6 +155,7 @@ struct fsl_mc_device {
+ struct resource *regions;
+ struct fsl_mc_device_irq **irqs;
+ struct fsl_mc_resource *resource;
++ const char *driver_override;
+ };
+
+ #define to_fsl_mc_device(_dev) \
+@@ -175,6 +177,8 @@ struct fsl_mc_device {
+ #define fsl_mc_driver_register(drv) \
+ __fsl_mc_driver_register(drv, THIS_MODULE)
+
++void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
++
+ int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
+ struct module *owner);
+
+@@ -198,4 +202,13 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
+
+ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+
++void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev,
++ struct device_node *fsl_mc_platform_node, int coherent);
++
++#ifdef CONFIG_FSL_MC_BUS
++struct iommu_group *fsl_mc_device_group(struct device *dev);
++#else
++#define fsl_mc_device_group(__dev) NULL
++#endif
++
+ #endif /* _FSL_MC_H_ */
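The additions to mc.h (fsl_mc_device_remove(), fsl_mc_dma_configure(), fsl_mc_device_group() and the driver_override field) serve the bus and IOMMU/VFIO paths; the IRQ helpers remain what child-object drivers call from probe/remove, roughly as sketched below. The probe/remove wiring into struct fsl_mc_driver is omitted and the function names are illustrative.

/* Sketch: IRQ allocation pattern for a driver bound to an MC object. */
static int example_probe(struct fsl_mc_device *mc_dev)
{
        int error;

        /* MC objects get their MSI-backed IRQs from the bus allocator */
        error = fsl_mc_allocate_irqs(mc_dev);
        if (error)
                return error;

        /* ... request_irq() on the entries of mc_dev->irqs[] ... */

        return 0;
}

static int example_remove(struct fsl_mc_device *mc_dev)
{
        fsl_mc_free_irqs(mc_dev);
        return 0;
}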
+--
+2.14.1
+