Diffstat (limited to 'target/linux/layerscape/patches-5.4/701-net-0286-staging-fsl_ppfe-eth-introduce-pfe-driver.patch')
-rw-r--r-- target/linux/layerscape/patches-5.4/701-net-0286-staging-fsl_ppfe-eth-introduce-pfe-driver.patch | 7994
1 file changed, 0 insertions, 7994 deletions
diff --git a/target/linux/layerscape/patches-5.4/701-net-0286-staging-fsl_ppfe-eth-introduce-pfe-driver.patch b/target/linux/layerscape/patches-5.4/701-net-0286-staging-fsl_ppfe-eth-introduce-pfe-driver.patch
deleted file mode 100644
index 13fd4f90d1..0000000000
--- a/target/linux/layerscape/patches-5.4/701-net-0286-staging-fsl_ppfe-eth-introduce-pfe-driver.patch
+++ /dev/null
@@ -1,7994 +0,0 @@
-From fccb0e1e07fc0750fd081ab52ed94ee13f6b360f Mon Sep 17 00:00:00 2001
-From: Calvin Johnson <calvin.johnson@nxp.com>
-Date: Sat, 16 Sep 2017 14:22:17 +0530
-Subject: [PATCH] staging: fsl_ppfe/eth: introduce pfe driver
-
-This patch introduces Linux support for NXP's LS1012A Packet
-Forwarding Engine (pfe_eth). The LS1012A uses a hardware packet
-forwarding engine to provide high-performance Ethernet interfaces.
-The device includes two Ethernet ports.
-
-Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
-Signed-off-by: Anjaneyulu Jagarlmudi <anji.jagarlmudi@nxp.com>
----
- drivers/staging/fsl_ppfe/Kconfig | 20 +
- drivers/staging/fsl_ppfe/Makefile | 19 +
- drivers/staging/fsl_ppfe/TODO | 2 +
- drivers/staging/fsl_ppfe/pfe_ctrl.c | 238 +++
- drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 ++
- drivers/staging/fsl_ppfe/pfe_eth.c | 2434 +++++++++++++++++++++++
- drivers/staging/fsl_ppfe/pfe_firmware.c | 314 +++
- drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++++++
- drivers/staging/fsl_ppfe/pfe_hif.c | 1094 ++++++++++
- drivers/staging/fsl_ppfe/pfe_hif_lib.c | 638 ++++++
- drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++
- drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c | 394 ++++
- drivers/staging/fsl_ppfe/pfe_mod.c | 141 ++
- drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 ++++++++
- 14 files changed, 7915 insertions(+)
- create mode 100644 drivers/staging/fsl_ppfe/Kconfig
- create mode 100644 drivers/staging/fsl_ppfe/Makefile
- create mode 100644 drivers/staging/fsl_ppfe/TODO
- create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
-
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/Kconfig
-@@ -0,0 +1,20 @@
-+#
-+# Freescale Programmable Packet Forwarding Engine driver
-+#
-+config FSL_PPFE
-+ bool "Freescale PPFE Driver"
-+ default n
-+ ---help---
-+ The Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
-+ It provides two high-performance Ethernet interfaces.
-+ This driver initializes, programs and controls the PPFE.
-+ Use this driver to enable network connectivity on LS1012A platforms.
-+
-+if FSL_PPFE
-+
-+config FSL_PPFE_UTIL_DISABLED
-+ bool "Disable PPFE UTIL Processor Engine"
-+ ---help---
-+ The UTIL PE needs to be enabled only when required; select this option to disable it.
-+
-+endif # FSL_PPFE
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/Makefile
-@@ -0,0 +1,19 @@
-+#
-+# Makefile for the Freescale PPFE driver
-+#
-+
-+ccflags-y += -I$(src)/include -I$(src)
-+
-+obj-m += pfe.o
-+
-+pfe-y += pfe_mod.o \
-+ pfe_hw.o \
-+ pfe_firmware.o \
-+ pfe_ctrl.o \
-+ pfe_hif.o \
-+ pfe_hif_lib.o \
-+ pfe_eth.o \
-+ pfe_sysfs.o \
-+ pfe_debugfs.o \
-+ pfe_ls1012a_platform.o \
-+ pfe_hal.o
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/TODO
-@@ -0,0 +1,2 @@
-+TODO:
-+ - provide pfe pe monitoring support
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
-@@ -0,0 +1,238 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/kthread.h>
-+
-+#include "pfe_mod.h"
-+#include "pfe_ctrl.h"
-+
-+#define TIMEOUT_MS 1000
-+
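-+/*
-+ * Polling helper for the PE stop/reset loops below: callers spin until
-+ * @end, then yield the CPU while polling for up to another TIMEOUT_MS,
-+ * and get -1 back once that grace period has expired.
-+ */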
-+int relax(unsigned long end)
-+{
-+ if (time_after(jiffies, end)) {
-+ if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
-+ return -1;
-+
-+ if (need_resched())
-+ schedule();
-+ }
-+
-+ return 0;
-+}
-+
-+void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
-+{
-+ int id;
-+
-+ mutex_lock(&ctrl->mutex);
-+
-+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
-+ pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
-+
-+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
-+ if (id == TMU2_ID)
-+ continue;
-+ pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
-+#endif
-+ mutex_unlock(&ctrl->mutex);
-+}
-+
-+void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
-+{
-+ int pe_mask = CLASS_MASK | TMU_MASK;
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pe_mask |= UTIL_MASK;
-+#endif
-+ mutex_lock(&ctrl->mutex);
-+ pe_start(&pfe->ctrl, pe_mask);
-+ mutex_unlock(&ctrl->mutex);
-+}
-+
-+/* PE sync stop.
-+ * Stops packet processing for a list of PE's (specified using a bitmask).
-+ * The caller must hold ctrl->mutex.
-+ *
-+ * @param ctrl Control context
-+ * @param pe_mask Mask of PE id's to stop
-+ *
-+ */
-+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
-+{
-+ struct pe_sync_mailbox *mbox;
-+ int pe_stopped = 0;
-+ unsigned long end = jiffies + 2;
-+ int i;
-+
-+ pe_mask &= 0x2FF; /*Exclude Util + TMU2 */
-+
-+ for (i = 0; i < MAX_PE; i++)
-+ if (pe_mask & (1 << i)) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
-+ long)&mbox->stop, 4);
-+ }
-+
-+ while (pe_stopped != pe_mask) {
-+ for (i = 0; i < MAX_PE; i++)
-+ if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ if (pe_dmem_read(i, (unsigned
-+ long)&mbox->stopped, 4) &
-+ cpu_to_be32(0x1))
-+ pe_stopped |= (1 << i);
-+ }
-+
-+ if (relax(end) < 0)
-+ goto err;
-+ }
-+
-+ return 0;
-+
-+err:
-+ pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
-+
-+ for (i = 0; i < MAX_PE; i++)
-+ if (pe_mask & (1 << i)) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
-+ long)&mbox->stop, 4);
-+ }
-+
-+ return -EIO;
-+}
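-+
-+/*
-+ * The stop/stopped handshake above relies on a two-word mailbox in each
-+ * PE's DMEM, roughly (the actual layout is declared outside this
-+ * excerpt):
-+ *
-+ *   struct pe_sync_mailbox {
-+ *       u32 stop;      set to 1 by the host to request a stop
-+ *       u32 stopped;   set to 1 by the PE firmware once it is idle
-+ *   };
-+ */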
-+
-+/* PE start.
-+ * Starts packet processing for a list of PE's (specified using a bitmask).
-+ * The caller must hold ctrl->mutex.
-+ *
-+ * @param ctrl Control context
-+ * @param pe_mask Mask of PE id's to start
-+ *
-+ */
-+void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
-+{
-+ struct pe_sync_mailbox *mbox;
-+ int i;
-+
-+ for (i = 0; i < MAX_PE; i++)
-+ if (pe_mask & (1 << i)) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
-+ long)&mbox->stop, 4);
-+ }
-+}
-+
-+/* This function will ensure all PEs are put in to idle state */
-+int pe_reset_all(struct pfe_ctrl *ctrl)
-+{
-+ struct pe_sync_mailbox *mbox;
-+ int pe_stopped = 0;
-+ unsigned long end = jiffies + 2;
-+ int i;
-+ int pe_mask = CLASS_MASK | TMU_MASK;
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pe_mask |= UTIL_MASK;
-+#endif
-+
-+ for (i = 0; i < MAX_PE; i++)
-+ if (pe_mask & (1 << i)) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
-+ long)&mbox->stop, 4);
-+ }
-+
-+ while (pe_stopped != pe_mask) {
-+ for (i = 0; i < MAX_PE; i++)
-+ if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ if (pe_dmem_read(i, (unsigned long)
-+ &mbox->stopped, 4) &
-+ cpu_to_be32(0x1))
-+ pe_stopped |= (1 << i);
-+ }
-+
-+ if (relax(end) < 0)
-+ goto err;
-+ }
-+
-+ return 0;
-+
-+err:
-+ pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
-+ return -EIO;
-+}
-+
-+int pfe_ctrl_init(struct pfe *pfe)
-+{
-+ struct pfe_ctrl *ctrl = &pfe->ctrl;
-+ int id;
-+
-+ pr_info("%s\n", __func__);
-+
-+ mutex_init(&ctrl->mutex);
-+ spin_lock_init(&ctrl->lock);
-+
-+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
-+ ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
-+ ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
-+ }
-+
-+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
-+ if (id == TMU2_ID)
-+ continue;
-+ ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
-+ ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
-+ ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
-+#endif
-+
-+ ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
-+ ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
-+ ROUTE_TABLE_BASEADDR;
-+
-+ ctrl->dev = pfe->dev;
-+
-+ pr_info("%s finished\n", __func__);
-+
-+ return 0;
-+}
-+
-+void pfe_ctrl_exit(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
-@@ -0,0 +1,111 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/debugfs.h>
-+#include <linux/platform_device.h>
-+
-+#include "pfe_mod.h"
-+
-+static int dmem_show(struct seq_file *s, void *unused)
-+{
-+ u32 dmem_addr, val;
-+ int id = (long int)s->private;
-+ int i;
-+
-+ for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
-+ seq_printf(s, "%04x:", dmem_addr);
-+
-+ for (i = 0; i < 8; i++) {
-+ val = pe_dmem_read(id, dmem_addr + i * 4, 4);
-+ seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
-+ (val >> 8) & 0xff, (val >> 16) & 0xff,
-+ (val >> 24) & 0xff);
-+ }
-+
-+ seq_puts(s, "\n");
-+ }
-+
-+ return 0;
-+}
-+
-+static int dmem_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, dmem_show, inode->i_private);
-+}
-+
-+static const struct file_operations dmem_fops = {
-+ .open = dmem_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+int pfe_debugfs_init(struct pfe *pfe)
-+{
-+ struct dentry *d;
-+ char name[16];
-+ int i;
-+
-+ pr_info("%s\n", __func__);
-+
-+ pfe->dentry = debugfs_create_dir("pfe", NULL);
-+ if (IS_ERR_OR_NULL(pfe->dentry))
-+ goto err_dir;
-+
-+ d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ return 0;
-+
-+err_pe:
-+ debugfs_remove_recursive(pfe->dentry);
-+
-+err_dir:
-+ return -1;
-+}
-+
-+void pfe_debugfs_exit(struct pfe *pfe)
-+{
-+ debugfs_remove_recursive(pfe->dentry);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_eth.c
-@@ -0,0 +1,2434 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/* @pfe_eth.c
-+ * Ethernet driver that handles the exception path for the PFE.
-+ * - uses HIF functions to send/receive packets.
-+ * - uses ctrl function to start/stop interfaces.
-+ * - uses direct register accesses to control phy operation.
-+ */
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/interrupt.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dmapool.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ethtool.h>
-+#include <linux/mii.h>
-+#include <linux/phy.h>
-+#include <linux/timer.h>
-+#include <linux/hrtimer.h>
-+#include <linux/platform_device.h>
-+
-+#include <net/ip.h>
-+#include <net/sock.h>
-+
-+#include <linux/io.h>
-+#include <asm/irq.h>
-+#include <linux/delay.h>
-+#include <linux/regmap.h>
-+#include <linux/i2c.h>
-+
-+#if defined(CONFIG_NF_CONNTRACK_MARK)
-+#include <net/netfilter/nf_conntrack.h>
-+#endif
-+
-+#include "pfe_mod.h"
-+#include "pfe_eth.h"
-+
-+static void *cbus_emac_base[3];
-+static void *cbus_gpi_base[3];
-+
-+/* Forward Declaration */
-+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
-+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
-+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
-+ from_tx, int n_desc);
-+
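-+/* GEMAC register offsets dumped through the ethtool get_regs hook,
-+ * see pfe_eth_gemac_get_regs() below
-+ */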
-+unsigned int gemac_regs[] = {
-+ 0x0004, /* Interrupt event */
-+ 0x0008, /* Interrupt mask */
-+ 0x0024, /* Ethernet control */
-+ 0x0064, /* MIB Control/Status */
-+ 0x0084, /* Receive control/status */
-+ 0x00C4, /* Transmit control */
-+ 0x00E4, /* Physical address low */
-+ 0x00E8, /* Physical address high */
-+ 0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
-+ 0x0190, /* Receive FIFO Section Full Threshold */
-+ 0x01A0, /* Transmit FIFO Section Empty Threshold */
-+ 0x01B0, /* Frame Truncation Length */
-+};
-+
-+/********************************************************************/
-+/* SYSFS INTERFACE */
-+/********************************************************************/
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+/*
-+ * pfe_eth_show_napi_stats
-+ */
-+static ssize_t pfe_eth_show_napi_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t len = 0;
-+
-+ len += sprintf(buf + len, "sched: %u\n",
-+ priv->napi_counters[NAPI_SCHED_COUNT]);
-+ len += sprintf(buf + len, "poll: %u\n",
-+ priv->napi_counters[NAPI_POLL_COUNT]);
-+ len += sprintf(buf + len, "packet: %u\n",
-+ priv->napi_counters[NAPI_PACKET_COUNT]);
-+ len += sprintf(buf + len, "budget: %u\n",
-+ priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
-+ len += sprintf(buf + len, "desc: %u\n",
-+ priv->napi_counters[NAPI_DESC_COUNT]);
-+
-+ return len;
-+}
-+
-+/*
-+ * pfe_eth_set_napi_stats
-+ */
-+static ssize_t pfe_eth_set_napi_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+
-+ memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
-+
-+ return count;
-+}
-+#endif
-+#ifdef PFE_ETH_TX_STATS
-+/* pfe_eth_show_tx_stats
-+ *
-+ */
-+static ssize_t pfe_eth_show_tx_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t len = 0;
-+ int i;
-+
-+ len += sprintf(buf + len, "TX queues stats:\n");
-+
-+ for (i = 0; i < emac_txq_cnt; i++) {
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ i);
-+
-+ len += sprintf(buf + len, "\n");
-+ __netif_tx_lock_bh(tx_queue);
-+
-+ hif_tx_lock(&pfe->hif);
-+ len += sprintf(buf + len,
-+ "Queue %2d : credits = %10d\n"
-+ , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
-+ len += sprintf(buf + len,
-+ " tx packets = %10d\n"
-+ , pfe->tmu_credit.tx_packets[priv->id][i]);
-+ hif_tx_unlock(&pfe->hif);
-+
-+ /* Don't output additional stats if the queue was never used */
-+ if (!pfe->tmu_credit.tx_packets[priv->id][i])
-+ goto skip;
-+
-+ len += sprintf(buf + len,
-+ " clean_fail = %10d\n"
-+ , priv->clean_fail[i]);
-+ len += sprintf(buf + len,
-+ " stop_queue = %10d\n"
-+ , priv->stop_queue_total[i]);
-+ len += sprintf(buf + len,
-+ " stop_queue_hif = %10d\n"
-+ , priv->stop_queue_hif[i]);
-+ len += sprintf(buf + len,
-+ " stop_queue_hif_client = %10d\n"
-+ , priv->stop_queue_hif_client[i]);
-+ len += sprintf(buf + len,
-+ " stop_queue_credit = %10d\n"
-+ , priv->stop_queue_credit[i]);
-+skip:
-+ __netif_tx_unlock_bh(tx_queue);
-+ }
-+ return len;
-+}
-+
-+/* pfe_eth_set_tx_stats
-+ *
-+ */
-+static ssize_t pfe_eth_set_tx_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ int i;
-+
-+ for (i = 0; i < emac_txq_cnt; i++) {
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ i);
-+
-+ __netif_tx_lock_bh(tx_queue);
-+ priv->clean_fail[i] = 0;
-+ priv->stop_queue_total[i] = 0;
-+ priv->stop_queue_hif[i] = 0;
-+ priv->stop_queue_hif_client[i] = 0;
-+ priv->stop_queue_credit[i] = 0;
-+ __netif_tx_unlock_bh(tx_queue);
-+ }
-+
-+ return count;
-+}
-+#endif
-+/* pfe_eth_show_txavail
-+ *
-+ */
-+static ssize_t pfe_eth_show_txavail(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t len = 0;
-+ int i;
-+
-+ for (i = 0; i < emac_txq_cnt; i++) {
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ i);
-+
-+ __netif_tx_lock_bh(tx_queue);
-+
-+ len += sprintf(buf + len, "%d",
-+ hif_lib_tx_avail(&priv->client, i));
-+
-+ __netif_tx_unlock_bh(tx_queue);
-+
-+ if (i == (emac_txq_cnt - 1))
-+ len += sprintf(buf + len, "\n");
-+ else
-+ len += sprintf(buf + len, " ");
-+ }
-+
-+ return len;
-+}
-+
-+/* pfe_eth_show_default_priority
-+ *
-+ */
-+static ssize_t pfe_eth_show_default_priority(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ unsigned long flags;
-+ int rc;
-+
-+ spin_lock_irqsave(&priv->lock, flags);
-+ rc = sprintf(buf, "%d\n", priv->default_priority);
-+ spin_unlock_irqrestore(&priv->lock, flags);
-+
-+ return rc;
-+}
-+
-+/* pfe_eth_set_default_priority
-+ *
-+ */
-+
-+static ssize_t pfe_eth_set_default_priority(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ unsigned long flags;
-+ unsigned long prio;
-+
-+ if (kstrtoul(buf, 0, &prio))
-+ return -EINVAL;
-+
-+ spin_lock_irqsave(&priv->lock, flags);
-+ priv->default_priority = prio;
-+ spin_unlock_irqrestore(&priv->lock, flags);
-+
-+ return count;
-+}
-+
-+static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
-+static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
-+ pfe_eth_set_default_priority);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
-+ pfe_eth_set_napi_stats);
-+#endif
-+
-+#ifdef PFE_ETH_TX_STATS
-+static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
-+ pfe_eth_set_tx_stats);
-+#endif
-+
-+/*
-+ * pfe_eth_sysfs_init
-+ *
-+ */
-+static int pfe_eth_sysfs_init(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int err;
-+
-+ /* Initialize the default values */
-+
-+ /*
-+ * By default, packets without conntrack will use this default high
-+ * priority queue
-+ */
-+ priv->default_priority = 15;
-+
-+ /* Create our sysfs files */
-+ err = device_create_file(&ndev->dev, &dev_attr_default_priority);
-+ if (err) {
-+ netdev_err(ndev,
-+ "failed to create default_priority sysfs files\n");
-+ goto err_priority;
-+ }
-+
-+ err = device_create_file(&ndev->dev, &dev_attr_txavail);
-+ if (err) {
-+ netdev_err(ndev,
-+ "failed to create txavail sysfs file\n");
-+ goto err_txavail;
-+ }
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
-+ if (err) {
-+ netdev_err(ndev, "failed to create napi stats sysfs files\n");
-+ goto err_napi;
-+ }
-+#endif
-+
-+#ifdef PFE_ETH_TX_STATS
-+ err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
-+ if (err) {
-+ netdev_err(ndev, "failed to create tx stats sysfs files\n");
-+ goto err_tx;
-+ }
-+#endif
-+
-+ return 0;
-+
-+#ifdef PFE_ETH_TX_STATS
-+err_tx:
-+#endif
-+#ifdef PFE_ETH_NAPI_STATS
-+ device_remove_file(&ndev->dev, &dev_attr_napi_stats);
-+
-+err_napi:
-+#endif
-+ device_remove_file(&ndev->dev, &dev_attr_txavail);
-+
-+err_txavail:
-+ device_remove_file(&ndev->dev, &dev_attr_default_priority);
-+
-+err_priority:
-+ return -1;
-+}
-+
-+/* pfe_eth_sysfs_exit
-+ *
-+ */
-+void pfe_eth_sysfs_exit(struct net_device *ndev)
-+{
-+#ifdef PFE_ETH_TX_STATS
-+ device_remove_file(&ndev->dev, &dev_attr_tx_stats);
-+#endif
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ device_remove_file(&ndev->dev, &dev_attr_napi_stats);
-+#endif
-+ device_remove_file(&ndev->dev, &dev_attr_txavail);
-+ device_remove_file(&ndev->dev, &dev_attr_default_priority);
-+}
-+
-+/*************************************************************************/
-+/* ETHTOOL INTERFACE */
-+/*************************************************************************/
-+
-+/* MTIP GEMAC */
-+static const struct fec_stat {
-+ char name[ETH_GSTRING_LEN];
-+ u16 offset;
-+} fec_stats[] = {
-+ /* RMON TX */
-+ { "tx_dropped", RMON_T_DROP },
-+ { "tx_packets", RMON_T_PACKETS },
-+ { "tx_broadcast", RMON_T_BC_PKT },
-+ { "tx_multicast", RMON_T_MC_PKT },
-+ { "tx_crc_errors", RMON_T_CRC_ALIGN },
-+ { "tx_undersize", RMON_T_UNDERSIZE },
-+ { "tx_oversize", RMON_T_OVERSIZE },
-+ { "tx_fragment", RMON_T_FRAG },
-+ { "tx_jabber", RMON_T_JAB },
-+ { "tx_collision", RMON_T_COL },
-+ { "tx_64byte", RMON_T_P64 },
-+ { "tx_65to127byte", RMON_T_P65TO127 },
-+ { "tx_128to255byte", RMON_T_P128TO255 },
-+ { "tx_256to511byte", RMON_T_P256TO511 },
-+ { "tx_512to1023byte", RMON_T_P512TO1023 },
-+ { "tx_1024to2047byte", RMON_T_P1024TO2047 },
-+ { "tx_GTE2048byte", RMON_T_P_GTE2048 },
-+ { "tx_octets", RMON_T_OCTETS },
-+
-+ /* IEEE TX */
-+ { "IEEE_tx_drop", IEEE_T_DROP },
-+ { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
-+ { "IEEE_tx_1col", IEEE_T_1COL },
-+ { "IEEE_tx_mcol", IEEE_T_MCOL },
-+ { "IEEE_tx_def", IEEE_T_DEF },
-+ { "IEEE_tx_lcol", IEEE_T_LCOL },
-+ { "IEEE_tx_excol", IEEE_T_EXCOL },
-+ { "IEEE_tx_macerr", IEEE_T_MACERR },
-+ { "IEEE_tx_cserr", IEEE_T_CSERR },
-+ { "IEEE_tx_sqe", IEEE_T_SQE },
-+ { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
-+ { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
-+
-+ /* RMON RX */
-+ { "rx_packets", RMON_R_PACKETS },
-+ { "rx_broadcast", RMON_R_BC_PKT },
-+ { "rx_multicast", RMON_R_MC_PKT },
-+ { "rx_crc_errors", RMON_R_CRC_ALIGN },
-+ { "rx_undersize", RMON_R_UNDERSIZE },
-+ { "rx_oversize", RMON_R_OVERSIZE },
-+ { "rx_fragment", RMON_R_FRAG },
-+ { "rx_jabber", RMON_R_JAB },
-+ { "rx_64byte", RMON_R_P64 },
-+ { "rx_65to127byte", RMON_R_P65TO127 },
-+ { "rx_128to255byte", RMON_R_P128TO255 },
-+ { "rx_256to511byte", RMON_R_P256TO511 },
-+ { "rx_512to1023byte", RMON_R_P512TO1023 },
-+ { "rx_1024to2047byte", RMON_R_P1024TO2047 },
-+ { "rx_GTE2048byte", RMON_R_P_GTE2048 },
-+ { "rx_octets", RMON_R_OCTETS },
-+
-+ /* IEEE RX */
-+ { "IEEE_rx_drop", IEEE_R_DROP },
-+ { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
-+ { "IEEE_rx_crc", IEEE_R_CRC },
-+ { "IEEE_rx_align", IEEE_R_ALIGN },
-+ { "IEEE_rx_macerr", IEEE_R_MACERR },
-+ { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
-+ { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
-+};
-+
-+static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
-+ *stats, u64 *data)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
-+ data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
-+}
-+
-+static void pfe_eth_gstrings(struct net_device *netdev,
-+ u32 stringset, u8 *data)
-+{
-+ int i;
-+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
-+ memcpy(data + i * ETH_GSTRING_LEN,
-+ fec_stats[i].name, ETH_GSTRING_LEN);
-+ break;
-+ }
-+}
-+
-+static int pfe_eth_stats_count(struct net_device *ndev, int sset)
-+{
-+ switch (sset) {
-+ case ETH_SS_STATS:
-+ return ARRAY_SIZE(fec_stats);
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+/*
-+ * pfe_eth_gemac_reglen - Return the length of the register structure.
-+ *
-+ */
-+static int pfe_eth_gemac_reglen(struct net_device *ndev)
-+{
-+ pr_info("%s()\n", __func__);
-+ return sizeof(gemac_regs);
-+}
-+
-+/*
-+ * pfe_eth_gemac_get_regs - Return the gemac register structure.
-+ *
-+ */
-+static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
-+ *regs, void *regbuf)
-+{
-+ int i;
-+
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ u32 *buf = (u32 *)regbuf;
-+
-+ pr_info("%s()\n", __func__);
-+ for (i = 0; i < ARRAY_SIZE(gemac_regs); i++)
-+ buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
-+}
-+
-+/*
-+ * pfe_eth_set_wol - Set the magic packet option, in WoL register.
-+ *
-+ */
-+static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ if (wol->wolopts & ~WAKE_MAGIC)
-+ return -EOPNOTSUPP;
-+
-+ /* for MTIP we store wol->wolopts */
-+ priv->wol = wol->wolopts;
-+
-+ device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
-+
-+ return 0;
-+}
-+
-+/*
-+ *
-+ * pfe_eth_get_wol - Get the WoL options.
-+ *
-+ */
-+static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
-+ *wol)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ wol->supported = WAKE_MAGIC;
-+ wol->wolopts = 0;
-+
-+ if (priv->wol & WAKE_MAGIC)
-+ wol->wolopts = WAKE_MAGIC;
-+
-+ memset(&wol->sopass, 0, sizeof(wol->sopass));
-+}
-+
-+/*
-+ * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
-+ *
-+ */
-+static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
-+ *drvinfo)
-+{
-+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
-+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
-+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-+ strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
-+}
-+
-+/*
-+ * pfe_eth_set_settings - Used to send commands to PHY.
-+ *
-+ */
-+static int pfe_eth_set_settings(struct net_device *ndev,
-+ const struct ethtool_link_ksettings *cmd)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct phy_device *phydev = priv->phydev;
-+
-+ if (!phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_ksettings_set(phydev, cmd);
-+}
-+
-+/*
-+ * pfe_eth_get_settings - Return the current settings in the
-+ * ethtool_link_ksettings structure.
-+ *
-+ */
-+static int pfe_eth_get_settings(struct net_device *ndev,
-+ struct ethtool_link_ksettings *cmd)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct phy_device *phydev = priv->phydev;
-+
-+ if (!phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_ksettings_get(phydev, cmd);
-+}
-+
-+/*
-+ * pfe_eth_get_msglevel - Gets the debug message mask.
-+ *
-+ */
-+static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ return priv->msg_enable;
-+}
-+
-+/*
-+ * pfe_eth_set_msglevel - Sets the debug message mask.
-+ *
-+ */
-+static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ priv->msg_enable = data;
-+}
-+
-+#define HIF_RX_COAL_MAX_CLKS (~(1 << 31))
-+#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk / 1000)
-+#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS / \
-+ HIF_RX_COAL_CLKS_PER_USEC)
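-+
-+/*
-+ * Worked example: with a 250 MHz HIF clock (sys_clk = 250000 kHz),
-+ * HIF_RX_COAL_CLKS_PER_USEC is 250, so rx_coalesce_usecs = 100
-+ * programs 25000 clock cycles into HIF_INT_COAL; the 31-bit counter
-+ * caps the timer at roughly 8.5 seconds.
-+ */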
-+
-+/*
-+ * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
-+ *
-+ */
-+static int pfe_eth_set_coalesce(struct net_device *ndev,
-+ struct ethtool_coalesce *ec)
-+{
-+ if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
-+ return -EINVAL;
-+
-+ if (!ec->rx_coalesce_usecs) {
-+ writel(0, HIF_INT_COAL);
-+ return 0;
-+ }
-+
-+ writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
-+ HIF_INT_COAL_ENABLE, HIF_INT_COAL);
-+
-+ return 0;
-+}
-+
-+/*
-+ * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
-+ *
-+ */
-+static int pfe_eth_get_coalesce(struct net_device *ndev,
-+ struct ethtool_coalesce *ec)
-+{
-+ int reg_val = readl(HIF_INT_COAL);
-+
-+ if (reg_val & HIF_INT_COAL_ENABLE)
-+ ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
-+ HIF_RX_COAL_CLKS_PER_USEC;
-+ else
-+ ec->rx_coalesce_usecs = 0;
-+
-+ return 0;
-+}
-+
-+/*
-+ * pfe_eth_set_pauseparam - Sets pause parameters
-+ *
-+ */
-+static int pfe_eth_set_pauseparam(struct net_device *ndev,
-+ struct ethtool_pauseparam *epause)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ if (epause->tx_pause != epause->rx_pause) {
-+ netdev_info(ndev,
-+ "hardware only support enable/disable both tx and rx\n");
-+ return -EINVAL;
-+ }
-+
-+ priv->pause_flag = 0;
-+ priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
-+ priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
-+
-+ if (epause->rx_pause || epause->autoneg) {
-+ gemac_enable_pause_rx(priv->EMAC_baseaddr);
-+ writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
-+ EGPI_PAUSE_ENABLE),
-+ priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
-+ if (priv->phydev) {
-+ priv->phydev->supported |= ADVERTISED_Pause |
-+ ADVERTISED_Asym_Pause;
-+ priv->phydev->advertising |= ADVERTISED_Pause |
-+ ADVERTISED_Asym_Pause;
-+ }
-+ } else {
-+ gemac_disable_pause_rx(priv->EMAC_baseaddr);
-+ writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
-+ ~EGPI_PAUSE_ENABLE),
-+ priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
-+ if (priv->phydev) {
-+ priv->phydev->supported &= ~(ADVERTISED_Pause |
-+ ADVERTISED_Asym_Pause);
-+ priv->phydev->advertising &= ~(ADVERTISED_Pause |
-+ ADVERTISED_Asym_Pause);
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * pfe_eth_get_pauseparam - Gets pause parameters
-+ *
-+ */
-+static void pfe_eth_get_pauseparam(struct net_device *ndev,
-+ struct ethtool_pauseparam *epause)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
-+ epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
-+ epause->rx_pause = epause->tx_pause;
-+}
-+
-+/*
-+ * pfe_eth_get_hash
-+ */
-+#define PFE_HASH_BITS 6 /* #bits in hash */
-+#define CRC32_POLY 0xEDB88320
-+
-+static int pfe_eth_get_hash(u8 *addr)
-+{
-+ unsigned int i, bit, data, crc, hash;
-+
-+ /* calculate crc32 value of mac address */
-+ crc = 0xffffffff;
-+
-+ for (i = 0; i < 6; i++) {
-+ data = addr[i];
-+ for (bit = 0; bit < 8; bit++, data >>= 1) {
-+ crc = (crc >> 1) ^
-+ (((crc ^ data) & 1) ? CRC32_POLY : 0);
-+ }
-+ }
-+
-+ /*
-+ * only the upper 6 bits (PFE_HASH_BITS) are used,
-+ * pointing to a specific bit in the hash registers
-+ */
-+ hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
-+
-+ return hash;
-+}
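-+
-+/*
-+ * A minimal equivalent using the kernel's reflected CRC32 helper from
-+ * <linux/crc32.h> (not used here, shown only for reference):
-+ *
-+ * hash = (crc32_le(~0U, addr, 6) >> (32 - PFE_HASH_BITS)) & 0x3f;
-+ */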
-+
-+const struct ethtool_ops pfe_ethtool_ops = {
-+ .get_drvinfo = pfe_eth_get_drvinfo,
-+ .get_regs_len = pfe_eth_gemac_reglen,
-+ .get_regs = pfe_eth_gemac_get_regs,
-+ .get_link = ethtool_op_get_link,
-+ .get_wol = pfe_eth_get_wol,
-+ .set_wol = pfe_eth_set_wol,
-+ .set_pauseparam = pfe_eth_set_pauseparam,
-+ .get_pauseparam = pfe_eth_get_pauseparam,
-+ .get_strings = pfe_eth_gstrings,
-+ .get_sset_count = pfe_eth_stats_count,
-+ .get_ethtool_stats = pfe_eth_fill_stats,
-+ .get_msglevel = pfe_eth_get_msglevel,
-+ .set_msglevel = pfe_eth_set_msglevel,
-+ .set_coalesce = pfe_eth_set_coalesce,
-+ .get_coalesce = pfe_eth_get_coalesce,
-+ .get_link_ksettings = pfe_eth_get_settings,
-+ .set_link_ksettings = pfe_eth_set_settings,
-+};
-+
-+/* pfe_eth_mdio_reset
-+ */
-+int pfe_eth_mdio_reset(struct mii_bus *bus)
-+{
-+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
-+ u32 phy_speed;
-+
-+ netif_info(priv, hw, priv->ndev, "%s\n", __func__);
-+
-+ mutex_lock(&bus->mdio_lock);
-+
-+ /*
-+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
-+ *
-+ * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
-+ * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
-+ */
-+ phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
-+ << EMAC_MII_SPEED_SHIFT);
-+ phy_speed |= EMAC_HOLDTIME(0x5);
-+ __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
-+
-+ mutex_unlock(&bus->mdio_lock);
-+
-+ return 0;
-+}
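-+
-+/*
-+ * Worked example: with sys_clk = 250000 kHz the divider above is
-+ * DIV_ROUND_UP(250000000, 4000000) = 63, giving an MDC of roughly
-+ * 250 MHz / (63 * 2) ~= 1.98 MHz, safely below the 2.5 MHz target.
-+ */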
-+
-+/* pfe_eth_gemac_phy_timeout
-+ *
-+ */
-+static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
-+{
-+ while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
-+ EMAC_IEVENT_MII)) {
-+ if (timeout-- <= 0)
-+ return -1;
-+ usleep_range(10, 20);
-+ }
-+ __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
-+ return 0;
-+}
-+
-+static int pfe_eth_mdio_mux(u8 muxval)
-+{
-+ struct i2c_adapter *a;
-+ struct i2c_msg msg;
-+ unsigned char buf[2];
-+ int ret;
-+
-+ a = i2c_get_adapter(0);
-+ if (!a)
-+ return -ENODEV;
-+
-+ /* program the MDIO mux: register 0x54 of the device at i2c address 0x66 */
-+ buf[0] = 0x54; /* reg number */
-+ buf[1] = (muxval << 6) | 0x3; /* data */
-+ msg.addr = 0x66;
-+ msg.buf = buf;
-+ msg.len = 2;
-+ msg.flags = 0;
-+ ret = i2c_transfer(a, &msg, 1);
-+ i2c_put_adapter(a);
-+ if (ret != 1)
-+ return -ENODEV;
-+ return 0;
-+}
-+
-+static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
-+ int dev_addr, int regnum)
-+{
-+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
-+
-+ __raw_writel(EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA(dev_addr) |
-+ EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
-+ priv->PHY_baseaddr + EMAC_MII_DATA_REG);
-+
-+ if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
-+ netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
-+ __func__);
-+ return -1;
-+ }
-+
-+ return 0;
-+}
-+
-+static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
-+ u16 value)
-+{
-+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
-+
-+ /* To access external PHYs on the QDS board, the mux needs to be configured */
-+ if ((mii_id) && (pfe->mdio_muxval[mii_id]))
-+ pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
-+
-+ if (regnum & MII_ADDR_C45) {
-+ pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
-+ regnum & 0xffff);
-+ __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
-+ EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
-+ EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
-+ priv->PHY_baseaddr + EMAC_MII_DATA_REG);
-+ } else {
-+ /* start a write op */
-+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
-+ EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA(regnum) |
-+ EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
-+ priv->PHY_baseaddr + EMAC_MII_DATA_REG);
-+ }
-+
-+ if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
-+ netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
-+ __func__);
-+ return -1;
-+ }
-+ netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
-+ mii_id, regnum, value);
-+
-+ return 0;
-+}
-+
-+static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-+{
-+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
-+ u16 value = 0;
-+
-+ /* To access external PHYs on the QDS board, the mux needs to be configured */
-+ if ((mii_id) && (pfe->mdio_muxval[mii_id]))
-+ pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
-+
-+ if (regnum & MII_ADDR_C45) {
-+ pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
-+ regnum & 0xffff);
-+ __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
-+ EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
-+ EMAC_MII_DATA_TA,
-+ priv->PHY_baseaddr + EMAC_MII_DATA_REG);
-+ } else {
-+ /* start a read op */
-+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
-+ EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA(regnum) |
-+ EMAC_MII_DATA_TA, priv->PHY_baseaddr +
-+ EMAC_MII_DATA_REG);
-+ }
-+
-+ if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
-+ netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
-+ return -1;
-+ }
-+
-+ value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
-+ EMAC_MII_DATA_REG));
-+ netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
-+ mii_id, regnum, value);
-+ return value;
-+}
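-+
-+/*
-+ * For clause-45 accesses, MII_ADDR_C45 (bit 30) flags the request and
-+ * regnum packs the MMD device address in bits 16-20 with the 16-bit
-+ * register address in the low word, hence the (regnum >> 16) & 0x1f
-+ * and regnum & 0xffff split in the two functions above.
-+ */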
-+
-+static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
-+ struct ls1012a_mdio_platform_data *minfo)
-+{
-+ struct mii_bus *bus;
-+ int rc;
-+
-+ netif_info(priv, drv, priv->ndev, "%s\n", __func__);
-+ pr_info("%s\n", __func__);
-+
-+ bus = mdiobus_alloc();
-+ if (!bus) {
-+ netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
-+ rc = -ENOMEM;
-+ goto err0;
-+ }
-+
-+ bus->name = "ls1012a MDIO Bus";
-+ bus->read = &pfe_eth_mdio_read;
-+ bus->write = &pfe_eth_mdio_write;
-+ bus->reset = &pfe_eth_mdio_reset;
-+ snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
-+ bus->priv = priv;
-+
-+ bus->phy_mask = minfo->phy_mask;
-+ priv->mdc_div = minfo->mdc_div;
-+
-+ if (!priv->mdc_div)
-+ priv->mdc_div = 64;
-+
-+ bus->irq[0] = minfo->irq[0];
-+
-+ bus->parent = priv->pfe->dev;
-+
-+ netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
-+ __func__, priv->mdc_div, bus->phy_mask);
-+ rc = mdiobus_register(bus);
-+ if (rc) {
-+ netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
-+ bus->name);
-+ goto err1;
-+ }
-+
-+ priv->mii_bus = bus;
-+ pfe_eth_mdio_reset(bus);
-+
-+ return 0;
-+
-+err1:
-+ mdiobus_free(bus);
-+err0:
-+ return rc;
-+}
-+
-+/* pfe_eth_mdio_exit
-+ */
-+static void pfe_eth_mdio_exit(struct mii_bus *bus)
-+{
-+ if (!bus)
-+ return;
-+
-+ netif_info((struct pfe_eth_priv_s *)bus->priv, drv, ((struct
-+ pfe_eth_priv_s *)(bus->priv))->ndev, "%s\n", __func__);
-+
-+ mdiobus_unregister(bus);
-+ mdiobus_free(bus);
-+}
-+
-+/* pfe_get_phydev_speed
-+ */
-+static int pfe_get_phydev_speed(struct phy_device *phydev)
-+{
-+ switch (phydev->speed) {
-+ case 10:
-+ return SPEED_10M;
-+ case 100:
-+ return SPEED_100M;
-+ case 1000:
-+ default:
-+ return SPEED_1000M;
-+ }
-+}
-+
-+/* pfe_set_rgmii_speed
-+ */
-+#define RGMIIPCR 0x434
-+/* RGMIIPCR bit definitions*/
-+#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
-+#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
-+#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
-+#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
-+#define SCFG_RGMIIPCR_SETFD (0x00000001)
-+
-+static void pfe_set_rgmii_speed(struct phy_device *phydev)
-+{
-+ u32 rgmii_pcr;
-+
-+ regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
-+ rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
-+
-+ switch (phydev->speed) {
-+ case 10:
-+ rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
-+ break;
-+ case 1000:
-+ rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
-+ break;
-+ case 100:
-+ default:
-+ /* Default is 100M */
-+ break;
-+ }
-+ regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
-+}
-+
-+/* pfe_get_phydev_duplex
-+ */
-+static int pfe_get_phydev_duplex(struct phy_device *phydev)
-+{
-+ /*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
-+ return DUPLEX_FULL;
-+}
-+
-+/* pfe_eth_adjust_link
-+ */
-+static void pfe_eth_adjust_link(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ unsigned long flags;
-+ struct phy_device *phydev = priv->phydev;
-+ int new_state = 0;
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ spin_lock_irqsave(&priv->lock, flags);
-+
-+ if (phydev->link) {
-+ /*
-+ * Now we make sure that we can be in full duplex mode.
-+ * If not, we operate in half-duplex mode.
-+ */
-+ if (phydev->duplex != priv->oldduplex) {
-+ new_state = 1;
-+ gemac_set_duplex(priv->EMAC_baseaddr,
-+ pfe_get_phydev_duplex(phydev));
-+ priv->oldduplex = phydev->duplex;
-+ }
-+
-+ if (phydev->speed != priv->oldspeed) {
-+ new_state = 1;
-+ gemac_set_speed(priv->EMAC_baseaddr,
-+ pfe_get_phydev_speed(phydev));
-+ if (priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII)
-+ pfe_set_rgmii_speed(phydev);
-+ priv->oldspeed = phydev->speed;
-+ }
-+
-+ if (!priv->oldlink) {
-+ new_state = 1;
-+ priv->oldlink = 1;
-+ }
-+
-+ } else if (priv->oldlink) {
-+ new_state = 1;
-+ priv->oldlink = 0;
-+ priv->oldspeed = 0;
-+ priv->oldduplex = -1;
-+ }
-+
-+ if (new_state && netif_msg_link(priv))
-+ phy_print_status(phydev);
-+
-+ spin_unlock_irqrestore(&priv->lock, flags);
-+}
-+
-+/* pfe_phy_exit
-+ */
-+static void pfe_phy_exit(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ phy_disconnect(priv->phydev);
-+ priv->phydev = NULL;
-+}
-+
-+/* pfe_eth_stop
-+ */
-+static void pfe_eth_stop(struct net_device *ndev, int wake)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ if (wake) {
-+ gemac_tx_disable(priv->EMAC_baseaddr);
-+ } else {
-+ gemac_disable(priv->EMAC_baseaddr);
-+ gpi_disable(priv->GPI_baseaddr);
-+
-+ if (priv->phydev)
-+ phy_stop(priv->phydev);
-+ }
-+}
-+
-+/* pfe_eth_start
-+ */
-+static int pfe_eth_start(struct pfe_eth_priv_s *priv)
-+{
-+ netif_info(priv, drv, priv->ndev, "%s\n", __func__);
-+
-+ if (priv->phydev)
-+ phy_start(priv->phydev);
-+
-+ gpi_enable(priv->GPI_baseaddr);
-+ gemac_enable(priv->EMAC_baseaddr);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Configure on chip serdes through mdio
-+ */
-+static void ls1012a_configure_serdes(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
-+ int sgmii_2500 = 0;
-+ struct mii_bus *bus = priv->mii_bus;
-+
-+ if (priv->einfo->mii_config == PHY_INTERFACE_MODE_SGMII_2500)
-+ sgmii_2500 = 1;
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+ /* PCS configuration done with corresponding GEMAC */
-+
-+ pfe_eth_mdio_read(bus, 0, 0);
-+ pfe_eth_mdio_read(bus, 0, 1);
-+
-+ /* These settings were provided by the validation team */
-+ pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
-+ if (sgmii_2500) {
-+ pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
-+ pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
-+ pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
-+ pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
-+ } else {
-+ pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
-+ pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
-+ pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
-+ pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
-+ }
-+
-+ pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
-+}
-+
-+/*
-+ * pfe_phy_init
-+ *
-+ */
-+static int pfe_phy_init(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct phy_device *phydev;
-+ char phy_id[MII_BUS_ID_SIZE + 3];
-+ char bus_id[MII_BUS_ID_SIZE];
-+ phy_interface_t interface;
-+
-+ priv->oldlink = 0;
-+ priv->oldspeed = 0;
-+ priv->oldduplex = -1;
-+
-+ snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
-+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
-+ priv->einfo->phy_id);
-+
-+ netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
-+ interface = priv->einfo->mii_config;
-+ if ((interface == PHY_INTERFACE_MODE_SGMII) ||
-+ (interface == PHY_INTERFACE_MODE_SGMII_2500)) {
-+ /*Configure SGMII PCS */
-+ if (pfe->scfg) {
-+ /*Config MDIO from serdes */
-+ regmap_write(pfe->scfg, 0x484, 0x00000000);
-+ }
-+ ls1012a_configure_serdes(ndev);
-+ }
-+
-+ if (pfe->scfg) {
-+ /*Config MDIO from PAD */
-+ regmap_write(pfe->scfg, 0x484, 0x80000000);
-+ }
-+
-+ pr_info("%s interface %x\n", __func__, interface);
-+ phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
-+
-+ if (IS_ERR(phydev)) {
-+ netdev_err(ndev, "phy_connect() failed\n");
-+ return PTR_ERR(phydev);
-+ }
-+
-+ priv->phydev = phydev;
-+ phydev->irq = PHY_POLL;
-+
-+ return 0;
-+}
-+
-+/* pfe_gemac_init
-+ */
-+static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
-+{
-+ struct gemac_cfg cfg;
-+
-+ netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
-+
-+ cfg.speed = SPEED_1000M;
-+ cfg.duplex = DUPLEX_FULL;
-+
-+ gemac_set_config(priv->EMAC_baseaddr, &cfg);
-+ gemac_allow_broadcast(priv->EMAC_baseaddr);
-+ gemac_enable_1536_rx(priv->EMAC_baseaddr);
-+ gemac_enable_rx_jmb(priv->EMAC_baseaddr);
-+ gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
-+ gemac_enable_pause_rx(priv->EMAC_baseaddr);
-+ gemac_set_bus_width(priv->EMAC_baseaddr, 64);
-+
-+ /*GEM will perform checksum verifications*/
-+ if (priv->ndev->features & NETIF_F_RXCSUM)
-+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
-+ else
-+ gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_event_handler
-+ */
-+static int pfe_eth_event_handler(void *data, int event, int qno)
-+{
-+ struct pfe_eth_priv_s *priv = data;
-+
-+ switch (event) {
-+ case EVENT_RX_PKT_IND:
-+
-+ if (qno == 0) {
-+ if (napi_schedule_prep(&priv->high_napi)) {
-+ netif_info(priv, intr, priv->ndev,
-+ "%s: schedule high prio poll\n"
-+ , __func__);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_SCHED_COUNT]++;
-+#endif
-+
-+ __napi_schedule(&priv->high_napi);
-+ }
-+ } else if (qno == 1) {
-+ if (napi_schedule_prep(&priv->low_napi)) {
-+ netif_info(priv, intr, priv->ndev,
-+ "%s: schedule low prio poll\n"
-+ , __func__);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_SCHED_COUNT]++;
-+#endif
-+ __napi_schedule(&priv->low_napi);
-+ }
-+ } else if (qno == 2) {
-+ if (napi_schedule_prep(&priv->lro_napi)) {
-+ netif_info(priv, intr, priv->ndev,
-+ "%s: schedule lro prio poll\n"
-+ , __func__);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_SCHED_COUNT]++;
-+#endif
-+ __napi_schedule(&priv->lro_napi);
-+ }
-+ }
-+
-+ break;
-+
-+ case EVENT_TXDONE_IND:
-+ pfe_eth_flush_tx(priv);
-+ hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
-+ break;
-+ case EVENT_HIGH_RX_WM:
-+ default:
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_open
-+ */
-+static int pfe_eth_open(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct hif_client_s *client;
-+ int rc;
-+
-+ netif_info(priv, ifup, ndev, "%s\n", __func__);
-+
-+ /* Register client driver with HIF */
-+ client = &priv->client;
-+ memset(client, 0, sizeof(*client));
-+ client->id = PFE_CL_GEM0 + priv->id;
-+ client->tx_qn = emac_txq_cnt;
-+ client->rx_qn = EMAC_RXQ_CNT;
-+ client->priv = priv;
-+ client->pfe = priv->pfe;
-+ client->event_handler = pfe_eth_event_handler;
-+
-+ client->tx_qsize = EMAC_TXQ_DEPTH;
-+ client->rx_qsize = EMAC_RXQ_DEPTH;
-+
-+ rc = hif_lib_client_register(client);
-+ if (rc) {
-+ netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
-+ __func__, client->id);
-+ goto err0;
-+ }
-+
-+ netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
-+ client);
-+
-+ pfe_gemac_init(priv);
-+
-+ if (!is_valid_ether_addr(ndev->dev_addr)) {
-+ netdev_err(ndev, "%s: invalid MAC address\n", __func__);
-+ rc = -EADDRNOTAVAIL;
-+ goto err1;
-+ }
-+
-+ gemac_set_laddrN(priv->EMAC_baseaddr,
-+ (struct pfe_mac_addr *)ndev->dev_addr, 1);
-+
-+ napi_enable(&priv->high_napi);
-+ napi_enable(&priv->low_napi);
-+ napi_enable(&priv->lro_napi);
-+
-+ rc = pfe_eth_start(priv);
-+
-+ netif_tx_wake_all_queues(ndev);
-+
-+ return rc;
-+
-+err1:
-+ hif_lib_client_unregister(&priv->client);
-+
-+err0:
-+ return rc;
-+}
-+
-+/*
-+ * pfe_eth_shutdown
-+ */
-+int pfe_eth_shutdown(struct net_device *ndev, int wake)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int i, qstatus;
-+ unsigned long next_poll = jiffies + 1, end = jiffies +
-+ (TX_POLL_TIMEOUT_MS * HZ) / 1000;
-+ int tx_pkts, prv_tx_pkts;
-+
-+ netif_info(priv, ifdown, ndev, "%s\n", __func__);
-+
-+ for (i = 0; i < emac_txq_cnt; i++)
-+ hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
-+
-+ netif_tx_stop_all_queues(ndev);
-+
-+ do {
-+ tx_pkts = 0;
-+ pfe_eth_flush_tx(priv);
-+
-+ for (i = 0; i < emac_txq_cnt; i++)
-+ tx_pkts += hif_lib_tx_pending(&priv->client, i);
-+
-+ if (tx_pkts) {
-+ /*Don't wait forever, break if we cross max timeout */
-+ if (time_after(jiffies, end)) {
-+ pr_err(
-+ "(%s)Tx is not complete after %dmsec\n",
-+ ndev->name, TX_POLL_TIMEOUT_MS);
-+ break;
-+ }
-+
-+ pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
-+ , __func__, ndev->name, tx_pkts);
-+ if (need_resched())
-+ schedule();
-+ }
-+
-+ } while (tx_pkts);
-+
-+ end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
-+
-+ prv_tx_pkts = tmu_pkts_processed(priv->id);
-+ /*
-+ * Wait till the TMU transmits all pending packets:
-+ * poll tmu_qstatus and the number of packets processed by the TMU
-+ * on every poll interval, and consider the TMU busy if we see any
-+ * queue occupancy or any packets processed by the TMU
-+ */
-+ while (1) {
-+ if (time_after(jiffies, next_poll)) {
-+ tx_pkts = tmu_pkts_processed(priv->id);
-+ qstatus = tmu_qstatus(priv->id) & 0x7ffff;
-+
-+ if (!qstatus && (tx_pkts == prv_tx_pkts))
-+ break;
-+ /* Don't wait forever, break if we cross max
-+ * timeout(TX_POLL_TIMEOUT_MS)
-+ */
-+ if (time_after(jiffies, end)) {
-+ pr_err("TMU%d is busy after %dmsec\n",
-+ priv->id, TX_POLL_TIMEOUT_MS);
-+ break;
-+ }
-+ prv_tx_pkts = tx_pkts;
-+ next_poll++;
-+ }
-+ if (need_resched())
-+ schedule();
-+ }
-+ /* Wait for some more time to complete transmitting packet if any */
-+ next_poll = jiffies + 1;
-+ while (1) {
-+ if (time_after(jiffies, next_poll))
-+ break;
-+ if (need_resched())
-+ schedule();
-+ }
-+
-+ pfe_eth_stop(ndev, wake);
-+
-+ napi_disable(&priv->lro_napi);
-+ napi_disable(&priv->low_napi);
-+ napi_disable(&priv->high_napi);
-+
-+ hif_lib_client_unregister(&priv->client);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_close
-+ *
-+ */
-+static int pfe_eth_close(struct net_device *ndev)
-+{
-+ pfe_eth_shutdown(ndev, 0);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_suspend
-+ *
-+ * return value : 1 if netdevice is configured to wakeup system
-+ * 0 otherwise
-+ */
-+int pfe_eth_suspend(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int retval = 0;
-+
-+ if (priv->wol) {
-+ gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
-+ retval = 1;
-+ }
-+ pfe_eth_shutdown(ndev, priv->wol);
-+
-+ return retval;
-+}
-+
-+/* pfe_eth_resume
-+ *
-+ */
-+int pfe_eth_resume(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ if (priv->wol)
-+ gemac_set_wol(priv->EMAC_baseaddr, 0);
-+ gemac_tx_enable(priv->EMAC_baseaddr);
-+
-+ return pfe_eth_open(ndev);
-+}
-+
-+/* pfe_eth_get_queuenum
-+ */
-+static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
-+ *skb)
-+{
-+ int queuenum = 0;
-+ unsigned long flags;
-+
-+ /* Get the Fast Path queue number */
-+ /*
-+ * Use conntrack mark (if conntrack exists), then packet mark (if any),
-+ * then fallback to default
-+ */
-+#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
-+ if (skb->_nfct) {
-+ enum ip_conntrack_info cinfo;
-+ struct nf_conn *ct;
-+
-+ ct = nf_ct_get(skb, &cinfo);
-+
-+ if (ct) {
-+ u32 connmark;
-+
-+ connmark = ct->mark;
-+
-+ if ((connmark & 0x80000000) && priv->id != 0)
-+ connmark >>= 16;
-+
-+ queuenum = connmark & EMAC_QUEUENUM_MASK;
-+ }
-+ } else {/* continued after #endif ... */
-+#endif
-+ if (skb->mark) {
-+ queuenum = skb->mark & EMAC_QUEUENUM_MASK;
-+ } else {
-+ spin_lock_irqsave(&priv->lock, flags);
-+ queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
-+ spin_unlock_irqrestore(&priv->lock, flags);
-+ }
-+#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
-+ }
-+#endif
-+ return queuenum;
-+}
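-+
-+/*
-+ * Example: a flow whose conntrack or skb mark is 3 (after masking with
-+ * EMAC_QUEUENUM_MASK) is steered to TX queue 3, while unmarked traffic
-+ * falls back to priv->default_priority (15 by default, see
-+ * pfe_eth_sysfs_init()).
-+ */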
-+
-+/* pfe_eth_might_stop_tx
-+ *
-+ */
-+static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
-+ struct netdev_queue *tx_queue,
-+ unsigned int n_desc,
-+ unsigned int n_segs)
-+{
-+ ktime_t kt;
-+
-+ if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
-+ (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
-+ (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
-+#ifdef PFE_ETH_TX_STATS
-+ if (__hif_tx_avail(&pfe->hif) < n_desc) {
-+ priv->stop_queue_hif[queuenum]++;
-+ } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
-+ priv->stop_queue_hif_client[queuenum]++;
-+ } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
-+ n_segs) {
-+ priv->stop_queue_credit[queuenum]++;
-+ }
-+ priv->stop_queue_total[queuenum]++;
-+#endif
-+ netif_tx_stop_queue(tx_queue);
-+
-+ kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
-+ NSEC_PER_MSEC);
-+ hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
-+ HRTIMER_MODE_REL);
-+ return -1;
-+ } else {
-+ return 0;
-+ }
-+}
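-+
-+/*
-+ * When pfe_eth_might_stop_tx() stops a queue it arms the per-queue
-+ * hrtimer above; the timer callback (set up where fast_tx_timeout is
-+ * initialized, outside this excerpt) is expected to wake the queue
-+ * again after LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS.
-+ */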
-+
-+#define SA_MAX_OP 2
-+/* pfe_hif_send_packet
-+ *
-+ * At this level if TX fails we drop the packet
-+ */
-+static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
-+ *priv, int queuenum)
-+{
-+ struct skb_shared_info *sh = skb_shinfo(skb);
-+ unsigned int nr_frags;
-+ u32 ctrl = 0;
-+
-+ netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
-+
-+ if (skb_is_gso(skb)) {
-+ priv->stats.tx_dropped++;
-+ return;
-+ }
-+
-+ if (skb->ip_summed == CHECKSUM_PARTIAL)
-+ ctrl = HIF_CTRL_TX_CHECKSUM;
-+
-+ nr_frags = sh->nr_frags;
-+
-+ if (nr_frags) {
-+ skb_frag_t *f;
-+ int i;
-+
-+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
-+ skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
-+ skb);
-+
-+ for (i = 0; i < nr_frags - 1; i++) {
-+ f = &sh->frags[i];
-+ __hif_lib_xmit_pkt(&priv->client, queuenum,
-+ skb_frag_address(f),
-+ skb_frag_size(f),
-+ 0x0, 0x0, skb);
-+ }
-+
-+ f = &sh->frags[i];
-+
-+ __hif_lib_xmit_pkt(&priv->client, queuenum,
-+ skb_frag_address(f), skb_frag_size(f),
-+ 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
-+ skb);
-+
-+ netif_info(priv, tx_queued, priv->ndev,
-+ "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
-+ __func__, skb, nr_frags, skb->len);
-+ } else {
-+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
-+ skb->len, ctrl, HIF_FIRST_BUFFER |
-+ HIF_LAST_BUFFER | HIF_DATA_VALID,
-+ skb);
-+ netif_info(priv, tx_queued, priv->ndev,
-+ "%s: pkt sent successfully skb:%p len:%d\n",
-+ __func__, skb, skb->len);
-+ }
-+ hif_tx_dma_start();
-+ priv->stats.tx_packets++;
-+ priv->stats.tx_bytes += skb->len;
-+ hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
-+}
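-+
-+/*
-+ * Note the flag placement above: only the last buffer of an skb is
-+ * tagged HIF_DATA_VALID, so pfe_eth_flush_txQ() below frees each skb
-+ * exactly once, after its final fragment completes.
-+ */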
-+
-+/* pfe_eth_flush_txQ
-+ */
-+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
-+ from_tx, int n_desc)
-+{
-+ struct sk_buff *skb;
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ tx_q_num);
-+ unsigned int flags;
-+
-+ netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
-+
-+ if (!from_tx)
-+ __netif_tx_lock_bh(tx_queue);
-+
-+ /* Clean HIF and client queue */
-+ while ((skb = hif_lib_tx_get_next_complete(&priv->client,
-+ tx_q_num, &flags,
-+ HIF_TX_DESC_NT))) {
-+ if (flags & HIF_DATA_VALID)
-+ dev_kfree_skb_any(skb);
-+ }
-+ if (!from_tx)
-+ __netif_tx_unlock_bh(tx_queue);
-+}
-+
-+/* pfe_eth_flush_tx
-+ */
-+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
-+{
-+ int ii;
-+
-+ netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
-+
-+ for (ii = 0; ii < emac_txq_cnt; ii++)
-+ pfe_eth_flush_txQ(priv, ii, 0, 0);
-+}
-+
-+void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
-+ *n_segs)
-+{
-+ struct skb_shared_info *sh = skb_shinfo(skb);
-+
-+	/* Scattered data */
-+	if (sh->nr_frags) {
-+		*n_desc = sh->nr_frags + 1;
-+		*n_segs = 1;
-+	} else {
-+		/* Regular case */
-+		*n_desc = 1;
-+		*n_segs = 1;
-+	}
-+}
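-+
-+/*
-+ * Example (editor's sketch): a linear skb needs n_desc = 1, an skb with 3
-+ * page fragments needs n_desc = 3 + 1 = 4 (head + fragments); either way
-+ * it counts as a single segment for TX credit accounting.
-+ */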
-+
-+/* pfe_eth_send_packet
-+ */
-+static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int tx_q_num = skb_get_queue_mapping(skb);
-+ int n_desc, n_segs;
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ tx_q_num);
-+
-+ netif_info(priv, tx_queued, ndev, "%s\n", __func__);
-+
-+ if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
-+ sizeof(unsigned long)))) {
-+ netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
-+ __func__);
-+
-+ if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
-+ long)), 0, GFP_ATOMIC)) {
-+			/* No need to re-transmit, no way to recover */
-+ kfree_skb(skb);
-+ priv->stats.tx_dropped++;
-+ return NETDEV_TX_OK;
-+ }
-+ }
-+
-+ pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
-+
-+ hif_tx_lock(&pfe->hif);
-+ if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
-+ n_segs))) {
-+#ifdef PFE_ETH_TX_STATS
-+ if (priv->was_stopped[tx_q_num]) {
-+ priv->clean_fail[tx_q_num]++;
-+ priv->was_stopped[tx_q_num] = 0;
-+ }
-+#endif
-+ hif_tx_unlock(&pfe->hif);
-+ return NETDEV_TX_BUSY;
-+ }
-+
-+ pfe_hif_send_packet(skb, priv, tx_q_num);
-+
-+ hif_tx_unlock(&pfe->hif);
-+
-+ tx_queue->trans_start = jiffies;
-+
-+#ifdef PFE_ETH_TX_STATS
-+ priv->was_stopped[tx_q_num] = 0;
-+#endif
-+
-+ return NETDEV_TX_OK;
-+}
-+
-+/* pfe_eth_select_queue
-+ */
-+static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
-+ struct net_device *sb_dev,
-+ select_queue_fallback_t fallback)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ return pfe_eth_get_queuenum(priv, skb);
-+}
-+
-+/* pfe_eth_get_stats
-+ */
-+static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ return &priv->stats;
-+}
-+
-+/* pfe_eth_set_mac_address
-+ */
-+static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct sockaddr *sa = addr;
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ if (!is_valid_ether_addr(sa->sa_data))
-+ return -EADDRNOTAVAIL;
-+
-+ memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
-+
-+ gemac_set_laddrN(priv->EMAC_baseaddr,
-+ (struct pfe_mac_addr *)ndev->dev_addr, 1);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_enet_addr_byte_mac
-+ */
-+int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
-+ struct pfe_mac_addr *enet_addr)
-+{
-+ if (!enet_byte_addr || !enet_addr) {
-+ return -1;
-+
-+ } else {
-+ enet_addr->bottom = enet_byte_addr[0] |
-+ (enet_byte_addr[1] << 8) |
-+ (enet_byte_addr[2] << 16) |
-+ (enet_byte_addr[3] << 24);
-+ enet_addr->top = enet_byte_addr[4] |
-+ (enet_byte_addr[5] << 8);
-+ return 0;
-+ }
-+}
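-+
-+/*
-+ * Example (editor's sketch): the MAC address 00:11:22:33:44:55 packs as
-+ * bottom = 0x33221100 and top = 0x00005544.
-+ */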
-+
-+/* pfe_eth_set_multi
-+ */
-+static void pfe_eth_set_multi(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct pfe_mac_addr hash_addr; /* hash register structure */
-+ /* specific mac address register structure */
-+ struct pfe_mac_addr spec_addr;
-+	int result; /* index into hash register to set */
-+ int uc_count = 0;
-+ struct netdev_hw_addr *ha;
-+
-+ if (ndev->flags & IFF_PROMISC) {
-+ netif_info(priv, drv, ndev, "entering promiscuous mode\n");
-+
-+ priv->promisc = 1;
-+ gemac_enable_copy_all(priv->EMAC_baseaddr);
-+ } else {
-+ priv->promisc = 0;
-+ gemac_disable_copy_all(priv->EMAC_baseaddr);
-+ }
-+
-+ /* Enable broadcast frame reception if required. */
-+ if (ndev->flags & IFF_BROADCAST) {
-+ gemac_allow_broadcast(priv->EMAC_baseaddr);
-+ } else {
-+ netif_info(priv, drv, ndev,
-+ "disabling broadcast frame reception\n");
-+
-+ gemac_no_broadcast(priv->EMAC_baseaddr);
-+ }
-+
-+ if (ndev->flags & IFF_ALLMULTI) {
-+ /* Set the hash to rx all multicast frames */
-+ hash_addr.bottom = 0xFFFFFFFF;
-+ hash_addr.top = 0xFFFFFFFF;
-+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
-+ netdev_for_each_uc_addr(ha, ndev) {
-+ if (uc_count >= MAX_UC_SPEC_ADDR_REG)
-+ break;
-+ pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
-+ gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
-+ uc_count + 2);
-+ uc_count++;
-+ }
-+ } else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
-+ u8 *addr;
-+
-+ hash_addr.bottom = 0;
-+ hash_addr.top = 0;
-+
-+ netdev_for_each_mc_addr(ha, ndev) {
-+ addr = ha->addr;
-+
-+ netif_info(priv, drv, ndev,
-+ "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
-+ addr[0], addr[1], addr[2],
-+ addr[3], addr[4], addr[5]);
-+
-+ result = pfe_eth_get_hash(addr);
-+
-+ if (result < EMAC_HASH_REG_BITS) {
-+ if (result < 32)
-+ hash_addr.bottom |= (1 << result);
-+ else
-+ hash_addr.top |= (1 << (result - 32));
-+ } else {
-+ break;
-+ }
-+ }
-+
-+ uc_count = -1;
-+ netdev_for_each_uc_addr(ha, ndev) {
-+ addr = ha->addr;
-+
-+ if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
-+ netdev_info(ndev,
-+ "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
-+ addr[0], addr[1], addr[2],
-+ addr[3], addr[4], addr[5]);
-+ pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
-+ gemac_set_laddrN(priv->EMAC_baseaddr,
-+ &spec_addr, uc_count + 2);
-+ } else {
-+ netif_info(priv, drv, ndev,
-+ "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
-+ addr[0], addr[1], addr[2],
-+ addr[3], addr[4], addr[5]);
-+
-+ result = pfe_eth_get_hash(addr);
-+ if (result >= EMAC_HASH_REG_BITS) {
-+ break;
-+
-+ } else {
-+ if (result < 32)
-+ hash_addr.bottom |= (1 <<
-+ result);
-+ else
-+ hash_addr.top |= (1 <<
-+ (result - 32));
-+ }
-+ }
-+ }
-+
-+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
-+ }
-+
-+ if (!(netdev_uc_count(ndev) >= MAX_UC_SPEC_ADDR_REG)) {
-+ /*
-+ * Check if there are any specific address HW registers that
-+ * need to be flushed
-+ */
-+ for (uc_count = netdev_uc_count(ndev); uc_count <
-+ MAX_UC_SPEC_ADDR_REG; uc_count++)
-+ gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
-+ }
-+
-+ if (ndev->flags & IFF_LOOPBACK)
-+ gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
-+}
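-+
-+/*
-+ * Editor's note on the hash filter above: pfe_eth_get_hash() is assumed to
-+ * return a bit index in [0, EMAC_HASH_REG_BITS); indices below 32 set a
-+ * bit in hash_addr.bottom (EMAC_GALR), the rest set a bit in hash_addr.top
-+ * (EMAC_GAUR).
-+ */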
-+
-+/* pfe_eth_set_features
-+ */
-+static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
-+ features)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int rc = 0;
-+
-+ if (features & NETIF_F_RXCSUM)
-+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
-+ else
-+ gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
-+ return rc;
-+}
-+
-+/* pfe_eth_fast_tx_timeout
-+ */
-+static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
-+{
-+ struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
-+ pfe_eth_fast_timer,
-+ timer);
-+ struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
-+ struct pfe_eth_priv_s,
-+ fast_tx_timeout);
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ fast_tx_timeout->queuenum);
-+
-+ if (netif_tx_queue_stopped(tx_queue)) {
-+#ifdef PFE_ETH_TX_STATS
-+ priv->was_stopped[fast_tx_timeout->queuenum] = 1;
-+#endif
-+ netif_tx_wake_queue(tx_queue);
-+ }
-+
-+ return HRTIMER_NORESTART;
-+}
-+
-+/* pfe_eth_fast_tx_timeout_init
-+ */
-+static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
-+{
-+ int i;
-+
-+ for (i = 0; i < emac_txq_cnt; i++) {
-+ priv->fast_tx_timeout[i].queuenum = i;
-+ hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
-+ HRTIMER_MODE_REL);
-+ priv->fast_tx_timeout[i].timer.function =
-+ pfe_eth_fast_tx_timeout;
-+ priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
-+ }
-+}
-+
-+static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
-+ struct pfe_eth_priv_s *priv,
-+ unsigned int qno)
-+{
-+ void *buf_addr;
-+ unsigned int rx_ctrl;
-+ unsigned int desc_ctrl = 0;
-+ struct hif_ipsec_hdr *ipsec_hdr = NULL;
-+ struct sk_buff *skb;
-+ struct sk_buff *skb_frag, *skb_frag_last = NULL;
-+ int length = 0, offset;
-+
-+ skb = priv->skb_inflight[qno];
-+
-+ if (skb) {
-+ skb_frag_last = skb_shinfo(skb)->frag_list;
-+ if (skb_frag_last) {
-+ while (skb_frag_last->next)
-+ skb_frag_last = skb_frag_last->next;
-+ }
-+ }
-+
-+ while (!(desc_ctrl & CL_DESC_LAST)) {
-+ buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
-+ &offset, &rx_ctrl, &desc_ctrl,
-+ (void **)&ipsec_hdr);
-+ if (!buf_addr)
-+ goto incomplete;
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_DESC_COUNT]++;
-+#endif
-+
-+ /* First frag */
-+ if (desc_ctrl & CL_DESC_FIRST) {
-+ skb = build_skb(buf_addr, 0);
-+ if (unlikely(!skb))
-+ goto pkt_drop;
-+
-+ skb_reserve(skb, offset);
-+ skb_put(skb, length);
-+ skb->dev = ndev;
-+
-+ if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
-+ HIF_CTRL_RX_CHECKSUMMED))
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+ else
-+ skb_checksum_none_assert(skb);
-+
-+ } else {
-+ /* Next frags */
-+ if (unlikely(!skb)) {
-+ pr_err("%s: NULL skb_inflight\n",
-+ __func__);
-+ goto pkt_drop;
-+ }
-+
-+ skb_frag = build_skb(buf_addr, 0);
-+
-+ if (unlikely(!skb_frag)) {
-+ kfree(buf_addr);
-+ goto pkt_drop;
-+ }
-+
-+ skb_reserve(skb_frag, offset);
-+ skb_put(skb_frag, length);
-+
-+ skb_frag->dev = ndev;
-+
-+ if (skb_shinfo(skb)->frag_list)
-+ skb_frag_last->next = skb_frag;
-+ else
-+ skb_shinfo(skb)->frag_list = skb_frag;
-+
-+ skb->truesize += skb_frag->truesize;
-+ skb->data_len += length;
-+ skb->len += length;
-+ skb_frag_last = skb_frag;
-+ }
-+ }
-+
-+ priv->skb_inflight[qno] = NULL;
-+ return skb;
-+
-+incomplete:
-+ priv->skb_inflight[qno] = skb;
-+ return NULL;
-+
-+pkt_drop:
-+ priv->skb_inflight[qno] = NULL;
-+
-+ if (skb)
-+ kfree_skb(skb);
-+ else
-+ kfree(buf_addr);
-+
-+ priv->stats.rx_errors++;
-+
-+ return NULL;
-+}
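-+
-+/*
-+ * Illustration (editor's note): a packet spanning three HIF buffers
-+ * arrives as FIRST, middle and LAST descriptors; the head skb is built
-+ * from the first buffer and the remaining buffers are chained on its
-+ * frag_list, with len, data_len and truesize accumulated on the head skb.
-+ */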
-+
-+/* pfe_eth_poll
-+ */
-+static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
-+ unsigned int qno, int budget)
-+{
-+ struct net_device *ndev = priv->ndev;
-+ struct sk_buff *skb;
-+ int work_done = 0;
-+ unsigned int len;
-+
-+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_POLL_COUNT]++;
-+#endif
-+
-+ do {
-+ skb = pfe_eth_rx_skb(ndev, priv, qno);
-+
-+ if (!skb)
-+ break;
-+
-+ len = skb->len;
-+
-+ /* Packet will be processed */
-+ skb->protocol = eth_type_trans(skb, ndev);
-+
-+ netif_receive_skb(skb);
-+
-+ priv->stats.rx_packets++;
-+ priv->stats.rx_bytes += len;
-+
-+ work_done++;
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_PACKET_COUNT]++;
-+#endif
-+
-+ } while (work_done < budget);
-+
-+ /*
-+ * If no Rx receive nor cleanup work was done, exit polling mode.
-+	 * No netif_running(dev) check is required here, as this is
-+	 * checked in net/core/dev.c.
-+ */
-+ if (work_done < budget) {
-+ napi_complete(napi);
-+
-+ hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
-+ qno);
-+ }
-+#ifdef PFE_ETH_NAPI_STATS
-+ else
-+ priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
-+#endif
-+
-+ return work_done;
-+}
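-+
-+/*
-+ * Example (editor's sketch): with budget = 32, the loop above exits as
-+ * soon as the queue drains (work_done < 32), completes NAPI and notifies
-+ * the HIF library so RX events are re-armed; if all 32 packets were
-+ * consumed, the core keeps polling.
-+ */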
-+
-+/*
-+ * pfe_eth_lro_poll
-+ */
-+static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
-+{
-+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
-+ lro_napi);
-+
-+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
-+
-+ return pfe_eth_poll(priv, napi, 2, budget);
-+}
-+
-+/* pfe_eth_low_poll
-+ */
-+static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
-+{
-+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
-+ low_napi);
-+
-+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
-+
-+ return pfe_eth_poll(priv, napi, 1, budget);
-+}
-+
-+/* pfe_eth_high_poll
-+ */
-+static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
-+{
-+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
-+ high_napi);
-+
-+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
-+
-+ return pfe_eth_poll(priv, napi, 0, budget);
-+}
-+
-+static const struct net_device_ops pfe_netdev_ops = {
-+ .ndo_open = pfe_eth_open,
-+ .ndo_stop = pfe_eth_close,
-+ .ndo_start_xmit = pfe_eth_send_packet,
-+ .ndo_select_queue = pfe_eth_select_queue,
-+ .ndo_get_stats = pfe_eth_get_stats,
-+ .ndo_set_mac_address = pfe_eth_set_mac_address,
-+ .ndo_set_rx_mode = pfe_eth_set_multi,
-+ .ndo_set_features = pfe_eth_set_features,
-+ .ndo_validate_addr = eth_validate_addr,
-+};
-+
-+/* pfe_eth_init_one
-+ */
-+static int pfe_eth_init_one(struct pfe *pfe, int id)
-+{
-+ struct net_device *ndev = NULL;
-+ struct pfe_eth_priv_s *priv = NULL;
-+ struct ls1012a_eth_platform_data *einfo;
-+ struct ls1012a_mdio_platform_data *minfo;
-+ struct ls1012a_pfe_platform_data *pfe_info;
-+ int err;
-+
-+	/* Extract platform data */
-+ pfe_info = (struct ls1012a_pfe_platform_data *)
-+ pfe->dev->platform_data;
-+ if (!pfe_info) {
-+ pr_err(
-+ "%s: pfe missing additional platform data\n"
-+ , __func__);
-+ err = -ENODEV;
-+ goto err0;
-+ }
-+
-+ einfo = (struct ls1012a_eth_platform_data *)
-+ pfe_info->ls1012a_eth_pdata;
-+
-+	/* einfo should never be NULL, but no harm in having this check */
-+ if (!einfo) {
-+ pr_err(
-+ "%s: pfe missing additional gemacs platform data\n"
-+ , __func__);
-+ err = -ENODEV;
-+ goto err0;
-+ }
-+
-+ minfo = (struct ls1012a_mdio_platform_data *)
-+ pfe_info->ls1012a_mdio_pdata;
-+
-+	/* minfo should never be NULL, but no harm in having this check */
-+ if (!minfo) {
-+ pr_err(
-+ "%s: pfe missing additional mdios platform data\n",
-+ __func__);
-+ err = -ENODEV;
-+ goto err0;
-+ }
-+
-+ /* Create an ethernet device instance */
-+ ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
-+
-+ if (!ndev) {
-+ pr_err("%s: gemac %d device allocation failed\n",
-+ __func__, einfo[id].gem_id);
-+ err = -ENOMEM;
-+ goto err0;
-+ }
-+
-+ priv = netdev_priv(ndev);
-+ priv->ndev = ndev;
-+ priv->id = einfo[id].gem_id;
-+ priv->pfe = pfe;
-+
-+ SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
-+
-+ pfe->eth.eth_priv[id] = priv;
-+
-+ /* Set the info in the priv to the current info */
-+ priv->einfo = &einfo[id];
-+ priv->EMAC_baseaddr = cbus_emac_base[id];
-+ priv->PHY_baseaddr = cbus_emac_base[0];
-+ priv->GPI_baseaddr = cbus_gpi_base[id];
-+
-+#define HIF_GEMAC_TMUQ_BASE 6
-+ priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
-+ priv->high_tmu_q = priv->low_tmu_q + 1;
-+
-+ spin_lock_init(&priv->lock);
-+
-+ pfe_eth_fast_tx_timeout_init(priv);
-+
-+ /* Copy the station address into the dev structure, */
-+ memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
-+
-+ /* Initialize mdio */
-+ if (minfo[id].enabled) {
-+ err = pfe_eth_mdio_init(priv, &minfo[id]);
-+ if (err) {
-+ netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
-+ __func__);
-+ goto err2;
-+ }
-+ }
-+
-+ ndev->mtu = 1500;
-+
-+ /* Set MTU limits */
-+ ndev->min_mtu = ETH_MIN_MTU;
-+ ndev->max_mtu = JUMBO_FRAME_SIZE;
-+
-+ /* supported features */
-+ ndev->hw_features = NETIF_F_SG;
-+
-+	/* Enabled now that checksum offload has been validated */
-+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
-+ NETIF_F_IPV6_CSUM | NETIF_F_SG;
-+
-+ /* enabled by default */
-+ ndev->features = ndev->hw_features;
-+
-+ priv->usr_features = ndev->features;
-+
-+ ndev->netdev_ops = &pfe_netdev_ops;
-+
-+ ndev->ethtool_ops = &pfe_ethtool_ops;
-+
-+ /* Enable basic messages by default */
-+ priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
-+ NETIF_MSG_PROBE;
-+
-+ netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
-+ HIF_RX_POLL_WEIGHT - 16);
-+ netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
-+ HIF_RX_POLL_WEIGHT - 16);
-+ netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
-+ HIF_RX_POLL_WEIGHT - 16);
-+
-+ err = register_netdev(ndev);
-+
-+ if (err) {
-+ netdev_err(ndev, "register_netdev() failed\n");
-+ goto err3;
-+ }
-+ device_init_wakeup(&ndev->dev, WAKE_MAGIC);
-+
-+ if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
-+ err = pfe_phy_init(ndev);
-+ if (err) {
-+ netdev_err(ndev, "%s: pfe_phy_init() failed\n",
-+ __func__);
-+ goto err4;
-+ }
-+ }
-+
-+ netif_carrier_on(ndev);
-+
-+ /* Create all the sysfs files */
-+ if (pfe_eth_sysfs_init(ndev))
-+ goto err4;
-+
-+ netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
-+ __func__, priv->EMAC_baseaddr);
-+
-+ return 0;
-+err4:
-+ unregister_netdev(ndev);
-+err3:
-+ pfe_eth_mdio_exit(priv->mii_bus);
-+err2:
-+ free_netdev(priv->ndev);
-+err0:
-+ return err;
-+}
-+
-+/* pfe_eth_init
-+ */
-+int pfe_eth_init(struct pfe *pfe)
-+{
-+ int ii = 0;
-+ int err;
-+
-+ pr_info("%s\n", __func__);
-+
-+ cbus_emac_base[0] = EMAC1_BASE_ADDR;
-+ cbus_emac_base[1] = EMAC2_BASE_ADDR;
-+
-+ cbus_gpi_base[0] = EGPI1_BASE_ADDR;
-+ cbus_gpi_base[1] = EGPI2_BASE_ADDR;
-+
-+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
-+ err = pfe_eth_init_one(pfe, ii);
-+ if (err)
-+ goto err0;
-+ }
-+
-+ return 0;
-+
-+err0:
-+ while (ii--)
-+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
-+
-+ return err;
-+}
-+
-+/* pfe_eth_exit_one
-+ */
-+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
-+{
-+ netif_info(priv, probe, priv->ndev, "%s\n", __func__);
-+
-+ pfe_eth_sysfs_exit(priv->ndev);
-+
-+ unregister_netdev(priv->ndev);
-+
-+ if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
-+ pfe_phy_exit(priv->ndev);
-+
-+ if (priv->mii_bus)
-+ pfe_eth_mdio_exit(priv->mii_bus);
-+
-+ free_netdev(priv->ndev);
-+}
-+
-+/* pfe_eth_exit
-+ */
-+void pfe_eth_exit(struct pfe *pfe)
-+{
-+ int ii;
-+
-+ pr_info("%s\n", __func__);
-+
-+ for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
-+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
-@@ -0,0 +1,314 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * @file
-+ * Contains all the functions to handle parsing and loading of PE firmware
-+ * files.
-+ */
-+#include <linux/firmware.h>
-+
-+#include "pfe_mod.h"
-+#include "pfe_firmware.h"
-+#include "pfe/pfe.h"
-+
-+static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
-+ const char *section)
-+{
-+ struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
-+ struct elf32_shdr *shdr;
-+ struct elf32_shdr *shdr_shstr;
-+ Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
-+ Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
-+ Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
-+ Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
-+ Elf32_Off shstr_offset;
-+ Elf32_Word sh_name;
-+ const char *name;
-+ int i;
-+
-+ /* Section header strings */
-+ shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
-+ e_shentsize);
-+ shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
-+
-+ for (i = 0; i < e_shnum; i++) {
-+ shdr = (struct elf32_shdr *)(fw->data + e_shoff
-+ + i * e_shentsize);
-+
-+ sh_name = be32_to_cpu(shdr->sh_name);
-+
-+ name = (const char *)(fw->data + shstr_offset + sh_name);
-+
-+ if (!strcmp(name, section))
-+ return shdr;
-+ }
-+
-+ pr_err("%s: didn't find section %s\n", __func__, section);
-+
-+ return NULL;
-+}
-+
-+#if defined(CFG_DIAGS)
-+static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
-+ *diags_info)
-+{
-+ struct elf32_shdr *shdr;
-+ unsigned long offset, size;
-+
-+ shdr = get_elf_section_header(fw, ".pfe_diags_str");
-+ if (shdr) {
-+ offset = be32_to_cpu(shdr->sh_offset);
-+ size = be32_to_cpu(shdr->sh_size);
-+ diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
-+ diags_info->diags_str_size = size;
-+ diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
-+ memcpy(diags_info->diags_str_array, fw->data + offset, size);
-+
-+ return 0;
-+ } else {
-+ return -1;
-+ }
-+}
-+#endif
-+
-+static void pfe_check_version_info(const struct firmware *fw)
-+{
-+ static char *version;
-+
-+ struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
-+
-+ if (shdr) {
-+ if (!version) {
-+ /*
-+ * this is the first fw we load, use its version
-+ * string as reference (whatever it is)
-+ */
-+ version = (char *)(fw->data +
-+ be32_to_cpu(shdr->sh_offset));
-+
-+ pr_info("PFE binary version: %s\n", version);
-+ } else {
-+ /*
-+ * already have loaded at least one firmware, check
-+ * sequence can start now
-+ */
-+ if (strcmp(version, (char *)(fw->data +
-+ be32_to_cpu(shdr->sh_offset)))) {
-+ pr_info(
-+ "WARNING: PFE firmware binaries from incompatible version\n");
-+ }
-+ }
-+ } else {
-+ /*
-+ * version cannot be verified, a potential issue that should
-+ * be reported
-+ */
-+		pr_info(
-+		"WARNING: PFE firmware version cannot be verified\n");
-+ }
-+}
-+
-+/* PFE elf firmware loader.
-+ * Loads an elf firmware image into a list of PE's (specified using a bitmask)
-+ *
-+ * @param pe_mask Mask of PE id's to load firmware to
-+ * @param fw Pointer to the firmware image
-+ *
-+ * @return 0 on success, a negative value on error
-+ *
-+ */
-+int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
-+{
-+ struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
-+ Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
-+ struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
-+ be32_to_cpu(elf_hdr->e_shoff));
-+ int id, section;
-+ int rc;
-+
-+ pr_info("%s\n", __func__);
-+
-+ /* Some sanity checks */
-+ if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
-+ pr_err("%s: incorrect elf magic number\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
-+ pr_err("%s: incorrect elf class(%x)\n", __func__,
-+ elf_hdr->e_ident[EI_CLASS]);
-+ return -EINVAL;
-+ }
-+
-+ if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
-+ pr_err("%s: incorrect elf data(%x)\n", __func__,
-+ elf_hdr->e_ident[EI_DATA]);
-+ return -EINVAL;
-+ }
-+
-+ if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
-+ pr_err("%s: incorrect elf file type(%x)\n", __func__,
-+ be16_to_cpu(elf_hdr->e_type));
-+ return -EINVAL;
-+ }
-+
-+ for (section = 0; section < sections; section++, shdr++) {
-+ if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
-+ SHF_EXECINSTR)))
-+ continue;
-+
-+ for (id = 0; id < MAX_PE; id++)
-+ if (pe_mask & (1 << id)) {
-+ rc = pe_load_elf_section(id, fw->data, shdr,
-+ pfe->dev);
-+ if (rc < 0)
-+ goto err;
-+ }
-+ }
-+
-+ pfe_check_version_info(fw);
-+
-+ return 0;
-+
-+err:
-+ return rc;
-+}
-+
-+/* PFE firmware initialization.
-+ * Loads different firmware files from filesystem.
-+ * Initializes PE IMEM/DMEM and UTIL-PE DDR
-+ * Initializes control path symbol addresses (by looking them up in the elf
-+ * firmware files)
-+ * Takes PE's out of reset
-+ *
-+ * @return 0 on success, a negative value on error
-+ *
-+ */
-+int pfe_firmware_init(struct pfe *pfe)
-+{
-+ const struct firmware *class_fw, *tmu_fw;
-+ int rc = 0;
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ const char *util_fw_name;
-+ const struct firmware *util_fw;
-+#endif
-+
-+ pr_info("%s\n", __func__);
-+
-+ if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
-+ pr_err("%s: request firmware %s failed\n", __func__,
-+ CLASS_FIRMWARE_FILENAME);
-+ rc = -ETIMEDOUT;
-+ goto err0;
-+ }
-+
-+ if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
-+ pr_err("%s: request firmware %s failed\n", __func__,
-+ TMU_FIRMWARE_FILENAME);
-+ rc = -ETIMEDOUT;
-+ goto err1;
-+	}
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ util_fw_name = UTIL_FIRMWARE_FILENAME;
-+
-+ if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
-+ pr_err("%s: request firmware %s failed\n", __func__,
-+ util_fw_name);
-+ rc = -ETIMEDOUT;
-+ goto err2;
-+ }
-+#endif
-+ rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
-+ if (rc < 0) {
-+ pr_err("%s: class firmware load failed\n", __func__);
-+ goto err3;
-+ }
-+
-+#if defined(CFG_DIAGS)
-+ rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
-+ if (rc < 0) {
-+ pr_warn(
-+ "PFE diags won't be available for class PEs\n");
-+ rc = 0;
-+ }
-+#endif
-+
-+ rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
-+ if (rc < 0) {
-+ pr_err("%s: tmu firmware load failed\n", __func__);
-+ goto err3;
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
-+ if (rc < 0) {
-+ pr_err("%s: util firmware load failed\n", __func__);
-+ goto err3;
-+ }
-+
-+#if defined(CFG_DIAGS)
-+ rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
-+ if (rc < 0) {
-+ pr_warn(
-+ "PFE diags won't be available for util PE\n");
-+ rc = 0;
-+ }
-+#endif
-+
-+ util_enable();
-+#endif
-+
-+ tmu_enable(0xf);
-+ class_enable();
-+
-+err3:
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ release_firmware(util_fw);
-+
-+err2:
-+#endif
-+ release_firmware(tmu_fw);
-+
-+err1:
-+ release_firmware(class_fw);
-+
-+err0:
-+ return rc;
-+}
-+
-+/* PFE firmware cleanup
-+ * Puts PE's in reset
-+ */
-+void pfe_firmware_exit(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ if (pe_reset_all(&pfe->ctrl) != 0)
-+ pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
-+
-+ class_disable();
-+ tmu_disable(0xf);
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ util_disable();
-+#endif
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hal.c
-@@ -0,0 +1,1516 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#include "pfe_mod.h"
-+#include "pfe/pfe.h"
-+
-+void *cbus_base_addr;
-+void *ddr_base_addr;
-+unsigned long ddr_phys_base_addr;
-+unsigned int ddr_size;
-+
-+static struct pe_info pe[MAX_PE];
-+
-+/* Initializes the PFE library.
-+ * Must be called before using any of the library functions.
-+ *
-+ * @param[in] cbus_base CBUS virtual base address (as mapped in
-+ * the host CPU address space)
-+ * @param[in] ddr_base PFE DDR range virtual base address (as
-+ * mapped in the host CPU address space)
-+ * @param[in] ddr_phys_base PFE DDR range physical base address (as
-+ * mapped in platform)
-+ * @param[in] size PFE DDR range size (as defined by the host
-+ * software)
-+ */
-+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
-+ unsigned int size)
-+{
-+ cbus_base_addr = cbus_base;
-+ ddr_base_addr = ddr_base;
-+ ddr_phys_base_addr = ddr_phys_base;
-+ ddr_size = size;
-+
-+ pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
-+ pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
-+ pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
-+ pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
-+ pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
-+ pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
-+ pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
-+ pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
-+ pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
-+ pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
-+ pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
-+ pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
-+ pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
-+ pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
-+ pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
-+ pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
-+ pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
-+ pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
-+
-+ pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
-+ pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
-+ pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
-+ pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
-+ pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
-+ pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
-+
-+ pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
-+ pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
-+ pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
-+ pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
-+ pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
-+ pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
-+ pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
-+ pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
-+ pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
-+#endif
-+}
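-+
-+/*
-+ * Typical usage (editor's sketch; the field names assume the struct pfe
-+ * layout used elsewhere in this driver):
-+ *
-+ *	pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
-+ *		     pfe->ddr_phys_baseaddr, pfe->ddr_size);
-+ */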
-+
-+/* Writes a buffer to PE internal memory from the host
-+ * through indirect access registers.
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] src Buffer source address
-+ * @param[in] mem_access_addr Internal memory destination address (must be
-+ * 32bit aligned)
-+ * @param[in] len Number of bytes to copy
-+ */
-+void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src,
-+			unsigned int len)
-+{
-+ u32 offset = 0, val, addr;
-+ unsigned int len32 = len >> 2;
-+ int i;
-+
-+ addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
-+ PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
-+
-+ for (i = 0; i < len32; i++, offset += 4, src += 4) {
-+ val = *(u32 *)src;
-+ writel(cpu_to_be32(val), pe[id].mem_access_wdata);
-+ writel(addr + offset, pe[id].mem_access_addr);
-+ }
-+
-+ len = (len & 0x3);
-+ if (len) {
-+ val = 0;
-+
-+ addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
-+ PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
-+
-+ for (i = 0; i < len; i++, src++)
-+ val |= (*(u8 *)src) << (8 * i);
-+
-+ writel(cpu_to_be32(val), pe[id].mem_access_wdata);
-+ writel(addr, pe[id].mem_access_addr);
-+ }
-+}
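-+
-+/*
-+ * Example (editor's sketch): len = 6 copies one full 32-bit word, then
-+ * packs the 2 trailing bytes LSB-first into a final write covered by a
-+ * 2-byte byte-enable mask.
-+ */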
-+
-+/* Writes a buffer to PE internal data memory (DMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] src Buffer source address
-+ * @param[in] dst DMEM destination address (must be 32bit
-+ * aligned)
-+ * @param[in] len Number of bytes to copy
-+ */
-+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
-+{
-+ pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
-+ PE_MEM_ACCESS_DMEM, src, len);
-+}
-+
-+/* Writes a buffer to PE internal program memory (PMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., TMU3_ID)
-+ * @param[in] src Buffer source address
-+ * @param[in] dst PMEM destination address (must be 32bit
-+ * aligned)
-+ * @param[in] len Number of bytes to copy
-+ */
-+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
-+{
-+ pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
-+ - 1)) | PE_MEM_ACCESS_IMEM, src, len);
-+}
-+
-+/* Reads PE internal program memory (IMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., TMU3_ID)
-+ * @param[in] addr PMEM read address (must be aligned on size)
-+ * @param[in] size Number of bytes to read (maximum 4, must not
-+ * cross 32bit boundaries)
-+ * @return the data read (in PE endianness, i.e BE).
-+ */
-+u32 pe_pmem_read(int id, u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+ u32 mask = 0xffffffff >> ((4 - size) << 3);
-+ u32 val;
-+
-+ addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
-+ | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
-+
-+ writel(addr, pe[id].mem_access_addr);
-+ val = be32_to_cpu(readl(pe[id].mem_access_rdata));
-+
-+ return (val >> (offset << 3)) & mask;
-+}
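-+
-+/*
-+ * Example (editor's sketch): a 2-byte read at an address ending in 0x6
-+ * gives offset = 2 and mask = 0xffff, so the function returns bits 31:16
-+ * of the 32-bit word at addr & ~0x3.
-+ */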
-+
-+/* Writes PE internal data memory (DMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] addr DMEM write address (must be aligned on size)
-+ * @param[in] val Value to write (in PE endianness, i.e BE)
-+ * @param[in] size Number of bytes to write (maximum 4, must not
-+ * cross 32bit boundaries)
-+ */
-+void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+
-+ addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
-+ PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
-+
-+ /* Indirect access interface is byte swapping data being written */
-+ writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
-+ writel(addr, pe[id].mem_access_addr);
-+}
-+
-+/* Reads PE internal data memory (DMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] addr DMEM read address (must be aligned on size)
-+ * @param[in] size Number of bytes to read (maximum 4, must not
-+ * cross 32bit boundaries)
-+ * @return the data read (in PE endianness, i.e BE).
-+ */
-+u32 pe_dmem_read(int id, u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+ u32 mask = 0xffffffff >> ((4 - size) << 3);
-+ u32 val;
-+
-+ addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
-+ PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
-+
-+ writel(addr, pe[id].mem_access_addr);
-+
-+ /* Indirect access interface is byte swapping data being read */
-+ val = be32_to_cpu(readl(pe[id].mem_access_rdata));
-+
-+ return (val >> (offset << 3)) & mask;
-+}
-+
-+/* Writes to CLASS internal bus peripherals (ccu, pe-lem) from the host
-+ * through indirect access registers.
-+ * @param[in] val value to write
-+ * @param[in] addr Address to write to (must be aligned on size)
-+ * @param[in] size Number of bytes to write (1, 2 or 4)
-+ *
-+ */
-+void class_bus_write(u32 val, u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+
-+ writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
-+
-+ addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
-+ (size << 24);
-+
-+ writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
-+ writel(addr, CLASS_BUS_ACCESS_ADDR);
-+}
-+
-+/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
-+ * through indirect access registers.
-+ * @param[in] addr Address to read from (must be aligned on size)
-+ * @param[in] size Number of bytes to read (1, 2 or 4)
-+ * @return the read data
-+ *
-+ */
-+u32 class_bus_read(u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+ u32 mask = 0xffffffff >> ((4 - size) << 3);
-+ u32 val;
-+
-+ writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
-+
-+ addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
-+
-+ writel(addr, CLASS_BUS_ACCESS_ADDR);
-+ val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
-+
-+ return (val >> (offset << 3)) & mask;
-+}
-+
-+/* Writes data to the cluster memory (PE_LMEM)
-+ * @param[in] dst PE LMEM destination address (must be 32bit aligned)
-+ * @param[in] src Buffer source address
-+ * @param[in] len Number of bytes to copy
-+ */
-+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
-+{
-+ u32 len32 = len >> 2;
-+ int i;
-+
-+ for (i = 0; i < len32; i++, src += 4, dst += 4)
-+ class_bus_write(*(u32 *)src, dst, 4);
-+
-+ if (len & 0x2) {
-+ class_bus_write(*(u16 *)src, dst, 2);
-+ src += 2;
-+ dst += 2;
-+ }
-+
-+ if (len & 0x1) {
-+ class_bus_write(*(u8 *)src, dst, 1);
-+ src++;
-+ dst++;
-+ }
-+}
-+
-+/* Writes value to the cluster memory (PE_LMEM)
-+ * @param[in] dst PE LMEM destination address (must be 32bit aligned)
-+ * @param[in] val Value to write
-+ * @param[in] len Number of bytes to write
-+ */
-+void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
-+{
-+ u32 len32 = len >> 2;
-+ int i;
-+
-+ val = val | (val << 8) | (val << 16) | (val << 24);
-+
-+ for (i = 0; i < len32; i++, dst += 4)
-+ class_bus_write(val, dst, 4);
-+
-+ if (len & 0x2) {
-+ class_bus_write(val, dst, 2);
-+ dst += 2;
-+ }
-+
-+ if (len & 0x1) {
-+ class_bus_write(val, dst, 1);
-+ dst++;
-+ }
-+}
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+
-+/* Writes UTIL program memory (DDR) from the host.
-+ *
-+ * @param[in] addr Address to write (virtual, must be aligned on size)
-+ * @param[in] val Value to write (in PE endianness, i.e BE)
-+ * @param[in] size Number of bytes to write (2 or 4)
-+ */
-+static void util_pmem_write(u32 val, void *addr, u8 size)
-+{
-+ void *addr64 = (void *)((unsigned long)addr & ~0x7);
-+ unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
-+
-+ /*
-+ * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
-+ * location
-+ */
-+ if (size == 4)
-+ writel(be32_to_cpu(val), addr64 + off);
-+ else
-+ writew(be16_to_cpu((u16)val), addr64 + off);
-+}
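-+
-+/*
-+ * Example (editor's sketch): a 4-byte write at an 8-byte-aligned address
-+ * gives off = 8 - 0 - 4 = 4, placing the word in the upper half of the
-+ * aligned 64-bit location, which implements the swapped 64-bit IMEM
-+ * layout described above.
-+ */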
-+
-+/* Writes a buffer to UTIL program memory (DDR) from the host.
-+ *
-+ * @param[in] dst Address to write (virtual, must be at least 16bit
-+ * aligned)
-+ * @param[in] src Buffer to write (in PE endianness, i.e BE, must have
-+ * same alignment as dst)
-+ * @param[in] len Number of bytes to write (must be at least 16bit
-+ * aligned)
-+ */
-+static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
-+{
-+ unsigned int len32;
-+ int i;
-+
-+ if ((unsigned long)src & 0x2) {
-+ util_pmem_write(*(u16 *)src, dst, 2);
-+ src += 2;
-+ dst += 2;
-+ len -= 2;
-+ }
-+
-+ len32 = len >> 2;
-+
-+ for (i = 0; i < len32; i++, dst += 4, src += 4)
-+ util_pmem_write(*(u32 *)src, dst, 4);
-+
-+ if (len & 0x2)
-+ util_pmem_write(*(u16 *)src, dst, len & 0x2);
-+}
-+#endif
-+
-+/* Loads an elf section into pmem
-+ * Code needs to be at least 16bit aligned and only PROGBITS sections are
-+ * supported
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ...,
-+ * TMU3_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+static int pe_load_pmem_section(int id, const void *data,
-+ struct elf32_shdr *shdr)
-+{
-+ u32 offset = be32_to_cpu(shdr->sh_offset);
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+ u32 type = be32_to_cpu(shdr->sh_type);
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ if (id == UTIL_ID) {
-+ pr_err("%s: unsupported pmem section for UTIL\n",
-+ __func__);
-+ return -EINVAL;
-+ }
-+#endif
-+
-+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
-+ , __func__, addr, (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x1) {
-+ pr_err("%s: load address(%x) is not 16bit aligned\n",
-+ __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ if (size & 0x1) {
-+ pr_err("%s: load size(%x) is not 16bit aligned\n",
-+ __func__, size);
-+ return -EINVAL;
-+ }
-+
-+ switch (type) {
-+ case SHT_PROGBITS:
-+ pe_pmem_memcpy_to32(id, addr, data + offset, size);
-+
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported section type(%x)\n", __func__,
-+ type);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Loads an elf section into dmem
-+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
-+ * initialized to 0
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+static int pe_load_dmem_section(int id, const void *data,
-+ struct elf32_shdr *shdr)
-+{
-+ u32 offset = be32_to_cpu(shdr->sh_offset);
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+ u32 type = be32_to_cpu(shdr->sh_type);
-+ u32 size32 = size >> 2;
-+ int i;
-+
-+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
-+ __func__, addr, (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x3) {
-+ pr_err("%s: load address(%x) is not 32bit aligned\n",
-+ __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ switch (type) {
-+ case SHT_PROGBITS:
-+ pe_dmem_memcpy_to32(id, addr, data + offset, size);
-+ break;
-+
-+ case SHT_NOBITS:
-+ for (i = 0; i < size32; i++, addr += 4)
-+ pe_dmem_write(id, 0, addr, 4);
-+
-+ if (size & 0x3)
-+ pe_dmem_write(id, 0, addr, size & 0x3);
-+
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported section type(%x)\n", __func__,
-+ type);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Loads an elf section into DDR
-+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
-+ * initialized to 0
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+static int pe_load_ddr_section(int id, const void *data,
-+ struct elf32_shdr *shdr,
-+ struct device *dev) {
-+ u32 offset = be32_to_cpu(shdr->sh_offset);
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+ u32 type = be32_to_cpu(shdr->sh_type);
-+ u32 flags = be32_to_cpu(shdr->sh_flags);
-+
-+ switch (type) {
-+ case SHT_PROGBITS:
-+ if (flags & SHF_EXECINSTR) {
-+ if (id <= CLASS_MAX_ID) {
-+ /* DO the loading only once in DDR */
-+ if (id == CLASS0_ID) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) rcvd\n",
-+ __func__, addr,
-+ (unsigned long)data + offset);
-+ if (((unsigned long)(data + offset)
-+ & 0x3) != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
-+ , __func__, addr,
-+ (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x1) {
-+ pr_err(
-+ "%s: load address(%x) is not 16bit aligned\n"
-+ , __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ if (size & 0x1) {
-+ pr_err(
-+ "%s: load length(%x) is not 16bit aligned\n"
-+ , __func__, size);
-+ return -EINVAL;
-+ }
-+ memcpy(DDR_PHYS_TO_VIRT(
-+ DDR_PFE_TO_PHYS(addr)),
-+ data + offset, size);
-+ }
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ } else if (id == UTIL_ID) {
-+ if (((unsigned long)(data + offset) & 0x3)
-+ != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
-+ , __func__, addr,
-+ (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x1) {
-+ pr_err(
-+ "%s: load address(%x) is not 16bit aligned\n"
-+ , __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ if (size & 0x1) {
-+ pr_err(
-+ "%s: load length(%x) is not 16bit aligned\n"
-+ , __func__, size);
-+ return -EINVAL;
-+ }
-+
-+ util_pmem_memcpy(DDR_PHYS_TO_VIRT(
-+ DDR_PFE_TO_PHYS(addr)),
-+ data + offset, size);
-+ }
-+#endif
-+ } else {
-+ pr_err(
-+ "%s: unsupported ddr section type(%x) for PE(%d)\n"
-+ , __func__, type, id);
-+ return -EINVAL;
-+ }
-+
-+ } else {
-+ memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
-+ + offset, size);
-+ }
-+
-+ break;
-+
-+ case SHT_NOBITS:
-+ memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
-+
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported section type(%x)\n", __func__,
-+ type);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Loads an elf section into pe lmem
-+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
-+ * initialized to 0
-+ *
-+ * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+static int pe_load_pe_lmem_section(int id, const void *data,
-+ struct elf32_shdr *shdr)
-+{
-+ u32 offset = be32_to_cpu(shdr->sh_offset);
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+ u32 type = be32_to_cpu(shdr->sh_type);
-+
-+ if (id > CLASS_MAX_ID) {
-+ pr_err(
-+ "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
-+ __func__, type, id);
-+ return -EINVAL;
-+ }
-+
-+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
-+ __func__, addr, (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x3) {
-+ pr_err("%s: load address(%x) is not 32bit aligned\n",
-+ __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ switch (type) {
-+ case SHT_PROGBITS:
-+ class_pe_lmem_memcpy_to32(addr, data + offset, size);
-+ break;
-+
-+ case SHT_NOBITS:
-+ class_pe_lmem_memset(addr, 0, size);
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported section type(%x)\n", __func__,
-+ type);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Loads an elf section into a PE
-+ * For now only supports loading a section to dmem (all PE's), pmem (class
-+ * and tmu PE's), and DDR (util PE code)
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
-+ struct device *dev) {
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+
-+ if (IS_DMEM(addr, size))
-+ return pe_load_dmem_section(id, data, shdr);
-+ else if (IS_PMEM(addr, size))
-+ return pe_load_pmem_section(id, data, shdr);
-+ else if (IS_PFE_LMEM(addr, size))
-+ return 0;
-+ else if (IS_PHYS_DDR(addr, size))
-+ return pe_load_ddr_section(id, data, shdr, dev);
-+ else if (IS_PE_LMEM(addr, size))
-+ return pe_load_pe_lmem_section(id, data, shdr);
-+
-+ pr_err("%s: unsupported memory range(%x)\n", __func__,
-+ addr);
-+ return 0;
-+}
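-+
-+/*
-+ * Example (editor's sketch): a section whose sh_addr/sh_size fall inside
-+ * the DMEM window is routed to pe_load_dmem_section(), while an executable
-+ * DDR section for the util PE ends up in pe_load_ddr_section().
-+ */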
-+
-+/**************************** BMU ***************************/
-+
-+/* Initializes a BMU block.
-+ * @param[in] base BMU block base address
-+ * @param[in] cfg BMU configuration
-+ */
-+void bmu_init(void *base, struct BMU_CFG *cfg)
-+{
-+ bmu_disable(base);
-+
-+ bmu_set_config(base, cfg);
-+
-+ bmu_reset(base);
-+}
-+
-+/* Resets a BMU block.
-+ * @param[in] base BMU block base address
-+ */
-+void bmu_reset(void *base)
-+{
-+ writel(CORE_SW_RESET, base + BMU_CTRL);
-+
-+ /* Wait for self clear */
-+ while (readl(base + BMU_CTRL) & CORE_SW_RESET)
-+ ;
-+}
-+
-+/* Enables a BMU block.
-+ * @param[in] base BMU block base address
-+ */
-+void bmu_enable(void *base)
-+{
-+ writel(CORE_ENABLE, base + BMU_CTRL);
-+}
-+
-+/* Disables a BMU block.
-+ * @param[in] base BMU block base address
-+ */
-+void bmu_disable(void *base)
-+{
-+ writel(CORE_DISABLE, base + BMU_CTRL);
-+}
-+
-+/* Sets the configuration of a BMU block.
-+ * @param[in] base BMU block base address
-+ * @param[in] cfg BMU configuration
-+ */
-+void bmu_set_config(void *base, struct BMU_CFG *cfg)
-+{
-+ writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
-+ writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
-+ writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
-+
-+ /* Interrupts are never used */
-+ writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
-+ writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
-+ writel(0x0, base + BMU_INT_ENABLE);
-+}
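-+
-+/*
-+ * Illustrative usage (editor's sketch, hypothetical values):
-+ *
-+ *	struct BMU_CFG cfg = {
-+ *		.baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR),
-+ *		.count = 2048,
-+ *		.size = 128,
-+ *		.low_watermark = 10,
-+ *		.high_watermark = 15,
-+ *	};
-+ *
-+ *	bmu_init(base, &cfg);
-+ */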
-+
-+/**************************** MTIP GEMAC ***************************/
-+
-+/* Enable Rx Checksum Engine. With this enabled, frames with bad IP,
-+ * TCP or UDP checksums are discarded
-+ *
-+ * @param[in] base GEMAC base address.
-+ */
-+void gemac_enable_rx_checksum_offload(void *base)
-+{
-+	/* No configuration exists to do this on the MTIP GEMAC */
-+}
-+
-+/* Disable Rx Checksum Engine.
-+ *
-+ * @param[in] base GEMAC base address.
-+ */
-+void gemac_disable_rx_checksum_offload(void *base)
-+{
-+	/* No configuration exists to do this on the MTIP GEMAC */
-+}
-+
-+/* GEMAC set speed.
-+ * @param[in] base GEMAC base address
-+ * @param[in] gem_speed GEMAC speed (10, 100 or 1000 Mbps)
-+ */
-+void gemac_set_speed(void *base, enum mac_speed gem_speed)
-+{
-+ u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
-+ u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
-+
-+ switch (gem_speed) {
-+ case SPEED_10M:
-+ rcr |= EMAC_RCNTRL_RMII_10T;
-+ break;
-+
-+ case SPEED_1000M:
-+ ecr |= EMAC_ECNTRL_SPEED;
-+ break;
-+
-+ case SPEED_100M:
-+ default:
-+		/* It is in 100M mode */
-+ break;
-+ }
-+ writel(ecr, (base + EMAC_ECNTRL_REG));
-+ writel(rcr, (base + EMAC_RCNTRL_REG));
-+}
-+
-+/* GEMAC set duplex.
-+ * @param[in] base GEMAC base address
-+ * @param[in] duplex GEMAC duplex mode (Full, Half)
-+ */
-+void gemac_set_duplex(void *base, int duplex)
-+{
-+ if (duplex == DUPLEX_HALF) {
-+ writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
-+ + EMAC_TCNTRL_REG);
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
-+ + EMAC_RCNTRL_REG));
-+ } else{
-+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
-+ + EMAC_TCNTRL_REG);
-+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
-+ + EMAC_RCNTRL_REG));
-+ }
-+}
-+
-+/* GEMAC set mode.
-+ * @param[in] base GEMAC base address
-+ * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
-+ */
-+void gemac_set_mode(void *base, int mode)
-+{
-+ u32 val = readl(base + EMAC_RCNTRL_REG);
-+
-+	/* Remove loopback */
-+	val &= ~EMAC_RCNTRL_LOOP;
-+
-+	/* Enable flow control and MII mode */
-+ val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
-+
-+ writel(val, base + EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC enable function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable(void *base)
-+{
-+ writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
-+ EMAC_ECNTRL_REG);
-+}
-+
-+/* GEMAC disable function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_disable(void *base)
-+{
-+ writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
-+ EMAC_ECNTRL_REG);
-+}
-+
-+/* GEMAC TX disable function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_tx_disable(void *base)
-+{
-+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
-+ EMAC_TCNTRL_REG);
-+}
-+
-+void gemac_tx_enable(void *base)
-+{
-+ writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
-+ EMAC_TCNTRL_REG);
-+}
-+
-+/* Sets the hash register of the MAC.
-+ * This register is used for matching unicast and multicast frames.
-+ *
-+ * @param[in] base GEMAC base address.
-+ * @param[in] hash 64-bit hash to be configured.
-+ */
-+void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
-+{
-+ writel(hash->bottom, base + EMAC_GALR);
-+ writel(hash->top, base + EMAC_GAUR);
-+}
-+
-+void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
-+ unsigned int entry_index)
-+{
-+ if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
-+ return;
-+
-+ entry_index = entry_index - 1;
-+ if (entry_index < 1) {
-+ writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
-+ writel((htonl(address->top) | 0x8808), base +
-+ EMAC_PHY_ADDR_HIGH);
-+ } else {
-+ writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
-+ + EMAC_SMAC_0_0);
-+ writel((htonl(address->top) | 0x8808), base + ((entry_index -
-+ 1) * 8) + EMAC_SMAC_0_1);
-+ }
-+}
-+
-+void gemac_clear_laddrN(void *base, unsigned int entry_index)
-+{
-+ if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
-+ return;
-+
-+ entry_index = entry_index - 1;
-+ if (entry_index < 1) {
-+ writel(0, base + EMAC_PHY_ADDR_LOW);
-+ writel(0, base + EMAC_PHY_ADDR_HIGH);
-+ } else {
-+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
-+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
-+ }
-+}
-+
-+/* Set the loopback mode of the MAC. This can be either no loopback for
-+ * normal operation, local loopback through MAC internal loopback module or PHY
-+ * loopback for external loopback through a PHY. This asserts the external
-+ * loop pin.
-+ *
-+ * @param[in] base GEMAC base address.
-+ * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
-+ * Loopback,
-+ * LB_EXT - PHY Loopback.
-+ */
-+void gemac_set_loop(void *base, enum mac_loop gem_loop)
-+{
-+ pr_info("%s()\n", __func__);
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
-+ EMAC_RCNTRL_REG));
-+}
-+
-+/* GEMAC allow all frames (promiscuous mode).
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_copy_all(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
-+ EMAC_RCNTRL_REG));
-+}
-+
-+/* GEMAC disallow all frames (disable promiscuous mode).
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_disable_copy_all(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
-+ EMAC_RCNTRL_REG));
-+}
-+
-+/* GEMAC allow broadcast function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_allow_broadcast(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
-+ EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC no broadcast function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_no_broadcast(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
-+ EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC enable 1536 rx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_1536_rx(void *base)
-+{
-+ /* Set 1536 as Maximum frame length */
-+ writel(readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base +
-+ EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC enable jumbo function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_rx_jmb(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base
-+ + EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC enable stacked vlan function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_stacked_vlan(void *base)
-+{
-+ /* MTIP doesn't support stacked vlan */
-+}
-+
-+/* GEMAC enable pause rx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_pause_rx(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
-+ base + EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC disable pause rx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_disable_pause_rx(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
-+ base + EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC enable pause tx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_pause_tx(void *base)
-+{
-+ writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
-+}
-+
-+/* GEMAC disable pause tx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_disable_pause_tx(void *base)
-+{
-+ writel(0x0, base + EMAC_RX_SECTION_EMPTY);
-+}
-+
-+/* GEMAC wol configuration
-+ * @param[in] base GEMAC base address
-+ * @param[in] wol_conf WoL register configuration
-+ */
-+void gemac_set_wol(void *base, u32 wol_conf)
-+{
-+ u32 val = readl(base + EMAC_ECNTRL_REG);
-+
-+ if (wol_conf)
-+ val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
-+ else
-+ val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
-+ writel(val, base + EMAC_ECNTRL_REG);
-+}
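-+
-+/*
-+ * Illustrative sketch (not part of the original patch): a suspend path could
-+ * arm magic-packet wake-up on both MACs as below; wol_isr() in pfe_hif.c
-+ * disarms it again when the wake interrupt fires.
-+ */
-+#if 0
-+	gemac_set_wol(EMAC1_BASE_ADDR, 1);
-+	gemac_set_wol(EMAC2_BASE_ADDR, 1);
-+#endif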
-+
-+/* Sets the GEMAC bus width (no-op on this hardware).
-+ * @param[in] base	GEMAC base address
-+ * @param[in] width	GEMAC bus width to be set; possible values are 32/64/128
-+ */
-+void gemac_set_bus_width(void *base, int width)
-+{
-+}
-+
-+/* Sets Gemac configuration.
-+ * @param[in] base GEMAC base address
-+ * @param[in] cfg GEMAC configuration
-+ */
-+void gemac_set_config(void *base, struct gemac_cfg *cfg)
-+{
-+ /*GEMAC config taken from VLSI */
-+ writel(0x00000004, base + EMAC_TFWR_STR_FWD);
-+ writel(0x00000005, base + EMAC_RX_SECTION_FULL);
-+ writel(0x00003fff, base + EMAC_TRUNC_FL);
-+ writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
-+ writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
-+
-+ gemac_set_mode(base, cfg->mode);
-+
-+ gemac_set_speed(base, cfg->speed);
-+
-+ gemac_set_duplex(base, cfg->duplex);
-+}
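-+
-+/*
-+ * Illustrative sketch (not part of the original patch): a minimal caller of
-+ * gemac_set_config(). The mode/speed/duplex values are placeholder
-+ * constants; real callers use the definitions from the PFE headers, which
-+ * are not shown here.
-+ */
-+#if 0
-+	struct gemac_cfg cfg = {
-+		.mode = GMII,		/* placeholder */
-+		.speed = SPEED_1000M,	/* placeholder */
-+		.duplex = DUPLEX_FULL,	/* placeholder */
-+	};
-+
-+	gemac_set_config(base, &cfg);
-+#endif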
-+
-+/**************************** GPI ***************************/
-+
-+/* Initializes a GPI block.
-+ * @param[in] base GPI base address
-+ * @param[in] cfg GPI configuration
-+ */
-+void gpi_init(void *base, struct gpi_cfg *cfg)
-+{
-+ gpi_reset(base);
-+
-+ gpi_disable(base);
-+
-+ gpi_set_config(base, cfg);
-+}
-+
-+/* Resets a GPI block.
-+ * @param[in] base GPI base address
-+ */
-+void gpi_reset(void *base)
-+{
-+ writel(CORE_SW_RESET, base + GPI_CTRL);
-+}
-+
-+/* Enables a GPI block.
-+ * @param[in] base GPI base address
-+ */
-+void gpi_enable(void *base)
-+{
-+ writel(CORE_ENABLE, base + GPI_CTRL);
-+}
-+
-+/* Disables a GPI block.
-+ * @param[in] base GPI base address
-+ */
-+void gpi_disable(void *base)
-+{
-+ writel(CORE_DISABLE, base + GPI_CTRL);
-+}
-+
-+/* Sets the configuration of a GPI block.
-+ * @param[in] base GPI base address
-+ * @param[in] cfg GPI configuration
-+ */
-+void gpi_set_config(void *base, struct gpi_cfg *cfg)
-+{
-+ writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
-+ + GPI_LMEM_ALLOC_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
-+ + GPI_LMEM_FREE_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
-+ + GPI_DDR_ALLOC_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
-+ + GPI_DDR_FREE_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
-+ writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
-+ writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
-+ writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
-+ writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
-+ writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
-+ writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
-+
-+ writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
-+ GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
-+ writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
-+ writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
-+ writel(1, base + GPI_TOE_CHKSUM_EN);
-+
-+ if (cfg->mtip_pause_reg) {
-+ writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
-+ writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
-+ }
-+}
-+
-+/**************************** CLASSIFIER ***************************/
-+
-+/* Initializes CLASSIFIER block.
-+ * @param[in] cfg CLASSIFIER configuration
-+ */
-+void class_init(struct class_cfg *cfg)
-+{
-+ class_reset();
-+
-+ class_disable();
-+
-+ class_set_config(cfg);
-+}
-+
-+/* Resets CLASSIFIER block.
-+ *
-+ */
-+void class_reset(void)
-+{
-+ writel(CORE_SW_RESET, CLASS_TX_CTRL);
-+}
-+
-+/* Enables all CLASS-PE's cores.
-+ *
-+ */
-+void class_enable(void)
-+{
-+ writel(CORE_ENABLE, CLASS_TX_CTRL);
-+}
-+
-+/* Disables all CLASS-PE's cores.
-+ *
-+ */
-+void class_disable(void)
-+{
-+ writel(CORE_DISABLE, CLASS_TX_CTRL);
-+}
-+
-+/*
-+ * Sets the configuration of the CLASSIFIER block.
-+ * @param[in] cfg CLASSIFIER configuration
-+ */
-+void class_set_config(struct class_cfg *cfg)
-+{
-+ u32 val;
-+
-+ /* Initialize route table */
-+ if (!cfg->resume)
-+ memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
-+ cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
-+#endif
-+
-+ writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
-+ writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
-+ writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
-+ CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
-+ CLASS_ROUTE_HASH_ENTRY_SIZE);
-+ writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
-+ CLASS_HIF_PARSE);
-+
-+ val = HASH_CRC_PORT_IP | QB2BUS_LE;
-+
-+#if defined(CONFIG_IP_ALIGNED)
-+ val |= IP_ALIGNED;
-+#endif
-+
-+ /*
-+ * Class PE packet steering will only work if TOE mode, bridge fetch or
-+ * route fetch are enabled (see class/qb_fet.v). Route fetch would
-+ * trigger additional memory copies (likely from DDR because of hash
-+ * table size, which cannot be reduced because PE software still
-+ * relies on hash value computed in HW), so when not in TOE mode we
-+ * simply enable HW bridge fetch even though we don't use it.
-+ */
-+ if (cfg->toe_mode)
-+ val |= CLASS_TOE;
-+ else
-+ val |= HW_BRIDGE_FETCH;
-+
-+ writel(val, CLASS_ROUTE_MULTI);
-+
-+ writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
-+ CLASS_ROUTE_TABLE_BASE);
-+ writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
-+ writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
-+ writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
-+ writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
-+ writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
-+
-+ writel(23, CLASS_AFULL_THRES);
-+ writel(23, CLASS_TSQ_FIFO_THRES);
-+
-+ writel(24, CLASS_MAX_BUF_CNT);
-+ writel(24, CLASS_TSQ_MAX_CNT);
-+}
-+
-+/**************************** TMU ***************************/
-+
-+void tmu_reset(void)
-+{
-+ writel(SW_RESET, TMU_CTRL);
-+}
-+
-+/* Initializes TMU block.
-+ * @param[in] cfg TMU configuration
-+ */
-+void tmu_init(struct tmu_cfg *cfg)
-+{
-+ int q, phyno;
-+
-+ tmu_disable(0xF);
-+ mdelay(10);
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ /* keep in soft reset */
-+ writel(SW_RESET, TMU_CTRL);
-+#endif
-+ writel(0x3, TMU_SYS_GENERIC_CONTROL);
-+ writel(750, TMU_INQ_WATERMARK);
-+ writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
-+ GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
-+ GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
-+ GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
-+ TMU_BMU_INQ_ADDR);
-+
-+ writel(0x3FF, TMU_TDQ0_SCH_CTRL); /*
-+ * enabling all 10
-+ * schedulers [9:0] of each TDQ
-+ */
-+ writel(0x3FF, TMU_TDQ1_SCH_CTRL);
-+ writel(0x3FF, TMU_TDQ3_SCH_CTRL);
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
-+#endif
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
-+ /* Extra packet pointers will be stored from this address onwards */
-+
-+ writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
-+ writel(5, TMU_TDQ_IIFG_CFG);
-+ writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
-+
-+ writel(0x0, TMU_CTRL);
-+
-+ /* MEM init */
-+ pr_info("%s: mem init\n", __func__);
-+ writel(MEM_INIT, TMU_CTRL);
-+
-+ while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
-+ ;
-+
-+ /* LLM init */
-+ pr_info("%s: lmem init\n", __func__);
-+ writel(LLM_INIT, TMU_CTRL);
-+
-+ while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
-+ ;
-+#endif
-+ /* set up each queue for tail drop */
-+ for (phyno = 0; phyno < 4; phyno++) {
-+ if (phyno == 2)
-+ continue;
-+ for (q = 0; q < 16; q++) {
-+ u32 qdepth;
-+
-+ writel((phyno << 8) | q, TMU_TEQ_CTRL);
-+ writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
-+
-+ if (phyno == 3)
-+ qdepth = DEFAULT_TMU3_QDEPTH;
-+ else
-+ qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
-+ DEFAULT_MAX_QDEPTH;
-+
-+ /* LOG: 68855 */
-+ /*
-+ * The following is a workaround for the reordered
-+ * packet and BMU2 buffer leakage issue.
-+ */
-+ if (CHIP_REVISION() == 0)
-+ qdepth = 31;
-+
-+ writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
-+ writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
-+ }
-+ }
-+
-+#ifdef CFG_LRO
-+ /* Set TMU-3 queue 5 (LRO) in no-drop mode */
-+ writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
-+ writel(0, TMU_TEQ_QCFG);
-+#endif
-+
-+ writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
-+
-+ writel(0x0, TMU_CTRL);
-+}
-+
-+/* Enables TMU-PE cores.
-+ * @param[in] pe_mask TMU PE mask
-+ */
-+void tmu_enable(u32 pe_mask)
-+{
-+ writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
-+}
-+
-+/* Disables TMU cores.
-+ * @param[in] pe_mask TMU PE mask
-+ */
-+void tmu_disable(u32 pe_mask)
-+{
-+ writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
-+}
-+
-+/* Returns the TMU queue status.
-+ * @param[in]	if_id	gem interface id or TMU index
-+ * @return		bit mask of busy queues; zero means all
-+ *			queues are empty
-+ */
-+u32 tmu_qstatus(u32 if_id)
-+{
-+ return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
-+ offsetof(struct pe_status, tmu_qstatus), 4));
-+}
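-+
-+/*
-+ * Illustrative sketch (not part of the original patch): since tmu_qstatus()
-+ * returns a bit mask of busy queues, a caller can poll it to wait for an
-+ * interface's TMU queues to drain, e.g. before a controlled shutdown.
-+ */
-+#if 0
-+	while (tmu_qstatus(if_id) != 0)	/* zero means all queues are empty */
-+		;
-+#endif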
-+
-+u32 tmu_pkts_processed(u32 if_id)
-+{
-+ return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
-+ offsetof(struct pe_status, rx), 4));
-+}
-+
-+/**************************** UTIL ***************************/
-+
-+/* Resets UTIL block.
-+ */
-+void util_reset(void)
-+{
-+ writel(CORE_SW_RESET, UTIL_TX_CTRL);
-+}
-+
-+/* Initializes UTIL block.
-+ * @param[in] cfg UTIL configuration
-+ */
-+void util_init(struct util_cfg *cfg)
-+{
-+ writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
-+}
-+
-+/* Enables UTIL-PE core.
-+ *
-+ */
-+void util_enable(void)
-+{
-+ writel(CORE_ENABLE, UTIL_TX_CTRL);
-+}
-+
-+/* Disables UTIL-PE core.
-+ *
-+ */
-+void util_disable(void)
-+{
-+ writel(CORE_DISABLE, UTIL_TX_CTRL);
-+}
-+
-+/**************************** HIF ***************************/
-+/* Initializes HIF copy block.
-+ *
-+ */
-+void hif_init(void)
-+{
-+ /*Initialize HIF registers*/
-+ writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
-+ HIF_POLL_CTRL);
-+}
-+
-+/* Enable hif tx DMA and interrupt
-+ *
-+ */
-+void hif_tx_enable(void)
-+{
-+ writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
-+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
-+ HIF_INT_ENABLE);
-+}
-+
-+/* Disable hif tx DMA and interrupt
-+ *
-+ */
-+void hif_tx_disable(void)
-+{
-+ u32 hif_int;
-+
-+ writel(0, HIF_TX_CTRL);
-+
-+ hif_int = readl(HIF_INT_ENABLE);
-+	hif_int &= ~HIF_TXPKT_INT_EN;
-+ writel(hif_int, HIF_INT_ENABLE);
-+}
-+
-+/* Enable hif rx DMA and interrupt
-+ *
-+ */
-+void hif_rx_enable(void)
-+{
-+ hif_rx_dma_start();
-+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
-+ HIF_INT_ENABLE);
-+}
-+
-+/* Disable hif rx DMA and interrupt
-+ *
-+ */
-+void hif_rx_disable(void)
-+{
-+ u32 hif_int;
-+
-+ writel(0, HIF_RX_CTRL);
-+
-+ hif_int = readl(HIF_INT_ENABLE);
-+	hif_int &= ~HIF_RXPKT_INT_EN;
-+ writel(hif_int, HIF_INT_ENABLE);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hif.c
-@@ -0,0 +1,1094 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/interrupt.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dmapool.h>
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/kthread.h>
-+#include <linux/slab.h>
-+
-+#include <linux/io.h>
-+#include <asm/irq.h>
-+
-+#include "pfe_mod.h"
-+
-+#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
-+
-+unsigned char napi_first_batch;
-+
-+static void pfe_tx_do_cleanup(unsigned long data);
-+
-+static int pfe_hif_alloc_descr(struct pfe_hif *hif)
-+{
-+ void *addr;
-+ dma_addr_t dma_addr;
-+ int err = 0;
-+
-+ pr_info("%s\n", __func__);
-+ addr = dma_alloc_coherent(pfe->dev,
-+ HIF_RX_DESC_NT * sizeof(struct hif_desc) +
-+ HIF_TX_DESC_NT * sizeof(struct hif_desc),
-+ &dma_addr, GFP_KERNEL);
-+
-+ if (!addr) {
-+ pr_err("%s: Could not allocate buffer descriptors!\n"
-+ , __func__);
-+ err = -ENOMEM;
-+ goto err0;
-+ }
-+
-+ hif->descr_baseaddr_p = dma_addr;
-+ hif->descr_baseaddr_v = addr;
-+ hif->rx_ring_size = HIF_RX_DESC_NT;
-+ hif->tx_ring_size = HIF_TX_DESC_NT;
-+
-+ return 0;
-+
-+err0:
-+ return err;
-+}
-+
-+#if defined(LS1012A_PFE_RESET_WA)
-+static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
-+{
-+ int ii;
-+ struct hif_desc *desc = hif->rx_base;
-+
-+ /*Mark all descriptors as LAST_BD */
-+ for (ii = 0; ii < hif->rx_ring_size; ii++) {
-+ desc->ctrl |= BD_CTRL_LAST_BD;
-+ desc++;
-+ }
-+}
-+
-+struct class_rx_hdr_t {
-+ u32 next_ptr; /* ptr to the start of the first DDR buffer */
-+ u16 length; /* total packet length */
-+ u16 phyno; /* input physical port number */
-+ u32 status; /* gemac status bits */
-+ u32 status2; /* reserved for software usage */
-+};
-+
-+/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
-+ * except overflow
-+ */
-+#define STATUS_BAD_FRAME_ERR BIT(16)
-+#define STATUS_LENGTH_ERR BIT(17)
-+#define STATUS_CRC_ERR BIT(18)
-+#define STATUS_TOO_SHORT_ERR BIT(19)
-+#define STATUS_TOO_LONG_ERR BIT(20)
-+#define STATUS_CODE_ERR BIT(21)
-+#define STATUS_MC_HASH_MATCH BIT(22)
-+#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
-+#define STATUS_UNICAST_HASH_MATCH BIT(24)
-+#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
-+#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
-+#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
-+#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
-+#define MIN_PKT_SIZE 64
-+
-+static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
-+{
-+ int i;
-+
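-+	/* htonl() converts each 32-bit word to big-endian for the PE side */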
-+ for (i = 0; i < len; i += sizeof(u32)) {
-+ *dst = htonl(*src);
-+ dst++; src++;
-+ }
-+}
-+
-+static void send_dummy_pkt_to_hif(void)
-+{
-+ void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
-+ u32 physaddr;
-+ struct class_rx_hdr_t local_hdr;
-+ static u32 dummy_pkt[] = {
-+ 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
-+ 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
-+ 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
-+ 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
-+
-+ ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
-+ if (!ddr_ptr)
-+ return;
-+
-+ lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
-+ if (!lmem_ptr)
-+ return;
-+
-+ pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
-+ physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
-+
-+ lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
-+
-+ local_hdr.phyno = htons(0); /* RX_PHY_0 */
-+ local_hdr.length = htons(MIN_PKT_SIZE);
-+
-+ local_hdr.next_ptr = htonl((u32)physaddr);
-+ /*Mark checksum is correct */
-+ local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
-+ STATUS_UDP_CHECKSUM_CORRECT |
-+ STATUS_TCP_CHECKSUM_CORRECT |
-+ STATUS_UNICAST_HASH_MATCH |
-+ STATUS_CUMULATIVE_ARC_HIT));
-+ copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
-+ sizeof(local_hdr));
-+
-+ copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
-+ 0x40);
-+
-+ writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
-+}
-+
-+void pfe_hif_rx_idle(struct pfe_hif *hif)
-+{
-+ int hif_stop_loop = 10;
-+ u32 rx_status;
-+
-+ pfe_hif_disable_rx_desc(hif);
-+ pr_info("Bringing hif to idle state...");
-+ writel(0, HIF_INT_ENABLE);
-+ /*If HIF Rx BDP is busy send a dummy packet */
-+ do {
-+ rx_status = readl(HIF_RX_STATUS);
-+ if (rx_status & BDP_CSR_RX_DMA_ACTV)
-+ send_dummy_pkt_to_hif();
-+
-+ usleep_range(100, 150);
-+ } while (--hif_stop_loop);
-+
-+ if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
-+ pr_info("Failed\n");
-+ else
-+ pr_info("Done\n");
-+}
-+#endif
-+
-+static void pfe_hif_free_descr(struct pfe_hif *hif)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ dma_free_coherent(pfe->dev,
-+ hif->rx_ring_size * sizeof(struct hif_desc) +
-+ hif->tx_ring_size * sizeof(struct hif_desc),
-+ hif->descr_baseaddr_v, hif->descr_baseaddr_p);
-+}
-+
-+void pfe_hif_desc_dump(struct pfe_hif *hif)
-+{
-+ struct hif_desc *desc;
-+ unsigned long desc_p;
-+ int ii = 0;
-+
-+ pr_info("%s\n", __func__);
-+
-+ desc = hif->rx_base;
-+ desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
-+ hif->descr_baseaddr_p);
-+
-+ pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
-+ for (ii = 0; ii < hif->rx_ring_size; ii++) {
-+ pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
-+ readl(&desc->status), readl(&desc->ctrl),
-+ readl(&desc->data), readl(&desc->next));
-+ desc++;
-+ }
-+
-+ desc = hif->tx_base;
-+ desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
-+ hif->descr_baseaddr_p);
-+
-+ pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
-+ for (ii = 0; ii < hif->tx_ring_size; ii++) {
-+ pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
-+ readl(&desc->status), readl(&desc->ctrl),
-+ readl(&desc->data), readl(&desc->next));
-+ desc++;
-+ }
-+}
-+
-+/* pfe_hif_release_buffers */
-+static void pfe_hif_release_buffers(struct pfe_hif *hif)
-+{
-+ struct hif_desc *desc;
-+ int i = 0;
-+
-+ hif->rx_base = hif->descr_baseaddr_v;
-+
-+ pr_info("%s\n", __func__);
-+
-+ /*Free Rx buffers */
-+ desc = hif->rx_base;
-+ for (i = 0; i < hif->rx_ring_size; i++) {
-+ if (readl(&desc->data)) {
-+ if ((i < hif->shm->rx_buf_pool_cnt) &&
-+ (!hif->shm->rx_buf_pool[i])) {
-+ /*
-+ * dma_unmap_single(hif->dev, desc->data,
-+ * hif->rx_buf_len[i], DMA_FROM_DEVICE);
-+ */
-+ dma_unmap_single(hif->dev,
-+ DDR_PFE_TO_PHYS(
-+ readl(&desc->data)),
-+ hif->rx_buf_len[i],
-+ DMA_FROM_DEVICE);
-+ hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
-+ } else {
-+ pr_err("%s: buffer pool already full\n"
-+ , __func__);
-+ }
-+ }
-+
-+ writel(0, &desc->data);
-+ writel(0, &desc->status);
-+ writel(0, &desc->ctrl);
-+ desc++;
-+ }
-+}
-+
-+/*
-+ * pfe_hif_init_buffers
-+ * This function initializes the HIF Rx/Tx ring descriptors and
-+ * initializes the Rx queue with buffers.
-+ */
-+static int pfe_hif_init_buffers(struct pfe_hif *hif)
-+{
-+ struct hif_desc *desc, *first_desc_p;
-+ u32 data;
-+ int i = 0;
-+
-+ pr_info("%s\n", __func__);
-+
-+ /* Check enough Rx buffers available in the shared memory */
-+ if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
-+ return -ENOMEM;
-+
-+ hif->rx_base = hif->descr_baseaddr_v;
-+ memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
-+
-+ /*Initialize Rx descriptors */
-+ desc = hif->rx_base;
-+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
-+
-+ for (i = 0; i < hif->rx_ring_size; i++) {
-+ /* Initialize Rx buffers from the shared memory */
-+
-+ data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
-+ pfe_pkt_size, DMA_FROM_DEVICE);
-+ hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
-+ hif->rx_buf_len[i] = pfe_pkt_size;
-+ hif->shm->rx_buf_pool[i] = NULL;
-+
-+ if (likely(dma_mapping_error(hif->dev, data) == 0)) {
-+ writel(DDR_PHYS_TO_PFE(data), &desc->data);
-+ } else {
-+ pr_err("%s : low on mem\n", __func__);
-+
-+ goto err;
-+ }
-+
-+ writel(0, &desc->status);
-+
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ wmb();
-+
-+ writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
-+ | BD_CTRL_DIR | BD_CTRL_DESC_EN
-+ | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
-+
-+ /* Chain descriptors */
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
-+ desc++;
-+ }
-+
-+ /* Overwrite last descriptor to chain it to first one*/
-+ desc--;
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
-+
-+ hif->rxtoclean_index = 0;
-+
-+ /*Initialize Rx buffer descriptor ring base address */
-+ writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
-+
-+ hif->tx_base = hif->rx_base + hif->rx_ring_size;
-+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
-+ hif->rx_ring_size;
-+ memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
-+
-+ /*Initialize tx descriptors */
-+ desc = hif->tx_base;
-+
-+ for (i = 0; i < hif->tx_ring_size; i++) {
-+ /* Chain descriptors */
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
-+ writel(0, &desc->ctrl);
-+ desc++;
-+ }
-+
-+ /* Overwrite last descriptor to chain it to first one */
-+ desc--;
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
-+ hif->txavail = hif->tx_ring_size;
-+ hif->txtosend = 0;
-+ hif->txtoclean = 0;
-+ hif->txtoflush = 0;
-+
-+ /*Initialize Tx buffer descriptor ring base address */
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
-+
-+ return 0;
-+
-+err:
-+ pfe_hif_release_buffers(hif);
-+ return -ENOMEM;
-+}
-+
-+/*
-+ * pfe_hif_client_register
-+ *
-+ * This function is used to register a client driver with the HIF driver.
-+ *
-+ * Return value:
-+ * 0 - on successful registration
-+ */
-+static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
-+ struct hif_client_shm *client_shm)
-+{
-+ struct hif_client *client = &hif->client[client_id];
-+ u32 i, cnt;
-+ struct rx_queue_desc *rx_qbase;
-+ struct tx_queue_desc *tx_qbase;
-+ struct hif_rx_queue *rx_queue;
-+ struct hif_tx_queue *tx_queue;
-+ int err = 0;
-+
-+ pr_info("%s\n", __func__);
-+
-+ spin_lock_bh(&hif->tx_lock);
-+
-+ if (test_bit(client_id, &hif->shm->g_client_status[0])) {
-+ pr_err("%s: client %d already registered\n",
-+ __func__, client_id);
-+ err = -1;
-+ goto unlock;
-+ }
-+
-+ memset(client, 0, sizeof(struct hif_client));
-+
-+ /* Initialize client Rx queues baseaddr, size */
-+
-+ cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
-+ /* Check if client is requesting for more queues than supported */
-+ if (cnt > HIF_CLIENT_QUEUES_MAX)
-+ cnt = HIF_CLIENT_QUEUES_MAX;
-+
-+ client->rx_qn = cnt;
-+ rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
-+ for (i = 0; i < cnt; i++) {
-+ rx_queue = &client->rx_q[i];
-+ rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
-+ rx_queue->size = client_shm->rx_qsize;
-+ rx_queue->write_idx = 0;
-+ }
-+
-+ /* Initialize client Tx queues baseaddr, size */
-+ cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
-+
-+ /* Check if client is requesting for more queues than supported */
-+ if (cnt > HIF_CLIENT_QUEUES_MAX)
-+ cnt = HIF_CLIENT_QUEUES_MAX;
-+
-+ client->tx_qn = cnt;
-+ tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
-+ for (i = 0; i < cnt; i++) {
-+ tx_queue = &client->tx_q[i];
-+ tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
-+ tx_queue->size = client_shm->tx_qsize;
-+ tx_queue->ack_idx = 0;
-+ }
-+
-+ set_bit(client_id, &hif->shm->g_client_status[0]);
-+
-+unlock:
-+ spin_unlock_bh(&hif->tx_lock);
-+
-+ return err;
-+}
-+
-+/*
-+ * pfe_hif_client_unregister
-+ *
-+ * This function is used to unregister a client from the HIF driver.
-+ *
-+ */
-+static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ /*
-+ * Mark client as no longer available (which prevents further packet
-+ * receive for this client)
-+ */
-+ spin_lock_bh(&hif->tx_lock);
-+
-+ if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
-+ pr_err("%s: client %d not registered\n", __func__,
-+ client_id);
-+
-+ spin_unlock_bh(&hif->tx_lock);
-+ return;
-+ }
-+
-+ clear_bit(client_id, &hif->shm->g_client_status[0]);
-+
-+ spin_unlock_bh(&hif->tx_lock);
-+}
-+
-+/*
-+ * client_put_rxpacket-
-+ * This function puts the Rx pkt in the given client Rx queue.
-+ * It actually swaps the Rx pkt into the client Rx descriptor buffer
-+ * and returns the free buffer from it.
-+ *
-+ * If the function returns NULL, the client Rx queue is full and the
-+ * packet couldn't be sent to the client queue.
-+ */
-+static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
-+ u32 flags, u32 client_ctrl, u32 *rem_len)
-+{
-+ void *free_pkt = NULL;
-+ struct rx_queue_desc *desc = queue->base + queue->write_idx;
-+
-+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
-+ if (page_mode) {
-+ int rem_page_size = PAGE_SIZE -
-+ PRESENT_OFST_IN_PAGE(pkt);
-+ int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
-+ pfe_pkt_headroom);
-+ *rem_len = (rem_page_size - cur_pkt_size);
-+ if (*rem_len) {
-+ free_pkt = pkt + cur_pkt_size;
-+ get_page(virt_to_page(free_pkt));
-+ } else {
-+ free_pkt = (void
-+ *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
-+ *rem_len = pfe_pkt_size;
-+ }
-+ } else {
-+ free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
-+ GFP_DMA_PFE);
-+ *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
-+ }
-+
-+ if (free_pkt) {
-+ desc->data = pkt;
-+ desc->client_ctrl = client_ctrl;
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ smp_wmb();
-+ writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
-+ /* queue->write_idx = (queue->write_idx + 1)
-+ * & (queue->size - 1);
-+ */
-+ free_pkt += pfe_pkt_headroom;
-+ }
-+ }
-+
-+ return free_pkt;
-+}
-+
-+/*
-+ * pfe_hif_rx_process-
-+ * This function does PFE HIF Rx queue processing:
-+ * it dequeues packets from the Rx queue and sends them to the
-+ * corresponding client queues.
-+ */
-+static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
-+{
-+ struct hif_desc *desc;
-+ struct hif_hdr *pkt_hdr;
-+ struct __hif_hdr hif_hdr;
-+ void *free_buf;
-+ int rtc, len, rx_processed = 0;
-+ struct __hif_desc local_desc;
-+ int flags;
-+ unsigned int desc_p;
-+ unsigned int buf_size = 0;
-+
-+ spin_lock_bh(&hif->lock);
-+
-+ rtc = hif->rxtoclean_index;
-+
-+ while (rx_processed < budget) {
-+ desc = hif->rx_base + rtc;
-+
-+ __memcpy12(&local_desc, desc);
-+
-+ /* ACK pending Rx interrupt */
-+ if (local_desc.ctrl & BD_CTRL_DESC_EN) {
-+ writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
-+
-+ if (rx_processed == 0) {
-+ if (napi_first_batch == 1) {
-+ desc_p = hif->descr_baseaddr_p +
-+ ((unsigned long int)(desc) -
-+ (unsigned long
-+ int)hif->descr_baseaddr_v);
-+ napi_first_batch = 0;
-+ }
-+ }
-+
-+ __memcpy12(&local_desc, desc);
-+
-+ if (local_desc.ctrl & BD_CTRL_DESC_EN)
-+ break;
-+ }
-+
-+ napi_first_batch = 0;
-+
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_DESC_COUNT]++;
-+#endif
-+ len = BD_BUF_LEN(local_desc.ctrl);
-+ /*
-+ * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
-+ * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
-+ */
-+ dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
-+ hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
-+
-+ pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
-+
-+ /* Track last HIF header received */
-+ if (!hif->started) {
-+ hif->started = 1;
-+
-+ __memcpy8(&hif_hdr, pkt_hdr);
-+
-+ hif->qno = hif_hdr.hdr.q_num;
-+ hif->client_id = hif_hdr.hdr.client_id;
-+ hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
-+ hif_hdr.hdr.client_ctrl;
-+ flags = CL_DESC_FIRST;
-+
-+ } else {
-+ flags = 0;
-+ }
-+
-+ if (local_desc.ctrl & BD_CTRL_LIFM)
-+ flags |= CL_DESC_LAST;
-+
-+ /* Check for valid client id and still registered */
-+ if ((hif->client_id >= HIF_CLIENTS_MAX) ||
-+ !(test_bit(hif->client_id,
-+ &hif->shm->g_client_status[0]))) {
-+ printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
-+ __func__,
-+ hif->client_id,
-+ hif->qno);
-+
-+ free_buf = pkt_hdr;
-+
-+ goto pkt_drop;
-+ }
-+
-+		/* Check for a valid queue number */
-+ if (hif->client[hif->client_id].rx_qn <= hif->qno) {
-+ pr_info("%s: packet with invalid queue: %d\n"
-+ , __func__, hif->qno);
-+ hif->qno = 0;
-+ }
-+
-+ free_buf =
-+ client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
-+ (void *)pkt_hdr, len, flags,
-+ hif->client_ctrl, &buf_size);
-+
-+ hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
-+ hif->qno);
-+
-+ if (unlikely(!free_buf)) {
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
-+#endif
-+ /*
-+ * If we want to keep in polling mode to retry later,
-+ * we need to tell napi that we consumed
-+ * the full budget or we will hit a livelock scenario.
-+ * The core code keeps this napi instance
-+ * at the head of the list and none of the other
-+ * instances get to run
-+ */
-+ rx_processed = budget;
-+
-+ if (flags & CL_DESC_FIRST)
-+ hif->started = 0;
-+
-+ break;
-+ }
-+
-+pkt_drop:
-+ /*Fill free buffer in the descriptor */
-+ hif->rx_buf_addr[rtc] = free_buf;
-+ hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
-+ writel((DDR_PHYS_TO_PFE
-+ ((u32)dma_map_single(hif->dev,
-+ free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
-+ &desc->data);
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ wmb();
-+ writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
-+ BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
-+ &desc->ctrl);
-+
-+ rtc = (rtc + 1) & (hif->rx_ring_size - 1);
-+
-+ if (local_desc.ctrl & BD_CTRL_LIFM) {
-+ if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
-+ rx_processed++;
-+
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_PACKET_COUNT]++;
-+#endif
-+ }
-+ hif->started = 0;
-+ }
-+ }
-+
-+ hif->rxtoclean_index = rtc;
-+ spin_unlock_bh(&hif->lock);
-+
-+ /* we made some progress, re-start rx dma in case it stopped */
-+ hif_rx_dma_start();
-+
-+ return rx_processed;
-+}
-+
-+/*
-+ * client_ack_txpacket-
-+ * This function acks the Tx packet in the given client Tx queue by resetting
-+ * the ownership bit in the descriptor.
-+ */
-+static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
-+ unsigned int q_no)
-+{
-+ struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
-+ struct tx_queue_desc *desc = queue->base + queue->ack_idx;
-+
-+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
-+ writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
-+ /* queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1); */
-+
-+ return 0;
-+
-+ } else {
-+ /*This should not happen */
-+ pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
-+ hif->txtosend, hif->txtoclean, hif->txavail,
-+ client_id, q_no, queue, queue->ack_idx);
-+ WARN(1, "%s: doesn't own this descriptor", __func__);
-+ return 1;
-+ }
-+}
-+
-+void __hif_tx_done_process(struct pfe_hif *hif, int count)
-+{
-+ struct hif_desc *desc;
-+ struct hif_desc_sw *desc_sw;
-+ int ttc, tx_avl;
-+ int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
-+
-+ ttc = hif->txtoclean;
-+ tx_avl = hif->txavail;
-+
-+ while ((tx_avl < hif->tx_ring_size) && count--) {
-+ desc = hif->tx_base + ttc;
-+
-+ if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
-+ break;
-+
-+ desc_sw = &hif->tx_sw_queue[ttc];
-+
-+ if (desc_sw->data) {
-+ /*
-+ * dmap_unmap_single(hif->dev, desc_sw->data,
-+ * desc_sw->len, DMA_TO_DEVICE);
-+ */
-+ dma_unmap_single(hif->dev, desc_sw->data,
-+ desc_sw->len, DMA_TO_DEVICE);
-+ }
-+
-+		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
-+ pr_err("Invalid cl id %d\n", desc_sw->client_id);
-+
-+ pkts_done[desc_sw->client_id]++;
-+
-+ client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
-+
-+ ttc = (ttc + 1) & (hif->tx_ring_size - 1);
-+ tx_avl++;
-+ }
-+
-+ if (pkts_done[0])
-+ hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
-+ if (pkts_done[1])
-+ hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
-+
-+ hif->txtoclean = ttc;
-+ hif->txavail = tx_avl;
-+
-+ if (!count) {
-+ tasklet_schedule(&hif->tx_cleanup_tasklet);
-+ } else {
-+ /*Enable Tx done interrupt */
-+ writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
-+ HIF_INT_ENABLE);
-+ }
-+}
-+
-+static void pfe_tx_do_cleanup(unsigned long data)
-+{
-+ struct pfe_hif *hif = (struct pfe_hif *)data;
-+
-+ writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
-+
-+ hif_tx_done_process(hif, 64);
-+}
-+
-+/*
-+ * __hif_xmit_pkt -
-+ * This function puts one packet in the HIF Tx queue
-+ */
-+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
-+ q_no, void *data, u32 len, unsigned int flags)
-+{
-+ struct hif_desc *desc;
-+ struct hif_desc_sw *desc_sw;
-+
-+ desc = hif->tx_base + hif->txtosend;
-+ desc_sw = &hif->tx_sw_queue[hif->txtosend];
-+
-+ desc_sw->len = len;
-+ desc_sw->client_id = client_id;
-+ desc_sw->q_no = q_no;
-+ desc_sw->flags = flags;
-+
-+ if (flags & HIF_DONT_DMA_MAP) {
-+ desc_sw->data = 0;
-+ writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
-+ } else {
-+ desc_sw->data = dma_map_single(hif->dev, data, len,
-+ DMA_TO_DEVICE);
-+ writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
-+ }
-+
-+ hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
-+ hif->txavail--;
-+
-+ if ((!((flags & HIF_DATA_VALID) && (flags &
-+ HIF_LAST_BUFFER))))
-+ goto skip_tx;
-+
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ wmb();
-+
-+ do {
-+ desc_sw = &hif->tx_sw_queue[hif->txtoflush];
-+ desc = hif->tx_base + hif->txtoflush;
-+
-+ if (desc_sw->flags & HIF_LAST_BUFFER) {
-+ writel((BD_CTRL_LIFM |
-+ BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
-+ | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
-+ BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
-+ &desc->ctrl);
-+ } else {
-+ writel((BD_CTRL_DESC_EN |
-+ BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
-+ }
-+ hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
-+	} while (hif->txtoflush != hif->txtosend);
-+
-+skip_tx:
-+ return;
-+}
-+
-+int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
-+ void *data, unsigned int len)
-+{
-+ int rc = 0;
-+
-+ spin_lock_bh(&hif->tx_lock);
-+
-+ if (!hif->txavail) {
-+ rc = 1;
-+ } else {
-+ __hif_xmit_pkt(hif, client_id, q_no, data, len,
-+ HIF_FIRST_BUFFER | HIF_LAST_BUFFER);
-+ hif_tx_dma_start();
-+ }
-+
-+ if (hif->txavail < (hif->tx_ring_size >> 1))
-+ __hif_tx_done_process(hif, TX_FREE_MAX_COUNT);
-+
-+ spin_unlock_bh(&hif->tx_lock);
-+
-+ return rc;
-+}
-+
-+static irqreturn_t wol_isr(int irq, void *dev_id)
-+{
-+ pr_info("WoL\n");
-+ gemac_set_wol(EMAC1_BASE_ADDR, 0);
-+ gemac_set_wol(EMAC2_BASE_ADDR, 0);
-+ return IRQ_HANDLED;
-+}
-+
-+/*
-+ * hif_isr-
-+ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
-+ */
-+static irqreturn_t hif_isr(int irq, void *dev_id)
-+{
-+ struct pfe_hif *hif = (struct pfe_hif *)dev_id;
-+ int int_status;
-+ int int_enable_mask;
-+
-+ /*Read hif interrupt source register */
-+ int_status = readl_relaxed(HIF_INT_SRC);
-+ int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
-+
-+ if ((int_status & HIF_INT) == 0)
-+ return IRQ_NONE;
-+
-+ int_status &= ~(HIF_INT);
-+
-+ if (int_status & HIF_RXPKT_INT) {
-+ int_status &= ~(HIF_RXPKT_INT);
-+ int_enable_mask &= ~(HIF_RXPKT_INT);
-+
-+ napi_first_batch = 1;
-+
-+ if (napi_schedule_prep(&hif->napi)) {
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_SCHED_COUNT]++;
-+#endif
-+ __napi_schedule(&hif->napi);
-+ }
-+ }
-+ if (int_status & HIF_TXPKT_INT) {
-+ int_status &= ~(HIF_TXPKT_INT);
-+ int_enable_mask &= ~(HIF_TXPKT_INT);
-+		/* Schedule tx cleanup tasklet */
-+ tasklet_schedule(&hif->tx_cleanup_tasklet);
-+ }
-+
-+ /*Disable interrupts, they will be enabled after they are serviced */
-+ writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
-+
-+ if (int_status) {
-+ pr_info("%s : Invalid interrupt : %d\n", __func__,
-+ int_status);
-+ writel(int_status, HIF_INT_SRC);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
-+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
-+{
-+ unsigned int client_id = data1;
-+
-+ if (client_id >= HIF_CLIENTS_MAX) {
-+ pr_err("%s: client id %d out of bounds\n", __func__,
-+ client_id);
-+ return;
-+ }
-+
-+ switch (req) {
-+ case REQUEST_CL_REGISTER:
-+		/* Request to register a client */
-+ pr_info("%s: register client_id %d\n",
-+ __func__, client_id);
-+ pfe_hif_client_register(hif, client_id, (struct
-+ hif_client_shm *)&hif->shm->client[client_id]);
-+ break;
-+
-+ case REQUEST_CL_UNREGISTER:
-+ pr_info("%s: unregister client_id %d\n",
-+ __func__, client_id);
-+
-+		/* Request to unregister a client */
-+ pfe_hif_client_unregister(hif, client_id);
-+
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported request %d\n",
-+ __func__, req);
-+ break;
-+ }
-+
-+ /*
-+ * Process client Tx queues
-+ * Currently we don't have checking for tx pending
-+ */
-+}
-+
-+/*
-+ * pfe_hif_rx_poll
-+ * This function is the NAPI poll function that processes the HIF Rx queue.
-+ */
-+static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
-+{
-+ struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
-+ int work_done;
-+
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_POLL_COUNT]++;
-+#endif
-+
-+ work_done = pfe_hif_rx_process(hif, budget);
-+
-+ if (work_done < budget) {
-+ napi_complete(napi);
-+ writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
-+ HIF_INT_ENABLE);
-+ }
-+#ifdef HIF_NAPI_STATS
-+ else
-+ hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
-+#endif
-+
-+ return work_done;
-+}
-+
-+/*
-+ * pfe_hif_init
-+ * This function initializes the base addresses, IRQs, etc.
-+ */
-+int pfe_hif_init(struct pfe *pfe)
-+{
-+ struct pfe_hif *hif = &pfe->hif;
-+ int err;
-+
-+ pr_info("%s\n", __func__);
-+
-+ hif->dev = pfe->dev;
-+ hif->irq = pfe->hif_irq;
-+
-+ err = pfe_hif_alloc_descr(hif);
-+ if (err)
-+ goto err0;
-+
-+ if (pfe_hif_init_buffers(hif)) {
-+ pr_err("%s: Could not initialize buffer descriptors\n"
-+ , __func__);
-+ err = -ENOMEM;
-+ goto err1;
-+ }
-+
-+ /* Initialize NAPI for Rx processing */
-+ init_dummy_netdev(&hif->dummy_dev);
-+ netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
-+ HIF_RX_POLL_WEIGHT);
-+ napi_enable(&hif->napi);
-+
-+ spin_lock_init(&hif->tx_lock);
-+ spin_lock_init(&hif->lock);
-+
-+ hif_init();
-+ hif_rx_enable();
-+ hif_tx_enable();
-+
-+ /* Disable tx done interrupt */
-+ writel(HIF_INT_MASK, HIF_INT_ENABLE);
-+
-+ gpi_enable(HGPI_BASE_ADDR);
-+
-+ err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
-+ if (err) {
-+ pr_err("%s: failed to get the hif IRQ = %d\n",
-+ __func__, hif->irq);
-+ goto err1;
-+ }
-+
-+ err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
-+ if (err) {
-+ pr_err("%s: failed to get the wol IRQ = %d\n",
-+ __func__, pfe->wol_irq);
-+ goto err1;
-+ }
-+
-+ tasklet_init(&hif->tx_cleanup_tasklet,
-+ (void(*)(unsigned long))pfe_tx_do_cleanup,
-+ (unsigned long)hif);
-+
-+ return 0;
-+err1:
-+ pfe_hif_free_descr(hif);
-+err0:
-+ return err;
-+}
-+
-+/* pfe_hif_exit- */
-+void pfe_hif_exit(struct pfe *pfe)
-+{
-+ struct pfe_hif *hif = &pfe->hif;
-+
-+ pr_info("%s\n", __func__);
-+
-+ tasklet_kill(&hif->tx_cleanup_tasklet);
-+
-+	spin_lock_bh(&hif->lock);
-+	/* Make sure all clients are disabled */
-+	hif->shm->g_client_status[0] = 0;
-+	hif->shm->g_client_status[1] = 0;
-+
-+ spin_unlock_bh(&hif->lock);
-+
-+ /*Disable Rx/Tx */
-+ gpi_disable(HGPI_BASE_ADDR);
-+ hif_rx_disable();
-+ hif_tx_disable();
-+
-+ napi_disable(&hif->napi);
-+ netif_napi_del(&hif->napi);
-+
-+ free_irq(pfe->wol_irq, pfe);
-+ free_irq(hif->irq, hif);
-+
-+ pfe_hif_release_buffers(hif);
-+ pfe_hif_free_descr(hif);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
-@@ -0,0 +1,638 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/workqueue.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dmapool.h>
-+#include <linux/sched.h>
-+#include <linux/skbuff.h>
-+#include <linux/moduleparam.h>
-+#include <linux/cpu.h>
-+
-+#include "pfe_mod.h"
-+#include "pfe_hif.h"
-+#include "pfe_hif_lib.h"
-+
-+unsigned int lro_mode;
-+unsigned int page_mode;
-+unsigned int tx_qos;
-+unsigned int pfe_pkt_size;
-+unsigned int pfe_pkt_headroom;
-+unsigned int emac_txq_cnt;
-+
-+/*
-+ * @pfe_hif_lib.c.
-+ * Common functions used by HIF client drivers
-+ */
-+
-+/*HIF shared memory Global variable */
-+struct hif_shm ghif_shm;
-+
-+/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
-+ * This function should be called after pfe_hif_exit.
-+ *
-+ * @param[in] hif_shm Shared memory address location in DDR
-+ */
-+static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
-+{
-+ int i;
-+ void *pkt;
-+
-+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
-+ pkt = hif_shm->rx_buf_pool[i];
-+ if (pkt) {
-+ hif_shm->rx_buf_pool[i] = NULL;
-+ pkt -= pfe_pkt_headroom;
-+
-+ if (page_mode)
-+ put_page(virt_to_page(pkt));
-+ else
-+ kfree(pkt);
-+ }
-+ }
-+}
-+
-+/* Initialize shared memory used between HIF driver and clients,
-+ * allocate rx_buffer_pool required for HIF Rx descriptors.
-+ * This function should be called before initializing HIF driver.
-+ *
-+ * @param[in] hif_shm Shared memory address location in DDR
-+ * @return	0 - on success, <0 on failure to initialize
-+ */
-+static int pfe_hif_shm_init(struct hif_shm *hif_shm)
-+{
-+ int i;
-+ void *pkt;
-+
-+ memset(hif_shm, 0, sizeof(struct hif_shm));
-+ hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
-+
-+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
-+ if (page_mode) {
-+ pkt = (void *)__get_free_page(GFP_KERNEL |
-+ GFP_DMA_PFE);
-+ } else {
-+ pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
-+ }
-+
-+ if (pkt)
-+ hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
-+ else
-+ goto err0;
-+ }
-+
-+ return 0;
-+
-+err0:
-+ pr_err("%s Low memory\n", __func__);
-+ pfe_hif_shm_clean(hif_shm);
-+ return -ENOMEM;
-+}
-+
-+/* This function sends an indication to the HIF driver
-+ *
-+ * @param[in] hif hif context
-+ */
-+static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
-+ data2)
-+{
-+ hif_process_client_req(hif, req, data1, data2);
-+}
-+
-+void hif_lib_indicate_client(int client_id, int event_type, int qno)
-+{
-+ struct hif_client_s *client = pfe->hif_client[client_id];
-+
-+ if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
-+ HIF_CLIENT_QUEUES_MAX))
-+ return;
-+
-+ if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
-+ client->event_handler(client->priv, event_type, qno);
-+}
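-+
-+/*
-+ * Note: the test_and_set_bit() above pairs with the test_and_clear_bit() in
-+ * hif_lib_event_handler_start(), so a burst of events on one queue invokes
-+ * the client handler only once until the client re-arms the event.
-+ */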
-+
-+/* This function releases the Rx queue descriptor memory and pre-filled buffers
-+ *
-+ * @param[in] client hif_client context
-+ */
-+static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
-+{
-+ struct rx_queue_desc *desc;
-+ int qno, ii;
-+ void *buf;
-+
-+ for (qno = 0; qno < client->rx_qn; qno++) {
-+ desc = client->rx_q[qno].base;
-+
-+ for (ii = 0; ii < client->rx_q[qno].size; ii++) {
-+ buf = (void *)desc->data;
-+ if (buf) {
-+ buf -= pfe_pkt_headroom;
-+
-+ if (page_mode)
-+ free_page((unsigned long)buf);
-+ else
-+ kfree(buf);
-+
-+ desc->ctrl = 0;
-+ }
-+
-+ desc++;
-+ }
-+ }
-+
-+ kfree(client->rx_qbase);
-+}
-+
-+/* This function allocates memory for the rxq descriptors and pre-fills the
-+ * rx queues with buffers.
-+ * @param[in] client	client context
-+ * @param[in] q_size	size of the rxQ; all queues are of the same size
-+ */
-+static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
-+ q_size)
-+{
-+ struct rx_queue_desc *desc;
-+ struct hif_client_rx_queue *queue;
-+ int ii, qno;
-+
-+ /*Allocate memory for the client queues */
-+ client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
-+ rx_queue_desc), GFP_KERNEL);
-+ if (!client->rx_qbase)
-+ goto err;
-+
-+ for (qno = 0; qno < client->rx_qn; qno++) {
-+ queue = &client->rx_q[qno];
-+
-+ queue->base = client->rx_qbase + qno * q_size * sizeof(struct
-+ rx_queue_desc);
-+ queue->size = q_size;
-+ queue->read_idx = 0;
-+ queue->write_idx = 0;
-+
-+ pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
-+ queue->base, queue->size);
-+ }
-+
-+ for (qno = 0; qno < client->rx_qn; qno++) {
-+ queue = &client->rx_q[qno];
-+ desc = queue->base;
-+
-+ for (ii = 0; ii < queue->size; ii++) {
-+ desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
-+ CL_DESC_OWN;
-+ desc++;
-+ }
-+ }
-+
-+ return 0;
-+
-+err:
-+ return 1;
-+}
-+
-+/* Advance a ring index in place; assumes queue->size is a power of two */
-+#define inc_cl_idx(idxname) \
-+	((idxname) = ((idxname) + 1) & (queue->size - 1))
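-+
-+/*
-+ * Illustrative note (not part of the original patch): with a power-of-two
-+ * queue size, "(idx + 1) & (size - 1)" wraps the index without a division;
-+ * for size == 256 the sequence is 254 -> 255 -> 0 -> 1.
-+ */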
-+
-+static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
-+{
-+ pr_debug("%s\n", __func__);
-+
-+	/*
-+	 * Check if there are any pending packets. Clients must flush their tx
-+	 * queues before unregistering, by calling
-+	 * hif_lib_tx_get_next_complete().
-+	 *
-+	 * HIF no longer calls the client once it is unregistered.
-+	 */
-+ if (queue->tx_pending)
-+ pr_err("%s: pending transmit packets\n", __func__);
-+}
-+
-+static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
-+{
-+ int qno;
-+
-+ pr_debug("%s\n", __func__);
-+
-+ for (qno = 0; qno < client->tx_qn; qno++)
-+ hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
-+
-+ kfree(client->tx_qbase);
-+}
-+
-+static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
-+ q_size)
-+{
-+ struct hif_client_tx_queue *queue;
-+ int qno;
-+
-+ client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
-+ tx_queue_desc), GFP_KERNEL);
-+ if (!client->tx_qbase)
-+ return 1;
-+
-+ for (qno = 0; qno < client->tx_qn; qno++) {
-+ queue = &client->tx_q[qno];
-+
-+ queue->base = client->tx_qbase + qno * q_size * sizeof(struct
-+ tx_queue_desc);
-+ queue->size = q_size;
-+ queue->read_idx = 0;
-+ queue->write_idx = 0;
-+ queue->tx_pending = 0;
-+ queue->nocpy_flag = 0;
-+ queue->prev_tmu_tx_pkts = 0;
-+ queue->done_tmu_tx_pkts = 0;
-+
-+ pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
-+ queue->base, queue->size);
-+ }
-+
-+ return 0;
-+}
-+
-+static int hif_lib_event_dummy(void *priv, int event_type, int qno)
-+{
-+ return 0;
-+}
-+
-+int hif_lib_client_register(struct hif_client_s *client)
-+{
-+ struct hif_shm *hif_shm;
-+ struct hif_client_shm *client_shm;
-+ int err, i;
-+ /* int loop_cnt = 0; */
-+
-+ pr_debug("%s\n", __func__);
-+
-+ /*Allocate memory before spin_lock*/
-+ if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
-+ err = -ENOMEM;
-+ goto err_rx;
-+ }
-+
-+ if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
-+ err = -ENOMEM;
-+ goto err_tx;
-+ }
-+
-+ spin_lock_bh(&pfe->hif.lock);
-+ if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
-+ (pfe->hif_client[client->id])) {
-+ err = -EINVAL;
-+ goto err;
-+ }
-+
-+ hif_shm = client->pfe->hif.shm;
-+
-+ if (!client->event_handler)
-+ client->event_handler = hif_lib_event_dummy;
-+
-+ /*Initialize client specific shared memory */
-+ client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
-+ client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
-+ client_shm->rx_qsize = client->rx_qsize;
-+ client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
-+ client_shm->tx_qsize = client->tx_qsize;
-+ client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
-+ (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
-+ /* spin_lock_init(&client->rx_lock); */
-+
-+ for (i = 0; i < HIF_EVENT_MAX; i++) {
-+ client->queue_mask[i] = 0; /*
-+ * By default all events are
-+ * unmasked
-+ */
-+ }
-+
-+ /*Indicate to HIF driver*/
-+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
-+
-+ pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
-+ __func__, client, client->id, client->tx_qsize,
-+ client->rx_qsize);
-+
-+ client->cpu_id = -1;
-+
-+ pfe->hif_client[client->id] = client;
-+ spin_unlock_bh(&pfe->hif.lock);
-+
-+ return 0;
-+
-+err:
-+ spin_unlock_bh(&pfe->hif.lock);
-+ hif_lib_client_release_tx_buffers(client);
-+
-+err_tx:
-+ hif_lib_client_release_rx_buffers(client);
-+
-+err_rx:
-+ return err;
-+}
-+
-+int hif_lib_client_unregister(struct hif_client_s *client)
-+{
-+ struct pfe *pfe = client->pfe;
-+ u32 client_id = client->id;
-+
-+ pr_info(
-+ "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n"
-+ , __func__, client, client->id, client->tx_qsize,
-+ client->rx_qsize);
-+
-+ spin_lock_bh(&pfe->hif.lock);
-+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
-+
-+ hif_lib_client_release_tx_buffers(client);
-+ hif_lib_client_release_rx_buffers(client);
-+ pfe->hif_client[client_id] = NULL;
-+ spin_unlock_bh(&pfe->hif.lock);
-+
-+ return 0;
-+}
-+
-+int hif_lib_event_handler_start(struct hif_client_s *client, int event,
-+ int qno)
-+{
-+ struct hif_client_rx_queue *queue = &client->rx_q[qno];
-+ struct rx_queue_desc *desc = queue->base + queue->read_idx;
-+
-+ if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
-+ pr_debug("%s: Unsupported event : %d queue number : %d\n",
-+ __func__, event, qno);
-+ return -1;
-+ }
-+
-+ test_and_clear_bit(qno, &client->queue_mask[event]);
-+
-+ switch (event) {
-+ case EVENT_RX_PKT_IND:
-+ if (!(desc->ctrl & CL_DESC_OWN))
-+ hif_lib_indicate_client(client->id,
-+ EVENT_RX_PKT_IND, qno);
-+ break;
-+
-+ case EVENT_HIGH_RX_WM:
-+ case EVENT_TXDONE_IND:
-+ default:
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * This function gets one packet from the specified client queue.
-+ * It also refills the rx buffer.
-+ */
-+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
-+ *ofst, unsigned int *rx_ctrl,
-+ unsigned int *desc_ctrl, void **priv_data)
-+{
-+ struct hif_client_rx_queue *queue = &client->rx_q[qno];
-+ struct rx_queue_desc *desc;
-+ void *pkt = NULL;
-+
-+	/*
-+	 * The following lock protects rx queue access from
-+	 * hif_lib_event_handler_start.
-+	 * In general the lock is not required, because hif_lib_xmit_pkt and
-+	 * hif_lib_event_handler_start are called from the napi poll handler,
-+	 * which is not re-entrant. But if some client uses them differently,
-+	 * the lock is required.
-+	 */
-+ /*spin_lock_irqsave(&client->rx_lock, flags); */
-+ desc = queue->base + queue->read_idx;
-+ if (!(desc->ctrl & CL_DESC_OWN)) {
-+ pkt = desc->data - pfe_pkt_headroom;
-+
-+ *rx_ctrl = desc->client_ctrl;
-+ *desc_ctrl = desc->ctrl;
-+
-+ if (desc->ctrl & CL_DESC_FIRST) {
-+ u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
-+
-+ if (size) {
-+ *len = CL_DESC_BUF_LEN(desc->ctrl) -
-+ PFE_PKT_HEADER_SZ - size;
-+ *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
-+ + size;
-+ *priv_data = desc->data + PFE_PKT_HEADER_SZ;
-+ } else {
-+ *len = CL_DESC_BUF_LEN(desc->ctrl) -
-+ PFE_PKT_HEADER_SZ;
-+ *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ;
-+ *priv_data = NULL;
-+ }
-+
-+ } else {
-+ *len = CL_DESC_BUF_LEN(desc->ctrl);
-+ *ofst = pfe_pkt_headroom;
-+ }
-+
-+ /*
-+ * Needed so we don't free a buffer/page
-+ * twice on module_exit
-+ */
-+ desc->data = NULL;
-+
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ smp_wmb();
-+
-+ desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
-+ inc_cl_idx(queue->read_idx);
-+ }
-+
-+ /*spin_unlock_irqrestore(&client->rx_lock, flags); */
-+ return pkt;
-+}
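-+
-+/*
-+ * Illustrative sketch (not part of the original patch): a client such as the
-+ * ethernet driver typically drains a queue from its NAPI poll handler by
-+ * calling hif_lib_receive_pkt() until it returns NULL. The variables below
-+ * are hypothetical.
-+ */
-+#if 0
-+	while (budget--) {
-+		pkt = hif_lib_receive_pkt(client, qno, &len, &ofst,
-+					  &rx_ctrl, &desc_ctrl, &priv_data);
-+		if (!pkt)	/* queue empty: descriptor still owned by HIF */
-+			break;
-+		/* hand (pkt + ofst, len) to the network stack */
-+	}
-+#endif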
-+
-+static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
-+ client_id, unsigned int qno,
-+ u32 client_ctrl)
-+{
-+	/* Optimize the write since the destination may be non-cacheable */
-+ if (!((unsigned long)pkt_hdr & 0x3)) {
-+ ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
-+ client_id;
-+ } else {
-+ ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
-+ ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
-+ }
-+}
-+
-+/* This function puts the given packet in the specified client queue; the
-+ * caller manages queue space and passes the HIF_FIRST_BUFFER/HIF_LAST_BUFFER
-+ * flags.
-+ */
-+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
-+ *data, unsigned int len, u32 client_ctrl,
-+ unsigned int flags, void *client_data)
-+{
-+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
-+ struct tx_queue_desc *desc = queue->base + queue->write_idx;
-+
-+ /* First buffer */
-+ if (flags & HIF_FIRST_BUFFER) {
-+ data -= sizeof(struct hif_hdr);
-+ len += sizeof(struct hif_hdr);
-+
-+ hif_hdr_write(data, client->id, qno, client_ctrl);
-+ }
-+
-+ desc->data = client_data;
-+ desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
-+
-+ __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
-+
-+ inc_cl_idx(queue->write_idx);
-+ queue->tx_pending++;
-+ queue->jiffies_last_packet = jiffies;
-+}
-+
-+/* This function puts the given packet in the specified client queue and
-+ * returns nonzero if the queue is full.
-+ */
-+int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
-+ unsigned int len, u32 client_ctrl, void *client_data)
-+{
-+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
-+ struct tx_queue_desc *desc = queue->base + queue->write_idx;
-+
-+ if (queue->tx_pending < queue->size) {
-+ /*Construct pkt header */
-+
-+ data -= sizeof(struct hif_hdr);
-+ len += sizeof(struct hif_hdr);
-+
-+ hif_hdr_write(data, client->id, qno, client_ctrl);
-+
-+ desc->data = client_data;
-+ desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(HIF_FIRST_BUFFER |
-+ HIF_LAST_BUFFER | HIF_DATA_VALID);
-+
-+ if (hif_xmit_pkt(&pfe->hif, client->id, qno, data, len))
-+ return 1;
-+
-+ inc_cl_idx(queue->write_idx);
-+ queue->tx_pending++;
-+ queue->jiffies_last_packet = jiffies;
-+
-+ return 0;
-+ }
-+
-+ pr_debug("%s Tx client %d qno %d is full\n", __func__, client->id,
-+ qno);
-+ return 1;
-+}
-+
-+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
-+ unsigned int *flags, int count)
-+{
-+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
-+ struct tx_queue_desc *desc = queue->base + queue->read_idx;
-+
-+ pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
-+ queue->read_idx, queue->tx_pending);
-+
-+ if (!queue->tx_pending)
-+ return NULL;
-+
-+ if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
-+ u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
-+ client->id, TMU_DM_TX_TRANS, 4));
-+
-+ if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
-+ queue->done_tmu_tx_pkts = UINT_MAX -
-+ queue->prev_tmu_tx_pkts + tmu_tx_pkts;
-+ else
-+ queue->done_tmu_tx_pkts = tmu_tx_pkts -
-+ queue->prev_tmu_tx_pkts;
-+
-+ queue->prev_tmu_tx_pkts = tmu_tx_pkts;
-+
-+ if (!queue->done_tmu_tx_pkts)
-+ return NULL;
-+ }
-+
-+ if (desc->ctrl & CL_DESC_OWN)
-+ return NULL;
-+
-+ inc_cl_idx(queue->read_idx);
-+ queue->tx_pending--;
-+
-+ *flags = CL_DESC_GET_FLAGS(desc->ctrl);
-+
-+ if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
-+ queue->done_tmu_tx_pkts--;
-+
-+ return desc->data;
-+}
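-+
-+/*
-+ * Illustrative sketch (not part of the original patch): after transmitting
-+ * with hif_lib_xmit_pkt(), a client reclaims completed buffers by calling
-+ * hif_lib_tx_get_next_complete() until it returns NULL, freeing each
-+ * returned client_data (typically an skb). The variables are hypothetical.
-+ */
-+#if 0
-+	while ((data = hif_lib_tx_get_next_complete(client, qno,
-+						    &flags, count))) {
-+		if (flags & HIF_LAST_BUFFER)
-+			dev_kfree_skb_any(data);
-+	}
-+#endif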
-+
-+static void hif_lib_tmu_credit_init(struct pfe *pfe)
-+{
-+ int i, q;
-+
-+ for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
-+ for (q = 0; q < emac_txq_cnt; q++) {
-+ pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
-+ DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
-+ pfe->tmu_credit.tx_credit[i][q] =
-+ pfe->tmu_credit.tx_credit_max[i][q];
-+ }
-+}
-+
-+int pfe_hif_lib_init(struct pfe *pfe)
-+{
-+ int rc;
-+
-+ pr_info("%s\n", __func__);
-+
-+ if (lro_mode) {
-+ page_mode = 1;
-+ pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
-+ pfe_pkt_headroom = 0;
-+ } else {
-+ page_mode = 0;
-+ pfe_pkt_size = PFE_PKT_SIZE;
-+ pfe_pkt_headroom = PFE_PKT_HEADROOM;
-+ }
-+
-+ if (tx_qos)
-+ emac_txq_cnt = EMAC_TXQ_CNT / 2;
-+ else
-+ emac_txq_cnt = EMAC_TXQ_CNT;
-+
-+ hif_lib_tmu_credit_init(pfe);
-+ pfe->hif.shm = &ghif_shm;
-+ rc = pfe_hif_shm_init(pfe->hif.shm);
-+
-+ return rc;
-+}
-+
-+void pfe_hif_lib_exit(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ pfe_hif_shm_clean(pfe->hif.shm);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hw.c
-@@ -0,0 +1,176 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#include "pfe_mod.h"
-+#include "pfe_hw.h"
-+
-+/* One-time initialization and teardown of the PFE hardware block registers */
-+int pfe_hw_init(struct pfe *pfe, int resume)
-+{
-+ struct class_cfg class_cfg = {
-+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
-+ .route_table_baseaddr = pfe->ddr_phys_baseaddr +
-+ ROUTE_TABLE_BASEADDR,
-+ .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
-+ };
-+
-+ struct tmu_cfg tmu_cfg = {
-+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
-+ .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
-+ .llm_queue_len = TMU_LLM_QUEUE_LEN,
-+ };
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ struct util_cfg util_cfg = {
-+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
-+ };
-+#endif
-+
-+ struct BMU_CFG bmu1_cfg = {
-+ .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
-+ BMU1_LMEM_BASEADDR),
-+ .count = BMU1_BUF_COUNT,
-+ .size = BMU1_BUF_SIZE,
-+ .low_watermark = 10,
-+ .high_watermark = 15,
-+ };
-+
-+ struct BMU_CFG bmu2_cfg = {
-+ .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
-+ BMU2_DDR_BASEADDR),
-+ .count = BMU2_BUF_COUNT,
-+ .size = BMU2_BUF_SIZE,
-+ .low_watermark = 250,
-+ .high_watermark = 253,
-+ };
-+
-+ struct gpi_cfg egpi1_cfg = {
-+ .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
-+ .tmlf_txthres = EGPI1_TMLF_TXTHRES,
-+ .aseq_len = EGPI1_ASEQ_LEN,
-+ .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
-+ EMAC_TCNTRL_REG),
-+ };
-+
-+ struct gpi_cfg egpi2_cfg = {
-+ .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
-+ .tmlf_txthres = EGPI2_TMLF_TXTHRES,
-+ .aseq_len = EGPI2_ASEQ_LEN,
-+ .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
-+ EMAC_TCNTRL_REG),
-+ };
-+
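-+	/*
-+	 * The host GPI fronts the HIF rather than an EMAC, so there is no
-+	 * MTIP pause register to program (mtip_pause_reg = 0).
-+	 */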
-+ struct gpi_cfg hgpi_cfg = {
-+ .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
-+ .tmlf_txthres = HGPI_TMLF_TXTHRES,
-+ .aseq_len = HGPI_ASEQ_LEN,
-+ .mtip_pause_reg = 0,
-+ };
-+
-+ pr_info("%s\n", __func__);
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ /* LS1012A needs this to make PE work correctly */
-+ writel(0x3, CLASS_PE_SYS_CLK_RATIO);
-+ writel(0x3, TMU_PE_SYS_CLK_RATIO);
-+ writel(0x3, UTIL_PE_SYS_CLK_RATIO);
-+ usleep_range(10, 20);
-+#endif
-+
-+ pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
-+ pr_info("TMU version: %x\n", readl(TMU_VERSION));
-+
-+ pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
-+ BMU_VERSION));
-+ pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
-+ BMU_VERSION));
-+
-+ pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
-+ GPI_VERSION));
-+ pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
-+ GPI_VERSION));
-+ pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
-+ GPI_VERSION));
-+
-+ pr_info("HIF version: %x\n", readl(HIF_VERSION));
-+	pr_info("HIF NOCPY version: %x\n", readl(HIF_NOCPY_VERSION));
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
-+#endif
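-+	/* Wait for the TMU to finish scrubbing its ECC-protected memories */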
-+ while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
-+ ;
-+
-+ hif_rx_disable();
-+ hif_tx_disable();
-+
-+ bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
-+
-+ pr_info("bmu_init(1) done\n");
-+
-+ bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
-+
-+ pr_info("bmu_init(2) done\n");
-+
-+ class_cfg.resume = resume ? 1 : 0;
-+
-+ class_init(&class_cfg);
-+
-+ pr_info("class_init() done\n");
-+
-+ tmu_init(&tmu_cfg);
-+
-+ pr_info("tmu_init() done\n");
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ util_init(&util_cfg);
-+
-+ pr_info("util_init() done\n");
-+#endif
-+ gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
-+
-+ pr_info("gpi_init(1) done\n");
-+
-+ gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
-+
-+ pr_info("gpi_init(2) done\n");
-+
-+ gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
-+
-+ pr_info("gpi_init(hif) done\n");
-+
-+ bmu_enable(BMU1_BASE_ADDR);
-+
-+ pr_info("bmu_enable(1) done\n");
-+
-+ bmu_enable(BMU2_BASE_ADDR);
-+
-+ pr_info("bmu_enable(2) done\n");
-+
-+ return 0;
-+}
-+
-+void pfe_hw_exit(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ bmu_disable(BMU1_BASE_ADDR);
-+ bmu_reset(BMU1_BASE_ADDR);
-+
-+ bmu_disable(BMU2_BASE_ADDR);
-+ bmu_reset(BMU2_BASE_ADDR);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
-@@ -0,0 +1,388 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/device.h>
-+#include <linux/of_net.h>
-+#include <linux/of_address.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+#include <linux/clk.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-+
-+#include "pfe_mod.h"
-+
-+struct ls1012a_pfe_platform_data pfe_platform_data;
-+
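-+/*
-+ * pfe_get_gemac_if_properties() expects one child node per GEMAC port
-+ * under the PFE node, along these lines (values are illustrative only):
-+ *
-+ *	ethernet@0 {
-+ *		reg = <0x0>;
-+ *		fsl,gemac-bus-id = <0x0>;
-+ *		fsl,gemac-phy-id = <0x2>;
-+ *		fsl,mdio-mux-val = <0x0>;
-+ *		fsl,pfe-phy-if-flags = <0x0>;
-+ *		phy-mode = "sgmii";
-+ *	};
-+ */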
-+static int pfe_get_gemac_if_properties(struct device_node *parent, int port,
-+				       int if_cnt,
-+				       struct ls1012a_pfe_platform_data *pdata)
-+{
-+ struct device_node *gem = NULL, *phy = NULL;
-+ int size;
-+ int ii = 0, phy_id = 0;
-+ const u32 *addr;
-+
-+ for (ii = 0; ii < if_cnt; ii++) {
-+ gem = of_get_next_child(parent, gem);
-+ if (!gem)
-+ goto err;
-+ addr = of_get_property(gem, "reg", &size);
-+ if (addr && (be32_to_cpup(addr) == port))
-+ break;
-+ }
-+
-+ if (ii >= if_cnt) {
-+		pr_err("%s:%d Failed to find interface %d\n",
-+		       __func__, __LINE__, port);
-+ goto err;
-+ }
-+
-+ pdata->ls1012a_eth_pdata[port].gem_id = port;
-+
-+ of_get_mac_address(gem, pdata->ls1012a_eth_pdata[port].mac_addr);
-+
-+ pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
-+
-+ if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
-+ pr_err("%s:%d Incorrect Phy mode....\n", __func__,
-+ __LINE__);
-+
-+ addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
-+ if (!addr)
-+ pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
-+ __LINE__);
-+ else
-+ pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
-+
-+ addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
-+ if (!addr) {
-+ pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
-+ __LINE__);
-+ } else {
-+ phy_id = be32_to_cpup(addr);
-+ pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
-+ pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
-+ }
-+
-+ addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
-+	if (!addr) {
-+		pr_err("%s: Invalid mdio-mux-val....\n", __func__);
-+	} else {
-+		phy_id = be32_to_cpup(addr);
-+		pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
-+	}
-+
-+ if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
-+ pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
-+ pdata->ls1012a_eth_pdata[port].mdio_muxval;
-+
-+ addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
-+ if (!addr)
-+ pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
-+ __func__, __LINE__);
-+ else
-+ pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
-+
-+	/* If there is no PHY (GEMAC_NO_PHY), skip the mdio properties */
-+ if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
-+ goto done;
-+
-+ phy = of_get_next_child(gem, NULL);
-+
-+ addr = of_get_property(phy, "reg", &size);
-+
-+ if (!addr)
-+ pr_err("%s:%d Invalid phy enable flag....\n",
-+ __func__, __LINE__);
-+ else
-+ pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
-+
-+ pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
-+
-+done:
-+
-+ return 0;
-+
-+err:
-+ return -1;
-+}
-+
-+/*
-+ * pfe_platform_probe - map PFE resources and initialize the driver
-+ *
-+ * Device tree layout: reg index 0 is the CBUS (AXI) register space and
-+ * reg index 1 the DDR region reserved for the PFE; IRQ index 0 is the
-+ * HIF interrupt and IRQ index 2 the WoL interrupt.
-+ */
-+static int pfe_platform_probe(struct platform_device *pdev)
-+{
-+ struct resource res;
-+ int ii, rc, interface_count = 0, size = 0;
-+ const u32 *prop;
-+ struct device_node *np;
-+ struct clk *pfe_clk;
-+
-+ np = pdev->dev.of_node;
-+
-+ if (!np) {
-+ pr_err("Invalid device node\n");
-+ return -EINVAL;
-+ }
-+
-+ pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
-+ if (!pfe) {
-+ rc = -ENOMEM;
-+ goto err_alloc;
-+ }
-+
-+ platform_set_drvdata(pdev, pfe);
-+
-+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-+	if (rc)
-+		goto err_ddr;
-+
-+ if (of_address_to_resource(np, 1, &res)) {
-+ rc = -ENOMEM;
-+ pr_err("failed to get ddr resource\n");
-+ goto err_ddr;
-+ }
-+
-+ pfe->ddr_phys_baseaddr = res.start;
-+ pfe->ddr_size = resource_size(&res);
-+
-+ pfe->ddr_baseaddr = phys_to_virt(res.start);
-+ if (!pfe->ddr_baseaddr) {
-+		pr_err("failed to map ddr region\n");
-+ rc = -ENOMEM;
-+ goto err_ddr;
-+ }
-+
-+ pfe->scfg =
-+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-+ "fsl,pfe-scfg");
-+ if (IS_ERR(pfe->scfg)) {
-+ dev_err(&pdev->dev, "No syscfg phandle specified\n");
-+		rc = PTR_ERR(pfe->scfg);
-+		goto err_ddr;
-+ }
-+
-+ pfe->cbus_baseaddr = of_iomap(np, 0);
-+ if (!pfe->cbus_baseaddr) {
-+ rc = -ENOMEM;
-+ pr_err("failed to get axi resource\n");
-+ goto err_axi;
-+ }
-+
-+ pfe->hif_irq = platform_get_irq(pdev, 0);
-+ if (pfe->hif_irq < 0) {
-+ pr_err("platform_get_irq for hif failed\n");
-+ rc = pfe->hif_irq;
-+ goto err_hif_irq;
-+ }
-+
-+ pfe->wol_irq = platform_get_irq(pdev, 2);
-+ if (pfe->wol_irq < 0) {
-+ pr_err("platform_get_irq for WoL failed\n");
-+ rc = pfe->wol_irq;
-+ goto err_hif_irq;
-+ }
-+
-+ /* Read interface count */
-+ prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
-+ if (!prop) {
-+ pr_err("Failed to read number of interfaces\n");
-+ rc = -ENXIO;
-+ goto err_prop;
-+ }
-+
-+ interface_count = be32_to_cpup(prop);
-+ if (interface_count <= 0) {
-+		pr_err("Invalid ethernet interface count: %d\n",
-+ interface_count);
-+ rc = -ENXIO;
-+ goto err_prop;
-+ }
-+
-+ pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
-+
-+ for (ii = 0; ii < interface_count; ii++) {
-+		pfe_get_gemac_if_properties(np, ii, interface_count,
-+					    &pfe_platform_data);
-+ }
-+
-+ pfe->dev = &pdev->dev;
-+
-+ pfe->dev->platform_data = &pfe_platform_data;
-+
-+ /* declare WoL capabilities */
-+ device_init_wakeup(&pdev->dev, true);
-+
-+ /* find the clocks */
-+ pfe_clk = devm_clk_get(pfe->dev, "pfe");
-+	if (IS_ERR(pfe_clk)) {
-+		rc = PTR_ERR(pfe_clk);
-+		goto err_prop;
-+	}
-+
-+ /* PFE clock is (platform clock / 2) */
-+ /* save sys_clk value as KHz */
-+ pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
-+
-+ rc = pfe_probe(pfe);
-+ if (rc < 0)
-+ goto err_probe;
-+
-+ return 0;
-+
-+err_probe:
-+err_prop:
-+err_hif_irq:
-+ iounmap(pfe->cbus_baseaddr);
-+
-+err_axi:
-+ iounmap(pfe->ddr_baseaddr);
-+
-+err_ddr:
-+ platform_set_drvdata(pdev, NULL);
-+
-+ kfree(pfe);
-+
-+err_alloc:
-+ return rc;
-+}
-+
-+/*
-+ * pfe_platform_remove - undo pfe_platform_probe
-+ */
-+static int pfe_platform_remove(struct platform_device *pdev)
-+{
-+ struct pfe *pfe = platform_get_drvdata(pdev);
-+ int rc;
-+
-+ pr_info("%s\n", __func__);
-+
-+ rc = pfe_remove(pfe);
-+
-+ iounmap(pfe->cbus_baseaddr);
-+ iounmap(pfe->ddr_baseaddr);
-+
-+ platform_set_drvdata(pdev, NULL);
-+
-+ kfree(pfe);
-+
-+ return rc;
-+}
-+
-+#ifdef CONFIG_PM
-+#ifdef CONFIG_PM_SLEEP
-+int pfe_platform_suspend(struct device *dev)
-+{
-+ struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
-+ struct net_device *netdev;
-+ int i;
-+
-+ pfe->wake = 0;
-+
-+ for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
-+ netdev = pfe->eth.eth_priv[i]->ndev;
-+
-+ netif_device_detach(netdev);
-+
-+ if (netif_running(netdev))
-+ if (pfe_eth_suspend(netdev))
-+ pfe->wake = 1;
-+ }
-+
-+ /* Shutdown PFE only if we're not waking up the system */
-+ if (!pfe->wake) {
-+#if defined(LS1012A_PFE_RESET_WA)
-+ pfe_hif_rx_idle(&pfe->hif);
-+#endif
-+ pfe_ctrl_suspend(&pfe->ctrl);
-+ pfe_firmware_exit(pfe);
-+
-+ pfe_hif_exit(pfe);
-+ pfe_hif_lib_exit(pfe);
-+
-+ pfe_hw_exit(pfe);
-+ }
-+
-+ return 0;
-+}
-+
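-+/*
-+ * Resume mirrors suspend: the hardware blocks are reinitialized only if
-+ * they were actually shut down, i.e. no interface armed wake-on-LAN
-+ * (pfe->wake == 0).
-+ */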
-+static int pfe_platform_resume(struct device *dev)
-+{
-+ struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
-+ struct net_device *netdev;
-+ int i;
-+
-+ if (!pfe->wake) {
-+ pfe_hw_init(pfe, 1);
-+ pfe_hif_lib_init(pfe);
-+ pfe_hif_init(pfe);
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ util_enable();
-+#endif
-+ tmu_enable(0xf);
-+ class_enable();
-+ pfe_ctrl_resume(&pfe->ctrl);
-+ }
-+
-+ for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
-+ netdev = pfe->eth.eth_priv[i]->ndev;
-+
-+ if (pfe->eth.eth_priv[i]->mii_bus)
-+ pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
-+
-+ if (netif_running(netdev))
-+ pfe_eth_resume(netdev);
-+
-+ netif_device_attach(netdev);
-+ }
-+ return 0;
-+}
-+#else
-+#define pfe_platform_suspend NULL
-+#define pfe_platform_resume NULL
-+#endif
-+
-+static const struct dev_pm_ops pfe_platform_pm_ops = {
-+ SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
-+};
-+#endif
-+
-+static const struct of_device_id pfe_match[] = {
-+ {
-+ .compatible = "fsl,pfe",
-+ },
-+ {},
-+};
-+MODULE_DEVICE_TABLE(of, pfe_match);
-+
-+static struct platform_driver pfe_platform_driver = {
-+ .probe = pfe_platform_probe,
-+ .remove = pfe_platform_remove,
-+ .driver = {
-+ .name = "pfe",
-+ .of_match_table = pfe_match,
-+#ifdef CONFIG_PM
-+ .pm = &pfe_platform_pm_ops,
-+#endif
-+ },
-+};
-+
-+module_platform_driver(pfe_platform_driver);
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("PFE Ethernet driver");
-+MODULE_AUTHOR("NXP DNCPE");
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_mod.c
-@@ -0,0 +1,141 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#include <linux/dma-mapping.h>
-+#include "pfe_mod.h"
-+
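-+/* Single PFE instance, shared by all sub-modules of this driver */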
-+struct pfe *pfe;
-+
-+/*
-+ * pfe_probe - bring up the PFE subsystems
-+ *
-+ * Order matters: hardware blocks first, then the HIF library and HIF,
-+ * firmware download, control, the ethernet interfaces, and finally the
-+ * sysfs/debugfs views. pfe_remove() unwinds in reverse.
-+ */
-+int pfe_probe(struct pfe *pfe)
-+{
-+ int rc;
-+
-+ if (pfe->ddr_size < DDR_MAX_SIZE) {
-+		pr_err("%s: required DDR memory (%x) exceeds platform DDR memory (%x)\n",
-+ __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
-+ rc = -ENOMEM;
-+ goto err_hw;
-+ }
-+
-+	if (((unsigned long)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
-+	    (8 * SZ_1M - 1)) != 0) {
-+		pr_err("%s: BMU2 base address (0x%lx) must be aligned on 8MB boundary\n",
-+		       __func__, (unsigned long)pfe->ddr_phys_baseaddr +
-+		       BMU2_DDR_BASEADDR);
-+ rc = -ENOMEM;
-+ goto err_hw;
-+ }
-+
-+ pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
-+ (unsigned long)pfe->cbus_baseaddr,
-+ (unsigned long)pfe->ddr_baseaddr,
-+ pfe->ddr_phys_baseaddr, pfe->ddr_size);
-+
-+ pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
-+ pfe->ddr_phys_baseaddr, pfe->ddr_size);
-+
-+ rc = pfe_hw_init(pfe, 0);
-+ if (rc < 0)
-+ goto err_hw;
-+
-+ rc = pfe_hif_lib_init(pfe);
-+ if (rc < 0)
-+ goto err_hif_lib;
-+
-+ rc = pfe_hif_init(pfe);
-+ if (rc < 0)
-+ goto err_hif;
-+
-+ rc = pfe_firmware_init(pfe);
-+ if (rc < 0)
-+ goto err_firmware;
-+
-+ rc = pfe_ctrl_init(pfe);
-+ if (rc < 0)
-+ goto err_ctrl;
-+
-+ rc = pfe_eth_init(pfe);
-+ if (rc < 0)
-+ goto err_eth;
-+
-+ rc = pfe_sysfs_init(pfe);
-+ if (rc < 0)
-+ goto err_sysfs;
-+
-+ rc = pfe_debugfs_init(pfe);
-+ if (rc < 0)
-+ goto err_debugfs;
-+
-+ return 0;
-+
-+err_debugfs:
-+ pfe_sysfs_exit(pfe);
-+
-+err_sysfs:
-+ pfe_eth_exit(pfe);
-+
-+err_eth:
-+ pfe_ctrl_exit(pfe);
-+
-+err_ctrl:
-+ pfe_firmware_exit(pfe);
-+
-+err_firmware:
-+ pfe_hif_exit(pfe);
-+
-+err_hif:
-+ pfe_hif_lib_exit(pfe);
-+
-+err_hif_lib:
-+ pfe_hw_exit(pfe);
-+
-+err_hw:
-+ return rc;
-+}
-+
-+/*
-+ * pfe_remove - tear down the PFE subsystems in reverse probe order
-+ */
-+int pfe_remove(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ pfe_debugfs_exit(pfe);
-+
-+ pfe_sysfs_exit(pfe);
-+
-+ pfe_eth_exit(pfe);
-+
-+ pfe_ctrl_exit(pfe);
-+
-+#if defined(LS1012A_PFE_RESET_WA)
-+ pfe_hif_rx_idle(&pfe->hif);
-+#endif
-+ pfe_firmware_exit(pfe);
-+
-+ pfe_hif_exit(pfe);
-+
-+ pfe_hif_lib_exit(pfe);
-+
-+ pfe_hw_exit(pfe);
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
-@@ -0,0 +1,818 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+
-+#include "pfe_mod.h"
-+
-+#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
-+#define NUM_QUEUES 16
-+
-+static char register_name[20][5] = {
-+ "EPC", "ECAS", "EID", "ED",
-+ "r0", "r1", "r2", "r3",
-+ "r4", "r5", "r6", "r7",
-+ "r8", "r9", "r10", "r11",
-+ "r12", "r13", "r14", "r15",
-+};
-+
-+static char exception_name[14][20] = {
-+ "Reset",
-+ "HardwareFailure",
-+ "NMI",
-+ "InstBreakpoint",
-+ "DataBreakpoint",
-+ "Unsupported",
-+ "PrivilegeViolation",
-+ "InstBusError",
-+ "DataBusError",
-+ "AlignmentError",
-+ "ArithmeticError",
-+ "SystemCall",
-+ "MemoryManagement",
-+ "Interrupt",
-+};
-+
-+static unsigned long class_do_clear;
-+static unsigned long tmu_do_clear;
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+static unsigned long util_do_clear;
-+#endif
-+
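-+/*
-+ * Render one PE's status area from DMEM. The layout is a 4-byte state
-+ * string ("DEAD" after an exception), a counter, rx, tx (or queue
-+ * status for TMUs) and drop counts, then an optional "DBUG" marker
-+ * followed by up to 16 debug words. For a dead PE the 20-word
-+ * exception dump is decoded via register_name[] and exception_name[]
-+ * above.
-+ */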
-+static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr,
-+				 unsigned long do_clear)
-+{
-+ ssize_t len = 0;
-+ u32 val;
-+ char statebuf[5];
-+ struct pfe_cpumon *cpumon = &pfe->cpumon;
-+ u32 debug_indicator;
-+ u32 debug[20];
-+
-+ *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
-+ dmem_addr += 4;
-+
-+ statebuf[4] = '\0';
-+ len += sprintf(buf + len, "state=%4s ", statebuf);
-+
-+ val = pe_dmem_read(id, dmem_addr, 4);
-+ dmem_addr += 4;
-+ len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
-+
-+ val = pe_dmem_read(id, dmem_addr, 4);
-+ if (do_clear && val)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ dmem_addr += 4;
-+ len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
-+
-+ val = pe_dmem_read(id, dmem_addr, 4);
-+ if (do_clear && val)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ dmem_addr += 4;
-+ if (id >= TMU0_ID && id <= TMU_MAX_ID)
-+ len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
-+ else
-+ len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
-+
-+ val = pe_dmem_read(id, dmem_addr, 4);
-+ if (do_clear && val)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ dmem_addr += 4;
-+ if (val)
-+ len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
-+
-+ len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
-+
-+ len += sprintf(buf + len, "\n");
-+
-+ debug_indicator = pe_dmem_read(id, dmem_addr, 4);
-+ dmem_addr += 4;
-+ if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
-+ int j, last = 0;
-+
-+ for (j = 0; j < 16; j++) {
-+ debug[j] = pe_dmem_read(id, dmem_addr, 4);
-+ if (debug[j]) {
-+ if (do_clear)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ last = j + 1;
-+ }
-+ dmem_addr += 4;
-+ }
-+ for (j = 0; j < last; j++) {
-+ len += sprintf(buf + len, "%08x%s",
-+ cpu_to_be32(debug[j]),
-+ (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
-+ }
-+ }
-+
-+ if (!strncmp(statebuf, "DEAD", 4)) {
-+ u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
-+
-+ len += sprintf(buf + len, "Exception details:\n");
-+ for (i = 0; i < 20; i++) {
-+ debug[i] = pe_dmem_read(id, dump, 4);
-+ dump += 4;
-+ if (i == 2)
-+ len += sprintf(buf + len, "%4s = %08x (=%s) ",
-+ register_name[i], cpu_to_be32(debug[i]),
-+ exception_name[min((u32)
-+ cpu_to_be32(debug[i]), (u32)13)]);
-+ else
-+ len += sprintf(buf + len, "%4s = %08x%s",
-+ register_name[i], cpu_to_be32(debug[i]),
-+ (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
-+ }
-+ }
-+
-+ return len;
-+}
-+
-+static ssize_t class_phy_stats(char *buf, int phy)
-+{
-+ ssize_t len = 0;
-+ int off1 = phy * 0x28;
-+ int off2 = phy * 0x10;
-+
-+ if (phy == 3)
-+ off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
-+
-+ len += sprintf(buf + len, "phy: %d\n", phy);
-+ len += sprintf(buf + len,
-+ " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
-+ readl(CLASS_PHY1_RX_PKTS + off1),
-+ readl(CLASS_PHY1_TX_PKTS + off1),
-+ readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
-+ readl(CLASS_PHY1_V4_PKTS + off1),
-+ readl(CLASS_PHY1_V6_PKTS + off1));
-+
-+ len += sprintf(buf + len,
-+ " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
-+ readl(CLASS_PHY1_ICMP_PKTS + off2),
-+ readl(CLASS_PHY1_IGMP_PKTS + off2),
-+ readl(CLASS_PHY1_TCP_PKTS + off2),
-+ readl(CLASS_PHY1_UDP_PKTS + off2));
-+
-+ len += sprintf(buf + len, " err\n");
-+ len += sprintf(buf + len,
-+ " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
-+ readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
-+ readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
-+ readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
-+ readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
-+ readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
-+
-+ return len;
-+}
-+
-+/* qm_read_drop_stat
-+ * This function is used to read the drop statistics from the TMU
-+ * hw drop counter. Since the hw counter is always cleared after
-+ * reading, this function maintains the previous drop count, and
-+ * adds the new value to it. That value can be retrieved by
-+ * passing a pointer to it with the total_drops arg.
-+ *
-+ * @param tmu TMU number (0 - 3)
-+ * @param queue queue number (0 - 15)
-+ * @param total_drops pointer to location to store total drops (or NULL)
-+ * @param do_reset if TRUE, clear total drops after updating
-+ */
-+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
-+{
-+ static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
-+ u32 val;
-+
-+ writel((tmu << 8) | queue, TMU_TEQ_CTRL);
-+ writel((tmu << 8) | queue, TMU_LLM_CTRL);
-+ val = readl(TMU_TEQ_DROP_STAT);
-+ qtotal[tmu][queue] += val;
-+ if (total_drops)
-+ *total_drops = qtotal[tmu][queue];
-+ if (do_reset)
-+ qtotal[tmu][queue] = 0;
-+ return val;
-+}
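-+
-+/*
-+ * Example: sample the drops on queue 5 of TMU 1 without touching the
-+ * running total kept above:
-+ *
-+ *	u32 delta = qm_read_drop_stat(1, 5, NULL, 0);
-+ */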
-+
-+static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
-+{
-+ ssize_t len = 0;
-+ u32 drops;
-+
-+ len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
-+
-+ drops = qm_read_drop_stat(tmu, queue, NULL, 0);
-+
-+ /* Select queue */
-+ writel((tmu << 8) | queue, TMU_TEQ_CTRL);
-+ writel((tmu << 8) | queue, TMU_LLM_CTRL);
-+
-+ len += sprintf(buf + len,
-+ "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
-+ drops, readl(TMU_TEQ_TRANS_STAT),
-+ readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
-+ readl(TMU_LLM_QUE_DROPCNT));
-+
-+ return len;
-+}
-+
-+static ssize_t tmu_queues(char *buf, int tmu)
-+{
-+ ssize_t len = 0;
-+ int queue;
-+
-+ for (queue = 0; queue < 16; queue++)
-+ len += tmu_queue_stats(buf + len, tmu, queue);
-+
-+ return len;
-+}
-+
-+static ssize_t block_version(char *buf, void *addr)
-+{
-+ ssize_t len = 0;
-+ u32 val;
-+
-+ val = readl(addr);
-+ len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
-+ (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
-+
-+ return len;
-+}
-+
-+static ssize_t bmu(char *buf, int id, void *base)
-+{
-+ ssize_t len = 0;
-+
-+ len += sprintf(buf + len, "%s: %d\n ", __func__, id);
-+
-+ len += block_version(buf + len, base + BMU_VERSION);
-+
-+ len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
-+ BMU_BUF_SIZE)));
-+ len += sprintf(buf + len, " buf count: %x\n", readl(base +
-+ BMU_BUF_CNT));
-+ len += sprintf(buf + len, " buf rem: %x\n", readl(base +
-+ BMU_REM_BUF_CNT));
-+ len += sprintf(buf + len, " buf curr: %x\n", readl(base +
-+ BMU_CURR_BUF_CNT));
-+ len += sprintf(buf + len, " free err: %x\n", readl(base +
-+ BMU_FREE_ERR_ADDR));
-+
-+ return len;
-+}
-+
-+static ssize_t gpi(char *buf, int id, void *base)
-+{
-+ ssize_t len = 0;
-+ u32 val;
-+
-+ len += sprintf(buf + len, "%s%d:\n ", __func__, id);
-+ len += block_version(buf + len, base + GPI_VERSION);
-+
-+ len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
-+ GPI_FIFO_STATUS));
-+ val = readl(base + GPI_FIFO_DEBUG);
-+ len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
-+ 0x3f);
-+ len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
-+ 0x3f);
-+ len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
-+ 0x1ff);
-+ len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
-+ 0x1ff);
-+ len += sprintf(buf + len, " overrun: %x\n", readl(base +
-+ GPI_OVERRUN_DROPCNT));
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+	int rc = kstrtoul(buf, 0, &class_do_clear);
-+
-+	if (rc)
-+		return rc;
-+	return count;
-+}
-+
-+static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+ int id;
-+ u32 val;
-+ struct pfe_cpumon *cpumon = &pfe->cpumon;
-+
-+ len += block_version(buf + len, CLASS_VERSION);
-+
-+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
-+ len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
-+
-+ val = readl(CLASS_PE0_DEBUG + id * 4);
-+ len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
-+
-+ len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
-+ class_do_clear);
-+ }
-+ len += sprintf(buf + len, "aggregate load=%d%%\n\n",
-+ cpumon->class_usage_pct);
-+
-+ len += sprintf(buf + len, "pe status: 0x%x\n",
-+ readl(CLASS_PE_STATUS));
-+ len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
-+ readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
-+ len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
-+ readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
-+ len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
-+
-+ len += class_phy_stats(buf + len, 0);
-+ len += class_phy_stats(buf + len, 1);
-+ len += class_phy_stats(buf + len, 2);
-+ len += class_phy_stats(buf + len, 3);
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+	int rc = kstrtoul(buf, 0, &tmu_do_clear);
-+
-+	if (rc)
-+		return rc;
-+	return count;
-+}
-+
-+static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+ int id;
-+ u32 val;
-+
-+ len += block_version(buf + len, TMU_VERSION);
-+
-+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
-+ if (id == TMU2_ID)
-+ continue;
-+ len += sprintf(buf + len, "%d: ", id - TMU0_ID);
-+
-+ len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
-+ tmu_do_clear);
-+ }
-+
-+ len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
-+ len += sprintf(buf + len, "inq fifo cnt: %x\n",
-+ readl(TMU_PHY_INQ_FIFO_CNT));
-+ val = readl(TMU_INQ_STAT);
-+ len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
-+ len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
-+
-+ return len;
-+}
-+
-+static unsigned long drops_do_clear;
-+static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
-+#endif
-+
-+static char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
-+ "ICC",
-+ "Host Pkt Error",
-+ "Rx Error",
-+ "IPsec Outbound",
-+ "IPsec Inbound",
-+ "EXPT IPsec Error",
-+ "Reassembly",
-+ "Fragmenter",
-+ "NAT-T",
-+ "Socket",
-+ "Multicast",
-+ "NAT-PT",
-+ "Tx Disabled",
-+};
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+static char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
-+ "IPsec Outbound",
-+ "IPsec Inbound",
-+ "IPsec Rate Limiter",
-+ "Fragmenter",
-+ "Socket",
-+ "Tx Disabled",
-+ "Rx Error",
-+};
-+#endif
-+
-+static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+	int rc = kstrtoul(buf, 0, &drops_do_clear);
-+
-+	if (rc)
-+		return rc;
-+	return count;
-+}
-+
-+static u32 tmu_drops[4][16];
-+static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+ int id, dropnum;
-+ int tmu, queue;
-+ u32 val;
-+ u32 dmem_addr;
-+ int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
-+ struct pfe_ctrl *ctrl = &pfe->ctrl;
-+
-+ memset(class_drop_counter, 0, sizeof(class_drop_counter));
-+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
-+ if (drops_do_clear)
-+ pe_sync_stop(ctrl, (1 << id));
-+ for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
-+ dropnum++) {
-+			dmem_addr = CLASS_DM_DROP_CNTR + (dropnum * 4);
-+ val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
-+ class_drop_counter[dropnum] += val;
-+ num_class_drops += val;
-+ if (drops_do_clear)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ }
-+ if (drops_do_clear)
-+ pe_start(ctrl, (1 << id));
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ if (drops_do_clear)
-+ pe_sync_stop(ctrl, (1 << UTIL_ID));
-+ for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
-+		dmem_addr = UTIL_DM_DROP_CNTR + (dropnum * 4);
-+ val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
-+ util_drop_counter[dropnum] = val;
-+ num_util_drops += val;
-+ if (drops_do_clear)
-+ pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
-+ }
-+ if (drops_do_clear)
-+ pe_start(ctrl, (1 << UTIL_ID));
-+#endif
-+ for (tmu = 0; tmu < 4; tmu++) {
-+ for (queue = 0; queue < 16; queue++) {
-+ qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
-+ drops_do_clear);
-+ num_tmu_drops += tmu_drops[tmu][queue];
-+ }
-+ }
-+
-+ if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
-+ len += sprintf(buf + len, "No PE drops\n\n");
-+
-+ if (num_class_drops > 0) {
-+ len += sprintf(buf + len, "Class PE drops --\n");
-+ for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
-+ dropnum++) {
-+ if (class_drop_counter[dropnum] > 0)
-+ len += sprintf(buf + len, " %s: %d\n",
-+ class_drop_description[dropnum],
-+ class_drop_counter[dropnum]);
-+ }
-+ len += sprintf(buf + len, "\n");
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ if (num_util_drops > 0) {
-+ len += sprintf(buf + len, "Util PE drops --\n");
-+ for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
-+ if (util_drop_counter[dropnum] > 0)
-+ len += sprintf(buf + len, " %s: %d\n",
-+ util_drop_description[dropnum],
-+ util_drop_counter[dropnum]);
-+ }
-+ len += sprintf(buf + len, "\n");
-+ }
-+#endif
-+ if (num_tmu_drops > 0) {
-+ len += sprintf(buf + len, "TMU drops --\n");
-+ for (tmu = 0; tmu < 4; tmu++) {
-+ for (queue = 0; queue < 16; queue++) {
-+ if (tmu_drops[tmu][queue] > 0)
-+ len += sprintf(buf + len,
-+ " TMU%d-Q%d: %d\n"
-+ , tmu, queue, tmu_drops[tmu][queue]);
-+ }
-+ }
-+ len += sprintf(buf + len, "\n");
-+ }
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_show_tmu0_queues(struct device *dev,
-+				    struct device_attribute *attr, char *buf)
-+{
-+ return tmu_queues(buf, 0);
-+}
-+
-+static ssize_t pfe_show_tmu1_queues(struct device *dev,
-+				    struct device_attribute *attr, char *buf)
-+{
-+ return tmu_queues(buf, 1);
-+}
-+
-+static ssize_t pfe_show_tmu2_queues(struct device *dev,
-+				    struct device_attribute *attr, char *buf)
-+{
-+ return tmu_queues(buf, 2);
-+}
-+
-+static ssize_t pfe_show_tmu3_queues(struct device *dev,
-+				    struct device_attribute *attr, char *buf)
-+{
-+ return tmu_queues(buf, 3);
-+}
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+	int rc = kstrtoul(buf, 0, &util_do_clear);
-+
-+	if (rc)
-+		return rc;
-+	return count;
-+}
-+
-+static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+ struct pfe_ctrl *ctrl = &pfe->ctrl;
-+
-+ len += block_version(buf + len, UTIL_VERSION);
-+
-+ pe_sync_stop(ctrl, (1 << UTIL_ID));
-+ len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
-+ util_do_clear);
-+ pe_start(ctrl, (1 << UTIL_ID));
-+
-+ len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
-+ len += sprintf(buf + len, "max buf cnt: %x\n",
-+ readl(UTIL_MAX_BUF_CNT));
-+ len += sprintf(buf + len, "tsq max cnt: %x\n",
-+ readl(UTIL_TSQ_MAX_CNT));
-+
-+ return len;
-+}
-+#endif
-+
-+static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+
-+ len += bmu(buf + len, 1, BMU1_BASE_ADDR);
-+ len += bmu(buf + len, 2, BMU2_BASE_ADDR);
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+
-+ len += sprintf(buf + len, "hif:\n ");
-+ len += block_version(buf + len, HIF_VERSION);
-+
-+ len += sprintf(buf + len, " tx curr bd: %x\n",
-+ readl(HIF_TX_CURR_BD_ADDR));
-+ len += sprintf(buf + len, " tx status: %x\n",
-+ readl(HIF_TX_STATUS));
-+ len += sprintf(buf + len, " tx dma status: %x\n",
-+ readl(HIF_TX_DMA_STATUS));
-+
-+ len += sprintf(buf + len, " rx curr bd: %x\n",
-+ readl(HIF_RX_CURR_BD_ADDR));
-+ len += sprintf(buf + len, " rx status: %x\n",
-+ readl(HIF_RX_STATUS));
-+ len += sprintf(buf + len, " rx dma status: %x\n",
-+ readl(HIF_RX_DMA_STATUS));
-+
-+ len += sprintf(buf + len, "hif nocopy:\n ");
-+ len += block_version(buf + len, HIF_NOCPY_VERSION);
-+
-+ len += sprintf(buf + len, " tx curr bd: %x\n",
-+ readl(HIF_NOCPY_TX_CURR_BD_ADDR));
-+ len += sprintf(buf + len, " tx status: %x\n",
-+ readl(HIF_NOCPY_TX_STATUS));
-+ len += sprintf(buf + len, " tx dma status: %x\n",
-+ readl(HIF_NOCPY_TX_DMA_STATUS));
-+
-+ len += sprintf(buf + len, " rx curr bd: %x\n",
-+ readl(HIF_NOCPY_RX_CURR_BD_ADDR));
-+ len += sprintf(buf + len, " rx status: %x\n",
-+ readl(HIF_NOCPY_RX_STATUS));
-+ len += sprintf(buf + len, " rx dma status: %x\n",
-+ readl(HIF_NOCPY_RX_DMA_STATUS));
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+
-+ len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
-+ len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
-+ len += gpi(buf + len, 3, HGPI_BASE_ADDR);
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_show_pfemem(struct device *dev,
-+			       struct device_attribute *attr, char *buf)
-+{
-+ ssize_t len = 0;
-+ struct pfe_memmon *memmon = &pfe->memmon;
-+
-+ len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
-+ memmon->kernel_memory_allocated,
-+ (memmon->kernel_memory_allocated + 1023) / 1024);
-+
-+ return len;
-+}
-+
-+#ifdef HIF_NAPI_STATS
-+static ssize_t pfe_show_hif_napi_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct platform_device *pdev = to_platform_device(dev);
-+ struct pfe *pfe = platform_get_drvdata(pdev);
-+ ssize_t len = 0;
-+
-+ len += sprintf(buf + len, "sched: %u\n",
-+ pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
-+ len += sprintf(buf + len, "poll: %u\n",
-+ pfe->hif.napi_counters[NAPI_POLL_COUNT]);
-+ len += sprintf(buf + len, "packet: %u\n",
-+ pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
-+ len += sprintf(buf + len, "budget: %u\n",
-+ pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
-+ len += sprintf(buf + len, "desc: %u\n",
-+ pfe->hif.napi_counters[NAPI_DESC_COUNT]);
-+ len += sprintf(buf + len, "full: %u\n",
-+ pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_set_hif_napi_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct platform_device *pdev = to_platform_device(dev);
-+ struct pfe *pfe = platform_get_drvdata(pdev);
-+
-+ memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
-+
-+ return count;
-+}
-+
-+static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
-+ pfe_set_hif_napi_stats);
-+#endif
-+
-+static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
-+static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
-+#endif
-+static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
-+static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
-+static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
-+static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
-+static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
-+static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
-+static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
-+static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
-+static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
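-+
-+/*
-+ * These attributes appear in the PFE platform device's sysfs directory
-+ * (path illustrative):
-+ *
-+ *	cat /sys/devices/platform/pfe/class
-+ *	echo 1 > /sys/devices/platform/pfe/drops	# clear counters on next read
-+ */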
-+
-+int pfe_sysfs_init(struct pfe *pfe)
-+{
-+ if (device_create_file(pfe->dev, &dev_attr_class))
-+ goto err_class;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu))
-+ goto err_tmu;
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ if (device_create_file(pfe->dev, &dev_attr_util))
-+ goto err_util;
-+#endif
-+
-+ if (device_create_file(pfe->dev, &dev_attr_bmu))
-+ goto err_bmu;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_hif))
-+ goto err_hif;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_gpi))
-+ goto err_gpi;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_drops))
-+ goto err_drops;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
-+ goto err_tmu0_queues;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
-+ goto err_tmu1_queues;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
-+ goto err_tmu2_queues;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
-+ goto err_tmu3_queues;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_pfemem))
-+ goto err_pfemem;
-+
-+#ifdef HIF_NAPI_STATS
-+ if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
-+ goto err_hif_napi_stats;
-+#endif
-+
-+ return 0;
-+
-+#ifdef HIF_NAPI_STATS
-+err_hif_napi_stats:
-+ device_remove_file(pfe->dev, &dev_attr_pfemem);
-+#endif
-+
-+err_pfemem:
-+ device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
-+
-+err_tmu3_queues:
-+ device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
-+
-+err_tmu2_queues:
-+ device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
-+
-+err_tmu1_queues:
-+ device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
-+
-+err_tmu0_queues:
-+ device_remove_file(pfe->dev, &dev_attr_drops);
-+
-+err_drops:
-+ device_remove_file(pfe->dev, &dev_attr_gpi);
-+
-+err_gpi:
-+ device_remove_file(pfe->dev, &dev_attr_hif);
-+
-+err_hif:
-+ device_remove_file(pfe->dev, &dev_attr_bmu);
-+
-+err_bmu:
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ device_remove_file(pfe->dev, &dev_attr_util);
-+
-+err_util:
-+#endif
-+ device_remove_file(pfe->dev, &dev_attr_tmu);
-+
-+err_tmu:
-+ device_remove_file(pfe->dev, &dev_attr_class);
-+
-+err_class:
-+ return -1;
-+}
-+
-+void pfe_sysfs_exit(struct pfe *pfe)
-+{
-+#ifdef HIF_NAPI_STATS
-+ device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
-+#endif
-+ device_remove_file(pfe->dev, &dev_attr_pfemem);
-+ device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
-+ device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
-+ device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
-+ device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
-+ device_remove_file(pfe->dev, &dev_attr_drops);
-+ device_remove_file(pfe->dev, &dev_attr_gpi);
-+ device_remove_file(pfe->dev, &dev_attr_hif);
-+ device_remove_file(pfe->dev, &dev_attr_bmu);
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ device_remove_file(pfe->dev, &dev_attr_util);
-+#endif
-+ device_remove_file(pfe->dev, &dev_attr_tmu);
-+ device_remove_file(pfe->dev, &dev_attr_class);
-+}