author    Daniel Golle <daniel@makrotopia.org>    2022-03-21 01:16:48 +0000
committer Daniel Golle <daniel@makrotopia.org>    2022-03-21 13:11:56 +0000
commit    786bf7fdaca4c75e7eba6e9aa3a8b5775fd21186 (patch)
tree      926fecb2b1f6ce1e42ba7ef4c7aab8e68dfd214c /target/linux/layerscape/patches-5.4/701-net-0009-dpa-SDK-DPAA-1.x-Ethernet-driver.patch
parent    9470160c350d15f765c33d6c1db15d6c4709a64c (diff)
download  upstream-786bf7fdaca4c75e7eba6e9aa3a8b5775fd21186.tar.gz
          upstream-786bf7fdaca4c75e7eba6e9aa3a8b5775fd21186.tar.bz2
          upstream-786bf7fdaca4c75e7eba6e9aa3a8b5775fd21186.zip
kernel: delete Linux 5.4 config and patches
As the upcoming release will be based on Linux 5.10 only, remove all
kernel configuration as well as patches for Linux 5.4. There were no
targets still actively using Linux 5.4.

Signed-off-by: Daniel Golle <daniel@makrotopia.org>
(cherry picked from commit 3a14580411adfb75f9a44eded9f41245b9e44606)
Diffstat (limited to 'target/linux/layerscape/patches-5.4/701-net-0009-dpa-SDK-DPAA-1.x-Ethernet-driver.patch')
-rw-r--r--    target/linux/layerscape/patches-5.4/701-net-0009-dpa-SDK-DPAA-1.x-Ethernet-driver.patch    12966
1 file changed, 0 insertions(+), 12966 deletions(-)
diff --git a/target/linux/layerscape/patches-5.4/701-net-0009-dpa-SDK-DPAA-1.x-Ethernet-driver.patch b/target/linux/layerscape/patches-5.4/701-net-0009-dpa-SDK-DPAA-1.x-Ethernet-driver.patch
deleted file mode 100644
index 8dbcb0654f..0000000000
--- a/target/linux/layerscape/patches-5.4/701-net-0009-dpa-SDK-DPAA-1.x-Ethernet-driver.patch
+++ /dev/null
@@ -1,12966 +0,0 @@
-From f7f94b1e7e9c6044a23bab1c5e773f6259f2d3e0 Mon Sep 17 00:00:00 2001
-From: Madalin Bucur <madalin.bucur@nxp.com>
-Date: Wed, 10 May 2017 16:39:42 +0300
-Subject: [PATCH] dpa: SDK DPAA 1.x Ethernet driver
-
-Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
----
- drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 173 ++
- drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 46 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++++++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1210 ++++++++++++
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 697 +++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 263 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 50 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1991 ++++++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 236 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1812 ++++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 226 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 ++++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1113 +++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 ++++++
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 290 +++
- drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 909 +++++++++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 489 +++++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 135 ++
- .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 +++++++++
- .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
- 25 files changed, 12835 insertions(+)
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
-
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
-@@ -0,0 +1,173 @@
-+menuconfig FSL_SDK_DPAA_ETH
-+ tristate "DPAA Ethernet"
-+ depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && FSL_SDK_FMAN && !FSL_DPAA_ETH
-+ select PHYLIB
-+ help
-+ Data Path Acceleration Architecture Ethernet driver,
-+ supporting the Freescale QorIQ chips.
-+ Depends on the Freescale Buffer Manager, Queue Manager
-+ and Frame Manager drivers.
-+
-+if FSL_SDK_DPAA_ETH
-+
-+config FSL_DPAA_HOOKS
-+ bool "DPAA Ethernet driver hooks"
-+
-+config FSL_DPAA_CEETM
-+ bool "DPAA CEETM QoS"
-+ depends on NET_SCHED
-+ default n
-+ help
-+ Enable QoS offloading support through the CEETM hardware block.
-+
-+config FSL_DPAA_OFFLINE_PORTS
-+ bool "Offline Ports support"
-+ depends on FSL_SDK_DPAA_ETH
-+ default y
-+ help
-+ The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
-+ most of the functionality of the regular, online ports, except they receive their
-+ frames from a core or an accelerator on the SoC, via QMan frame queues,
-+ rather than directly from the network.
-+ Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
-+ any online FMan port. They deliver the processed frames to frame queues, according
-+ to the applied PCD configurations.
-+
-+ Choosing this feature will not impact the functionality and/or performance of the system,
-+ so it is safe to enable it.
-+
-+config FSL_DPAA_ADVANCED_DRIVERS
-+ bool "Advanced DPAA Ethernet drivers"
-+ depends on FSL_SDK_DPAA_ETH
-+ default y
-+ help
-+ Besides the standard DPAA Ethernet driver, the DPAA Proxy initialization driver
-+ is needed to support advanced scenarios. Select this to also build the advanced
-+ drivers.
-+
-+config FSL_DPAA_ETH_JUMBO_FRAME
-+ bool "Optimize for jumbo frames"
-+ default n
-+ help
-+ Optimize the DPAA Ethernet driver throughput for large-frame
-+ termination traffic (e.g. 4K and above).
-+ NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
-+ is set to 9600 bytes.
-+ Using this option in combination with small frames significantly
-+ increases the driver's memory footprint and may even deplete
-+ the system memory. Also, the skb truesize is altered and messages
-+ from the stack that warn against this are bypassed.
-+ This option is not available on LS1043.
-+
-+config FSL_DPAA_TS
-+ bool "Linux compliant timestamping"
-+ depends on FSL_SDK_DPAA_ETH
-+ default n
-+ help
-+ Enable Linux API compliant timestamping support.
-+
-+config FSL_DPAA_1588
-+ bool "IEEE 1588-compliant timestamping"
-+ depends on FSL_SDK_DPAA_ETH
-+ select FSL_DPAA_TS
-+ default n
-+ help
-+ Enable IEEE1588 support code.
-+
-+config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+ bool "Use driver's Tx queue selection mechanism"
-+ default y
-+ depends on FSL_SDK_DPAA_ETH
-+ help
-+ The DPAA-Ethernet driver defines a ndo_select_queue() callback for optimal selection
-+ of the egress FQ. That will override the XPS support for this netdevice.
-+ If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
-+ or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
-+ and use the standard XPS support instead.
-+
-+config FSL_DPAA_ETH_MAX_BUF_COUNT
-+ int "Maximum nuber of buffers in private bpool"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 64 2048
-+ default "128"
-+ help
-+ The maximum number of buffers allocated by default in the DPAA-Ethernet private port's
-+ buffer pool. One needn't normally modify this, as it has probably been tuned for performance
-+ already. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
-+
-+config FSL_DPAA_ETH_REFILL_THRESHOLD
-+ int "Private bpool refill threshold"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
-+ default "80"
-+ help
-+ The DPAA-Ethernet driver will start replenishing buffer pools whose count
-+ falls below this threshold. This cannot exceed FSL_DPAA_ETH_MAX_BUF_COUNT. One needn't
-+ normally modify this value unless one has very specific performance reasons.
-+
-+config FSL_DPAA_CS_THRESHOLD_1G
-+ hex "Egress congestion threshold on 1G ports"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 0x1000 0x10000000
-+ default "0x06000000"
-+ help
-+ The size in bytes of the egress Congestion State notification threshold on 1G ports.
-+ The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
-+ (e.g. by sending UDP datagrams at "while(1) speed"),
-+ and the larger the frame size, the more acute the problem.
-+ So we have to find a balance between these factors:
-+ - avoiding the device staying congested for a prolonged time (risking
-+ that the netdev watchdog fires - see also the tx_timeout module param);
-+ - affecting performance of protocols such as TCP, which otherwise
-+ behave well under the congestion notification mechanism;
-+ - preventing the Tx cores from tightly-looping (as if the congestion
-+ threshold was too low to be effective);
-+ - running out of memory if the CS threshold is set too high.
-+
-+config FSL_DPAA_CS_THRESHOLD_10G
-+ hex "Egress congestion threshold on 10G ports"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 0x1000 0x20000000
-+ default "0x10000000"
-+ help
-+ The size in bytes of the egress Congestion State notification threshold on 10G ports.
-+
-+config FSL_DPAA_INGRESS_CS_THRESHOLD
-+ hex "Ingress congestion threshold on FMan ports"
-+ depends on FSL_SDK_DPAA_ETH
-+ default "0x10000000"
-+ help
-+ The size in bytes of the ingress tail-drop threshold on FMan ports.
-+ Traffic piling up above this value will be rejected by QMan and discarded by FMan.
-+
-+config FSL_DPAA_ETH_DEBUGFS
-+ bool "DPAA Ethernet debugfs interface"
-+ depends on DEBUG_FS && FSL_SDK_DPAA_ETH
-+ default y
-+ help
-+ This option compiles debugfs code for the DPAA Ethernet driver.
-+
-+config FSL_DPAA_ETH_DEBUG
-+ bool "DPAA Ethernet Debug Support"
-+ depends on FSL_SDK_DPAA_ETH
-+ default n
-+ help
-+ This option compiles debug code for the DPAA Ethernet driver.
-+
-+config FSL_DPAA_DBG_LOOP
-+ bool "DPAA Ethernet Debug loopback"
-+ depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+ default n
-+ help
-+ This option allows diverting all traffic received on a certain interface A towards a
-+ selected interface B. It is used to benchmark the HW + Ethernet driver in
-+ isolation from the Linux networking stack. The loops are controlled by debugfs entries,
-+ one for each interface. By default all loops are disabled (target value is -1). E.g. to
-+ change the loop setting for interface 4 and divert all received traffic to interface 5,
-+ write the Tx interface number into the receive interface's debugfs file:
-+ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
-+ 4->-1
-+ # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
-+ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
-+ 4->5
-+endif # FSL_SDK_DPAA_ETH
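
A kernel .config fragment enabling this driver with the defaults documented in the Kconfig above might look as follows (illustrative only; it assumes the FSL_SDK_BMAN, FSL_SDK_QMAN and FSL_SDK_FMAN prerequisites are enabled elsewhere in the configuration):

    CONFIG_FSL_SDK_DPAA_ETH=y
    CONFIG_FSL_DPAA_OFFLINE_PORTS=y
    CONFIG_FSL_DPAA_ADVANCED_DRIVERS=y
    CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE=y
    CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT=128
    CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD=80
    CONFIG_FSL_DPAA_CS_THRESHOLD_1G=0x06000000
    CONFIG_FSL_DPAA_CS_THRESHOLD_10G=0x10000000
    CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD=0x10000000
    CONFIG_FSL_DPAA_ETH_DEBUGFS=y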
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
-@@ -0,0 +1,46 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+# Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+ccflags-y += -I$(NET_DPA)
-+
-+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
-+obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
-+
-+fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
-+ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
-+fsl_dpa-objs += dpaa_debugfs.o
-+endif
-+ifeq ($(CONFIG_FSL_DPAA_1588),y)
-+fsl_dpa-objs += dpaa_1588.o
-+endif
-+ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
-+ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
-+fsl_dpa-objs += dpaa_eth_ceetm.o
-+endif
-+
-+fsl_mac-objs += mac.o mac-api.o
-+
-+# Advanced drivers
-+ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
-+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
-+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
-+
-+fsl_advanced-objs += dpaa_eth_base.o
-+# support for multiple drivers per kernel module comes in kernel 3.14
-+# so we are forced to generate several modules for the advanced drivers
-+fsl_proxy-objs += dpaa_eth_proxy.o
-+
-+ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
-+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
-+
-+fsl_oh-objs += offline_port.o
-+endif
-+endif
-+
-+# Needed by the tracing framework
-+CFLAGS_dpaa_eth.o := -I$(src)
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
-@@ -0,0 +1,580 @@
-+/* Copyright (C) 2011 Freescale Semiconductor, Inc.
-+ * Copyright (C) 2009 IXXAT Automation, GmbH
-+ *
-+ * DPAA Ethernet Driver -- IEEE 1588 interface functionality
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along
-+ * with this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+#include <linux/io.h>
-+#include <linux/device.h>
-+#include <linux/fs.h>
-+#include <linux/vmalloc.h>
-+#include <linux/spinlock.h>
-+#include <linux/ip.h>
-+#include <linux/ipv6.h>
-+#include <linux/udp.h>
-+#include <asm/div64.h>
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#include "dpaa_1588.h"
-+#include "mac.h"
-+
-+static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
-+{
-+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
-+
-+ circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
-+ if (!circ_buf->buf)
-+ return 1;
-+
-+ circ_buf->head = 0;
-+ circ_buf->tail = 0;
-+ ptp_buf->size = size;
-+ spin_lock_init(&ptp_buf->ptp_lock);
-+
-+ return 0;
-+}
-+
-+static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
-+{
-+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
-+
-+ circ_buf->head = 0;
-+ circ_buf->tail = 0;
-+ ptp_buf->size = size;
-+}
-+
-+static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
-+ struct dpa_ptp_data *data)
-+{
-+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
-+ int size = ptp_buf->size;
-+ struct dpa_ptp_data *tmp;
-+ unsigned long flags;
-+ int head, tail;
-+
-+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
-+
-+ head = circ_buf->head;
-+ tail = circ_buf->tail;
-+
-+ if (CIRC_SPACE(head, tail, size) <= 0)
-+ circ_buf->tail = (tail + 1) & (size - 1);
-+
-+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
-+ memcpy(tmp, data, sizeof(struct dpa_ptp_data));
-+
-+ circ_buf->head = (head + 1) & (size - 1);
-+
-+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
-+
-+ return 0;
-+}
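
The index arithmetic in dpa_ptp_insert() relies on the ring size being a power of two: masking with (size - 1) is equivalent to a modulo by size only in that case, which holds for the default 256-entry rings (DEFAULT_PTP_RX_BUF_SZ/DEFAULT_PTP_TX_BUF_SZ in dpaa_1588.h below). A minimal standalone sketch of the same wrap logic, with hypothetical names:

    /* Illustrative sketch, not driver code: advance a ring index with
     * power-of-two wrap-around, as dpa_ptp_insert() does above.
     */
    static inline int ring_advance(int idx, int size)
    {
            return (idx + 1) & (size - 1);  /* assumes size == 2^n */
    }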
-+
-+static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
-+ struct dpa_ptp_ident *src)
-+{
-+ int ret;
-+
-+ if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
-+ return 0;
-+
-+ if ((dst->netw_prot == src->netw_prot)
-+ || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
-+ if (dst->seq_id != src->seq_id)
-+ return 0;
-+
-+ ret = memcmp(dst->snd_port_id, src->snd_port_id,
-+ DPA_PTP_SOURCE_PORT_LENGTH);
-+ if (ret)
-+ return 0;
-+ else
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
-+ struct dpa_ptp_ident *ident,
-+ struct dpa_ptp_time *ts)
-+{
-+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
-+ int size = ptp_buf->size;
-+ int head, tail, idx;
-+ unsigned long flags;
-+ struct dpa_ptp_data *tmp, *tmp2;
-+ struct dpa_ptp_ident *tmp_ident;
-+
-+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
-+
-+ head = circ_buf->head;
-+ tail = idx = circ_buf->tail;
-+
-+ if (CIRC_CNT(head, tail, size) == 0) {
-+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
-+ return 1;
-+ }
-+
-+ while (idx != head) {
-+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
-+ tmp_ident = &tmp->ident;
-+ if (dpa_ptp_is_ident_match(tmp_ident, ident))
-+ break;
-+ idx = (idx + 1) & (size - 1);
-+ }
-+
-+ if (idx == head) {
-+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
-+ return 1;
-+ }
-+
-+ ts->sec = tmp->ts.sec;
-+ ts->nsec = tmp->ts.nsec;
-+
-+ if (idx != tail) {
-+ if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
-+ tail = circ_buf->tail =
-+ (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
-+ }
-+
-+ while (CIRC_CNT(idx, tail, size) > 0) {
-+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
-+ idx = (idx - 1) & (size - 1);
-+ tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
-+ *tmp = *tmp2;
-+ }
-+ }
-+ circ_buf->tail = (tail + 1) & (size - 1);
-+
-+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
-+
-+ return 0;
-+}
-+
-+/* Parse the PTP packets
-+ *
-+ * The PTP header can be found in an IPv4 packet, an IPv6 packet or in
-+ * an IEEE 802.3 Ethernet frame. This function returns the position of
-+ * the PTP header, or NULL if none is found
-+ */
-+static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
-+{
-+ u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
-+ u8 *ptp_loc = NULL;
-+ u8 msg_type;
-+ u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
-+ struct iphdr *iph;
-+ struct udphdr *udph;
-+ struct ipv6hdr *ipv6h;
-+
-+ /* when we can receive S/G frames we need to check the data we want to
-+ * access is in the linear skb buffer
-+ */
-+ if (!pskb_may_pull(skb, access_len))
-+ return NULL;
-+
-+ *eth_type = *((u16 *)pos);
-+
-+ /* Check if inner tag is here */
-+ if (*eth_type == ETH_P_8021Q) {
-+ access_len += DPA_VLAN_TAG_LEN;
-+
-+ if (!pskb_may_pull(skb, access_len))
-+ return NULL;
-+
-+ pos += DPA_VLAN_TAG_LEN;
-+ *eth_type = *((u16 *)pos);
-+ }
-+
-+ pos += DPA_ETYPE_LEN;
-+
-+ switch (*eth_type) {
-+ /* Transport of PTP over Ethernet */
-+ case ETH_P_1588:
-+ ptp_loc = pos;
-+
-+ if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
-+ return NULL;
-+
-+ msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
-+ if ((msg_type == PTP_MSGTYPE_SYNC)
-+ || (msg_type == PTP_MSGTYPE_DELREQ)
-+ || (msg_type == PTP_MSGTYPE_PDELREQ)
-+ || (msg_type == PTP_MSGTYPE_PDELRESP))
-+ return ptp_loc;
-+ break;
-+ /* Transport of PTP over IPv4 */
-+ case ETH_P_IP:
-+ iph = (struct iphdr *)pos;
-+ access_len += sizeof(struct iphdr);
-+
-+ if (!pskb_may_pull(skb, access_len))
-+ return NULL;
-+
-+ if (iph->protocol != IPPROTO_UDP)
-+ return NULL;
-+
-+ access_len += iph->ihl * 4 - sizeof(struct iphdr) +
-+ sizeof(struct udphdr);
-+
-+ if (!pskb_may_pull(skb, access_len))
-+ return NULL;
-+
-+ pos += iph->ihl * 4;
-+ udph = (struct udphdr *)pos;
-+ if (ntohs(udph->dest) != 319)
-+ return NULL;
-+ ptp_loc = pos + sizeof(struct udphdr);
-+ break;
-+ /* Transport of PTP over IPv6 */
-+ case ETH_P_IPV6:
-+ ipv6h = (struct ipv6hdr *)pos;
-+
-+ access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
-+
-+ if (!pskb_may_pull(skb, access_len))
-+ return NULL;
-+
-+ /* nexthdr is a single byte, no byte-order conversion needed */
-+ if (ipv6h->nexthdr != IPPROTO_UDP)
-+ return NULL;
-+
-+ pos += sizeof(struct ipv6hdr);
-+ udph = (struct udphdr *)pos;
-+ if (ntohs(udph->dest) != 319)
-+ return NULL;
-+ ptp_loc = pos + sizeof(struct udphdr);
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return ptp_loc;
-+}
-+
-+static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data, enum port_type rx_tx,
-+ struct dpa_ptp_data *ptp_data)
-+{
-+ u64 nsec;
-+ u32 mod;
-+ u8 *ptp_loc;
-+ u16 eth_type;
-+
-+ ptp_loc = dpa_ptp_parse_packet(skb, &eth_type);
-+ if (!ptp_loc)
-+ return -EINVAL;
-+
-+ switch (eth_type) {
-+ case ETH_P_IP:
-+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
-+ break;
-+ case ETH_P_IPV6:
-+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
-+ break;
-+ case ETH_P_1588:
-+ ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
-+ return -EINVAL;
-+
-+ ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
-+ ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
-+ ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
-+ memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
-+ DPA_PTP_SOURCE_PORT_LENGTH);
-+
-+ nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
-+ mod = do_div(nsec, NANOSEC_PER_SECOND);
-+ ptp_data->ts.sec = nsec;
-+ ptp_data->ts.nsec = mod;
-+
-+ return 0;
-+}
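
do_div() above is the kernel's 64-bit division helper: it divides its first argument in place and returns the remainder, so after the call nsec holds whole seconds and mod holds the leftover nanoseconds. A short sketch of the same split:

    /* Sketch: split a 64-bit nanosecond count into seconds + nanoseconds.
     * do_div(n, base) turns n into n / base and returns n % base.
     */
    u64 ns = 1234567890123ULL;                /* example value */
    u32 rem = do_div(ns, NANOSEC_PER_SECOND);
    /* now ns == 1234 (seconds) and rem == 567890123 (nanoseconds) */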
-+
-+void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data)
-+{
-+ struct dpa_ptp_tsu *tsu = priv->tsu;
-+ struct dpa_ptp_data ptp_tx_data;
-+
-+ if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data))
-+ return;
-+
-+ dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
-+}
-+
-+void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data)
-+{
-+ struct dpa_ptp_tsu *tsu = priv->tsu;
-+ struct dpa_ptp_data ptp_rx_data;
-+
-+ if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data))
-+ return;
-+
-+ dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
-+}
-+
-+static uint8_t dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
-+ struct dpa_ptp_ident *ident,
-+ struct dpa_ptp_time *ts)
-+{
-+ struct dpa_ptp_tsu *tsu = ptp_tsu;
-+ struct dpa_ptp_time tmp;
-+ int flag;
-+
-+ flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
-+ if (!flag) {
-+ ts->sec = tmp.sec;
-+ ts->nsec = tmp.nsec;
-+ return 0;
-+ }
-+
-+ return -1;
-+}
-+
-+static uint8_t dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
-+ struct dpa_ptp_ident *ident,
-+ struct dpa_ptp_time *ts)
-+{
-+ struct dpa_ptp_tsu *tsu = ptp_tsu;
-+ struct dpa_ptp_time tmp;
-+ int flag;
-+
-+ flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
-+ if (!flag) {
-+ ts->sec = tmp.sec;
-+ ts->nsec = tmp.nsec;
-+ return 0;
-+ }
-+
-+ return -1;
-+}
-+
-+static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
-+ struct dpa_ptp_time *cnt_time)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+ u64 tmp, fiper;
-+
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
-+
-+ /* TMR_FIPER1 will pulse every second after ALARM1 expired */
-+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
-+ fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
-+ if (mac_dev->fm_rtc_set_alarm)
-+ mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
-+ 0, tmp);
-+ if (mac_dev->fm_rtc_set_fiper)
-+ mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
-+ 0, fiper);
-+
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
-+}
-+
-+static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
-+ struct dpa_ptp_time *curr_time)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+ u64 tmp;
-+ u32 mod;
-+
-+ if (mac_dev->fm_rtc_get_cnt)
-+ mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
-+ &tmp);
-+
-+ mod = do_div(tmp, NANOSEC_PER_SECOND);
-+ curr_time->sec = (u32)tmp;
-+ curr_time->nsec = mod;
-+}
-+
-+static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
-+ struct dpa_ptp_time *cnt_time)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+ u64 tmp;
-+
-+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
-+
-+ if (mac_dev->fm_rtc_set_cnt)
-+ mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
-+ tmp);
-+
-+ /* Restart fiper two seconds later */
-+ cnt_time->sec += 2;
-+ cnt_time->nsec = 0;
-+ dpa_set_fiper_alarm(tsu, cnt_time);
-+}
-+
-+static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+ u32 drift;
-+
-+ if (mac_dev->fm_rtc_get_drift)
-+ mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
-+ &drift);
-+
-+ *addend = drift;
-+}
-+
-+static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+
-+ if (mac_dev->fm_rtc_set_drift)
-+ mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
-+ addend);
-+}
-+
-+static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
-+{
-+ dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
-+ dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
-+}
-+
-+int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ struct dpa_ptp_tsu *tsu = priv->tsu;
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ struct dpa_ptp_data ptp_data;
-+ struct dpa_ptp_data *ptp_data_user;
-+ struct dpa_ptp_time act_time;
-+ u32 addend;
-+ int retval = 0;
-+
-+ if (!tsu || !tsu->valid)
-+ return -ENODEV;
-+
-+ switch (cmd) {
-+ case PTP_ENBL_TXTS_IOCTL:
-+ tsu->hwts_tx_en_ioctl = 1;
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
-+ if (mac_dev->ptp_enable)
-+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
-+ break;
-+ case PTP_DSBL_TXTS_IOCTL:
-+ tsu->hwts_tx_en_ioctl = 0;
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
-+ if (mac_dev->ptp_disable)
-+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
-+ break;
-+ case PTP_ENBL_RXTS_IOCTL:
-+ tsu->hwts_rx_en_ioctl = 1;
-+ break;
-+ case PTP_DSBL_RXTS_IOCTL:
-+ tsu->hwts_rx_en_ioctl = 0;
-+ break;
-+ case PTP_GET_RX_TIMESTAMP:
-+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
-+ if (copy_from_user(&ptp_data.ident,
-+ &ptp_data_user->ident, sizeof(ptp_data.ident)))
-+ return -EINVAL;
-+
-+ if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
-+ return -EAGAIN;
-+
-+ if (copy_to_user((void __user *)&ptp_data_user->ts,
-+ &ptp_data.ts, sizeof(ptp_data.ts)))
-+ return -EFAULT;
-+ break;
-+ case PTP_GET_TX_TIMESTAMP:
-+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
-+ if (copy_from_user(&ptp_data.ident,
-+ &ptp_data_user->ident, sizeof(ptp_data.ident)))
-+ return -EINVAL;
-+
-+ if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
-+ return -EAGAIN;
-+
-+ if (copy_to_user((void __user *)&ptp_data_user->ts,
-+ &ptp_data.ts, sizeof(ptp_data.ts)))
-+ return -EFAULT;
-+ break;
-+ case PTP_GET_TIME:
-+ dpa_get_curr_cnt(tsu, &act_time);
-+ if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
-+ return -EFAULT;
-+ break;
-+ case PTP_SET_TIME:
-+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
-+ return -EINVAL;
-+ dpa_set_1588cnt(tsu, &act_time);
-+ break;
-+ case PTP_GET_ADJ:
-+ dpa_get_drift(tsu, &addend);
-+ if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
-+ return -EFAULT;
-+ break;
-+ case PTP_SET_ADJ:
-+ if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
-+ return -EINVAL;
-+ dpa_set_drift(tsu, addend);
-+ break;
-+ case PTP_SET_FIPER_ALARM:
-+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
-+ return -EINVAL;
-+ dpa_set_fiper_alarm(tsu, &act_time);
-+ break;
-+ case PTP_CLEANUP_TS:
-+ dpa_flush_timestamp(tsu);
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ return retval;
-+}
-+
-+int dpa_ptp_init(struct dpa_priv_s *priv)
-+{
-+ struct dpa_ptp_tsu *tsu;
-+
-+ /* Allocate memory for PTP structure */
-+ tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
-+ if (!tsu)
-+ return -ENOMEM;
-+
-+ tsu->valid = TRUE;
-+ tsu->dpa_priv = priv;
-+
-+ dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
-+ dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
-+
-+ priv->tsu = tsu;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_ptp_init);
-+
-+void dpa_ptp_cleanup(struct dpa_priv_s *priv)
-+{
-+ struct dpa_ptp_tsu *tsu = priv->tsu;
-+
-+ tsu->valid = FALSE;
-+ vfree(tsu->rx_timestamps.circ_buf.buf);
-+ vfree(tsu->tx_timestamps.circ_buf.buf);
-+
-+ kfree(tsu);
-+}
-+EXPORT_SYMBOL(dpa_ptp_cleanup);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
-@@ -0,0 +1,138 @@
-+/* Copyright (C) 2011 Freescale Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along
-+ * with this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+#ifndef __DPAA_1588_H__
-+#define __DPAA_1588_H__
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/circ_buf.h>
-+#include <linux/fsl_qman.h>
-+
-+#define DEFAULT_PTP_RX_BUF_SZ 256
-+#define DEFAULT_PTP_TX_BUF_SZ 256
-+
-+/* 1588 private ioctl calls */
-+#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
-+#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
-+#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
-+#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
-+#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
-+#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
-+#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
-+#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
-+#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
-+#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
-+#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
-+#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
-+
-+/* PTP V2 message type */
-+enum {
-+ PTP_MSGTYPE_SYNC = 0x0,
-+ PTP_MSGTYPE_DELREQ = 0x1,
-+ PTP_MSGTYPE_PDELREQ = 0x2,
-+ PTP_MSGTYPE_PDELRESP = 0x3,
-+ PTP_MSGTYPE_FLWUP = 0x8,
-+ PTP_MSGTYPE_DELRESP = 0x9,
-+ PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
-+ PTP_MSGTYPE_ANNOUNCE = 0xB,
-+ PTP_MSGTYPE_SGNLNG = 0xC,
-+ PTP_MSGTYPE_MNGMNT = 0xD,
-+};
-+
-+/* Byte offset of data in the PTP V2 headers */
-+#define PTP_OFFS_MSG_TYPE 0
-+#define PTP_OFFS_VER_PTP 1
-+#define PTP_OFFS_MSG_LEN 2
-+#define PTP_OFFS_DOM_NMB 4
-+#define PTP_OFFS_FLAGS 6
-+#define PTP_OFFS_CORFIELD 8
-+#define PTP_OFFS_SRCPRTID 20
-+#define PTP_OFFS_SEQ_ID 30
-+#define PTP_OFFS_CTRL 32
-+#define PTP_OFFS_LOGMEAN 33
-+
-+#define PTP_IP_OFFS 14
-+#define PTP_UDP_OFFS 34
-+#define PTP_HEADER_OFFS 42
-+#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
-+#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
-+#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
-+#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
-+
-+/* 1588-2008 network protocol enumeration values */
-+#define DPA_PTP_PROT_IPV4 1
-+#define DPA_PTP_PROT_IPV6 2
-+#define DPA_PTP_PROT_802_3 3
-+#define DPA_PTP_PROT_DONTCARE 0xFFFF
-+
-+#define DPA_PTP_SOURCE_PORT_LENGTH 10
-+#define DPA_PTP_HEADER_SZE 34
-+#define DPA_ETYPE_LEN 2
-+#define DPA_VLAN_TAG_LEN 4
-+#define NANOSEC_PER_SECOND 1000000000
-+
-+/* Threshold between the timestamp just found and the oldest one kept */
-+#define TS_ACCUMULATION_THRESHOLD 50
-+
-+/* Struct needed to identify a timestamp */
-+struct dpa_ptp_ident {
-+ u8 version;
-+ u8 msg_type;
-+ u16 netw_prot;
-+ u16 seq_id;
-+ u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
-+};
-+
-+/* Timestamp format in 1588-2008 */
-+struct dpa_ptp_time {
-+ u64 sec; /* only 48 bits used */
-+ u32 nsec;
-+};
-+
-+/* needed for timestamp data over ioctl */
-+struct dpa_ptp_data {
-+ struct dpa_ptp_ident ident;
-+ struct dpa_ptp_time ts;
-+};
-+
-+struct dpa_ptp_circ_buf {
-+ struct circ_buf circ_buf;
-+ u32 size;
-+ spinlock_t ptp_lock;
-+};
-+
-+/* PTP TSU control structure */
-+struct dpa_ptp_tsu {
-+ struct dpa_priv_s *dpa_priv;
-+ bool valid;
-+ struct dpa_ptp_circ_buf rx_timestamps;
-+ struct dpa_ptp_circ_buf tx_timestamps;
-+
-+ /* HW timestamping over ioctl enabled flag */
-+ int hwts_tx_en_ioctl;
-+ int hwts_rx_en_ioctl;
-+};
-+
-+extern int dpa_ptp_init(struct dpa_priv_s *priv);
-+extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
-+extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data);
-+extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data);
-+extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
-+#endif
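
For context, the private ioctls declared above are reached from userspace through the SIOCDEVPRIVATE range on the interface. A hypothetical caller reading the 1588 counter, assuming a userspace struct matching dpa_ptp_time (error handling elided, illustrative only):

    /* Hypothetical userspace sketch - not part of the driver sources. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h>
    #include <net/if.h>

    #define PTP_GET_TIME (SIOCDEVPRIVATE + 7)   /* mirrors dpaa_1588.h */

    struct dpa_ptp_time {                       /* must match the kernel layout */
            unsigned long long sec;
            unsigned int nsec;
    };

    int main(void)
    {
            struct dpa_ptp_time t;
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&t;

            if (ioctl(fd, PTP_GET_TIME, &ifr) == 0)
                    printf("1588 counter: %llu.%09u\n", t.sec, t.nsec);

            close(fd);
            return 0;
    }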
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
-@@ -0,0 +1,180 @@
-+/* Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */
-+#include <linux/debugfs.h>
-+#include "dpaa_debugfs.h"
-+#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
-+
-+#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
-+#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
-+
-+static struct dentry *dpa_debugfs_root;
-+
-+static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
-+static ssize_t dpa_loop_write(struct file *f,
-+ const char __user *buf, size_t count, loff_t *off);
-+
-+static const struct file_operations dpa_debugfs_lp_fops = {
-+ .open = dpa_debugfs_loop_open,
-+ .write = dpa_loop_write,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
-+{
-+ struct dpa_priv_s *priv;
-+
-+ BUG_ON(offset == NULL);
-+
-+ priv = netdev_priv((struct net_device *)file->private);
-+ seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
-+
-+ return 0;
-+}
-+
-+static int user_input_convert(const char __user *user_buf, size_t count,
-+ long *val)
-+{
-+ char buf[12];
-+
-+ if (count > sizeof(buf) - 1)
-+ return -EINVAL;
-+ if (copy_from_user(buf, user_buf, count))
-+ return -EFAULT;
-+ buf[count] = '\0';
-+ if (kstrtol(buf, 0, val))
-+ return -EINVAL;
-+ return 0;
-+}
-+
-+static ssize_t dpa_loop_write(struct file *f,
-+ const char __user *buf, size_t count, loff_t *off)
-+{
-+ struct dpa_priv_s *priv;
-+ struct net_device *netdev;
-+ struct seq_file *sf;
-+ int ret;
-+ long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+
-+ sf = (struct seq_file *)f->private_data;
-+ netdev = (struct net_device *)sf->private;
-+ priv = netdev_priv(netdev);
-+
-+ priv->loop_to = ((val < 0) || (val > 20)) ? -1 : val;
-+
-+ return count;
-+}
-+
-+static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file)
-+{
-+ int _errno;
-+ const struct net_device *net_dev;
-+
-+ _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private);
-+ if (unlikely(_errno < 0)) {
-+ net_dev = (struct net_device *)inode->i_private;
-+
-+ if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
-+ netdev_err(net_dev, "single_open() = %d\n",
-+ _errno);
-+ }
-+
-+ return _errno;
-+}
-+
-+
-+int dpa_netdev_debugfs_create(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ static int cnt;
-+ char loop_file_name[100];
-+
-+ if (unlikely(dpa_debugfs_root == NULL)) {
-+ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__,
-+ "root debugfs missing, possible module ordering issue");
-+ return -ENOMEM;
-+ }
-+
-+ sprintf(loop_file_name, "eth%d_loop", ++cnt);
-+ priv->debugfs_loop_file = debugfs_create_file(loop_file_name,
-+ S_IRUGO,
-+ dpa_debugfs_root,
-+ net_dev,
-+ &dpa_debugfs_lp_fops);
-+ if (unlikely(priv->debugfs_loop_file == NULL)) {
-+ netdev_err(net_dev, "debugfs_create_file(%s/%s)",
-+ dpa_debugfs_root->d_iname,
-+ loop_file_name);
-+
-+ return -ENOMEM;
-+ }
-+ return 0;
-+}
-+
-+void dpa_netdev_debugfs_remove(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+
-+ debugfs_remove(priv->debugfs_loop_file);
-+}
-+
-+int __init dpa_debugfs_module_init(void)
-+{
-+ int _errno = 0;
-+
-+ pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n");
-+
-+ dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL);
-+
-+ if (unlikely(dpa_debugfs_root == NULL)) {
-+ _errno = -ENOMEM;
-+ pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__);
-+ pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
-+ DPA_ETH_DEBUGFS_ROOT, _errno);
-+ }
-+
-+ return _errno;
-+}
-+
-+void __exit dpa_debugfs_module_exit(void)
-+{
-+ debugfs_remove(dpa_debugfs_root);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
-@@ -0,0 +1,43 @@
-+/* Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPAA_DEBUGFS_H_
-+#define DPAA_DEBUGFS_H_
-+
-+#include <linux/netdevice.h>
-+#include <linux/dcache.h> /* struct dentry needed in dpaa_eth.h */
-+
-+int dpa_netdev_debugfs_create(struct net_device *net_dev);
-+void dpa_netdev_debugfs_remove(struct net_device *net_dev);
-+int __init dpa_debugfs_module_init(void);
-+void __exit dpa_debugfs_module_exit(void);
-+
-+#endif /* DPAA_DEBUGFS_H_ */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
-@@ -0,0 +1,1210 @@
-+/* Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_mdio.h>
-+#include <linux/of_net.h>
-+#include <linux/kthread.h>
-+#include <linux/io.h>
-+#include <linux/if_arp.h> /* arp_hdr_len() */
-+#include <linux/if_vlan.h> /* VLAN_HLEN */
-+#include <linux/icmp.h> /* struct icmphdr */
-+#include <linux/ip.h> /* struct iphdr */
-+#include <linux/ipv6.h> /* struct ipv6hdr */
-+#include <linux/udp.h> /* struct udphdr */
-+#include <linux/tcp.h> /* struct tcphdr */
-+#include <linux/net.h> /* net_ratelimit() */
-+#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
-+#include <linux/highmem.h>
-+#include <linux/percpu.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/fsl_bman.h>
-+#ifdef CONFIG_SOC_BUS
-+#include <linux/sys_soc.h> /* soc_device_match */
-+#endif
-+
-+#include "fsl_fman.h"
-+#include "fm_ext.h"
-+#include "fm_port_ext.h"
-+
-+#include "mac.h"
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+#include "dpaa_debugfs.h"
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
-+ * using these trace events only need to #include "dpaa_eth_trace.h"
-+ */
-+#define CREATE_TRACE_POINTS
-+#include "dpaa_eth_trace.h"
-+
-+#define DPA_NAPI_WEIGHT 64
-+
-+/* Valid checksum indication */
-+#define DPA_CSUM_VALID 0xFFFF
-+
-+#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
-+
-+MODULE_DESCRIPTION(DPA_DESCRIPTION);
-+
-+static uint8_t debug = -1;
-+module_param(debug, byte, S_IRUGO);
-+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
-+
-+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
-+static uint16_t tx_timeout = 1000;
-+module_param(tx_timeout, ushort, S_IRUGO);
-+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
-+
-+static const char rtx[][3] = {
-+ [RX] = "RX",
-+ [TX] = "TX"
-+};
-+
-+#ifndef CONFIG_PPC
-+bool dpaa_errata_a010022;
-+EXPORT_SYMBOL(dpaa_errata_a010022);
-+#endif
-+
-+/* BM */
-+
-+#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
-+
-+static uint8_t dpa_priv_common_bpid;
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+struct net_device *dpa_loop_netdevs[20];
-+#endif
-+
-+#ifdef CONFIG_PM
-+
-+static int dpaa_suspend(struct device *dev)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+ int err = 0;
-+
-+ net_dev = dev_get_drvdata(dev);
-+
-+ if (net_dev->flags & IFF_UP) {
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ if (priv->wol & DPAA_WOL_MAGIC) {
-+ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
-+ priv->mac_dev->get_mac_handle(mac_dev), true);
-+ if (err) {
-+ netdev_err(net_dev, "set_wol() = %d\n", err);
-+ goto set_wol_failed;
-+ }
-+ }
-+
-+ err = fm_port_suspend(mac_dev->port_dev[RX]);
-+ if (err) {
-+ netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
-+ goto rx_port_suspend_failed;
-+ }
-+
-+ err = fm_port_suspend(mac_dev->port_dev[TX]);
-+ if (err) {
-+ netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
-+ goto tx_port_suspend_failed;
-+ }
-+ }
-+
-+ return 0;
-+
-+tx_port_suspend_failed:
-+ fm_port_resume(mac_dev->port_dev[RX]);
-+rx_port_suspend_failed:
-+ if (priv->wol & DPAA_WOL_MAGIC) {
-+ priv->mac_dev->set_wol(mac_dev->port_dev[RX],
-+ priv->mac_dev->get_mac_handle(mac_dev), false);
-+ }
-+set_wol_failed:
-+ return err;
-+}
-+
-+static int dpaa_resume(struct device *dev)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+ int err = 0;
-+
-+ net_dev = dev_get_drvdata(dev);
-+
-+ if (net_dev->flags & IFF_UP) {
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ err = fm_mac_resume(mac_dev->get_mac_handle(mac_dev));
-+ if (err) {
-+ netdev_err(net_dev, "fm_mac_resume = %d\n", err);
-+ goto resume_failed;
-+ }
-+
-+ err = fm_port_resume(mac_dev->port_dev[TX]);
-+ if (err) {
-+ netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
-+ goto resume_failed;
-+ }
-+
-+ err = fm_port_resume(mac_dev->port_dev[RX]);
-+ if (err) {
-+ netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err);
-+ goto resume_failed;
-+ }
-+
-+ if (priv->wol & DPAA_WOL_MAGIC) {
-+ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
-+ priv->mac_dev->get_mac_handle(mac_dev), false);
-+ if (err) {
-+ netdev_err(net_dev, "set_wol() = %d\n", err);
-+ goto resume_failed;
-+ }
-+ }
-+ }
-+
-+ return 0;
-+
-+resume_failed:
-+ return err;
-+}
-+
-+static const struct dev_pm_ops dpaa_pm_ops = {
-+ .suspend = dpaa_suspend,
-+ .resume = dpaa_resume,
-+};
-+
-+#define DPAA_PM_OPS (&dpaa_pm_ops)
-+
-+#else /* CONFIG_PM */
-+
-+#define DPAA_PM_OPS NULL
-+
-+#endif /* CONFIG_PM */
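
DPAA_PM_OPS resolving to NULL when CONFIG_PM is off lets the driver registration reference it unconditionally. The actual platform_driver definition lives in a later hunk of this patch; a sketch of how such ops are typically wired up (the callback names here are assumptions for illustration):

    static struct platform_driver dpa_driver = {
            .driver = {
                    .name   = KBUILD_MODNAME,
                    .pm     = DPAA_PM_OPS,          /* &dpaa_pm_ops or NULL */
            },
            .probe  = dpaa_eth_priv_probe,          /* assumed name */
            .remove = dpa_remove,                   /* assumed name */
    };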
-+
-+/* Checks whether the checksum field in Parse Results array is valid
-+ * (equals 0xFFFF) and increments the .cse counter otherwise
-+ */
-+static inline void
-+dpa_csum_validation(const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd)
-+{
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ void *frm = phys_to_virt(addr);
-+ fm_prs_result_t *parse_result;
-+
-+ if (unlikely(!frm))
-+ return;
-+
-+ dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE +
-+ DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL);
-+
-+ parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE);
-+
-+ if (parse_result->cksum != DPA_CSUM_VALID)
-+ percpu_priv->rx_errors.cse++;
-+}
-+
-+static void _dpa_rx_error(struct net_device *net_dev,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid)
-+{
-+ /* limit common, possibly innocuous Rx FIFO Overflow errors'
-+ * interference with zero-loss convergence benchmark results.
-+ */
-+ if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
-+ pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
-+ else
-+ if (netif_msg_hw(priv) && net_ratelimit())
-+ netdev_dbg(net_dev, "Err FD status = 0x%08x\n",
-+ fd->status & FM_FD_STAT_RX_ERRORS);
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ if (dpaa_eth_hooks.rx_error &&
-+ dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
-+ /* it's up to the hook to perform resource cleanup */
-+ return;
-+#endif
-+ percpu_priv->stats.rx_errors++;
-+
-+ if (fd->status & FM_PORT_FRM_ERR_DMA)
-+ percpu_priv->rx_errors.dme++;
-+ if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
-+ percpu_priv->rx_errors.fpe++;
-+ if (fd->status & FM_PORT_FRM_ERR_SIZE)
-+ percpu_priv->rx_errors.fse++;
-+ if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
-+ percpu_priv->rx_errors.phe++;
-+ if (fd->status & FM_FD_STAT_L4CV)
-+ dpa_csum_validation(priv, percpu_priv, fd);
-+
-+ dpa_fd_release(net_dev, fd);
-+}
-+
-+static void _dpa_tx_error(struct net_device *net_dev,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid)
-+{
-+ struct sk_buff *skb;
-+
-+ if (netif_msg_hw(priv) && net_ratelimit())
-+ netdev_warn(net_dev, "FD status = 0x%08x\n",
-+ fd->status & FM_FD_STAT_TX_ERRORS);
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ if (dpaa_eth_hooks.tx_error &&
-+ dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
-+ /* now the hook must ensure proper cleanup */
-+ return;
-+#endif
-+ percpu_priv->stats.tx_errors++;
-+
-+ /* If we intended the buffers from this frame to go into the bpools
-+ * when the FMan transmit was done, we need to put it in manually.
-+ */
-+ if (fd->bpid != 0xff) {
-+ dpa_fd_release(net_dev, fd);
-+ return;
-+ }
-+
-+ skb = _dpa_cleanup_tx_fd(priv, fd);
-+ dev_kfree_skb(skb);
-+}
-+
-+/* Helper function to factor out frame validation logic on all Rx paths. Its
-+ * purpose is to extract from the Parse Results structure information about
-+ * the integrity of the frame, its checksum, the length of the parsed headers
-+ * and whether the frame is suitable for GRO.
-+ *
-+ * Assumes no parser errors, since any error frame is dropped before this
-+ * function is called.
-+ *
-+ * @skb will have its ip_summed field overwritten;
-+ * @use_gro will only be written with 0, if the frame is definitely not
-+ * GRO-able; otherwise, it will be left unchanged;
-+ * @hdr_size will be written with a safe value, at least the size of the
-+ * headers' length.
-+ */
-+void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
-+ const struct qm_fd *fd,
-+ struct sk_buff *skb, int *use_gro)
-+{
-+ if (fd->status & FM_FD_STAT_L4CV) {
-+ /* The parser has run and performed L4 checksum validation.
-+ * We know there were no parser errors (and implicitly no
-+ * L4 csum error), otherwise we wouldn't be here.
-+ */
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+ /* Don't go through GRO for certain types of traffic that
-+ * we know are not GRO-able, such as dgram-based protocols.
-+ * In the worst-case scenarios, such as small-pkt terminating
-+ * UDP, the extra GRO processing would be overkill.
-+ *
-+ * The only protocol the Parser supports that is also GRO-able
-+ * is currently TCP.
-+ */
-+ if (!fm_l4_frame_is_tcp(parse_results))
-+ *use_gro = 0;
-+
-+ return;
-+ }
-+
-+ /* We're here because either the parser didn't run or the L4 checksum
-+ * was not verified. This may include the case of a UDP frame with
-+ * checksum zero or an L4 proto other than TCP/UDP
-+ */
-+ skb->ip_summed = CHECKSUM_NONE;
-+
-+ /* Bypass GRO for unknown traffic or if no PCDs are applied */
-+ *use_gro = 0;
-+}
-+
-+int dpaa_eth_poll(struct napi_struct *napi, int budget)
-+{
-+ struct dpa_napi_portal *np =
-+ container_of(napi, struct dpa_napi_portal, napi);
-+
-+ int cleaned = qman_p_poll_dqrr(np->p, budget);
-+
-+ if (cleaned < budget) {
-+ int tmp;
-+ napi_complete(napi);
-+ tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
-+ DPA_BUG_ON(tmp);
-+ }
-+
-+ return cleaned;
-+}
-+EXPORT_SYMBOL(dpaa_eth_poll);
-+
-+static void __hot _dpa_tx_conf(struct net_device *net_dev,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid)
-+{
-+ struct sk_buff *skb;
-+
-+ /* do we need the timestamp for the error frames? */
-+
-+ if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS)) {
-+ if (netif_msg_hw(priv) && net_ratelimit())
-+ netdev_warn(net_dev, "FD status = 0x%08x\n",
-+ fd->status & FM_FD_STAT_TX_ERRORS);
-+
-+ percpu_priv->stats.tx_errors++;
-+ }
-+
-+ /* hopefully we need not get the timestamp before the hook */
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev,
-+ fd, fqid) == DPAA_ETH_STOLEN)
-+ /* it's the hook that must now perform cleanup */
-+ return;
-+#endif
-+ /* This might not perfectly reflect the reality, if the core dequeuing
-+ * the Tx confirmation is different from the one that did the enqueue,
-+ * but at least it'll show up in the total count.
-+ */
-+ percpu_priv->tx_confirm++;
-+
-+ skb = _dpa_cleanup_tx_fd(priv, fd);
-+
-+ dev_kfree_skb(skb);
-+}
-+
-+enum qman_cb_dqrr_result
-+priv_rx_error_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int *count_ptr;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+ count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
-+
-+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
-+ return qman_cb_dqrr_stop;
-+
-+ if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
-+ /* Unable to refill the buffer pool due to insufficient
-+ * system memory. Just release the frame back into the pool,
-+ * otherwise we'll soon end up with an empty buffer pool.
-+ */
-+ dpa_fd_release(net_dev, &dq->fd);
-+ else
-+ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
-+
-+ return qman_cb_dqrr_consume;
-+}
-+
-+
-+enum qman_cb_dqrr_result __hot
-+priv_rx_default_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int *count_ptr;
-+ struct dpa_bp *dpa_bp;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+ dpa_bp = priv->dpa_bp;
-+
-+ /* Trace the Rx fd */
-+ trace_dpa_rx_fd(net_dev, fq, &dq->fd);
-+
-+ /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+ count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
-+
-+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
-+ return qman_cb_dqrr_stop;
-+
-+ /* Vale of plenty: make sure we didn't run out of buffers */
-+
-+ if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
-+ /* Unable to refill the buffer pool due to insufficient
-+ * system memory. Just release the frame back into the pool,
-+ * otherwise we'll soon end up with an empty buffer pool.
-+ */
-+ dpa_fd_release(net_dev, &dq->fd);
-+ else
-+ _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
-+ count_ptr);
-+
-+ return qman_cb_dqrr_consume;
-+}
-+
-+enum qman_cb_dqrr_result
-+priv_tx_conf_error_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+
-+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
-+ return qman_cb_dqrr_stop;
-+
-+ _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
-+
-+ return qman_cb_dqrr_consume;
-+}
-+
-+enum qman_cb_dqrr_result __hot
-+priv_tx_conf_default_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+
-+ /* Trace the fd */
-+ trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
-+
-+ /* Non-migratable context, safe to use raw_cpu_ptr */
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+
-+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
-+ return qman_cb_dqrr_stop;
-+
-+ _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
-+
-+ return qman_cb_dqrr_consume;
-+}
-+
-+void priv_ern(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_mr_entry *msg)
-+{
-+ struct net_device *net_dev;
-+ const struct dpa_priv_s *priv;
-+ struct sk_buff *skb;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ struct qm_fd fd = msg->ern.fd;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+ /* Non-migratable context, safe to use raw_cpu_ptr */
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+
-+ percpu_priv->stats.tx_dropped++;
-+ percpu_priv->stats.tx_fifo_errors++;
-+ count_ern(percpu_priv, msg);
-+
-+ /* If we intended this buffer to go into the pool
-+ * when the FM was done, we need to put it in
-+ * manually.
-+ */
-+ if (msg->ern.fd.bpid != 0xff) {
-+ dpa_fd_release(net_dev, &fd);
-+ return;
-+ }
-+
-+ skb = _dpa_cleanup_tx_fd(priv, &fd);
-+ dev_kfree_skb_any(skb);
-+}
-+
-+const struct dpa_fq_cbs_t private_fq_cbs = {
-+ .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
-+ .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
-+ .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
-+ .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
-+ .egress_ern = { .cb = { .ern = priv_ern } }
-+};
-+EXPORT_SYMBOL(private_fq_cbs);
-+
-+static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
-+{
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int i, j;
-+
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+
-+ for (j = 0; j < qman_portal_max; j++)
-+ napi_enable(&percpu_priv->np[j].napi);
-+ }
-+}
-+
-+static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
-+{
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int i, j;
-+
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+
-+ for (j = 0; j < qman_portal_max; j++)
-+ napi_disable(&percpu_priv->np[j].napi);
-+ }
-+}
-+
-+static int __cold dpa_eth_priv_start(struct net_device *net_dev)
-+{
-+ int err;
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ dpaa_eth_napi_enable(priv);
-+
-+ err = dpa_start(net_dev);
-+ if (err < 0)
-+ dpaa_eth_napi_disable(priv);
-+
-+ return err;
-+}
-+
-+
-+static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
-+{
-+ int _errno;
-+ struct dpa_priv_s *priv;
-+
-+ _errno = dpa_stop(net_dev);
-+ /* Allow NAPI to consume any frame still in the Rx/TxConfirm
-+ * ingress queues. This is to avoid a race between the current
-+ * context and ksoftirqd which could leave NAPI disabled while
-+ * in fact there's still Rx traffic to be processed.
-+ */
-+ usleep_range(5000, 10000);
-+
-+ priv = netdev_priv(net_dev);
-+ dpaa_eth_napi_disable(priv);
-+
-+ return _errno;
-+}
-+
-+#ifdef CONFIG_NET_POLL_CONTROLLER
-+static void dpaa_eth_poll_controller(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct dpa_percpu_priv_s *percpu_priv =
-+ raw_cpu_ptr(priv->percpu_priv);
-+ struct qman_portal *p;
-+ const struct qman_portal_config *pc;
-+ struct dpa_napi_portal *np;
-+
-+ p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id());
-+ pc = qman_p_get_portal_config(p);
-+ np = &percpu_priv->np[pc->index];
-+
-+ qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI);
-+ qman_p_poll_dqrr(np->p, np->napi.weight);
-+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
-+}
-+#endif
-+
-+static const struct net_device_ops dpa_private_ops = {
-+ .ndo_open = dpa_eth_priv_start,
-+ .ndo_start_xmit = dpa_tx,
-+ .ndo_stop = dpa_eth_priv_stop,
-+ .ndo_tx_timeout = dpa_timeout,
-+ .ndo_get_stats64 = dpa_get_stats64,
-+ .ndo_set_mac_address = dpa_set_mac_address,
-+ .ndo_validate_addr = eth_validate_addr,
-+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+ .ndo_select_queue = dpa_select_queue,
-+#endif
-+ .ndo_change_mtu = dpa_change_mtu,
-+ .ndo_set_rx_mode = dpa_set_rx_mode,
-+ .ndo_init = dpa_ndo_init,
-+ .ndo_set_features = dpa_set_features,
-+ .ndo_fix_features = dpa_fix_features,
-+ .ndo_do_ioctl = dpa_ioctl,
-+#ifdef CONFIG_NET_POLL_CONTROLLER
-+ .ndo_poll_controller = dpaa_eth_poll_controller,
-+#endif
-+};
-+
-+static int dpa_private_napi_add(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int i, cpu;
-+
-+ for_each_possible_cpu(cpu) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
-+
-+ percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
-+ qman_portal_max * sizeof(struct dpa_napi_portal),
-+ GFP_KERNEL);
-+
-+ if (unlikely(percpu_priv->np == NULL)) {
-+ dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < qman_portal_max; i++)
-+ netif_napi_add(net_dev, &percpu_priv->np[i].napi,
-+ dpaa_eth_poll, DPA_NAPI_WEIGHT);
-+ }
-+
-+ return 0;
-+}
-+
-+void dpa_private_napi_del(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int i, cpu;
-+
-+ for_each_possible_cpu(cpu) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
-+
-+ if (percpu_priv->np) {
-+ for (i = 0; i < qman_portal_max; i++)
-+ netif_napi_del(&percpu_priv->np[i].napi);
-+
-+ devm_kfree(net_dev->dev.parent, percpu_priv->np);
-+ }
-+ }
-+}
-+EXPORT_SYMBOL(dpa_private_napi_del);
-+
-+static int dpa_private_netdev_init(struct net_device *net_dev)
-+{
-+ int i;
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ const uint8_t *mac_addr;
-+
-+ /* Although we access another CPU's private data here
-+ * we do it at initialization so it is safe
-+ */
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+ percpu_priv->net_dev = net_dev;
-+ }
-+
-+ net_dev->netdev_ops = &dpa_private_ops;
-+ mac_addr = priv->mac_dev->addr;
-+
-+ net_dev->mem_start = priv->mac_dev->res->start;
-+ net_dev->mem_end = priv->mac_dev->res->end;
-+
-+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-+ NETIF_F_LLTX);
-+
-+ /* Advertise S/G and HIGHDMA support for private interfaces */
-+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
-+	/* Recent kernels enable GSO automatically if we declare NETIF_F_SG.
-+	 * For conformity, we still declare GSO explicitly.
-+	 */
-+ net_dev->features |= NETIF_F_GSO;
-+
-+ /* Advertise GRO support */
-+ net_dev->features |= NETIF_F_GRO;
-+
-+ return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
-+}
-+
-+static struct dpa_bp * __cold
-+dpa_priv_bp_probe(struct device *dev)
-+{
-+ struct dpa_bp *dpa_bp;
-+
-+ dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
-+ if (unlikely(dpa_bp == NULL)) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+	dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
-+	if (unlikely(dpa_bp->percpu_count == NULL))
-+		return ERR_PTR(-ENOMEM);
-+ dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
-+
-+ dpa_bp->seed_cb = dpa_bp_priv_seed;
-+ dpa_bp->free_buf_cb = _dpa_bp_free_pf;
-+
-+ return dpa_bp;
-+}
-+
-+/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR.
-+ * We won't be sending congestion notifications to FMan; for now, we just use
-+ * this CGR to generate enqueue rejections to FMan in order to drop the frames
-+ * before they reach our ingress queues and eat up memory.
-+ */
-+static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
-+{
-+ struct qm_mcc_initcgr initcgr;
-+ u32 cs_th;
-+ int err;
-+
-+ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
-+ if (err < 0) {
-+ pr_err("Error %d allocating CGR ID\n", err);
-+ goto out_error;
-+ }
-+
-+	/* Enable CS TD, but disable Congestion State Change Notifications. */
-+	memset(&initcgr, 0, sizeof(initcgr));
-+	initcgr.we_mask = QM_CGR_WE_CS_THRES;
-+ initcgr.cgr.cscn_en = QM_CGR_EN;
-+ cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;
-+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
-+
-+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
-+ initcgr.cgr.cstd_en = QM_CGR_EN;
-+
-+ /* This is actually a hack, because this CGR will be associated with
-+ * our affine SWP. However, we'll place our ingress FQs in it.
-+ */
-+ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
-+ &initcgr);
-+ if (err < 0) {
-+ pr_err("Error %d creating ingress CGR with ID %d\n", err,
-+ priv->ingress_cgr.cgrid);
-+ qman_release_cgrid(priv->ingress_cgr.cgrid);
-+ goto out_error;
-+ }
-+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
-+ priv->ingress_cgr.cgrid, priv->mac_dev->addr);
-+
-+ /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
-+ * range), but we have no common initialization path between the
-+ * different variants of the DPAA Eth driver, so we do it here rather
-+ * than modifying every other variant than "private Eth".
-+ */
-+ priv->use_ingress_cgr = true;
-+
-+out_error:
-+ return err;
-+}
-+
-+static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
-+ size_t count)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ int i;
-+
-+ if (netif_msg_probe(priv))
-+ dev_dbg(net_dev->dev.parent,
-+ "Using private BM buffer pools\n");
-+
-+ priv->bp_count = count;
-+
-+ for (i = 0; i < count; i++) {
-+ int err;
-+ err = dpa_bp_alloc(&dpa_bp[i]);
-+ if (err < 0) {
-+ dpa_bp_free(priv);
-+ priv->dpa_bp = NULL;
-+ return err;
-+ }
-+
-+ priv->dpa_bp = &dpa_bp[i];
-+ }
-+
-+ dpa_priv_common_bpid = priv->dpa_bp->bpid;
-+ return 0;
-+}
-+
-+static const struct of_device_id dpa_match[];
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+static int dpa_new_loop_id(void)
-+{
-+ static int if_id;
-+
-+ return if_id++;
-+}
-+#endif
-+
-+static int
-+dpaa_eth_priv_probe(struct platform_device *_of_dev)
-+{
-+ int err = 0, i, channel;
-+ struct device *dev;
-+ struct device_node *dpa_node;
-+ struct dpa_bp *dpa_bp;
-+ size_t count = 1;
-+ struct net_device *net_dev = NULL;
-+ struct dpa_priv_s *priv = NULL;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ struct fm_port_fqs port_fqs;
-+ struct dpa_buffer_layout_s *buf_layout = NULL;
-+ struct mac_device *mac_dev;
-+
-+ dev = &_of_dev->dev;
-+
-+ dpa_node = dev->of_node;
-+
-+ if (!of_device_is_available(dpa_node))
-+ return -ENODEV;
-+
-+	/* Get the buffer pools assigned to this interface;
-+	 * run the default pool probing code only once.
-+	 */
-+ dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
-+ dpa_priv_bp_probe(dev);
-+ if (IS_ERR(dpa_bp))
-+ return PTR_ERR(dpa_bp);
-+
-+ /* Allocate this early, so we can store relevant information in
-+ * the private area (needed by 1588 code in dpa_mac_probe)
-+ */
-+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
-+	if (!net_dev) {
-+		dev_err(dev, "alloc_etherdev_mq() failed\n");
-+		err = -ENOMEM;
-+		goto alloc_etherdev_mq_failed;
-+ }
-+
-+ /* Do this here, so we can be verbose early */
-+ SET_NETDEV_DEV(net_dev, dev);
-+ dev_set_drvdata(dev, net_dev);
-+
-+ priv = netdev_priv(net_dev);
-+ priv->net_dev = net_dev;
-+ strcpy(priv->if_type, "private");
-+
-+ priv->msg_enable = netif_msg_init(debug, -1);
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ priv->loop_id = dpa_new_loop_id();
-+ priv->loop_to = -1; /* disabled by default */
-+ dpa_loop_netdevs[priv->loop_id] = net_dev;
-+#endif
-+
-+ mac_dev = dpa_mac_probe(_of_dev);
-+	if (IS_ERR_OR_NULL(mac_dev)) {
-+		err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
-+ goto mac_probe_failed;
-+ }
-+
-+ /* We have physical ports, so we need to establish
-+ * the buffer layout.
-+ */
-+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
-+ GFP_KERNEL);
-+	if (!buf_layout) {
-+		dev_err(dev, "devm_kzalloc() failed\n");
-+		err = -ENOMEM;
-+		goto alloc_failed;
-+ }
-+ dpa_set_buffers_layout(mac_dev, buf_layout);
-+
-+	/* For private ports, we need to compute the size of the default
-+	 * buffer pool based on the FMan port buffer layout; also update
-+	 * the maximum buffer size for private ports if necessary.
-+	 */
-+ dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
-+ /* We only want to use jumbo frame optimization if we actually have
-+ * L2 MAX FRM set for jumbo frames as well.
-+ */
-+#ifndef CONFIG_PPC
-+ if (likely(!dpaa_errata_a010022))
-+#endif
-+	if (fm_get_max_frm() < 9600)
-+ dev_warn(dev,
-+ "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
-+#endif
-+
-+ INIT_LIST_HEAD(&priv->dpa_fq_list);
-+
-+ memset(&port_fqs, 0, sizeof(port_fqs));
-+
-+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
-+ if (!err)
-+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
-+ &port_fqs, true, TX);
-+
-+ if (err < 0)
-+ goto fq_probe_failed;
-+
-+ /* bp init */
-+
-+ err = dpa_priv_bp_create(net_dev, dpa_bp, count);
-+
-+ if (err < 0)
-+ goto bp_create_failed;
-+
-+ priv->mac_dev = mac_dev;
-+
-+ channel = dpa_get_channel();
-+
-+ if (channel < 0) {
-+ err = channel;
-+ goto get_channel_failed;
-+ }
-+
-+ priv->channel = (uint16_t)channel;
-+ dpaa_eth_add_channel(priv->channel);
-+
-+ dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
-+
-+ /* Create a congestion group for this netdev, with
-+ * dynamically-allocated CGR ID.
-+ * Must be executed after probing the MAC, but before
-+ * assigning the egress FQs to the CGRs.
-+ */
-+ err = dpaa_eth_cgr_init(priv);
-+ if (err < 0) {
-+ dev_err(dev, "Error initializing CGR\n");
-+ goto tx_cgr_init_failed;
-+ }
-+ err = dpaa_eth_priv_ingress_cgr_init(priv);
-+ if (err < 0) {
-+ dev_err(dev, "Error initializing ingress CGR\n");
-+ goto rx_cgr_init_failed;
-+ }
-+
-+ /* Add the FQs to the interface, and make them active */
-+ err = dpa_fqs_init(dev, &priv->dpa_fq_list, false);
-+ if (err < 0)
-+ goto fq_alloc_failed;
-+
-+ priv->buf_layout = buf_layout;
-+ priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
-+ priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
-+
-+ /* All real interfaces need their ports initialized */
-+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
-+ buf_layout, dev);
-+
-+#ifdef CONFIG_FMAN_PFC
-+ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
-+ err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
-+ mac_dev->port_dev[TX], i, i);
-+ if (unlikely(err != 0)) {
-+			dev_err(dev, "Error mapping PFC %u to WQ %u\n", i, i);
-+ goto pfc_mapping_failed;
-+ }
-+ }
-+#endif
-+
-+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
-+
-+ if (priv->percpu_priv == NULL) {
-+ dev_err(dev, "devm_alloc_percpu() failed\n");
-+ err = -ENOMEM;
-+ goto alloc_percpu_failed;
-+ }
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+ memset(percpu_priv, 0, sizeof(*percpu_priv));
-+ }
-+
-+ /* Initialize NAPI */
-+ err = dpa_private_napi_add(net_dev);
-+
-+ if (err < 0)
-+ goto napi_add_failed;
-+
-+ err = dpa_private_netdev_init(net_dev);
-+
-+ if (err < 0)
-+ goto netdev_init_failed;
-+
-+ dpaa_eth_sysfs_init(&net_dev->dev);
-+
-+#ifdef CONFIG_PM
-+ device_set_wakeup_capable(dev, true);
-+#endif
-+
-+ pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
-+
-+ return 0;
-+
-+netdev_init_failed:
-+napi_add_failed:
-+ dpa_private_napi_del(net_dev);
-+alloc_percpu_failed:
-+#ifdef CONFIG_FMAN_PFC
-+pfc_mapping_failed:
-+#endif
-+ dpa_fq_free(dev, &priv->dpa_fq_list);
-+fq_alloc_failed:
-+ qman_delete_cgr_safe(&priv->ingress_cgr);
-+ qman_release_cgrid(priv->ingress_cgr.cgrid);
-+rx_cgr_init_failed:
-+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
-+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
-+tx_cgr_init_failed:
-+get_channel_failed:
-+ dpa_bp_free(priv);
-+bp_create_failed:
-+fq_probe_failed:
-+alloc_failed:
-+mac_probe_failed:
-+ dev_set_drvdata(dev, NULL);
-+ free_netdev(net_dev);
-+alloc_etherdev_mq_failed:
-+ if (atomic_read(&dpa_bp->refs) == 0)
-+ devm_kfree(dev, dpa_bp);
-+
-+ return err;
-+}
-+
-+static const struct of_device_id dpa_match[] = {
-+ {
-+ .compatible = "fsl,dpa-ethernet"
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, dpa_match);
-+
-+static struct platform_driver dpa_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .of_match_table = dpa_match,
-+ .owner = THIS_MODULE,
-+ .pm = DPAA_PM_OPS,
-+ },
-+ .probe = dpaa_eth_priv_probe,
-+ .remove = dpa_remove
-+};
-+
-+#ifndef CONFIG_PPC
-+static bool __init __cold soc_has_errata_a010022(void)
-+{
-+#ifdef CONFIG_SOC_BUS
-+	const struct soc_device_attribute soc_a010022_matches[] = {
-+	{ .family = "QorIQ LS1043A",
-+	.data = NULL },
-+	{ },
-+	};
-+
-+	if (soc_device_match(soc_a010022_matches))
-+ return true;
-+
-+ return false;
-+#else
-+ return true; /* cannot identify SoC */
-+#endif
-+}
-+#endif
-+
-+static int __init __cold dpa_load(void)
-+{
-+ int _errno;
-+
-+ pr_info(DPA_DESCRIPTION "\n");
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ dpa_debugfs_module_init();
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+ /* initialise dpaa_eth mirror values */
-+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
-+ dpa_max_frm = fm_get_max_frm();
-+ dpa_num_cpus = num_possible_cpus();
-+
-+#ifndef CONFIG_PPC
-+ /* Detect if the current SoC requires the 4K alignment workaround */
-+ dpaa_errata_a010022 = soc_has_errata_a010022();
-+#endif
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
-+#endif
-+
-+ _errno = platform_driver_register(&dpa_driver);
-+ if (unlikely(_errno < 0)) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
-+ }
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ return _errno;
-+}
-+module_init(dpa_load);
-+
-+static void __exit __cold dpa_unload(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ platform_driver_unregister(&dpa_driver);
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ dpa_debugfs_module_exit();
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+	/* Only one channel is used and needs to be released after all
-+	 * interfaces are removed.
-+	 */
-+ dpa_release_channel();
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+}
-+module_exit(dpa_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
-@@ -0,0 +1,697 @@
-+/* Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPA_H
-+#define __DPA_H
-+
-+#include <linux/netdevice.h>
-+#include <linux/fsl_qman.h> /* struct qman_fq */
-+
-+#include "fm_ext.h"
-+#include "dpaa_eth_trace.h"
-+
-+extern int dpa_rx_extra_headroom;
-+extern int dpa_max_frm;
-+extern int dpa_num_cpus;
-+
-+#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
-+#define dpa_get_max_frm() dpa_max_frm
-+
-+#define dpa_get_max_mtu() \
-+ (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
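-+/* Example: with fm_get_max_frm() at its typical default of 1522, this
-+ * yields 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN) = 1522 - (18 + 4) = 1500,
-+ * the standard Ethernet MTU.
-+ */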
-+
-+#define __hot
-+
-+/* Simple enum of FQ types - used for array indexing */
-+enum port_type {RX, TX};
-+
-+/* TODO: This structure should be renamed & moved to the FMD wrapper */
-+struct dpa_buffer_layout_s {
-+ uint16_t priv_data_size;
-+ bool parse_results;
-+ bool time_stamp;
-+ bool hash_results;
-+ uint8_t manip_extra_space;
-+ uint16_t data_align;
-+};
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define DPA_BUG_ON(cond) BUG_ON(cond)
-+#else
-+#define DPA_BUG_ON(cond)
-+#endif
-+
-+#define DPA_TX_PRIV_DATA_SIZE 16
-+#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
-+#define DPA_TIME_STAMP_SIZE 8
-+#define DPA_HASH_RESULTS_SIZE 8
-+#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
-+ dpa_get_rx_extra_headroom())
-+
-+#define FM_FD_STAT_RX_ERRORS \
-+ (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
-+ FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
-+ FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
-+ FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
-+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
-+
-+#define FM_FD_STAT_TX_ERRORS \
-+ (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
-+ FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
-+
-+#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
-+/* The raw buffer size must be cacheline aligned.
-+ * Normally we use 2K buffers.
-+ */
-+#define DPA_BP_RAW_SIZE 2048
-+#else
-+/* For jumbo frame optimizations, use buffers large enough to accommodate
-+ * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
-+ * space to account for further alignments.
-+ */
-+#define DPA_MAX_FRM_SIZE 9600
-+#ifdef CONFIG_PPC
-+#define DPA_BP_RAW_SIZE \
-+ ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
-+ sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
-+#else /* CONFIG_PPC */
-+#define DPA_BP_RAW_SIZE ((unlikely(dpaa_errata_a010022)) ? 2048 : \
-+ ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
-+ sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1)))
-+#endif /* CONFIG_PPC */
-+#endif /* CONFIG_FSL_DPAA_ETH_JUMBO_FRAME */
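-+/* Worked example for the jumbo PPC branch above (assuming the
-+ * configuration-dependent values SMP_CACHE_BYTES == 64 and
-+ * sizeof(struct skb_shared_info) == 320): 9600 + 511 + 320 + 128 = 10559,
-+ * masked down to a cacheline multiple gives a raw buffer size of
-+ * 10496 bytes.
-+ */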
-+
-+/* This is the most FMan is ever allowed to use.
-+ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
-+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
-+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
-+ * half-page-aligned buffers (can we?), so we reserve some more space
-+ * for start-of-buffer alignment.
-+ */
-+#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
-+ SMP_CACHE_BYTES)
-+/* We must ensure that skb_shinfo is always cacheline-aligned. */
-+#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
-+
-+/* Maximum size of a buffer for which recycling is allowed.
-+ * We need an upper limit such that forwarded skbs that get reallocated on Tx
-+ * aren't allowed to grow unboundedly. On the other hand, we need to make sure
-+ * that skbs allocated by us will not fail to be recycled due to their size.
-+ *
-+ * For a requested size, the kernel allocator provides the next power of two
-+ * sized block, which the stack will use as is, regardless of the actual size
-+ * it required; since we must accommodate at most 9.6K buffers (L2 maximum
-+ * supported frame size), set the recycling upper limit to 16K.
-+ */
-+#define DPA_RECYCLE_MAX_SIZE 16384
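-+/* Example: a request sized for a 9.6K frame makes the allocator hand out
-+ * a 16K block, which is still within the recycling limit; a forwarded skb
-+ * whose buffer grew to a 32K block on Tx realloc would be refused.
-+ */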
-+
-+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
-+/* TODO: temporary for FMan PCD testing */
-+#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
-+#endif
-+
-+#define DPAA_ETH_FQ_DELTA 0x10000
-+
-+#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
-+ (((device_addr) & 0x1fffff) >> 6)
-+
-+#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \
-+ (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr))
-+
-+/* Largest value that the FQD's OAL field can hold.
-+ * This is DPAA-1.x specific.
-+ * TODO: This rather belongs in fsl_qman.h
-+ */
-+#define FSL_QMAN_MAX_OAL 127
-+
-+/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
-+#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
-+
-+/* Default alignment for start of data in an Rx FD */
-+#define DPA_FD_DATA_ALIGNMENT 16
-+
-+/* Values for the L3R field of the FM Parse Results
-+ */
-+/* L3 Type field: First IP Present IPv4 */
-+#define FM_L3_PARSE_RESULT_IPV4 0x8000
-+/* L3 Type field: First IP Present IPv6 */
-+#define FM_L3_PARSE_RESULT_IPV6 0x4000
-+
-+/* Values for the L4R field of the FM Parse Results
-+ * See §8.8.4.7.20 - L4 HXS - L4 Results from the DPAA-Rev2 Reference Manual.
-+ */
-+/* L4 Type field: UDP */
-+#define FM_L4_PARSE_RESULT_UDP 0x40
-+/* L4 Type field: TCP */
-+#define FM_L4_PARSE_RESULT_TCP 0x20
-+/* FD status field indicating whether the FM Parser has attempted to validate
-+ * the L4 csum of the frame.
-+ * Note that having this bit set doesn't necessarily imply that the checksum
-+ * is valid. One would have to check the parse results to find that out.
-+ */
-+#define FM_FD_STAT_L4CV 0x00000004
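-+/* Usage sketch, mirroring the Rx checksum logic elsewhere in this driver:
-+ * only when FM_FD_STAT_L4CV is set in fd->status and the parse results
-+ * indicate TCP/UDP may the driver report
-+ * skb->ip_summed = CHECKSUM_UNNECESSARY; in all other cases it falls back
-+ * to CHECKSUM_NONE.
-+ */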
-+
-+#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
-+
-+/* Check if the parsed frame was found to be a TCP segment.
-+ *
-+ * @parse_result_ptr must be of type (fm_prs_result_t *).
-+ */
-+#define fm_l4_frame_is_tcp(parse_result_ptr) \
-+ ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
-+
-+/* number of Tx queues to FMan */
-+#ifdef CONFIG_FMAN_PFC
-+#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT)
-+#else
-+#define DPAA_ETH_TX_QUEUES NR_CPUS
-+#endif
-+
-+#define DPAA_ETH_RX_QUEUES 128
-+
-+/* Convenience macros for storing/retrieving the skb back-pointers. They must
-+ * accommodate both recycling and confirmation paths - i.e. cases when the buf
-+ * was allocated by ourselves, respectively by the stack. In the former case,
-+ * we could store the skb at negative offset; in the latter case, we can't,
-+ * so we'll use 0 as offset.
-+ *
-+ * NB: @off is an offset from a (struct sk_buff **) pointer!
-+ */
-+#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
-+{ \
-+ skbh = (struct sk_buff **)addr; \
-+ *(skbh + (off)) = skb; \
-+}
-+#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
-+{ \
-+ skbh = (struct sk_buff **)addr; \
-+ skb = *(skbh + (off)); \
-+}
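-+
-+/* Minimal usage sketch (buf_start is a hypothetical local name): the Tx
-+ * path stashes the skb pointer at the start of the buffer,
-+ *
-+ *	struct sk_buff **skbh;
-+ *	DPA_WRITE_SKB_PTR(skb, skbh, buf_start, 0);
-+ *
-+ * and the confirmation path recovers it later with
-+ * DPA_READ_SKB_PTR(skb, skbh, buf_start, 0).
-+ */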
-+
-+#ifdef CONFIG_PM
-+/* Magic Packet wakeup */
-+#define DPAA_WOL_MAGIC 0x00000001
-+#endif
-+
-+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
-+struct pcd_range {
-+ uint32_t base;
-+ uint32_t count;
-+};
-+#endif
-+
-+/* More detailed FQ types - used for fine-grained WQ assignments */
-+enum dpa_fq_type {
-+ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
-+ FQ_TYPE_RX_ERROR, /* Rx Error FQs */
-+ FQ_TYPE_RX_PCD, /* User-defined PCDs */
-+ FQ_TYPE_TX, /* "Real" Tx FQs */
-+ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
-+ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
-+ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
-+ FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */
-+};
-+
-+struct dpa_fq {
-+ struct qman_fq fq_base;
-+ struct list_head list;
-+ struct net_device *net_dev;
-+ bool init;
-+ uint32_t fqid;
-+ uint32_t flags;
-+ uint16_t channel;
-+ uint8_t wq;
-+ enum dpa_fq_type fq_type;
-+};
-+
-+struct dpa_fq_cbs_t {
-+ struct qman_fq rx_defq;
-+ struct qman_fq tx_defq;
-+ struct qman_fq rx_errq;
-+ struct qman_fq tx_errq;
-+ struct qman_fq egress_ern;
-+};
-+
-+struct fqid_cell {
-+ uint32_t start;
-+ uint32_t count;
-+};
-+
-+struct dpa_bp {
-+ struct bman_pool *pool;
-+ uint8_t bpid;
-+ struct device *dev;
-+ union {
-+ /* The buffer pools used for the private ports are initialized
-+ * with target_count buffers for each CPU; at runtime the
-+ * number of buffers per CPU is constantly brought back to this
-+ * level
-+ */
-+ int target_count;
-+ /* The configured value for the number of buffers in the pool,
-+ * used for shared port buffer pools
-+ */
-+ int config_count;
-+ };
-+ size_t size;
-+ bool seed_pool;
-+ /* physical address of the contiguous memory used by the pool to store
-+ * the buffers
-+ */
-+ dma_addr_t paddr;
-+ /* virtual address of the contiguous memory used by the pool to store
-+ * the buffers
-+ */
-+ void __iomem *vaddr;
-+	/* current number of buffers in the bpool allotted to this CPU */
-+ int __percpu *percpu_count;
-+ atomic_t refs;
-+ /* some bpools need to be seeded before use by this cb */
-+ int (*seed_cb)(struct dpa_bp *);
-+ /* some bpools need to be emptied before freeing; this cb is used
-+ * for freeing of individual buffers taken from the pool
-+ */
-+ void (*free_buf_cb)(void *addr);
-+};
-+
-+struct dpa_rx_errors {
-+ u64 dme; /* DMA Error */
-+ u64 fpe; /* Frame Physical Error */
-+ u64 fse; /* Frame Size Error */
-+ u64 phe; /* Header Error */
-+ u64 cse; /* Checksum Validation Error */
-+};
-+
-+/* Counters for QMan ERN frames - one counter per rejection code */
-+struct dpa_ern_cnt {
-+ u64 cg_tdrop; /* Congestion group taildrop */
-+ u64 wred; /* WRED congestion */
-+ u64 err_cond; /* Error condition */
-+ u64 early_window; /* Order restoration, frame too early */
-+ u64 late_window; /* Order restoration, frame too late */
-+ u64 fq_tdrop; /* FQ taildrop */
-+ u64 fq_retired; /* FQ is retired */
-+ u64 orp_zero; /* ORP disabled */
-+};
-+
-+struct dpa_napi_portal {
-+ struct napi_struct napi;
-+ struct qman_portal *p;
-+};
-+
-+struct dpa_percpu_priv_s {
-+ struct net_device *net_dev;
-+ struct dpa_napi_portal *np;
-+ u64 in_interrupt;
-+ u64 tx_returned;
-+ u64 tx_confirm;
-+ /* fragmented (non-linear) skbuffs received from the stack */
-+ u64 tx_frag_skbuffs;
-+ /* number of S/G frames received */
-+ u64 rx_sg;
-+
-+ struct rtnl_link_stats64 stats;
-+ struct dpa_rx_errors rx_errors;
-+ struct dpa_ern_cnt ern_cnt;
-+};
-+
-+struct dpa_priv_s {
-+ struct dpa_percpu_priv_s __percpu *percpu_priv;
-+ struct dpa_bp *dpa_bp;
-+ /* Store here the needed Tx headroom for convenience and speed
-+ * (even though it can be computed based on the fields of buf_layout)
-+ */
-+ uint16_t tx_headroom;
-+ struct net_device *net_dev;
-+ struct mac_device *mac_dev;
-+ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
-+ struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
-+
-+ size_t bp_count;
-+
-+ uint16_t channel; /* "fsl,qman-channel-id" */
-+ struct list_head dpa_fq_list;
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ struct dentry *debugfs_loop_file;
-+#endif
-+
-+ uint32_t msg_enable; /* net_device message level */
-+#ifdef CONFIG_FSL_DPAA_1588
-+ struct dpa_ptp_tsu *tsu;
-+#endif
-+
-+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
-+/* TODO: this is temporary until pcd support is implemented in dpaa */
-+ int priv_pcd_num_ranges;
-+ struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES];
-+#endif
-+
-+ struct {
-+ /**
-+ * All egress queues to a given net device belong to one
-+ * (and the same) congestion group.
-+ */
-+ struct qman_cgr cgr;
-+ /* If congested, when it began. Used for performance stats. */
-+ u32 congestion_start_jiffies;
-+ /* Number of jiffies the Tx port was congested. */
-+ u32 congested_jiffies;
-+ /**
-+ * Counter for the number of times the CGR
-+ * entered congestion state
-+ */
-+ u32 cgr_congested_count;
-+ } cgr_data;
-+ /* Use a per-port CGR for ingress traffic. */
-+ bool use_ingress_cgr;
-+ struct qman_cgr ingress_cgr;
-+
-+#ifdef CONFIG_FSL_DPAA_TS
-+ bool ts_tx_en; /* Tx timestamping enabled */
-+ bool ts_rx_en; /* Rx timestamping enabled */
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+ struct dpa_buffer_layout_s *buf_layout;
-+ uint16_t rx_headroom;
-+ char if_type[30];
-+
-+ void *peer;
-+#ifdef CONFIG_PM
-+ u32 wol;
-+#endif
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ int loop_id;
-+ int loop_to;
-+#endif
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+ bool ceetm_en; /* CEETM QoS enabled */
-+#endif
-+};
-+
-+struct fm_port_fqs {
-+ struct dpa_fq *tx_defq;
-+ struct dpa_fq *tx_errq;
-+ struct dpa_fq *rx_defq;
-+ struct dpa_fq *rx_errq;
-+};
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+extern struct net_device *dpa_loop_netdevs[20];
-+#endif
-+
-+/* functions with different implementation for SG and non-SG: */
-+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
-+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
-+void __hot _dpa_rx(struct net_device *net_dev,
-+ struct qman_portal *portal,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid,
-+ int *count_ptr);
-+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
-+int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
-+ struct qman_fq *egress_fq, struct qman_fq *conf_fq);
-+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
-+ const struct qm_fd *fd);
-+void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
-+ const struct qm_fd *fd,
-+ struct sk_buff *skb,
-+ int *use_gro);
-+#ifndef CONFIG_FSL_DPAA_TS
-+bool dpa_skb_is_recyclable(struct sk_buff *skb);
-+bool dpa_buf_is_recyclable(struct sk_buff *skb,
-+ uint32_t min_size,
-+ uint16_t min_offset,
-+ unsigned char **new_buf_start);
-+#endif
-+int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd,
-+ int *count_ptr, int *offset);
-+int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd);
-+int __cold __attribute__((nonnull))
-+ _dpa_fq_free(struct device *dev, struct qman_fq *fq);
-+
-+/* Turn on HW checksum computation for this outgoing frame.
-+ * If the current protocol is not something we support in this regard
-+ * (or if the stack has already computed the SW checksum), we do nothing.
-+ *
-+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
-+ * otherwise.
-+ *
-+ * Note that this function may modify the fd->cmd field and the skb data buffer
-+ * (the Parse Results area).
-+ */
-+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
-+
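-+/* Returns nonzero when a NAPI poll was scheduled (the calling DQRR
-+ * callback must then return qman_cb_dqrr_stop) and 0 when the frame can
-+ * be consumed directly in the current softirq context.
-+ */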
-+static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
-+ struct qman_portal *portal)
-+{
-+	/* On RT-enabled kernels with threaded ISRs, in_irq() does not
-+	 * return the appropriate value, so use in_serving_softirq() to
-+	 * distinguish between softirq and irq context.
-+	 */
-+ if (unlikely(in_irq() || !in_serving_softirq())) {
-+ /* Disable QMan IRQ and invoke NAPI */
-+ int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
-+ if (likely(!ret)) {
-+ const struct qman_portal_config *pc =
-+ qman_p_get_portal_config(portal);
-+ struct dpa_napi_portal *np =
-+ &percpu_priv->np[pc->index];
-+
-+ np->p = portal;
-+ napi_schedule(&np->napi);
-+ percpu_priv->in_interrupt++;
-+ return 1;
-+ }
-+ }
-+ return 0;
-+}
-+
-+static inline ssize_t __const __must_check __attribute__((nonnull))
-+dpa_fd_length(const struct qm_fd *fd)
-+{
-+ return fd->length20;
-+}
-+
-+static inline ssize_t __const __must_check __attribute__((nonnull))
-+dpa_fd_offset(const struct qm_fd *fd)
-+{
-+ return fd->offset;
-+}
-+
-+/* Verifies that the skb length does not exceed the interface MTU
-+ * (4 extra bytes are allowed for VLAN-tagged frames)
-+ */
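-+/* Example: with mtu = 1500, a 1504-byte 802.1Q-tagged frame is accepted
-+ * (the 4-byte VLAN tag is tolerated), while a 1504-byte untagged frame
-+ * is rejected.
-+ */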
-+static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
-+{
-+ if (unlikely(skb->len > mtu))
-+ if ((skb->protocol != htons(ETH_P_8021Q))
-+ || (skb->len > mtu + 4))
-+ return -1;
-+
-+ return 0;
-+}
-+
-+static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl)
-+{
-+ uint16_t headroom;
-+ /* The frame headroom must accommodate:
-+ * - the driver private data area
-+ * - parse results, hash results, timestamp if selected
-+ * - manip extra space
-+	 * If either hash results or timestamp is selected, both will
-+	 * be copied to/from the frame headroom, as the TS is located between
-+	 * the PR and HR in the IC, and the IC copy size has a granularity of
-+	 * 16 bytes (see the FMBM_RICP and FMBM_TICP register descriptions in
-+	 * the DPAA Reference Manual).
-+ *
-+ * Also make sure the headroom is a multiple of data_align bytes
-+ */
-+ headroom = (uint16_t)(bl->priv_data_size +
-+ (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
-+ (bl->hash_results || bl->time_stamp ?
-+ DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) +
-+ bl->manip_extra_space);
-+
-+ return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
-+}
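-+
-+/* Worked example for dpa_get_headroom(), assuming a hypothetical
-+ * sizeof(fm_prs_result_t) == 32: priv_data_size = 16, parse_results and
-+ * hash_results set, data_align = 16 gives 16 + 32 + (8 + 8) = 64,
-+ * already 16-byte aligned, so headroom = 64.
-+ */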
-+
-+int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n);
-+int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n);
-+int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n);
-+
-+void dpaa_eth_sysfs_remove(struct device *dev);
-+void dpaa_eth_sysfs_init(struct device *dev);
-+int dpaa_eth_poll(struct napi_struct *napi, int budget);
-+
-+void dpa_private_napi_del(struct net_device *net_dev);
-+
-+/* Equivalent to a memset(0), but works faster */
-+static inline void clear_fd(struct qm_fd *fd)
-+{
-+ fd->opaque_addr = 0;
-+ fd->opaque = 0;
-+ fd->cmd = 0;
-+}
-+
-+static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
-+ struct qman_fq *tx_fq)
-+{
-+ int i;
-+
-+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
-+ if (priv->egress_fqs[i] == tx_fq)
-+ return i;
-+
-+ return -EINVAL;
-+}
-+
-+static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
-+ struct rtnl_link_stats64 *percpu_stats,
-+ struct qm_fd *fd, struct qman_fq *egress_fq,
-+ struct qman_fq *conf_fq)
-+{
-+ int err, i;
-+
-+ if (fd->bpid == 0xff)
-+ fd->cmd |= qman_fq_fqid(conf_fq);
-+
-+ /* Trace this Tx fd */
-+ trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
-+
-+ for (i = 0; i < 100000; i++) {
-+ err = qman_enqueue(egress_fq, fd, 0);
-+ if (err != -EBUSY)
-+ break;
-+ }
-+
-+ if (unlikely(err < 0)) {
-+ /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
-+ percpu_stats->tx_errors++;
-+ percpu_stats->tx_fifo_errors++;
-+ return err;
-+ }
-+
-+ percpu_stats->tx_packets++;
-+ percpu_stats->tx_bytes += dpa_fd_length(fd);
-+
-+ return 0;
-+}
-+
-+/* Use multiple WQs for FQ assignment:
-+ * - Tx Confirmation queues go to WQ1.
-+ * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between
-+ * Rx and Tx traffic, or between Rx Default and Rx PCD frames).
-+ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
-+ * to be scheduled, in case there are many more FQs in WQ3).
-+ * This ensures that Tx-confirmed buffers are timely released. In particular,
-+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
-+ * are greatly outnumbered by other FQs in the system (usually PCDs), while
-+ * dequeue scheduling is round-robin.
-+ */
-+static inline void _dpa_assign_wq(struct dpa_fq *fq)
-+{
-+ switch (fq->fq_type) {
-+ case FQ_TYPE_TX_CONFIRM:
-+ case FQ_TYPE_TX_CONF_MQ:
-+ fq->wq = 1;
-+ break;
-+ case FQ_TYPE_RX_DEFAULT:
-+ case FQ_TYPE_TX:
-+ fq->wq = 3;
-+ break;
-+ case FQ_TYPE_RX_ERROR:
-+ case FQ_TYPE_TX_ERROR:
-+ case FQ_TYPE_RX_PCD_HI_PRIO:
-+ fq->wq = 2;
-+ break;
-+ case FQ_TYPE_RX_PCD:
-+ fq->wq = 5;
-+ break;
-+ default:
-+ WARN(1, "Invalid FQ type %d for FQID %d!\n",
-+ fq->fq_type, fq->fqid);
-+ }
-+}
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+/* Use in lieu of skb_get_queue_mapping() */
-+#ifdef CONFIG_FMAN_PFC
-+#define dpa_get_queue_mapping(skb) \
-+ (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
-+ ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
-+ ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
-+	 dpa_num_cpus + smp_processor_id()))
-+
-+#else
-+#define dpa_get_queue_mapping(skb) \
-+ raw_smp_processor_id()
-+#endif
-+#else
-+/* Use the queue selected by XPS */
-+#define dpa_get_queue_mapping(skb) \
-+ skb_get_queue_mapping(skb)
-+#endif
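-+
-+/* Example for the PFC variant, with the hypothetical values
-+ * CONFIG_FMAN_PFC_COS_COUNT = 3 and dpa_num_cpus = 4: an skb of
-+ * priority 1 sent from CPU 2 maps to Tx queue 1 * 4 + 2 = 6, while any
-+ * priority >= 3 is clamped to the last class of service.
-+ */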
-+
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+struct ptp_priv_s {
-+ struct device_node *node;
-+ struct platform_device *of_dev;
-+ struct mac_device *mac_dev;
-+};
-+extern struct ptp_priv_s ptp_priv;
-+#endif
-+
-+static inline void _dpa_bp_free_pf(void *addr)
-+{
-+ put_page(virt_to_head_page(addr));
-+}
-+
-+/* TODO: the LS1043A SoC has a HW issue regarding FMan DMA transactions.
-+ * The issue manifests itself at high traffic rates when frames cross 4K
-+ * memory boundaries; for the moment, we use a SW workaround to avoid
-+ * frames larger than 4K or frames that cross 4K address boundaries.
-+ */
-+
-+#ifndef CONFIG_PPC
-+extern bool dpaa_errata_a010022; /* SoC affected by A010022 errata */
-+
-+#define HAS_DMA_ISSUE(start, size) \
-+ (((u64)(start) + (size)) > (((u64)(start) + 0x1000) & ~0xFFF))
-+#define BOUNDARY_4K(start, size) (((u64)(start) + (u64)(size)) & ~0xFFF)
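-+
-+/* Example: start = 0xF00 with size = 0x200 ends at 0x1100, past the next
-+ * 4K boundary at 0x1000, so HAS_DMA_ISSUE(0xF00, 0x200) is true and the
-+ * buffer must be avoided or realigned on the affected SoC.
-+ */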
-+
-+#endif /* !CONFIG_PPC */
-+
-+#endif /* __DPA_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
-@@ -0,0 +1,263 @@
-+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/io.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_net.h>
-+#include <linux/etherdevice.h>
-+#include <linux/kthread.h>
-+#include <linux/percpu.h>
-+#include <linux/highmem.h>
-+#include <linux/sort.h>
-+#include <linux/fsl_qman.h>
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#include "dpaa_eth_base.h"
-+
-+#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:"
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+uint8_t advanced_debug = -1;
-+module_param(advanced_debug, byte, S_IRUGO);
-+MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level");
-+EXPORT_SYMBOL(advanced_debug);
-+
-+static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
-+{
-+ return ((struct dpa_bp *)dpa_bp0)->size -
-+ ((struct dpa_bp *)dpa_bp1)->size;
-+}
-+
-+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
-+dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
-+{
-+ int i, lenp, na, ns, err;
-+ struct device *dev;
-+ struct device_node *dev_node;
-+ const __be32 *bpool_cfg;
-+ struct dpa_bp *dpa_bp;
-+ u32 bpid;
-+
-+ dev = &_of_dev->dev;
-+
-+	/* of_count_phandle_with_args() may return a negative errno; evaluate
-+	 * it in a signed variable before storing it into the size_t count.
-+	 */
-+	err = of_count_phandle_with_args(dev->of_node,
-+			"fsl,bman-buffer-pools", NULL);
-+	if (err < 1) {
-+		dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n");
-+		return ERR_PTR(-EINVAL);
-+	}
-+	*count = err;
-+
-+ dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
-+ if (dpa_bp == NULL) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ dev_node = of_find_node_by_path("/");
-+ if (unlikely(dev_node == NULL)) {
-+ dev_err(dev, "of_find_node_by_path(/) failed\n");
-+ return ERR_PTR(-EINVAL);
-+ }
-+
-+ na = of_n_addr_cells(dev_node);
-+ ns = of_n_size_cells(dev_node);
-+
-+ for (i = 0; i < *count; i++) {
-+ of_node_put(dev_node);
-+
-+ dev_node = of_parse_phandle(dev->of_node,
-+ "fsl,bman-buffer-pools", i);
-+ if (dev_node == NULL) {
-+			dev_err(dev, "of_parse_phandle() failed\n");
-+ return ERR_PTR(-EFAULT);
-+ }
-+
-+ if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
-+ dev_err(dev,
-+ "!of_device_is_compatible(%s, fsl,bpool)\n",
-+ dev_node->full_name);
-+ dpa_bp = ERR_PTR(-EINVAL);
-+ goto _return_of_node_put;
-+ }
-+
-+ err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
-+ if (err) {
-+ dev_err(dev, "Cannot find buffer pool ID in the device tree\n");
-+ dpa_bp = ERR_PTR(-EINVAL);
-+ goto _return_of_node_put;
-+ }
-+ dpa_bp[i].bpid = (uint8_t)bpid;
-+
-+ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
-+ &lenp);
-+ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
-+ const uint32_t *seed_pool;
-+
-+ dpa_bp[i].config_count =
-+ (int)of_read_number(bpool_cfg, ns);
-+ dpa_bp[i].size =
-+ (size_t)of_read_number(bpool_cfg + ns, ns);
-+ dpa_bp[i].paddr =
-+ of_read_number(bpool_cfg + 2 * ns, na);
-+
-+ seed_pool = of_get_property(dev_node,
-+ "fsl,bpool-ethernet-seeds", &lenp);
-+ dpa_bp[i].seed_pool = !!seed_pool;
-+
-+ } else {
-+ dev_err(dev,
-+ "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
-+ dev_node->full_name);
-+ dpa_bp = ERR_PTR(-EINVAL);
-+ goto _return_of_node_put;
-+ }
-+ }
-+
-+ sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
-+
-+ return dpa_bp;
-+
-+_return_of_node_put:
-+ if (dev_node)
-+ of_node_put(dev_node);
-+
-+ return dpa_bp;
-+}
-+EXPORT_SYMBOL(dpa_bp_probe);
-+
-+int dpa_bp_shared_port_seed(struct dpa_bp *bp)
-+{
-+ void __iomem **ptr;
-+
-+ /* In MAC-less and Shared-MAC scenarios the physical
-+ * address of the buffer pool in device tree is set
-+ * to 0 to specify that another entity (USDPAA) will
-+ * allocate and seed the buffers
-+ */
-+ if (!bp->paddr)
-+ return 0;
-+
-+ /* allocate memory region for buffers */
-+ devm_request_mem_region(bp->dev, bp->paddr,
-+ bp->size * bp->config_count, KBUILD_MODNAME);
-+ /* managed ioremap unmapping */
-+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
-+ if (!ptr)
-+		return -ENOMEM;
-+#ifndef CONFIG_PPC
-+ bp->vaddr = ioremap_cache_ns(bp->paddr, bp->size * bp->config_count);
-+#else
-+ bp->vaddr = ioremap_prot(bp->paddr, bp->size * bp->config_count, 0);
-+#endif
-+ if (bp->vaddr == NULL) {
-+ pr_err("Could not map memory for pool %d\n", bp->bpid);
-+ devres_free(ptr);
-+ return -EIO;
-+ }
-+ *ptr = bp->vaddr;
-+ devres_add(bp->dev, ptr);
-+
-+ /* seed pool with buffers from that memory region */
-+ if (bp->seed_pool) {
-+ int count = bp->target_count;
-+ dma_addr_t addr = bp->paddr;
-+
-+ while (count) {
-+ struct bm_buffer bufs[8];
-+ uint8_t num_bufs = 0;
-+
-+ do {
-+ BUG_ON(addr > 0xffffffffffffull);
-+ bufs[num_bufs].bpid = bp->bpid;
-+ bm_buffer_set64(&bufs[num_bufs++], addr);
-+ addr += bp->size;
-+
-+ } while (--count && (num_bufs < 8));
-+
-+ while (bman_release(bp->pool, bufs, num_bufs, 0))
-+ cpu_relax();
-+ }
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_bp_shared_port_seed);
-+
-+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
-+ size_t count)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ int i;
-+
-+ priv->dpa_bp = dpa_bp;
-+ priv->bp_count = count;
-+
-+ for (i = 0; i < count; i++) {
-+ int err;
-+ err = dpa_bp_alloc(&dpa_bp[i]);
-+ if (err < 0) {
-+ dpa_bp_free(priv);
-+ priv->dpa_bp = NULL;
-+ return err;
-+ }
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_bp_create);
-+
-+static int __init __cold dpa_advanced_load(void)
-+{
-+ pr_info(DPA_DESCRIPTION "\n");
-+
-+ return 0;
-+}
-+module_init(dpa_advanced_load);
-+
-+static void __exit __cold dpa_advanced_unload(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+}
-+module_exit(dpa_advanced_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
-@@ -0,0 +1,50 @@
-+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPAA_ETH_BASE_H
-+#define __DPAA_ETH_BASE_H
-+
-+#include <linux/etherdevice.h> /* struct net_device */
-+#include <linux/fsl_bman.h> /* struct bm_buffer */
-+#include <linux/of_platform.h> /* struct platform_device */
-+#include <linux/net_tstamp.h> /* struct hwtstamp_config */
-+
-+extern uint8_t advanced_debug;
-+extern const struct dpa_fq_cbs_t shared_fq_cbs;
-+extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
-+
-+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
-+dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
-+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
-+ size_t count);
-+int dpa_bp_shared_port_seed(struct dpa_bp *bp);
-+
-+#endif /* __DPAA_ETH_BASE_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
-@@ -0,0 +1,1991 @@
-+/* Copyright 2008-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/init.h>
-+#include "dpaa_eth_ceetm.h"
-+
-+#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc"
-+
-+const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = {
-+ [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) },
-+ [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) },
-+};
-+
-+struct Qdisc_ops ceetm_qdisc_ops;
-+
-+/* Obtain the DCP and the SP ids from the FMan port */
-+static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
-+ unsigned int *sp_id)
-+{
-+ uint32_t channel;
-+ t_LnxWrpFmPortDev *port_dev;
-+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
-+ struct mac_device *mac_dev = dpa_priv->mac_dev;
-+
-+ port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX];
-+ channel = port_dev->txCh;
-+
-+ *sp_id = channel & CHANNEL_SP_MASK;
-+ pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id);
-+
-+ if (channel < DCP0_MAX_CHANNEL) {
-+ *dcp_id = qm_dc_portal_fman0;
-+ pr_debug(KBUILD_BASENAME " : DCP ID 0\n");
-+ } else {
-+ *dcp_id = qm_dc_portal_fman1;
-+ pr_debug(KBUILD_BASENAME " : DCP ID 1\n");
-+ }
-+}
-+
-+/* Enqueue Rejection Notification callback */
-+static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
-+ const struct qm_mr_entry *msg)
-+{
-+ struct net_device *net_dev;
-+ struct ceetm_class *cls;
-+ struct ceetm_class_stats *cstats = NULL;
-+ const struct dpa_priv_s *dpa_priv;
-+ struct dpa_percpu_priv_s *dpa_percpu_priv;
-+ struct sk_buff *skb;
-+ struct qm_fd fd = msg->ern.fd;
-+
-+ net_dev = ((struct ceetm_fq *)fq)->net_dev;
-+ dpa_priv = netdev_priv(net_dev);
-+ dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
-+
-+ /* Increment DPA counters */
-+ dpa_percpu_priv->stats.tx_dropped++;
-+ dpa_percpu_priv->stats.tx_fifo_errors++;
-+
-+ /* Increment CEETM counters */
-+ cls = ((struct ceetm_fq *)fq)->ceetm_cls;
-+ switch (cls->type) {
-+ case CEETM_PRIO:
-+ cstats = this_cpu_ptr(cls->prio.cstats);
-+ break;
-+ case CEETM_WBFS:
-+ cstats = this_cpu_ptr(cls->wbfs.cstats);
-+ break;
-+ }
-+
-+ if (cstats)
-+ cstats->ern_drop_count++;
-+
-+ if (fd.bpid != 0xff) {
-+ dpa_fd_release(net_dev, &fd);
-+ return;
-+ }
-+
-+ skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
-+ dev_kfree_skb_any(skb);
-+}
-+
-+/* Congestion State Change Notification callback */
-+static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
-+{
-+ struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx;
-+ struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev);
-+ struct ceetm_class *cls = ceetm_fq->ceetm_cls;
-+ struct ceetm_class_stats *cstats = NULL;
-+
-+ switch (cls->type) {
-+ case CEETM_PRIO:
-+ cstats = this_cpu_ptr(cls->prio.cstats);
-+ break;
-+ case CEETM_WBFS:
-+ cstats = this_cpu_ptr(cls->wbfs.cstats);
-+ break;
-+ }
-+
-+ if (congested) {
-+ dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
-+ netif_tx_stop_all_queues(dpa_priv->net_dev);
-+ dpa_priv->cgr_data.cgr_congested_count++;
-+ if (cstats)
-+ cstats->congested_count++;
-+ } else {
-+ dpa_priv->cgr_data.congested_jiffies +=
-+ (jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
-+ netif_tx_wake_all_queues(dpa_priv->net_dev);
-+ }
-+}
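-+
-+/* Note that the notification throttles the whole interface: on congestion
-+ * entry every TX queue of the net device is stopped, not just the ones
-+ * feeding the congested class, and all are woken again on congestion exit.
-+ */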
-+
-+/* Allocate a ceetm fq */
-+static int ceetm_alloc_fq(struct ceetm_fq **fq, struct net_device *dev,
-+ struct ceetm_class *cls)
-+{
-+ *fq = kzalloc(sizeof(**fq), GFP_KERNEL);
-+ if (!*fq)
-+ return -ENOMEM;
-+
-+ (*fq)->net_dev = dev;
-+ (*fq)->ceetm_cls = cls;
-+ return 0;
-+}
-+
-+/* Configure a ceetm Class Congestion Group */
-+static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
-+ struct qm_ceetm_channel *channel, unsigned int id,
-+ struct ceetm_fq *fq, struct dpa_priv_s *dpa_priv)
-+{
-+ int err;
-+ u32 cs_th;
-+ u16 ccg_mask;
-+ struct qm_ceetm_ccg_params ccg_params;
-+
-+ err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq);
-+ if (err)
-+ return err;
-+
-+ /* Configure the count mode (frames/bytes), enable congestion state
-+ * notifications, configure the congestion entry and exit thresholds,
-+ * enable tail-drop, configure the tail-drop mode, and set the
-+ * overhead accounting limit
-+ */
-+ ccg_mask = QM_CCGR_WE_MODE |
-+ QM_CCGR_WE_CSCN_EN |
-+ QM_CCGR_WE_CS_THRES_IN | QM_CCGR_WE_CS_THRES_OUT |
-+ QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE |
-+ QM_CCGR_WE_OAL;
-+
-+ ccg_params.mode = 0; /* count bytes */
-+ ccg_params.cscn_en = 1; /* generate notifications */
-+ ccg_params.td_en = 1; /* enable tail-drop */
-+ ccg_params.td_mode = 0; /* tail-drop on congestion state */
-+ ccg_params.oal = (signed char)(min(sizeof(struct sk_buff) +
-+ dpa_priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
-+
-+ /* Set the congestion state thresholds according to the link speed */
-+ if (dpa_priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
-+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
-+ else
-+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
-+
-+ qm_cgr_cs_thres_set64(&ccg_params.cs_thres_in, cs_th, 1);
-+ qm_cgr_cs_thres_set64(&ccg_params.cs_thres_out,
-+ cs_th * CEETM_CCGR_RATIO, 1);
-+
-+ err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params);
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
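-+
-+/* A worked example of the threshold pair, with an illustrative cs_th of
-+ * 1,000,000 bytes: the CCG enters the congested state when it holds more
-+ * than 1,000,000 bytes and leaves it only once it drops below
-+ * 1,000,000 * CEETM_CCGR_RATIO = 875,000 bytes. The gap provides the
-+ * hysteresis that keeps the state from oscillating around one threshold.
-+ */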
-+
-+/* Configure a ceetm Logical Frame Queue */
-+static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq,
-+ struct qm_ceetm_lfq **lfq)
-+{
-+ int err;
-+ u64 context_a;
-+ u32 context_b;
-+
-+ err = qman_ceetm_lfq_claim(lfq, cq);
-+ if (err)
-+ return err;
-+
-+ /* Get the former contexts in order to preserve context B */
-+ err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b);
-+ if (err)
-+ return err;
-+
-+ context_a = CEETM_CONTEXT_A;
-+ err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b);
-+ if (err)
-+ return err;
-+
-+ (*lfq)->ern = ceetm_ern;
-+
-+ err = qman_ceetm_create_fq(*lfq, &fq->fq);
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/* Configure a prio ceetm class */
-+static int ceetm_config_prio_cls(struct ceetm_class *cls,
-+ struct net_device *dev,
-+ struct qm_ceetm_channel *channel,
-+ unsigned int id)
-+{
-+ int err;
-+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
-+
-+ err = ceetm_alloc_fq(&cls->prio.fq, dev, cls);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure the CCG */
-+ err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq,
-+ dpa_priv);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure the CQ */
-+ err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg);
-+ if (err)
-+ return err;
-+
-+ if (cls->shaped) {
-+ err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1);
-+ if (err)
-+ return err;
-+
-+ err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1);
-+ if (err)
-+ return err;
-+ }
-+
-+ /* Claim and configure a LFQ */
-+ err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq);
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/* Configure a wbfs ceetm class */
-+static int ceetm_config_wbfs_cls(struct ceetm_class *cls,
-+ struct net_device *dev,
-+ struct qm_ceetm_channel *channel,
-+ unsigned int id, int type)
-+{
-+ int err;
-+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
-+
-+ err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure the CCG */
-+ err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq,
-+ dpa_priv);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure the CQ */
-+ if (type == WBFS_GRP_B)
-+ err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id,
-+ cls->wbfs.ccg);
-+ else
-+ err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id,
-+ cls->wbfs.ccg);
-+ if (err)
-+ return err;
-+
-+	/* Configure the CQ weight: a real number multiplied by 100 to get
-+	 * rid of the fraction
-+	 */
-+ err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq,
-+ cls->wbfs.weight * 100);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure a LFQ */
-+ err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq);
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
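-+
-+/* Weight arithmetic sketch: the tc weight is an integer between 1 and 248,
-+ * so a class created with weight 10 is programmed as a ratio of
-+ * 10 * 100 = 1000. Two classes with weights 10 and 20 then share the
-+ * group's excess bandwidth in roughly a 1:2 ratio.
-+ */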
-+
-+/* Find class in qdisc hash table using given handle */
-+static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct Qdisc_class_common *clc;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
-+ __func__, handle, sch->handle);
-+
-+ clc = qdisc_class_find(&priv->clhash, handle);
-+ return clc ? container_of(clc, struct ceetm_class, common) : NULL;
-+}
-+
-+/* Insert a class in the qdisc's class hash */
-+static void ceetm_link_class(struct Qdisc *sch,
-+ struct Qdisc_class_hash *clhash,
-+ struct Qdisc_class_common *common)
-+{
-+ sch_tree_lock(sch);
-+ qdisc_class_hash_insert(clhash, common);
-+ sch_tree_unlock(sch);
-+ qdisc_class_hash_grow(sch, clhash);
-+}
-+
-+/* Destroy a ceetm class */
-+static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
-+{
-+ if (!cl)
-+ return;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ if (cl->root.child) {
-+ qdisc_destroy(cl->root.child);
-+ cl->root.child = NULL;
-+ }
-+
-+ if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the channel %d\n",
-+ __func__, cl->root.ch->idx);
-+
-+ break;
-+
-+ case CEETM_PRIO:
-+ if (cl->prio.child) {
-+ qdisc_destroy(cl->prio.child);
-+ cl->prio.child = NULL;
-+ }
-+
-+ if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the LFQ %d\n",
-+ __func__, cl->prio.lfq->idx);
-+
-+ if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the CQ %d\n",
-+ __func__, cl->prio.cq->idx);
-+
-+ if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the CCG %d\n",
-+ __func__, cl->prio.ccg->idx);
-+
-+ kfree(cl->prio.fq);
-+
-+ if (cl->prio.cstats)
-+ free_percpu(cl->prio.cstats);
-+
-+ break;
-+
-+ case CEETM_WBFS:
-+ if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the LFQ %d\n",
-+ __func__, cl->wbfs.lfq->idx);
-+
-+ if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the CQ %d\n",
-+ __func__, cl->wbfs.cq->idx);
-+
-+ if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the CCG %d\n",
-+ __func__, cl->wbfs.ccg->idx);
-+
-+ kfree(cl->wbfs.fq);
-+
-+ if (cl->wbfs.cstats)
-+ free_percpu(cl->wbfs.cstats);
-+ }
-+
-+ tcf_destroy_chain(&cl->filter_list);
-+ kfree(cl);
-+}
-+
-+/* Destroy a ceetm qdisc */
-+static void ceetm_destroy(struct Qdisc *sch)
-+{
-+ unsigned int ntx, i;
-+ struct hlist_node *next;
-+ struct ceetm_class *cl;
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
-+ __func__, sch->handle);
-+
-+ /* All filters need to be removed before destroying the classes */
-+ tcf_destroy_chain(&priv->filter_list);
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
-+ tcf_destroy_chain(&cl->filter_list);
-+ }
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
-+ common.hnode)
-+ ceetm_cls_destroy(sch, cl);
-+ }
-+
-+ qdisc_class_hash_destroy(&priv->clhash);
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ dpa_disable_ceetm(dev);
-+
-+ if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the LNI %d\n",
-+ __func__, priv->root.lni->idx);
-+
-+ if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the SP %d\n",
-+ __func__, priv->root.sp->idx);
-+
-+ if (priv->root.qstats)
-+ free_percpu(priv->root.qstats);
-+
-+ if (!priv->root.qdiscs)
-+ break;
-+
-+ /* Remove the pfifo qdiscs */
-+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
-+ if (priv->root.qdiscs[ntx])
-+ qdisc_destroy(priv->root.qdiscs[ntx]);
-+
-+ kfree(priv->root.qdiscs);
-+ break;
-+
-+ case CEETM_PRIO:
-+ if (priv->prio.parent)
-+ priv->prio.parent->root.child = NULL;
-+ break;
-+
-+ case CEETM_WBFS:
-+ if (priv->wbfs.parent)
-+ priv->wbfs.parent->prio.child = NULL;
-+ break;
-+ }
-+}
-+
-+static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
-+{
-+ struct Qdisc *qdisc;
-+ unsigned int ntx, i;
-+ struct nlattr *nest;
-+ struct tc_ceetm_qopt qopt;
-+ struct ceetm_qdisc_stats *qstats;
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ sch_tree_lock(sch);
-+ memset(&qopt, 0, sizeof(qopt));
-+ qopt.type = priv->type;
-+ qopt.shaped = priv->shaped;
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ /* Gather statistics from the underlying pfifo qdiscs */
-+ sch->q.qlen = 0;
-+ memset(&sch->bstats, 0, sizeof(sch->bstats));
-+ memset(&sch->qstats, 0, sizeof(sch->qstats));
-+
-+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-+ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
-+ sch->q.qlen += qdisc->q.qlen;
-+ sch->bstats.bytes += qdisc->bstats.bytes;
-+ sch->bstats.packets += qdisc->bstats.packets;
-+ sch->qstats.qlen += qdisc->qstats.qlen;
-+ sch->qstats.backlog += qdisc->qstats.backlog;
-+ sch->qstats.drops += qdisc->qstats.drops;
-+ sch->qstats.requeues += qdisc->qstats.requeues;
-+ sch->qstats.overlimits += qdisc->qstats.overlimits;
-+ }
-+
-+ for_each_online_cpu(i) {
-+ qstats = per_cpu_ptr(priv->root.qstats, i);
-+ sch->qstats.drops += qstats->drops;
-+ }
-+
-+ qopt.rate = priv->root.rate;
-+ qopt.ceil = priv->root.ceil;
-+ qopt.overhead = priv->root.overhead;
-+ break;
-+
-+ case CEETM_PRIO:
-+ qopt.qcount = priv->prio.qcount;
-+ break;
-+
-+ case CEETM_WBFS:
-+ qopt.qcount = priv->wbfs.qcount;
-+ qopt.cr = priv->wbfs.cr;
-+ qopt.er = priv->wbfs.er;
-+ break;
-+
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ sch_tree_unlock(sch);
-+ return -EINVAL;
-+ }
-+
-+ nest = nla_nest_start(skb, TCA_OPTIONS);
-+ if (!nest)
-+ goto nla_put_failure;
-+ if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt))
-+ goto nla_put_failure;
-+ nla_nest_end(skb, nest);
-+
-+ sch_tree_unlock(sch);
-+ return skb->len;
-+
-+nla_put_failure:
-+ sch_tree_unlock(sch);
-+ nla_nest_cancel(skb, nest);
-+ return -EMSGSIZE;
-+}
-+
-+/* Configure a root ceetm qdisc */
-+static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ struct netdev_queue *dev_queue;
-+ struct Qdisc *qdisc;
-+ enum qm_dc_portal dcp_id;
-+ unsigned int i, sp_id, parent_id;
-+ int err;
-+ u64 bps;
-+ struct qm_ceetm_sp *sp;
-+ struct qm_ceetm_lni *lni;
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
-+ struct mac_device *mac_dev = dpa_priv->mac_dev;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ /* Validate inputs */
-+ if (sch->parent != TC_H_ROOT) {
-+ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
-+ tcf_destroy_chain(&priv->filter_list);
-+ qdisc_class_hash_destroy(&priv->clhash);
-+ return -EINVAL;
-+ }
-+
-+ if (!mac_dev) {
-+ pr_err("CEETM: the interface is lacking a mac\n");
-+ err = -EINVAL;
-+ goto err_init_root;
-+ }
-+
-+ /* pre-allocate underlying pfifo qdiscs */
-+ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
-+ sizeof(priv->root.qdiscs[0]),
-+ GFP_KERNEL);
-+ if (!priv->root.qdiscs) {
-+ err = -ENOMEM;
-+ goto err_init_root;
-+ }
-+
-+ for (i = 0; i < dev->num_tx_queues; i++) {
-+ dev_queue = netdev_get_tx_queue(dev, i);
-+ parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
-+ TC_H_MIN(i + PFIFO_MIN_OFFSET));
-+
-+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
-+ parent_id);
-+ if (!qdisc) {
-+ err = -ENOMEM;
-+ goto err_init_root;
-+ }
-+
-+ priv->root.qdiscs[i] = qdisc;
-+ qdisc->flags |= TCQ_F_ONETXQUEUE;
-+ }
-+
-+ sch->flags |= TCQ_F_MQROOT;
-+
-+ priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats);
-+ if (!priv->root.qstats) {
-+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
-+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_root;
-+ }
-+
-+ priv->shaped = qopt->shaped;
-+ priv->root.rate = qopt->rate;
-+ priv->root.ceil = qopt->ceil;
-+ priv->root.overhead = qopt->overhead;
-+
-+ /* Claim the SP */
-+ get_dcp_and_sp(dev, &dcp_id, &sp_id);
-+ err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
-+ __func__);
-+ goto err_init_root;
-+ }
-+
-+ priv->root.sp = sp;
-+
-+ /* Claim the LNI - will use the same id as the SP id since SPs 0-7
-+ * are connected to the TX FMan ports
-+ */
-+ err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
-+ __func__);
-+ goto err_init_root;
-+ }
-+
-+ priv->root.lni = lni;
-+
-+ err = qman_ceetm_sp_set_lni(sp, lni);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to link the SP and LNI\n",
-+ __func__);
-+ goto err_init_root;
-+ }
-+
-+ lni->sp = sp;
-+
-+ /* Configure the LNI shaper */
-+ if (priv->shaped) {
-+ err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
-+ __func__);
-+ goto err_init_root;
-+ }
-+
-+ bps = priv->root.rate << 3; /* Bps -> bps */
-+ err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
-+ __func__);
-+ goto err_init_root;
-+ }
-+
-+ bps = priv->root.ceil << 3; /* Bps -> bps */
-+ err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
-+ __func__);
-+ goto err_init_root;
-+ }
-+ }
-+
-+ /* TODO default configuration */
-+
-+ dpa_enable_ceetm(dev);
-+ return 0;
-+
-+err_init_root:
-+ ceetm_destroy(sch);
-+ return err;
-+}
-+
-+/* Configure a prio ceetm qdisc */
-+static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ int err;
-+ unsigned int i;
-+ struct ceetm_class *parent_cl, *child_cl;
-+ struct Qdisc *parent_qdisc;
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ if (sch->parent == TC_H_ROOT) {
-+ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
-+ err = -EINVAL;
-+ goto err_init_prio;
-+ }
-+
-+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
-+ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
-+ err = -EINVAL;
-+ goto err_init_prio;
-+ }
-+
-+ /* Obtain the parent root ceetm_class */
-+ parent_cl = ceetm_find(sch->parent, parent_qdisc);
-+
-+ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
-+		pr_err("CEETM: a prio ceetm qdisc can be added only under a root ceetm class\n");
-+ err = -EINVAL;
-+ goto err_init_prio;
-+ }
-+
-+ priv->prio.parent = parent_cl;
-+ parent_cl->root.child = sch;
-+
-+ priv->shaped = parent_cl->shaped;
-+ priv->prio.qcount = qopt->qcount;
-+
-+ /* Create and configure qcount child classes */
-+ for (i = 0; i < priv->prio.qcount; i++) {
-+ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
-+ if (!child_cl) {
-+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
-+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_prio;
-+ }
-+
-+ child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
-+ if (!child_cl->prio.cstats) {
-+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
-+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_prio_cls;
-+ }
-+
-+ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
-+ child_cl->refcnt = 1;
-+ child_cl->parent = sch;
-+ child_cl->type = CEETM_PRIO;
-+ child_cl->shaped = priv->shaped;
-+ child_cl->prio.child = NULL;
-+
-+ /* All shaped CQs have CR and ER enabled by default */
-+ child_cl->prio.cr = child_cl->shaped;
-+ child_cl->prio.er = child_cl->shaped;
-+ child_cl->prio.fq = NULL;
-+ child_cl->prio.cq = NULL;
-+
-+ /* Configure the corresponding hardware CQ */
-+ err = ceetm_config_prio_cls(child_cl, dev,
-+ parent_cl->root.ch, i);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
-+ __func__, child_cl->common.classid);
-+ goto err_init_prio_cls;
-+ }
-+
-+ /* Add class handle in Qdisc */
-+ ceetm_link_class(sch, &priv->clhash, &child_cl->common);
-+ pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X associated with CQ %d and CCG %d\n",
-+ __func__, child_cl->common.classid,
-+ child_cl->prio.cq->idx, child_cl->prio.ccg->idx);
-+ }
-+
-+ return 0;
-+
-+err_init_prio_cls:
-+ ceetm_cls_destroy(sch, child_cl);
-+err_init_prio:
-+ ceetm_destroy(sch);
-+ return err;
-+}
-+
-+/* Configure a wbfs ceetm qdisc */
-+static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ int err, group_b, small_group;
-+ unsigned int i, id, prio_a, prio_b;
-+ struct ceetm_class *parent_cl, *child_cl, *root_cl;
-+ struct Qdisc *parent_qdisc;
-+ struct ceetm_qdisc *parent_priv;
-+ struct qm_ceetm_channel *channel;
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ /* Validate inputs */
-+ if (sch->parent == TC_H_ROOT) {
-+		pr_err("CEETM: a wbfs ceetm qdisc can not be root\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
-+ }
-+
-+ /* Obtain the parent prio ceetm qdisc */
-+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
-+ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
-+ }
-+
-+ /* Obtain the parent prio ceetm class */
-+ parent_cl = ceetm_find(sch->parent, parent_qdisc);
-+ parent_priv = qdisc_priv(parent_qdisc);
-+
-+ if (!parent_cl || parent_cl->type != CEETM_PRIO) {
-+		pr_err("CEETM: a wbfs ceetm qdisc can be added only under a prio ceetm class\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
-+ }
-+
-+ if (!qopt->qcount || !qopt->qweight[0]) {
-+ pr_err("CEETM: qcount and qweight are mandatory for a wbfs ceetm qdisc\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
-+ }
-+
-+ priv->shaped = parent_cl->shaped;
-+
-+ if (!priv->shaped && (qopt->cr || qopt->er)) {
-+ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
-+ }
-+
-+ if (priv->shaped && !(qopt->cr || qopt->er)) {
-+ pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
-+ }
-+
-+ /* Obtain the parent root ceetm class */
-+ root_cl = parent_priv->prio.parent;
-+ if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b) ||
-+ root_cl->root.wbfs_grp_large) {
-+		pr_err("CEETM: no more wbfs groups are available\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
-+ }
-+
-+ if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b) &&
-+ qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
-+ pr_err("CEETM: only %d wbfs classes are available\n",
-+ CEETM_MIN_WBFS_QCOUNT);
-+ err = -EINVAL;
-+ goto err_init_wbfs;
-+ }
-+
-+ priv->wbfs.parent = parent_cl;
-+ parent_cl->prio.child = sch;
-+
-+ priv->wbfs.qcount = qopt->qcount;
-+ priv->wbfs.cr = qopt->cr;
-+ priv->wbfs.er = qopt->er;
-+
-+ channel = root_cl->root.ch;
-+
-+ /* Configure the hardware wbfs channel groups */
-+ if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
-+ /* Configure the large group A */
-+ priv->wbfs.group_type = WBFS_GRP_LARGE;
-+ small_group = false;
-+ group_b = false;
-+ prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
-+ prio_b = prio_a;
-+
-+ } else if (root_cl->root.wbfs_grp_a) {
-+ /* Configure the group B */
-+ priv->wbfs.group_type = WBFS_GRP_B;
-+
-+ err = qman_ceetm_channel_get_group(channel, &small_group,
-+ &prio_a, &prio_b);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
-+ __func__);
-+ goto err_init_wbfs;
-+ }
-+
-+ small_group = true;
-+ group_b = true;
-+ prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
-+ /* If group A isn't configured, configure it as group B */
-+ prio_a = prio_a ? : prio_b;
-+
-+ } else {
-+ /* Configure the small group A */
-+ priv->wbfs.group_type = WBFS_GRP_A;
-+
-+ err = qman_ceetm_channel_get_group(channel, &small_group,
-+ &prio_a, &prio_b);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
-+ __func__);
-+ goto err_init_wbfs;
-+ }
-+
-+ small_group = true;
-+ group_b = false;
-+ prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
-+ /* If group B isn't configured, configure it as group A */
-+ prio_b = prio_b ? : prio_a;
-+ }
-+
-+ err = qman_ceetm_channel_set_group(channel, small_group, prio_a,
-+ prio_b);
-+ if (err)
-+ goto err_init_wbfs;
-+
-+ if (priv->shaped) {
-+ err = qman_ceetm_channel_set_group_cr_eligibility(channel,
-+ group_b,
-+ priv->wbfs.cr);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to set group CR eligibility\n",
-+ __func__);
-+ goto err_init_wbfs;
-+ }
-+
-+ err = qman_ceetm_channel_set_group_er_eligibility(channel,
-+ group_b,
-+ priv->wbfs.er);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to set group ER eligibility\n",
-+ __func__);
-+ goto err_init_wbfs;
-+ }
-+ }
-+
-+ /* Create qcount child classes */
-+ for (i = 0; i < priv->wbfs.qcount; i++) {
-+ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
-+ if (!child_cl) {
-+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
-+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_wbfs;
-+ }
-+
-+ child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
-+ if (!child_cl->wbfs.cstats) {
-+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
-+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_wbfs_cls;
-+ }
-+
-+ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
-+ child_cl->refcnt = 1;
-+ child_cl->parent = sch;
-+ child_cl->type = CEETM_WBFS;
-+ child_cl->shaped = priv->shaped;
-+ child_cl->wbfs.fq = NULL;
-+ child_cl->wbfs.cq = NULL;
-+ child_cl->wbfs.weight = qopt->qweight[i];
-+
-+ if (priv->wbfs.group_type == WBFS_GRP_B)
-+ id = WBFS_GRP_B_OFFSET + i;
-+ else
-+ id = WBFS_GRP_A_OFFSET + i;
-+
-+ err = ceetm_config_wbfs_cls(child_cl, dev, channel, id,
-+ priv->wbfs.group_type);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
-+ __func__, child_cl->common.classid);
-+ goto err_init_wbfs_cls;
-+ }
-+
-+ /* Add class handle in Qdisc */
-+ ceetm_link_class(sch, &priv->clhash, &child_cl->common);
-+ pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X associated with CQ %d and CCG %d\n",
-+ __func__, child_cl->common.classid,
-+ child_cl->wbfs.cq->idx, child_cl->wbfs.ccg->idx);
-+ }
-+
-+ /* Signal the root class that a group has been configured */
-+ switch (priv->wbfs.group_type) {
-+ case WBFS_GRP_LARGE:
-+ root_cl->root.wbfs_grp_large = true;
-+ break;
-+ case WBFS_GRP_A:
-+ root_cl->root.wbfs_grp_a = true;
-+ break;
-+ case WBFS_GRP_B:
-+ root_cl->root.wbfs_grp_b = true;
-+ break;
-+ }
-+
-+ return 0;
-+
-+err_init_wbfs_cls:
-+ ceetm_cls_destroy(sch, child_cl);
-+err_init_wbfs:
-+ ceetm_destroy(sch);
-+ return err;
-+}
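-+
-+/* Group geometry recap: a channel exposes eight WBFS CQ slots (ids 8-15).
-+ * A single wbfs qdisc with qcount 8 claims all of them as the large group;
-+ * otherwise up to two qdiscs with qcount 4 each claim the small groups A
-+ * (CQs 8-11) and B (CQs 12-15), matching WBFS_GRP_A_OFFSET and
-+ * WBFS_GRP_B_OFFSET from dpaa_eth_ceetm.h.
-+ */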
-+
-+/* Configure a generic ceetm qdisc */
-+static int ceetm_init(struct Qdisc *sch, struct nlattr *opt)
-+{
-+ struct tc_ceetm_qopt *qopt;
-+ struct nlattr *tb[TCA_CEETM_QOPS + 1];
-+ int ret;
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ if (!netif_is_multiqueue(dev))
-+ return -EOPNOTSUPP;
-+
-+ if (!opt) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
-+ if (ret < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return ret;
-+ }
-+
-+ if (!tb[TCA_CEETM_QOPS]) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (TC_H_MIN(sch->handle)) {
-+ pr_err("CEETM: a qdisc should not have a minor\n");
-+ return -EINVAL;
-+ }
-+
-+ qopt = nla_data(tb[TCA_CEETM_QOPS]);
-+
-+ /* Initialize the class hash list. Each qdisc has its own class hash */
-+ ret = qdisc_class_hash_init(&priv->clhash);
-+ if (ret < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
-+ __func__);
-+ return ret;
-+ }
-+
-+ priv->type = qopt->type;
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ ret = ceetm_init_root(sch, priv, qopt);
-+ break;
-+ case CEETM_PRIO:
-+ ret = ceetm_init_prio(sch, priv, qopt);
-+ break;
-+ case CEETM_WBFS:
-+ ret = ceetm_init_wbfs(sch, priv, qopt);
-+ break;
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ ceetm_destroy(sch);
-+ ret = -EINVAL;
-+ }
-+
-+ return ret;
-+}
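-+
-+/* An illustrative tc session for building the three-level hierarchy. The
-+ * flag names follow NXP's companion iproute2 ceetm patch and are quoted
-+ * here as an assumption; the device name and rates are placeholders:
-+ *
-+ *   tc qdisc add dev fm1-mac1 root handle 1: ceetm type root rate 1000mbit
-+ *   tc class add dev fm1-mac1 parent 1: classid 1:1 ceetm type root \
-+ *      rate 500mbit ceil 1000mbit
-+ *   tc qdisc add dev fm1-mac1 parent 1:1 handle 2: ceetm type prio qcount 4
-+ *   tc qdisc add dev fm1-mac1 parent 2:1 handle 3: ceetm type wbfs \
-+ *      qcount 4 qweight 1 2 3 4 cr 1 er 1
-+ */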
-+
-+/* Edit a root ceetm qdisc */
-+static int ceetm_change_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct net_device *dev,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ int err = 0;
-+ u64 bps;
-+
-+ if (priv->shaped != (bool)qopt->shaped) {
-+ pr_err("CEETM: qdisc %X is %s\n", sch->handle,
-+ priv->shaped ? "shaped" : "unshaped");
-+ return -EINVAL;
-+ }
-+
-+ /* Nothing to modify for unshaped qdiscs */
-+ if (!priv->shaped)
-+ return 0;
-+
-+ /* Configure the LNI shaper */
-+ if (priv->root.overhead != qopt->overhead) {
-+ err = qman_ceetm_lni_enable_shaper(priv->root.lni, 1,
-+ qopt->overhead);
-+ if (err)
-+ goto change_err;
-+ priv->root.overhead = qopt->overhead;
-+ }
-+
-+ if (priv->root.rate != qopt->rate) {
-+ bps = qopt->rate << 3; /* Bps -> bps */
-+ err = qman_ceetm_lni_set_commit_rate_bps(priv->root.lni, bps,
-+ dev->mtu);
-+ if (err)
-+ goto change_err;
-+ priv->root.rate = qopt->rate;
-+ }
-+
-+ if (priv->root.ceil != qopt->ceil) {
-+ bps = qopt->ceil << 3; /* Bps -> bps */
-+ err = qman_ceetm_lni_set_excess_rate_bps(priv->root.lni, bps,
-+ dev->mtu);
-+ if (err)
-+ goto change_err;
-+ priv->root.ceil = qopt->ceil;
-+ }
-+
-+ return 0;
-+
-+change_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the root ceetm qdisc %X\n",
-+ __func__, sch->handle);
-+ return err;
-+}
-+
-+/* Edit a wbfs ceetm qdisc */
-+static int ceetm_change_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ int err;
-+ bool group_b;
-+ struct qm_ceetm_channel *channel;
-+ struct ceetm_class *prio_class, *root_class;
-+ struct ceetm_qdisc *prio_qdisc;
-+
-+ if (qopt->qcount) {
-+ pr_err("CEETM: the qcount can not be modified\n");
-+ return -EINVAL;
-+ }
-+
-+ if (qopt->qweight[0]) {
-+ pr_err("CEETM: the qweight can be modified through the wbfs classes\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!priv->shaped && (qopt->cr || qopt->er)) {
-+ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
-+ return -EINVAL;
-+ }
-+
-+ if (priv->shaped && !(qopt->cr || qopt->er)) {
-+ pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Nothing to modify for unshaped qdiscs */
-+ if (!priv->shaped)
-+ return 0;
-+
-+ prio_class = priv->wbfs.parent;
-+ prio_qdisc = qdisc_priv(prio_class->parent);
-+ root_class = prio_qdisc->prio.parent;
-+ channel = root_class->root.ch;
-+ group_b = priv->wbfs.group_type == WBFS_GRP_B;
-+
-+ if (qopt->cr != priv->wbfs.cr) {
-+ err = qman_ceetm_channel_set_group_cr_eligibility(channel,
-+ group_b,
-+ qopt->cr);
-+ if (err)
-+ goto change_err;
-+ priv->wbfs.cr = qopt->cr;
-+ }
-+
-+ if (qopt->er != priv->wbfs.er) {
-+ err = qman_ceetm_channel_set_group_er_eligibility(channel,
-+ group_b,
-+ qopt->er);
-+ if (err)
-+ goto change_err;
-+ priv->wbfs.er = qopt->er;
-+ }
-+
-+ return 0;
-+
-+change_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the wbfs ceetm qdisc %X\n",
-+ __func__, sch->handle);
-+ return err;
-+}
-+
-+/* Edit a ceetm qdisc */
-+static int ceetm_change(struct Qdisc *sch, struct nlattr *opt)
-+{
-+ struct tc_ceetm_qopt *qopt;
-+ struct nlattr *tb[TCA_CEETM_QOPS + 1];
-+ int ret;
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
-+ if (ret < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return ret;
-+ }
-+
-+ if (!tb[TCA_CEETM_QOPS]) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (TC_H_MIN(sch->handle)) {
-+ pr_err("CEETM: a qdisc should not have a minor\n");
-+ return -EINVAL;
-+ }
-+
-+ qopt = nla_data(tb[TCA_CEETM_QOPS]);
-+
-+ if (priv->type != qopt->type) {
-+ pr_err("CEETM: qdisc %X is not of the provided type\n",
-+ sch->handle);
-+ return -EINVAL;
-+ }
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ ret = ceetm_change_root(sch, priv, dev, qopt);
-+ break;
-+ case CEETM_PRIO:
-+ pr_err("CEETM: prio qdiscs can not be modified\n");
-+ ret = -EINVAL;
-+ break;
-+ case CEETM_WBFS:
-+ ret = ceetm_change_wbfs(sch, priv, qopt);
-+ break;
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ ret = -EINVAL;
-+ }
-+
-+ return ret;
-+}
-+
-+/* Attach the underlying pfifo qdiscs */
-+static void ceetm_attach(struct Qdisc *sch)
-+{
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct Qdisc *qdisc, *old_qdisc;
-+ unsigned int i;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ for (i = 0; i < dev->num_tx_queues; i++) {
-+ qdisc = priv->root.qdiscs[i];
-+ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
-+ if (old_qdisc)
-+ qdisc_destroy(old_qdisc);
-+ }
-+}
-+
-+static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid)
-+{
-+ struct ceetm_class *cl;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
-+ __func__, classid, sch->handle);
-+ cl = ceetm_find(classid, sch);
-+
-+ if (cl)
-+ cl->refcnt++; /* Will decrement in put() */
-+ return (unsigned long)cl;
-+}
-+
-+static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+ cl->refcnt--;
-+
-+ if (cl->refcnt == 0)
-+ ceetm_cls_destroy(sch, cl);
-+}
-+
-+static int ceetm_cls_change_root(struct ceetm_class *cl,
-+ struct tc_ceetm_copt *copt,
-+ struct net_device *dev)
-+{
-+ int err;
-+ u64 bps;
-+
-+ if ((bool)copt->shaped != cl->shaped) {
-+ pr_err("CEETM: class %X is %s\n", cl->common.classid,
-+ cl->shaped ? "shaped" : "unshaped");
-+ return -EINVAL;
-+ }
-+
-+ if (cl->shaped && cl->root.rate != copt->rate) {
-+ bps = copt->rate << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_commit_rate_bps(cl->root.ch, bps,
-+ dev->mtu);
-+ if (err)
-+ goto change_cls_err;
-+ cl->root.rate = copt->rate;
-+ }
-+
-+ if (cl->shaped && cl->root.ceil != copt->ceil) {
-+ bps = copt->ceil << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_excess_rate_bps(cl->root.ch, bps,
-+ dev->mtu);
-+ if (err)
-+ goto change_cls_err;
-+ cl->root.ceil = copt->ceil;
-+ }
-+
-+ if (!cl->shaped && cl->root.tbl != copt->tbl) {
-+ err = qman_ceetm_channel_set_weight(cl->root.ch, copt->tbl);
-+ if (err)
-+ goto change_cls_err;
-+ cl->root.tbl = copt->tbl;
-+ }
-+
-+ return 0;
-+
-+change_cls_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm root class %X\n",
-+ __func__, cl->common.classid);
-+ return err;
-+}
-+
-+static int ceetm_cls_change_prio(struct ceetm_class *cl,
-+ struct tc_ceetm_copt *copt)
-+{
-+ int err;
-+
-+ if (!cl->shaped && (copt->cr || copt->er)) {
-+ pr_err("CEETM: only shaped classes can have CR and ER enabled\n");
-+ return -EINVAL;
-+ }
-+
-+ if (cl->prio.cr != (bool)copt->cr) {
-+ err = qman_ceetm_channel_set_cq_cr_eligibility(
-+ cl->prio.cq->parent,
-+ cl->prio.cq->idx,
-+ copt->cr);
-+ if (err)
-+ goto change_cls_err;
-+ cl->prio.cr = copt->cr;
-+ }
-+
-+ if (cl->prio.er != (bool)copt->er) {
-+ err = qman_ceetm_channel_set_cq_er_eligibility(
-+ cl->prio.cq->parent,
-+ cl->prio.cq->idx,
-+ copt->er);
-+ if (err)
-+ goto change_cls_err;
-+ cl->prio.er = copt->er;
-+ }
-+
-+ return 0;
-+
-+change_cls_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
-+ __func__, cl->common.classid);
-+ return err;
-+}
-+
-+static int ceetm_cls_change_wbfs(struct ceetm_class *cl,
-+ struct tc_ceetm_copt *copt)
-+{
-+ int err;
-+
-+ if (copt->weight != cl->wbfs.weight) {
-+		/* Configure the CQ weight: a real number multiplied by 100
-+		 * to get rid of the fraction
-+		 */
-+ err = qman_ceetm_set_queue_weight_in_ratio(cl->wbfs.cq,
-+ copt->weight * 100);
-+
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
-+ __func__, cl->common.classid);
-+ return err;
-+ }
-+
-+ cl->wbfs.weight = copt->weight;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Add a ceetm root class or configure a ceetm root/prio/wbfs class */
-+static int ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
-+ struct nlattr **tca, unsigned long *arg)
-+{
-+ int err;
-+ u64 bps;
-+ struct ceetm_qdisc *priv;
-+ struct ceetm_class *cl = (struct ceetm_class *)*arg;
-+ struct nlattr *opt = tca[TCA_OPTIONS];
-+ struct nlattr *tb[__TCA_CEETM_MAX];
-+ struct tc_ceetm_copt *copt;
-+ struct qm_ceetm_channel *channel;
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
-+ __func__, classid, sch->handle);
-+
-+ if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
-+ return -EINVAL;
-+ }
-+
-+ priv = qdisc_priv(sch);
-+
-+ if (!opt) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (!cl && sch->handle != parentid) {
-+ pr_err("CEETM: classes can be attached to the root ceetm qdisc only\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!cl && priv->type != CEETM_ROOT) {
-+ pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
-+ return -EINVAL;
-+ }
-+
-+ err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy);
-+ if (err < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (!tb[TCA_CEETM_COPT]) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) {
-+ pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm root classes\n");
-+ return -EINVAL;
-+ }
-+
-+ copt = nla_data(tb[TCA_CEETM_COPT]);
-+
-+ /* Configure an existing ceetm class */
-+ if (cl) {
-+ if (copt->type != cl->type) {
-+ pr_err("CEETM: class %X is not of the provided type\n",
-+ cl->common.classid);
-+ return -EINVAL;
-+ }
-+
-+ switch (copt->type) {
-+ case CEETM_ROOT:
-+ return ceetm_cls_change_root(cl, copt, dev);
-+
-+ case CEETM_PRIO:
-+ return ceetm_cls_change_prio(cl, copt);
-+
-+ case CEETM_WBFS:
-+ return ceetm_cls_change_wbfs(cl, copt);
-+
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid class\n",
-+ __func__);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ /* Add a new root ceetm class */
-+ if (copt->type != CEETM_ROOT) {
-+ pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
-+ return -EINVAL;
-+ }
-+
-+ if (copt->shaped && !priv->shaped) {
-+ pr_err("CEETM: can not add a shaped ceetm root class under an unshaped ceetm root qdisc\n");
-+ return -EINVAL;
-+ }
-+
-+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
-+ if (!cl)
-+ return -ENOMEM;
-+
-+ cl->type = copt->type;
-+ cl->shaped = copt->shaped;
-+ cl->root.rate = copt->rate;
-+ cl->root.ceil = copt->ceil;
-+ cl->root.tbl = copt->tbl;
-+
-+ cl->common.classid = classid;
-+ cl->refcnt = 1;
-+ cl->parent = sch;
-+ cl->root.child = NULL;
-+ cl->root.wbfs_grp_a = false;
-+ cl->root.wbfs_grp_b = false;
-+ cl->root.wbfs_grp_large = false;
-+
-+ /* Claim a CEETM channel */
-+ err = qman_ceetm_channel_claim(&channel, priv->root.lni);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n",
-+ __func__);
-+ goto claim_err;
-+ }
-+
-+ cl->root.ch = channel;
-+
-+ if (cl->shaped) {
-+ /* Configure the channel shaper */
-+ err = qman_ceetm_channel_enable_shaper(channel, 1);
-+ if (err)
-+ goto channel_err;
-+
-+ bps = cl->root.rate << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_commit_rate_bps(channel, bps,
-+ dev->mtu);
-+ if (err)
-+ goto channel_err;
-+
-+ bps = cl->root.ceil << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_excess_rate_bps(channel, bps,
-+ dev->mtu);
-+ if (err)
-+ goto channel_err;
-+
-+ } else {
-+ /* Configure the uFQ algorithm */
-+ err = qman_ceetm_channel_set_weight(channel, cl->root.tbl);
-+ if (err)
-+ goto channel_err;
-+ }
-+
-+ /* Add class handle in Qdisc */
-+ ceetm_link_class(sch, &priv->clhash, &cl->common);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with channel %d\n",
-+ __func__, classid, channel->idx);
-+ *arg = (unsigned long)cl;
-+ return 0;
-+
-+channel_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n",
-+ __func__, channel->idx);
-+ if (qman_ceetm_channel_release(channel))
-+ pr_err(KBUILD_BASENAME " : %s : failed to release the channel %d\n",
-+ __func__, channel->idx);
-+claim_err:
-+ kfree(cl);
-+ return err;
-+}
-+
-+static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct ceetm_class *cl;
-+ unsigned int i;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ if (arg->stop)
-+ return;
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
-+ if (arg->count < arg->skip) {
-+ arg->count++;
-+ continue;
-+ }
-+ if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
-+ arg->stop = 1;
-+ return;
-+ }
-+ arg->count++;
-+ }
-+ }
-+}
-+
-+static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
-+ struct sk_buff *skb, struct tcmsg *tcm)
-+{
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+ struct nlattr *nest;
-+ struct tc_ceetm_copt copt;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ sch_tree_lock(sch);
-+
-+ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
-+ tcm->tcm_handle = cl->common.classid;
-+
-+ memset(&copt, 0, sizeof(copt));
-+
-+ copt.shaped = cl->shaped;
-+ copt.type = cl->type;
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ if (cl->root.child)
-+ tcm->tcm_info = cl->root.child->handle;
-+
-+ copt.rate = cl->root.rate;
-+ copt.ceil = cl->root.ceil;
-+ copt.tbl = cl->root.tbl;
-+ break;
-+
-+ case CEETM_PRIO:
-+ if (cl->prio.child)
-+ tcm->tcm_info = cl->prio.child->handle;
-+
-+ copt.cr = cl->prio.cr;
-+ copt.er = cl->prio.er;
-+ break;
-+
-+ case CEETM_WBFS:
-+ copt.weight = cl->wbfs.weight;
-+ break;
-+ }
-+
-+ nest = nla_nest_start(skb, TCA_OPTIONS);
-+ if (!nest)
-+ goto nla_put_failure;
-+ if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt))
-+ goto nla_put_failure;
-+ nla_nest_end(skb, nest);
-+ sch_tree_unlock(sch);
-+ return skb->len;
-+
-+nla_put_failure:
-+ sch_tree_unlock(sch);
-+ nla_nest_cancel(skb, nest);
-+ return -EMSGSIZE;
-+}
-+
-+static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ sch_tree_lock(sch);
-+ qdisc_class_hash_remove(&priv->clhash, &cl->common);
-+ cl->refcnt--;
-+
-+ /* The refcnt should be at least 1 since we have incremented it in
-+ * get(). Will decrement again in put() where we will call destroy()
-+ * to actually free the memory if it reaches 0.
-+ */
-+ WARN_ON(cl->refcnt == 0);
-+
-+ sch_tree_unlock(sch);
-+ return 0;
-+}
-+
-+/* Get the class' child qdisc, if any */
-+static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ return cl->root.child;
-+
-+ case CEETM_PRIO:
-+ return cl->prio.child;
-+ }
-+
-+ return NULL;
-+}
-+
-+static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
-+ struct Qdisc *new, struct Qdisc **old)
-+{
-+ if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
-+ struct gnet_dump *d)
-+{
-+ unsigned int i;
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+ struct gnet_stats_basic_packed tmp_bstats;
-+ struct ceetm_class_stats *cstats = NULL;
-+ struct qm_ceetm_cq *cq = NULL;
-+ struct tc_ceetm_xstats xstats;
-+
-+ memset(&xstats, 0, sizeof(xstats));
-+ memset(&tmp_bstats, 0, sizeof(tmp_bstats));
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ return 0;
-+ case CEETM_PRIO:
-+ cq = cl->prio.cq;
-+ break;
-+ case CEETM_WBFS:
-+ cq = cl->wbfs.cq;
-+ break;
-+ }
-+
-+ for_each_online_cpu(i) {
-+ switch (cl->type) {
-+ case CEETM_PRIO:
-+ cstats = per_cpu_ptr(cl->prio.cstats, i);
-+ break;
-+ case CEETM_WBFS:
-+ cstats = per_cpu_ptr(cl->wbfs.cstats, i);
-+ break;
-+ }
-+
-+ if (cstats) {
-+ xstats.ern_drop_count += cstats->ern_drop_count;
-+ xstats.congested_count += cstats->congested_count;
-+ tmp_bstats.bytes += cstats->bstats.bytes;
-+ tmp_bstats.packets += cstats->bstats.packets;
-+ }
-+ }
-+
-+ if (gnet_stats_copy_basic(d, NULL, &tmp_bstats) < 0)
-+ return -1;
-+
-+ if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0,
-+ &xstats.frame_count,
-+ &xstats.byte_count))
-+ return -1;
-+
-+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
-+}
-+
-+static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+ struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
-+ cl ? cl->common.classid : 0, sch->handle);
-+ return fl;
-+}
-+
-+static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent,
-+ u32 classid)
-+{
-+ struct ceetm_class *cl = ceetm_find(classid, sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
-+ cl ? cl->common.classid : 0, sch->handle);
-+ return (unsigned long)cl;
-+}
-+
-+static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
-+ cl ? cl->common.classid : 0, sch->handle);
-+}
-+
-+const struct Qdisc_class_ops ceetm_cls_ops = {
-+ .graft = ceetm_cls_graft,
-+ .leaf = ceetm_cls_leaf,
-+ .get = ceetm_cls_get,
-+ .put = ceetm_cls_put,
-+ .change = ceetm_cls_change,
-+ .delete = ceetm_cls_delete,
-+ .walk = ceetm_cls_walk,
-+ .tcf_chain = ceetm_tcf_chain,
-+ .bind_tcf = ceetm_tcf_bind,
-+ .unbind_tcf = ceetm_tcf_unbind,
-+ .dump = ceetm_cls_dump,
-+ .dump_stats = ceetm_cls_dump_stats,
-+};
-+
-+struct Qdisc_ops ceetm_qdisc_ops __read_mostly = {
-+ .id = "ceetm",
-+ .priv_size = sizeof(struct ceetm_qdisc),
-+ .cl_ops = &ceetm_cls_ops,
-+ .init = ceetm_init,
-+ .destroy = ceetm_destroy,
-+ .change = ceetm_change,
-+ .dump = ceetm_dump,
-+ .attach = ceetm_attach,
-+ .owner = THIS_MODULE,
-+};
-+
-+/* Run the filters and classifiers attached to the qdisc on the provided skb */
-+static struct ceetm_class *ceetm_classify(struct sk_buff *skb,
-+ struct Qdisc *sch, int *qerr,
-+ bool *act_drop)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct ceetm_class *cl = NULL, *wbfs_cl;
-+ struct tcf_result res;
-+ struct tcf_proto *tcf;
-+ int result;
-+
-+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-+ tcf = priv->filter_list;
-+ while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
-+#ifdef CONFIG_NET_CLS_ACT
-+ switch (result) {
-+ case TC_ACT_QUEUED:
-+ case TC_ACT_STOLEN:
-+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
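-+			/* fall through - QUEUED/STOLEN also drop the class */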
-+ case TC_ACT_SHOT:
-+ /* No valid class found due to action */
-+ *act_drop = true;
-+ return NULL;
-+ }
-+#endif
-+ cl = (void *)res.class;
-+ if (!cl) {
-+ if (res.classid == sch->handle) {
-+ /* The filter leads to the qdisc */
-+ /* TODO default qdisc */
-+ return NULL;
-+ }
-+
-+ cl = ceetm_find(res.classid, sch);
-+ if (!cl)
-+ /* The filter leads to an invalid class */
-+ break;
-+ }
-+
-+ /* The class might have its own filters attached */
-+ tcf = cl->filter_list;
-+ }
-+
-+ if (!cl) {
-+ /* No valid class found */
-+ /* TODO default qdisc */
-+ return NULL;
-+ }
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ if (cl->root.child) {
-+ /* Run the prio qdisc classifiers */
-+ return ceetm_classify(skb, cl->root.child, qerr,
-+ act_drop);
-+ } else {
-+ /* The root class does not have a child prio qdisc */
-+ /* TODO default qdisc */
-+ return NULL;
-+ }
-+ case CEETM_PRIO:
-+ if (cl->prio.child) {
-+ /* If filters lead to a wbfs class, return it.
-+ * Otherwise, return the prio class
-+ */
-+ wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr,
-+ act_drop);
-+ /* A NULL result might indicate either an erroneous
-+ * filter, or no filters at all. We will assume the
-+ * latter
-+ */
-+ return wbfs_cl ? : cl;
-+ }
-+ }
-+
-+ /* For wbfs and childless prio classes, return the class directly */
-+ return cl;
-+}
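-+
-+/* Classification walk-through, assuming an illustrative filter setup: a
-+ * u32 filter on the root qdisc maps a flow to root class 1:1; the
-+ * recursion then runs the filters of 1:1's prio child qdisc, which may
-+ * return a prio class directly or descend once more into its wbfs child
-+ * qdisc. The deepest matching class supplies the egress FQ in ceetm_tx().
-+ */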
-+
-+int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
-+{
-+ int ret;
-+ bool act_drop = false;
-+ struct Qdisc *sch = net_dev->qdisc;
-+ struct ceetm_class *cl;
-+ struct dpa_priv_s *priv_dpa;
-+ struct qman_fq *egress_fq, *conf_fq;
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
-+ struct ceetm_class_stats *cstats;
-+ const int queue_mapping = dpa_get_queue_mapping(skb);
-+ spinlock_t *root_lock = qdisc_lock(sch);
-+
-+ spin_lock(root_lock);
-+ cl = ceetm_classify(skb, sch, &ret, &act_drop);
-+ spin_unlock(root_lock);
-+
-+#ifdef CONFIG_NET_CLS_ACT
-+ if (act_drop) {
-+ if (ret & __NET_XMIT_BYPASS)
-+ qstats->drops++;
-+ goto drop;
-+ }
-+#endif
-+ /* TODO default class */
-+ if (unlikely(!cl)) {
-+ qstats->drops++;
-+ goto drop;
-+ }
-+
-+ priv_dpa = netdev_priv(net_dev);
-+ conf_fq = priv_dpa->conf_fqs[queue_mapping];
-+
-+ /* Choose the proper tx fq and update the basic stats (bytes and
-+ * packets sent by the class)
-+ */
-+ switch (cl->type) {
-+ case CEETM_PRIO:
-+ egress_fq = &cl->prio.fq->fq;
-+ cstats = this_cpu_ptr(cl->prio.cstats);
-+ break;
-+ case CEETM_WBFS:
-+ egress_fq = &cl->wbfs.fq->fq;
-+ cstats = this_cpu_ptr(cl->wbfs.cstats);
-+ break;
-+ default:
-+ qstats->drops++;
-+ goto drop;
-+ }
-+
-+ bstats_update(&cstats->bstats, skb);
-+ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
-+
-+drop:
-+ dev_kfree_skb_any(skb);
-+ return NET_XMIT_SUCCESS;
-+}
-+
-+static int __init ceetm_register(void)
-+{
-+ int _errno = 0;
-+
-+ pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n");
-+
-+ _errno = register_qdisc(&ceetm_qdisc_ops);
-+ if (unlikely(_errno))
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): register_qdisc() = %d\n",
-+ KBUILD_BASENAME ".c", __LINE__, __func__, _errno);
-+
-+ return _errno;
-+}
-+
-+static void __exit ceetm_unregister(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME ".c", __func__);
-+
-+ unregister_qdisc(&ceetm_qdisc_ops);
-+}
-+
-+module_init(ceetm_register);
-+module_exit(ceetm_unregister);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
-@@ -0,0 +1,236 @@
-+/* Copyright 2008-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPAA_ETH_CEETM_H
-+#define __DPAA_ETH_CEETM_H
-+
-+#include <net/pkt_sched.h>
-+#include <net/netlink.h>
-+#include <lnxwrp_fm.h>
-+
-+#include "mac.h"
-+#include "dpaa_eth_common.h"
-+
-+/* Mask to determine the sub-portal id from a channel number */
-+#define CHANNEL_SP_MASK 0x1f
-+/* The number of the last channel that services DCP0, connected to FMan 0.
-+ * Value validated for B4 and T series platforms.
-+ */
-+#define DCP0_MAX_CHANNEL 0x80f
-+/* A2V=1 - field A2 is valid
-+ * A0V=1 - field A0 is valid - enables frame confirmation
-+ * OVOM=1 - override operation mode bits with values from A2
-+ * EBD=1 - external buffers are deallocated at the end of the FMan flow
-+ * NL=0 - the BMI releases all the internal buffers
-+ */
-+#define CEETM_CONTEXT_A 0x1a00000080000000
-+/* The ratio between the superior and inferior congestion state thresholds. The
-+ * lower threshold is set to 7/8 of the superior one (as the default for WQ
-+ * scheduling).
-+ */
-+#define CEETM_CCGR_RATIO 0.875
-+/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
-+ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
-+ * are reserved for the maximum 32 CEETM channels (majors and minors are in
-+ * hex).
-+ */
-+#define PFIFO_MIN_OFFSET 0x21
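-+
-+/* Handle arithmetic example: with 16 TX queues the pfifo qdiscs occupy
-+ * minors 0x21-0x30, while ceetm root classes keep using minors
-+ * 0x01-0x20, one per potential CEETM channel.
-+ */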
-+
-+/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */
-+#define CEETM_MAX_PRIO_QCOUNT 8
-+#define CEETM_MAX_WBFS_QCOUNT 8
-+#define CEETM_MIN_WBFS_QCOUNT 4
-+
-+/* The id offsets of the CQs belonging to WBFS groups (ids 8-11/15 for group A
-+ * and/or 12-15 for group B).
-+ */
-+#define WBFS_GRP_A_OFFSET 8
-+#define WBFS_GRP_B_OFFSET 12
-+
-+#define WBFS_GRP_A 1
-+#define WBFS_GRP_B 2
-+#define WBFS_GRP_LARGE 3
-+
-+enum {
-+ TCA_CEETM_UNSPEC,
-+ TCA_CEETM_COPT,
-+ TCA_CEETM_QOPS,
-+ __TCA_CEETM_MAX,
-+};
-+
-+/* CEETM configuration types */
-+enum {
-+ CEETM_ROOT = 1,
-+ CEETM_PRIO,
-+ CEETM_WBFS
-+};
-+
-+#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1)
-+extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1];
-+
-+struct ceetm_class;
-+struct ceetm_qdisc_stats;
-+struct ceetm_class_stats;
-+
-+struct ceetm_fq {
-+ struct qman_fq fq;
-+ struct net_device *net_dev;
-+ struct ceetm_class *ceetm_cls;
-+};
-+
-+struct root_q {
-+ struct Qdisc **qdiscs;
-+ __u16 overhead;
-+ __u32 rate;
-+ __u32 ceil;
-+ struct qm_ceetm_sp *sp;
-+ struct qm_ceetm_lni *lni;
-+ struct ceetm_qdisc_stats __percpu *qstats;
-+};
-+
-+struct prio_q {
-+ __u16 qcount;
-+ struct ceetm_class *parent;
-+};
-+
-+struct wbfs_q {
-+ __u16 qcount;
-+ int group_type;
-+ struct ceetm_class *parent;
-+ __u16 cr;
-+ __u16 er;
-+};
-+
-+struct ceetm_qdisc {
-+ int type; /* LNI/CHNL/WBFS */
-+ bool shaped;
-+ union {
-+ struct root_q root;
-+ struct prio_q prio;
-+ struct wbfs_q wbfs;
-+ };
-+ struct Qdisc_class_hash clhash;
-+ struct tcf_proto *filter_list; /* qdisc attached filters */
-+};
-+
-+/* CEETM Qdisc configuration parameters */
-+struct tc_ceetm_qopt {
-+ __u32 type;
-+ __u16 shaped;
-+ __u16 qcount;
-+ __u16 overhead;
-+ __u32 rate;
-+ __u32 ceil;
-+ __u16 cr;
-+ __u16 er;
-+ __u8 qweight[CEETM_MAX_WBFS_QCOUNT];
-+};
-+
-+struct root_c {
-+ unsigned int rate;
-+ unsigned int ceil;
-+ unsigned int tbl;
-+ bool wbfs_grp_a;
-+ bool wbfs_grp_b;
-+ bool wbfs_grp_large;
-+ struct Qdisc *child;
-+ struct qm_ceetm_channel *ch;
-+};
-+
-+struct prio_c {
-+ bool cr;
-+ bool er;
-+ struct ceetm_fq *fq; /* Hardware FQ instance Handle */
-+ struct qm_ceetm_lfq *lfq;
-+ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
-+ struct qm_ceetm_ccg *ccg;
-+	/* only one wbfs qdisc can be linked to a priority CQ */
-+ struct Qdisc *child;
-+ struct ceetm_class_stats __percpu *cstats;
-+};
-+
-+struct wbfs_c {
-+ __u8 weight; /* The weight of the class between 1 and 248 */
-+ struct ceetm_fq *fq; /* Hardware FQ instance Handle */
-+ struct qm_ceetm_lfq *lfq;
-+ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
-+ struct qm_ceetm_ccg *ccg;
-+ struct ceetm_class_stats __percpu *cstats;
-+};
-+
-+struct ceetm_class {
-+ struct Qdisc_class_common common;
-+ int refcnt; /* usage count of this class */
-+ struct tcf_proto *filter_list; /* class attached filters */
-+ struct Qdisc *parent;
-+ bool shaped;
-+ int type; /* ROOT/PRIO/WBFS */
-+ union {
-+ struct root_c root;
-+ struct prio_c prio;
-+ struct wbfs_c wbfs;
-+ };
-+};
-+
-+/* CEETM Class configuration parameters */
-+struct tc_ceetm_copt {
-+ __u32 type;
-+ __u16 shaped;
-+ __u32 rate;
-+ __u32 ceil;
-+ __u16 tbl;
-+ __u16 cr;
-+ __u16 er;
-+ __u8 weight;
-+};
-+
-+/* CEETM stats */
-+struct ceetm_qdisc_stats {
-+ __u32 drops;
-+};
-+
-+struct ceetm_class_stats {
-+ /* Software counters */
-+ struct gnet_stats_basic_packed bstats;
-+ __u32 ern_drop_count;
-+ __u32 congested_count;
-+};
-+
-+struct tc_ceetm_xstats {
-+ __u32 ern_drop_count;
-+ __u32 congested_count;
-+ /* Hardware counters */
-+ __u64 frame_count;
-+ __u64 byte_count;
-+};
-+
-+int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev);
-+#endif
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
-@@ -0,0 +1,1812 @@
-+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_net.h>
-+#include <linux/etherdevice.h>
-+#include <linux/kthread.h>
-+#include <linux/percpu.h>
-+#include <linux/highmem.h>
-+#include <linux/sort.h>
-+#include <linux/fsl_qman.h>
-+#include <linux/ip.h>
-+#include <linux/ipv6.h>
-+#include <linux/if_vlan.h> /* vlan_eth_hdr */
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#ifdef CONFIG_FSL_DPAA_1588
-+#include "dpaa_1588.h"
-+#endif
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+#include "dpaa_debugfs.h"
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+#include "mac.h"
-+
-+/* FQ taildrop threshold, in bytes */
-+#define DPA_FQ_TD 0x200000
-+
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+struct ptp_priv_s ptp_priv;
-+#endif
-+
-+static struct dpa_bp *dpa_bp_array[64];
-+
-+int dpa_max_frm;
-+EXPORT_SYMBOL(dpa_max_frm);
-+
-+int dpa_rx_extra_headroom;
-+EXPORT_SYMBOL(dpa_rx_extra_headroom);
-+
-+int dpa_num_cpus = NR_CPUS;
-+
-+static const struct fqid_cell tx_confirm_fqids[] = {
-+ {0, DPAA_ETH_TX_QUEUES}
-+};
-+
-+static struct fqid_cell default_fqids[][3] = {
-+ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
-+ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
-+};
-+
-+static const char fsl_qman_frame_queues[][25] = {
-+ [RX] = "fsl,qman-frame-queues-rx",
-+ [TX] = "fsl,qman-frame-queues-tx"
-+};
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+/* A set of callbacks for hooking into the fastpath at different points. */
-+struct dpaa_eth_hooks_s dpaa_eth_hooks;
-+EXPORT_SYMBOL(dpaa_eth_hooks);
-+/* This function should only be called on the probe paths, since it makes no
-+ * effort to guarantee consistency of the destination hooks structure.
-+ */
-+void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
-+{
-+ if (hooks)
-+ dpaa_eth_hooks = *hooks;
-+ else
-+ pr_err("NULL pointer to hooks!\n");
-+}
-+EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
-+#endif
-+
-+int dpa_netdev_init(struct net_device *net_dev,
-+ const uint8_t *mac_addr,
-+ uint16_t tx_timeout)
-+{
-+ int err;
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+
-+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
-+
-+ net_dev->features |= net_dev->hw_features;
-+ net_dev->vlan_features = net_dev->features;
-+
-+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
-+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
-+
-+ net_dev->ethtool_ops = &dpa_ethtool_ops;
-+
-+ net_dev->needed_headroom = priv->tx_headroom;
-+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
-+
-+ err = register_netdev(net_dev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev() = %d\n", err);
-+ return err;
-+ }
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ /* create debugfs entry for this net_device */
-+ err = dpa_netdev_debugfs_create(net_dev);
-+ if (err) {
-+ unregister_netdev(net_dev);
-+ return err;
-+ }
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_netdev_init);
-+
-+int __cold dpa_start(struct net_device *net_dev)
-+{
-+ int err, i;
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ err = mac_dev->init_phy(net_dev, priv->mac_dev);
-+ if (err < 0) {
-+ if (netif_msg_ifup(priv))
-+ netdev_err(net_dev, "init_phy() = %d\n", err);
-+ return err;
-+ }
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ err = fm_port_enable(mac_dev->port_dev[i]);
-+ if (err)
-+ goto mac_start_failed;
-+ }
-+
-+ err = priv->mac_dev->start(mac_dev);
-+ if (err < 0) {
-+ if (netif_msg_ifup(priv))
-+ netdev_err(net_dev, "mac_dev->start() = %d\n", err);
-+ goto mac_start_failed;
-+ }
-+
-+ netif_tx_start_all_queues(net_dev);
-+
-+ return 0;
-+
-+mac_start_failed:
-+ for_each_port_device(i, mac_dev->port_dev)
-+ fm_port_disable(mac_dev->port_dev[i]);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(dpa_start);
-+
-+int __cold dpa_stop(struct net_device *net_dev)
-+{
-+ int _errno, i, err;
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ netif_tx_stop_all_queues(net_dev);
-+ /* Allow the Fman (Tx) port to process in-flight frames before we
-+ * try switching it off.
-+ */
-+ usleep_range(5000, 10000);
-+
-+ _errno = mac_dev->stop(mac_dev);
-+ if (unlikely(_errno < 0))
-+ if (netif_msg_ifdown(priv))
-+ netdev_err(net_dev, "mac_dev->stop() = %d\n",
-+ _errno);
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ err = fm_port_disable(mac_dev->port_dev[i]);
-+ _errno = err ? err : _errno;
-+ }
-+
-+ if (mac_dev->phy_dev)
-+ phy_disconnect(mac_dev->phy_dev);
-+ mac_dev->phy_dev = NULL;
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_stop);
-+
-+void __cold dpa_timeout(struct net_device *net_dev)
-+{
-+ const struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+
-+ priv = netdev_priv(net_dev);
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+
-+ if (netif_msg_timer(priv))
-+ netdev_crit(net_dev, "Transmit timeout!\n");
-+
-+ percpu_priv->stats.tx_errors++;
-+}
-+EXPORT_SYMBOL(dpa_timeout);
-+
-+/* net_device */
-+
-+/**
-+ * @param net_dev the device for which statistics are calculated
-+ * @param stats the function fills this structure with the device's statistics
-+ * @return the address of the structure containing the statistics
-+ *
-+ * Calculates the statistics for the given device by adding the statistics
-+ * collected by each CPU.
-+ */
-+void __cold
-+dpa_get_stats64(struct net_device *net_dev,
-+ struct rtnl_link_stats64 *stats)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ u64 *cpustats;
-+ u64 *netstats = (u64 *)stats;
-+ int i, j;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
-+
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+
-+ cpustats = (u64 *)&percpu_priv->stats;
-+
-+ for (j = 0; j < numstats; j++)
-+ netstats[j] += cpustats[j];
-+ }
-+}
-+EXPORT_SYMBOL(dpa_get_stats64);
-+
-+int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
-+{
-+ const int max_mtu = dpa_get_max_mtu();
-+
-+ /* Stay above the 68-byte IPv4 minimum and don't exceed the Ethernet
-+ * controller's MAXFRM
-+ */
-+ if (new_mtu < 68 || new_mtu > max_mtu) {
-+ netdev_err(net_dev, "Invalid L3 MTU %d (must be between %d and %d).\n",
-+ new_mtu, 68, max_mtu);
-+ return -EINVAL;
-+ }
-+ net_dev->mtu = new_mtu;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_change_mtu);
-+
-+/* .ndo_init callback */
-+int dpa_ndo_init(struct net_device *net_dev)
-+{
-+ /* If fsl_fm_max_frm is set higher than the conventional 1500, choose
-+ * conservatively and let the user explicitly raise the MTU via
-+ * ifconfig. Otherwise, the user may end up with different MTUs in the
-+ * same LAN.
-+ * If, on the other hand, fsl_fm_max_frm has been chosen below 1500,
-+ * start with the maximum allowed.
-+ */
-+ int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
-+
-+ pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
-+ net_dev->mtu = init_mtu;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_ndo_init);
-+
-+int dpa_set_features(struct net_device *dev, netdev_features_t features)
-+{
-+ /* Not much to do here for now */
-+ dev->features = features;
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_set_features);
-+
-+netdev_features_t dpa_fix_features(struct net_device *dev,
-+ netdev_features_t features)
-+{
-+ netdev_features_t unsupported_features = 0;
-+
-+ /* In theory we should never be requested to enable features that
-+ * we didn't set in netdev->features and netdev->hw_features at probe
-+ * time, but double check just to be on the safe side.
-+ * We don't support enabling Rx csum through ethtool yet
-+ */
-+ unsupported_features |= NETIF_F_RXCSUM;
-+
-+ features &= ~unsupported_features;
-+
-+ return features;
-+}
-+EXPORT_SYMBOL(dpa_fix_features);
-+
-+#ifdef CONFIG_FSL_DPAA_TS
-+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
-+ const void *data)
-+{
-+ u64 *ts, ns;
-+
-+ ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
-+ data);
-+
-+ if (!ts || *ts == 0)
-+ return 0;
-+
-+ be64_to_cpus(ts);
-+
-+ /* convert the raw counter to ns by applying the nominal frequency
-+ * period as a shift; a multiplication by DPA_PTP_NOMINAL_FREQ_PERIOD_NS
-+ * would be needed instead for a non-power-of-2 period
-+ */
-+ ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
-+
-+ return ns;
-+}
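-+
-+/* Not part of the original patch: a worked example of the conversion above,
-+ * assuming the nominal timer period is 2^DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT ns.
-+ * With a (hypothetical) shift of 4, a raw counter value of 1000 yields
-+ * 1000 << 4 = 16000 ns.
-+ */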
-+
-+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
-+ struct skb_shared_hwtstamps *shhwtstamps, const void *data)
-+{
-+ u64 ns;
-+
-+ ns = dpa_get_timestamp_ns(priv, rx_tx, data);
-+
-+ if (ns == 0)
-+ return -EINVAL;
-+
-+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
-+
-+ return 0;
-+}
-+
-+static void dpa_ts_tx_enable(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
-+ if (mac_dev->ptp_enable)
-+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
-+
-+ priv->ts_tx_en = true;
-+}
-+
-+static void dpa_ts_tx_disable(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+
-+#if 0
-+/* the RTC might be needed by the Rx Ts, cannot disable here
-+ * no separate ptp_disable API for Rx/Tx, cannot disable here
-+ */
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
-+
-+ if (mac_dev->ptp_disable)
-+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
-+#endif
-+
-+ priv->ts_tx_en = false;
-+}
-+
-+static void dpa_ts_rx_enable(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
-+ if (mac_dev->ptp_enable)
-+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
-+
-+ priv->ts_rx_en = true;
-+}
-+
-+static void dpa_ts_rx_disable(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+
-+#if 0
-+/* the RTC might be needed by the Tx Ts, cannot disable here
-+ * no separate ptp_disable API for Rx/Tx, cannot disable here
-+ */
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
-+
-+ if (mac_dev->ptp_disable)
-+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
-+#endif
-+
-+ priv->ts_rx_en = false;
-+}
-+
-+static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ struct hwtstamp_config config;
-+
-+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
-+ return -EFAULT;
-+
-+ switch (config.tx_type) {
-+ case HWTSTAMP_TX_OFF:
-+ dpa_ts_tx_disable(dev);
-+ break;
-+ case HWTSTAMP_TX_ON:
-+ dpa_ts_tx_enable(dev);
-+ break;
-+ default:
-+ return -ERANGE;
-+ }
-+
-+ if (config.rx_filter == HWTSTAMP_FILTER_NONE)
-+ dpa_ts_rx_disable(dev);
-+ else {
-+ dpa_ts_rx_enable(dev);
-+ /* TS is set for all frame types, not only those requested */
-+ config.rx_filter = HWTSTAMP_FILTER_ALL;
-+ }
-+
-+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-+ -EFAULT : 0;
-+}
-+#endif /* CONFIG_FSL_DPAA_TS */
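-+
-+/* Not part of the original patch: a minimal userspace sketch of exercising
-+ * the SIOCSHWTSTAMP path above, assuming an interface named "eth0" and an
-+ * open socket sock_fd; error handling is omitted.
-+ *
-+ *	struct hwtstamp_config cfg = {
-+ *		.tx_type = HWTSTAMP_TX_ON,
-+ *		.rx_filter = HWTSTAMP_FILTER_ALL,
-+ *	};
-+ *	struct ifreq ifr;
-+ *
-+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
-+ *	ifr.ifr_data = (void *)&cfg;
-+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
-+ *
-+ * Since the driver coerces rx_filter to HWTSTAMP_FILTER_ALL, the config
-+ * written back reports what was actually enabled.
-+ */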
-+
-+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+#ifdef CONFIG_FSL_DPAA_1588
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+#endif
-+ int ret = 0;
-+
-+ /* without CONFIG_FSL_DPAA_TS the ioctl is rejected outright; with it,
-+ * the interface must at least be running
-+ */
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (!netif_running(dev))
-+#endif
-+ return -EINVAL;
-+
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (cmd == SIOCSHWTSTAMP)
-+ return dpa_ts_ioctl(dev, rq, cmd);
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
-+ if (priv->tsu && priv->tsu->valid)
-+ ret = dpa_ioctl_1588(dev, rq, cmd);
-+ else
-+ ret = -ENODEV;
-+ }
-+#endif
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(dpa_ioctl);
-+
-+int __cold dpa_remove(struct platform_device *of_dev)
-+{
-+ int err;
-+ struct device *dev;
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+
-+ dev = &of_dev->dev;
-+ net_dev = dev_get_drvdata(dev);
-+
-+ priv = netdev_priv(net_dev);
-+
-+ dpaa_eth_sysfs_remove(dev);
-+
-+ dev_set_drvdata(dev, NULL);
-+ unregister_netdev(net_dev);
-+
-+ err = dpa_fq_free(dev, &priv->dpa_fq_list);
-+
-+ qman_delete_cgr_safe(&priv->ingress_cgr);
-+ qman_release_cgrid(priv->ingress_cgr.cgrid);
-+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
-+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
-+
-+ dpa_private_napi_del(net_dev);
-+
-+ dpa_bp_free(priv);
-+
-+ if (priv->buf_layout)
-+ devm_kfree(dev, priv->buf_layout);
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ /* remove debugfs entry for this net_device */
-+ dpa_netdev_debugfs_remove(net_dev);
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid)
-+ dpa_ptp_cleanup(priv);
-+#endif
-+
-+ free_netdev(net_dev);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(dpa_remove);
-+
-+struct mac_device * __cold __must_check
-+__attribute__((nonnull))
-+dpa_mac_probe(struct platform_device *_of_dev)
-+{
-+ struct device *dpa_dev, *dev;
-+ struct device_node *mac_node;
-+ struct platform_device *of_dev;
-+ struct mac_device *mac_dev;
-+#ifdef CONFIG_FSL_DPAA_1588
-+ int lenp;
-+ const phandle *phandle_prop;
-+ struct net_device *net_dev = NULL;
-+ struct dpa_priv_s *priv = NULL;
-+ struct device_node *timer_node;
-+#endif
-+ dpa_dev = &_of_dev->dev;
-+
-+ mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0);
-+ if (unlikely(mac_node == NULL)) {
-+ dev_err(dpa_dev, "Cannot find MAC device device tree node\n");
-+ return ERR_PTR(-EFAULT);
-+ }
-+
-+ of_dev = of_find_device_by_node(mac_node);
-+ if (unlikely(of_dev == NULL)) {
-+ dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
-+ mac_node->full_name);
-+ of_node_put(mac_node);
-+ return ERR_PTR(-EINVAL);
-+ }
-+ of_node_put(mac_node);
-+
-+ dev = &of_dev->dev;
-+
-+ mac_dev = dev_get_drvdata(dev);
-+ if (unlikely(mac_dev == NULL)) {
-+ dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
-+ dev_name(dev));
-+ return ERR_PTR(-EINVAL);
-+ }
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
-+ if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
-+ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
-+ (mac_dev->speed == SPEED_1000)))) {
-+ timer_node = of_find_node_by_phandle(*phandle_prop);
-+ if (timer_node)
-+ net_dev = dev_get_drvdata(dpa_dev);
-+ if (timer_node && net_dev) {
-+ priv = netdev_priv(net_dev);
-+ if (!dpa_ptp_init(priv))
-+ dev_info(dev, "%s: ptp 1588 is initialized.\n",
-+ mac_node->full_name);
-+ }
-+ }
-+#endif
-+
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+ if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
-+ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
-+ (mac_dev->speed == SPEED_1000))) {
-+ ptp_priv.node = of_parse_phandle(mac_node, "ptimer-handle", 0);
-+ if (ptp_priv.node) {
-+ ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node);
-+ if (unlikely(ptp_priv.of_dev == NULL)) {
-+ dev_err(dpa_dev,
-+ "Cannot find device represented by timer_node\n");
-+ of_node_put(ptp_priv.node);
-+ return ERR_PTR(-EINVAL);
-+ }
-+ ptp_priv.mac_dev = mac_dev;
-+ }
-+ }
-+#endif
-+ return mac_dev;
-+}
-+EXPORT_SYMBOL(dpa_mac_probe);
-+
-+int dpa_set_mac_address(struct net_device *net_dev, void *addr)
-+{
-+ const struct dpa_priv_s *priv;
-+ int _errno;
-+ struct mac_device *mac_dev;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ _errno = eth_mac_addr(net_dev, addr);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev,
-+ "eth_mac_addr() = %d\n",
-+ _errno);
-+ return _errno;
-+ }
-+
-+ mac_dev = priv->mac_dev;
-+
-+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
-+ net_dev->dev_addr);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev,
-+ "mac_dev->change_addr() = %d\n",
-+ _errno);
-+ return _errno;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_set_mac_address);
-+
-+void dpa_set_rx_mode(struct net_device *net_dev)
-+{
-+ int _errno;
-+ const struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
-+ priv->mac_dev->promisc = !priv->mac_dev->promisc;
-+ _errno = priv->mac_dev->set_promisc(
-+ priv->mac_dev->get_mac_handle(priv->mac_dev),
-+ priv->mac_dev->promisc);
-+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
-+ netdev_err(net_dev,
-+ "mac_dev->set_promisc() = %d\n",
-+ _errno);
-+ }
-+
-+ _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
-+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
-+ netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
-+}
-+EXPORT_SYMBOL(dpa_set_rx_mode);
-+
-+void dpa_set_buffers_layout(struct mac_device *mac_dev,
-+ struct dpa_buffer_layout_s *layout)
-+{
-+ struct fm_port_params params;
-+
-+ /* Rx */
-+ layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE;
-+ layout[RX].parse_results = true;
-+ layout[RX].hash_results = true;
-+#ifdef CONFIG_FSL_DPAA_TS
-+ layout[RX].time_stamp = true;
-+#endif
-+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], &params);
-+ layout[RX].manip_extra_space = params.manip_extra_space;
-+ /* a value of zero for data alignment means "don't care", so align to
-+ * a non-zero value to prevent FMD from using its own default
-+ */
-+ layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
-+
-+ /* Tx */
-+ layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
-+ layout[TX].parse_results = true;
-+ layout[TX].hash_results = true;
-+#ifdef CONFIG_FSL_DPAA_TS
-+ layout[TX].time_stamp = true;
-+#endif
-+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], &params);
-+ layout[TX].manip_extra_space = params.manip_extra_space;
-+ layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
-+}
-+EXPORT_SYMBOL(dpa_set_buffers_layout);
-+
-+int __attribute__((nonnull))
-+dpa_bp_alloc(struct dpa_bp *dpa_bp)
-+{
-+ int err;
-+ struct bman_pool_params bp_params;
-+ struct platform_device *pdev;
-+
-+ if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
-+ pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers");
-+ return -EINVAL;
-+ }
-+
-+ memset(&bp_params, 0, sizeof(struct bman_pool_params));
-+#ifdef CONFIG_FMAN_PFC
-+ bp_params.flags = BMAN_POOL_FLAG_THRESH;
-+ bp_params.thresholds[0] = bp_params.thresholds[2] =
-+ CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD;
-+ bp_params.thresholds[1] = bp_params.thresholds[3] =
-+ CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
-+#endif
-+
-+ /* If the pool is already specified, we only create one per bpid */
-+ if (dpa_bpid2pool_use(dpa_bp->bpid))
-+ return 0;
-+
-+ if (dpa_bp->bpid == 0)
-+ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
-+ else
-+ bp_params.bpid = dpa_bp->bpid;
-+
-+ dpa_bp->pool = bman_new_pool(&bp_params);
-+ if (unlikely(dpa_bp->pool == NULL)) {
-+ pr_err("bman_new_pool() failed\n");
-+ return -ENODEV;
-+ }
-+
-+ dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid;
-+
-+ pdev = platform_device_register_simple("dpaa_eth_bpool",
-+ dpa_bp->bpid, NULL, 0);
-+ if (IS_ERR(pdev)) {
-+ pr_err("platform_device_register_simple() failed\n");
-+ err = PTR_ERR(pdev);
-+ goto pdev_register_failed;
-+ }
-+ {
-+ struct dma_map_ops *ops = get_dma_ops(&pdev->dev);
-+ ops->dma_supported = NULL;
-+ }
-+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
-+ if (err) {
-+ pr_err("dma_coerce_mask_and_coherent() failed\n");
-+ goto pdev_mask_failed;
-+ }
-+#ifdef CONFIG_FMAN_ARM
-+ /* force coherency */
-+ pdev->dev.archdata.dma_coherent = true;
-+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
-+#endif
-+
-+ dpa_bp->dev = &pdev->dev;
-+
-+ if (dpa_bp->seed_cb) {
-+ err = dpa_bp->seed_cb(dpa_bp);
-+ if (err)
-+ goto pool_seed_failed;
-+ }
-+
-+ dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
-+
-+ return 0;
-+
-+pool_seed_failed:
-+pdev_mask_failed:
-+ platform_device_unregister(pdev);
-+pdev_register_failed:
-+ bman_free_pool(dpa_bp->pool);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(dpa_bp_alloc);
-+
-+void dpa_bp_drain(struct dpa_bp *bp)
-+{
-+ int ret, num = 8;
-+
-+ do {
-+ struct bm_buffer bmb[8];
-+ int i;
-+
-+ ret = bman_acquire(bp->pool, bmb, num, 0);
-+ if (ret < 0) {
-+ if (num == 8) {
-+ /* we have fewer than 8 buffers left;
-+ * drain them one by one
-+ */
-+ num = 1;
-+ ret = 1;
-+ continue;
-+ } else {
-+ /* Pool is fully drained */
-+ break;
-+ }
-+ }
-+
-+ for (i = 0; i < num; i++) {
-+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
-+
-+ dma_unmap_single(bp->dev, addr, bp->size,
-+ DMA_BIDIRECTIONAL);
-+
-+ bp->free_buf_cb(phys_to_virt(addr));
-+ }
-+ } while (ret > 0);
-+}
-+EXPORT_SYMBOL(dpa_bp_drain);
-+
-+static void __cold __attribute__((nonnull))
-+_dpa_bp_free(struct dpa_bp *dpa_bp)
-+{
-+ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
-+
-+ /* the mapping between bpid and dpa_bp is done very late in the
-+ * allocation procedure; if something failed before the mapping, the bp
-+ * was not configured, therefore we don't need the below instructions
-+ */
-+ if (!bp)
-+ return;
-+
-+ if (!atomic_dec_and_test(&bp->refs))
-+ return;
-+
-+ if (bp->free_buf_cb)
-+ dpa_bp_drain(bp);
-+
-+ dpa_bp_array[bp->bpid] = NULL;
-+ bman_free_pool(bp->pool);
-+
-+ if (bp->dev)
-+ platform_device_unregister(to_platform_device(bp->dev));
-+}
-+
-+void __cold __attribute__((nonnull))
-+dpa_bp_free(struct dpa_priv_s *priv)
-+{
-+ int i;
-+
-+ if (priv->dpa_bp)
-+ for (i = 0; i < priv->bp_count; i++)
-+ _dpa_bp_free(&priv->dpa_bp[i]);
-+}
-+EXPORT_SYMBOL(dpa_bp_free);
-+
-+struct dpa_bp *dpa_bpid2pool(int bpid)
-+{
-+ return dpa_bp_array[bpid];
-+}
-+EXPORT_SYMBOL(dpa_bpid2pool);
-+
-+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
-+{
-+ dpa_bp_array[bpid] = dpa_bp;
-+ atomic_set(&dpa_bp->refs, 1);
-+}
-+
-+bool dpa_bpid2pool_use(int bpid)
-+{
-+ if (dpa_bpid2pool(bpid)) {
-+ atomic_inc(&dpa_bp_array[bpid]->refs);
-+ return true;
-+ }
-+
-+ return false;
-+}
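-+
-+/* Not part of the original patch: a sketch of the intended pairing of the
-+ * bpid map helpers, as used by dpa_bp_alloc() and _dpa_bp_free() above.
-+ *
-+ *	if (!dpa_bpid2pool_use(bpid)) {		// no pool yet for this bpid
-+ *		pool = bman_new_pool(&params);	// first user creates it
-+ *		dpa_bpid2pool_map(bpid, dpa_bp); // publish it, refs = 1
-+ *	}
-+ *	...
-+ *	_dpa_bp_free(dpa_bp);	// drop a ref; the last one frees the pool
-+ */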
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
-+ struct net_device *sb_dev,
-+ select_queue_fallback_t fallback)
-+{
-+ return dpa_get_queue_mapping(skb);
-+}
-+EXPORT_SYMBOL(dpa_select_queue);
-+#endif
-+
-+struct dpa_fq *dpa_fq_alloc(struct device *dev,
-+ u32 fq_start,
-+ u32 fq_count,
-+ struct list_head *list,
-+ enum dpa_fq_type fq_type)
-+{
-+ int i;
-+ struct dpa_fq *dpa_fq;
-+
-+ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL);
-+ if (dpa_fq == NULL)
-+ return NULL;
-+
-+ for (i = 0; i < fq_count; i++) {
-+ dpa_fq[i].fq_type = fq_type;
-+ if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO)
-+ dpa_fq[i].fqid = fq_start ?
-+ DPAA_ETH_FQ_DELTA + fq_start + i : 0;
-+ else
-+ dpa_fq[i].fqid = fq_start ? fq_start + i : 0;
-+
-+ list_add_tail(&dpa_fq[i].list, list);
-+ }
-+
-+#ifdef CONFIG_FMAN_PFC
-+ if (fq_type == FQ_TYPE_TX)
-+ for (i = 0; i < fq_count; i++)
-+ dpa_fq[i].wq = i / dpa_num_cpus;
-+ else
-+#endif
-+ for (i = 0; i < fq_count; i++)
-+ _dpa_assign_wq(dpa_fq + i);
-+
-+ return dpa_fq;
-+}
-+EXPORT_SYMBOL(dpa_fq_alloc);
-+
-+/* Probing of FQs for MACful ports */
-+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
-+ struct fm_port_fqs *port_fqs,
-+ bool alloc_tx_conf_fqs,
-+ enum port_type ptype)
-+{
-+ struct fqid_cell *fqids = NULL;
-+ const void *fqids_off = NULL;
-+ struct dpa_fq *dpa_fq = NULL;
-+ struct device_node *np = dev->of_node;
-+ int num_ranges;
-+ int i, lenp;
-+
-+ if (ptype == TX && alloc_tx_conf_fqs) {
-+ if (!dpa_fq_alloc(dev, tx_confirm_fqids->start,
-+ tx_confirm_fqids->count, list,
-+ FQ_TYPE_TX_CONF_MQ))
-+ goto fq_alloc_failed;
-+ }
-+
-+ fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
-+ if (fqids_off == NULL) {
-+ /* No dts definition, so use the defaults. */
-+ fqids = default_fqids[ptype];
-+ num_ranges = 3;
-+ } else {
-+ num_ranges = lenp / sizeof(*fqids);
-+
-+ fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges,
-+ GFP_KERNEL);
-+ if (fqids == NULL)
-+ goto fqids_alloc_failed;
-+
-+ /* convert to CPU endianness */
-+ for (i = 0; i < num_ranges; i++) {
-+ fqids[i].start = be32_to_cpup(fqids_off +
-+ i * sizeof(*fqids));
-+ fqids[i].count = be32_to_cpup(fqids_off +
-+ i * sizeof(*fqids) + sizeof(__be32));
-+ }
-+ }
-+
-+ for (i = 0; i < num_ranges; i++) {
-+ switch (i) {
-+ case 0:
-+ /* The first queue is the error queue */
-+ if (fqids[i].count != 1)
-+ goto invalid_error_queue;
-+
-+ dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ ptype == RX ?
-+ FQ_TYPE_RX_ERROR :
-+ FQ_TYPE_TX_ERROR);
-+ if (dpa_fq == NULL)
-+ goto fq_alloc_failed;
-+
-+ if (ptype == RX)
-+ port_fqs->rx_errq = &dpa_fq[0];
-+ else
-+ port_fqs->tx_errq = &dpa_fq[0];
-+ break;
-+ case 1:
-+ /* the second queue is the default queue */
-+ if (fqids[i].count != 1)
-+ goto invalid_default_queue;
-+
-+ dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ ptype == RX ?
-+ FQ_TYPE_RX_DEFAULT :
-+ FQ_TYPE_TX_CONFIRM);
-+ if (dpa_fq == NULL)
-+ goto fq_alloc_failed;
-+
-+ if (ptype == RX)
-+ port_fqs->rx_defq = &dpa_fq[0];
-+ else
-+ port_fqs->tx_defq = &dpa_fq[0];
-+ break;
-+ default:
-+ /* all subsequent queues are either RX* PCD or Tx */
-+ if (ptype == RX) {
-+ if (!dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ FQ_TYPE_RX_PCD) ||
-+ !dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ FQ_TYPE_RX_PCD_HI_PRIO))
-+ goto fq_alloc_failed;
-+ } else {
-+ if (!dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ FQ_TYPE_TX))
-+ goto fq_alloc_failed;
-+ }
-+ break;
-+ }
-+ }
-+
-+ return 0;
-+
-+fq_alloc_failed:
-+fqids_alloc_failed:
-+ dev_err(dev, "Cannot allocate memory for frame queues\n");
-+ return -ENOMEM;
-+
-+invalid_default_queue:
-+invalid_error_queue:
-+ dev_err(dev, "Too many default or error queues\n");
-+ return -EINVAL;
-+}
-+EXPORT_SYMBOL(dpa_fq_probe_mac);
-+
-+static u32 rx_pool_channel;
-+static DEFINE_SPINLOCK(rx_pool_channel_init);
-+
-+int dpa_get_channel(void)
-+{
-+ spin_lock(&rx_pool_channel_init);
-+ if (!rx_pool_channel) {
-+ u32 pool;
-+ int ret = qman_alloc_pool(&pool);
-+ if (!ret)
-+ rx_pool_channel = pool;
-+ }
-+ spin_unlock(&rx_pool_channel_init);
-+ if (!rx_pool_channel)
-+ return -ENOMEM;
-+ return rx_pool_channel;
-+}
-+EXPORT_SYMBOL(dpa_get_channel);
-+
-+void dpa_release_channel(void)
-+{
-+ qman_release_pool(rx_pool_channel);
-+}
-+EXPORT_SYMBOL(dpa_release_channel);
-+
-+void dpaa_eth_add_channel(u16 channel)
-+{
-+ const cpumask_t *cpus = qman_affine_cpus();
-+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
-+ int cpu;
-+ struct qman_portal *portal;
-+
-+ for_each_cpu(cpu, cpus) {
-+ portal = (struct qman_portal *)qman_get_affine_portal(cpu);
-+ qman_p_static_dequeue_add(portal, pool);
-+ }
-+}
-+EXPORT_SYMBOL(dpaa_eth_add_channel);
-+
-+/**
-+ * Congestion group state change notification callback.
-+ * Stops the device's egress queues while they are congested and
-+ * wakes them upon exiting congested state.
-+ * Also updates some CGR-related stats.
-+ */
-+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
-+ int congested)
-+{
-+ struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
-+ struct dpa_priv_s, cgr_data.cgr);
-+
-+ if (congested) {
-+ priv->cgr_data.congestion_start_jiffies = jiffies;
-+ netif_tx_stop_all_queues(priv->net_dev);
-+ priv->cgr_data.cgr_congested_count++;
-+ } else {
-+ priv->cgr_data.congested_jiffies +=
-+ (jiffies - priv->cgr_data.congestion_start_jiffies);
-+ netif_tx_wake_all_queues(priv->net_dev);
-+ }
-+}
-+
-+int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
-+{
-+ struct qm_mcc_initcgr initcgr;
-+ u32 cs_th;
-+ int err;
-+
-+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
-+ if (err < 0) {
-+ pr_err("Error %d allocating CGR ID\n", err);
-+ goto out_error;
-+ }
-+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
-+
-+ /* Enable Congestion State Change Notifications and CS taildrop */
-+ initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
-+ initcgr.cgr.cscn_en = QM_CGR_EN;
-+
-+ /* Set different thresholds based on the MAC speed.
-+ * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
-+ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
-+ * In such cases, we ought to reconfigure the threshold, too.
-+ */
-+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
-+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
-+ else
-+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
-+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
-+
-+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
-+ initcgr.cgr.cstd_en = QM_CGR_EN;
-+
-+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
-+ &initcgr);
-+ if (err < 0) {
-+ pr_err("Error %d creating CGR with ID %d\n", err,
-+ priv->cgr_data.cgr.cgrid);
-+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
-+ goto out_error;
-+ }
-+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
-+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
-+ priv->cgr_data.cgr.chan);
-+
-+out_error:
-+ return err;
-+}
-+EXPORT_SYMBOL(dpaa_eth_cgr_init);
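-+
-+/* Not part of the original patch: an illustrative sketch of what
-+ * qm_cgr_cs_thres_set64() is assumed to do with the byte threshold chosen
-+ * above, based on QMan's mantissa/exponent congestion-state threshold
-+ * encoding (threshold ~ TA * 2^Tn, with an 8-bit TA). Treat it as an
-+ * illustration, not as the QMan API itself.
-+ *
-+ *	u64 val = cs_th;
-+ *	int e = 0;
-+ *
-+ *	while (val > 0xff) {	// fit into the 8-bit mantissa
-+ *		val >>= 1;
-+ *		e++;		// each shift doubles the granularity
-+ *	}
-+ *	// with rounding up, a truncated value is bumped so the encoded
-+ *	// threshold never falls below the requested one
-+ */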
-+
-+static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
-+ struct dpa_fq *fq,
-+ const struct qman_fq *template)
-+{
-+ fq->fq_base = *template;
-+ fq->net_dev = priv->net_dev;
-+
-+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
-+ fq->channel = priv->channel;
-+}
-+
-+static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
-+ struct dpa_fq *fq,
-+ struct fm_port *port,
-+ const struct qman_fq *template)
-+{
-+ fq->fq_base = *template;
-+ fq->net_dev = priv->net_dev;
-+
-+ if (port) {
-+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
-+ fq->channel = (uint16_t)fm_get_tx_port_channel(port);
-+ } else {
-+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
-+ }
-+}
-+
-+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
-+ struct fm_port *tx_port)
-+{
-+ struct dpa_fq *fq;
-+ uint16_t portals[NR_CPUS];
-+ int cpu, portal_cnt = 0, num_portals = 0;
-+ uint32_t pcd_fqid, pcd_fqid_hi_prio;
-+ const cpumask_t *affine_cpus = qman_affine_cpus();
-+ int egress_cnt = 0, conf_cnt = 0;
-+
-+ /* Prepare for PCD FQs init */
-+ for_each_cpu(cpu, affine_cpus)
-+ portals[num_portals++] = qman_affine_channel(cpu);
-+ if (num_portals == 0)
-+ dev_err(priv->net_dev->dev.parent,
-+ "No Qman software (affine) channels found");
-+
-+ pcd_fqid = (priv->mac_dev) ?
-+ DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
-+ pcd_fqid_hi_prio = (priv->mac_dev) ?
-+ DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0;
-+
-+ /* Initialize each FQ in the list */
-+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
-+ switch (fq->fq_type) {
-+ case FQ_TYPE_RX_DEFAULT:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
-+ break;
-+ case FQ_TYPE_RX_ERROR:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
-+ break;
-+ case FQ_TYPE_RX_PCD:
-+ /* For MACless we can't have dynamic Rx queues */
-+ BUG_ON(!priv->mac_dev && !fq->fqid);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
-+ if (!fq->fqid)
-+ fq->fqid = pcd_fqid++;
-+ fq->channel = portals[portal_cnt];
-+ portal_cnt = (portal_cnt + 1) % num_portals;
-+ break;
-+ case FQ_TYPE_RX_PCD_HI_PRIO:
-+ /* For MACless we can't have dynamic Hi Pri Rx queues */
-+ BUG_ON(!priv->mac_dev && !fq->fqid);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
-+ if (!fq->fqid)
-+ fq->fqid = pcd_fqid_hi_prio++;
-+ fq->channel = portals[portal_cnt];
-+ portal_cnt = (portal_cnt + 1) % num_portals;
-+ break;
-+ case FQ_TYPE_TX:
-+ dpa_setup_egress(priv, fq, tx_port,
-+ &fq_cbs->egress_ern);
-+ /* If we have more Tx queues than the number of cores,
-+ * just ignore the extra ones.
-+ */
-+ if (egress_cnt < DPAA_ETH_TX_QUEUES)
-+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
-+ break;
-+ case FQ_TYPE_TX_CONFIRM:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
-+ break;
-+ case FQ_TYPE_TX_CONF_MQ:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
-+ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
-+ break;
-+ case FQ_TYPE_TX_ERROR:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
-+ break;
-+ default:
-+ dev_warn(priv->net_dev->dev.parent,
-+ "Unknown FQ type detected!\n");
-+ break;
-+ }
-+ }
-+
-+ /* The number of Tx queues may be smaller than the number of cores, if
-+ * the Tx queue range is specified in the device tree instead of being
-+ * dynamically allocated.
-+ * Make sure all CPUs receive a corresponding Tx queue.
-+ */
-+ while (egress_cnt < DPAA_ETH_TX_QUEUES) {
-+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
-+ if (fq->fq_type != FQ_TYPE_TX)
-+ continue;
-+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
-+ if (egress_cnt == DPAA_ETH_TX_QUEUES)
-+ break;
-+ }
-+ }
-+}
-+EXPORT_SYMBOL(dpa_fq_setup);
-+
-+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
-+{
-+ int _errno;
-+ const struct dpa_priv_s *priv;
-+ struct device *dev;
-+ struct qman_fq *fq;
-+ struct qm_mcc_initfq initfq;
-+ struct qman_fq *confq;
-+ int queue_id;
-+
-+ priv = netdev_priv(dpa_fq->net_dev);
-+ dev = dpa_fq->net_dev->dev.parent;
-+
-+ if (dpa_fq->fqid == 0)
-+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
-+
-+ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
-+
-+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
-+ if (_errno) {
-+ dev_err(dev, "qman_create_fq() failed\n");
-+ return _errno;
-+ }
-+ fq = &dpa_fq->fq_base;
-+
-+ if (dpa_fq->init) {
-+ memset(&initfq, 0, sizeof(initfq));
-+
-+ initfq.we_mask = QM_INITFQ_WE_FQCTRL;
-+ /* FIXME: why would we want to keep an empty FQ in cache? */
-+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
-+
-+ /* Try to reduce the number of portal interrupts for
-+ * Tx Confirmation FQs.
-+ */
-+ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
-+ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
-+
-+ /* FQ placement */
-+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
-+
-+ initfq.fqd.dest.channel = dpa_fq->channel;
-+ initfq.fqd.dest.wq = dpa_fq->wq;
-+
-+ /* Put all egress queues in a congestion group of their own.
-+ * Sensu stricto, the Tx confirmation queues are Rx FQs,
-+ * rather than Tx - but they nonetheless account for the
-+ * memory footprint on behalf of egress traffic. We therefore
-+ * place them in the netdev's CGR, along with the Tx FQs.
-+ */
-+ if (dpa_fq->fq_type == FQ_TYPE_TX ||
-+ dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
-+ dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
-+ initfq.we_mask |= QM_INITFQ_WE_CGID;
-+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
-+ initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid;
-+ /* Set a fixed overhead accounting, in an attempt to
-+ * reduce the impact of fixed-size skb shells and the
-+ * driver's needed headroom on system memory. This is
-+ * especially the case when the egress traffic is
-+ * composed of small datagrams.
-+ * Unfortunately, QMan's OAL value is capped to an
-+ * insufficient value, but even that is better than
-+ * no overhead accounting at all.
-+ */
-+ initfq.we_mask |= QM_INITFQ_WE_OAC;
-+ initfq.fqd.oac_init.oac = QM_OAC_CG;
-+ initfq.fqd.oac_init.oal =
-+ (signed char)(min(sizeof(struct sk_buff) +
-+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
-+ }
-+
-+ if (td_enable) {
-+ initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
-+ qm_fqd_taildrop_set(&initfq.fqd.td,
-+ DPA_FQ_TD, 1);
-+ initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
-+ }
-+
-+ /* Configure the Tx confirmation queue, now that we know
-+ * which Tx queue it pairs with.
-+ */
-+ if (dpa_fq->fq_type == FQ_TYPE_TX) {
-+ queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
-+ if (queue_id >= 0) {
-+ confq = priv->conf_fqs[queue_id];
-+ if (confq) {
-+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
-+ /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
-+ * A2V=1 (contextA A2 field is valid)
-+ * A0V=1 (contextA A0 field is valid)
-+ * B0V=1 (contextB field is valid)
-+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
-+ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
-+ */
-+ initfq.fqd.context_a.hi = 0x1e000000;
-+ initfq.fqd.context_a.lo = 0x80000000;
-+ }
-+ }
-+ }
-+
-+ /* Put all *private* ingress queues in our "ingress CGR". */
-+ if (priv->use_ingress_cgr &&
-+ (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
-+ dpa_fq->fq_type == FQ_TYPE_RX_ERROR ||
-+ dpa_fq->fq_type == FQ_TYPE_RX_PCD ||
-+ dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) {
-+ initfq.we_mask |= QM_INITFQ_WE_CGID;
-+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
-+ initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid;
-+ /* Set a fixed overhead accounting, just like for the
-+ * egress CGR.
-+ */
-+ initfq.we_mask |= QM_INITFQ_WE_OAC;
-+ initfq.fqd.oac_init.oac = QM_OAC_CG;
-+ initfq.fqd.oac_init.oal =
-+ (signed char)(min(sizeof(struct sk_buff) +
-+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
-+ }
-+
-+ /* Initialization common to all ingress queues */
-+ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
-+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
-+ initfq.fqd.fq_ctrl |=
-+ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
-+ initfq.fqd.context_a.stashing.exclusive =
-+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
-+ QM_STASHING_EXCL_ANNOTATION;
-+ initfq.fqd.context_a.stashing.data_cl = 2;
-+ initfq.fqd.context_a.stashing.annotation_cl = 1;
-+ initfq.fqd.context_a.stashing.context_cl =
-+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
-+ }
-+
-+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
-+ if (_errno < 0) {
-+ if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno)) {
-+ dpa_fq->init = 0;
-+ } else {
-+ dev_err(dev, "qman_init_fq(%u) = %d\n",
-+ qman_fq_fqid(fq), _errno);
-+ qman_destroy_fq(fq, 0);
-+ }
-+ return _errno;
-+ }
-+ }
-+
-+ dpa_fq->fqid = qman_fq_fqid(fq);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_fq_init);
-+
-+int __cold __attribute__((nonnull))
-+_dpa_fq_free(struct device *dev, struct qman_fq *fq)
-+{
-+ int _errno, __errno;
-+ struct dpa_fq *dpa_fq;
-+ const struct dpa_priv_s *priv;
-+
-+ _errno = 0;
-+
-+ dpa_fq = container_of(fq, struct dpa_fq, fq_base);
-+ priv = netdev_priv(dpa_fq->net_dev);
-+
-+ if (dpa_fq->init) {
-+ _errno = qman_retire_fq(fq, NULL);
-+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
-+ dev_err(dev, "qman_retire_fq(%u) = %d\n",
-+ qman_fq_fqid(fq), _errno);
-+
-+ __errno = qman_oos_fq(fq);
-+ if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
-+ dev_err(dev, "qman_oos_fq(%u) = %d\n",
-+ qman_fq_fqid(fq), __errno);
-+ if (_errno >= 0)
-+ _errno = __errno;
-+ }
-+ }
-+
-+ qman_destroy_fq(fq, 0);
-+ list_del(&dpa_fq->list);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(_dpa_fq_free);
-+
-+int __cold __attribute__((nonnull))
-+dpa_fq_free(struct device *dev, struct list_head *list)
-+{
-+ int _errno, __errno;
-+ struct dpa_fq *dpa_fq, *tmp;
-+
-+ _errno = 0;
-+ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
-+ __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
-+ if (unlikely(__errno < 0) && _errno >= 0)
-+ _errno = __errno;
-+ }
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_fq_free);
-+
-+int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable)
-+{
-+ int _errno, __errno;
-+ struct dpa_fq *dpa_fq, *tmp;
-+ static bool print_msg __read_mostly;
-+
-+ _errno = 0;
-+ print_msg = true;
-+ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
-+ __errno = dpa_fq_init(dpa_fq, td_enable);
-+ if (unlikely(__errno < 0) && _errno >= 0) {
-+ if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, __errno)) {
-+ if (print_msg) {
-+ dev_warn(dev,
-+ "Skip RX PCD High Priority FQs initialization\n");
-+ print_msg = false;
-+ }
-+ if (_dpa_fq_free(dev, (struct qman_fq *)dpa_fq))
-+ dev_warn(dev,
-+ "Error freeing frame queues\n");
-+ } else {
-+ _errno = __errno;
-+ break;
-+ }
-+ }
-+ }
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_fqs_init);
-+
-+static void
-+dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
-+ struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
-+{
-+ struct fm_port_params tx_port_param;
-+ bool frag_enabled = false;
-+
-+ memset(&tx_port_param, 0, sizeof(tx_port_param));
-+ dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
-+ buf_layout, frag_enabled);
-+}
-+
-+static void
-+dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
-+ struct dpa_fq *errq, struct dpa_fq *defq,
-+ struct dpa_buffer_layout_s *buf_layout)
-+{
-+ struct fm_port_params rx_port_param;
-+ int i;
-+ bool frag_enabled = false;
-+
-+ memset(&rx_port_param, 0, sizeof(rx_port_param));
-+ count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
-+ rx_port_param.num_pools = (uint8_t)count;
-+ for (i = 0; i < count; i++) {
-+ if (i >= rx_port_param.num_pools)
-+ break;
-+ rx_port_param.pool_param[i].id = bp[i].bpid;
-+ rx_port_param.pool_param[i].size = (uint16_t)bp[i].size;
-+ }
-+
-+ dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
-+ buf_layout, frag_enabled);
-+}
-+
-+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
-+/* Defined as weak, to be implemented by fman pcd tester. */
-+int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
-+__attribute__((weak));
-+
-+int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
-+#else
-+int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *);
-+
-+int dpa_free_pcd_fqids(struct device *, uint32_t);
-+
-+#endif /* CONFIG_FSL_SDK_FMAN_TEST */
-+
-+int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
-+ uint8_t alignment, uint32_t *base_fqid)
-+{
-+ dev_crit(dev, "callback not implemented!\n");
-+
-+ return 0;
-+}
-+
-+int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
-+{
-+ dev_crit(dev, "callback not implemented!\n");
-+
-+ return 0;
-+}
-+
-+void dpaa_eth_init_ports(struct mac_device *mac_dev,
-+ struct dpa_bp *bp, size_t count,
-+ struct fm_port_fqs *port_fqs,
-+ struct dpa_buffer_layout_s *buf_layout,
-+ struct device *dev)
-+{
-+ struct fm_port_pcd_param rx_port_pcd_param;
-+ struct fm_port *rxport = mac_dev->port_dev[RX];
-+ struct fm_port *txport = mac_dev->port_dev[TX];
-+
-+ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
-+ port_fqs->tx_defq, &buf_layout[TX]);
-+ dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
-+ port_fqs->rx_defq, &buf_layout[RX]);
-+
-+ rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
-+ rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
-+ rx_port_pcd_param.dev = dev;
-+ fm_port_pcd_bind(rxport, &rx_port_pcd_param);
-+}
-+EXPORT_SYMBOL(dpaa_eth_init_ports);
-+
-+void dpa_release_sgt(struct qm_sg_entry *sgt)
-+{
-+ struct dpa_bp *dpa_bp;
-+ struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
-+ uint8_t i = 0, j;
-+
-+ memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer));
-+
-+ do {
-+ dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]));
-+ DPA_BUG_ON(!dpa_bp);
-+
-+ j = 0;
-+ do {
-+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
-+ bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i]));
-+
-+ j++; i++;
-+ } while (j < ARRAY_SIZE(bmb) &&
-+ !qm_sg_entry_get_final(&sgt[i-1]) &&
-+ qm_sg_entry_get_bpid(&sgt[i-1]) ==
-+ qm_sg_entry_get_bpid(&sgt[i]));
-+
-+ while (bman_release(dpa_bp->pool, bmb, j, 0))
-+ cpu_relax();
-+ } while (!qm_sg_entry_get_final(&sgt[i-1]));
-+}
-+EXPORT_SYMBOL(dpa_release_sgt);
-+
-+void __attribute__((nonnull))
-+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
-+{
-+ struct qm_sg_entry *sgt;
-+ struct dpa_bp *dpa_bp;
-+ struct bm_buffer bmb;
-+ dma_addr_t addr;
-+ void *vaddr;
-+
-+ bmb.opaque = 0;
-+ bm_buffer_set64(&bmb, qm_fd_addr(fd));
-+
-+ dpa_bp = dpa_bpid2pool(fd->bpid);
-+ DPA_BUG_ON(!dpa_bp);
-+
-+ if (fd->format == qm_fd_sg) {
-+ vaddr = phys_to_virt(qm_fd_addr(fd));
-+ sgt = vaddr + dpa_fd_offset(fd);
-+
-+ dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
-+ DMA_BIDIRECTIONAL);
-+
-+ dpa_release_sgt(sgt);
-+ addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ return;
-+ }
-+ bm_buffer_set64(&bmb, addr);
-+ }
-+
-+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
-+ cpu_relax();
-+}
-+EXPORT_SYMBOL(dpa_fd_release);
-+
-+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_mr_entry *msg)
-+{
-+ switch (msg->ern.rc & QM_MR_RC_MASK) {
-+ case QM_MR_RC_CGR_TAILDROP:
-+ percpu_priv->ern_cnt.cg_tdrop++;
-+ break;
-+ case QM_MR_RC_WRED:
-+ percpu_priv->ern_cnt.wred++;
-+ break;
-+ case QM_MR_RC_ERROR:
-+ percpu_priv->ern_cnt.err_cond++;
-+ break;
-+ case QM_MR_RC_ORPWINDOW_EARLY:
-+ percpu_priv->ern_cnt.early_window++;
-+ break;
-+ case QM_MR_RC_ORPWINDOW_LATE:
-+ percpu_priv->ern_cnt.late_window++;
-+ break;
-+ case QM_MR_RC_FQ_TAILDROP:
-+ percpu_priv->ern_cnt.fq_tdrop++;
-+ break;
-+ case QM_MR_RC_ORPWINDOW_RETIRED:
-+ percpu_priv->ern_cnt.fq_retired++;
-+ break;
-+ case QM_MR_RC_ORP_ZERO:
-+ percpu_priv->ern_cnt.orp_zero++;
-+ break;
-+ }
-+}
-+EXPORT_SYMBOL(count_ern);
-+
-+/**
-+ * Turn on HW checksum computation for this outgoing frame.
-+ * If the current protocol is not something we support in this regard
-+ * (or if the stack has already computed the SW checksum), we do nothing.
-+ *
-+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
-+ * otherwise.
-+ *
-+ * Note that this function may modify the fd->cmd field and the skb data buffer
-+ * (the Parse Results area).
-+ */
-+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
-+{
-+ fm_prs_result_t *parse_result;
-+ struct iphdr *iph;
-+ struct ipv6hdr *ipv6h = NULL;
-+ u8 l4_proto;
-+ u16 ethertype = ntohs(skb->protocol);
-+ int retval = 0;
-+
-+ if (skb->ip_summed != CHECKSUM_PARTIAL)
-+ return 0;
-+
-+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
-+ * L4 alone from the FM configuration anyway.
-+ */
-+
-+ /* Fill in some fields of the Parse Results array, so the FMan
-+ * can find them as if they came from the FMan Parser.
-+ */
-+ parse_result = (fm_prs_result_t *)parse_results;
-+
-+ /* If we're dealing with VLAN, get the real Ethernet type */
-+ if (ethertype == ETH_P_8021Q) {
-+ /* We can't always assume the MAC header is set correctly
-+ * by the stack, so reset to beginning of skb->data
-+ */
-+ skb_reset_mac_header(skb);
-+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
-+ }
-+
-+ /* Fill in the relevant L3 parse result fields
-+ * and read the L4 protocol type
-+ */
-+ switch (ethertype) {
-+ case ETH_P_IP:
-+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
-+ iph = ip_hdr(skb);
-+ DPA_BUG_ON(iph == NULL);
-+ l4_proto = iph->protocol;
-+ break;
-+ case ETH_P_IPV6:
-+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
-+ ipv6h = ipv6_hdr(skb);
-+ DPA_BUG_ON(ipv6h == NULL);
-+ l4_proto = ipv6h->nexthdr;
-+ break;
-+ default:
-+ /* We shouldn't even be here */
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_alert(priv->net_dev,
-+ "Can't compute HW csum for L3 proto 0x%x\n",
-+ ntohs(skb->protocol));
-+ retval = -EIO;
-+ goto return_error;
-+ }
-+
-+ /* Fill in the relevant L4 parse result fields */
-+ switch (l4_proto) {
-+ case IPPROTO_UDP:
-+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
-+ break;
-+ case IPPROTO_TCP:
-+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
-+ break;
-+ default:
-+ /* This could just as well be a BUG() */
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_alert(priv->net_dev,
-+ "Can't compute HW csum for L4 proto 0x%x\n",
-+ l4_proto);
-+ retval = -EIO;
-+ goto return_error;
-+ }
-+
-+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
-+ parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
-+ parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
-+
-+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
-+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
-+
-+ /* On P1023 and similar platforms fd->cmd interpretation could
-+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
-+ * is not set so we do not need to check; in the future, if/when
-+ * using context_a we need to check this bit
-+ */
-+
-+return_error:
-+ return retval;
-+}
-+EXPORT_SYMBOL(dpa_enable_tx_csum);
-+
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+void dpa_enable_ceetm(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ priv->ceetm_en = true;
-+}
-+EXPORT_SYMBOL(dpa_enable_ceetm);
-+
-+void dpa_disable_ceetm(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ priv->ceetm_en = false;
-+}
-+EXPORT_SYMBOL(dpa_disable_ceetm);
-+#endif
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
-@@ -0,0 +1,226 @@
-+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPAA_ETH_COMMON_H
-+#define __DPAA_ETH_COMMON_H
-+
-+#include <linux/etherdevice.h> /* struct net_device */
-+#include <linux/fsl_bman.h> /* struct bm_buffer */
-+#include <linux/of_platform.h> /* struct platform_device */
-+#include <linux/net_tstamp.h> /* struct hwtstamp_config */
-+
-+#include "dpaa_eth.h"
-+#include "lnxwrp_fsl_fman.h"
-+
-+#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
-+ frag_enabled) \
-+{ \
-+ param.errq = errq_id; \
-+ param.defq = defq_id; \
-+ param.priv_data_size = buf_layout->priv_data_size; \
-+ param.parse_results = buf_layout->parse_results; \
-+ param.hash_results = buf_layout->hash_results; \
-+ param.frag_enable = frag_enabled; \
-+ param.time_stamp = buf_layout->time_stamp; \
-+ param.manip_extra_space = buf_layout->manip_extra_space; \
-+ param.data_align = buf_layout->data_align; \
-+ fm_set_##type##_port_params(port, &param); \
-+}
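-+
-+/* Not part of the original patch: for readability, an invocation such as
-+ * dpaa_eth_init_port(tx, port, param, errq_id, defq_id, buf_layout,
-+ * frag_enabled) expands along these lines:
-+ *
-+ *	param.errq = errq_id;
-+ *	param.defq = defq_id;
-+ *	param.priv_data_size = buf_layout->priv_data_size;
-+ *	...
-+ *	param.data_align = buf_layout->data_align;
-+ *	fm_set_tx_port_params(port, &param);
-+ */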
-+
-+#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-+
-+#define DPA_SGT_ENTRIES_THRESHOLD DPA_SGT_MAX_ENTRIES
-+
-+#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
-+
-+#define DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno) \
-+ (((dpa_fq)->fq_type == FQ_TYPE_RX_PCD_HI_PRIO) && \
-+ (_errno == -EIO))
-+
-+/* return codes for the dpaa-eth hooks */
-+enum dpaa_eth_hook_result {
-+ /* fd/skb was retained by the hook.
-+ *
-+ * On the Rx path, this means the Ethernet driver will _not_
-+ * deliver the skb to the stack. Instead, the hook implementation
-+ * is expected to properly dispose of the skb.
-+ *
-+ * On the Tx path, the Ethernet driver's dpa_tx() function will
-+ * immediately return NETDEV_TX_OK. The hook implementation is expected
-+ * to free the skb. *DO*NOT* release it to BMan, or enqueue it to FMan,
-+ * unless you know exactly what you're doing!
-+ *
-+ * On the confirmation/error paths, the Ethernet driver will _not_
-+ * perform any fd cleanup, nor update the interface statistics.
-+ */
-+ DPAA_ETH_STOLEN,
-+ /* fd/skb was returned to the Ethernet driver for regular processing.
-+ * The hook is not allowed to, for instance, reallocate the skb (as if
-+ * by linearizing, copying, cloning or reallocating the headroom).
-+ */
-+ DPAA_ETH_CONTINUE
-+};
-+
-+typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
-+ struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
-+typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
-+ struct sk_buff *skb, struct net_device *net_dev);
-+typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
-+ struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
-+
-+/* used in napi related functions */
-+extern u16 qman_portal_max;
-+
-+/* from dpa_ethtool.c */
-+extern const struct ethtool_ops dpa_ethtool_ops;
-+
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+/* Various hooks used for unit-testing and/or fastpath optimizations.
-+ * Currently only one set of such hooks is supported.
-+ */
-+struct dpaa_eth_hooks_s {
-+ /* Invoked on the Tx private path, immediately after receiving the skb
-+ * from the stack.
-+ */
-+ dpaa_eth_egress_hook_t tx;
-+
-+ /* Invoked on the Rx private path, right before passing the skb
-+ * up the stack. At that point, the packet's protocol id has already
-+ * been set. The skb's data pointer is now at the L3 header, and
-+ * skb->mac_header points to the L2 header. skb->len has been adjusted
-+ * to be the length of L3+payload (i.e., the length of the
-+ * original frame minus the L2 header len).
-+ * For more details on what the skb looks like, see eth_type_trans().
-+ */
-+ dpaa_eth_ingress_hook_t rx_default;
-+
-+ /* Driver hook for the Rx error private path. */
-+ dpaa_eth_confirm_hook_t rx_error;
-+ /* Driver hook for the Tx confirmation private path. */
-+ dpaa_eth_confirm_hook_t tx_confirm;
-+ /* Driver hook for the Tx error private path. */
-+ dpaa_eth_confirm_hook_t tx_error;
-+};
-+
-+void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
-+
-+extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
-+#endif
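-+
-+/* Not part of the original patch: a minimal sketch of registering a private
-+ * Rx hook, assuming CONFIG_FSL_DPAA_HOOKS is set; the hook name and the
-+ * interesting() predicate are hypothetical.
-+ *
-+ *	static enum dpaa_eth_hook_result my_rx_hook(struct sk_buff *skb,
-+ *		struct net_device *net_dev, u32 fqid)
-+ *	{
-+ *		if (!interesting(skb))		// hypothetical filter
-+ *			return DPAA_ETH_CONTINUE; // normal stack delivery
-+ *		consume_skb(skb);		// the hook now owns the skb
-+ *		return DPAA_ETH_STOLEN;
-+ *	}
-+ *
-+ *	static struct dpaa_eth_hooks_s hooks = { .rx_default = my_rx_hook };
-+ *
-+ *	fsl_dpaa_eth_set_hooks(&hooks);	// probe time only, per the comment
-+ *					// in dpaa_eth_common.c
-+ */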
-+
-+int dpa_netdev_init(struct net_device *net_dev,
-+ const uint8_t *mac_addr,
-+ uint16_t tx_timeout);
-+int __cold dpa_start(struct net_device *net_dev);
-+int __cold dpa_stop(struct net_device *net_dev);
-+void __cold dpa_timeout(struct net_device *net_dev);
-+void __cold
-+dpa_get_stats64(struct net_device *net_dev,
-+ struct rtnl_link_stats64 *stats);
-+int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
-+int dpa_ndo_init(struct net_device *net_dev);
-+int dpa_set_features(struct net_device *dev, netdev_features_t features);
-+netdev_features_t dpa_fix_features(struct net_device *dev,
-+ netdev_features_t features);
-+#ifdef CONFIG_FSL_DPAA_TS
-+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
-+ enum port_type rx_tx, const void *data);
-+/* Updates the skb shared hw timestamp from the hardware timestamp */
-+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
-+ struct skb_shared_hwtstamps *shhwtstamps, const void *data);
-+#endif /* CONFIG_FSL_DPAA_TS */
-+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-+int __cold dpa_remove(struct platform_device *of_dev);
-+struct mac_device * __cold __must_check
-+__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
-+int dpa_set_mac_address(struct net_device *net_dev, void *addr);
-+void dpa_set_rx_mode(struct net_device *net_dev);
-+void dpa_set_buffers_layout(struct mac_device *mac_dev,
-+ struct dpa_buffer_layout_s *layout);
-+int __attribute__((nonnull))
-+dpa_bp_alloc(struct dpa_bp *dpa_bp);
-+void __cold __attribute__((nonnull))
-+dpa_bp_free(struct dpa_priv_s *priv);
-+struct dpa_bp *dpa_bpid2pool(int bpid);
-+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
-+bool dpa_bpid2pool_use(int bpid);
-+void dpa_bp_drain(struct dpa_bp *bp);
-+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
-+ struct net_device *sb_dev,
-+ select_queue_fallback_t fallback);
-+#endif
-+struct dpa_fq *dpa_fq_alloc(struct device *dev,
-+ u32 fq_start,
-+ u32 fq_count,
-+ struct list_head *list,
-+ enum dpa_fq_type fq_type);
-+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
-+ struct fm_port_fqs *port_fqs,
-+ bool tx_conf_fqs_per_core,
-+ enum port_type ptype);
-+int dpa_get_channel(void);
-+void dpa_release_channel(void);
-+void dpaa_eth_add_channel(u16 channel);
-+int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
-+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
-+ struct fm_port *tx_port);
-+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
-+int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable);
-+int __cold __attribute__((nonnull))
-+dpa_fq_free(struct device *dev, struct list_head *list);
-+void dpaa_eth_init_ports(struct mac_device *mac_dev,
-+ struct dpa_bp *bp, size_t count,
-+ struct fm_port_fqs *port_fqs,
-+ struct dpa_buffer_layout_s *buf_layout,
-+ struct device *dev);
-+void dpa_release_sgt(struct qm_sg_entry *sgt);
-+void __attribute__((nonnull))
-+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
-+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_mr_entry *msg);
-+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+void dpa_enable_ceetm(struct net_device *dev);
-+void dpa_disable_ceetm(struct net_device *dev);
-+#endif
-+struct proxy_device {
-+ struct mac_device *mac_dev;
-+};
-+
-+/* MAC device control functions exposed by the proxy interface */
-+int dpa_proxy_start(struct net_device *net_dev);
-+int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev);
-+int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
-+ struct net_device *net_dev);
-+int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
-+ struct net_device *net_dev);
-+
-+#endif /* __DPAA_ETH_COMMON_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
-@@ -0,0 +1,381 @@
-+/* Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_platform.h>
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#include "dpaa_eth_base.h"
-+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
-+#include "mac.h"
-+
-+#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+MODULE_DESCRIPTION(DPA_DESCRIPTION);
-+
-+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
-+#ifdef CONFIG_PM
-+
-+static int proxy_suspend(struct device *dev)
-+{
-+ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
-+ struct mac_device *mac_dev = proxy_dev->mac_dev;
-+ int err = 0;
-+
-+ err = fm_port_suspend(mac_dev->port_dev[RX]);
-+ if (err)
-+ goto port_suspend_failed;
-+
-+ err = fm_port_suspend(mac_dev->port_dev[TX]);
-+ if (err)
-+ err = fm_port_resume(mac_dev->port_dev[RX]);
-+
-+port_suspend_failed:
-+ return err;
-+}
-+
-+static int proxy_resume(struct device *dev)
-+{
-+ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
-+ struct mac_device *mac_dev = proxy_dev->mac_dev;
-+ int err = 0;
-+
-+ err = fm_port_resume(mac_dev->port_dev[TX]);
-+ if (err)
-+ goto port_resume_failed;
-+
-+ err = fm_port_resume(mac_dev->port_dev[RX]);
-+ if (err)
-+ err = fm_port_suspend(mac_dev->port_dev[TX]);
-+
-+port_resume_failed:
-+ return err;
-+}
-+
-+static const struct dev_pm_ops proxy_pm_ops = {
-+ .suspend = proxy_suspend,
-+ .resume = proxy_resume,
-+};
-+
-+#define PROXY_PM_OPS (&proxy_pm_ops)
-+
-+#else /* CONFIG_PM */
-+
-+#define PROXY_PM_OPS NULL
-+
-+#endif /* CONFIG_PM */
-+
-+static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
-+{
-+ int err = 0, i;
-+ struct device *dev;
-+ struct device_node *dpa_node;
-+ struct dpa_bp *dpa_bp;
-+ struct list_head proxy_fq_list;
-+ size_t count;
-+ struct fm_port_fqs port_fqs;
-+ struct dpa_buffer_layout_s *buf_layout = NULL;
-+ struct mac_device *mac_dev;
-+ struct proxy_device *proxy_dev;
-+
-+ dev = &_of_dev->dev;
-+
-+ dpa_node = dev->of_node;
-+
-+ if (!of_device_is_available(dpa_node))
-+ return -ENODEV;
-+
-+ /* Get the buffer pools assigned to this interface */
-+ dpa_bp = dpa_bp_probe(_of_dev, &count);
-+ if (IS_ERR(dpa_bp))
-+ return PTR_ERR(dpa_bp);
-+
-+ mac_dev = dpa_mac_probe(_of_dev);
-+ if (IS_ERR(mac_dev))
-+ return PTR_ERR(mac_dev);
-+
-+ proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
-+ if (!proxy_dev) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ proxy_dev->mac_dev = mac_dev;
-+ dev_set_drvdata(dev, proxy_dev);
-+
-+ /* We have physical ports, so we need to establish
-+ * the buffer layout.
-+ */
-+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
-+ GFP_KERNEL);
-+ if (!buf_layout) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return -ENOMEM;
-+ }
-+ dpa_set_buffers_layout(mac_dev, buf_layout);
-+
-+ INIT_LIST_HEAD(&proxy_fq_list);
-+
-+ memset(&port_fqs, 0, sizeof(port_fqs));
-+
-+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
-+ if (!err)
-+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
-+ TX);
-+ if (err < 0) {
-+ devm_kfree(dev, buf_layout);
-+ return err;
-+ }
-+
-+ /* Proxy initializer - Just configures the MAC on behalf of
-+ * another partition.
-+ */
-+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
-+ buf_layout, dev);
-+
-+	/* Proxy interfaces still need to be started; the memory allocated
-+	 * during probe can now be freed.
-+	 */
-+ devm_kfree(dev, buf_layout);
-+ devm_kfree(dev, dpa_bp);
-+
-+ /* Free FQ structures */
-+ devm_kfree(dev, port_fqs.rx_defq);
-+ devm_kfree(dev, port_fqs.rx_errq);
-+ devm_kfree(dev, port_fqs.tx_defq);
-+ devm_kfree(dev, port_fqs.tx_errq);
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ err = fm_port_enable(mac_dev->port_dev[i]);
-+ if (err)
-+ goto port_enable_fail;
-+ }
-+
-+ dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
-+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
-+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
-+
-+ return 0; /* Proxy interface initialization ended */
-+
-+port_enable_fail:
-+ for_each_port_device(i, mac_dev->port_dev)
-+ fm_port_disable(mac_dev->port_dev[i]);
-+ dpa_eth_proxy_remove(_of_dev);
-+
-+ return err;
-+}
-+
-+int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
-+ struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev;
-+ int _errno;
-+
-+ mac_dev = proxy_dev->mac_dev;
-+
-+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
-+ net_dev->dev_addr);
-+ if (_errno < 0)
-+ return _errno;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_proxy_set_mac_address);
-+
-+int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
-+ struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev = proxy_dev->mac_dev;
-+ int _errno;
-+
-+ if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) {
-+ mac_dev->promisc = !mac_dev->promisc;
-+ _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev),
-+ mac_dev->promisc);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "mac_dev->set_promisc() = %d\n",
-+ _errno);
-+ }
-+
-+ _errno = mac_dev->set_multi(net_dev, mac_dev);
-+ if (unlikely(_errno < 0))
-+ return _errno;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_proxy_set_rx_mode);
-+
-+int dpa_proxy_start(struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev;
-+ const struct dpa_priv_s *priv;
-+ struct proxy_device *proxy_dev;
-+ int _errno;
-+ int i;
-+
-+ priv = netdev_priv(net_dev);
-+ proxy_dev = (struct proxy_device *)priv->peer;
-+ mac_dev = proxy_dev->mac_dev;
-+
-+ _errno = mac_dev->init_phy(net_dev, mac_dev);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev, "init_phy() = %d\n",
-+ _errno);
-+ return _errno;
-+ }
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ _errno = fm_port_enable(mac_dev->port_dev[i]);
-+ if (_errno)
-+ goto port_enable_fail;
-+ }
-+
-+ _errno = mac_dev->start(mac_dev);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev, "mac_dev->start() = %d\n",
-+ _errno);
-+ goto port_enable_fail;
-+ }
-+
-+ return _errno;
-+
-+port_enable_fail:
-+ for_each_port_device(i, mac_dev->port_dev)
-+ fm_port_disable(mac_dev->port_dev[i]);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_proxy_start);
-+
-+int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev = proxy_dev->mac_dev;
-+ const struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ int _errno, i, err;
-+
-+ _errno = mac_dev->stop(mac_dev);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev, "mac_dev->stop() = %d\n",
-+ _errno);
-+ return _errno;
-+ }
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ err = fm_port_disable(mac_dev->port_dev[i]);
-+ _errno = err ? err : _errno;
-+ }
-+
-+ if (mac_dev->phy_dev)
-+ phy_disconnect(mac_dev->phy_dev);
-+ mac_dev->phy_dev = NULL;
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_proxy_stop);
-+
-+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
-+{
-+ struct device *dev = &of_dev->dev;
-+ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
-+
-+ kfree(proxy_dev);
-+
-+ dev_set_drvdata(dev, NULL);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id dpa_proxy_match[] = {
-+ {
-+ .compatible = "fsl,dpa-ethernet-init"
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, dpa_proxy_match);
-+
-+static struct platform_driver dpa_proxy_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME "-proxy",
-+ .of_match_table = dpa_proxy_match,
-+ .owner = THIS_MODULE,
-+ .pm = PROXY_PM_OPS,
-+ },
-+ .probe = dpaa_eth_proxy_probe,
-+ .remove = dpa_eth_proxy_remove
-+};
-+
-+static int __init __cold dpa_proxy_load(void)
-+{
-+ int _errno;
-+
-+ pr_info(DPA_DESCRIPTION "\n");
-+
-+ /* Initialize dpaa_eth mirror values */
-+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
-+ dpa_max_frm = fm_get_max_frm();
-+
-+ _errno = platform_driver_register(&dpa_proxy_driver);
-+ if (unlikely(_errno < 0)) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
-+ }
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ return _errno;
-+}
-+module_init(dpa_proxy_load);
-+
-+static void __exit __cold dpa_proxy_unload(void)
-+{
-+ platform_driver_unregister(&dpa_proxy_driver);
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+}
-+module_exit(dpa_proxy_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
-@@ -0,0 +1,1113 @@
-+/* Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/skbuff.h>
-+#include <linux/highmem.h>
-+#include <linux/fsl_bman.h>
-+
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#ifdef CONFIG_FSL_DPAA_1588
-+#include "dpaa_1588.h"
-+#endif
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+#include "dpaa_eth_ceetm.h"
-+#endif
-+
-+/* DMA map a page frag and release it back into the bpool.
-+ * The @vaddr fragment must have been allocated with netdev_alloc_frag(),
-+ * sized to fit @dpa_bp.
-+ */
-+static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
-+ int *count_ptr)
-+{
-+ struct bm_buffer bmb;
-+ dma_addr_t addr;
-+
-+ bmb.opaque = 0;
-+
-+ addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ return;
-+ }
-+
-+ bm_buffer_set64(&bmb, addr);
-+
-+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
-+ cpu_relax();
-+
-+ (*count_ptr)++;
-+}
-+
-+static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
-+{
-+ struct bm_buffer bmb[8];
-+ void *new_buf;
-+ dma_addr_t addr;
-+ uint8_t i;
-+ struct device *dev = dpa_bp->dev;
-+ struct sk_buff *skb, **skbh;
-+
-+ memset(bmb, 0, sizeof(struct bm_buffer) * 8);
-+
-+ for (i = 0; i < 8; i++) {
-+ /* We'll prepend the skb back-pointer; can't use the DPA
-+ * priv space, because FMan will overwrite it (from offset 0)
-+ * if it ends up being the second, third, etc. fragment
-+ * in a S/G frame.
-+ *
-+ * We only need enough space to store a pointer, but allocate
-+ * an entire cacheline for performance reasons.
-+ */
-+#ifndef CONFIG_PPC
-+ if (unlikely(dpaa_errata_a010022))
-+ new_buf = page_address(alloc_page(GFP_ATOMIC));
-+ else
-+#endif
-+ new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
-+
-+ if (unlikely(!new_buf))
-+ goto netdev_alloc_failed;
-+ new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
-+
-+ skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
-+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-+ if (unlikely(!skb)) {
-+ put_page(virt_to_head_page(new_buf));
-+ goto build_skb_failed;
-+ }
-+ DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
-+
-+ addr = dma_map_single(dev, new_buf,
-+ dpa_bp->size, DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dev, addr)))
-+ goto dma_map_failed;
-+
-+ bm_buffer_set64(&bmb[i], addr);
-+ }
-+
-+release_bufs:
-+ /* Release the buffers. In case bman is busy, keep trying
-+ * until successful. bman_release() is guaranteed to succeed
-+	 * in a reasonable amount of time.
-+ */
-+ while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
-+ cpu_relax();
-+ return i;
-+
-+dma_map_failed:
-+ kfree_skb(skb);
-+
-+build_skb_failed:
-+netdev_alloc_failed:
-+ net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
-+ WARN_ONCE(1, "Memory allocation failure on Rx\n");
-+
-+ bm_buffer_set64(&bmb[i], 0);
-+ /* Avoid releasing a completely null buffer; bman_release() requires
-+ * at least one buffer.
-+ */
-+ if (likely(i))
-+ goto release_bufs;
-+
-+ return 0;
-+}
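-+
-+/* The DPA_WRITE_SKB_PTR()/DPA_READ_SKB_PTR() pair used above is defined
-+ * elsewhere in the driver; a plausible open-coded equivalent, shown only as
-+ * an assumption about the buffer layout (the skb pointer is kept at word
-+ * offset 'off' from the buffer start handed to hardware):
-+ *
-+ *	skbh = (struct sk_buff **)buf;
-+ *	skbh[off] = skb;	// write; -1 when the buffer can return to
-+ *				// the pool, 0 for one-shot Tx buffers
-+ *	skb = skbh[off];	// read it back on confirmation/cleanup
-+ */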
-+
-+/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
-+static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
-+{
-+ int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
-+ *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
-+}
-+
-+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
-+{
-+ int i;
-+
-+ /* Give each CPU an allotment of "config_count" buffers */
-+ for_each_possible_cpu(i) {
-+ int j;
-+
-+		/* Although we access another CPU's counters here,
-+		 * we do it at boot time, so it is safe.
-+		 */
-+ for (j = 0; j < dpa_bp->config_count; j += 8)
-+ dpa_bp_add_8_bufs(dpa_bp, i);
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_bp_priv_seed);
-+
-+/* Add buffers/(pages) for Rx processing whenever bpool count falls below
-+ * REFILL_THRESHOLD.
-+ */
-+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
-+{
-+ int count = *countptr;
-+ int new_bufs;
-+
-+ if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
-+ do {
-+ new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
-+ if (unlikely(!new_bufs)) {
-+ /* Avoid looping forever if we've temporarily
-+ * run out of memory. We'll try again at the
-+ * next NAPI cycle.
-+ */
-+ break;
-+ }
-+ count += new_bufs;
-+ } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT);
-+
-+ *countptr = count;
-+ if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpaa_eth_refill_bpools);
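-+
-+/* Hedged usage sketch: the Rx poll path is expected to call the refill
-+ * routine with the current CPU's buffer counter (wrapper name hypothetical):
-+ *
-+ *	static int example_refill_this_cpu(struct dpa_bp *dpa_bp)
-+ *	{
-+ *		int *countptr = raw_cpu_ptr(dpa_bp->percpu_count);
-+ *
-+ *		return dpaa_eth_refill_bpools(dpa_bp, countptr);
-+ *	}
-+ */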
-+
-+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
-+ * either contiguous frames or scatter/gather ones.
-+ * Skb freeing is not handled here.
-+ *
-+ * This function may be called on error paths in the Tx function, so guard
-+ * against cases when not all fd relevant fields were filled in.
-+ *
-+ * Return the skb backpointer, since for S/G frames the buffer containing it
-+ * gets freed here.
-+ */
-+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
-+ const struct qm_fd *fd)
-+{
-+ const struct qm_sg_entry *sgt;
-+ int i;
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ dma_addr_t sg_addr;
-+ struct sk_buff **skbh;
-+ struct sk_buff *skb = NULL;
-+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
-+ int nr_frags;
-+ int sg_len;
-+
-+ /* retrieve skb back pointer */
-+ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
-+
-+ if (unlikely(fd->format == qm_fd_sg)) {
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+ dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
-+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
-+ dma_dir);
-+
-+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
-+ * it's from lowmem.
-+ */
-+ sgt = phys_to_virt(addr + dpa_fd_offset(fd));
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid &&
-+ priv->tsu->hwts_tx_en_ioctl)
-+ dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (unlikely(priv->ts_tx_en &&
-+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
-+ struct skb_shared_hwtstamps shhwtstamps;
-+
-+ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
-+ skb_tstamp_tx(skb, &shhwtstamps);
-+ }
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
-+ sg_addr = qm_sg_addr(&sgt[0]);
-+ sg_len = qm_sg_entry_get_len(&sgt[0]);
-+ dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
-+
-+ /* remaining pages were mapped with dma_map_page() */
-+ for (i = 1; i <= nr_frags; i++) {
-+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
-+ sg_addr = qm_sg_addr(&sgt[i]);
-+ sg_len = qm_sg_entry_get_len(&sgt[i]);
-+ dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
-+ }
-+
-+ /* Free the page frag that we allocated on Tx */
-+ put_page(virt_to_head_page(sgt));
-+ } else {
-+ dma_unmap_single(dpa_bp->dev, addr,
-+ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
-+#ifdef CONFIG_FSL_DPAA_TS
-+ /* get the timestamp for non-SG frames */
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid &&
-+ priv->tsu->hwts_tx_en_ioctl)
-+ dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
-+#endif
-+ if (unlikely(priv->ts_tx_en &&
-+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
-+ struct skb_shared_hwtstamps shhwtstamps;
-+
-+ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
-+ skb_tstamp_tx(skb, &shhwtstamps);
-+ }
-+#endif
-+ }
-+
-+ return skb;
-+}
-+EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
-+
-+#ifndef CONFIG_FSL_DPAA_TS
-+bool dpa_skb_is_recyclable(struct sk_buff *skb)
-+{
-+ /* No recycling possible if skb buffer is kmalloc'ed */
-+ if (skb->head_frag == 0)
-+ return false;
-+
-+	/* or if it's a userspace buffer */
-+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-+ return false;
-+
-+ /* or if it's cloned or shared */
-+ if (skb_shared(skb) || skb_cloned(skb) ||
-+ skb->fclone != SKB_FCLONE_UNAVAILABLE)
-+ return false;
-+
-+ return true;
-+}
-+EXPORT_SYMBOL(dpa_skb_is_recyclable);
-+
-+bool dpa_buf_is_recyclable(struct sk_buff *skb,
-+ uint32_t min_size,
-+ uint16_t min_offset,
-+ unsigned char **new_buf_start)
-+{
-+ unsigned char *new;
-+
-+ /* In order to recycle a buffer, the following conditions must be met:
-+ * - buffer size no less than the buffer pool size
-+ * - buffer size no higher than an upper limit (to avoid moving too much
-+ * system memory to the buffer pools)
-+ * - buffer address aligned to cacheline bytes
-+ * - offset of data from start of buffer no lower than a minimum value
-+ * - offset of data from start of buffer no higher than a maximum value
-+ */
-+ new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
-+
-+ /* left align to the nearest cacheline */
-+ new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
-+
-+ if (likely(new >= skb->head &&
-+ new >= (skb->data - DPA_MAX_FD_OFFSET) &&
-+ skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
-+ *new_buf_start = new;
-+ return true;
-+ }
-+
-+ return false;
-+}
-+EXPORT_SYMBOL(dpa_buf_is_recyclable);
-+#endif
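-+
-+/* Sketch of how the two predicates above combine on the Tx fast path,
-+ * mirroring the check in skb_to_contig_fd() below:
-+ *
-+ *	unsigned char *buffer_start;
-+ *
-+ *	if (dpa_skb_is_recyclable(skb) &&
-+ *	    dpa_buf_is_recyclable(skb, dpa_bp->size, priv->tx_headroom,
-+ *				  &buffer_start))
-+ *		// safe to hand the buffer back to the pool after Tx
-+ */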
-+
-+/* Build a linear skb around the received buffer.
-+ * We are guaranteed there is enough room at the end of the data buffer to
-+ * accommodate the shared info area of the skb.
-+ */
-+static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
-+ const struct qm_fd *fd, int *use_gro)
-+{
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ ssize_t fd_off = dpa_fd_offset(fd);
-+ void *vaddr;
-+ const fm_prs_result_t *parse_results;
-+ struct sk_buff *skb = NULL, **skbh;
-+
-+ vaddr = phys_to_virt(addr);
-+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-+
-+ /* Retrieve the skb and adjust data and tail pointers, to make sure
-+ * forwarded skbs will have enough space on Tx if extra headers
-+ * are added.
-+ */
-+ DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
-+ /* When using jumbo Rx buffers, we risk having frames dropped due to
-+ * the socket backlog reaching its maximum allowed size.
-+ * Use the frame length for the skb truesize instead of the buffer
-+ * size, as this is the size of the data that actually gets copied to
-+ * userspace.
-+ * The stack may increase the payload. In this case, it will want to
-+ * warn us that the frame length is larger than the truesize. We
-+ * bypass the warning.
-+ */
-+#ifndef CONFIG_PPC
-+ /* We do not support Jumbo frames on LS1043 and thus we edit
-+ * the skb truesize only when the 4k errata is not present.
-+ */
-+ if (likely(!dpaa_errata_a010022))
-+#endif
-+ skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd));
-+#endif
-+
-+ DPA_BUG_ON(fd_off != priv->rx_headroom);
-+ skb_reserve(skb, fd_off);
-+ skb_put(skb, dpa_fd_length(fd));
-+
-+ /* Peek at the parse results for csum validation */
-+ parse_results = (const fm_prs_result_t *)(vaddr +
-+ DPA_RX_PRIV_DATA_SIZE);
-+ _dpa_process_parse_results(parse_results, fd, skb, use_gro);
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
-+ dpa_ptp_store_rxstamp(priv, skb, vaddr);
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (priv->ts_rx_en)
-+ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+ return skb;
-+}
-+
-+
-+/* Build an skb with the data of the first S/G entry in the linear portion and
-+ * the rest of the frame as skb fragments.
-+ *
-+ * The page fragment holding the S/G Table is recycled here.
-+ */
-+static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
-+ const struct qm_fd *fd, int *use_gro,
-+ int *count_ptr)
-+{
-+ const struct qm_sg_entry *sgt;
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ ssize_t fd_off = dpa_fd_offset(fd);
-+ dma_addr_t sg_addr;
-+ void *vaddr, *sg_vaddr;
-+ struct dpa_bp *dpa_bp;
-+ struct page *page, *head_page;
-+ int frag_offset, frag_len;
-+ int page_offset;
-+ int i;
-+ const fm_prs_result_t *parse_results;
-+ struct sk_buff *skb = NULL, *skb_tmp, **skbh;
-+
-+ vaddr = phys_to_virt(addr);
-+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-+
-+ dpa_bp = priv->dpa_bp;
-+ /* Iterate through the SGT entries and add data buffers to the skb */
-+ sgt = vaddr + fd_off;
-+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
-+ /* Extension bit is not supported */
-+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
-+
-+ /* We use a single global Rx pool */
-+ DPA_BUG_ON(dpa_bp !=
-+ dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
-+
-+ sg_addr = qm_sg_addr(&sgt[i]);
-+ sg_vaddr = phys_to_virt(sg_addr);
-+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-+ SMP_CACHE_BYTES));
-+
-+ dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
-+ DMA_BIDIRECTIONAL);
-+ if (i == 0) {
-+ DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
-+ DPA_BUG_ON(skb->head != sg_vaddr);
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid &&
-+ priv->tsu->hwts_rx_en_ioctl)
-+ dpa_ptp_store_rxstamp(priv, skb, vaddr);
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (priv->ts_rx_en)
-+ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+ /* In the case of a SG frame, FMan stores the Internal
-+ * Context in the buffer containing the sgt.
-+ * Inspect the parse results before anything else.
-+ */
-+ parse_results = (const fm_prs_result_t *)(vaddr +
-+ DPA_RX_PRIV_DATA_SIZE);
-+ _dpa_process_parse_results(parse_results, fd, skb,
-+ use_gro);
-+
-+ /* Make sure forwarded skbs will have enough space
-+ * on Tx, if extra headers are added.
-+ */
-+ DPA_BUG_ON(fd_off != priv->rx_headroom);
-+ skb_reserve(skb, fd_off);
-+ skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
-+ } else {
-+ /* Not the first S/G entry; all data from buffer will
-+ * be added in an skb fragment; fragment index is offset
-+ * by one since first S/G entry was incorporated in the
-+ * linear part of the skb.
-+ *
-+ * Caution: 'page' may be a tail page.
-+ */
-+ DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
-+ page = virt_to_page(sg_vaddr);
-+ head_page = virt_to_head_page(sg_vaddr);
-+
-+ /* Free (only) the skbuff shell because its data buffer
-+ * is already a frag in the main skb.
-+ */
-+ get_page(head_page);
-+ dev_kfree_skb(skb_tmp);
-+
-+ /* Compute offset in (possibly tail) page */
-+ page_offset = ((unsigned long)sg_vaddr &
-+ (PAGE_SIZE - 1)) +
-+ (page_address(page) - page_address(head_page));
-+ /* page_offset only refers to the beginning of sgt[i];
-+ * but the buffer itself may have an internal offset.
-+ */
-+ frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
-+ page_offset;
-+ frag_len = qm_sg_entry_get_len(&sgt[i]);
-+ /* skb_add_rx_frag() does no checking on the page; if
-+ * we pass it a tail page, we'll end up with
-+			 * bad page accounting and eventually with segfaults.
-+ */
-+ skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
-+ frag_len, dpa_bp->size);
-+ }
-+ /* Update the pool count for the current {cpu x bpool} */
-+ (*count_ptr)--;
-+
-+ if (qm_sg_entry_get_final(&sgt[i]))
-+ break;
-+ }
-+ WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
-+
-+ /* recycle the SGT fragment */
-+ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
-+ dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
-+ return skb;
-+}
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+static inline int dpa_skb_loop(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb)
-+{
-+ if (unlikely(priv->loop_to < 0))
-+ return 0; /* loop disabled by default */
-+
-+ skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
-+ dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
-+
-+ return 1; /* Frame Tx on the selected interface */
-+}
-+#endif
-+
-+void __hot _dpa_rx(struct net_device *net_dev,
-+ struct qman_portal *portal,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid,
-+ int *count_ptr)
-+{
-+ struct dpa_bp *dpa_bp;
-+ struct sk_buff *skb;
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ u32 fd_status = fd->status;
-+ unsigned int skb_len;
-+ struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
-+ int use_gro = net_dev->features & NETIF_F_GRO;
-+
-+	if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
-+ if (netif_msg_hw(priv) && net_ratelimit())
-+ netdev_warn(net_dev, "FD status = 0x%08x\n",
-+ fd_status & FM_FD_STAT_RX_ERRORS);
-+
-+ percpu_stats->rx_errors++;
-+ goto _release_frame;
-+ }
-+
-+ dpa_bp = priv->dpa_bp;
-+ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
-+
-+ /* prefetch the first 64 bytes of the frame or the SGT start */
-+ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
-+ prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
-+
-+ /* The only FD types that we may receive are contig and S/G */
-+ DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
-+
-+ if (likely(fd->format == qm_fd_contig)) {
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ /* Execute the Rx processing hook, if it exists. */
-+ if (dpaa_eth_hooks.rx_default &&
-+ dpaa_eth_hooks.rx_default((void *)fd, net_dev,
-+ fqid) == DPAA_ETH_STOLEN) {
-+ /* won't count the rx bytes in */
-+ return;
-+ }
-+#endif
-+ skb = contig_fd_to_skb(priv, fd, &use_gro);
-+ } else {
-+ skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr);
-+ percpu_priv->rx_sg++;
-+ }
-+
-+ /* Account for either the contig buffer or the SGT buffer (depending on
-+ * which case we were in) having been removed from the pool.
-+ */
-+ (*count_ptr)--;
-+ skb->protocol = eth_type_trans(skb, net_dev);
-+
-+ /* IP Reassembled frames are allowed to be larger than MTU */
-+ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
-+ !(fd_status & FM_FD_IPR))) {
-+ percpu_stats->rx_dropped++;
-+ goto drop_bad_frame;
-+ }
-+
-+ skb_len = skb->len;
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ if (dpa_skb_loop(priv, skb)) {
-+ percpu_stats->rx_packets++;
-+ percpu_stats->rx_bytes += skb_len;
-+ return;
-+ }
-+#endif
-+
-+ if (use_gro) {
-+ gro_result_t gro_result;
-+ const struct qman_portal_config *pc =
-+ qman_p_get_portal_config(portal);
-+ struct dpa_napi_portal *np = &percpu_priv->np[pc->index];
-+
-+ np->p = portal;
-+ gro_result = napi_gro_receive(&np->napi, skb);
-+ /* If frame is dropped by the stack, rx_dropped counter is
-+ * incremented automatically, so no need for us to update it
-+ */
-+ if (unlikely(gro_result == GRO_DROP))
-+ goto packet_dropped;
-+ } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
-+ goto packet_dropped;
-+
-+ percpu_stats->rx_packets++;
-+ percpu_stats->rx_bytes += skb_len;
-+
-+packet_dropped:
-+ return;
-+
-+drop_bad_frame:
-+ dev_kfree_skb(skb);
-+ return;
-+
-+_release_frame:
-+ dpa_fd_release(net_dev, fd);
-+}
-+
-+int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd,
-+ int *count_ptr, int *offset)
-+{
-+ struct sk_buff **skbh;
-+ dma_addr_t addr;
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ struct net_device *net_dev = priv->net_dev;
-+ int err;
-+ enum dma_data_direction dma_dir;
-+ unsigned char *buffer_start;
-+ int dma_map_size;
-+
-+#ifndef CONFIG_FSL_DPAA_TS
-+	/* Check the recycling conditions. This is done only when timestamp
-+	 * support is disabled, since otherwise we need the fd back on Tx
-+	 * confirmation.
-+	 */
-+
-+ /* We can recycle the buffer if:
-+ * - the pool is not full
-+ * - the buffer meets the skb recycling conditions
-+ * - the buffer meets our own (size, offset, align) conditions
-+ */
-+ if (likely((*count_ptr < dpa_bp->target_count) &&
-+ dpa_skb_is_recyclable(skb) &&
-+ dpa_buf_is_recyclable(skb, dpa_bp->size,
-+ priv->tx_headroom, &buffer_start))) {
-+ /* Buffer is recyclable; use the new start address
-+ * and set fd parameters and DMA mapping direction
-+ */
-+ fd->bpid = dpa_bp->bpid;
-+ DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET);
-+ fd->offset = (uint16_t)(skb->data - buffer_start);
-+ dma_dir = DMA_BIDIRECTIONAL;
-+ dma_map_size = dpa_bp->size;
-+
-+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
-+ *offset = skb_headroom(skb) - fd->offset;
-+ } else
-+#endif
-+ {
-+ /* Not recyclable.
-+ * We are guaranteed to have at least tx_headroom bytes
-+ * available, so just use that for offset.
-+ */
-+ fd->bpid = 0xff;
-+ buffer_start = skb->data - priv->tx_headroom;
-+ fd->offset = priv->tx_headroom;
-+ dma_dir = DMA_TO_DEVICE;
-+ dma_map_size = skb_tail_pointer(skb) - buffer_start;
-+
-+		/* The buffer will be Tx-confirmed, and the TxConf cb must
-+		 * look at our Tx private data to retrieve the skbuff, so the
-+		 * backpointer is stored at offset 0 rather than -1.
-+		 */
-+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
-+ }
-+
-+ /* Enable L3/L4 hardware checksum computation.
-+ *
-+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
-+ * need to write into the skb.
-+ */
-+ err = dpa_enable_tx_csum(priv, skb, fd,
-+ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
-+ if (unlikely(err < 0)) {
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_err(net_dev, "HW csum error: %d\n", err);
-+ return err;
-+ }
-+
-+ /* Fill in the rest of the FD fields */
-+ fd->format = qm_fd_contig;
-+ fd->length20 = skb->len;
-+ fd->cmd |= FM_FD_CMD_FCO;
-+
-+ /* Map the entire buffer size that may be seen by FMan, but no more */
-+ addr = dma_map_single(dpa_bp->dev, skbh, dma_map_size, dma_dir);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_err(net_dev, "dma_map_single() failed\n");
-+ return -EINVAL;
-+ }
-+ qm_fd_addr_set64(fd, addr);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(skb_to_contig_fd);
-+
-+#ifndef CONFIG_PPC
-+struct sk_buff *split_skb_at_4k_boundaries(struct sk_buff *skb)
-+{
-+ unsigned int length, nr_frags, moved_len = 0;
-+ u64 page_start;
-+ struct page *page;
-+ skb_frag_t *frag;
-+ int i = 0, j = 0;
-+
-+ /* make sure skb is not shared */
-+ skb = skb_share_check(skb, GFP_ATOMIC);
-+ if (!skb)
-+ return NULL;
-+
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+ page_start = (u64)skb->data;
-+
-+ /* split the linear part at the first 4k boundary and create one (big)
-+ * fragment with the rest
-+ */
-+ if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb))) {
-+ /* we'll add one more frag, make sure there's room */
-+ if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
-+ return NULL;
-+
-+ /* next page boundary */
-+ page_start = (page_start + 0x1000) & ~0xFFF;
-+ page = virt_to_page(page_start);
-+
-+ /* move the rest of fragments to make room for a new one at j */
-+ for (i = nr_frags - 1; i >= j; i--)
-+ skb_shinfo(skb)->frags[i + 1] = skb_shinfo(skb)->frags[i];
-+
-+ /* move length bytes to a paged fragment at j */
-+ length = min((u64)0x1000,
-+ (u64)skb->data + skb_headlen(skb) - page_start);
-+ skb->data_len += length;
-+ moved_len += length;
-+ skb_fill_page_desc(skb, j++, page, 0, length);
-+ get_page(page);
-+ skb_shinfo(skb)->nr_frags = ++nr_frags;
-+ }
-+ /* adjust the tail pointer */
-+ skb->tail -= moved_len;
-+ j = 0;
-+
-+ /* split any paged fragment that crosses a 4K boundary */
-+ while (j < nr_frags) {
-+ frag = &skb_shinfo(skb)->frags[j];
-+
-+ /* if there is a 4K boundary between the fragment's offset and end */
-+ if (HAS_DMA_ISSUE(frag->page_offset, frag->size)) {
-+ /* we'll add one more frag, make sure there's room */
-+ if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
-+ return NULL;
-+
-+ /* new page boundary */
-+ page_start = (u64)page_address(skb_frag_page(frag)) +
-+ frag->page_offset + 0x1000;
-+ page_start = (u64)page_start & ~0xFFF;
-+ page = virt_to_page(page_start);
-+
-+ /* move the rest of fragments to make room for a new one at j+1 */
-+ for (i = nr_frags - 1; i > j; i--)
-+ skb_shinfo(skb)->frags[i + 1] =
-+ skb_shinfo(skb)->frags[i];
-+
-+ /* move length bytes to a new paged fragment at j+1 */
-+ length = (u64)page_address(skb_frag_page(frag)) +
-+ frag->page_offset + frag->size - page_start;
-+ frag->size -= length;
-+ skb_fill_page_desc(skb, j + 1, page, 0, length);
-+ get_page(page);
-+ skb_shinfo(skb)->nr_frags = ++nr_frags;
-+ }
-+
-+ /* move to next frag */
-+ j++;
-+ }
-+
-+ return skb;
-+}
-+#endif
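-+
-+/* HAS_DMA_ISSUE() is defined elsewhere in the driver; a plausible
-+ * equivalent of the 4K-crossing test it performs, shown as an assumption
-+ * for illustration only:
-+ *
-+ *	#define EXAMPLE_CROSSES_4K(start, size) \
-+ *		((((u64)(start)) ^ ((u64)(start) + (size) - 1)) & ~0xFFFULL)
-+ *
-+ * i.e. the a010022 workaround triggers whenever the first and last bytes of
-+ * the range fall in different 4 KiB pages.
-+ */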
-+
-+int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd)
-+{
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ dma_addr_t addr;
-+ dma_addr_t sg_addr;
-+ struct sk_buff **skbh;
-+ struct net_device *net_dev = priv->net_dev;
-+ int sg_len, sgt_size;
-+ int err;
-+
-+ struct qm_sg_entry *sgt;
-+ void *sgt_buf;
-+ skb_frag_t *frag;
-+ int i = 0, j = 0;
-+ int nr_frags;
-+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
-+
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+ fd->format = qm_fd_sg;
-+
-+ sgt_size = sizeof(struct qm_sg_entry) * (1 + nr_frags);
-+
-+ /* Get a page frag to store the SGTable, or a full page if the errata
-+ * is in place and we need to avoid crossing a 4k boundary.
-+ */
-+#ifndef CONFIG_PPC
-+ if (unlikely(dpaa_errata_a010022))
-+ sgt_buf = page_address(alloc_page(GFP_ATOMIC));
-+ else
-+#endif
-+ sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size);
-+ if (unlikely(!sgt_buf)) {
-+ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+	/* the allocators used above do not zero the memory, so clear it here */
-+ memset(sgt_buf, 0, priv->tx_headroom + sgt_size);
-+
-+ /* Enable L3/L4 hardware checksum computation.
-+ *
-+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
-+ * need to write into the skb.
-+ */
-+ err = dpa_enable_tx_csum(priv, skb, fd,
-+ sgt_buf + DPA_TX_PRIV_DATA_SIZE);
-+ if (unlikely(err < 0)) {
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_err(net_dev, "HW csum error: %d\n", err);
-+ goto csum_failed;
-+ }
-+
-+ /* Assign the data from skb->data to the first SG list entry */
-+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
-+ sg_len = skb_headlen(skb);
-+ qm_sg_entry_set_bpid(&sgt[0], 0xff);
-+ qm_sg_entry_set_offset(&sgt[0], 0);
-+ qm_sg_entry_set_len(&sgt[0], sg_len);
-+ qm_sg_entry_set_ext(&sgt[0], 0);
-+ qm_sg_entry_set_final(&sgt[0], 0);
-+
-+ addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ err = -EINVAL;
-+ goto sg0_map_failed;
-+ }
-+
-+ qm_sg_entry_set64(&sgt[0], addr);
-+
-+ /* populate the rest of SGT entries */
-+ for (i = 1; i <= nr_frags; i++) {
-+ frag = &skb_shinfo(skb)->frags[i - 1];
-+ qm_sg_entry_set_bpid(&sgt[i], 0xff);
-+ qm_sg_entry_set_offset(&sgt[i], 0);
-+ qm_sg_entry_set_len(&sgt[i], frag->size);
-+ qm_sg_entry_set_ext(&sgt[i], 0);
-+
-+ if (i == nr_frags)
-+ qm_sg_entry_set_final(&sgt[i], 1);
-+ else
-+ qm_sg_entry_set_final(&sgt[i], 0);
-+
-+ DPA_BUG_ON(!skb_frag_page(frag));
-+ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
-+ dma_dir);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ err = -EINVAL;
-+ goto sg_map_failed;
-+ }
-+
-+ /* keep the offset in the address */
-+ qm_sg_entry_set64(&sgt[i], addr);
-+ }
-+
-+ fd->length20 = skb->len;
-+ fd->offset = priv->tx_headroom;
-+
-+ /* DMA map the SGT page */
-+ DPA_WRITE_SKB_PTR(skb, skbh, sgt_buf, 0);
-+ addr = dma_map_single(dpa_bp->dev, sgt_buf,
-+ priv->tx_headroom + sgt_size,
-+ dma_dir);
-+
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ err = -EINVAL;
-+ goto sgt_map_failed;
-+ }
-+
-+ qm_fd_addr_set64(fd, addr);
-+ fd->bpid = 0xff;
-+ fd->cmd |= FM_FD_CMD_FCO;
-+
-+ return 0;
-+
-+sgt_map_failed:
-+sg_map_failed:
-+ for (j = 0; j < i; j++) {
-+ sg_addr = qm_sg_addr(&sgt[j]);
-+ dma_unmap_page(dpa_bp->dev, sg_addr,
-+ qm_sg_entry_get_len(&sgt[j]), dma_dir);
-+ }
-+sg0_map_failed:
-+csum_failed:
-+ put_page(virt_to_head_page(sgt_buf));
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(skb_to_sg_fd);
-+
-+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv;
-+ const int queue_mapping = dpa_get_queue_mapping(skb);
-+ struct qman_fq *egress_fq, *conf_fq;
-+
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ /* If there is a Tx hook, run it. */
-+ if (dpaa_eth_hooks.tx &&
-+ dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
-+ /* won't update any Tx stats */
-+ return NETDEV_TX_OK;
-+#endif
-+
-+ priv = netdev_priv(net_dev);
-+
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+ if (priv->ceetm_en)
-+ return ceetm_tx(skb, net_dev);
-+#endif
-+
-+ egress_fq = priv->egress_fqs[queue_mapping];
-+ conf_fq = priv->conf_fqs[queue_mapping];
-+
-+ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
-+}
-+
-+int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
-+ struct qman_fq *egress_fq, struct qman_fq *conf_fq)
-+{
-+ struct dpa_priv_s *priv;
-+ struct qm_fd fd;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ int err = 0;
-+ const bool nonlinear = skb_is_nonlinear(skb);
-+ int *countptr, offset = 0;
-+
-+ priv = netdev_priv(net_dev);
-+ /* Non-migratable context, safe to use raw_cpu_ptr */
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+ percpu_stats = &percpu_priv->stats;
-+ countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
-+
-+ clear_fd(&fd);
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
-+ fd.cmd |= FM_FD_CMD_UPD;
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (unlikely(priv->ts_tx_en &&
-+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
-+ fd.cmd |= FM_FD_CMD_UPD;
-+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+#ifndef CONFIG_PPC
-+ if (unlikely(dpaa_errata_a010022)) {
-+ skb = split_skb_at_4k_boundaries(skb);
-+ if (!skb)
-+ goto skb_to_fd_failed;
-+ }
-+#endif
-+
-+ /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
-+ * we don't feed FMan with more fragments than it supports.
-+	 * Note that the first sgt entry stores the linear part of the skb,
-+	 * so we are one extra frag short.
-+ */
-+ if (nonlinear &&
-+ likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
-+ /* Just create a S/G fd based on the skb */
-+ err = skb_to_sg_fd(priv, skb, &fd);
-+ percpu_priv->tx_frag_skbuffs++;
-+ } else {
-+ /* Make sure we have enough headroom to accommodate private
-+ * data, parse results, etc. Normally this shouldn't happen if
-+ * we're here via the standard kernel stack.
-+ */
-+ if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
-+ struct sk_buff *skb_new;
-+
-+ skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
-+ if (unlikely(!skb_new)) {
-+ dev_kfree_skb(skb);
-+ percpu_stats->tx_errors++;
-+ return NETDEV_TX_OK;
-+ }
-+ dev_kfree_skb(skb);
-+ skb = skb_new;
-+ }
-+
-+ /* We're going to store the skb backpointer at the beginning
-+ * of the data buffer, so we need a privately owned skb
-+ */
-+
-+ /* Code borrowed from skb_unshare(). */
-+ if (skb_cloned(skb)) {
-+ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
-+ kfree_skb(skb);
-+ skb = nskb;
-+ /* skb_copy() has now linearized the skbuff. */
-+ } else if (unlikely(nonlinear)) {
-+ /* We are here because the egress skb contains
-+ * more fragments than we support. In this case,
-+ * we have no choice but to linearize it ourselves.
-+ */
-+ err = __skb_linearize(skb);
-+ }
-+ if (unlikely(!skb || err < 0))
-+ /* Common out-of-memory error path */
-+ goto enomem;
-+
-+ err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
-+ }
-+ if (unlikely(err < 0))
-+ goto skb_to_fd_failed;
-+
-+ if (fd.bpid != 0xff) {
-+ skb_recycle(skb);
-+ /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
-+ * but we need the skb to look as if returned by build_skb().
-+ * We need to manually adjust the tailptr as well.
-+ */
-+ skb->data = skb->head + offset;
-+ skb_reset_tail_pointer(skb);
-+
-+ (*countptr)++;
-+ percpu_priv->tx_returned++;
-+ }
-+
-+ if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0))
-+ goto xmit_failed;
-+
-+ return NETDEV_TX_OK;
-+
-+xmit_failed:
-+ if (fd.bpid != 0xff) {
-+ (*countptr)--;
-+ percpu_priv->tx_returned--;
-+ dpa_fd_release(net_dev, &fd);
-+ percpu_stats->tx_errors++;
-+ return NETDEV_TX_OK;
-+ }
-+ _dpa_cleanup_tx_fd(priv, &fd);
-+skb_to_fd_failed:
-+enomem:
-+ percpu_stats->tx_errors++;
-+ dev_kfree_skb(skb);
-+ return NETDEV_TX_OK;
-+}
-+EXPORT_SYMBOL(dpa_tx_extended);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
-@@ -0,0 +1,278 @@
-+/* Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/kthread.h>
-+#include <linux/io.h>
-+#include <linux/of_net.h>
-+#include "dpaa_eth.h"
-+#include "mac.h" /* struct mac_device */
-+#ifdef CONFIG_FSL_DPAA_1588
-+#include "dpaa_1588.h"
-+#endif
-+
-+static ssize_t dpaa_eth_show_addr(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev)
-+ return sprintf(buf, "%llx",
-+ (unsigned long long)mac_dev->res->start);
-+ else
-+ return sprintf(buf, "none");
-+}
-+
-+static ssize_t dpaa_eth_show_type(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t res = 0;
-+
-+ if (priv)
-+ res = sprintf(buf, "%s", priv->if_type);
-+
-+ return res;
-+}
-+
-+static ssize_t dpaa_eth_show_fqids(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t bytes = 0;
-+ int i = 0;
-+ char *str;
-+ struct dpa_fq *fq;
-+ struct dpa_fq *tmp;
-+ struct dpa_fq *prev = NULL;
-+ u32 first_fqid = 0;
-+ u32 last_fqid = 0;
-+ char *prevstr = NULL;
-+
-+ list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
-+ switch (fq->fq_type) {
-+ case FQ_TYPE_RX_DEFAULT:
-+ str = "Rx default";
-+ break;
-+ case FQ_TYPE_RX_ERROR:
-+ str = "Rx error";
-+ break;
-+ case FQ_TYPE_RX_PCD:
-+ str = "Rx PCD";
-+ break;
-+ case FQ_TYPE_TX_CONFIRM:
-+ str = "Tx default confirmation";
-+ break;
-+ case FQ_TYPE_TX_CONF_MQ:
-+ str = "Tx confirmation (mq)";
-+ break;
-+ case FQ_TYPE_TX_ERROR:
-+ str = "Tx error";
-+ break;
-+ case FQ_TYPE_TX:
-+ str = "Tx";
-+ break;
-+ case FQ_TYPE_RX_PCD_HI_PRIO:
-+			str = "Rx PCD High Priority";
-+ break;
-+ default:
-+ str = "Unknown";
-+ }
-+
-+ if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
-+ str != prevstr)) {
-+ if (last_fqid == first_fqid)
-+ bytes += sprintf(buf + bytes,
-+ "%s: %d\n", prevstr, prev->fqid);
-+ else
-+ bytes += sprintf(buf + bytes,
-+ "%s: %d - %d\n", prevstr,
-+ first_fqid, last_fqid);
-+ }
-+
-+ if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
-+ last_fqid = fq->fqid;
-+ else
-+ first_fqid = last_fqid = fq->fqid;
-+
-+ prev = fq;
-+ prevstr = str;
-+ i++;
-+ }
-+
-+ if (prev) {
-+ if (last_fqid == first_fqid)
-+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
-+ prev->fqid);
-+ else
-+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
-+ first_fqid, last_fqid);
-+ }
-+
-+ return bytes;
-+}
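-+
-+/* Illustrative output (the FQID values are hypothetical), showing how the
-+ * loop above collapses consecutive FQIDs of the same type into ranges:
-+ *
-+ *	Rx error: 259
-+ *	Rx default: 260
-+ *	Tx error: 261
-+ *	Tx default confirmation: 262
-+ *	Tx: 896 - 903
-+ */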
-+
-+static ssize_t dpaa_eth_show_bpids(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ ssize_t bytes = 0;
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ int i = 0;
-+
-+ for (i = 0; i < priv->bp_count; i++)
-+		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
-+				  dpa_bp[i].bpid);
-+
-+ return bytes;
-+}
-+
-+static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ int n = 0;
-+
-+ if (mac_dev)
-+ n = fm_mac_dump_regs(mac_dev, buf, n);
-+ else
-+ return sprintf(buf, "no mac registers\n");
-+
-+ return n;
-+}
-+
-+static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ int n = 0;
-+
-+ if (mac_dev)
-+ n = fm_mac_dump_rx_stats(mac_dev, buf, n);
-+ else
-+ return sprintf(buf, "no mac rx stats\n");
-+
-+ return n;
-+}
-+
-+static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ int n = 0;
-+
-+ if (mac_dev)
-+ n = fm_mac_dump_tx_stats(mac_dev, buf, n);
-+ else
-+ return sprintf(buf, "no mac tx stats\n");
-+
-+ return n;
-+}
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+static ssize_t dpaa_eth_show_ptp_1588(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+
-+ if (priv->tsu && priv->tsu->valid)
-+ return sprintf(buf, "1\n");
-+ else
-+ return sprintf(buf, "0\n");
-+}
-+
-+static ssize_t dpaa_eth_set_ptp_1588(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ unsigned int num;
-+ unsigned long flags;
-+
-+ if (kstrtouint(buf, 0, &num) < 0)
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ if (num) {
-+ if (priv->tsu)
-+ priv->tsu->valid = TRUE;
-+ } else {
-+ if (priv->tsu)
-+ priv->tsu->valid = FALSE;
-+ }
-+
-+ local_irq_restore(flags);
-+
-+ return count;
-+}
-+#endif
-+
-+static struct device_attribute dpaa_eth_attrs[] = {
-+ __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
-+ __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL),
-+ __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
-+ __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
-+ __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL),
-+ __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL),
-+ __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL),
-+#ifdef CONFIG_FSL_DPAA_1588
-+ __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588,
-+ dpaa_eth_set_ptp_1588),
-+#endif
-+};
-+
-+void dpaa_eth_sysfs_init(struct device *dev)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
-+ if (device_create_file(dev, &dpaa_eth_attrs[i])) {
-+ dev_err(dev, "Error creating sysfs file\n");
-+ while (i > 0)
-+ device_remove_file(dev, &dpaa_eth_attrs[--i]);
-+ return;
-+ }
-+}
-+EXPORT_SYMBOL(dpaa_eth_sysfs_init);
-+
-+void dpaa_eth_sysfs_remove(struct device *dev)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
-+ device_remove_file(dev, &dpaa_eth_attrs[i]);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
-@@ -0,0 +1,144 @@
-+/* Copyright 2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM dpaa_eth
-+
-+#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _DPAA_ETH_TRACE_H
-+
-+#include <linux/skbuff.h>
-+#include <linux/netdevice.h>
-+#include "dpaa_eth.h"
-+#include <linux/tracepoint.h>
-+
-+#define fd_format_name(format) { qm_fd_##format, #format }
-+#define fd_format_list \
-+ fd_format_name(contig), \
-+ fd_format_name(sg)
-+#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \
-+ " status=0x%08x"
-+
-+/* This is used to declare a class of events.
-+ * Individual events of this type will be defined below.
-+ */
-+
-+/* Store details about a frame descriptor and the FQ on which it was
-+ * transmitted/received.
-+ */
-+DECLARE_EVENT_CLASS(dpaa_eth_fd,
-+ /* Trace function prototype */
-+ TP_PROTO(struct net_device *netdev,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd),
-+
-+ /* Repeat argument list here */
-+ TP_ARGS(netdev, fq, fd),
-+
-+ /* A structure containing the relevant information we want to record.
-+ * Declare name and type for each normal element, name, type and size
-+ * for arrays. Use __string for variable length strings.
-+ */
-+ TP_STRUCT__entry(
-+ __field(u32, fqid)
-+ __field(u64, fd_addr)
-+ __field(u8, fd_format)
-+ __field(u16, fd_offset)
-+ __field(u32, fd_length)
-+ __field(u32, fd_status)
-+ __string(name, netdev->name)
-+ ),
-+
-+ /* The function that assigns values to the above declared fields */
-+ TP_fast_assign(
-+ __entry->fqid = fq->fqid;
-+ __entry->fd_addr = qm_fd_addr_get64(fd);
-+ __entry->fd_format = fd->format;
-+ __entry->fd_offset = dpa_fd_offset(fd);
-+ __entry->fd_length = dpa_fd_length(fd);
-+ __entry->fd_status = fd->status;
-+ __assign_str(name, netdev->name);
-+ ),
-+
-+ /* This is what gets printed when the trace event is triggered */
-+ /* TODO: print the status using __print_flags() */
-+ TP_printk(TR_FMT,
-+ __get_str(name), __entry->fqid, __entry->fd_addr,
-+ __print_symbolic(__entry->fd_format, fd_format_list),
-+ __entry->fd_offset, __entry->fd_length, __entry->fd_status)
-+);
-+
-+/* Now declare events of the above type. Format is:
-+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
-+ */
-+
-+/* Tx (egress) fd */
-+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
-+
-+ TP_PROTO(struct net_device *netdev,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd),
-+
-+ TP_ARGS(netdev, fq, fd)
-+);
-+
-+/* Rx fd */
-+DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
-+
-+ TP_PROTO(struct net_device *netdev,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd),
-+
-+ TP_ARGS(netdev, fq, fd)
-+);
-+
-+/* Tx confirmation fd */
-+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
-+
-+ TP_PROTO(struct net_device *netdev,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd),
-+
-+ TP_ARGS(netdev, fq, fd)
-+);
-+
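-+/* Each DEFINE_EVENT() above generates a trace_<name>() helper that the
-+ * driver can call on its hot paths; a minimal usage sketch for the Tx path
-+ * (argument names are illustrative) would be:
-+ *
-+ * trace_dpa_tx_fd(priv->net_dev, egress_fq, &fd);
-+ *
-+ * The call compiles down to a static-key no-op while the tracepoint is
-+ * disabled.
-+ */
-+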
-+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
-+ * The syntax is the same as for DECLARE_EVENT_CLASS().
-+ */
-+
-+#endif /* _DPAA_ETH_TRACE_H */
-+
-+/* This must be outside ifdef _DPAA_ETH_TRACE_H */
-+#undef TRACE_INCLUDE_PATH
-+#define TRACE_INCLUDE_PATH .
-+#undef TRACE_INCLUDE_FILE
-+#define TRACE_INCLUDE_FILE dpaa_eth_trace
-+#include <trace/define_trace.h>
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
-@@ -0,0 +1,544 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/string.h>
-+
-+#include "dpaa_eth.h"
-+#include "mac.h" /* struct mac_device */
-+#include "dpaa_eth_common.h"
-+
-+static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
-+ "interrupts",
-+ "rx packets",
-+ "tx packets",
-+ "tx recycled",
-+ "tx confirm",
-+ "tx S/G",
-+ "rx S/G",
-+ "tx error",
-+ "rx error",
-+ "bp count"
-+};
-+
-+static const char dpa_stats_global[][ETH_GSTRING_LEN] = {
-+ /* dpa rx errors */
-+ "rx dma error",
-+ "rx frame physical error",
-+ "rx frame size error",
-+ "rx header error",
-+ "rx csum error",
-+
-+ /* demultiplexing errors */
-+ "qman cg_tdrop",
-+ "qman wred",
-+ "qman error cond",
-+ "qman early window",
-+ "qman late window",
-+ "qman fq tdrop",
-+ "qman fq retired",
-+ "qman orp disabled",
-+
-+ /* congestion related stats */
-+ "congestion time (ms)",
-+ "entered congestion",
-+ "congested (0/1)"
-+};
-+
-+#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
-+#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
-+
-+static int __cold dpa_get_settings(struct net_device *net_dev,
-+ struct ethtool_cmd *et_cmd)
-+{
-+ int _errno;
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_dbg(net_dev, "phy device not initialized\n");
-+ return 0;
-+ }
-+
-+ _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno);
-+
-+ return _errno;
-+}
-+
-+static int __cold dpa_set_settings(struct net_device *net_dev,
-+ struct ethtool_cmd *et_cmd)
-+{
-+ int _errno;
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno);
-+
-+ return _errno;
-+}
-+
-+static void __cold dpa_get_drvinfo(struct net_device *net_dev,
-+ struct ethtool_drvinfo *drvinfo)
-+{
-+ int _errno;
-+
-+ strncpy(drvinfo->driver, KBUILD_MODNAME,
-+ sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
-+ _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%X", 0);
-+
-+ if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
-+ /* Truncated output */
-+ netdev_notice(net_dev, "snprintf() = %d\n", _errno);
-+ } else if (unlikely(_errno < 0)) {
-+ netdev_warn(net_dev, "snprintf() = %d\n", _errno);
-+ memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
-+ }
-+ strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
-+}
-+
-+static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
-+{
-+ return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
-+}
-+
-+static void __cold dpa_set_msglevel(struct net_device *net_dev,
-+ uint32_t msg_enable)
-+{
-+ ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
-+}
-+
-+static int __cold dpa_nway_reset(struct net_device *net_dev)
-+{
-+ int _errno;
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ _errno = 0;
-+ if (priv->mac_dev->phy_dev->autoneg) {
-+ _errno = phy_start_aneg(priv->mac_dev->phy_dev);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
-+ _errno);
-+ }
-+
-+ return _errno;
-+}
-+
-+static void __cold dpa_get_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *epause)
-+{
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+ struct phy_device *phy_dev;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ if (mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return;
-+ }
-+
-+ phy_dev = mac_dev->phy_dev;
-+ if (unlikely(phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return;
-+ }
-+
-+ epause->autoneg = mac_dev->autoneg_pause;
-+ epause->rx_pause = mac_dev->rx_pause_active;
-+ epause->tx_pause = mac_dev->tx_pause_active;
-+}
-+
-+static int __cold dpa_set_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *epause)
-+{
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+ struct phy_device *phy_dev;
-+ int _errno;
-+ u32 newadv, oldadv;
-+ bool rx_pause, tx_pause;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ if (mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+
-+ phy_dev = mac_dev->phy_dev;
-+ if (unlikely(phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ if (!(phy_dev->supported & SUPPORTED_Pause) ||
-+ (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
-+ (epause->rx_pause != epause->tx_pause)))
-+ return -EINVAL;
-+
-+ /* The MAC should know how to handle PAUSE frame autonegotiation before
-+ * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
-+ * settings.
-+ */
-+ mac_dev->autoneg_pause = !!epause->autoneg;
-+ mac_dev->rx_pause_req = !!epause->rx_pause;
-+ mac_dev->tx_pause_req = !!epause->tx_pause;
-+
-+ /* Determine the sym/asym advertised PAUSE capabilities from the desired
-+ * rx/tx pause settings.
-+ */
-+ newadv = 0;
-+ if (epause->rx_pause)
-+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-+ if (epause->tx_pause)
-+ newadv |= ADVERTISED_Asym_Pause;
-+
-+ oldadv = phy_dev->advertising &
-+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-+
-+ /* If there are differences between the old and the new advertised
-+ * values, restart PHY autonegotiation and advertise the new values.
-+ */
-+ if (oldadv != newadv) {
-+ phy_dev->advertising &= ~(ADVERTISED_Pause
-+ | ADVERTISED_Asym_Pause);
-+ phy_dev->advertising |= newadv;
-+ if (phy_dev->autoneg) {
-+ _errno = phy_start_aneg(phy_dev);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
-+ _errno);
-+ }
-+ }
-+
-+ get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
-+ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
-+
-+ return _errno;
-+}
-+
-+#ifdef CONFIG_PM
-+static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+
-+ wol->supported = 0;
-+ wol->wolopts = 0;
-+
-+ if (!priv->wol || !device_can_wakeup(net_dev->dev.parent))
-+ return;
-+
-+ if (priv->wol & DPAA_WOL_MAGIC) {
-+ wol->supported = WAKE_MAGIC;
-+ wol->wolopts = WAKE_MAGIC;
-+ }
-+}
-+
-+static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_dbg(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ if (!device_can_wakeup(net_dev->dev.parent) ||
-+ (wol->wolopts & ~WAKE_MAGIC))
-+ return -EOPNOTSUPP;
-+
-+ priv->wol = 0;
-+
-+ if (wol->wolopts & WAKE_MAGIC) {
-+ priv->wol = DPAA_WOL_MAGIC;
-+ device_set_wakeup_enable(net_dev->dev.parent, 1);
-+ } else {
-+ device_set_wakeup_enable(net_dev->dev.parent, 0);
-+ }
-+
-+ return 0;
-+}
-+#endif
-+
-+static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
-+{
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee);
-+}
-+
-+static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
-+{
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee);
-+}
-+
-+static int dpa_get_sset_count(struct net_device *net_dev, int type)
-+{
-+ unsigned int total_stats, num_stats;
-+
-+ num_stats = num_online_cpus() + 1;
-+ total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
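-+ /* e.g. with four online CPUs: (4 + 1) * 10 per-cpu counters
-+ * + 16 global counters = 66 values (illustrative figures)
-+ */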
-+
-+ switch (type) {
-+ case ETH_SS_STATS:
-+ return total_stats;
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
-+ int crr_cpu, u64 bp_count, u64 *data)
-+{
-+ int num_stat_values = num_cpus + 1;
-+ int crr_stat = 0;
-+
-+ /* update current CPU's stats and also add them to the total values */
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = bp_count;
-+ data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
-+}
-+
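-+/* A sketch of the resulting data[] layout for two online CPUs (rows match
-+ * the dpa_stats_percpu strings, one row per statistic):
-+ *
-+ * data[0..2]: interrupts [CPU 0] [CPU 1] [TOTAL]
-+ * data[3..5]: rx packets [CPU 0] [CPU 1] [TOTAL]
-+ * ...
-+ * followed by the DPA_STATS_GLOBAL_LEN global counters.
-+ */
-+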
-+static void dpa_get_ethtool_stats(struct net_device *net_dev,
-+ struct ethtool_stats *stats, u64 *data)
-+{
-+ u64 bp_count, cg_time, cg_num, cg_status;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ struct qm_mcr_querycgr query_cgr;
-+ struct dpa_rx_errors rx_errors;
-+ struct dpa_ern_cnt ern_cnt;
-+ struct dpa_priv_s *priv;
-+ unsigned int num_cpus, offset;
-+ struct dpa_bp *dpa_bp;
-+ int total_stats, i;
-+
-+ total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
-+ priv = netdev_priv(net_dev);
-+ dpa_bp = priv->dpa_bp;
-+ num_cpus = num_online_cpus();
-+ bp_count = 0;
-+
-+ memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
-+ memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
-+ memset(data, 0, total_stats * sizeof(u64));
-+
-+ for_each_online_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+
-+ if (dpa_bp->percpu_count)
-+ bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
-+
-+ rx_errors.dme += percpu_priv->rx_errors.dme;
-+ rx_errors.fpe += percpu_priv->rx_errors.fpe;
-+ rx_errors.fse += percpu_priv->rx_errors.fse;
-+ rx_errors.phe += percpu_priv->rx_errors.phe;
-+ rx_errors.cse += percpu_priv->rx_errors.cse;
-+
-+ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
-+ ern_cnt.wred += percpu_priv->ern_cnt.wred;
-+ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
-+ ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
-+ ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
-+ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
-+ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
-+ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
-+
-+ copy_stats(percpu_priv, num_cpus, i, bp_count, data);
-+ }
-+
-+ offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
-+ memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
-+
-+ offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
-+ memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
-+
-+ /* gather congestion related counters */
-+ cg_num = 0;
-+ cg_status = 0;
-+ cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
-+ if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
-+ cg_num = priv->cgr_data.cgr_congested_count;
-+ cg_status = query_cgr.cgr.cs;
-+
-+ /* reset congestion stats (like the QMan API does) */
-+ priv->cgr_data.congested_jiffies = 0;
-+ priv->cgr_data.cgr_congested_count = 0;
-+ }
-+
-+ offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
-+ data[offset++] = cg_time;
-+ data[offset++] = cg_num;
-+ data[offset++] = cg_status;
-+}
-+
-+static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
-+{
-+ unsigned int i, j, num_cpus, size;
-+ char stat_string_cpu[ETH_GSTRING_LEN];
-+ u8 *strings;
-+
-+ strings = data;
-+ num_cpus = num_online_cpus();
-+ size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
-+
-+ for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
-+ for (j = 0; j < num_cpus; j++) {
-+ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", dpa_stats_percpu[i], j);
-+ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
-+ strings += ETH_GSTRING_LEN;
-+ }
-+ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", dpa_stats_percpu[i]);
-+ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
-+ strings += ETH_GSTRING_LEN;
-+ }
-+ memcpy(strings, dpa_stats_global, size);
-+}
-+
-+const struct ethtool_ops dpa_ethtool_ops = {
-+ .get_settings = dpa_get_settings,
-+ .set_settings = dpa_set_settings,
-+ .get_drvinfo = dpa_get_drvinfo,
-+ .get_msglevel = dpa_get_msglevel,
-+ .set_msglevel = dpa_set_msglevel,
-+ .nway_reset = dpa_nway_reset,
-+ .get_pauseparam = dpa_get_pauseparam,
-+ .set_pauseparam = dpa_set_pauseparam,
-+ .self_test = NULL, /* TODO invoke the cold-boot unit-test? */
-+ .get_link = ethtool_op_get_link,
-+ .get_eee = dpa_get_eee,
-+ .set_eee = dpa_set_eee,
-+ .get_sset_count = dpa_get_sset_count,
-+ .get_ethtool_stats = dpa_get_ethtool_stats,
-+ .get_strings = dpa_get_strings,
-+#ifdef CONFIG_PM
-+ .get_wol = dpa_get_wol,
-+ .set_wol = dpa_set_wol,
-+#endif
-+};
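-+
-+/* The ops table is expected to be wired up by the netdev setup code,
-+ * presumably along the lines of:
-+ *
-+ * net_dev->ethtool_ops = &dpa_ethtool_ops;
-+ *
-+ * (The exact hookup lives in the common init path and is not shown here.)
-+ */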
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
-@@ -0,0 +1,290 @@
-+/*
-+ * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
-+ *
-+ * Author: Yangbo Lu <yangbo.lu@freescale.com>
-+ *
-+ * Copyright 2014 Freescale Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the
-+ * Free Software Foundation; either version 2 of the License, or (at your
-+ * option) any later version.
-+*/
-+
-+#include <linux/device.h>
-+#include <linux/hrtimer.h>
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/of_platform.h>
-+#include <linux/timex.h>
-+#include <linux/io.h>
-+
-+#include <linux/ptp_clock_kernel.h>
-+
-+#include "dpaa_eth.h"
-+#include "mac.h"
-+
-+struct ptp_clock *clock;
-+
-+static struct mac_device *mac_dev;
-+static u32 freqCompensation;
-+
-+/* Bit definitions for the TMR_CTRL register */
-+#define ALM1P (1<<31) /* Alarm1 output polarity */
-+#define ALM2P (1<<30) /* Alarm2 output polarity */
-+#define FS (1<<28) /* FIPER start indication */
-+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
-+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
-+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
-+#define TCLK_PERIOD_MASK (0x3ff)
-+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
-+#define FRD (1<<14) /* FIPER Realignment Disable */
-+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
-+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
-+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
-+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
-+#define COPH (1<<7) /* Generated clock output phase. */
-+#define CIPH (1<<6) /* External oscillator input clock phase */
-+#define TMSR (1<<5) /* Timer soft reset. */
-+#define BYP (1<<3) /* Bypass drift compensated clock */
-+#define TE (1<<2) /* 1588 timer enable. */
-+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
-+#define CKSEL_MASK (0x3)
-+
-+/* Bit definitions for the TMR_TEVENT register */
-+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
-+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
-+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
-+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
-+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
-+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
-+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
-+
-+/* Bit definitions for the TMR_TEMASK register */
-+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
-+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
-+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
-+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
-+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
-+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
-+
-+/* Bit definitions for the TMR_PEVENT register */
-+#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
-+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
-+#define RXP (1<<0) /* PTP frame has been received */
-+
-+/* Bit definitions for the TMR_PEMASK register */
-+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
-+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
-+#define RXPEN (1<<0) /* Receive PTP packet event enable */
-+
-+/* Bit definitions for the TMR_STAT register */
-+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
-+#define STAT_VEC_MASK (0x3f)
-+
-+/* Bit definitions for the TMR_PRSC register */
-+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
-+#define PRSC_OCK_MASK (0xffff)
-+
-+
-+#define N_EXT_TS 2
-+
-+static void set_alarm(void)
-+{
-+ u64 ns;
-+
-+ if (mac_dev->fm_rtc_get_cnt)
-+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
-+ ns += 1500000000ULL;
-+ ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
-+ ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
-+ if (mac_dev->fm_rtc_set_alarm)
-+ mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns);
-+}
-+
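-+/* Worked example of the rounding above: with the counter at 10.7 s, adding
-+ * 1.5 s gives 12.2 s, truncating to whole seconds gives 12 s, so the alarm
-+ * lands one DPA_PTP_NOMINAL_FREQ_PERIOD_NS before the 12 s mark, i.e. one
-+ * FIPER period before the next full second that is at least half a second
-+ * away. (Values are illustrative.)
-+ */
-+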
-+static void set_fipers(void)
-+{
-+ u64 fiper;
-+
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(mac_dev->fm_dev);
-+
-+ set_alarm();
-+ fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
-+ if (mac_dev->fm_rtc_set_fiper)
-+ mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper);
-+
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(mac_dev->fm_dev);
-+}
-+
-+/* PTP clock operations */
-+
-+static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-+{
-+ u64 adj;
-+ u32 diff, tmr_add;
-+ int neg_adj = 0;
-+
-+ if (ppb < 0) {
-+ neg_adj = 1;
-+ ppb = -ppb;
-+ }
-+
-+ tmr_add = freqCompensation;
-+ adj = tmr_add;
-+ adj *= ppb;
-+ diff = div_u64(adj, 1000000000ULL);
-+
-+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
-+
-+ if (mac_dev->fm_rtc_set_drift)
-+ mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add);
-+
-+ return 0;
-+}
-+
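-+/* The adjustment above scales the timer addend linearly:
-+ * diff = tmr_add * |ppb| / 10^9. For instance, at the advertised max_adj
-+ * of 512000 ppb the addend changes by tmr_add * 0.000512, i.e. about
-+ * 0.05 %. (Numbers are illustrative; tmr_add starts from the drift value
-+ * read at module load.)
-+ */
-+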
-+static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta)
-+{
-+ s64 now;
-+
-+ if (mac_dev->fm_rtc_get_cnt)
-+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now);
-+
-+ now += delta;
-+
-+ if (mac_dev->fm_rtc_set_cnt)
-+ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now);
-+ set_fipers();
-+
-+ return 0;
-+}
-+
-+static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-+{
-+ u64 ns;
-+ u32 remainder;
-+
-+ if (mac_dev->fm_rtc_get_cnt)
-+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
-+
-+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-+ ts->tv_nsec = remainder;
-+ return 0;
-+}
-+
-+static int ptp_dpa_settime(struct ptp_clock_info *ptp,
-+ const struct timespec64 *ts)
-+{
-+ u64 ns;
-+
-+ ns = ts->tv_sec * 1000000000ULL;
-+ ns += ts->tv_nsec;
-+
-+ if (mac_dev->fm_rtc_set_cnt)
-+ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns);
-+ set_fipers();
-+ return 0;
-+}
-+
-+static int ptp_dpa_enable(struct ptp_clock_info *ptp,
-+ struct ptp_clock_request *rq, int on)
-+{
-+ u32 bit;
-+
-+ switch (rq->type) {
-+ case PTP_CLK_REQ_EXTTS:
-+ switch (rq->extts.index) {
-+ case 0:
-+ bit = ETS1EN;
-+ break;
-+ case 1:
-+ bit = ETS2EN;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ if (on) {
-+ if (mac_dev->fm_rtc_enable_interrupt)
-+ mac_dev->fm_rtc_enable_interrupt(
-+ mac_dev->fm_dev, bit);
-+ } else {
-+ if (mac_dev->fm_rtc_disable_interrupt)
-+ mac_dev->fm_rtc_disable_interrupt(
-+ mac_dev->fm_dev, bit);
-+ }
-+ return 0;
-+
-+ case PTP_CLK_REQ_PPS:
-+ if (on) {
-+ if (mac_dev->fm_rtc_enable_interrupt)
-+ mac_dev->fm_rtc_enable_interrupt(
-+ mac_dev->fm_dev, PP1EN);
-+ } else {
-+ if (mac_dev->fm_rtc_disable_interrupt)
-+ mac_dev->fm_rtc_disable_interrupt(
-+ mac_dev->fm_dev, PP1EN);
-+ }
-+ return 0;
-+
-+ default:
-+ break;
-+ }
-+
-+ return -EOPNOTSUPP;
-+}
-+
-+static struct ptp_clock_info ptp_dpa_caps = {
-+ .owner = THIS_MODULE,
-+ .name = "dpaa clock",
-+ .max_adj = 512000,
-+ .n_alarm = 0,
-+ .n_ext_ts = N_EXT_TS,
-+ .n_per_out = 0,
-+ .pps = 1,
-+ .adjfreq = ptp_dpa_adjfreq,
-+ .adjtime = ptp_dpa_adjtime,
-+ .gettime64 = ptp_dpa_gettime,
-+ .settime64 = ptp_dpa_settime,
-+ .enable = ptp_dpa_enable,
-+};
-+
-+static int __init __cold dpa_ptp_load(void)
-+{
-+ struct device *ptp_dev;
-+ struct timespec64 now;
-+ int dpa_phc_index;
-+ int err;
-+
-+ if (!(ptp_priv.of_dev && ptp_priv.mac_dev))
-+ return -ENODEV;
-+
-+ ptp_dev = &ptp_priv.of_dev->dev;
-+ mac_dev = ptp_priv.mac_dev;
-+
-+ if (mac_dev->fm_rtc_get_drift)
-+ mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation);
-+
-+ getnstimeofday64(&now);
-+ ptp_dpa_settime(&ptp_dpa_caps, &now);
-+
-+ clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev);
-+ if (IS_ERR(clock)) {
-+ err = PTR_ERR(clock);
-+ return err;
-+ }
-+ dpa_phc_index = ptp_clock_index(clock);
-+ return 0;
-+}
-+module_init(dpa_ptp_load);
-+
-+static void __exit __cold dpa_ptp_unload(void)
-+{
-+ if (mac_dev->fm_rtc_disable_interrupt)
-+ mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff);
-+ ptp_clock_unregister(clock);
-+}
-+module_exit(dpa_ptp_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
-@@ -0,0 +1,909 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/io.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_mdio.h>
-+#include <linux/phy.h>
-+#include <linux/netdevice.h>
-+
-+#include "dpaa_eth.h"
-+#include "mac.h"
-+#include "lnxwrp_fsl_fman.h"
-+
-+#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
-+
-+#include "fsl_fman_dtsec.h"
-+#include "fsl_fman_tgec.h"
-+#include "fsl_fman_memac.h"
-+#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h"
-+
-+#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
-+
-+MODULE_DESCRIPTION(MAC_DESCRIPTION);
-+
-+struct mac_priv_s {
-+ struct fm_mac_dev *fm_mac;
-+};
-+
-+const char *mac_driver_description __initconst = MAC_DESCRIPTION;
-+const size_t mac_sizeof_priv[] = {
-+ [DTSEC] = sizeof(struct mac_priv_s),
-+ [XGMAC] = sizeof(struct mac_priv_s),
-+ [MEMAC] = sizeof(struct mac_priv_s)
-+};
-+
-+static const enet_mode_t _100[] = {
-+ [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
-+ [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
-+};
-+
-+static const enet_mode_t _1000[] = {
-+ [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
-+ [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
-+ [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000,
-+ [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
-+ [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
-+ [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
-+ [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
-+ [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
-+ [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
-+};
-+
-+static enet_mode_t __cold __attribute__((nonnull))
-+macdev2enetinterface(const struct mac_device *mac_dev)
-+{
-+ switch (mac_dev->max_speed) {
-+ case SPEED_100:
-+ return _100[mac_dev->phy_if];
-+ case SPEED_1000:
-+ return _1000[mac_dev->phy_if];
-+ case SPEED_2500:
-+ return e_ENET_MODE_SGMII_2500;
-+ case SPEED_10000:
-+ return e_ENET_MODE_XGMII_10000;
-+ default:
-+ return e_ENET_MODE_MII_100;
-+ }
-+}
-+
-+static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception)
-+{
-+ struct mac_device *mac_dev;
-+
-+ mac_dev = (struct mac_device *)_mac_dev;
-+
-+ if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
-+ /* don't flag RX FIFO overflows after the first one */
-+ fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
-+ e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
-+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n",
-+ exception);
-+ }
-+
-+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__,
-+ exception);
-+}
-+
-+static int __cold init(struct mac_device *mac_dev)
-+{
-+ int _errno;
-+ struct mac_priv_s *priv;
-+ t_FmMacParams param;
-+ uint32_t version;
-+
-+ priv = macdev_priv(mac_dev);
-+
-+ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
-+ mac_dev->dev, mac_dev->res->start, 0x2000);
-+ param.enetMode = macdev2enetinterface(mac_dev);
-+ memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
-+ sizeof(mac_dev->addr)));
-+ param.macId = mac_dev->cell_index;
-+ param.h_Fm = (handle_t)mac_dev->fm;
-+ param.mdioIrq = NO_IRQ;
-+ param.f_Exception = mac_exception;
-+ param.f_Event = mac_exception;
-+ param.h_App = mac_dev;
-+
-+ priv->fm_mac = fm_mac_config(&param);
-+ if (unlikely(priv->fm_mac == NULL)) {
-+ _errno = -EINVAL;
-+ goto _return;
-+ }
-+
-+ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
-+ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
-+ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
-+
-+ _errno = fm_mac_config_max_frame_length(priv->fm_mac,
-+ fm_get_max_frm());
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
-+ /* 10G always works with pad and CRC */
-+ _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ _errno = fm_mac_config_half_duplex(priv->fm_mac,
-+ mac_dev->half_duplex);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+ } else {
-+ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+ }
-+
-+ _errno = fm_mac_init(priv->fm_mac);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
-+ /* For the 1G MAC, disable the MIB counter overflow interrupt by default */
-+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
-+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
-+ e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+ }
-+#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
-+
-+ /* For 10G MAC, disable Tx ECC exception */
-+ if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
-+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
-+ e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+ }
-+
-+ _errno = fm_mac_get_version(priv->fm_mac, &version);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
-+ ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
-+ "dTSEC" : "XGEC"), version);
-+
-+ goto _return;
-+
-+
-+_return_fm_mac_free:
-+ fm_mac_free(mac_dev->get_mac_handle(mac_dev));
-+
-+_return:
-+ return _errno;
-+}
-+
-+static int __cold memac_init(struct mac_device *mac_dev)
-+{
-+ int _errno;
-+ struct mac_priv_s *priv;
-+ t_FmMacParams param;
-+
-+ priv = macdev_priv(mac_dev);
-+
-+ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
-+ mac_dev->dev, mac_dev->res->start, 0x2000);
-+ param.enetMode = macdev2enetinterface(mac_dev);
-+ memcpy(&param.addr, mac_dev->addr, sizeof(mac_dev->addr));
-+ param.macId = mac_dev->cell_index;
-+ param.h_Fm = (handle_t)mac_dev->fm;
-+ param.mdioIrq = NO_IRQ;
-+ param.f_Exception = mac_exception;
-+ param.f_Event = mac_exception;
-+ param.h_App = mac_dev;
-+
-+ priv->fm_mac = fm_mac_config(&param);
-+ if (unlikely(priv->fm_mac == NULL)) {
-+ _errno = -EINVAL;
-+ goto _return;
-+ }
-+
-+ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
-+ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
-+ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
-+
-+ _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ _errno = fm_mac_init(priv->fm_mac);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ dev_info(mac_dev->dev, "FMan MEMAC\n");
-+
-+ goto _return;
-+
-+_return_fm_mac_free:
-+ fm_mac_free(priv->fm_mac);
-+
-+_return:
-+ return _errno;
-+}
-+
-+static int __cold start(struct mac_device *mac_dev)
-+{
-+ int _errno;
-+ struct phy_device *phy_dev = mac_dev->phy_dev;
-+
-+ _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
-+
-+ if (!_errno && phy_dev)
-+ phy_start(phy_dev);
-+
-+ return _errno;
-+}
-+
-+static int __cold stop(struct mac_device *mac_dev)
-+{
-+ if (mac_dev->phy_dev)
-+ phy_stop(mac_dev->phy_dev);
-+
-+ return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
-+}
-+
-+static int __cold set_multi(struct net_device *net_dev,
-+ struct mac_device *mac_dev)
-+{
-+ struct mac_priv_s *mac_priv;
-+ struct mac_address *old_addr, *tmp;
-+ struct netdev_hw_addr *ha;
-+ int _errno;
-+
-+ mac_priv = macdev_priv(mac_dev);
-+
-+ /* Clear previous address list */
-+ list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
-+ _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
-+ (t_EnetAddr *)old_addr->addr);
-+ if (_errno < 0)
-+ return _errno;
-+
-+ list_del(&old_addr->list);
-+ kfree(old_addr);
-+ }
-+
-+ /* Add all the addresses from the new list */
-+ netdev_for_each_mc_addr(ha, net_dev) {
-+ _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
-+ (t_EnetAddr *)ha->addr);
-+ if (_errno < 0)
-+ return _errno;
-+
-+ tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
-+ if (!tmp) {
-+ dev_err(mac_dev->dev, "Out of memory\n");
-+ return -ENOMEM;
-+ }
-+ memcpy(tmp->addr, ha->addr, ETH_ALEN);
-+ list_add(&tmp->list, &mac_dev->mc_addr_list);
-+ }
-+ return 0;
-+}
-+
-+/* Avoid redundant calls to FMD if the MAC driver already contains the desired
-+ * active PAUSE settings. Otherwise, the new active settings should be
-+ * reflected in FMan.
-+ */
-+int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
-+{
-+ struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
-+ int _errno = 0;
-+
-+ if (unlikely(rx != mac_dev->rx_pause_active)) {
-+ _errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
-+ if (likely(_errno == 0))
-+ mac_dev->rx_pause_active = rx;
-+ }
-+
-+ if (unlikely(tx != mac_dev->tx_pause_active)) {
-+ _errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
-+ if (likely(_errno == 0))
-+ mac_dev->tx_pause_active = tx;
-+ }
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(set_mac_active_pause);
-+
-+/* Determine the MAC RX/TX PAUSE frame settings based on PHY
-+ * autonegotiation or the values set via ethtool.
-+ */
-+void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
-+{
-+ struct phy_device *phy_dev = mac_dev->phy_dev;
-+ u16 lcl_adv, rmt_adv;
-+ u8 flowctrl;
-+
-+ *rx_pause = *tx_pause = false;
-+
-+ if (!phy_dev->duplex)
-+ return;
-+
-+ /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
-+ * are those set by ethtool.
-+ */
-+ if (!mac_dev->autoneg_pause) {
-+ *rx_pause = mac_dev->rx_pause_req;
-+ *tx_pause = mac_dev->tx_pause_req;
-+ return;
-+ }
-+
-+ /* Otherwise, PAUSE autonegotiation is enabled and the TX/RX PAUSE
-+ * settings depend on the result of the link negotiation.
-+ */
-+
-+ /* get local capabilities */
-+ lcl_adv = 0;
-+ if (phy_dev->advertising & ADVERTISED_Pause)
-+ lcl_adv |= ADVERTISE_PAUSE_CAP;
-+ if (phy_dev->advertising & ADVERTISED_Asym_Pause)
-+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
-+
-+ /* get link partner capabilities */
-+ rmt_adv = 0;
-+ if (phy_dev->pause)
-+ rmt_adv |= LPA_PAUSE_CAP;
-+ if (phy_dev->asym_pause)
-+ rmt_adv |= LPA_PAUSE_ASYM;
-+
-+ /* Calculate TX/RX settings based on local and peer advertised
-+ * symmetric/asymmetric PAUSE capabilities.
-+ */
-+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-+ if (flowctrl & FLOW_CTRL_RX)
-+ *rx_pause = true;
-+ if (flowctrl & FLOW_CTRL_TX)
-+ *tx_pause = true;
-+}
-+EXPORT_SYMBOL(get_pause_cfg);
-+
-+static void adjust_link_void(struct net_device *net_dev)
-+{
-+}
-+
-+static void adjust_link(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ struct phy_device *phy_dev = mac_dev->phy_dev;
-+ struct fm_mac_dev *fm_mac_dev;
-+ bool rx_pause, tx_pause;
-+ int _errno;
-+
-+ fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
-+ fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
-+ phy_dev->duplex);
-+
-+ get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
-+ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
-+}
-+
-+/* Initializes driver's PHY state, and attaches to the PHY.
-+ * Returns 0 on success.
-+ */
-+static int dtsec_init_phy(struct net_device *net_dev,
-+ struct mac_device *mac_dev)
-+{
-+ struct phy_device *phy_dev;
-+
-+ if (of_phy_is_fixed_link(mac_dev->phy_node))
-+ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
-+ 0, mac_dev->phy_if);
-+ else
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ &adjust_link, 0, mac_dev->phy_if);
-+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
-+ netdev_err(net_dev, "Could not connect to PHY %s\n",
-+ mac_dev->phy_node ?
-+ mac_dev->phy_node->full_name :
-+ mac_dev->fixed_bus_id);
-+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
-+ }
-+
-+ /* Remove any features not supported by the controller */
-+ phy_dev->supported &= mac_dev->if_support;
-+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
-+ * as most of the PHY drivers do not enable them by default.
-+ */
-+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-+ phy_dev->advertising = phy_dev->supported;
-+
-+ mac_dev->phy_dev = phy_dev;
-+
-+ return 0;
-+}
-+
-+static int xgmac_init_phy(struct net_device *net_dev,
-+ struct mac_device *mac_dev)
-+{
-+ struct phy_device *phy_dev;
-+
-+ if (of_phy_is_fixed_link(mac_dev->phy_node))
-+ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
-+ 0, mac_dev->phy_if);
-+ else
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ &adjust_link_void, 0, mac_dev->phy_if);
-+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
-+ netdev_err(net_dev, "Could not attach to PHY %s\n",
-+ mac_dev->phy_node ?
-+ mac_dev->phy_node->full_name :
-+ mac_dev->fixed_bus_id);
-+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
-+ }
-+
-+ phy_dev->supported &= mac_dev->if_support;
-+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
-+ * as most of the PHY drivers do not enable them by default.
-+ */
-+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-+ phy_dev->advertising = phy_dev->supported;
-+
-+ mac_dev->phy_dev = phy_dev;
-+
-+ return 0;
-+}
-+
-+static int memac_init_phy(struct net_device *net_dev,
-+ struct mac_device *mac_dev)
-+{
-+ struct phy_device *phy_dev;
-+
-+ if (of_phy_is_fixed_link(mac_dev->phy_node)) {
-+ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
-+ 0, mac_dev->phy_if);
-+ } else if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
-+ (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)) {
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ &adjust_link_void, 0,
-+ mac_dev->phy_if);
-+ } else {
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ &adjust_link, 0, mac_dev->phy_if);
-+ }
-+
-+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
-+ netdev_err(net_dev, "Could not connect to PHY %s\n",
-+ mac_dev->phy_node ?
-+ mac_dev->phy_node->full_name :
-+ mac_dev->fixed_bus_id);
-+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
-+ }
-+
-+ /* Remove any features not supported by the controller */
-+ phy_dev->supported &= mac_dev->if_support;
-+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
-+ * as most of the PHY drivers do not enable them by default.
-+ */
-+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-+ phy_dev->advertising = phy_dev->supported;
-+
-+ mac_dev->phy_dev = phy_dev;
-+
-+ return 0;
-+}
-+
-+static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
-+{
-+ int _errno, __errno;
-+
-+ _errno = fm_mac_disable(fm_mac_dev);
-+ __errno = fm_mac_free(fm_mac_dev);
-+
-+ if (unlikely(__errno < 0))
-+ _errno = __errno;
-+
-+ return _errno;
-+}
-+
-+static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
-+{
-+ const struct mac_priv_s *priv;
-+
-+ priv = macdev_priv(mac_dev);
-+ return priv->fm_mac;
-+}
-+
-+static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr;
-+ int i = 0, n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index);
-+
-+ FM_DMP_V32(buf, n, p_mm, tsec_id);
-+ FM_DMP_V32(buf, n, p_mm, tsec_id2);
-+ FM_DMP_V32(buf, n, p_mm, ievent);
-+ FM_DMP_V32(buf, n, p_mm, imask);
-+ FM_DMP_V32(buf, n, p_mm, ecntrl);
-+ FM_DMP_V32(buf, n, p_mm, ptv);
-+ FM_DMP_V32(buf, n, p_mm, tmr_ctrl);
-+ FM_DMP_V32(buf, n, p_mm, tmr_pevent);
-+ FM_DMP_V32(buf, n, p_mm, tmr_pemask);
-+ FM_DMP_V32(buf, n, p_mm, tctrl);
-+ FM_DMP_V32(buf, n, p_mm, rctrl);
-+ FM_DMP_V32(buf, n, p_mm, maccfg1);
-+ FM_DMP_V32(buf, n, p_mm, maccfg2);
-+ FM_DMP_V32(buf, n, p_mm, ipgifg);
-+ FM_DMP_V32(buf, n, p_mm, hafdup);
-+ FM_DMP_V32(buf, n, p_mm, maxfrm);
-+
-+ FM_DMP_V32(buf, n, p_mm, macstnaddr1);
-+ FM_DMP_V32(buf, n, p_mm, macstnaddr2);
-+
-+ for (i = 0; i < 7; ++i) {
-+ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1);
-+ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2);
-+ }
-+
-+ FM_DMP_V32(buf, n, p_mm, car1);
-+ FM_DMP_V32(buf, n, p_mm, car2);
-+
-+ return n;
-+}
-+
-+static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC -%d", h_mac->cell_index);
-+
-+ FM_DMP_V32(buf, n, p_mm, tgec_id);
-+ FM_DMP_V32(buf, n, p_mm, command_config);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr_0);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr_1);
-+ FM_DMP_V32(buf, n, p_mm, maxfrm);
-+ FM_DMP_V32(buf, n, p_mm, pause_quant);
-+ FM_DMP_V32(buf, n, p_mm, rx_fifo_sections);
-+ FM_DMP_V32(buf, n, p_mm, tx_fifo_sections);
-+ FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e);
-+ FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e);
-+ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
-+ FM_DMP_V32(buf, n, p_mm, mdio_cfg_status);
-+ FM_DMP_V32(buf, n, p_mm, mdio_command);
-+ FM_DMP_V32(buf, n, p_mm, mdio_data);
-+ FM_DMP_V32(buf, n, p_mm, mdio_regaddr);
-+ FM_DMP_V32(buf, n, p_mm, status);
-+ FM_DMP_V32(buf, n, p_mm, tx_ipg_len);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr_2);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr_3);
-+ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd);
-+ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr);
-+ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd);
-+ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr);
-+ FM_DMP_V32(buf, n, p_mm, imask);
-+ FM_DMP_V32(buf, n, p_mm, ievent);
-+
-+ return n;
-+}
-+
-+static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
-+ int i = 0, n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d", h_mac->cell_index);
-+
-+ FM_DMP_V32(buf, n, p_mm, command_config);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u);
-+ FM_DMP_V32(buf, n, p_mm, maxfrm);
-+ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
-+ FM_DMP_V32(buf, n, p_mm, ievent);
-+ FM_DMP_V32(buf, n, p_mm, tx_ipg_length);
-+ FM_DMP_V32(buf, n, p_mm, imask);
-+
-+ for (i = 0; i < 4; ++i)
-+ FM_DMP_V32(buf, n, p_mm, pause_quanta[i]);
-+
-+ for (i = 0; i < 4; ++i)
-+ FM_DMP_V32(buf, n, p_mm, pause_thresh[i]);
-+
-+ FM_DMP_V32(buf, n, p_mm, rx_pause_status);
-+
-+ for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) {
-+ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u);
-+ }
-+
-+ FM_DMP_V32(buf, n, p_mm, lpwake_timer);
-+ FM_DMP_V32(buf, n, p_mm, sleep_timer);
-+ FM_DMP_V32(buf, n, p_mm, statn_config);
-+ FM_DMP_V32(buf, n, p_mm, if_mode);
-+ FM_DMP_V32(buf, n, p_mm, if_status);
-+ FM_DMP_V32(buf, n, p_mm, hg_config);
-+ FM_DMP_V32(buf, n, p_mm, hg_pause_quanta);
-+ FM_DMP_V32(buf, n, p_mm, hg_pause_thresh);
-+ FM_DMP_V32(buf, n, p_mm, hgrx_pause_status);
-+ FM_DMP_V32(buf, n, p_mm, hg_fifos_status);
-+ FM_DMP_V32(buf, n, p_mm, rhm);
-+ FM_DMP_V32(buf, n, p_mm, thm);
-+
-+ return n;
-+}
-+
-+static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Rx stats", h_mac->cell_index);
-+
-+ /* Rx Statistics Counter */
-+ FM_DMP_V32(buf, n, p_mm, reoct_l);
-+ FM_DMP_V32(buf, n, p_mm, reoct_u);
-+ FM_DMP_V32(buf, n, p_mm, roct_l);
-+ FM_DMP_V32(buf, n, p_mm, roct_u);
-+ FM_DMP_V32(buf, n, p_mm, raln_l);
-+ FM_DMP_V32(buf, n, p_mm, raln_u);
-+ FM_DMP_V32(buf, n, p_mm, rxpf_l);
-+ FM_DMP_V32(buf, n, p_mm, rxpf_u);
-+ FM_DMP_V32(buf, n, p_mm, rfrm_l);
-+ FM_DMP_V32(buf, n, p_mm, rfrm_u);
-+ FM_DMP_V32(buf, n, p_mm, rfcs_l);
-+ FM_DMP_V32(buf, n, p_mm, rfcs_u);
-+ FM_DMP_V32(buf, n, p_mm, rvlan_l);
-+ FM_DMP_V32(buf, n, p_mm, rvlan_u);
-+ FM_DMP_V32(buf, n, p_mm, rerr_l);
-+ FM_DMP_V32(buf, n, p_mm, rerr_u);
-+ FM_DMP_V32(buf, n, p_mm, ruca_l);
-+ FM_DMP_V32(buf, n, p_mm, ruca_u);
-+ FM_DMP_V32(buf, n, p_mm, rmca_l);
-+ FM_DMP_V32(buf, n, p_mm, rmca_u);
-+ FM_DMP_V32(buf, n, p_mm, rbca_l);
-+ FM_DMP_V32(buf, n, p_mm, rbca_u);
-+ FM_DMP_V32(buf, n, p_mm, rdrp_l);
-+ FM_DMP_V32(buf, n, p_mm, rdrp_u);
-+ FM_DMP_V32(buf, n, p_mm, rpkt_l);
-+ FM_DMP_V32(buf, n, p_mm, rpkt_u);
-+ FM_DMP_V32(buf, n, p_mm, rund_l);
-+ FM_DMP_V32(buf, n, p_mm, rund_u);
-+ FM_DMP_V32(buf, n, p_mm, r64_l);
-+ FM_DMP_V32(buf, n, p_mm, r64_u);
-+ FM_DMP_V32(buf, n, p_mm, r127_l);
-+ FM_DMP_V32(buf, n, p_mm, r127_u);
-+ FM_DMP_V32(buf, n, p_mm, r255_l);
-+ FM_DMP_V32(buf, n, p_mm, r255_u);
-+ FM_DMP_V32(buf, n, p_mm, r511_l);
-+ FM_DMP_V32(buf, n, p_mm, r511_u);
-+ FM_DMP_V32(buf, n, p_mm, r1023_l);
-+ FM_DMP_V32(buf, n, p_mm, r1023_u);
-+ FM_DMP_V32(buf, n, p_mm, r1518_l);
-+ FM_DMP_V32(buf, n, p_mm, r1518_u);
-+ FM_DMP_V32(buf, n, p_mm, r1519x_l);
-+ FM_DMP_V32(buf, n, p_mm, r1519x_u);
-+ FM_DMP_V32(buf, n, p_mm, rovr_l);
-+ FM_DMP_V32(buf, n, p_mm, rovr_u);
-+ FM_DMP_V32(buf, n, p_mm, rjbr_l);
-+ FM_DMP_V32(buf, n, p_mm, rjbr_u);
-+ FM_DMP_V32(buf, n, p_mm, rfrg_l);
-+ FM_DMP_V32(buf, n, p_mm, rfrg_u);
-+ FM_DMP_V32(buf, n, p_mm, rcnp_l);
-+ FM_DMP_V32(buf, n, p_mm, rcnp_u);
-+ FM_DMP_V32(buf, n, p_mm, rdrntp_l);
-+ FM_DMP_V32(buf, n, p_mm, rdrntp_u);
-+
-+ return n;
-+}
-+
-+static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Tx stats", h_mac->cell_index);
-+
-+
-+ /* Tx Statistics Counter */
-+ FM_DMP_V32(buf, n, p_mm, teoct_l);
-+ FM_DMP_V32(buf, n, p_mm, teoct_u);
-+ FM_DMP_V32(buf, n, p_mm, toct_l);
-+ FM_DMP_V32(buf, n, p_mm, toct_u);
-+ FM_DMP_V32(buf, n, p_mm, txpf_l);
-+ FM_DMP_V32(buf, n, p_mm, txpf_u);
-+ FM_DMP_V32(buf, n, p_mm, tfrm_l);
-+ FM_DMP_V32(buf, n, p_mm, tfrm_u);
-+ FM_DMP_V32(buf, n, p_mm, tfcs_l);
-+ FM_DMP_V32(buf, n, p_mm, tfcs_u);
-+ FM_DMP_V32(buf, n, p_mm, tvlan_l);
-+ FM_DMP_V32(buf, n, p_mm, tvlan_u);
-+ FM_DMP_V32(buf, n, p_mm, terr_l);
-+ FM_DMP_V32(buf, n, p_mm, terr_u);
-+ FM_DMP_V32(buf, n, p_mm, tuca_l);
-+ FM_DMP_V32(buf, n, p_mm, tuca_u);
-+ FM_DMP_V32(buf, n, p_mm, tmca_l);
-+ FM_DMP_V32(buf, n, p_mm, tmca_u);
-+ FM_DMP_V32(buf, n, p_mm, tbca_l);
-+ FM_DMP_V32(buf, n, p_mm, tbca_u);
-+ FM_DMP_V32(buf, n, p_mm, tpkt_l);
-+ FM_DMP_V32(buf, n, p_mm, tpkt_u);
-+ FM_DMP_V32(buf, n, p_mm, tund_l);
-+ FM_DMP_V32(buf, n, p_mm, tund_u);
-+ FM_DMP_V32(buf, n, p_mm, t64_l);
-+ FM_DMP_V32(buf, n, p_mm, t64_u);
-+ FM_DMP_V32(buf, n, p_mm, t127_l);
-+ FM_DMP_V32(buf, n, p_mm, t127_u);
-+ FM_DMP_V32(buf, n, p_mm, t255_l);
-+ FM_DMP_V32(buf, n, p_mm, t255_u);
-+ FM_DMP_V32(buf, n, p_mm, t511_l);
-+ FM_DMP_V32(buf, n, p_mm, t511_u);
-+ FM_DMP_V32(buf, n, p_mm, t1023_l);
-+ FM_DMP_V32(buf, n, p_mm, t1023_u);
-+ FM_DMP_V32(buf, n, p_mm, t1518_l);
-+ FM_DMP_V32(buf, n, p_mm, t1518_u);
-+ FM_DMP_V32(buf, n, p_mm, t1519x_l);
-+ FM_DMP_V32(buf, n, p_mm, t1519x_u);
-+ FM_DMP_V32(buf, n, p_mm, tcnp_l);
-+ FM_DMP_V32(buf, n, p_mm, tcnp_u);
-+
-+ return n;
-+}
-+
-+int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ int n = nn;
-+
-+ n = h_mac->dump_mac_regs(h_mac, buf, n);
-+
-+ return n;
-+}
-+EXPORT_SYMBOL(fm_mac_dump_regs);
-+
-+int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ int n = nn;
-+
-+ if (h_mac->dump_mac_rx_stats)
-+ n = h_mac->dump_mac_rx_stats(h_mac, buf, n);
-+
-+ return n;
-+}
-+EXPORT_SYMBOL(fm_mac_dump_rx_stats);
-+
-+int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ int n = nn;
-+
-+ if (h_mac->dump_mac_tx_stats)
-+ n = h_mac->dump_mac_tx_stats(h_mac, buf, n);
-+
-+ return n;
-+}
-+EXPORT_SYMBOL(fm_mac_dump_tx_stats);
-+
-+static void __cold setup_dtsec(struct mac_device *mac_dev)
-+{
-+ mac_dev->init_phy = dtsec_init_phy;
-+ mac_dev->init = init;
-+ mac_dev->start = start;
-+ mac_dev->stop = stop;
-+ mac_dev->set_promisc = fm_mac_set_promiscuous;
-+ mac_dev->change_addr = fm_mac_modify_mac_addr;
-+ mac_dev->set_multi = set_multi;
-+ mac_dev->uninit = uninit;
-+ mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
-+ mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
-+ mac_dev->get_mac_handle = get_mac_handle;
-+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
-+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
-+ mac_dev->fm_rtc_enable = fm_rtc_enable;
-+ mac_dev->fm_rtc_disable = fm_rtc_disable;
-+ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
-+ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
-+ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
-+ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
-+ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
-+ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
-+ mac_dev->set_wol = fm_mac_set_wol;
-+ mac_dev->dump_mac_regs = dtsec_dump_regs;
-+}
-+
-+static void __cold setup_xgmac(struct mac_device *mac_dev)
-+{
-+ mac_dev->init_phy = xgmac_init_phy;
-+ mac_dev->init = init;
-+ mac_dev->start = start;
-+ mac_dev->stop = stop;
-+ mac_dev->set_promisc = fm_mac_set_promiscuous;
-+ mac_dev->change_addr = fm_mac_modify_mac_addr;
-+ mac_dev->set_multi = set_multi;
-+ mac_dev->uninit = uninit;
-+ mac_dev->get_mac_handle = get_mac_handle;
-+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
-+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
-+ mac_dev->set_wol = fm_mac_set_wol;
-+ mac_dev->dump_mac_regs = xgmac_dump_regs;
-+}
-+
-+static void __cold setup_memac(struct mac_device *mac_dev)
-+{
-+ mac_dev->init_phy = memac_init_phy;
-+ mac_dev->init = memac_init;
-+ mac_dev->start = start;
-+ mac_dev->stop = stop;
-+ mac_dev->set_promisc = fm_mac_set_promiscuous;
-+ mac_dev->change_addr = fm_mac_modify_mac_addr;
-+ mac_dev->set_multi = set_multi;
-+ mac_dev->uninit = uninit;
-+ mac_dev->get_mac_handle = get_mac_handle;
-+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
-+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
-+ mac_dev->fm_rtc_enable = fm_rtc_enable;
-+ mac_dev->fm_rtc_disable = fm_rtc_disable;
-+ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
-+ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
-+ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
-+ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
-+ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
-+ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
-+ mac_dev->set_wol = fm_mac_set_wol;
-+ mac_dev->dump_mac_regs = memac_dump_regs;
-+ mac_dev->dump_mac_rx_stats = memac_dump_regs_rx;
-+ mac_dev->dump_mac_tx_stats = memac_dump_regs_tx;
-+}
-+
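-+/* Per-MAC-type setup routines; mac_probe() indexes this array with the
-+ * position matched in mac_match[] (DTSEC, XGMAC or MEMAC).
-+ */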
-+void (*const mac_setup[])(struct mac_device *mac_dev) = {
-+ [DTSEC] = setup_dtsec,
-+ [XGMAC] = setup_xgmac,
-+ [MEMAC] = setup_memac
-+};
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
-@@ -0,0 +1,486 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_address.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_net.h>
-+#include <linux/of_mdio.h>
-+#include <linux/phy_fixed.h>
-+#include <linux/device.h>
-+#include <linux/phy.h>
-+#include <linux/io.h>
-+
-+#include "lnxwrp_fm_ext.h"
-+
-+#include "mac.h"
-+
-+#define DTSEC_SUPPORTED \
-+ (SUPPORTED_10baseT_Half \
-+ | SUPPORTED_10baseT_Full \
-+ | SUPPORTED_100baseT_Half \
-+ | SUPPORTED_100baseT_Full \
-+ | SUPPORTED_Autoneg \
-+ | SUPPORTED_Pause \
-+ | SUPPORTED_Asym_Pause \
-+ | SUPPORTED_MII)
-+
-+static const char phy_str[][11] = {
-+ [PHY_INTERFACE_MODE_MII] = "mii",
-+ [PHY_INTERFACE_MODE_GMII] = "gmii",
-+ [PHY_INTERFACE_MODE_SGMII] = "sgmii",
-+ [PHY_INTERFACE_MODE_QSGMII] = "qsgmii",
-+ [PHY_INTERFACE_MODE_TBI] = "tbi",
-+ [PHY_INTERFACE_MODE_RMII] = "rmii",
-+ [PHY_INTERFACE_MODE_RGMII] = "rgmii",
-+ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
-+ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
-+ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
-+ [PHY_INTERFACE_MODE_RTBI] = "rtbi",
-+ [PHY_INTERFACE_MODE_XGMII] = "xgmii",
-+ [PHY_INTERFACE_MODE_SGMII_2500] = "sgmii-2500",
-+};
-+
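-+/* Map a device tree phy-connection-type string to a phy_interface_t;
-+ * unrecognized strings fall back to PHY_INTERFACE_MODE_MII.
-+ */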
-+static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(phy_str); i++)
-+ if (strcmp(str, phy_str[i]) == 0)
-+ return (phy_interface_t)i;
-+
-+ return PHY_INTERFACE_MODE_MII;
-+}
-+
-+static const uint16_t phy2speed[] = {
-+ [PHY_INTERFACE_MODE_MII] = SPEED_100,
-+ [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RMII] = SPEED_100,
-+ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000,
-+ [PHY_INTERFACE_MODE_SGMII_2500] = SPEED_2500,
-+};
-+
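-+/* Allocate the mac_device and its MAC-specific private area in a single
-+ * devm allocation; the private area follows the structure and is reached
-+ * through macdev_priv().
-+ */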
-+static struct mac_device * __cold
-+alloc_macdev(struct device *dev, size_t sizeof_priv,
-+ void (*setup)(struct mac_device *mac_dev))
-+{
-+ struct mac_device *mac_dev;
-+
-+ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
-+ if (unlikely(mac_dev == NULL))
-+ mac_dev = ERR_PTR(-ENOMEM);
-+ else {
-+ mac_dev->dev = dev;
-+ dev_set_drvdata(dev, mac_dev);
-+ setup(mac_dev);
-+ }
-+
-+ return mac_dev;
-+}
-+
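-+/* Counterpart of alloc_macdev(): drop the driver data and uninitialize the
-+ * underlying FM MAC; the devm allocation is released with the device.
-+ */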
-+static int __cold free_macdev(struct mac_device *mac_dev)
-+{
-+ dev_set_drvdata(mac_dev->dev, NULL);
-+
-+ return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
-+}
-+
-+static const struct of_device_id mac_match[] = {
-+ [DTSEC] = {
-+ .compatible = "fsl,fman-1g-mac"
-+ },
-+ [XGMAC] = {
-+ .compatible = "fsl,fman-10g-mac"
-+ },
-+ [MEMAC] = {
-+ .compatible = "fsl,fman-memac"
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, mac_match);
-+
-+static int __cold mac_probe(struct platform_device *_of_dev)
-+{
-+ int _errno, i;
-+ struct device *dev;
-+ struct device_node *mac_node, *dev_node;
-+ struct mac_device *mac_dev;
-+ struct platform_device *of_dev;
-+ struct resource res;
-+ const char *char_prop = NULL;
-+ int nph;
-+ u32 cell_index;
-+ const struct of_device_id *match;
-+
-+ dev = &_of_dev->dev;
-+ mac_node = dev->of_node;
-+
-+ match = of_match_device(mac_match, dev);
-+ if (!match)
-+ return -EINVAL;
-+
-+ for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i;
-+ i++)
-+ ;
-+ BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
-+
-+ mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
-+ if (IS_ERR(mac_dev)) {
-+ _errno = PTR_ERR(mac_dev);
-+ dev_err(dev, "alloc_macdev() = %d\n", _errno);
-+ goto _return;
-+ }
-+
-+ INIT_LIST_HEAD(&mac_dev->mc_addr_list);
-+
-+ /* Get the FM node */
-+ dev_node = of_get_parent(mac_node);
-+ if (unlikely(dev_node == NULL)) {
-+ dev_err(dev, "of_get_parent(%s) failed\n",
-+ mac_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ of_dev = of_find_device_by_node(dev_node);
-+ if (unlikely(of_dev == NULL)) {
-+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
-+ dev_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_of_node_put;
-+ }
-+
-+ mac_dev->fm_dev = fm_bind(&of_dev->dev);
-+ if (unlikely(mac_dev->fm_dev == NULL)) {
-+ dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
-+ _errno = -ENODEV;
-+ goto _return_of_node_put;
-+ }
-+
-+ mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
-+ of_node_put(dev_node);
-+
-+ /* Get the address of the memory mapped registers */
-+ _errno = of_address_to_resource(mac_node, 0, &res);
-+ if (unlikely(_errno < 0)) {
-+ dev_err(dev, "of_address_to_resource(%s) = %d\n",
-+ mac_node->full_name, _errno);
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ mac_dev->res = __devm_request_region(
-+ dev,
-+ fm_get_mem_region(mac_dev->fm_dev),
-+ res.start, res.end + 1 - res.start, "mac");
-+ if (unlikely(mac_dev->res == NULL)) {
-+ dev_err(dev, "__devm_request_mem_region(mac) failed\n");
-+ _errno = -EBUSY;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
-+ mac_dev->res->end + 1
-+ - mac_dev->res->start);
-+ if (unlikely(mac_dev->vaddr == NULL)) {
-+ dev_err(dev, "devm_ioremap() failed\n");
-+ _errno = -EIO;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+#define TBIPA_OFFSET 0x1c
-+#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
-+ mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
-+ if (mac_dev->tbi_node) {
-+ u32 tbiaddr = TBIPA_DEFAULT_ADDR;
-+ const __be32 *tbi_reg;
-+ void __iomem *addr;
-+
-+ tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL);
-+ if (tbi_reg)
-+ tbiaddr = be32_to_cpup(tbi_reg);
-+ addr = mac_dev->vaddr + TBIPA_OFFSET;
-+ /* TODO: out_be32 does not exist on ARM */
-+ out_be32(addr, tbiaddr);
-+ }
-+
-+ if (!of_device_is_available(mac_node)) {
-+ devm_iounmap(dev, mac_dev->vaddr);
-+ __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
-+ res.start, res.end + 1 - res.start);
-+ fm_unbind(mac_dev->fm_dev);
-+ devm_kfree(dev, mac_dev);
-+ dev_set_drvdata(dev, NULL);
-+ return -ENODEV;
-+ }
-+
-+ /* Get the cell-index */
-+ _errno = of_property_read_u32(mac_node, "cell-index", &cell_index);
-+ if (unlikely(_errno)) {
-+ dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n",
-+ mac_node->full_name);
-+ goto _return_dev_set_drvdata;
-+ }
-+ mac_dev->cell_index = (uint8_t)cell_index;
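-+ /* cell-index values of 8 and above are folded back to a zero-based
-+ * index (the FMan device trees number the 10G MACs from 8)
-+ */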
-+ if (mac_dev->cell_index >= 8)
-+ mac_dev->cell_index -= 8;
-+
-+ /* Get the MAC address */
-+ _errno = of_get_mac_address(mac_node, mac_dev->addr);
-+ if (unlikely(_errno)) {
-+ dev_err(dev, "of_get_mac_address(%s) failed\n",
-+ mac_node->full_name);
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ /* Verify the number of port handles */
-+ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
-+ if (unlikely(nph < 0)) {
-+ dev_err(dev, "Cannot read port handles of mac node %s from device tree\n",
-+ mac_node->full_name);
-+ _errno = nph;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ if (nph != ARRAY_SIZE(mac_dev->port_dev)) {
-+ dev_err(dev, "Not supported number of port handles of mac node %s from device tree\n",
-+ mac_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
-+ if (unlikely(dev_node == NULL)) {
-+ dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n",
-+ mac_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_of_node_put;
-+ }
-+
-+ of_dev = of_find_device_by_node(dev_node);
-+ if (unlikely(of_dev == NULL)) {
-+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
-+ dev_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_of_node_put;
-+ }
-+
-+ mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
-+ if (unlikely(mac_dev->port_dev[i] == NULL)) {
-+ dev_err(dev, "dev_get_drvdata(%s) failed\n",
-+ dev_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_of_node_put;
-+ }
-+ of_node_put(dev_node);
-+ }
-+
-+ /* Get the PHY connection type */
-+ _errno = of_property_read_string(mac_node, "phy-connection-type",
-+ &char_prop);
-+ if (unlikely(_errno)) {
-+ dev_warn(dev,
-+ "Cannot read PHY connection type of mac node %s from device tree. Defaulting to MII\n",
-+ mac_node->full_name);
-+ mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
-+ } else {
-+ mac_dev->phy_if = str2phy(char_prop);
-+ }
-+
-+ mac_dev->link = false;
-+ mac_dev->half_duplex = false;
-+ mac_dev->speed = phy2speed[mac_dev->phy_if];
-+ mac_dev->max_speed = mac_dev->speed;
-+ mac_dev->if_support = DTSEC_SUPPORTED;
-+ /* We don't support half-duplex in SGMII mode */
-+ if (char_prop && (strstr(char_prop, "sgmii") ||
-+ strstr(char_prop, "qsgmii") || strstr(char_prop, "sgmii-2500")))
-+ mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
-+ SUPPORTED_100baseT_Half);
-+
-+ /* Gigabit support (no half-duplex) */
-+ if (mac_dev->max_speed == SPEED_1000 ||
-+ mac_dev->max_speed == SPEED_2500)
-+ mac_dev->if_support |= SUPPORTED_1000baseT_Full;
-+
-+ /* The 10G interface only supports one mode */
-+ if (strstr(char_prop, "xgmii"))
-+ mac_dev->if_support = SUPPORTED_10000baseT_Full;
-+
-+ /* Get the rest of the PHY information */
-+ mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
-+ if (!mac_dev->phy_node) {
-+ struct phy_device *phy;
-+
-+ if (!of_phy_is_fixed_link(mac_node)) {
-+ dev_err(dev, "Wrong PHY information of mac node %s\n",
-+ mac_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ _errno = of_phy_register_fixed_link(mac_node);
-+ if (_errno)
-+ goto _return_dev_set_drvdata;
-+
-+ mac_dev->fixed_link = devm_kzalloc(mac_dev->dev,
-+ sizeof(*mac_dev->fixed_link),
-+ GFP_KERNEL);
-+ if (!mac_dev->fixed_link) {
-+ _errno = -ENOMEM;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ mac_dev->phy_node = of_node_get(mac_node);
-+ phy = of_phy_find_device(mac_dev->phy_node);
-+ if (!phy) {
-+ _errno = -EINVAL;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ mac_dev->fixed_link->link = phy->link;
-+ mac_dev->fixed_link->speed = phy->speed;
-+ mac_dev->fixed_link->duplex = phy->duplex;
-+ mac_dev->fixed_link->pause = phy->pause;
-+ mac_dev->fixed_link->asym_pause = phy->asym_pause;
-+ }
-+
-+ _errno = mac_dev->init(mac_dev);
-+ if (unlikely(_errno < 0)) {
-+ dev_err(dev, "mac_dev->init() = %d\n", _errno);
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ /* pause frame autonegotiation enabled */
-+ mac_dev->autoneg_pause = true;
-+
-+ /* by initializing the values to false, force FMD to enable PAUSE frames
-+ * on RX and TX
-+ */
-+ mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
-+ mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
-+ _errno = set_mac_active_pause(mac_dev, true, true);
-+ if (unlikely(_errno < 0))
-+ dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
-+
-+ dev_info(dev,
-+ "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
-+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
-+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
-+
-+ goto _return;
-+
-+_return_of_node_put:
-+ of_node_put(dev_node);
-+_return_dev_set_drvdata:
-+ dev_set_drvdata(dev, NULL);
-+_return:
-+ return _errno;
-+}
-+
-+static int __cold mac_remove(struct platform_device *of_dev)
-+{
-+ int i, _errno;
-+ struct device *dev;
-+ struct mac_device *mac_dev;
-+
-+ dev = &of_dev->dev;
-+ mac_dev = (struct mac_device *)dev_get_drvdata(dev);
-+
-+ for_each_port_device(i, mac_dev->port_dev)
-+ fm_port_unbind(mac_dev->port_dev[i]);
-+
-+ fm_unbind(mac_dev->fm_dev);
-+
-+ _errno = free_macdev(mac_dev);
-+
-+ return _errno;
-+}
-+
-+static struct platform_driver mac_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .of_match_table = mac_match,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = mac_probe,
-+ .remove = mac_remove
-+};
-+
-+static int __init __cold mac_load(void)
-+{
-+ int _errno;
-+
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description);
-+
-+ _errno = platform_driver_register(&mac_driver);
-+ if (unlikely(_errno < 0)) {
-+ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
-+ }
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ return _errno;
-+}
-+module_init(mac_load);
-+
-+static void __exit __cold mac_unload(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ platform_driver_unregister(&mac_driver);
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+}
-+module_exit(mac_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
-@@ -0,0 +1,135 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __MAC_H
-+#define __MAC_H
-+
-+#include <linux/device.h> /* struct device, BUS_ID_SIZE */
-+#include <linux/if_ether.h> /* ETH_ALEN */
-+#include <linux/phy.h> /* phy_interface_t, struct phy_device */
-+#include <linux/list.h>
-+
-+#include "lnxwrp_fsl_fman.h" /* struct port_device */
-+
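-+/* MAC flavors; used as indices into mac_match[], mac_sizeof_priv[] and
-+ * mac_setup[]
-+ */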
-+enum {DTSEC, XGMAC, MEMAC};
-+
-+struct mac_device {
-+ struct device *dev;
-+ void *priv;
-+ uint8_t cell_index;
-+ struct resource *res;
-+ void __iomem *vaddr;
-+ uint8_t addr[ETH_ALEN];
-+ bool promisc;
-+
-+ struct fm *fm_dev;
-+ struct fm_port *port_dev[2];
-+
-+ phy_interface_t phy_if;
-+ u32 if_support;
-+ bool link;
-+ bool half_duplex;
-+ uint16_t speed;
-+ uint16_t max_speed;
-+ struct device_node *phy_node;
-+ char fixed_bus_id[MII_BUS_ID_SIZE + 3];
-+ struct device_node *tbi_node;
-+ struct phy_device *phy_dev;
-+ void *fm;
-+ /* List of multicast addresses */
-+ struct list_head mc_addr_list;
-+ struct fixed_phy_status *fixed_link;
-+
-+ bool autoneg_pause;
-+ bool rx_pause_req;
-+ bool tx_pause_req;
-+ bool rx_pause_active;
-+ bool tx_pause_active;
-+
-+ struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
-+ int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
-+ int (*init)(struct mac_device *mac_dev);
-+ int (*start)(struct mac_device *mac_dev);
-+ int (*stop)(struct mac_device *mac_dev);
-+ int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
-+ int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
-+ int (*set_multi)(struct net_device *net_dev,
-+ struct mac_device *mac_dev);
-+ int (*uninit)(struct fm_mac_dev *fm_mac_dev);
-+ int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
-+ int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
-+ int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
-+ int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
-+ int (*fm_rtc_enable)(struct fm *fm_dev);
-+ int (*fm_rtc_disable)(struct fm *fm_dev);
-+ int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
-+ int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
-+ int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
-+ int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
-+ int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
-+ int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
-+ uint64_t fiper);
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+ int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
-+ int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
-+#endif
-+ int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
-+ bool en);
-+ int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
-+ int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
-+ int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
-+};
-+
-+struct mac_address {
-+ uint8_t addr[ETH_ALEN];
-+ struct list_head list;
-+};
-+
-+#define get_fm_handle(net_dev) \
-+ (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
-+
-+#define for_each_port_device(i, port_dev) \
-+ for (i = 0; i < ARRAY_SIZE(port_dev); i++)
-+
-+static inline __attribute__((nonnull)) void *macdev_priv(
-+ const struct mac_device *mac_dev)
-+{
-+ return (void *)mac_dev + sizeof(*mac_dev);
-+}
-+
-+extern const char *mac_driver_description;
-+extern const size_t mac_sizeof_priv[];
-+extern void (*const mac_setup[])(struct mac_device *mac_dev);
-+
-+int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
-+void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
-+
-+#endif /* __MAC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
-@@ -0,0 +1,848 @@
-+/* Copyright 2011-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/* Offline Parsing / Host Command port driver for FSL QorIQ FMan.
-+ * Validates device-tree configuration and sets up the offline ports.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_platform.h>
-+#include <linux/fsl_qman.h>
-+
-+#include "offline_port.h"
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+
-+#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
-+/* Manip extra space and data alignment for fragmentation */
-+#define FRAG_MANIP_SPACE 128
-+#define FRAG_DATA_ALIGN 64
-+
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
-+MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
-+
-+
-+static const struct of_device_id oh_port_match_table[] = {
-+ {
-+ .compatible = "fsl,dpa-oh"
-+ },
-+ {
-+ .compatible = "fsl,dpa-oh-shared"
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, oh_port_match_table);
-+
-+#ifdef CONFIG_PM
-+
-+static int oh_suspend(struct device *dev)
-+{
-+ struct dpa_oh_config_s *oh_config;
-+
-+ oh_config = dev_get_drvdata(dev);
-+ return fm_port_suspend(oh_config->oh_port);
-+}
-+
-+static int oh_resume(struct device *dev)
-+{
-+ struct dpa_oh_config_s *oh_config;
-+
-+ oh_config = dev_get_drvdata(dev);
-+ return fm_port_resume(oh_config->oh_port);
-+}
-+
-+static const struct dev_pm_ops oh_pm_ops = {
-+ .suspend = oh_suspend,
-+ .resume = oh_resume,
-+};
-+
-+#define OH_PM_OPS (&oh_pm_ops)
-+
-+#else /* CONFIG_PM */
-+
-+#define OH_PM_OPS NULL
-+
-+#endif /* CONFIG_PM */
-+
-+/* Create a frame queue and schedule it on the given channel and work queue */
-+static uint32_t oh_fq_create(struct qman_fq *fq,
-+ uint32_t fq_id, uint16_t channel,
-+ uint16_t wq_id)
-+{
-+ struct qm_mcc_initfq fq_opts;
-+ uint32_t create_flags, init_flags;
-+ uint32_t ret = 0;
-+
-+ if (fq == NULL)
-+ return 1;
-+
-+ /* Set flags for FQ create */
-+ create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;
-+
-+ /* Create frame queue */
-+ ret = qman_create_fq(fq_id, create_flags, fq);
-+ if (ret != 0)
-+ return 1;
-+
-+ /* Set flags for FQ init */
-+ init_flags = QMAN_INITFQ_FLAG_SCHED;
-+
-+ /* Set FQ init options. Specify destination WQ ID and channel */
-+ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
-+ fq_opts.fqd.dest.wq = wq_id;
-+ fq_opts.fqd.dest.channel = channel;
-+
-+ /* Initialize frame queue */
-+ ret = qman_init_fq(fq, init_flags, &fq_opts);
-+ if (ret != 0) {
-+ qman_destroy_fq(fq, 0);
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+static void dump_fq(struct device *dev, int fqid, uint16_t channel)
-+{
-+ if (channel) {
-+ /* display fqs with a valid (!= 0) destination channel */
-+ dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel);
-+ }
-+}
-+
-+static void dump_fq_duple(struct device *dev, struct qman_fq *fqs,
-+ int fqs_count, uint16_t channel_id)
-+{
-+ int i;
-+ for (i = 0; i < fqs_count; i++)
-+ dump_fq(dev, (fqs + i)->fqid, channel_id);
-+}
-+
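-+/* Log the default/error queues and every ingress and egress frame queue
-+ * referenced or created for this OH port.
-+ */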
-+static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf)
-+{
-+ struct list_head *fq_list;
-+ struct fq_duple *fqd;
-+
-+ dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid);
-+ dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid);
-+
-+ /* TX queues (old initialization) */
-+ dev_info(dev, "Initialized queues:\n");
-+ dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt,
-+ conf->channel);
-+
-+ /* initialized ingress queues */
-+ list_for_each(fq_list, &conf->fqs_ingress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
-+ }
-+
-+ /* initialized egress queues */
-+ list_for_each(fq_list, &conf->fqs_egress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
-+ }
-+}
-+
-+/* Retire, take out of service and destroy a frame queue */
-+static void oh_fq_destroy(struct qman_fq *fq)
-+{
-+ int _errno = 0;
-+
-+ _errno = qman_retire_fq(fq, NULL);
-+ if (unlikely(_errno < 0))
-+ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__,
-+ qman_fq_fqid(fq), _errno);
-+
-+ _errno = qman_oos_fq(fq);
-+ if (unlikely(_errno < 0)) {
-+ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__,
-+ qman_fq_fqid(fq), _errno);
-+ }
-+
-+ qman_destroy_fq(fq, 0);
-+}
-+
-+/* Allocation code for the OH port's PCD frame queues */
-+static int __cold oh_alloc_pcd_fqids(struct device *dev,
-+ uint32_t num,
-+ uint8_t alignment,
-+ uint32_t *base_fqid)
-+{
-+ dev_crit(dev, "callback not implemented!\n");
-+ BUG();
-+
-+ return 0;
-+}
-+
-+static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
-+{
-+ dev_crit(dev, "callback not implemented!\n");
-+ BUG();
-+
-+ return 0;
-+}
-+
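-+/* Describe the buffer layout used by the OH port: room for private data,
-+ * parse and hash results on, timestamps off, plus the manip space and data
-+ * alignment reported by the FM driver.
-+ */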
-+static void oh_set_buffer_layout(struct fm_port *port,
-+ struct dpa_buffer_layout_s *layout)
-+{
-+ struct fm_port_params params;
-+
-+ layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
-+ layout->parse_results = true;
-+ layout->hash_results = true;
-+ layout->time_stamp = false;
-+
-+ fm_port_get_buff_layout_ext_params(port, &params);
-+ layout->manip_extra_space = params.manip_extra_space;
-+ layout->data_align = params.data_align;
-+}
-+
-+static int
-+oh_port_probe(struct platform_device *_of_dev)
-+{
-+ struct device *dpa_oh_dev;
-+ struct device_node *dpa_oh_node;
-+ int lenp, _errno = 0, fq_idx, duple_idx;
-+ int n_size, i, j, ret, duples_count;
-+ struct platform_device *oh_of_dev;
-+ struct device_node *oh_node, *bpool_node = NULL, *root_node;
-+ struct device *oh_dev;
-+ struct dpa_oh_config_s *oh_config = NULL;
-+ const __be32 *oh_all_queues;
-+ const __be32 *channel_ids;
-+ const __be32 *oh_tx_queues;
-+ uint32_t queues_count;
-+ uint32_t crt_fqid_base;
-+ uint32_t crt_fq_count;
-+ bool frag_enabled = false;
-+ struct fm_port_params oh_port_tx_params;
-+ struct fm_port_pcd_param oh_port_pcd_params;
-+ struct dpa_buffer_layout_s buf_layout;
-+
-+ /* True if the current partition owns the OH port. */
-+ bool init_oh_port;
-+
-+ const struct of_device_id *match;
-+ int crt_ext_pools_count;
-+ u32 ext_pool_size;
-+ u32 port_id;
-+ u32 channel_id;
-+
-+ int channel_ids_count;
-+ int channel_idx;
-+ struct fq_duple *fqd;
-+ struct list_head *fq_list, *fq_list_tmp;
-+
-+ const __be32 *bpool_cfg;
-+ uint32_t bpid;
-+
-+ memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
-+ dpa_oh_dev = &_of_dev->dev;
-+ dpa_oh_node = dpa_oh_dev->of_node;
-+ BUG_ON(dpa_oh_node == NULL);
-+
-+ match = of_match_device(oh_port_match_table, dpa_oh_dev);
-+ if (!match)
-+ return -EINVAL;
-+
-+ dev_dbg(dpa_oh_dev, "Probing OH port...\n");
-+
-+ /* Find the referenced OH node */
-+ oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0);
-+ if (oh_node == NULL) {
-+ dev_err(dpa_oh_dev,
-+ "Can't find OH node referenced from node %s\n",
-+ dpa_oh_node->full_name);
-+ return -EINVAL;
-+ }
-+ dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n",
-+ match->compatible);
-+
-+ _errno = of_property_read_u32(oh_node, "cell-index", &port_id);
-+ if (_errno) {
-+ dev_err(dpa_oh_dev, "No port id found in node %s\n",
-+ dpa_oh_node->full_name);
-+ goto return_kfree;
-+ }
-+
-+ _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id",
-+ &channel_id);
-+ if (_errno) {
-+ dev_err(dpa_oh_dev, "No channel id found in node %s\n",
-+ dpa_oh_node->full_name);
-+ goto return_kfree;
-+ }
-+
-+ oh_of_dev = of_find_device_by_node(oh_node);
-+ BUG_ON(oh_of_dev == NULL);
-+ oh_dev = &oh_of_dev->dev;
-+
-+ /* The OH port must be initialized exactly once.
-+ * The following scenarios are of interest:
-+ * - the node is Linux-private (will always initialize it);
-+ * - the node is shared between two Linux partitions
-+ * (only one of them will initialize it);
-+ * - the node is shared between a Linux and a LWE partition
-+ * ("fsl,dpa-oh-shared" - the port is owned by the other partition,
-+ * so Linux must not initialize it)
-+ */
-+
-+ /* Check if the current partition owns the OH port
-+ * and ought to initialize it. It may be the case that we leave this
-+ * to another (also Linux) partition.
-+ */
-+ init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared") != 0;
-+
-+ /* If we aren't the "owner" of the OH node, we're done here. */
-+ if (!init_oh_port) {
-+ dev_dbg(dpa_oh_dev,
-+ "Not owning the shared OH port %s, will not initialize it.\n",
-+ oh_node->full_name);
-+ of_node_put(oh_node);
-+ return 0;
-+ }
-+
-+ /* Allocate OH dev private data */
-+ oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
-+ if (oh_config == NULL) {
-+ dev_err(dpa_oh_dev,
-+ "Can't allocate private data for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ INIT_LIST_HEAD(&oh_config->fqs_ingress_list);
-+ INIT_LIST_HEAD(&oh_config->fqs_egress_list);
-+
-+ /* FQs that enter OH port */
-+ lenp = 0;
-+ oh_all_queues = of_get_property(dpa_oh_node,
-+ "fsl,qman-frame-queues-ingress", &lenp);
-+ if (lenp % (2 * sizeof(*oh_all_queues))) {
-+ dev_warn(dpa_oh_dev,
-+ "Wrong ingress queues format for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ /* just ignore the last unpaired value */
-+ }
-+
-+ duples_count = lenp / (2 * sizeof(*oh_all_queues));
-+ dev_err(dpa_oh_dev, "Allocating %d ingress frame queues duples\n",
-+ duples_count);
-+ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
-+ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
-+ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
-+
-+ fqd = devm_kzalloc(dpa_oh_dev,
-+ sizeof(struct fq_duple), GFP_KERNEL);
-+ if (!fqd) {
-+ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ fqd->fqs = devm_kzalloc(dpa_oh_dev,
-+ crt_fq_count * sizeof(struct qman_fq),
-+ GFP_KERNEL);
-+ if (!fqd->fqs) {
-+ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ for (j = 0; j < crt_fq_count; j++)
-+ (fqd->fqs + j)->fqid = crt_fqid_base + j;
-+ fqd->fqs_count = crt_fq_count;
-+ fqd->channel_id = (uint16_t)channel_id;
-+ list_add(&fqd->fq_list, &oh_config->fqs_ingress_list);
-+ }
-+
-+ /* create the ingress queues */
-+ list_for_each(fq_list, &oh_config->fqs_ingress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+
-+ for (j = 0; j < fqd->fqs_count; j++) {
-+ ret = oh_fq_create(fqd->fqs + j,
-+ (fqd->fqs + j)->fqid,
-+ fqd->channel_id, 3);
-+ if (ret != 0) {
-+ dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n",
-+ (fqd->fqs + j)->fqid,
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ }
-+ }
-+
-+ /* FQs that exit OH port */
-+ lenp = 0;
-+ oh_all_queues = of_get_property(dpa_oh_node,
-+ "fsl,qman-frame-queues-egress", &lenp);
-+ if (lenp % (2 * sizeof(*oh_all_queues))) {
-+ dev_warn(dpa_oh_dev,
-+ "Wrong egress queues format for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ /* just ignore the last unpaired value */
-+ }
-+
-+ duples_count = lenp / (2 * sizeof(*oh_all_queues));
-+ dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n",
-+ duples_count);
-+ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
-+ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
-+ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
-+
-+ fqd = devm_kzalloc(dpa_oh_dev,
-+ sizeof(struct fq_duple), GFP_KERNEL);
-+ if (!fqd) {
-+ dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ fqd->fqs = devm_kzalloc(dpa_oh_dev,
-+ crt_fq_count * sizeof(struct qman_fq),
-+ GFP_KERNEL);
-+ if (!fqd->fqs) {
-+ dev_err(dpa_oh_dev,
-+ "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ for (j = 0; j < crt_fq_count; j++)
-+ (fqd->fqs + j)->fqid = crt_fqid_base + j;
-+ fqd->fqs_count = crt_fq_count;
-+ /* channel ID is specified in another attribute */
-+ fqd->channel_id = 0;
-+ list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list);
-+ }
-+
-+ /* channel_ids for FQs that exit OH port */
-+ lenp = 0;
-+ channel_ids = of_get_property(dpa_oh_node,
-+ "fsl,qman-channel-ids-egress", &lenp);
-+
-+ channel_ids_count = lenp / (sizeof(*channel_ids));
-+ if (channel_ids_count != duples_count) {
-+ dev_warn(dpa_oh_dev,
-+ "Not all egress queues have a channel id for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ /* just ignore the queues that do not have a Channel ID */
-+ }
-+
-+ channel_idx = 0;
-+ list_for_each(fq_list, &oh_config->fqs_egress_list) {
-+ if (channel_idx + 1 > channel_ids_count)
-+ break;
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ fqd->channel_id =
-+ (uint16_t)be32_to_cpu(channel_ids[channel_idx++]);
-+ }
-+
-+ /* create egress queues */
-+ list_for_each(fq_list, &oh_config->fqs_egress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+
-+ if (fqd->channel_id == 0) {
-+ /* missing channel id in dts */
-+ continue;
-+ }
-+
-+ for (j = 0; j < fqd->fqs_count; j++) {
-+ ret = oh_fq_create(fqd->fqs + j,
-+ (fqd->fqs + j)->fqid,
-+ fqd->channel_id, 3);
-+ if (ret != 0) {
-+ dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n",
-+ (fqd->fqs + j)->fqid,
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ }
-+ }
-+
-+ /* Read FQ ids/nums for the DPA OH node */
-+ oh_all_queues = of_get_property(dpa_oh_node,
-+ "fsl,qman-frame-queues-oh", &lenp);
-+ if (oh_all_queues == NULL) {
-+ dev_err(dpa_oh_dev,
-+ "No frame queues have been defined for OH node %s referenced from node %s\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ /* Check that the OH error and default FQs are there */
-+ BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
-+ queues_count = lenp / (2 * sizeof(*oh_all_queues));
-+ if (queues_count != 2) {
-+ dev_err(dpa_oh_dev,
-+ "Error and Default queues must be defined for OH node %s referenced from node %s\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ /* Read the FQIDs defined for this OH port */
-+ dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
-+ fq_idx = 0;
-+
-+ /* Error FQID - must be present */
-+ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
-+ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
-+ if (crt_fq_count != 1) {
-+ dev_err(dpa_oh_dev,
-+ "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n",
-+ oh_node->full_name, dpa_oh_node->full_name,
-+ crt_fq_count);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ oh_config->error_fqid = crt_fqid_base;
-+ dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
-+ oh_config->error_fqid, oh_node->full_name);
-+
-+ /* Default FQID - must be present */
-+ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
-+ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
-+ if (crt_fq_count != 1) {
-+ dev_err(dpa_oh_dev,
-+ "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n",
-+ oh_node->full_name, dpa_oh_node->full_name,
-+ crt_fq_count);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ oh_config->default_fqid = crt_fqid_base;
-+ dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
-+ oh_config->default_fqid, oh_node->full_name);
-+
-+ /* TX FQID - presence is optional */
-+ oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx",
-+ &lenp);
-+ if (oh_tx_queues == NULL) {
-+ dev_dbg(dpa_oh_dev,
-+ "No tx queues have been defined for OH node %s referenced from node %s\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ goto config_port;
-+ }
-+
-+ /* Check that queues-tx has only a base and a count defined */
-+ BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
-+ queues_count = lenp / (2 * sizeof(*oh_tx_queues));
-+ if (queues_count != 1) {
-+ dev_err(dpa_oh_dev,
-+ "TX queues must be defined in only one <base count> tuple for OH node %s referenced from node %s\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ fq_idx = 0;
-+ crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]);
-+ crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]);
-+ oh_config->egress_cnt = crt_fq_count;
-+
-+ /* Allocate TX queues */
-+ dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
-+ oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
-+ crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
-+ if (oh_config->egress_fqs == NULL) {
-+ dev_err(dpa_oh_dev,
-+ "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ /* Create TX queues */
-+ for (i = 0; i < crt_fq_count; i++) {
-+ ret = oh_fq_create(oh_config->egress_fqs + i,
-+ crt_fqid_base + i, (uint16_t)channel_id, 3);
-+ if (ret != 0) {
-+ dev_err(dpa_oh_dev,
-+ "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n",
-+ crt_fqid_base + i, oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ }
-+
-+config_port:
-+ /* Get a handle to the fm_port so we can set
-+ * its configuration params
-+ */
-+ oh_config->oh_port = fm_port_bind(oh_dev);
-+ if (oh_config->oh_port == NULL) {
-+ dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
-+ oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
-+
-+ /* read the pool handlers */
-+ crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node,
-+ "fsl,bman-buffer-pools", NULL);
-+ if (crt_ext_pools_count <= 0) {
-+ dev_info(dpa_oh_dev,
-+ "OH port %s has no buffer pool. Fragmentation will not be enabled\n",
-+ oh_node->full_name);
-+ goto init_port;
-+ }
-+
-+ /* used for reading ext_pool_size */
-+ root_node = of_find_node_by_path("/");
-+ if (root_node == NULL) {
-+ dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ n_size = of_n_size_cells(root_node);
-+ of_node_put(root_node);
-+
-+ dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n",
-+ crt_ext_pools_count);
-+
-+ oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count;
-+
-+ for (i = 0; i < crt_ext_pools_count; i++) {
-+ bpool_node = of_parse_phandle(dpa_oh_node,
-+ "fsl,bman-buffer-pools", i);
-+ if (bpool_node == NULL) {
-+ dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid);
-+ if (_errno) {
-+ dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n");
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ oh_port_tx_params.pool_param[i].id = (uint8_t)bpid;
-+ dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid);
-+
-+ bpool_cfg = of_get_property(bpool_node,
-+ "fsl,bpool-ethernet-cfg", &lenp);
-+ if (bpool_cfg == NULL) {
-+ dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
-+ oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size;
-+ dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
-+ ext_pool_size);
-+ of_node_put(bpool_node);
-+
-+ }
-+
-+ if (buf_layout.data_align != FRAG_DATA_ALIGN ||
-+ buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
-+ goto init_port;
-+
-+ frag_enabled = true;
-+ dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d",
-+ port_id);
-+
-+init_port:
-+ of_node_put(oh_node);
-+ /* Set Tx params */
-+ dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
-+ oh_config->error_fqid, oh_config->default_fqid, (&buf_layout),
-+ frag_enabled);
-+ /* Set PCD params */
-+ oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
-+ oh_port_pcd_params.cbf = oh_free_pcd_fqids;
-+ oh_port_pcd_params.dev = dpa_oh_dev;
-+ fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
-+
-+ dev_set_drvdata(dpa_oh_dev, oh_config);
-+
-+ /* Enable the OH port */
-+ _errno = fm_port_enable(oh_config->oh_port);
-+ if (_errno)
-+ goto return_kfree;
-+
-+ dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
-+
-+ /* print all referenced and created queues */
-+ dump_oh_config(dpa_oh_dev, oh_config);
-+
-+ return 0;
-+
-+return_kfree:
-+ if (bpool_node)
-+ of_node_put(bpool_node);
-+ if (oh_node)
-+ of_node_put(oh_node);
-+
-+ /* the earliest error paths are taken before oh_config is allocated */
-+ if (oh_config == NULL)
-+ return _errno;
-+
-+ if (oh_config->egress_fqs)
-+ devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
-+
-+ list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ list_del(fq_list);
-+ devm_kfree(dpa_oh_dev, fqd->fqs);
-+ devm_kfree(dpa_oh_dev, fqd);
-+ }
-+
-+ list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ list_del(fq_list);
-+ devm_kfree(dpa_oh_dev, fqd->fqs);
-+ devm_kfree(dpa_oh_dev, fqd);
-+ }
-+
-+ devm_kfree(dpa_oh_dev, oh_config);
-+ return _errno;
-+}
-+
-+static int __cold oh_port_remove(struct platform_device *_of_dev)
-+{
-+ int _errno = 0, i;
-+ struct dpa_oh_config_s *oh_config;
-+
-+ pr_info("Removing OH port...\n");
-+
-+ oh_config = dev_get_drvdata(&_of_dev->dev);
-+ if (oh_config == NULL) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): No OH config in device private data!\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__);
-+ _errno = -ENODEV;
-+ goto return_error;
-+ }
-+
-+ if (oh_config->egress_fqs)
-+ for (i = 0; i < oh_config->egress_cnt; i++)
-+ oh_fq_destroy(oh_config->egress_fqs + i);
-+
-+ if (oh_config->oh_port == NULL) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): No fm port in device private data!\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__);
-+ _errno = -EINVAL;
-+ goto free_egress_fqs;
-+ }
-+
-+ _errno = fm_port_disable(oh_config->oh_port);
-+
-+free_egress_fqs:
-+ if (oh_config->egress_fqs)
-+ devm_kfree(&_of_dev->dev, oh_config->egress_fqs);
-+ devm_kfree(&_of_dev->dev, oh_config);
-+ dev_set_drvdata(&_of_dev->dev, NULL);
-+
-+return_error:
-+ return _errno;
-+}
-+
-+static struct platform_driver oh_port_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .of_match_table = oh_port_match_table,
-+ .owner = THIS_MODULE,
-+ .pm = OH_PM_OPS,
-+ },
-+ .probe = oh_port_probe,
-+ .remove = oh_port_remove
-+};
-+
-+static int __init __cold oh_port_load(void)
-+{
-+ int _errno;
-+
-+ pr_info(OH_MOD_DESCRIPTION "\n");
-+
-+ _errno = platform_driver_register(&oh_port_driver);
-+ if (_errno < 0) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
-+ }
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+ return _errno;
-+}
-+module_init(oh_port_load);
-+
-+static void __exit __cold oh_port_unload(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ platform_driver_unregister(&oh_port_driver);
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+}
-+module_exit(oh_port_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
-@@ -0,0 +1,59 @@
-+/* Copyright 2011 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __OFFLINE_PORT_H
-+#define __OFFLINE_PORT_H
-+
-+struct fm_port;
-+struct qman_fq;
-+
-+/* fqs are defined in duples (base_fq, fq_count) */
-+struct fq_duple {
-+ struct qman_fq *fqs;
-+ int fqs_count;
-+ uint16_t channel_id;
-+ struct list_head fq_list;
-+};
-+
-+/* OH port configuration */
-+struct dpa_oh_config_s {
-+ uint32_t error_fqid;
-+ uint32_t default_fqid;
-+ struct fm_port *oh_port;
-+ uint32_t egress_cnt;
-+ struct qman_fq *egress_fqs;
-+ uint16_t channel;
-+
-+ struct list_head fqs_ingress_list;
-+ struct list_head fqs_egress_list;
-+};
-+
-+#endif /* __OFFLINE_PORT_H */